-rw-r--r--Documentation/i2c/busses/i2c-viapro6
-rw-r--r--Documentation/i2c/writing-clients4
-rw-r--r--Documentation/networking/dccp.txt56
-rw-r--r--Documentation/networking/ip-sysctl.txt5
-rw-r--r--MAINTAINERS2
-rw-r--r--Makefile4
-rw-r--r--arch/arm/Kconfig2
-rw-r--r--arch/arm/Makefile1
-rw-r--r--arch/arm/configs/omap_h2_1610_defconfig97
-rw-r--r--arch/arm/lib/csumpartial.S4
-rw-r--r--arch/arm/mach-ixp4xx/Kconfig10
-rw-r--r--arch/arm/mach-ixp4xx/Makefile1
-rw-r--r--arch/arm/mach-ixp4xx/nslu2-pci.c77
-rw-r--r--arch/arm/mach-ixp4xx/nslu2-power.c92
-rw-r--r--arch/arm/mach-ixp4xx/nslu2-setup.c134
-rw-r--r--arch/arm/mach-omap1/Kconfig32
-rw-r--r--arch/arm/mach-omap1/Makefile5
-rw-r--r--arch/arm/mach-omap1/board-generic.c33
-rw-r--r--arch/arm/mach-omap1/board-h2.c15
-rw-r--r--arch/arm/mach-omap1/board-h3.c19
-rw-r--r--arch/arm/mach-omap1/board-innovator.c42
-rw-r--r--arch/arm/mach-omap1/board-netstar.c15
-rw-r--r--arch/arm/mach-omap1/board-osk.c17
-rw-r--r--arch/arm/mach-omap1/board-palmte.c87
-rw-r--r--arch/arm/mach-omap1/board-perseus2.c23
-rw-r--r--arch/arm/mach-omap1/board-voiceblue.c9
-rw-r--r--arch/arm/mach-omap1/clock.c792
-rw-r--r--arch/arm/mach-omap1/clock.h768
-rw-r--r--arch/arm/mach-omap1/devices.c221
-rw-r--r--arch/arm/mach-omap1/id.c9
-rw-r--r--arch/arm/mach-omap1/io.c13
-rw-r--r--arch/arm/mach-omap1/irq.c23
-rw-r--r--arch/arm/mach-omap1/leds-h2p2-debug.c43
-rw-r--r--arch/arm/mach-omap1/leds.c1
-rw-r--r--arch/arm/mach-omap1/mux.c289
-rw-r--r--arch/arm/mach-omap1/serial.c9
-rw-r--r--arch/arm/mach-omap1/time.c4
-rw-r--r--arch/arm/mach-omap2/Kconfig22
-rw-r--r--arch/arm/mach-omap2/Makefile13
-rw-r--r--arch/arm/mach-omap2/Makefile.boot3
-rw-r--r--arch/arm/mach-omap2/board-generic.c80
-rw-r--r--arch/arm/mach-omap2/board-h4.c197
-rw-r--r--arch/arm/mach-omap2/clock.c1129
-rw-r--r--arch/arm/mach-omap2/clock.h2103
-rw-r--r--arch/arm/mach-omap2/devices.c89
-rw-r--r--arch/arm/mach-omap2/id.c124
-rw-r--r--arch/arm/mach-omap2/io.c53
-rw-r--r--arch/arm/mach-omap2/irq.c149
-rw-r--r--arch/arm/mach-omap2/mux.c65
-rw-r--r--arch/arm/mach-omap2/prcm.h419
-rw-r--r--arch/arm/mach-omap2/serial.c180
-rw-r--r--arch/arm/mach-omap2/sram-fn.S333
-rw-r--r--arch/arm/mach-omap2/timer-gp.c126
-rw-r--r--arch/arm/mach-pxa/Kconfig7
-rw-r--r--arch/arm/mach-pxa/Makefile5
-rw-r--r--arch/arm/mach-pxa/corgi_ssp.c2
-rw-r--r--arch/arm/mach-pxa/sharpsl.h87
-rw-r--r--arch/arm/mach-pxa/sharpsl_pm.c992
-rw-r--r--arch/arm/mach-pxa/ssp.c128
-rw-r--r--arch/arm/mm/Kconfig6
-rw-r--r--arch/arm/plat-omap/Makefile2
-rw-r--r--arch/arm/plat-omap/clock.c1315
-rw-r--r--arch/arm/plat-omap/clock.h120
-rw-r--r--arch/arm/plat-omap/common.c50
-rw-r--r--arch/arm/plat-omap/devices.c381
-rw-r--r--arch/arm/plat-omap/dma.c910
-rw-r--r--arch/arm/plat-omap/gpio.c40
-rw-r--r--arch/arm/plat-omap/mcbsp.c22
-rw-r--r--arch/arm/plat-omap/mux.c65
-rw-r--r--arch/arm/plat-omap/pm.c103
-rw-r--r--arch/arm/plat-omap/sleep.S139
-rw-r--r--arch/arm/plat-omap/sram.c143
-rw-r--r--arch/arm/plat-omap/sram.h21
-rw-r--r--arch/arm/plat-omap/usb.c11
-rw-r--r--arch/i386/pci/fixup.c3
-rw-r--r--arch/ia64/kernel/efi.c2
-rw-r--r--arch/ia64/kernel/kprobes.c22
-rw-r--r--arch/ia64/kernel/mca.c120
-rw-r--r--arch/ia64/kernel/mca_drv.c17
-rw-r--r--arch/ia64/kernel/process.c6
-rw-r--r--arch/ia64/kernel/setup.c1
-rw-r--r--arch/ia64/kernel/signal.c11
-rw-r--r--arch/ia64/kernel/traps.c44
-rw-r--r--arch/ia64/mm/discontig.c19
-rw-r--r--arch/ia64/mm/tlb.c85
-rw-r--r--arch/ia64/pci/pci.c106
-rw-r--r--arch/ia64/sn/kernel/io_init.c2
-rw-r--r--arch/ia64/sn/kernel/xpc.h2
-rw-r--r--arch/ia64/sn/kernel/xpc_main.c102
-rw-r--r--arch/ia64/sn/kernel/xpc_partition.c8
-rw-r--r--arch/ia64/sn/pci/tioce_provider.c6
-rw-r--r--arch/powerpc/Kconfig4
-rw-r--r--arch/powerpc/kernel/Makefile13
-rw-r--r--arch/powerpc/kernel/asm-offsets.c1
-rw-r--r--arch/powerpc/kernel/cpu_setup_power4.S (renamed from arch/ppc64/kernel/cpu_setup_power4.S)8
-rw-r--r--arch/powerpc/kernel/cputable.c19
-rw-r--r--arch/powerpc/kernel/firmware.c (renamed from arch/ppc64/kernel/firmware.c)2
-rw-r--r--arch/powerpc/kernel/fpu.S24
-rw-r--r--arch/powerpc/kernel/head_64.S91
-rw-r--r--arch/powerpc/kernel/ioctl32.c (renamed from arch/ppc64/kernel/ioctl32.c)4
-rw-r--r--arch/powerpc/kernel/irq.c (renamed from arch/ppc64/kernel/irq.c)265
-rw-r--r--arch/powerpc/kernel/lparcfg.c (renamed from arch/ppc64/kernel/lparcfg.c)13
-rw-r--r--arch/powerpc/kernel/misc_32.S23
-rw-r--r--arch/powerpc/kernel/misc_64.S8
-rw-r--r--arch/powerpc/kernel/paca.c (renamed from arch/ppc64/kernel/pacaData.c)7
-rw-r--r--arch/powerpc/kernel/ppc_ksyms.c5
-rw-r--r--arch/powerpc/kernel/proc_ppc64.c (renamed from arch/ppc64/kernel/proc_ppc64.c)12
-rw-r--r--arch/powerpc/kernel/prom.c35
-rw-r--r--arch/powerpc/kernel/prom_init.c187
-rw-r--r--arch/powerpc/kernel/rtas-proc.c2
-rw-r--r--arch/powerpc/kernel/rtas.c5
-rw-r--r--arch/powerpc/kernel/rtas_pci.c (renamed from arch/ppc64/kernel/rtas_pci.c)47
-rw-r--r--arch/powerpc/kernel/setup-common.c37
-rw-r--r--arch/powerpc/kernel/setup.h6
-rw-r--r--arch/powerpc/kernel/setup_32.c18
-rw-r--r--arch/powerpc/kernel/setup_64.c93
-rw-r--r--arch/powerpc/kernel/signal_32.c1
-rw-r--r--arch/powerpc/kernel/smp.c10
-rw-r--r--arch/powerpc/kernel/sys_ppc32.c1
-rw-r--r--arch/powerpc/kernel/sysfs.c (renamed from arch/ppc64/kernel/sysfs.c)2
-rw-r--r--arch/powerpc/kernel/time.c31
-rw-r--r--arch/powerpc/kernel/traps.c2
-rw-r--r--arch/powerpc/lib/bitops.c2
-rw-r--r--arch/powerpc/mm/hash_utils_64.c38
-rw-r--r--arch/powerpc/mm/init_32.c3
-rw-r--r--arch/powerpc/mm/init_64.c20
-rw-r--r--arch/powerpc/mm/mem.c4
-rw-r--r--arch/powerpc/mm/pgtable_64.c7
-rw-r--r--arch/powerpc/mm/stab.c21
-rw-r--r--arch/powerpc/oprofile/op_model_power4.c3
-rw-r--r--arch/powerpc/platforms/chrp/setup.c4
-rw-r--r--arch/powerpc/platforms/iseries/irq.c24
-rw-r--r--arch/powerpc/platforms/iseries/misc.S1
-rw-r--r--arch/powerpc/platforms/iseries/setup.c27
-rw-r--r--arch/powerpc/platforms/maple/pci.c3
-rw-r--r--arch/powerpc/platforms/powermac/pci.c3
-rw-r--r--arch/powerpc/platforms/powermac/pic.c3
-rw-r--r--arch/powerpc/platforms/powermac/smp.c51
-rw-r--r--arch/powerpc/platforms/pseries/Makefile2
-rw-r--r--arch/powerpc/platforms/pseries/eeh.c (renamed from arch/ppc64/kernel/eeh.c)659
-rw-r--r--arch/powerpc/platforms/pseries/eeh_event.c155
-rw-r--r--arch/powerpc/platforms/pseries/iommu.c3
-rw-r--r--arch/powerpc/platforms/pseries/pci.c3
-rw-r--r--arch/powerpc/platforms/pseries/reconfig.c2
-rw-r--r--arch/powerpc/platforms/pseries/rtasd.c8
-rw-r--r--arch/powerpc/platforms/pseries/scanlog.c (renamed from arch/ppc64/kernel/scanlog.c)0
-rw-r--r--arch/powerpc/platforms/pseries/setup.c8
-rw-r--r--arch/powerpc/platforms/pseries/smp.c5
-rw-r--r--arch/powerpc/platforms/pseries/xics.c7
-rw-r--r--arch/powerpc/sysdev/u3_iommu.c2
-rw-r--r--arch/powerpc/xmon/Makefile2
-rw-r--r--arch/powerpc/xmon/nonstdio.c134
-rw-r--r--arch/powerpc/xmon/nonstdio.h28
-rw-r--r--arch/powerpc/xmon/setjmp.S176
-rw-r--r--arch/powerpc/xmon/start_32.c235
-rw-r--r--arch/powerpc/xmon/start_64.c167
-rw-r--r--arch/powerpc/xmon/start_8xx.c255
-rw-r--r--arch/powerpc/xmon/subr_prf.c54
-rw-r--r--arch/powerpc/xmon/xmon.c50
-rw-r--r--arch/ppc/boot/include/of1275.h3
-rw-r--r--arch/ppc/boot/of1275/Makefile2
-rw-r--r--arch/ppc/boot/of1275/call_prom.c74
-rw-r--r--arch/ppc/boot/of1275/claim.c97
-rw-r--r--arch/ppc/boot/of1275/finddevice.c19
-rw-r--r--arch/ppc/boot/openfirmware/Makefile3
-rw-r--r--arch/ppc/kernel/Makefile5
-rw-r--r--arch/ppc/kernel/head_booke.h2
-rw-r--r--arch/ppc/kernel/idle.c2
-rw-r--r--arch/ppc/kernel/irq.c165
-rw-r--r--arch/ppc/kernel/misc.S4
-rw-r--r--arch/ppc/kernel/pci.c14
-rw-r--r--arch/ppc/kernel/ppc_ksyms.c7
-rw-r--r--arch/ppc/kernel/setup.c1
-rw-r--r--arch/ppc/platforms/pmac_pic.c3
-rw-r--r--arch/ppc/platforms/prep_setup.c9
-rw-r--r--arch/ppc64/Kconfig4
-rw-r--r--arch/ppc64/boot/addRamDisk.c207
-rw-r--r--arch/ppc64/kernel/Makefile16
-rw-r--r--arch/ppc64/kernel/asm-offsets.c1
-rw-r--r--arch/ppc64/kernel/head.S84
-rw-r--r--arch/ppc64/kernel/idle.c1
-rw-r--r--arch/ppc64/kernel/misc.S8
-rw-r--r--arch/ppc64/kernel/nvram.c5
-rw-r--r--arch/ppc64/kernel/pci.c10
-rw-r--r--arch/ppc64/kernel/pci_dn.c21
-rw-r--r--arch/ppc64/kernel/prom.c9
-rw-r--r--arch/ppc64/kernel/prom_init.c3
-rw-r--r--arch/ppc64/kernel/vdso.c5
-rw-r--r--arch/sparc/kernel/cpu.c4
-rw-r--r--arch/sparc/kernel/pcic.c2
-rw-r--r--arch/sparc/mm/fault.c2
-rw-r--r--arch/sparc64/kernel/cpu.c4
-rw-r--r--arch/sparc64/kernel/ioctl32.c2
-rw-r--r--arch/sparc64/kernel/sbus.c2
-rw-r--r--arch/sparc64/mm/fault.c2
-rw-r--r--drivers/acpi/glue.c1
-rw-r--r--drivers/atm/horizon.c4
-rw-r--r--drivers/char/agp/ali-agp.c1
-rw-r--r--drivers/char/agp/amd-k7-agp.c1
-rw-r--r--drivers/char/agp/amd64-agp.c1
-rw-r--r--drivers/char/agp/ati-agp.c1
-rw-r--r--drivers/char/agp/backend.c9
-rw-r--r--drivers/char/agp/efficeon-agp.c1
-rw-r--r--drivers/char/agp/generic.c9
-rw-r--r--drivers/char/agp/i460-agp.c18
-rw-r--r--drivers/char/agp/intel-agp.c6
-rw-r--r--drivers/char/agp/nvidia-agp.c1
-rw-r--r--drivers/char/agp/sis-agp.c1
-rw-r--r--drivers/char/agp/sworks-agp.c1
-rw-r--r--drivers/char/agp/uninorth-agp.c1
-rw-r--r--drivers/char/agp/via-agp.c1
-rw-r--r--drivers/char/epca.c1
-rw-r--r--drivers/char/synclink.c1
-rw-r--r--drivers/char/synclinkmp.c1
-rw-r--r--drivers/char/watchdog/pcwd_pci.c1
-rw-r--r--drivers/char/watchdog/wdt_pci.c1
-rw-r--r--drivers/hwmon/hdaps.c41
-rw-r--r--drivers/hwmon/w83627hf.c16
-rw-r--r--drivers/i2c/busses/i2c-ali1535.c1
-rw-r--r--drivers/i2c/busses/i2c-ali1563.c1
-rw-r--r--drivers/i2c/busses/i2c-ali15x3.c1
-rw-r--r--drivers/i2c/busses/i2c-amd756.c1
-rw-r--r--drivers/i2c/busses/i2c-amd8111.c1
-rw-r--r--drivers/i2c/busses/i2c-hydra.c1
-rw-r--r--drivers/i2c/busses/i2c-i801.c1
-rw-r--r--drivers/i2c/busses/i2c-i810.c1
-rw-r--r--drivers/i2c/busses/i2c-nforce2.c1
-rw-r--r--drivers/i2c/busses/i2c-piix4.c1
-rw-r--r--drivers/i2c/busses/i2c-prosavage.c1
-rw-r--r--drivers/i2c/busses/i2c-savage4.c1
-rw-r--r--drivers/i2c/busses/i2c-sis5595.c1
-rw-r--r--drivers/i2c/busses/i2c-sis630.c1
-rw-r--r--drivers/i2c/busses/i2c-sis96x.c1
-rw-r--r--drivers/i2c/busses/i2c-via.c1
-rw-r--r--drivers/i2c/busses/i2c-viapro.c28
-rw-r--r--drivers/i2c/busses/i2c-voodoo3.c1
-rw-r--r--drivers/i2c/chips/ds1337.c4
-rw-r--r--drivers/ide/Kconfig9
-rw-r--r--drivers/ide/ide-floppy.c6
-rw-r--r--drivers/ide/ide-iops.c6
-rw-r--r--drivers/ide/ide-taskfile.c2
-rw-r--r--drivers/ide/ide.c1
-rw-r--r--drivers/ide/legacy/ide-cs.c7
-rw-r--r--drivers/ide/pci/Makefile1
-rw-r--r--drivers/ide/pci/amd74xx.c3
-rw-r--r--drivers/ide/pci/cs5535.c305
-rw-r--r--drivers/ide/pci/cy82c693.c2
-rw-r--r--drivers/ide/pci/siimage.c9
-rw-r--r--drivers/ide/setup-pci.c12
-rw-r--r--drivers/infiniband/core/user_mad.c129
-rw-r--r--drivers/infiniband/core/uverbs_cmd.c12
-rw-r--r--drivers/infiniband/core/verbs.c12
-rw-r--r--drivers/infiniband/hw/mthca/mthca_catas.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cmd.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_cq.c16
-rw-r--r--drivers/infiniband/hw/mthca/mthca_dev.h2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_main.c3
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.c3
-rw-r--r--drivers/infiniband/hw/mthca/mthca_provider.h1
-rw-r--r--drivers/infiniband/hw/mthca/mthca_qp.c113
-rw-r--r--drivers/infiniband/hw/mthca/mthca_srq.c22
-rw-r--r--drivers/infiniband/hw/mthca/mthca_wqe.h3
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib.h15
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_fs.c177
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_main.c72
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c26
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_vlan.c7
-rw-r--r--drivers/net/bnx2.c16
-rw-r--r--drivers/net/fs_enet/fs_enet-main.c1
-rw-r--r--drivers/net/fs_enet/mac-fcc.c1
-rw-r--r--drivers/net/fs_enet/mac-fec.c1
-rw-r--r--drivers/net/fs_enet/mac-scc.c1
-rw-r--r--drivers/net/spider_net.c1
-rw-r--r--drivers/pci/access.c2
-rw-r--r--drivers/pci/hotplug/pciehp.h135
-rw-r--r--drivers/pci/hotplug/pciehp_core.c108
-rw-r--r--drivers/pci/hotplug/pciehp_ctrl.c1979
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c113
-rw-r--r--drivers/pci/hotplug/pciehp_pci.c840
-rw-r--r--drivers/pci/hotplug/pciehprm.h52
-rw-r--r--drivers/pci/hotplug/pciehprm_acpi.c1727
-rw-r--r--drivers/pci/hotplug/pciehprm_nonacpi.c464
-rw-r--r--drivers/pci/hotplug/pciehprm_nonacpi.h56
-rw-r--r--drivers/pci/hotplug/rpadlpar_core.c81
-rw-r--r--drivers/pci/hotplug/rpaphp.h2
-rw-r--r--drivers/pci/hotplug/rpaphp_pci.c76
-rw-r--r--drivers/pci/hotplug/shpchp_pci.c2
-rw-r--r--drivers/pci/msi.c20
-rw-r--r--drivers/pci/pci-acpi.c11
-rw-r--r--drivers/pci/pci-driver.c11
-rw-r--r--drivers/pci/pci.c46
-rw-r--r--drivers/pci/quirks.c19
-rw-r--r--drivers/pcmcia/Kconfig2
-rw-r--r--drivers/pcmcia/Makefile6
-rw-r--r--drivers/pcmcia/au1000_db1x00.c1
-rw-r--r--drivers/pcmcia/au1000_generic.h2
-rw-r--r--drivers/pcmcia/au1000_pb1x00.c1
-rw-r--r--drivers/pcmcia/au1000_xxs1500.c1
-rw-r--r--drivers/pcmcia/i82365.c1
-rw-r--r--drivers/pcmcia/m8xx_pcmcia.c24
-rw-r--r--drivers/sbus/char/cpwatchdog.c2
-rw-r--r--drivers/sbus/char/display7seg.c4
-rw-r--r--drivers/sbus/char/rtc.c22
-rw-r--r--drivers/scsi/ide-scsi.c5
-rw-r--r--drivers/scsi/lpfc/lpfc_init.c1
-rw-r--r--drivers/usb/gadget/goku_udc.c1
-rw-r--r--drivers/usb/gadget/net2280.c1
-rw-r--r--drivers/usb/host/ehci-pci.c1
-rw-r--r--drivers/usb/host/ohci-pci.c1
-rw-r--r--drivers/usb/host/uhci-hcd.c1
-rw-r--r--drivers/video/backlight/corgi_bl.c6
-rw-r--r--fs/Kconfig53
-rw-r--r--fs/cifs/CHANGES2
-rw-r--r--fs/cifs/cifs_unicode.c5
-rw-r--r--fs/cifs/cifsfs.c11
-rw-r--r--fs/cifs/cifsproto.h2
-rw-r--r--fs/cifs/cifssmb.c2
-rw-r--r--fs/cifs/cn_cifs.h37
-rw-r--r--fs/cifs/connect.c1
-rw-r--r--fs/cifs/inode.c2
-rw-r--r--fs/jfs/namei.c3
-rw-r--r--include/asm-alpha/ide.h4
-rw-r--r--include/asm-arm/arch-ixp4xx/hardware.h1
-rw-r--r--include/asm-arm/arch-ixp4xx/irqs.h7
-rw-r--r--include/asm-arm/arch-ixp4xx/nslu2.h96
-rw-r--r--include/asm-arm/arch-omap/board-h4.h6
-rw-r--r--include/asm-arm/arch-omap/board-innovator.h4
-rw-r--r--include/asm-arm/arch-omap/clock.h91
-rw-r--r--include/asm-arm/arch-omap/common.h2
-rw-r--r--include/asm-arm/arch-omap/cpu.h82
-rw-r--r--include/asm-arm/arch-omap/dma.h261
-rw-r--r--include/asm-arm/arch-omap/entry-macro.S14
-rw-r--r--include/asm-arm/arch-omap/fpga.h4
-rw-r--r--include/asm-arm/arch-omap/gpio.h2
-rw-r--r--include/asm-arm/arch-omap/hardware.h8
-rw-r--r--include/asm-arm/arch-omap/io.h32
-rw-r--r--include/asm-arm/arch-omap/irqs.h13
-rw-r--r--include/asm-arm/arch-omap/memory.h4
-rw-r--r--include/asm-arm/arch-omap/menelaus.h22
-rw-r--r--include/asm-arm/arch-omap/mux.h327
-rw-r--r--include/asm-arm/arch-omap/omap1510.h6
-rw-r--r--include/asm-arm/arch-omap/omap24xx.h17
-rw-r--r--include/asm-arm/arch-omap/omapfb.h281
-rw-r--r--include/asm-arm/arch-omap/pm.h40
-rw-r--r--include/asm-arm/arch-omap/prcm.h429
-rw-r--r--include/asm-arm/arch-omap/sram.h38
-rw-r--r--include/asm-arm/arch-omap/system.h35
-rw-r--r--include/asm-arm/arch-omap/timex.h8
-rw-r--r--include/asm-arm/arch-omap/uncompress.h6
-rw-r--r--include/asm-arm/arch-pxa/sharpsl.h8
-rw-r--r--include/asm-arm/arch-pxa/ssp.h8
-rw-r--r--include/asm-i386/ide.h6
-rw-r--r--include/asm-i386/msi.h9
-rw-r--r--include/asm-i386/smp.h6
-rw-r--r--include/asm-ia64/kdebug.h30
-rw-r--r--include/asm-ia64/mmu_context.h81
-rw-r--r--include/asm-ia64/msi.h3
-rw-r--r--include/asm-ia64/tlbflush.h1
-rw-r--r--include/asm-powerpc/abs_addr.h (renamed from include/asm-ppc64/abs_addr.h)6
-rw-r--r--include/asm-powerpc/asm-compat.h55
-rw-r--r--include/asm-powerpc/atomic.h188
-rw-r--r--include/asm-powerpc/bitops.h41
-rw-r--r--include/asm-powerpc/bug.h19
-rw-r--r--include/asm-powerpc/cache.h40
-rw-r--r--include/asm-powerpc/cacheflush.h (renamed from include/asm-ppc64/cacheflush.h)52
-rw-r--r--include/asm-powerpc/compat.h (renamed from include/asm-ppc64/compat.h)8
-rw-r--r--include/asm-powerpc/cputable.h6
-rw-r--r--include/asm-powerpc/current.h27
-rw-r--r--include/asm-powerpc/eeh_event.h52
-rw-r--r--include/asm-powerpc/firmware.h6
-rw-r--r--include/asm-powerpc/futex.h5
-rw-r--r--include/asm-powerpc/hvcall.h (renamed from include/asm-ppc64/hvcall.h)14
-rw-r--r--include/asm-powerpc/hw_irq.h1
-rw-r--r--include/asm-powerpc/irq.h5
-rw-r--r--include/asm-powerpc/lppaca.h (renamed from include/asm-ppc64/lppaca.h)9
-rw-r--r--include/asm-powerpc/paca.h (renamed from include/asm-ppc64/paca.h)15
-rw-r--r--include/asm-powerpc/ppc-pci.h52
-rw-r--r--include/asm-powerpc/ppc_asm.h39
-rw-r--r--include/asm-powerpc/processor.h70
-rw-r--r--include/asm-powerpc/reg.h7
-rw-r--r--include/asm-powerpc/reg_8xx.h (renamed from include/asm-ppc/cache.h)50
-rw-r--r--include/asm-powerpc/signal.h (renamed from include/asm-ppc/signal.h)41
-rw-r--r--include/asm-powerpc/sparsemem.h4
-rw-r--r--include/asm-powerpc/system.h2
-rw-r--r--include/asm-powerpc/systemcfg.h (renamed from include/asm-ppc64/systemcfg.h)6
-rw-r--r--include/asm-powerpc/tce.h (renamed from include/asm-ppc64/tce.h)6
-rw-r--r--include/asm-powerpc/uaccess.h40
-rw-r--r--include/asm-powerpc/xmon.h1
-rw-r--r--include/asm-ppc/cacheflush.h49
-rw-r--r--include/asm-ppc/current.h11
-rw-r--r--include/asm-ppc64/cache.h36
-rw-r--r--include/asm-ppc64/current.h16
-rw-r--r--include/asm-ppc64/eeh.h46
-rw-r--r--include/asm-ppc64/mmu.h6
-rw-r--r--include/asm-ppc64/mmzone.h8
-rw-r--r--include/asm-ppc64/page.h2
-rw-r--r--include/asm-ppc64/pci-bridge.h1
-rw-r--r--include/asm-ppc64/pgalloc.h4
-rw-r--r--include/asm-ppc64/prom.h2
-rw-r--r--include/asm-ppc64/signal.h132
-rw-r--r--include/asm-ppc64/system.h2
-rw-r--r--include/asm-sh/ide.h4
-rw-r--r--include/asm-sh64/ide.h4
-rw-r--r--include/asm-x86_64/msi.h4
-rw-r--r--include/asm-x86_64/smp.h6
-rw-r--r--include/linux/connector.h2
-rw-r--r--include/linux/genetlink.h51
-rw-r--r--include/linux/ide.h8
-rw-r--r--include/linux/if_ether.h4
-rw-r--r--include/linux/netdevice.h7
-rw-r--r--include/linux/netfilter/nf_conntrack_common.h159
-rw-r--r--include/linux/netfilter/nf_conntrack_ftp.h44
-rw-r--r--include/linux/netfilter/nf_conntrack_sctp.h27
-rw-r--r--include/linux/netfilter/nf_conntrack_tcp.h56
-rw-r--r--include/linux/netfilter/nf_conntrack_tuple_common.h13
-rw-r--r--include/linux/netfilter/nfnetlink.h2
-rw-r--r--include/linux/netfilter_ipv4/ip_conntrack.h152
-rw-r--r--include/linux/netfilter_ipv4/ip_conntrack_ftp.h39
-rw-r--r--include/linux/netfilter_ipv4/ip_conntrack_icmp.h9
-rw-r--r--include/linux/netfilter_ipv4/ip_conntrack_sctp.h21
-rw-r--r--include/linux/netfilter_ipv4/ip_conntrack_tcp.h47
-rw-r--r--include/linux/netfilter_ipv4/ip_conntrack_tuple.h10
-rw-r--r--include/linux/netfilter_ipv6.h1
-rw-r--r--include/linux/netlink.h24
-rw-r--r--include/linux/pci-acpi.h5
-rw-r--r--include/linux/pci.h13
-rw-r--r--include/linux/pci_ids.h3
-rw-r--r--include/linux/skbuff.h46
-rw-r--r--include/linux/sysctl.h38
-rw-r--r--include/linux/tcp.h16
-rw-r--r--include/net/genetlink.h154
-rw-r--r--include/net/netfilter/ipv4/nf_conntrack_icmp.h11
-rw-r--r--include/net/netfilter/ipv4/nf_conntrack_ipv4.h43
-rw-r--r--include/net/netfilter/ipv6/nf_conntrack_icmpv6.h27
-rw-r--r--include/net/netfilter/nf_conntrack.h354
-rw-r--r--include/net/netfilter/nf_conntrack_compat.h108
-rw-r--r--include/net/netfilter/nf_conntrack_core.h76
-rw-r--r--include/net/netfilter/nf_conntrack_helper.h51
-rw-r--r--include/net/netfilter/nf_conntrack_l3proto.h93
-rw-r--r--include/net/netfilter/nf_conntrack_protocol.h105
-rw-r--r--include/net/netfilter/nf_conntrack_tuple.h190
-rw-r--r--include/net/netlink.h883
-rw-r--r--include/net/sock.h6
-rw-r--r--include/net/tcp.h74
-rw-r--r--include/rdma/ib_user_verbs.h9
-rw-r--r--include/rdma/ib_verbs.h2
-rw-r--r--kernel/ptrace.c2
-rw-r--r--kernel/sched.c3
-rw-r--r--kernel/signal.c2
-rw-r--r--kernel/sys.c2
-rw-r--r--mm/page_alloc.c2
-rw-r--r--net/core/datagram.c21
-rw-r--r--net/core/dev.c12
-rw-r--r--net/core/netpoll.c18
-rw-r--r--net/core/rtnetlink.c83
-rw-r--r--net/core/skbuff.c15
-rw-r--r--net/ipv4/icmp.c6
-rw-r--r--net/ipv4/igmp.c19
-rw-r--r--net/ipv4/inet_diag.c9
-rw-r--r--net/ipv4/ip_gre.c15
-rw-r--r--net/ipv4/netfilter/Kconfig41
-rw-r--r--net/ipv4/netfilter/Makefile6
-rw-r--r--net/ipv4/netfilter/ip_conntrack_netlink.c85
-rw-r--r--net/ipv4/netfilter/ip_conntrack_proto_icmp.c26
-rw-r--r--net/ipv4/netfilter/ip_conntrack_proto_tcp.c11
-rw-r--r--net/ipv4/netfilter/ip_nat_helper_pptp.c28
-rw-r--r--net/ipv4/netfilter/ipt_CLUSTERIP.c12
-rw-r--r--net/ipv4/netfilter/ipt_CONNMARK.c22
-rw-r--r--net/ipv4/netfilter/ipt_NOTRACK.c4
-rw-r--r--net/ipv4/netfilter/ipt_connbytes.c39
-rw-r--r--net/ipv4/netfilter/ipt_connmark.c10
-rw-r--r--net/ipv4/netfilter/ipt_conntrack.c96
-rw-r--r--net/ipv4/netfilter/ipt_helper.c54
-rw-r--r--net/ipv4/netfilter/ipt_state.c6
-rw-r--r--net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c571
-rw-r--r--net/ipv4/netfilter/nf_conntrack_proto_icmp.c301
-rw-r--r--net/ipv4/sysctl_net_ipv4.c8
-rw-r--r--net/ipv4/tcp.c3
-rw-r--r--net/ipv4/tcp_bic.c12
-rw-r--r--net/ipv4/tcp_cong.c40
-rw-r--r--net/ipv4/tcp_highspeed.c11
-rw-r--r--net/ipv4/tcp_htcp.c13
-rw-r--r--net/ipv4/tcp_hybla.c6
-rw-r--r--net/ipv4/tcp_input.c288
-rw-r--r--net/ipv4/tcp_ipv4.c28
-rw-r--r--net/ipv4/tcp_minisocks.c7
-rw-r--r--net/ipv4/tcp_output.c61
-rw-r--r--net/ipv4/tcp_scalable.c14
-rw-r--r--net/ipv4/tcp_timer.c4
-rw-r--r--net/ipv4/tcp_vegas.c42
-rw-r--r--net/ipv4/udp.c7
-rw-r--r--net/ipv6/addrconf.c3
-rw-r--r--net/ipv6/icmp.c21
-rw-r--r--net/ipv6/ip6_input.c5
-rw-r--r--net/ipv6/ip6_output.c6
-rw-r--r--net/ipv6/ip6_tunnel.c1
-rw-r--r--net/ipv6/netfilter/Kconfig14
-rw-r--r--net/ipv6/netfilter/Makefile6
-rw-r--r--net/ipv6/netfilter/ip6t_MARK.c6
-rw-r--r--net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c556
-rw-r--r--net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c272
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c885
-rw-r--r--net/ipv6/raw.c46
-rw-r--r--net/ipv6/route.c2
-rw-r--r--net/ipv6/tcp_ipv6.c20
-rw-r--r--net/ipv6/udp.c25
-rw-r--r--net/netfilter/Kconfig74
-rw-r--r--net/netfilter/Makefile8
-rw-r--r--net/netfilter/nf_conntrack_core.c1538
-rw-r--r--net/netfilter/nf_conntrack_ftp.c698
-rw-r--r--net/netfilter/nf_conntrack_l3proto_generic.c98
-rw-r--r--net/netfilter/nf_conntrack_proto_generic.c85
-rw-r--r--net/netfilter/nf_conntrack_proto_sctp.c670
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c1162
-rw-r--r--net/netfilter/nf_conntrack_proto_udp.c216
-rw-r--r--net/netfilter/nf_conntrack_standalone.c869
-rw-r--r--net/netfilter/nfnetlink.c21
-rw-r--r--net/netlink/Makefile2
-rw-r--r--net/netlink/af_netlink.c97
-rw-r--r--net/netlink/attr.c328
-rw-r--r--net/netlink/genetlink.c579
-rw-r--r--net/rxrpc/transport.c15
-rw-r--r--net/sunrpc/clnt.c32
-rw-r--r--net/sunrpc/socklib.c5
-rw-r--r--net/sunrpc/svcsock.c9
-rw-r--r--net/xfrm/xfrm_user.c69
-rw-r--r--sound/pci/ad1889.c1
-rw-r--r--sound/pci/ali5451/ali5451.c1
-rw-r--r--sound/pci/als4000.c1
-rw-r--r--sound/pci/atiixp.c1
-rw-r--r--sound/pci/atiixp_modem.c1
-rw-r--r--sound/pci/au88x0/au88x0.c1
-rw-r--r--sound/pci/azt3328.c1
-rw-r--r--sound/pci/bt87x.c5
-rw-r--r--sound/pci/ca0106/ca0106_main.c1
-rw-r--r--sound/pci/cmipci.c1
-rw-r--r--sound/pci/cs4281.c1
-rw-r--r--sound/pci/cs46xx/cs46xx.c1
-rw-r--r--sound/pci/emu10k1/emu10k1.c1
-rw-r--r--sound/pci/emu10k1/emu10k1x.c1
-rw-r--r--sound/pci/ens1370.c1
-rw-r--r--sound/pci/es1938.c1
-rw-r--r--sound/pci/es1968.c1
-rw-r--r--sound/pci/fm801.c1
-rw-r--r--sound/pci/hda/hda_intel.c1
-rw-r--r--sound/pci/ice1712/ice1712.c1
-rw-r--r--sound/pci/ice1712/ice1724.c1
-rw-r--r--sound/pci/intel8x0.c1
-rw-r--r--sound/pci/intel8x0m.c1
-rw-r--r--sound/pci/korg1212/korg1212.c1
-rw-r--r--sound/pci/maestro3.c1
-rw-r--r--sound/pci/mixart/mixart.c1
-rw-r--r--sound/pci/nm256/nm256.c1
-rw-r--r--sound/pci/rme32.c1
-rw-r--r--sound/pci/rme96.c1
-rw-r--r--sound/pci/rme9652/hdsp.c1
-rw-r--r--sound/pci/rme9652/hdspm.c1
-rw-r--r--sound/pci/rme9652/rme9652.c1
-rw-r--r--sound/pci/sonicvibes.c1
-rw-r--r--sound/pci/trident/trident.c1
-rw-r--r--sound/pci/via82xx.c1
-rw-r--r--sound/pci/via82xx_modem.c1
-rw-r--r--sound/pci/vx222/vx222.c1
-rw-r--r--sound/pci/ymfpci/ymfpci.c1
564 files changed, 29058 insertions, 11882 deletions
diff --git a/Documentation/i2c/busses/i2c-viapro b/Documentation/i2c/busses/i2c-viapro
index 9363b8bd610..16775663b9f 100644
--- a/Documentation/i2c/busses/i2c-viapro
+++ b/Documentation/i2c/busses/i2c-viapro
@@ -7,12 +7,10 @@ Supported adapters:
* VIA Technologies, Inc. VT82C686A/B
Datasheet: Sometimes available at the VIA website
- * VIA Technologies, Inc. VT8231, VT8233, VT8233A, VT8235, VT8237
- Datasheet: available on request from Via
+ * VIA Technologies, Inc. VT8231, VT8233, VT8233A, VT8235, VT8237R
+ Datasheet: available on request from VIA
Authors:
- Frodo Looijaard <frodol@dds.nl>,
- Philip Edelbrock <phil@netroedge.com>,
Kyösti Mälkki <kmalkki@cc.hut.fi>,
Mark D. Studebaker <mdsxyz123@yahoo.com>,
Jean Delvare <khali@linux-fr.org>
diff --git a/Documentation/i2c/writing-clients b/Documentation/i2c/writing-clients
index cff7b652588..d19993cc060 100644
--- a/Documentation/i2c/writing-clients
+++ b/Documentation/i2c/writing-clients
@@ -412,7 +412,7 @@ For now, you can ignore the `flags' parameter. It is there for future use.
release_region(address,FOO_EXTENT);
/* SENSORS ONLY END */
ERROR1:
- kfree(new_client);
+ kfree(data);
ERROR0:
return err;
}
@@ -443,7 +443,7 @@ much simpler than the attachment code, fortunately!
release_region(client->addr,LM78_EXTENT);
/* HYBRID SENSORS CHIP ONLY END */
- kfree(data);
+ kfree(i2c_get_clientdata(client));
return 0;
}
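As an aside, here is a minimal sketch of the corrected detach path that this hunk documents, written against the 2.6-era i2c client API used throughout this tree; the foo_detach_client name and error message are illustrative assumptions, not part of the patch.

static int foo_detach_client(struct i2c_client *client)
{
	int err;

	/* Deregister the client first; bail out if the core refuses. */
	if ((err = i2c_detach_client(client))) {
		dev_err(&client->dev, "Client deregistration failed, "
			"client not detached.\n");
		return err;
	}

	/* Free the private data looked up through the client, as the
	 * updated documentation recommends, instead of relying on a
	 * separately tracked pointer.
	 */
	kfree(i2c_get_clientdata(client));

	return 0;
}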
diff --git a/Documentation/networking/dccp.txt b/Documentation/networking/dccp.txt
new file mode 100644
index 00000000000..c45daabd3bf
--- /dev/null
+++ b/Documentation/networking/dccp.txt
@@ -0,0 +1,56 @@
+DCCP protocol
+=============
+
+Last updated: 10 November 2005
+
+Contents
+========
+
+- Introduction
+- Missing features
+- Socket options
+- Notes
+
+Introduction
+============
+
+Datagram Congestion Control Protocol (DCCP) is an unreliable,
+connection-based protocol designed to solve issues present in UDP and TCP,
+particularly for real-time and multimedia traffic.
+
+It has a base protocol and pluggable congestion control IDs (CCIDs).
+
+It is at draft RFC status and the homepage for DCCP as a protocol is at:
+ http://www.icir.org/kohler/dcp/
+
+Missing features
+================
+
+The DCCP implementation does not currently have all the features that are in
+the draft RFC.
+
+In particular the following are missing:
+- CCID2 support
+- feature negotiation
+
+When testing against other implementations, it appears that elapsed time
+options are not coded in compliance with the specification.
+
+Socket options
+==============
+
+DCCP_SOCKOPT_PACKET_SIZE is used for CCID3 to set default packet size for
+calculations.
+
+DCCP_SOCKOPT_SERVICE sets the service code. This is compulsory as per the
+specification. If you don't set it, you will get EPROTO.
+
+Notes
+=====
+
+SELinux does not yet have support for DCCP. You will need to turn it off or
+else you will get EACCES.
+
+DCCP does not travel through NAT successfully at present. This is because
+the checksum covers the pseudo-header as per TCP and UDP. It should be
+relatively trivial to add Linux NAT support for DCCP.
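To make the socket-option notes above concrete, here is a hedged userspace sketch (an illustration, not part of the patch) of setting DCCP_SOCKOPT_SERVICE before using a DCCP socket. The fallback numeric defines and the service value 42 are assumptions for this era's headers and may differ on other systems.

#include <stdio.h>
#include <stdint.h>
#include <unistd.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>

#ifndef SOCK_DCCP
#define SOCK_DCCP		6	/* assumed value, check your headers */
#endif
#ifndef IPPROTO_DCCP
#define IPPROTO_DCCP		33	/* assumed value */
#endif
#ifndef SOL_DCCP
#define SOL_DCCP		269	/* assumed value */
#endif
#ifndef DCCP_SOCKOPT_SERVICE
#define DCCP_SOCKOPT_SERVICE	2	/* assumed value */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DCCP, IPPROTO_DCCP);
	uint32_t service = htonl(42);	/* arbitrary example service code */

	if (fd < 0) {
		perror("socket");
		return 1;
	}

	/* Without this, later calls fail with EPROTO as described above. */
	if (setsockopt(fd, SOL_DCCP, DCCP_SOCKOPT_SERVICE,
		       &service, sizeof(service)) < 0)
		perror("setsockopt DCCP_SOCKOPT_SERVICE");

	close(fd);
	return 0;
}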
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 65895bb5141..ebc09a159f6 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -78,6 +78,11 @@ inet_peer_gc_maxtime - INTEGER
TCP variables:
+tcp_abc - INTEGER
+ Controls Appropriate Byte Count defined in RFC 3465. If set to
+ 0, congestion avoidance is performed once per ACK. 1 is the
+ conservative value, and 2 is more aggressive.
+
tcp_syn_retries - INTEGER
Number of times initial SYNs for an active TCP connection attempt
will be retransmitted. Should not be higher than 255. Default value
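As a worked example of the new tcp_abc knob (an illustration, not part of the patch), the value can be changed at run time by writing the sysctl through procfs; the path below assumes the standard /proc/sys layout.

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_abc", "w");

	if (!f) {
		perror("tcp_abc not available on this kernel");
		return 1;
	}

	/* 0 = congestion avoidance once per ACK (ABC off), 1 = the
	 * conservative byte-counting mode, 2 = more aggressive, as
	 * described in the documentation hunk above.
	 */
	fputs("1\n", f);
	fclose(f);

	return 0;
}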
diff --git a/MAINTAINERS b/MAINTAINERS
index 5541f9970b8..0b03a88e88b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -707,7 +707,7 @@ DCCP PROTOCOL
P: Arnaldo Carvalho de Melo
M: acme@mandriva.com
L: dccp@vger.kernel.org
-W: http://www.wlug.org.nz/DCCP
+W: http://linux-net.osdl.org/index.php/DCCP
S: Maintained
DECnet NETWORK LAYER
diff --git a/Makefile b/Makefile
index ea96da1572d..6d1f727f439 100644
--- a/Makefile
+++ b/Makefile
@@ -347,7 +347,7 @@ AFLAGS_KERNEL =
# Needed to be compatible with the O= option
LINUXINCLUDE := -Iinclude \
$(if $(KBUILD_SRC),-Iinclude2 -I$(srctree)/include) \
- -imacros include/linux/autoconf.h
+ -include include/linux/autoconf.h
CPPFLAGS := -D__KERNEL__ $(LINUXINCLUDE)
@@ -407,7 +407,7 @@ outputmakefile:
# of make so .config is not included in this case either (for *config).
no-dot-config-targets := clean mrproper distclean \
- cscope TAGS tags help %docs check%
+ cscope TAGS tags help %docs check% kernelrelease
config-targets := 0
mixed-targets := 0
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index ec77721507c..3df7cbd924a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -239,6 +239,8 @@ source "arch/arm/plat-omap/Kconfig"
source "arch/arm/mach-omap1/Kconfig"
+source "arch/arm/mach-omap2/Kconfig"
+
source "arch/arm/mach-s3c2410/Kconfig"
source "arch/arm/mach-lh7a40x/Kconfig"
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 114cda7f1b7..81bd2193fe6 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -93,6 +93,7 @@ textaddr-$(CONFIG_ARCH_FORTUNET) := 0xc0008000
machine-$(CONFIG_ARCH_IXP4XX) := ixp4xx
machine-$(CONFIG_ARCH_IXP2000) := ixp2000
machine-$(CONFIG_ARCH_OMAP1) := omap1
+ machine-$(CONFIG_ARCH_OMAP2) := omap2
incdir-$(CONFIG_ARCH_OMAP) := omap
machine-$(CONFIG_ARCH_S3C2410) := s3c2410
machine-$(CONFIG_ARCH_LH7A40X) := lh7a40x
diff --git a/arch/arm/configs/omap_h2_1610_defconfig b/arch/arm/configs/omap_h2_1610_defconfig
index 4198677cd39..529f0f72e1e 100644
--- a/arch/arm/configs/omap_h2_1610_defconfig
+++ b/arch/arm/configs/omap_h2_1610_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.13
-# Mon Sep 5 18:07:12 2005
+# Linux kernel version: 2.6.14
+# Wed Nov 9 18:53:40 2005
#
CONFIG_ARM=y
CONFIG_MMU=y
@@ -22,6 +22,7 @@ CONFIG_INIT_ENV_ARG_LIMIT=32
# General setup
#
CONFIG_LOCALVERSION=""
+CONFIG_LOCALVERSION_AUTO=y
CONFIG_SWAP=y
CONFIG_SYSVIPC=y
# CONFIG_POSIX_MQUEUE is not set
@@ -31,6 +32,7 @@ CONFIG_SYSCTL=y
# CONFIG_HOTPLUG is not set
CONFIG_KOBJECT_UEVENT=y
# CONFIG_IKCONFIG is not set
+CONFIG_INITRAMFS_SOURCE=""
# CONFIG_EMBEDDED is not set
CONFIG_KALLSYMS=y
# CONFIG_KALLSYMS_EXTRA_PASS is not set
@@ -60,6 +62,23 @@ CONFIG_OBSOLETE_MODPARM=y
# CONFIG_KMOD is not set
#
+# Block layer
+#
+
+#
+# IO Schedulers
+#
+CONFIG_IOSCHED_NOOP=y
+CONFIG_IOSCHED_AS=y
+CONFIG_IOSCHED_DEADLINE=y
+CONFIG_IOSCHED_CFQ=y
+CONFIG_DEFAULT_AS=y
+# CONFIG_DEFAULT_DEADLINE is not set
+# CONFIG_DEFAULT_CFQ is not set
+# CONFIG_DEFAULT_NOOP is not set
+CONFIG_DEFAULT_IOSCHED="anticipatory"
+
+#
# System Type
#
# CONFIG_ARCH_CLPS7500 is not set
@@ -81,6 +100,7 @@ CONFIG_OBSOLETE_MODPARM=y
# CONFIG_ARCH_LH7A40X is not set
CONFIG_ARCH_OMAP=y
# CONFIG_ARCH_VERSATILE is not set
+# CONFIG_ARCH_REALVIEW is not set
# CONFIG_ARCH_IMX is not set
# CONFIG_ARCH_H720X is not set
# CONFIG_ARCH_AAEC2000 is not set
@@ -112,7 +132,7 @@ CONFIG_OMAP_SERIAL_WAKE=y
# OMAP Core Type
#
# CONFIG_ARCH_OMAP730 is not set
-# CONFIG_ARCH_OMAP1510 is not set
+# CONFIG_ARCH_OMAP15XX is not set
CONFIG_ARCH_OMAP16XX=y
#
@@ -177,6 +197,8 @@ CONFIG_FLATMEM_MANUAL=y
# CONFIG_SPARSEMEM_MANUAL is not set
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
+# CONFIG_SPARSEMEM_STATIC is not set
+CONFIG_SPLIT_PTLOCK_CPUS=4096
# CONFIG_LEDS is not set
CONFIG_ALIGNMENT_TRAP=y
@@ -258,14 +280,19 @@ CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_ESP is not set
# CONFIG_INET_IPCOMP is not set
# CONFIG_INET_TUNNEL is not set
-CONFIG_IP_TCPDIAG=y
-# CONFIG_IP_TCPDIAG_IPV6 is not set
+CONFIG_INET_DIAG=y
+CONFIG_INET_TCP_DIAG=y
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_BIC=y
# CONFIG_IPV6 is not set
# CONFIG_NETFILTER is not set
#
+# DCCP Configuration (EXPERIMENTAL)
+#
+# CONFIG_IP_DCCP is not set
+
+#
# SCTP Configuration (EXPERIMENTAL)
#
# CONFIG_IP_SCTP is not set
@@ -281,6 +308,10 @@ CONFIG_TCP_CONG_BIC=y
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
+
+#
+# QoS and/or fair queueing
+#
# CONFIG_NET_SCHED is not set
# CONFIG_NET_CLS_ROUTE is not set
@@ -291,6 +322,7 @@ CONFIG_TCP_CONG_BIC=y
# CONFIG_HAMRADIO is not set
# CONFIG_IRDA is not set
# CONFIG_BT is not set
+# CONFIG_IEEE80211 is not set
#
# Device Drivers
@@ -328,21 +360,13 @@ CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_BLK_DEV_INITRD=y
-CONFIG_INITRAMFS_SOURCE=""
# CONFIG_CDROM_PKTCDVD is not set
-
-#
-# IO Schedulers
-#
-CONFIG_IOSCHED_NOOP=y
-CONFIG_IOSCHED_AS=y
-CONFIG_IOSCHED_DEADLINE=y
-CONFIG_IOSCHED_CFQ=y
CONFIG_ATA_OVER_ETH=m
#
# SCSI device support
#
+# CONFIG_RAID_ATTRS is not set
CONFIG_SCSI=y
CONFIG_SCSI_PROC_FS=y
@@ -369,10 +393,12 @@ CONFIG_SCSI_PROC_FS=y
# CONFIG_SCSI_SPI_ATTRS is not set
# CONFIG_SCSI_FC_ATTRS is not set
# CONFIG_SCSI_ISCSI_ATTRS is not set
+# CONFIG_SCSI_SAS_ATTRS is not set
#
# SCSI low-level drivers
#
+# CONFIG_ISCSI_TCP is not set
# CONFIG_SCSI_SATA is not set
# CONFIG_SCSI_DEBUG is not set
@@ -404,6 +430,11 @@ CONFIG_NETDEVICES=y
# CONFIG_TUN is not set
#
+# PHY device support
+#
+# CONFIG_PHYLIB is not set
+
+#
# Ethernet (10 or 100Mbit)
#
CONFIG_NET_ETHERNET=y
@@ -439,6 +470,7 @@ CONFIG_PPP=y
# CONFIG_PPP_SYNC_TTY is not set
# CONFIG_PPP_DEFLATE is not set
# CONFIG_PPP_BSDCOMP is not set
+# CONFIG_PPP_MPPE is not set
# CONFIG_PPPOE is not set
CONFIG_SLIP=y
CONFIG_SLIP_COMPRESSED=y
@@ -541,18 +573,18 @@ CONFIG_WATCHDOG_NOWAYOUT=y
#
# TPM devices
#
+# CONFIG_TELCLOCK is not set
#
# I2C support
#
# CONFIG_I2C is not set
-# CONFIG_I2C_SENSOR is not set
-CONFIG_ISP1301_OMAP=y
#
# Hardware Monitoring support
#
CONFIG_HWMON=y
+# CONFIG_HWMON_VID is not set
# CONFIG_HWMON_DEBUG_CHIP is not set
#
@@ -560,6 +592,10 @@ CONFIG_HWMON=y
#
#
+# Multimedia Capabilities Port drivers
+#
+
+#
# Multimedia devices
#
# CONFIG_VIDEO_DEV is not set
@@ -576,7 +612,6 @@ CONFIG_FB=y
# CONFIG_FB_CFB_FILLRECT is not set
# CONFIG_FB_CFB_COPYAREA is not set
# CONFIG_FB_CFB_IMAGEBLIT is not set
-# CONFIG_FB_SOFT_CURSOR is not set
# CONFIG_FB_MACMODES is not set
CONFIG_FB_MODE_HELPERS=y
# CONFIG_FB_TILEBLITTING is not set
@@ -589,6 +624,7 @@ CONFIG_FB_MODE_HELPERS=y
# CONFIG_VGA_CONSOLE is not set
CONFIG_DUMMY_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
CONFIG_FONTS=y
CONFIG_FONT_8x8=y
CONFIG_FONT_8x16=y
@@ -600,6 +636,7 @@ CONFIG_FONT_8x16=y
# CONFIG_FONT_SUN8x16 is not set
# CONFIG_FONT_SUN12x22 is not set
# CONFIG_FONT_10x18 is not set
+# CONFIG_FONT_RL is not set
#
# Logo configuration
@@ -624,10 +661,10 @@ CONFIG_SOUND=y
# Open Sound System
#
CONFIG_SOUND_PRIME=y
+# CONFIG_OBSOLETE_OSS_DRIVER is not set
# CONFIG_SOUND_MSNDCLAS is not set
# CONFIG_SOUND_MSNDPIN is not set
# CONFIG_SOUND_OSS is not set
-# CONFIG_SOUND_AD1980 is not set
#
# USB support
@@ -637,22 +674,21 @@ CONFIG_USB_ARCH_HAS_OHCI=y
# CONFIG_USB is not set
#
+# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
+#
+
+#
# USB Gadget Support
#
-CONFIG_USB_GADGET=y
-# CONFIG_USB_GADGET_DEBUG_FILES is not set
-CONFIG_USB_GADGET_SELECTED=y
+# CONFIG_USB_GADGET is not set
# CONFIG_USB_GADGET_NET2280 is not set
# CONFIG_USB_GADGET_PXA2XX is not set
# CONFIG_USB_GADGET_GOKU is not set
# CONFIG_USB_GADGET_LH7A40X is not set
-CONFIG_USB_GADGET_OMAP=y
-CONFIG_USB_OMAP=y
+# CONFIG_USB_GADGET_OMAP is not set
# CONFIG_USB_GADGET_DUMMY_HCD is not set
-# CONFIG_USB_GADGET_DUALSPEED is not set
# CONFIG_USB_ZERO is not set
-CONFIG_USB_ETH=y
-CONFIG_USB_ETH_RNDIS=y
+# CONFIG_USB_ETH is not set
# CONFIG_USB_GADGETFS is not set
# CONFIG_USB_FILE_STORAGE is not set
# CONFIG_USB_G_SERIAL is not set
@@ -673,10 +709,6 @@ CONFIG_EXT2_FS=y
# CONFIG_REISERFS_FS is not set
# CONFIG_JFS_FS is not set
# CONFIG_FS_POSIX_ACL is not set
-
-#
-# XFS support
-#
# CONFIG_XFS_FS is not set
# CONFIG_MINIX_FS is not set
CONFIG_ROMFS_FS=y
@@ -685,6 +717,7 @@ CONFIG_INOTIFY=y
CONFIG_DNOTIFY=y
# CONFIG_AUTOFS_FS is not set
# CONFIG_AUTOFS4_FS is not set
+# CONFIG_FUSE_FS is not set
#
# CD-ROM/DVD Filesystems
@@ -706,10 +739,10 @@ CONFIG_FAT_DEFAULT_CODEPAGE=437
#
CONFIG_PROC_FS=y
CONFIG_SYSFS=y
-# CONFIG_DEVPTS_FS_XATTR is not set
# CONFIG_TMPFS is not set
# CONFIG_HUGETLB_PAGE is not set
CONFIG_RAMFS=y
+# CONFIG_RELAYFS_FS is not set
#
# Miscellaneous filesystems
@@ -750,6 +783,7 @@ CONFIG_RPCSEC_GSS_KRB5=y
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
# CONFIG_AFS_FS is not set
+# CONFIG_9P_FS is not set
#
# Partition Types
@@ -859,6 +893,7 @@ CONFIG_CRYPTO_DES=y
# Library routines
#
# CONFIG_CRC_CCITT is not set
+# CONFIG_CRC16 is not set
CONFIG_CRC32=y
# CONFIG_LIBCRC32C is not set
CONFIG_ZLIB_INFLATE=y
diff --git a/arch/arm/lib/csumpartial.S b/arch/arm/lib/csumpartial.S
index cb5e3708f11..fe797cf320b 100644
--- a/arch/arm/lib/csumpartial.S
+++ b/arch/arm/lib/csumpartial.S
@@ -39,6 +39,7 @@ td3 .req lr
/* we must have at least one byte. */
tst buf, #1 @ odd address?
+ movne sum, sum, ror #8
ldrneb td0, [buf], #1
subne len, len, #1
adcnes sum, sum, td0, put_byte_1
@@ -103,6 +104,9 @@ ENTRY(csum_partial)
cmp len, #8 @ Ensure that we have at least
blo .less8 @ 8 bytes to copy.
+ tst buf, #1
+ movne sum, sum, ror #8
+
adds sum, sum, #0 @ C = 0
tst buf, #3 @ Test destination alignment
blne .not_aligned @ align destination, return here
diff --git a/arch/arm/mach-ixp4xx/Kconfig b/arch/arm/mach-ixp4xx/Kconfig
index 89762a26495..385285851cb 100644
--- a/arch/arm/mach-ixp4xx/Kconfig
+++ b/arch/arm/mach-ixp4xx/Kconfig
@@ -8,6 +8,16 @@ menu "Intel IXP4xx Implementation Options"
comment "IXP4xx Platforms"
+# This entry is placed on top because otherwise it would have
+# been shown as a submenu.
+config MACH_NSLU2
+ bool
+ prompt "NSLU2" if !(MACH_IXDP465 || MACH_IXDPG425 || ARCH_IXDP425 || ARCH_ADI_COYOTE || ARCH_AVILA || ARCH_IXCDP1100 || ARCH_PRPMC1100 || MACH_GTWX5715)
+ help
+ Say 'Y' here if you want your kernel to support Linksys's
+ NSLU2 NAS device. For more information on this platform,
+ see http://www.nslu2-linux.org
+
config ARCH_AVILA
bool "Avila"
help
diff --git a/arch/arm/mach-ixp4xx/Makefile b/arch/arm/mach-ixp4xx/Makefile
index ddecbda4a63..7a15629c18d 100644
--- a/arch/arm/mach-ixp4xx/Makefile
+++ b/arch/arm/mach-ixp4xx/Makefile
@@ -8,4 +8,5 @@ obj-$(CONFIG_ARCH_IXDP4XX) += ixdp425-pci.o ixdp425-setup.o
obj-$(CONFIG_MACH_IXDPG425) += ixdpg425-pci.o coyote-setup.o
obj-$(CONFIG_ARCH_ADI_COYOTE) += coyote-pci.o coyote-setup.o
obj-$(CONFIG_MACH_GTWX5715) += gtwx5715-pci.o gtwx5715-setup.o
+obj-$(CONFIG_MACH_NSLU2) += nslu2-pci.o nslu2-setup.o nslu2-power.o
diff --git a/arch/arm/mach-ixp4xx/nslu2-pci.c b/arch/arm/mach-ixp4xx/nslu2-pci.c
new file mode 100644
index 00000000000..a575f2e0b2c
--- /dev/null
+++ b/arch/arm/mach-ixp4xx/nslu2-pci.c
@@ -0,0 +1,77 @@
+/*
+ * arch/arm/mach-ixp4xx/nslu2-pci.c
+ *
+ * NSLU2 board-level PCI initialization
+ *
+ * based on ixdp425-pci.c:
+ * Copyright (C) 2002 Intel Corporation.
+ * Copyright (C) 2003-2004 MontaVista Software, Inc.
+ *
+ * Maintainer: http://www.nslu2-linux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/config.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+
+#include <asm/mach/pci.h>
+#include <asm/mach-types.h>
+
+void __init nslu2_pci_preinit(void)
+{
+ set_irq_type(IRQ_NSLU2_PCI_INTA, IRQT_LOW);
+ set_irq_type(IRQ_NSLU2_PCI_INTB, IRQT_LOW);
+ set_irq_type(IRQ_NSLU2_PCI_INTC, IRQT_LOW);
+
+ gpio_line_isr_clear(NSLU2_PCI_INTA_PIN);
+ gpio_line_isr_clear(NSLU2_PCI_INTB_PIN);
+ gpio_line_isr_clear(NSLU2_PCI_INTC_PIN);
+
+ /* INTD is not configured as GPIO is used
+ * for the power input button.
+ */
+
+ ixp4xx_pci_preinit();
+}
+
+static int __init nslu2_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+{
+ static int pci_irq_table[NSLU2_PCI_IRQ_LINES] = {
+ IRQ_NSLU2_PCI_INTA,
+ IRQ_NSLU2_PCI_INTB,
+ IRQ_NSLU2_PCI_INTC,
+ };
+
+ int irq = -1;
+
+ if (slot >= 1 && slot <= NSLU2_PCI_MAX_DEV &&
+ pin >= 1 && pin <= NSLU2_PCI_IRQ_LINES) {
+ irq = pci_irq_table[(slot + pin - 2) % NSLU2_PCI_IRQ_LINES];
+ }
+
+ return irq;
+}
+
+struct hw_pci __initdata nslu2_pci = {
+ .nr_controllers = 1,
+ .preinit = nslu2_pci_preinit,
+ .swizzle = pci_std_swizzle,
+ .setup = ixp4xx_setup,
+ .scan = ixp4xx_scan_bus,
+ .map_irq = nslu2_map_irq,
+};
+
+int __init nslu2_pci_init(void) /* monkey see, monkey do */
+{
+ if (machine_is_nslu2())
+ pci_common_init(&nslu2_pci);
+
+ return 0;
+}
+
+subsys_initcall(nslu2_pci_init);
diff --git a/arch/arm/mach-ixp4xx/nslu2-power.c b/arch/arm/mach-ixp4xx/nslu2-power.c
new file mode 100644
index 00000000000..18fbc8c0fb3
--- /dev/null
+++ b/arch/arm/mach-ixp4xx/nslu2-power.c
@@ -0,0 +1,92 @@
+/*
+ * arch/arm/mach-ixp4xx/nslu2-power.c
+ *
+ * NSLU2 Power/Reset driver
+ *
+ * Copyright (C) 2005 Tower Technologies
+ *
+ * based on nslu2-io.c
+ * Copyright (C) 2004 Karen Spearel
+ *
+ * Author: Alessandro Zummo <a.zummo@towertech.it>
+ * Maintainers: http://www.nslu2-linux.org/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/reboot.h>
+#include <linux/interrupt.h>
+
+#include <asm/mach-types.h>
+
+extern void ctrl_alt_del(void);
+
+static irqreturn_t nslu2_power_handler(int irq, void *dev_id, struct pt_regs *regs)
+{
+ /* Signal init to do the ctrlaltdel action; this will bypass init if
+ * it hasn't started and do a kernel_restart.
+ */
+ ctrl_alt_del();
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t nslu2_reset_handler(int irq, void *dev_id, struct pt_regs *regs)
+{
+ /* This is the paper-clip reset, it shuts the machine down directly.
+ */
+ machine_power_off();
+
+ return IRQ_HANDLED;
+}
+
+static int __init nslu2_power_init(void)
+{
+ if (!(machine_is_nslu2()))
+ return 0;
+
+ *IXP4XX_GPIO_GPISR = 0x20400000; /* read the 2 irqs to clr */
+
+ set_irq_type(NSLU2_RB_IRQ, IRQT_LOW);
+ set_irq_type(NSLU2_PB_IRQ, IRQT_HIGH);
+
+ gpio_line_isr_clear(NSLU2_RB_GPIO);
+ gpio_line_isr_clear(NSLU2_PB_GPIO);
+
+ if (request_irq(NSLU2_RB_IRQ, &nslu2_reset_handler,
+ SA_INTERRUPT, "NSLU2 reset button", NULL) < 0) {
+
+ printk(KERN_DEBUG "Reset Button IRQ %d not available\n",
+ NSLU2_RB_IRQ);
+
+ return -EIO;
+ }
+
+ if (request_irq(NSLU2_PB_IRQ, &nslu2_power_handler,
+ SA_INTERRUPT, "NSLU2 power button", NULL) < 0) {
+
+ printk(KERN_DEBUG "Power Button IRQ %d not available\n",
+ NSLU2_PB_IRQ);
+
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static void __exit nslu2_power_exit(void)
+{
+ free_irq(NSLU2_RB_IRQ, NULL);
+ free_irq(NSLU2_PB_IRQ, NULL);
+}
+
+module_init(nslu2_power_init);
+module_exit(nslu2_power_exit);
+
+MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
+MODULE_DESCRIPTION("NSLU2 Power/Reset driver");
+MODULE_LICENSE("GPL");
diff --git a/arch/arm/mach-ixp4xx/nslu2-setup.c b/arch/arm/mach-ixp4xx/nslu2-setup.c
new file mode 100644
index 00000000000..289e94cb65c
--- /dev/null
+++ b/arch/arm/mach-ixp4xx/nslu2-setup.c
@@ -0,0 +1,134 @@
+/*
+ * arch/arm/mach-ixp4xx/nslu2-setup.c
+ *
+ * NSLU2 board-setup
+ *
+ * based on ixdp425-setup.c:
+ * Copyright (C) 2003-2004 MontaVista Software, Inc.
+ *
+ * Author: Mark Rakes <mrakes at mac.com>
+ * Maintainers: http://www.nslu2-linux.org/
+ *
+ * Fixed missing init_time in MACHINE_START kas11 10/22/04
+ * Changed to conform to new style __init ixdp425 kas11 10/22/04
+ */
+
+#include <linux/kernel.h>
+#include <linux/serial.h>
+#include <linux/serial_8250.h>
+
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/flash.h>
+
+static struct flash_platform_data nslu2_flash_data = {
+ .map_name = "cfi_probe",
+ .width = 2,
+};
+
+static struct resource nslu2_flash_resource = {
+ .start = NSLU2_FLASH_BASE,
+ .end = NSLU2_FLASH_BASE + NSLU2_FLASH_SIZE,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device nslu2_flash = {
+ .name = "IXP4XX-Flash",
+ .id = 0,
+ .dev.platform_data = &nslu2_flash_data,
+ .num_resources = 1,
+ .resource = &nslu2_flash_resource,
+};
+
+static struct ixp4xx_i2c_pins nslu2_i2c_gpio_pins = {
+ .sda_pin = NSLU2_SDA_PIN,
+ .scl_pin = NSLU2_SCL_PIN,
+};
+
+static struct platform_device nslu2_i2c_controller = {
+ .name = "IXP4XX-I2C",
+ .id = 0,
+ .dev.platform_data = &nslu2_i2c_gpio_pins,
+ .num_resources = 0,
+};
+
+static struct resource nslu2_uart_resources[] = {
+ {
+ .start = IXP4XX_UART1_BASE_PHYS,
+ .end = IXP4XX_UART1_BASE_PHYS + 0x0fff,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = IXP4XX_UART2_BASE_PHYS,
+ .end = IXP4XX_UART2_BASE_PHYS + 0x0fff,
+ .flags = IORESOURCE_MEM,
+ }
+};
+
+static struct plat_serial8250_port nslu2_uart_data[] = {
+ {
+ .mapbase = IXP4XX_UART1_BASE_PHYS,
+ .membase = (char *)IXP4XX_UART1_BASE_VIRT + REG_OFFSET,
+ .irq = IRQ_IXP4XX_UART1,
+ .flags = UPF_BOOT_AUTOCONF,
+ .iotype = UPIO_MEM,
+ .regshift = 2,
+ .uartclk = IXP4XX_UART_XTAL,
+ },
+ {
+ .mapbase = IXP4XX_UART2_BASE_PHYS,
+ .membase = (char *)IXP4XX_UART2_BASE_VIRT + REG_OFFSET,
+ .irq = IRQ_IXP4XX_UART2,
+ .flags = UPF_BOOT_AUTOCONF,
+ .iotype = UPIO_MEM,
+ .regshift = 2,
+ .uartclk = IXP4XX_UART_XTAL,
+ },
+ { }
+};
+
+static struct platform_device nslu2_uart = {
+ .name = "serial8250",
+ .id = PLAT8250_DEV_PLATFORM,
+ .dev.platform_data = nslu2_uart_data,
+ .num_resources = 2,
+ .resource = nslu2_uart_resources,
+};
+
+static struct platform_device *nslu2_devices[] __initdata = {
+ &nslu2_i2c_controller,
+ &nslu2_flash,
+ &nslu2_uart,
+};
+
+static void nslu2_power_off(void)
+{
+ /* This causes the box to drop the power and go dead. */
+
+ /* enable the pwr cntl gpio */
+ gpio_line_config(NSLU2_PO_GPIO, IXP4XX_GPIO_OUT);
+
+ /* do the deed */
+ gpio_line_set(NSLU2_PO_GPIO, IXP4XX_GPIO_HIGH);
+}
+
+static void __init nslu2_init(void)
+{
+ ixp4xx_sys_init();
+
+ pm_power_off = nslu2_power_off;
+
+ platform_add_devices(nslu2_devices, ARRAY_SIZE(nslu2_devices));
+}
+
+MACHINE_START(NSLU2, "Linksys NSLU2")
+ /* Maintainer: www.nslu2-linux.org */
+ .phys_ram = PHYS_OFFSET,
+ .phys_io = IXP4XX_PERIPHERAL_BASE_PHYS,
+ .io_pg_offst = ((IXP4XX_PERIPHERAL_BASE_VIRT) >> 18) & 0xFFFC,
+ .boot_params = 0x00000100,
+ .map_io = ixp4xx_map_io,
+ .init_irq = ixp4xx_init_irq,
+ .timer = &ixp4xx_timer,
+ .init_machine = nslu2_init,
+MACHINE_END
diff --git a/arch/arm/mach-omap1/Kconfig b/arch/arm/mach-omap1/Kconfig
index 27fc2e8e5fc..86a0f0d1434 100644
--- a/arch/arm/mach-omap1/Kconfig
+++ b/arch/arm/mach-omap1/Kconfig
@@ -6,10 +6,10 @@ config ARCH_OMAP730
bool "OMAP730 Based System"
select ARCH_OMAP_OTG
-config ARCH_OMAP1510
+config ARCH_OMAP15XX
depends on ARCH_OMAP1
default y
- bool "OMAP1510 Based System"
+ bool "OMAP15xx Based System"
config ARCH_OMAP16XX
depends on ARCH_OMAP1
@@ -21,7 +21,7 @@ comment "OMAP Board Type"
config MACH_OMAP_INNOVATOR
bool "TI Innovator"
- depends on ARCH_OMAP1 && (ARCH_OMAP1510 || ARCH_OMAP16XX)
+ depends on ARCH_OMAP1 && (ARCH_OMAP15XX || ARCH_OMAP16XX)
help
TI OMAP 1510 or 1610 Innovator board support. Say Y here if you
have such a board.
@@ -64,20 +64,30 @@ config MACH_OMAP_PERSEUS2
config MACH_VOICEBLUE
bool "Voiceblue"
- depends on ARCH_OMAP1 && ARCH_OMAP1510
+ depends on ARCH_OMAP1 && ARCH_OMAP15XX
help
Support for Voiceblue GSM/VoIP gateway. Say Y here if you have
such a board.
config MACH_NETSTAR
bool "NetStar"
- depends on ARCH_OMAP1 && ARCH_OMAP1510
+ depends on ARCH_OMAP1 && ARCH_OMAP15XX
help
Support for NetStar PBX. Say Y here if you have such a board.
+config MACH_OMAP_PALMTE
+ bool "Palm Tungsten E"
+ depends on ARCH_OMAP1 && ARCH_OMAP15XX
+ help
+ Support for the Palm Tungsten E PDA. Currently only the LCD panel
+ is supported. To boot the kernel, you'll need a PalmOS compatible
+ bootloader; check out http://palmtelinux.sourceforge.net for more
+ information.
+ Say Y here if you have such a PDA, say NO otherwise.
+
config MACH_OMAP_GENERIC
bool "Generic OMAP board"
- depends on ARCH_OMAP1 && (ARCH_OMAP1510 || ARCH_OMAP16XX)
+ depends on ARCH_OMAP1 && (ARCH_OMAP15XX || ARCH_OMAP16XX)
help
Support for generic OMAP-1510, 1610 or 1710 board with
no FPGA. Can be used as template for porting Linux to
@@ -121,32 +131,32 @@ config OMAP_ARM_182MHZ
config OMAP_ARM_168MHZ
bool "OMAP ARM 168 MHz CPU"
- depends on ARCH_OMAP1 && (ARCH_OMAP1510 || ARCH_OMAP16XX || ARCH_OMAP730)
+ depends on ARCH_OMAP1 && (ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730)
help
Enable 168MHz clock for OMAP CPU. If unsure, say N.
config OMAP_ARM_150MHZ
bool "OMAP ARM 150 MHz CPU"
- depends on ARCH_OMAP1 && ARCH_OMAP1510
+ depends on ARCH_OMAP1 && ARCH_OMAP15XX
help
Enable 150MHz clock for OMAP CPU. If unsure, say N.
config OMAP_ARM_120MHZ
bool "OMAP ARM 120 MHz CPU"
- depends on ARCH_OMAP1 && (ARCH_OMAP1510 || ARCH_OMAP16XX || ARCH_OMAP730)
+ depends on ARCH_OMAP1 && (ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730)
help
Enable 120MHz clock for OMAP CPU. If unsure, say N.
config OMAP_ARM_60MHZ
bool "OMAP ARM 60 MHz CPU"
- depends on ARCH_OMAP1 && (ARCH_OMAP1510 || ARCH_OMAP16XX || ARCH_OMAP730)
+ depends on ARCH_OMAP1 && (ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730)
default y
help
Enable 60MHz clock for OMAP CPU. If unsure, say Y.
config OMAP_ARM_30MHZ
bool "OMAP ARM 30 MHz CPU"
- depends on ARCH_OMAP1 && (ARCH_OMAP1510 || ARCH_OMAP16XX || ARCH_OMAP730)
+ depends on ARCH_OMAP1 && (ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730)
help
Enable 30MHz clock for OMAP CPU. If unsure, say N.
diff --git a/arch/arm/mach-omap1/Makefile b/arch/arm/mach-omap1/Makefile
index 181a93deaae..b0b00156faa 100644
--- a/arch/arm/mach-omap1/Makefile
+++ b/arch/arm/mach-omap1/Makefile
@@ -3,7 +3,7 @@
#
# Common support
-obj-y := io.o id.o irq.o time.o serial.o devices.o
+obj-y := io.o id.o clock.o irq.o time.o mux.o serial.o devices.o
led-y := leds.o
# Specific board support
@@ -15,8 +15,9 @@ obj-$(CONFIG_MACH_OMAP_OSK) += board-osk.o
obj-$(CONFIG_MACH_OMAP_H3) += board-h3.o
obj-$(CONFIG_MACH_VOICEBLUE) += board-voiceblue.o
obj-$(CONFIG_MACH_NETSTAR) += board-netstar.o
+obj-$(CONFIG_MACH_OMAP_PALMTE) += board-palmte.o
-ifeq ($(CONFIG_ARCH_OMAP1510),y)
+ifeq ($(CONFIG_ARCH_OMAP15XX),y)
# Innovator-1510 FPGA
obj-$(CONFIG_MACH_OMAP_INNOVATOR) += fpga.o
endif
diff --git a/arch/arm/mach-omap1/board-generic.c b/arch/arm/mach-omap1/board-generic.c
index c209c7172a9..4b292e93fbe 100644
--- a/arch/arm/mach-omap1/board-generic.c
+++ b/arch/arm/mach-omap1/board-generic.c
@@ -15,7 +15,7 @@
#include <linux/kernel.h>
#include <linux/init.h>
-#include <linux/device.h>
+#include <linux/platform_device.h>
#include <asm/hardware.h>
#include <asm/mach-types.h>
@@ -28,8 +28,6 @@
#include <asm/arch/board.h>
#include <asm/arch/common.h>
-static int __initdata generic_serial_ports[OMAP_MAX_NR_PORTS] = {1, 1, 1};
-
static void __init omap_generic_init_irq(void)
{
omap_init_irq();
@@ -37,7 +35,7 @@ static void __init omap_generic_init_irq(void)
/* assume no Mini-AB port */
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
static struct omap_usb_config generic1510_usb_config __initdata = {
.register_host = 1,
.register_dev = 1,
@@ -76,21 +74,19 @@ static struct omap_mmc_config generic_mmc_config __initdata = {
#endif
+static struct omap_uart_config generic_uart_config __initdata = {
+ .enabled_uarts = ((1 << 0) | (1 << 1) | (1 << 2)),
+};
+
static struct omap_board_config_kernel generic_config[] = {
{ OMAP_TAG_USB, NULL },
{ OMAP_TAG_MMC, &generic_mmc_config },
+ { OMAP_TAG_UART, &generic_uart_config },
};
static void __init omap_generic_init(void)
{
- const struct omap_uart_config *uart_conf;
-
- /*
- * Make sure the serial ports are muxed on at this point.
- * You have to mux them off in device drivers later on
- * if not needed.
- */
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
if (cpu_is_omap1510()) {
generic_config[0].data = &generic1510_usb_config;
}
@@ -101,20 +97,9 @@ static void __init omap_generic_init(void)
}
#endif
- uart_conf = omap_get_config(OMAP_TAG_UART, struct omap_uart_config);
- if (uart_conf != NULL) {
- unsigned int enabled_ports, i;
-
- enabled_ports = uart_conf->enabled_uarts;
- for (i = 0; i < 3; i++) {
- if (!(enabled_ports & (1 << i)))
- generic_serial_ports[i] = 0;
- }
- }
-
omap_board_config = generic_config;
omap_board_config_size = ARRAY_SIZE(generic_config);
- omap_serial_init(generic_serial_ports);
+ omap_serial_init();
}
static void __init omap_generic_map_io(void)
diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c
index 4ee6bd8a50b..a07e2c9307f 100644
--- a/arch/arm/mach-omap1/board-h2.c
+++ b/arch/arm/mach-omap1/board-h2.c
@@ -40,8 +40,6 @@
extern int omap_gpio_init(void);
-static int __initdata h2_serial_ports[OMAP_MAX_NR_PORTS] = {1, 1, 1};
-
static struct mtd_partition h2_partitions[] = {
/* bootloader (U-Boot, etc) in first sector */
{
@@ -160,9 +158,20 @@ static struct omap_mmc_config h2_mmc_config __initdata = {
},
};
+static struct omap_uart_config h2_uart_config __initdata = {
+ .enabled_uarts = ((1 << 0) | (1 << 1) | (1 << 2)),
+};
+
+static struct omap_lcd_config h2_lcd_config __initdata = {
+ .panel_name = "h2",
+ .ctrl_name = "internal",
+};
+
static struct omap_board_config_kernel h2_config[] = {
{ OMAP_TAG_USB, &h2_usb_config },
{ OMAP_TAG_MMC, &h2_mmc_config },
+ { OMAP_TAG_UART, &h2_uart_config },
+ { OMAP_TAG_LCD, &h2_lcd_config },
};
static void __init h2_init(void)
@@ -180,12 +189,12 @@ static void __init h2_init(void)
platform_add_devices(h2_devices, ARRAY_SIZE(h2_devices));
omap_board_config = h2_config;
omap_board_config_size = ARRAY_SIZE(h2_config);
+ omap_serial_init();
}
static void __init h2_map_io(void)
{
omap_map_common_io();
- omap_serial_init(h2_serial_ports);
}
MACHINE_START(OMAP_H2, "TI-H2")
diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
index fc824361430..668e278433c 100644
--- a/arch/arm/mach-omap1/board-h3.c
+++ b/arch/arm/mach-omap1/board-h3.c
@@ -41,8 +41,6 @@
extern int omap_gpio_init(void);
-static int __initdata h3_serial_ports[OMAP_MAX_NR_PORTS] = {1, 1, 1};
-
static struct mtd_partition h3_partitions[] = {
/* bootloader (U-Boot, etc) in first sector */
{
@@ -168,9 +166,20 @@ static struct omap_mmc_config h3_mmc_config __initdata = {
},
};
+static struct omap_uart_config h3_uart_config __initdata = {
+ .enabled_uarts = ((1 << 0) | (1 << 1) | (1 << 2)),
+};
+
+static struct omap_lcd_config h3_lcd_config __initdata = {
+ .panel_name = "h3",
+ .ctrl_name = "internal",
+};
+
static struct omap_board_config_kernel h3_config[] = {
- { OMAP_TAG_USB, &h3_usb_config },
- { OMAP_TAG_MMC, &h3_mmc_config },
+ { OMAP_TAG_USB, &h3_usb_config },
+ { OMAP_TAG_MMC, &h3_mmc_config },
+ { OMAP_TAG_UART, &h3_uart_config },
+ { OMAP_TAG_LCD, &h3_lcd_config },
};
static void __init h3_init(void)
@@ -180,6 +189,7 @@ static void __init h3_init(void)
(void) platform_add_devices(devices, ARRAY_SIZE(devices));
omap_board_config = h3_config;
omap_board_config_size = ARRAY_SIZE(h3_config);
+ omap_serial_init();
}
static void __init h3_init_smc91x(void)
@@ -201,7 +211,6 @@ void h3_init_irq(void)
static void __init h3_map_io(void)
{
omap_map_common_io();
- omap_serial_init(h3_serial_ports);
}
MACHINE_START(OMAP_H3, "TI OMAP1710 H3 board")
diff --git a/arch/arm/mach-omap1/board-innovator.c b/arch/arm/mach-omap1/board-innovator.c
index a2eac853b2d..95f1ff36cdc 100644
--- a/arch/arm/mach-omap1/board-innovator.c
+++ b/arch/arm/mach-omap1/board-innovator.c
@@ -36,8 +36,6 @@
#include <asm/arch/usb.h>
#include <asm/arch/common.h>
-static int __initdata innovator_serial_ports[OMAP_MAX_NR_PORTS] = {1, 1, 1};
-
static struct mtd_partition innovator_partitions[] = {
/* bootloader (U-Boot, etc) in first sector */
{
@@ -99,7 +97,7 @@ static struct platform_device innovator_flash_device = {
.resource = &innovator_flash_resource,
};
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
/* Only FPGA needs to be mapped here. All others are done with ioremap */
static struct map_desc innovator1510_io_desc[] __initdata = {
@@ -136,7 +134,7 @@ static struct platform_device *innovator1510_devices[] __initdata = {
&innovator1510_smc91x_device,
};
-#endif /* CONFIG_ARCH_OMAP1510 */
+#endif /* CONFIG_ARCH_OMAP15XX */
#ifdef CONFIG_ARCH_OMAP16XX
@@ -185,7 +183,7 @@ void innovator_init_irq(void)
{
omap_init_irq();
omap_gpio_init();
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
if (cpu_is_omap1510()) {
omap1510_fpga_init_irq();
}
@@ -193,7 +191,7 @@ void innovator_init_irq(void)
innovator_init_smc91x();
}
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
static struct omap_usb_config innovator1510_usb_config __initdata = {
/* for bundled non-standard host and peripheral cables */
.hmc_mode = 4,
@@ -205,6 +203,11 @@ static struct omap_usb_config innovator1510_usb_config __initdata = {
.register_dev = 1,
.pins[0] = 2,
};
+
+static struct omap_lcd_config innovator1510_lcd_config __initdata = {
+ .panel_name = "inn1510",
+ .ctrl_name = "internal",
+};
#endif
#ifdef CONFIG_ARCH_OMAP16XX
@@ -222,6 +225,11 @@ static struct omap_usb_config h2_usb_config __initdata = {
.pins[1] = 3,
};
+
+static struct omap_lcd_config innovator1610_lcd_config __initdata = {
+ .panel_name = "inn1610",
+ .ctrl_name = "internal",
+};
#endif
static struct omap_mmc_config innovator_mmc_config __initdata = {
@@ -234,14 +242,20 @@ static struct omap_mmc_config innovator_mmc_config __initdata = {
},
};
+static struct omap_uart_config innovator_uart_config __initdata = {
+ .enabled_uarts = ((1 << 0) | (1 << 1) | (1 << 2)),
+};
+
static struct omap_board_config_kernel innovator_config[] = {
{ OMAP_TAG_USB, NULL },
+ { OMAP_TAG_LCD, NULL },
{ OMAP_TAG_MMC, &innovator_mmc_config },
+ { OMAP_TAG_UART, &innovator_uart_config },
};
static void __init innovator_init(void)
{
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
if (cpu_is_omap1510()) {
platform_add_devices(innovator1510_devices, ARRAY_SIZE(innovator1510_devices));
}
@@ -252,23 +266,28 @@ static void __init innovator_init(void)
}
#endif
-#ifdef CONFIG_ARCH_OMAP1510
- if (cpu_is_omap1510())
+#ifdef CONFIG_ARCH_OMAP15XX
+ if (cpu_is_omap1510()) {
innovator_config[0].data = &innovator1510_usb_config;
+ innovator_config[1].data = &innovator1510_lcd_config;
+ }
#endif
#ifdef CONFIG_ARCH_OMAP16XX
- if (cpu_is_omap1610())
+ if (cpu_is_omap1610()) {
innovator_config[0].data = &h2_usb_config;
+ innovator_config[1].data = &innovator1610_lcd_config;
+ }
#endif
omap_board_config = innovator_config;
omap_board_config_size = ARRAY_SIZE(innovator_config);
+ omap_serial_init();
}
static void __init innovator_map_io(void)
{
omap_map_common_io();
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
if (cpu_is_omap1510()) {
iotable_init(innovator1510_io_desc, ARRAY_SIZE(innovator1510_io_desc));
udelay(10); /* Delay needed for FPGA */
@@ -280,7 +299,6 @@ static void __init innovator_map_io(void)
fpga_read(OMAP1510_FPGA_BOARD_REV));
}
#endif
- omap_serial_init(innovator_serial_ports);
}
MACHINE_START(OMAP_INNOVATOR, "TI-Innovator")
diff --git a/arch/arm/mach-omap1/board-netstar.c b/arch/arm/mach-omap1/board-netstar.c
index c851c2e4dfc..0448fa7de8a 100644
--- a/arch/arm/mach-omap1/board-netstar.c
+++ b/arch/arm/mach-omap1/board-netstar.c
@@ -55,6 +55,14 @@ static struct platform_device *netstar_devices[] __initdata = {
&netstar_smc91x_device,
};
+static struct omap_uart_config netstar_uart_config __initdata = {
+ .enabled_uarts = ((1 << 0) | (1 << 1) | (1 << 2)),
+};
+
+static struct omap_board_config_kernel netstar_config[] = {
+ { OMAP_TAG_UART, &netstar_uart_config },
+};
+
static void __init netstar_init_irq(void)
{
omap_init_irq();
@@ -92,14 +100,15 @@ static void __init netstar_init(void)
/* Switch off red LED */
omap_writeb(0x00, OMAP_LPG1_PMR); /* Disable clock */
omap_writeb(0x80, OMAP_LPG1_LCR);
-}
-static int __initdata omap_serial_ports[OMAP_MAX_NR_PORTS] = {1, 1, 1};
+ omap_board_config = netstar_config;
+ omap_board_config_size = ARRAY_SIZE(netstar_config);
+ omap_serial_init();
+}
static void __init netstar_map_io(void)
{
omap_map_common_io();
- omap_serial_init(omap_serial_ports);
}
#define MACHINE_PANICED 1
diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c
index a88524e7c31..e990e1bc166 100644
--- a/arch/arm/mach-omap1/board-osk.c
+++ b/arch/arm/mach-omap1/board-osk.c
@@ -46,8 +46,6 @@
#include <asm/arch/tc.h>
#include <asm/arch/common.h>
-static int __initdata osk_serial_ports[OMAP_MAX_NR_PORTS] = {1, 0, 0};
-
static struct mtd_partition osk_partitions[] = {
/* bootloader (U-Boot, etc) in first sector */
{
@@ -155,7 +153,7 @@ static void __init osk_init_smc91x(void)
}
/* Check EMIFS wait states to fix errors with SMC_GET_PKT_HDR */
- EMIFS_CCS(1) |= 0x2;
+ EMIFS_CCS(1) |= 0x3;
}
static void __init osk_init_cf(void)
@@ -193,8 +191,19 @@ static struct omap_usb_config osk_usb_config __initdata = {
.pins[0] = 2,
};
+static struct omap_uart_config osk_uart_config __initdata = {
+ .enabled_uarts = (1 << 0),
+};
+
+static struct omap_lcd_config osk_lcd_config __initdata = {
+ .panel_name = "osk",
+ .ctrl_name = "internal",
+};
+
static struct omap_board_config_kernel osk_config[] = {
{ OMAP_TAG_USB, &osk_usb_config },
+ { OMAP_TAG_UART, &osk_uart_config },
+ { OMAP_TAG_LCD, &osk_lcd_config },
};
#ifdef CONFIG_OMAP_OSK_MISTRAL
@@ -254,13 +263,13 @@ static void __init osk_init(void)
omap_board_config_size = ARRAY_SIZE(osk_config);
USB_TRANSCEIVER_CTRL_REG |= (3 << 1);
+ omap_serial_init();
osk_mistral_init();
}
static void __init osk_map_io(void)
{
omap_map_common_io();
- omap_serial_init(osk_serial_ports);
}
MACHINE_START(OMAP_OSK, "TI-OSK")
diff --git a/arch/arm/mach-omap1/board-palmte.c b/arch/arm/mach-omap1/board-palmte.c
new file mode 100644
index 00000000000..540b20d78cc
--- /dev/null
+++ b/arch/arm/mach-omap1/board-palmte.c
@@ -0,0 +1,87 @@
+/*
+ * linux/arch/arm/mach-omap1/board-palmte.c
+ *
+ * Modified from board-generic.c
+ *
+ * Support for the Palm Tungsten E PDA.
+ *
+ * Original version : Laurent Gonzalez
+ *
+ * Maintainers : http://palmtelinux.sf.net
+ * palmtelinux-developpers@lists.sf.net
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/notifier.h>
+
+#include <asm/hardware.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#include <asm/arch/gpio.h>
+#include <asm/arch/mux.h>
+#include <asm/arch/usb.h>
+#include <asm/arch/board.h>
+#include <asm/arch/common.h>
+#include <asm/hardware/clock.h>
+
+static void __init omap_generic_init_irq(void)
+{
+ omap_init_irq();
+}
+
+static struct omap_usb_config palmte_usb_config __initdata = {
+ .register_dev = 1,
+ .hmc_mode = 0,
+ .pins[0] = 3,
+};
+
+static struct omap_mmc_config palmte_mmc_config __initdata = {
+ .mmc [0] = {
+ .enabled = 1,
+ .wire4 = 1,
+ .wp_pin = OMAP_MPUIO(3),
+ .power_pin = -1,
+ .switch_pin = -1,
+ },
+};
+
+static struct omap_lcd_config palmte_lcd_config __initdata = {
+ .panel_name = "palmte",
+ .ctrl_name = "internal",
+};
+
+static struct omap_board_config_kernel palmte_config[] = {
+ { OMAP_TAG_USB, &palmte_usb_config },
+ { OMAP_TAG_MMC, &palmte_mmc_config },
+ { OMAP_TAG_LCD, &palmte_lcd_config },
+};
+
+static void __init omap_generic_init(void)
+{
+ omap_board_config = palmte_config;
+ omap_board_config_size = ARRAY_SIZE(palmte_config);
+}
+
+static void __init omap_generic_map_io(void)
+{
+ omap_map_common_io();
+}
+
+MACHINE_START(OMAP_PALMTE, "OMAP310 based Palm Tungsten E")
+ .phys_ram = 0x10000000,
+ .phys_io = 0xfff00000,
+ .io_pg_offst = ((0xfef00000) >> 18) & 0xfffc,
+ .boot_params = 0x10000100,
+ .map_io = omap_generic_map_io,
+ .init_irq = omap_generic_init_irq,
+ .init_machine = omap_generic_init,
+ .timer = &omap_timer,
+MACHINE_END
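Every board file touched above follows the same registration pattern: fill a static omap_board_config_kernel table with { tag, data } pairs, point omap_board_config/omap_board_config_size at it from the init hook, and let shared code look the entries up by tag later (clock.c below does exactly that with omap_get_config(OMAP_TAG_CLOCK, ...)). A minimal user-space sketch of that tag-lookup idea, with made-up tag values and stand-in types, not part of the patch:

#include <stdio.h>
#include <stddef.h>

/* Hypothetical stand-ins for the real OMAP_TAG_* constants */
enum { TAG_UART = 1, TAG_LCD = 2, TAG_MMC = 3 };

struct board_config_entry {
	int tag;
	const void *data;
};

struct uart_config { unsigned enabled_uarts; };

static const struct uart_config demo_uart = { .enabled_uarts = (1 << 0) | (1 << 1) };

static const struct board_config_entry demo_board_config[] = {
	{ TAG_UART, &demo_uart },
	{ TAG_LCD,  NULL },
};

/* Linear search by tag, which is conceptually what omap_get_config() does */
static const void *get_config(const struct board_config_entry *tbl,
			      size_t n, int tag)
{
	size_t i;
	for (i = 0; i < n; i++)
		if (tbl[i].tag == tag)
			return tbl[i].data;
	return NULL;
}

int main(void)
{
	const struct uart_config *uc =
		get_config(demo_board_config,
			   sizeof(demo_board_config) / sizeof(demo_board_config[0]),
			   TAG_UART);
	if (uc)
		printf("enabled_uarts mask: 0x%x\n", uc->enabled_uarts);
	return 0;
}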
diff --git a/arch/arm/mach-omap1/board-perseus2.c b/arch/arm/mach-omap1/board-perseus2.c
index 354b157acb3..bd900b7ab33 100644
--- a/arch/arm/mach-omap1/board-perseus2.c
+++ b/arch/arm/mach-omap1/board-perseus2.c
@@ -29,6 +29,7 @@
#include <asm/arch/mux.h>
#include <asm/arch/fpga.h>
#include <asm/arch/common.h>
+#include <asm/arch/board.h>
static struct resource smc91x_resources[] = {
[0] = {
@@ -43,8 +44,6 @@ static struct resource smc91x_resources[] = {
},
};
-static int __initdata p2_serial_ports[OMAP_MAX_NR_PORTS] = {1, 1, 0};
-
static struct mtd_partition p2_partitions[] = {
/* bootloader (U-Boot, etc) in first sector */
{
@@ -111,9 +110,27 @@ static struct platform_device *devices[] __initdata = {
&smc91x_device,
};
+static struct omap_uart_config perseus2_uart_config __initdata = {
+ .enabled_uarts = ((1 << 0) | (1 << 1)),
+};
+
+static struct omap_lcd_config perseus2_lcd_config __initdata = {
+ .panel_name = "p2",
+ .ctrl_name = "internal",
+};
+
+static struct omap_board_config_kernel perseus2_config[] = {
+ { OMAP_TAG_UART, &perseus2_uart_config },
+ { OMAP_TAG_LCD, &perseus2_lcd_config },
+};
+
static void __init omap_perseus2_init(void)
{
(void) platform_add_devices(devices, ARRAY_SIZE(devices));
+
+ omap_board_config = perseus2_config;
+ omap_board_config_size = ARRAY_SIZE(perseus2_config);
+ omap_serial_init();
}
static void __init perseus2_init_smc91x(void)
@@ -131,7 +148,6 @@ void omap_perseus2_init_irq(void)
omap_gpio_init();
perseus2_init_smc91x();
}
-
/* Only FPGA needs to be mapped here. All others are done with ioremap */
static struct map_desc omap_perseus2_io_desc[] __initdata = {
{
@@ -179,7 +195,6 @@ static void __init omap_perseus2_map_io(void)
* It is used as the Ethernet controller interrupt
*/
omap_writel(omap_readl(OMAP730_IO_CONF_9) & 0x1FFFFFFF, OMAP730_IO_CONF_9);
- omap_serial_init(p2_serial_ports);
}
MACHINE_START(OMAP_PERSEUS2, "OMAP730 Perseus2")
diff --git a/arch/arm/mach-omap1/board-voiceblue.c b/arch/arm/mach-omap1/board-voiceblue.c
index 3f018b29686..6f9a6220e78 100644
--- a/arch/arm/mach-omap1/board-voiceblue.c
+++ b/arch/arm/mach-omap1/board-voiceblue.c
@@ -150,9 +150,14 @@ static struct omap_mmc_config voiceblue_mmc_config __initdata = {
},
};
+static struct omap_uart_config voiceblue_uart_config __initdata = {
+ .enabled_uarts = ((1 << 0) | (1 << 1) | (1 << 2)),
+};
+
static struct omap_board_config_kernel voiceblue_config[] = {
{ OMAP_TAG_USB, &voiceblue_usb_config },
{ OMAP_TAG_MMC, &voiceblue_mmc_config },
+ { OMAP_TAG_UART, &voiceblue_uart_config },
};
static void __init voiceblue_init_irq(void)
@@ -191,6 +196,7 @@ static void __init voiceblue_init(void)
platform_add_devices(voiceblue_devices, ARRAY_SIZE(voiceblue_devices));
omap_board_config = voiceblue_config;
omap_board_config_size = ARRAY_SIZE(voiceblue_config);
+ omap_serial_init();
/* There is a good chance board is going up, so enable power LED
* (it is connected through invertor) */
@@ -198,12 +204,9 @@ static void __init voiceblue_init(void)
omap_writeb(0x00, OMAP_LPG1_PMR); /* Disable clock */
}
-static int __initdata omap_serial_ports[OMAP_MAX_NR_PORTS] = {1, 1, 1};
-
static void __init voiceblue_map_io(void)
{
omap_map_common_io();
- omap_serial_init(omap_serial_ports);
}
#define MACHINE_PANICED 1
diff --git a/arch/arm/mach-omap1/clock.c b/arch/arm/mach-omap1/clock.c
new file mode 100644
index 00000000000..4277eee44ed
--- /dev/null
+++ b/arch/arm/mach-omap1/clock.c
@@ -0,0 +1,792 @@
+/*
+ * linux/arch/arm/mach-omap1/clock.c
+ *
+ * Copyright (C) 2004 - 2005 Nokia corporation
+ * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
+ *
+ * Modified to use omap shared clock framework by
+ * Tony Lindgren <tony@atomide.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/err.h>
+
+#include <asm/io.h>
+#include <asm/hardware/clock.h>
+
+#include <asm/arch/usb.h>
+#include <asm/arch/clock.h>
+#include <asm/arch/sram.h>
+
+#include "clock.h"
+
+__u32 arm_idlect1_mask;
+
+/*-------------------------------------------------------------------------
+ * Omap1 specific clock functions
+ *-------------------------------------------------------------------------*/
+
+static void omap1_watchdog_recalc(struct clk * clk)
+{
+ clk->rate = clk->parent->rate / 14;
+}
+
+static void omap1_uart_recalc(struct clk * clk)
+{
+ unsigned int val = omap_readl(clk->enable_reg);
+ if (val & clk->enable_bit)
+ clk->rate = 48000000;
+ else
+ clk->rate = 12000000;
+}
+
+static int omap1_clk_enable_dsp_domain(struct clk *clk)
+{
+ int retval;
+
+ retval = omap1_clk_use(&api_ck.clk);
+ if (!retval) {
+ retval = omap1_clk_enable(clk);
+ omap1_clk_unuse(&api_ck.clk);
+ }
+
+ return retval;
+}
+
+static void omap1_clk_disable_dsp_domain(struct clk *clk)
+{
+ if (omap1_clk_use(&api_ck.clk) == 0) {
+ omap1_clk_disable(clk);
+ omap1_clk_unuse(&api_ck.clk);
+ }
+}
+
+static int omap1_clk_enable_uart_functional(struct clk *clk)
+{
+ int ret;
+ struct uart_clk *uclk;
+
+ ret = omap1_clk_enable(clk);
+ if (ret == 0) {
+ /* Set smart idle acknowledgement mode */
+ uclk = (struct uart_clk *)clk;
+ omap_writeb((omap_readb(uclk->sysc_addr) & ~0x10) | 8,
+ uclk->sysc_addr);
+ }
+
+ return ret;
+}
+
+static void omap1_clk_disable_uart_functional(struct clk *clk)
+{
+ struct uart_clk *uclk;
+
+ /* Set force idle acknowledgement mode */
+ uclk = (struct uart_clk *)clk;
+ omap_writeb((omap_readb(uclk->sysc_addr) & ~0x18), uclk->sysc_addr);
+
+ omap1_clk_disable(clk);
+}
+
+static void omap1_clk_allow_idle(struct clk *clk)
+{
+ struct arm_idlect1_clk * iclk = (struct arm_idlect1_clk *)clk;
+
+ if (!(clk->flags & CLOCK_IDLE_CONTROL))
+ return;
+
+ if (iclk->no_idle_count > 0 && !(--iclk->no_idle_count))
+ arm_idlect1_mask |= 1 << iclk->idlect_shift;
+}
+
+static void omap1_clk_deny_idle(struct clk *clk)
+{
+ struct arm_idlect1_clk * iclk = (struct arm_idlect1_clk *)clk;
+
+ if (!(clk->flags & CLOCK_IDLE_CONTROL))
+ return;
+
+ if (iclk->no_idle_count++ == 0)
+ arm_idlect1_mask &= ~(1 << iclk->idlect_shift);
+}
+
+static __u16 verify_ckctl_value(__u16 newval)
+{
+ /* This function checks for the following limitations set
+ * by the hardware (all conditions must be true):
+ * DSPMMU_CK == DSP_CK or DSPMMU_CK == DSP_CK/2
+ * ARM_CK >= TC_CK
+ * DSP_CK >= TC_CK
+ * DSPMMU_CK >= TC_CK
+ *
+ * In addition, the following rules are enforced:
+ * LCD_CK <= TC_CK
+ * ARMPER_CK <= TC_CK
+ *
+ * However, maximum frequencies are not checked for!
+ */
+ __u8 per_exp;
+ __u8 lcd_exp;
+ __u8 arm_exp;
+ __u8 dsp_exp;
+ __u8 tc_exp;
+ __u8 dspmmu_exp;
+
+ per_exp = (newval >> CKCTL_PERDIV_OFFSET) & 3;
+ lcd_exp = (newval >> CKCTL_LCDDIV_OFFSET) & 3;
+ arm_exp = (newval >> CKCTL_ARMDIV_OFFSET) & 3;
+ dsp_exp = (newval >> CKCTL_DSPDIV_OFFSET) & 3;
+ tc_exp = (newval >> CKCTL_TCDIV_OFFSET) & 3;
+ dspmmu_exp = (newval >> CKCTL_DSPMMUDIV_OFFSET) & 3;
+
+ if (dspmmu_exp < dsp_exp)
+ dspmmu_exp = dsp_exp;
+ if (dspmmu_exp > dsp_exp+1)
+ dspmmu_exp = dsp_exp+1;
+ if (tc_exp < arm_exp)
+ tc_exp = arm_exp;
+ if (tc_exp < dspmmu_exp)
+ tc_exp = dspmmu_exp;
+ if (tc_exp > lcd_exp)
+ lcd_exp = tc_exp;
+ if (tc_exp > per_exp)
+ per_exp = tc_exp;
+
+ newval &= 0xf000;
+ newval |= per_exp << CKCTL_PERDIV_OFFSET;
+ newval |= lcd_exp << CKCTL_LCDDIV_OFFSET;
+ newval |= arm_exp << CKCTL_ARMDIV_OFFSET;
+ newval |= dsp_exp << CKCTL_DSPDIV_OFFSET;
+ newval |= tc_exp << CKCTL_TCDIV_OFFSET;
+ newval |= dspmmu_exp << CKCTL_DSPMMUDIV_OFFSET;
+
+ return newval;
+}
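The clamping in verify_ckctl_value() is easier to see on the bare 2-bit divisor exponents than on the packed CKCTL word. A small stand-alone sketch of the same rules (illustrative only, not part of the patch; each exponent e means divide-by-2^e, so a larger exponent is a slower clock):

#include <stdio.h>

/* Clamp divisor exponents the same way verify_ckctl_value() does */
static void clamp_exps(unsigned *per, unsigned *lcd, unsigned *arm,
		       unsigned *dsp, unsigned *tc, unsigned *dspmmu)
{
	if (*dspmmu < *dsp)     *dspmmu = *dsp;      /* DSPMMU_CK <= DSP_CK */
	if (*dspmmu > *dsp + 1) *dspmmu = *dsp + 1;  /* DSPMMU_CK >= DSP_CK/2 */
	if (*tc < *arm)         *tc = *arm;          /* ARM_CK >= TC_CK */
	if (*tc < *dspmmu)      *tc = *dspmmu;       /* DSPMMU_CK >= TC_CK */
	if (*tc > *lcd)         *lcd = *tc;          /* LCD_CK <= TC_CK */
	if (*tc > *per)         *per = *tc;          /* ARMPER_CK <= TC_CK */
}

int main(void)
{
	unsigned per = 0, lcd = 0, arm = 0, dsp = 0, tc = 3, dspmmu = 3;

	clamp_exps(&per, &lcd, &arm, &dsp, &tc, &dspmmu);
	printf("per=/%u lcd=/%u arm=/%u dsp=/%u tc=/%u dspmmu=/%u\n",
	       1u << per, 1u << lcd, 1u << arm, 1u << dsp, 1u << tc, 1u << dspmmu);
	return 0;
}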
+
+static int calc_dsor_exp(struct clk *clk, unsigned long rate)
+{
+ /* Note: If target frequency is too low, this function will return 4,
+ * which is an invalid value. Caller must check for this value and act
+ * accordingly.
+ *
+ * Note: This function does not check for the following limitations set
+ * by the hardware (all conditions must be true):
+ * DSPMMU_CK == DSP_CK or DSPMMU_CK == DSP_CK/2
+ * ARM_CK >= TC_CK
+ * DSP_CK >= TC_CK
+ * DSPMMU_CK >= TC_CK
+ */
+ unsigned long realrate;
+ struct clk * parent;
+ unsigned dsor_exp;
+
+ if (unlikely(!(clk->flags & RATE_CKCTL)))
+ return -EINVAL;
+
+ parent = clk->parent;
+ if (unlikely(parent == 0))
+ return -EIO;
+
+ realrate = parent->rate;
+ for (dsor_exp=0; dsor_exp<4; dsor_exp++) {
+ if (realrate <= rate)
+ break;
+
+ realrate /= 2;
+ }
+
+ return dsor_exp;
+}
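calc_dsor_exp() is simply "halve the parent rate until it fits, at most three times, else report 4". A tiny user-space sketch of that loop, using sample rates that also appear in the table in clock.h below:

#include <stdio.h>

/* Smallest exponent e (0..3) such that parent_rate >> e <= target;
 * returns 4 if even a divide-by-8 is still too fast, as calc_dsor_exp() does. */
static int dsor_exp(unsigned long parent_rate, unsigned long target)
{
	unsigned long rate = parent_rate;
	int e;

	for (e = 0; e < 4; e++) {
		if (rate <= target)
			break;
		rate /= 2;
	}
	return e;
}

int main(void)
{
	printf("%d\n", dsor_exp(192000000, 48000000));  /* 2 -> divide by 4 */
	printf("%d\n", dsor_exp(192000000, 10000000));  /* 4 -> out of range */
	return 0;
}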
+
+static void omap1_ckctl_recalc(struct clk * clk)
+{
+ int dsor;
+
+ /* Calculate divisor encoded as 2-bit exponent */
+ dsor = 1 << (3 & (omap_readw(ARM_CKCTL) >> clk->rate_offset));
+
+ if (unlikely(clk->rate == clk->parent->rate / dsor))
+ return; /* No change, quick exit */
+ clk->rate = clk->parent->rate / dsor;
+
+ if (unlikely(clk->flags & RATE_PROPAGATES))
+ propagate_rate(clk);
+}
+
+static void omap1_ckctl_recalc_dsp_domain(struct clk * clk)
+{
+ int dsor;
+
+ /* Calculate divisor encoded as 2-bit exponent
+ *
+ * The clock control bits are in DSP domain,
+ * so api_ck is needed for access.
+ * Note that DSP_CKCTL virt addr = phys addr, so
+ * we must use __raw_readw() instead of omap_readw().
+ */
+ omap1_clk_use(&api_ck.clk);
+ dsor = 1 << (3 & (__raw_readw(DSP_CKCTL) >> clk->rate_offset));
+ omap1_clk_unuse(&api_ck.clk);
+
+ if (unlikely(clk->rate == clk->parent->rate / dsor))
+ return; /* No change, quick exit */
+ clk->rate = clk->parent->rate / dsor;
+
+ if (unlikely(clk->flags & RATE_PROPAGATES))
+ propagate_rate(clk);
+}
+
+/* MPU virtual clock functions */
+static int omap1_select_table_rate(struct clk * clk, unsigned long rate)
+{
+ /* Find the highest supported frequency <= rate and switch to it */
+ struct mpu_rate * ptr;
+
+ if (clk != &virtual_ck_mpu)
+ return -EINVAL;
+
+ for (ptr = rate_table; ptr->rate; ptr++) {
+ if (ptr->xtal != ck_ref.rate)
+ continue;
+
+ /* DPLL1 cannot be reprogrammed without risking system crash */
+ if (likely(ck_dpll1.rate!=0) && ptr->pll_rate != ck_dpll1.rate)
+ continue;
+
+ /* Can check only after xtal frequency check */
+ if (ptr->rate <= rate)
+ break;
+ }
+
+ if (!ptr->rate)
+ return -EINVAL;
+
+ /*
+ * In most cases we should not need to reprogram DPLL.
+ * Reprogramming the DPLL is tricky, it must be done from SRAM.
+ */
+ omap_sram_reprogram_clock(ptr->dpllctl_val, ptr->ckctl_val);
+
+ ck_dpll1.rate = ptr->pll_rate;
+ propagate_rate(&ck_dpll1);
+ return 0;
+}
+
+static int omap1_clk_set_rate_dsp_domain(struct clk *clk, unsigned long rate)
+{
+ int ret = -EINVAL;
+ int dsor_exp;
+ __u16 regval;
+
+ if (clk->flags & RATE_CKCTL) {
+ dsor_exp = calc_dsor_exp(clk, rate);
+ if (dsor_exp > 3)
+ dsor_exp = -EINVAL;
+ if (dsor_exp < 0)
+ return dsor_exp;
+
+ regval = __raw_readw(DSP_CKCTL);
+ regval &= ~(3 << clk->rate_offset);
+ regval |= dsor_exp << clk->rate_offset;
+ __raw_writew(regval, DSP_CKCTL);
+ clk->rate = clk->parent->rate / (1 << dsor_exp);
+ ret = 0;
+ }
+
+ if (unlikely(ret == 0 && (clk->flags & RATE_PROPAGATES)))
+ propagate_rate(clk);
+
+ return ret;
+}
+
+static long omap1_round_to_table_rate(struct clk * clk, unsigned long rate)
+{
+ /* Find the highest supported frequency <= rate */
+ struct mpu_rate * ptr;
+ long highest_rate;
+
+ if (clk != &virtual_ck_mpu)
+ return -EINVAL;
+
+ highest_rate = -EINVAL;
+
+ for (ptr = rate_table; ptr->rate; ptr++) {
+ if (ptr->xtal != ck_ref.rate)
+ continue;
+
+ highest_rate = ptr->rate;
+
+ /* Can check only after xtal frequency check */
+ if (ptr->rate <= rate)
+ break;
+ }
+
+ return highest_rate;
+}
+
+static unsigned calc_ext_dsor(unsigned long rate)
+{
+ unsigned dsor;
+
+ /* MCLK and BCLK divisor selection is not linear:
+ * freq = 96MHz / dsor
+ *
+ * RATIO_SEL range: dsor <-> RATIO_SEL
+ * 0..6: (RATIO_SEL+2) <-> (dsor-2)
+ * 6..48: (8+(RATIO_SEL-6)*2) <-> ((dsor-8)/2+6)
+ * Minimum dsor is 2 and maximum is 96. Odd divisors starting from 9
+ * cannot be used.
+ */
+ for (dsor = 2; dsor < 96; ++dsor) {
+ if ((dsor & 1) && dsor > 8)
+ continue;
+ if (rate >= 96000000 / dsor)
+ break;
+ }
+ return dsor;
+}
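calc_ext_dsor() and the RATIO_SEL conversions in omap1_set_ext_clk_rate()/omap1_init_ext_clk() further down form a pair of mappings that should round-trip. A user-space sketch of just the arithmetic (ignoring the <<2 register placement and the APLL-select bit; illustrative only, not part of the patch):

#include <stdio.h>

/* Same search as calc_ext_dsor(): smallest legal divisor of 96 MHz whose
 * result does not exceed the requested rate; odd divisors above 8 are skipped. */
static unsigned ext_dsor(unsigned long rate)
{
	unsigned dsor;

	for (dsor = 2; dsor < 96; ++dsor) {
		if ((dsor & 1) && dsor > 8)
			continue;
		if (rate >= 96000000 / dsor)
			break;
	}
	return dsor;
}

/* dsor -> RATIO_SEL, as in omap1_set_ext_clk_rate() */
static unsigned dsor_to_ratio(unsigned dsor)
{
	return dsor > 8 ? (dsor - 8) / 2 + 6 : dsor - 2;
}

/* RATIO_SEL -> dsor, as in omap1_init_ext_clk() */
static unsigned ratio_to_dsor(unsigned ratio)
{
	return ratio > 6 ? (ratio - 6) * 2 + 8 : ratio + 2;
}

int main(void)
{
	unsigned dsor = ext_dsor(12000000);   /* expect 8: 96 MHz / 8 = 12 MHz */
	unsigned ratio = dsor_to_ratio(dsor);

	printf("dsor=%u ratio_sel=%u back=%u\n", dsor, ratio, ratio_to_dsor(ratio));
	return 0;
}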
+
+/* Only needed on 1510 */
+static int omap1_set_uart_rate(struct clk * clk, unsigned long rate)
+{
+ unsigned int val;
+
+ val = omap_readl(clk->enable_reg);
+ if (rate == 12000000)
+ val &= ~(1 << clk->enable_bit);
+ else if (rate == 48000000)
+ val |= (1 << clk->enable_bit);
+ else
+ return -EINVAL;
+ omap_writel(val, clk->enable_reg);
+ clk->rate = rate;
+
+ return 0;
+}
+
+/* External clock (MCLK & BCLK) functions */
+static int omap1_set_ext_clk_rate(struct clk * clk, unsigned long rate)
+{
+ unsigned dsor;
+ __u16 ratio_bits;
+
+ dsor = calc_ext_dsor(rate);
+ clk->rate = 96000000 / dsor;
+ if (dsor > 8)
+ ratio_bits = ((dsor - 8) / 2 + 6) << 2;
+ else
+ ratio_bits = (dsor - 2) << 2;
+
+ ratio_bits |= omap_readw(clk->enable_reg) & ~0xfd;
+ omap_writew(ratio_bits, clk->enable_reg);
+
+ return 0;
+}
+
+static long omap1_round_ext_clk_rate(struct clk * clk, unsigned long rate)
+{
+ return 96000000 / calc_ext_dsor(rate);
+}
+
+static void omap1_init_ext_clk(struct clk * clk)
+{
+ unsigned dsor;
+ __u16 ratio_bits;
+
+ /* Determine current rate and ensure clock is based on 96MHz APLL */
+ ratio_bits = omap_readw(clk->enable_reg) & ~1;
+ omap_writew(ratio_bits, clk->enable_reg);
+
+ ratio_bits = (ratio_bits & 0xfc) >> 2;
+ if (ratio_bits > 6)
+ dsor = (ratio_bits - 6) * 2 + 8;
+ else
+ dsor = ratio_bits + 2;
+
+ clk->rate = 96000000 / dsor;
+}
+
+static int omap1_clk_use(struct clk *clk)
+{
+ int ret = 0;
+ if (clk->usecount++ == 0) {
+ if (likely(clk->parent)) {
+ ret = omap1_clk_use(clk->parent);
+
+ if (unlikely(ret != 0)) {
+ clk->usecount--;
+ return ret;
+ }
+
+ if (clk->flags & CLOCK_NO_IDLE_PARENT)
+ if (!cpu_is_omap24xx())
+ omap1_clk_deny_idle(clk->parent);
+ }
+
+ ret = clk->enable(clk);
+
+ if (unlikely(ret != 0) && clk->parent) {
+ omap1_clk_unuse(clk->parent);
+ clk->usecount--;
+ }
+ }
+
+ return ret;
+}
+
+static void omap1_clk_unuse(struct clk *clk)
+{
+ if (clk->usecount > 0 && !(--clk->usecount)) {
+ clk->disable(clk);
+ if (likely(clk->parent)) {
+ omap1_clk_unuse(clk->parent);
+ if (clk->flags & CLOCK_NO_IDLE_PARENT)
+ if (!cpu_is_omap24xx())
+ omap1_clk_allow_idle(clk->parent);
+ }
+ }
+}
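omap1_clk_use()/omap1_clk_unuse() are reference counters that recurse into the parent, so a leaf clock keeps its whole ancestry enabled while it has users. A stripped-down model of that behaviour, with hypothetical clock names and none of the error handling or idle-control details:

#include <stdio.h>

struct demo_clk {
	const char *name;
	struct demo_clk *parent;
	int usecount;
};

static void demo_use(struct demo_clk *clk)
{
	if (clk->usecount++ == 0) {
		if (clk->parent)
			demo_use(clk->parent);   /* parent first, then this clock */
		printf("enable %s\n", clk->name);
	}
}

static void demo_unuse(struct demo_clk *clk)
{
	if (clk->usecount > 0 && !(--clk->usecount)) {
		printf("disable %s\n", clk->name);
		if (clk->parent)
			demo_unuse(clk->parent); /* this clock first, then parent */
	}
}

int main(void)
{
	struct demo_clk root = { "ck_dpll1", NULL, 0 };
	struct demo_clk child = { "armper_ck", &root, 0 };

	demo_use(&child);    /* enables ck_dpll1, then armper_ck */
	demo_use(&child);    /* no output: already enabled */
	demo_unuse(&child);  /* still one user left */
	demo_unuse(&child);  /* disables armper_ck, then ck_dpll1 */
	return 0;
}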
+
+static int omap1_clk_enable(struct clk *clk)
+{
+ __u16 regval16;
+ __u32 regval32;
+
+ if (clk->flags & ALWAYS_ENABLED)
+ return 0;
+
+ if (unlikely(clk->enable_reg == 0)) {
+ printk(KERN_ERR "clock.c: Enable for %s without enable code\n",
+ clk->name);
+ return 0;
+ }
+
+ if (clk->flags & ENABLE_REG_32BIT) {
+ if (clk->flags & VIRTUAL_IO_ADDRESS) {
+ regval32 = __raw_readl(clk->enable_reg);
+ regval32 |= (1 << clk->enable_bit);
+ __raw_writel(regval32, clk->enable_reg);
+ } else {
+ regval32 = omap_readl(clk->enable_reg);
+ regval32 |= (1 << clk->enable_bit);
+ omap_writel(regval32, clk->enable_reg);
+ }
+ } else {
+ if (clk->flags & VIRTUAL_IO_ADDRESS) {
+ regval16 = __raw_readw(clk->enable_reg);
+ regval16 |= (1 << clk->enable_bit);
+ __raw_writew(regval16, clk->enable_reg);
+ } else {
+ regval16 = omap_readw(clk->enable_reg);
+ regval16 |= (1 << clk->enable_bit);
+ omap_writew(regval16, clk->enable_reg);
+ }
+ }
+
+ return 0;
+}
+
+static void omap1_clk_disable(struct clk *clk)
+{
+ __u16 regval16;
+ __u32 regval32;
+
+ if (clk->enable_reg == 0)
+ return;
+
+ if (clk->flags & ENABLE_REG_32BIT) {
+ if (clk->flags & VIRTUAL_IO_ADDRESS) {
+ regval32 = __raw_readl(clk->enable_reg);
+ regval32 &= ~(1 << clk->enable_bit);
+ __raw_writel(regval32, clk->enable_reg);
+ } else {
+ regval32 = omap_readl(clk->enable_reg);
+ regval32 &= ~(1 << clk->enable_bit);
+ omap_writel(regval32, clk->enable_reg);
+ }
+ } else {
+ if (clk->flags & VIRTUAL_IO_ADDRESS) {
+ regval16 = __raw_readw(clk->enable_reg);
+ regval16 &= ~(1 << clk->enable_bit);
+ __raw_writew(regval16, clk->enable_reg);
+ } else {
+ regval16 = omap_readw(clk->enable_reg);
+ regval16 &= ~(1 << clk->enable_bit);
+ omap_writew(regval16, clk->enable_reg);
+ }
+ }
+}
+
+static long omap1_clk_round_rate(struct clk *clk, unsigned long rate)
+{
+ int dsor_exp;
+
+ if (clk->flags & RATE_FIXED)
+ return clk->rate;
+
+ if (clk->flags & RATE_CKCTL) {
+ dsor_exp = calc_dsor_exp(clk, rate);
+ if (dsor_exp < 0)
+ return dsor_exp;
+ if (dsor_exp > 3)
+ dsor_exp = 3;
+ return clk->parent->rate / (1 << dsor_exp);
+ }
+
+ if (clk->round_rate != 0)
+ return clk->round_rate(clk, rate);
+
+ return clk->rate;
+}
+
+static int omap1_clk_set_rate(struct clk *clk, unsigned long rate)
+{
+ int ret = -EINVAL;
+ int dsor_exp;
+ __u16 regval;
+
+ if (clk->set_rate)
+ ret = clk->set_rate(clk, rate);
+ else if (clk->flags & RATE_CKCTL) {
+ dsor_exp = calc_dsor_exp(clk, rate);
+ if (dsor_exp > 3)
+ dsor_exp = -EINVAL;
+ if (dsor_exp < 0)
+ return dsor_exp;
+
+ regval = omap_readw(ARM_CKCTL);
+ regval &= ~(3 << clk->rate_offset);
+ regval |= dsor_exp << clk->rate_offset;
+ regval = verify_ckctl_value(regval);
+ omap_writew(regval, ARM_CKCTL);
+ clk->rate = clk->parent->rate / (1 << dsor_exp);
+ ret = 0;
+ }
+
+ if (unlikely(ret == 0 && (clk->flags & RATE_PROPAGATES)))
+ propagate_rate(clk);
+
+ return ret;
+}
+
+/*-------------------------------------------------------------------------
+ * Omap1 clock reset and init functions
+ *-------------------------------------------------------------------------*/
+
+#ifdef CONFIG_OMAP_RESET_CLOCKS
+/*
+ * Resets some clocks that may be left on from bootloader,
+ * but leaves serial clocks on. See also omap_late_clk_reset().
+ */
+static inline void omap1_early_clk_reset(void)
+{
+ //omap_writel(0x3 << 29, MOD_CONF_CTRL_0);
+}
+
+static int __init omap1_late_clk_reset(void)
+{
+ /* Turn off all unused clocks */
+ struct clk *p;
+ __u32 regval32;
+
+ /* USB_REQ_EN will be disabled later if necessary (usb_dc_ck) */
+ regval32 = omap_readw(SOFT_REQ_REG) & (1 << 4);
+ omap_writew(regval32, SOFT_REQ_REG);
+ omap_writew(0, SOFT_REQ_REG2);
+
+ list_for_each_entry(p, &clocks, node) {
+ if (p->usecount > 0 || (p->flags & ALWAYS_ENABLED) ||
+ p->enable_reg == 0)
+ continue;
+
+ /* Clocks in the DSP domain need api_ck. Just assume bootloader
+ * has not enabled any DSP clocks */
+ if ((u32)p->enable_reg == DSP_IDLECT2) {
+ printk(KERN_INFO "Skipping reset check for DSP domain "
+ "clock \"%s\"\n", p->name);
+ continue;
+ }
+
+ /* Is the clock already disabled? */
+ if (p->flags & ENABLE_REG_32BIT) {
+ if (p->flags & VIRTUAL_IO_ADDRESS)
+ regval32 = __raw_readl(p->enable_reg);
+ else
+ regval32 = omap_readl(p->enable_reg);
+ } else {
+ if (p->flags & VIRTUAL_IO_ADDRESS)
+ regval32 = __raw_readw(p->enable_reg);
+ else
+ regval32 = omap_readw(p->enable_reg);
+ }
+
+ if ((regval32 & (1 << p->enable_bit)) == 0)
+ continue;
+
+ /* FIXME: This clock seems to be necessary but no-one
+ * has asked for its activation. */
+ if (p == &tc2_ck // FIX: pm.c (SRAM), CCP, Camera
+ || p == &ck_dpll1out.clk // FIX: SoSSI, SSR
+ || p == &arm_gpio_ck // FIX: GPIO code for 1510
+ ) {
+ printk(KERN_INFO "FIXME: Clock \"%s\" seems unused\n",
+ p->name);
+ continue;
+ }
+
+ printk(KERN_INFO "Disabling unused clock \"%s\"... ", p->name);
+ p->disable(p);
+ printk(" done\n");
+ }
+
+ return 0;
+}
+late_initcall(omap1_late_clk_reset);
+
+#else
+#define omap1_early_clk_reset() {}
+#endif
+
+static struct clk_functions omap1_clk_functions = {
+ .clk_use = omap1_clk_use,
+ .clk_unuse = omap1_clk_unuse,
+ .clk_round_rate = omap1_clk_round_rate,
+ .clk_set_rate = omap1_clk_set_rate,
+};
+
+int __init omap1_clk_init(void)
+{
+ struct clk ** clkp;
+ const struct omap_clock_config *info;
+ int crystal_type = 0; /* Default 12 MHz */
+
+ omap1_early_clk_reset();
+ clk_init(&omap1_clk_functions);
+
+ /* By default all idlect1 clocks are allowed to idle */
+ arm_idlect1_mask = ~0;
+
+ for (clkp = onchip_clks; clkp < onchip_clks+ARRAY_SIZE(onchip_clks); clkp++) {
+ if (((*clkp)->flags & CLOCK_IN_OMAP1510) && cpu_is_omap1510()) {
+ clk_register(*clkp);
+ continue;
+ }
+
+ if (((*clkp)->flags & CLOCK_IN_OMAP16XX) && cpu_is_omap16xx()) {
+ clk_register(*clkp);
+ continue;
+ }
+
+ if (((*clkp)->flags & CLOCK_IN_OMAP730) && cpu_is_omap730()) {
+ clk_register(*clkp);
+ continue;
+ }
+ }
+
+ info = omap_get_config(OMAP_TAG_CLOCK, struct omap_clock_config);
+ if (info != NULL) {
+ if (!cpu_is_omap1510())
+ crystal_type = info->system_clock_type;
+ }
+
+#if defined(CONFIG_ARCH_OMAP730)
+ ck_ref.rate = 13000000;
+#elif defined(CONFIG_ARCH_OMAP16XX)
+ if (crystal_type == 2)
+ ck_ref.rate = 19200000;
+#endif
+
+ printk("Clocks: ARM_SYSST: 0x%04x DPLL_CTL: 0x%04x ARM_CKCTL: 0x%04x\n",
+ omap_readw(ARM_SYSST), omap_readw(DPLL_CTL),
+ omap_readw(ARM_CKCTL));
+
+ /* We want to be in synchronous scalable mode */
+ omap_writew(0x1000, ARM_SYSST);
+
+#ifdef CONFIG_OMAP_CLOCKS_SET_BY_BOOTLOADER
+ /* Use values set by bootloader. Determine PLL rate and recalculate
+ * dependent clocks as if kernel had changed PLL or divisors.
+ */
+ {
+ unsigned pll_ctl_val = omap_readw(DPLL_CTL);
+
+ ck_dpll1.rate = ck_ref.rate; /* Base xtal rate */
+ if (pll_ctl_val & 0x10) {
+ /* PLL enabled, apply multiplier and divisor */
+ if (pll_ctl_val & 0xf80)
+ ck_dpll1.rate *= (pll_ctl_val & 0xf80) >> 7;
+ ck_dpll1.rate /= ((pll_ctl_val & 0x60) >> 5) + 1;
+ } else {
+ /* PLL disabled, apply bypass divisor */
+ switch (pll_ctl_val & 0xc) {
+ case 0:
+ break;
+ case 0x4:
+ ck_dpll1.rate /= 2;
+ break;
+ default:
+ ck_dpll1.rate /= 4;
+ break;
+ }
+ }
+ }
+ propagate_rate(&ck_dpll1);
+#else
+ /* Find the highest supported frequency and enable it */
+ if (omap1_select_table_rate(&virtual_ck_mpu, ~0)) {
+ printk(KERN_ERR "System frequencies not set. Check your config.\n");
+ /* Guess sane values (60MHz) */
+ omap_writew(0x2290, DPLL_CTL);
+ omap_writew(0x1005, ARM_CKCTL);
+ ck_dpll1.rate = 60000000;
+ propagate_rate(&ck_dpll1);
+ }
+#endif
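For reference, the bootloader branch above decodes DPLL_CTL as: bit 4 = PLL enable, bits 7..11 = multiplier, bits 5..6 = divider minus one, bits 2..3 = bypass divider select. A quick sketch running that decode on 0x2290, the 60 MHz fallback value written in the #else branch (field layout taken from the code above, not re-checked against the TRM):

#include <stdio.h>

/* Decode a DPLL_CTL value the way the bootloader branch above does */
static unsigned long dpll1_rate(unsigned long xtal, unsigned pll_ctl_val)
{
	unsigned long rate = xtal;

	if (pll_ctl_val & 0x10) {
		/* PLL enabled: apply multiplier and divider */
		if (pll_ctl_val & 0xf80)
			rate *= (pll_ctl_val & 0xf80) >> 7;
		rate /= ((pll_ctl_val & 0x60) >> 5) + 1;
	} else {
		/* PLL bypassed: apply bypass divider */
		switch (pll_ctl_val & 0xc) {
		case 0x0:            break;
		case 0x4: rate /= 2; break;
		default:  rate /= 4; break;
		}
	}
	return rate;
}

int main(void)
{
	/* 12 MHz crystal with DPLL_CTL = 0x2290 -> 60 MHz (x5 / 1) */
	printf("%lu Hz\n", dpll1_rate(12000000, 0x2290));
	return 0;
}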
+ /* Cache rates for clocks connected to ck_ref (not dpll1) */
+ propagate_rate(&ck_ref);
+ printk(KERN_INFO "Clocking rate (xtal/DPLL1/MPU): "
+ "%ld.%01ld/%ld.%01ld/%ld.%01ld MHz\n",
+ ck_ref.rate / 1000000, (ck_ref.rate / 100000) % 10,
+ ck_dpll1.rate / 1000000, (ck_dpll1.rate / 100000) % 10,
+ arm_ck.rate / 1000000, (arm_ck.rate / 100000) % 10);
+
+#ifdef CONFIG_MACH_OMAP_PERSEUS2
+ /* Select slicer output as OMAP input clock */
+ omap_writew(omap_readw(OMAP730_PCC_UPLD_CTRL) & ~0x1, OMAP730_PCC_UPLD_CTRL);
+#endif
+
+ /* Turn off DSP and ARM_TIMXO. Make sure ARM_INTHCK is not divided */
+ omap_writew(omap_readw(ARM_CKCTL) & 0x0fff, ARM_CKCTL);
+
+ /* Put DSP/MPUI into reset until needed */
+ omap_writew(0, ARM_RSTCT1);
+ omap_writew(1, ARM_RSTCT2);
+ omap_writew(0x400, ARM_IDLECT1);
+
+ /*
+ * According to OMAP5910 Erratum SYS_DMA_1, bit DMACK_REQ (bit 8)
+ * of the ARM_IDLECT2 register must be set to zero. The power-on
+ * default value of this bit is one.
+ */
+ omap_writew(0x0000, ARM_IDLECT2); /* Turn LCD clock off also */
+
+ /*
+ * Only enable those clocks we will need, let the drivers
+ * enable other clocks as necessary
+ */
+ clk_use(&armper_ck.clk);
+ clk_use(&armxor_ck.clk);
+ clk_use(&armtim_ck.clk); /* This should be done by timer code */
+
+ if (cpu_is_omap1510())
+ clk_enable(&arm_gpio_ck);
+
+ return 0;
+}
+
diff --git a/arch/arm/mach-omap1/clock.h b/arch/arm/mach-omap1/clock.h
new file mode 100644
index 00000000000..f3bdfb50e01
--- /dev/null
+++ b/arch/arm/mach-omap1/clock.h
@@ -0,0 +1,768 @@
+/*
+ * linux/arch/arm/mach-omap1/clock.h
+ *
+ * Copyright (C) 2004 - 2005 Nokia corporation
+ * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
+ * Based on clocks.h by Tony Lindgren, Gordon McNutt and RidgeRun, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP1_CLOCK_H
+#define __ARCH_ARM_MACH_OMAP1_CLOCK_H
+
+static int omap1_clk_enable(struct clk * clk);
+static void omap1_clk_disable(struct clk * clk);
+static void omap1_ckctl_recalc(struct clk * clk);
+static void omap1_watchdog_recalc(struct clk * clk);
+static void omap1_ckctl_recalc_dsp_domain(struct clk * clk);
+static int omap1_clk_enable_dsp_domain(struct clk * clk);
+static int omap1_clk_set_rate_dsp_domain(struct clk * clk, unsigned long rate);
+static void omap1_clk_disable_dsp_domain(struct clk * clk);
+static int omap1_set_uart_rate(struct clk * clk, unsigned long rate);
+static void omap1_uart_recalc(struct clk * clk);
+static int omap1_clk_enable_uart_functional(struct clk * clk);
+static void omap1_clk_disable_uart_functional(struct clk * clk);
+static int omap1_set_ext_clk_rate(struct clk * clk, unsigned long rate);
+static long omap1_round_ext_clk_rate(struct clk * clk, unsigned long rate);
+static void omap1_init_ext_clk(struct clk * clk);
+static int omap1_select_table_rate(struct clk * clk, unsigned long rate);
+static long omap1_round_to_table_rate(struct clk * clk, unsigned long rate);
+static int omap1_clk_use(struct clk *clk);
+static void omap1_clk_unuse(struct clk *clk);
+
+struct mpu_rate {
+ unsigned long rate;
+ unsigned long xtal;
+ unsigned long pll_rate;
+ __u16 ckctl_val;
+ __u16 dpllctl_val;
+};
+
+struct uart_clk {
+ struct clk clk;
+ unsigned long sysc_addr;
+};
+
+/* Provide a method for preventing some ARM IDLECT clocks from idling */
+struct arm_idlect1_clk {
+ struct clk clk;
+ unsigned long no_idle_count;
+ __u8 idlect_shift;
+};
+
+/* ARM_CKCTL bit shifts */
+#define CKCTL_PERDIV_OFFSET 0
+#define CKCTL_LCDDIV_OFFSET 2
+#define CKCTL_ARMDIV_OFFSET 4
+#define CKCTL_DSPDIV_OFFSET 6
+#define CKCTL_TCDIV_OFFSET 8
+#define CKCTL_DSPMMUDIV_OFFSET 10
+/*#define ARM_TIMXO 12*/
+#define EN_DSPCK 13
+/*#define ARM_INTHCK_SEL 14*/ /* Divide-by-2 for mpu inth_ck */
+/* DSP_CKCTL bit shifts */
+#define CKCTL_DSPPERDIV_OFFSET 0
+
+/* ARM_IDLECT2 bit shifts */
+#define EN_WDTCK 0
+#define EN_XORPCK 1
+#define EN_PERCK 2
+#define EN_LCDCK 3
+#define EN_LBCK 4 /* Not on 1610/1710 */
+/*#define EN_HSABCK 5*/
+#define EN_APICK 6
+#define EN_TIMCK 7
+#define DMACK_REQ 8
+#define EN_GPIOCK 9 /* Not on 1610/1710 */
+/*#define EN_LBFREECK 10*/
+#define EN_CKOUT_ARM 11
+
+/* ARM_IDLECT3 bit shifts */
+#define EN_OCPI_CK 0
+#define EN_TC1_CK 2
+#define EN_TC2_CK 4
+
+/* DSP_IDLECT2 bit shifts (0,1,2 are same as for ARM_IDLECT2) */
+#define EN_DSPTIMCK 5
+
+/* Various register defines for clock controls scattered around OMAP chip */
+#define USB_MCLK_EN_BIT 4 /* In ULPD_CLKC_CTRL */
+#define USB_HOST_HHC_UHOST_EN 9 /* In MOD_CONF_CTRL_0 */
+#define SWD_ULPD_PLL_CLK_REQ 1 /* In SWD_CLK_DIV_CTRL_SEL */
+#define COM_ULPD_PLL_CLK_REQ 1 /* In COM_CLK_DIV_CTRL_SEL */
+#define SWD_CLK_DIV_CTRL_SEL 0xfffe0874
+#define COM_CLK_DIV_CTRL_SEL 0xfffe0878
+#define SOFT_REQ_REG 0xfffe0834
+#define SOFT_REQ_REG2 0xfffe0880
+
+/*-------------------------------------------------------------------------
+ * Omap1 MPU rate table
+ *-------------------------------------------------------------------------*/
+static struct mpu_rate rate_table[] = {
+ /* MPU MHz, xtal MHz, dpll1 MHz, CKCTL, DPLL_CTL
+ * NOTE: Comment order here is different from bits in CKCTL value:
+ * armdiv, dspdiv, dspmmu, tcdiv, perdiv, lcddiv
+ */
+#if defined(CONFIG_OMAP_ARM_216MHZ)
+ { 216000000, 12000000, 216000000, 0x050d, 0x2910 }, /* 1/1/2/2/2/8 */
+#endif
+#if defined(CONFIG_OMAP_ARM_195MHZ)
+ { 195000000, 13000000, 195000000, 0x050e, 0x2790 }, /* 1/1/2/2/4/8 */
+#endif
+#if defined(CONFIG_OMAP_ARM_192MHZ)
+ { 192000000, 19200000, 192000000, 0x050f, 0x2510 }, /* 1/1/2/2/8/8 */
+ { 192000000, 12000000, 192000000, 0x050f, 0x2810 }, /* 1/1/2/2/8/8 */
+ { 96000000, 12000000, 192000000, 0x055f, 0x2810 }, /* 2/2/2/2/8/8 */
+ { 48000000, 12000000, 192000000, 0x0baf, 0x2810 }, /* 4/4/4/8/8/8 */
+ { 24000000, 12000000, 192000000, 0x0fff, 0x2810 }, /* 8/8/8/8/8/8 */
+#endif
+#if defined(CONFIG_OMAP_ARM_182MHZ)
+ { 182000000, 13000000, 182000000, 0x050e, 0x2710 }, /* 1/1/2/2/4/8 */
+#endif
+#if defined(CONFIG_OMAP_ARM_168MHZ)
+ { 168000000, 12000000, 168000000, 0x010f, 0x2710 }, /* 1/1/1/2/8/8 */
+#endif
+#if defined(CONFIG_OMAP_ARM_150MHZ)
+ { 150000000, 12000000, 150000000, 0x010a, 0x2cb0 }, /* 1/1/1/2/4/4 */
+#endif
+#if defined(CONFIG_OMAP_ARM_120MHZ)
+ { 120000000, 12000000, 120000000, 0x010a, 0x2510 }, /* 1/1/1/2/4/4 */
+#endif
+#if defined(CONFIG_OMAP_ARM_96MHZ)
+ { 96000000, 12000000, 96000000, 0x0005, 0x2410 }, /* 1/1/1/1/2/2 */
+#endif
+#if defined(CONFIG_OMAP_ARM_60MHZ)
+ { 60000000, 12000000, 60000000, 0x0005, 0x2290 }, /* 1/1/1/1/2/2 */
+#endif
+#if defined(CONFIG_OMAP_ARM_30MHZ)
+ { 30000000, 12000000, 60000000, 0x0555, 0x2290 }, /* 2/2/2/2/2/2 */
+#endif
+ { 0, 0, 0, 0, 0 },
+};
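omap1_select_table_rate() in clock.c walks this table top-down, skips rows whose crystal does not match, and takes the first row whose MPU rate fits the request, which works because the table is ordered fastest-first. A simplified user-space sketch of that walk (it omits the guard that refuses to reprogram a live DPLL1; sample rows copied from the 12 MHz entries above):

#include <stdio.h>

struct demo_mpu_rate { unsigned long rate, xtal, pll_rate; };

/* Same walk as omap1_select_table_rate(): first matching row that fits wins */
static const struct demo_mpu_rate *pick_rate(const struct demo_mpu_rate *t,
					     unsigned long xtal,
					     unsigned long target)
{
	for (; t->rate; t++) {
		if (t->xtal != xtal)
			continue;
		if (t->rate <= target)
			return t;
	}
	return NULL;
}

int main(void)
{
	static const struct demo_mpu_rate demo_table[] = {
		{ 192000000, 12000000, 192000000 },
		{  96000000, 12000000, 192000000 },
		{  60000000, 12000000,  60000000 },
		{ 0, 0, 0 },
	};
	const struct demo_mpu_rate *r = pick_rate(demo_table, 12000000, 100000000);

	if (r)
		printf("picked %lu Hz (DPLL1 at %lu Hz)\n", r->rate, r->pll_rate);
	return 0;
}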
+
+/*-------------------------------------------------------------------------
+ * Omap1 clocks
+ *-------------------------------------------------------------------------*/
+
+static struct clk ck_ref = {
+ .name = "ck_ref",
+ .rate = 12000000,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+ ALWAYS_ENABLED,
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+};
+
+static struct clk ck_dpll1 = {
+ .name = "ck_dpll1",
+ .parent = &ck_ref,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+ RATE_PROPAGATES | ALWAYS_ENABLED,
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+};
+
+static struct arm_idlect1_clk ck_dpll1out = {
+ .clk = {
+ .name = "ck_dpll1out",
+ .parent = &ck_dpll1,
+ .flags = CLOCK_IN_OMAP16XX | CLOCK_IDLE_CONTROL,
+ .enable_reg = (void __iomem *)ARM_IDLECT2,
+ .enable_bit = EN_CKOUT_ARM,
+ .recalc = &followparent_recalc,
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+ },
+ .idlect_shift = 12,
+};
+
+static struct clk arm_ck = {
+ .name = "arm_ck",
+ .parent = &ck_dpll1,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+ RATE_CKCTL | RATE_PROPAGATES | ALWAYS_ENABLED,
+ .rate_offset = CKCTL_ARMDIV_OFFSET,
+ .recalc = &omap1_ckctl_recalc,
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+};
+
+static struct arm_idlect1_clk armper_ck = {
+ .clk = {
+ .name = "armper_ck",
+ .parent = &ck_dpll1,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+ RATE_CKCTL | CLOCK_IDLE_CONTROL,
+ .enable_reg = (void __iomem *)ARM_IDLECT2,
+ .enable_bit = EN_PERCK,
+ .rate_offset = CKCTL_PERDIV_OFFSET,
+ .recalc = &omap1_ckctl_recalc,
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+ },
+ .idlect_shift = 2,
+};
+
+static struct clk arm_gpio_ck = {
+ .name = "arm_gpio_ck",
+ .parent = &ck_dpll1,
+ .flags = CLOCK_IN_OMAP1510,
+ .enable_reg = (void __iomem *)ARM_IDLECT2,
+ .enable_bit = EN_GPIOCK,
+ .recalc = &followparent_recalc,
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+};
+
+static struct arm_idlect1_clk armxor_ck = {
+ .clk = {
+ .name = "armxor_ck",
+ .parent = &ck_ref,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+ CLOCK_IDLE_CONTROL,
+ .enable_reg = (void __iomem *)ARM_IDLECT2,
+ .enable_bit = EN_XORPCK,
+ .recalc = &followparent_recalc,
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+ },
+ .idlect_shift = 1,
+};
+
+static struct arm_idlect1_clk armtim_ck = {
+ .clk = {
+ .name = "armtim_ck",
+ .parent = &ck_ref,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+ CLOCK_IDLE_CONTROL,
+ .enable_reg = (void __iomem *)ARM_IDLECT2,
+ .enable_bit = EN_TIMCK,
+ .recalc = &followparent_recalc,
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+ },
+ .idlect_shift = 9,
+};
+
+static struct arm_idlect1_clk armwdt_ck = {
+ .clk = {
+ .name = "armwdt_ck",
+ .parent = &ck_ref,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+ CLOCK_IDLE_CONTROL,
+ .enable_reg = (void __iomem *)ARM_IDLECT2,
+ .enable_bit = EN_WDTCK,
+ .recalc = &omap1_watchdog_recalc,
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+ },
+ .idlect_shift = 0,
+};
+
+static struct clk arminth_ck16xx = {
+ .name = "arminth_ck",
+ .parent = &arm_ck,
+ .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
+ .recalc = &followparent_recalc,
+ /* Note: On 16xx the frequency can be divided by 2 by programming
+ * ARM_CKCTL:ARM_INTHCK_SEL(14) to 1
+ *
+ * 1510 version is in TC clocks.
+ */
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+};
+
+static struct clk dsp_ck = {
+ .name = "dsp_ck",
+ .parent = &ck_dpll1,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+ RATE_CKCTL,
+ .enable_reg = (void __iomem *)ARM_CKCTL,
+ .enable_bit = EN_DSPCK,
+ .rate_offset = CKCTL_DSPDIV_OFFSET,
+ .recalc = &omap1_ckctl_recalc,
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+};
+
+static struct clk dspmmu_ck = {
+ .name = "dspmmu_ck",
+ .parent = &ck_dpll1,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+ RATE_CKCTL | ALWAYS_ENABLED,
+ .rate_offset = CKCTL_DSPMMUDIV_OFFSET,
+ .recalc = &omap1_ckctl_recalc,
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+};
+
+static struct clk dspper_ck = {
+ .name = "dspper_ck",
+ .parent = &ck_dpll1,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+ RATE_CKCTL | VIRTUAL_IO_ADDRESS,
+ .enable_reg = (void __iomem *)DSP_IDLECT2,
+ .enable_bit = EN_PERCK,
+ .rate_offset = CKCTL_PERDIV_OFFSET,
+ .recalc = &omap1_ckctl_recalc_dsp_domain,
+ .set_rate = &omap1_clk_set_rate_dsp_domain,
+ .enable = &omap1_clk_enable_dsp_domain,
+ .disable = &omap1_clk_disable_dsp_domain,
+};
+
+static struct clk dspxor_ck = {
+ .name = "dspxor_ck",
+ .parent = &ck_ref,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+ VIRTUAL_IO_ADDRESS,
+ .enable_reg = (void __iomem *)DSP_IDLECT2,
+ .enable_bit = EN_XORPCK,
+ .recalc = &followparent_recalc,
+ .enable = &omap1_clk_enable_dsp_domain,
+ .disable = &omap1_clk_disable_dsp_domain,
+};
+
+static struct clk dsptim_ck = {
+ .name = "dsptim_ck",
+ .parent = &ck_ref,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+ VIRTUAL_IO_ADDRESS,
+ .enable_reg = (void __iomem *)DSP_IDLECT2,
+ .enable_bit = EN_DSPTIMCK,
+ .recalc = &followparent_recalc,
+ .enable = &omap1_clk_enable_dsp_domain,
+ .disable = &omap1_clk_disable_dsp_domain,
+};
+
+/* Tie ARM_IDLECT1:IDLIF_ARM to this logical clock structure */
+static struct arm_idlect1_clk tc_ck = {
+ .clk = {
+ .name = "tc_ck",
+ .parent = &ck_dpll1,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+ CLOCK_IN_OMAP730 | RATE_CKCTL |
+ RATE_PROPAGATES | ALWAYS_ENABLED |
+ CLOCK_IDLE_CONTROL,
+ .rate_offset = CKCTL_TCDIV_OFFSET,
+ .recalc = &omap1_ckctl_recalc,
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+ },
+ .idlect_shift = 6,
+};
+
+static struct clk arminth_ck1510 = {
+ .name = "arminth_ck",
+ .parent = &tc_ck.clk,
+ .flags = CLOCK_IN_OMAP1510 | ALWAYS_ENABLED,
+ .recalc = &followparent_recalc,
+ /* Note: On 1510 the frequency follows TC_CK
+ *
+ * 16xx version is in MPU clocks.
+ */
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+};
+
+static struct clk tipb_ck = {
+ /* No-idle controlled by "tc_ck" */
+ .name = "tibp_ck",
+ .parent = &tc_ck.clk,
+ .flags = CLOCK_IN_OMAP1510 | ALWAYS_ENABLED,
+ .recalc = &followparent_recalc,
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+};
+
+static struct clk l3_ocpi_ck = {
+ /* No-idle controlled by "tc_ck" */
+ .name = "l3_ocpi_ck",
+ .parent = &tc_ck.clk,
+ .flags = CLOCK_IN_OMAP16XX,
+ .enable_reg = (void __iomem *)ARM_IDLECT3,
+ .enable_bit = EN_OCPI_CK,
+ .recalc = &followparent_recalc,
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+};
+
+static struct clk tc1_ck = {
+ .name = "tc1_ck",
+ .parent = &tc_ck.clk,
+ .flags = CLOCK_IN_OMAP16XX,
+ .enable_reg = (void __iomem *)ARM_IDLECT3,
+ .enable_bit = EN_TC1_CK,
+ .recalc = &followparent_recalc,
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+};
+
+static struct clk tc2_ck = {
+ .name = "tc2_ck",
+ .parent = &tc_ck.clk,
+ .flags = CLOCK_IN_OMAP16XX,
+ .enable_reg = (void __iomem *)ARM_IDLECT3,
+ .enable_bit = EN_TC2_CK,
+ .recalc = &followparent_recalc,
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+};
+
+static struct clk dma_ck = {
+ /* No-idle controlled by "tc_ck" */
+ .name = "dma_ck",
+ .parent = &tc_ck.clk,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+ ALWAYS_ENABLED,
+ .recalc = &followparent_recalc,
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+};
+
+static struct clk dma_lcdfree_ck = {
+ .name = "dma_lcdfree_ck",
+ .parent = &tc_ck.clk,
+ .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
+ .recalc = &followparent_recalc,
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+};
+
+static struct arm_idlect1_clk api_ck = {
+ .clk = {
+ .name = "api_ck",
+ .parent = &tc_ck.clk,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+ CLOCK_IDLE_CONTROL,
+ .enable_reg = (void __iomem *)ARM_IDLECT2,
+ .enable_bit = EN_APICK,
+ .recalc = &followparent_recalc,
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+ },
+ .idlect_shift = 8,
+};
+
+static struct arm_idlect1_clk lb_ck = {
+ .clk = {
+ .name = "lb_ck",
+ .parent = &tc_ck.clk,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IDLE_CONTROL,
+ .enable_reg = (void __iomem *)ARM_IDLECT2,
+ .enable_bit = EN_LBCK,
+ .recalc = &followparent_recalc,
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+ },
+ .idlect_shift = 4,
+};
+
+static struct clk rhea1_ck = {
+ .name = "rhea1_ck",
+ .parent = &tc_ck.clk,
+ .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
+ .recalc = &followparent_recalc,
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+};
+
+static struct clk rhea2_ck = {
+ .name = "rhea2_ck",
+ .parent = &tc_ck.clk,
+ .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
+ .recalc = &followparent_recalc,
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+};
+
+static struct clk lcd_ck_16xx = {
+ .name = "lcd_ck",
+ .parent = &ck_dpll1,
+ .flags = CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP730 | RATE_CKCTL,
+ .enable_reg = (void __iomem *)ARM_IDLECT2,
+ .enable_bit = EN_LCDCK,
+ .rate_offset = CKCTL_LCDDIV_OFFSET,
+ .recalc = &omap1_ckctl_recalc,
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+};
+
+static struct arm_idlect1_clk lcd_ck_1510 = {
+ .clk = {
+ .name = "lcd_ck",
+ .parent = &ck_dpll1,
+ .flags = CLOCK_IN_OMAP1510 | RATE_CKCTL |
+ CLOCK_IDLE_CONTROL,
+ .enable_reg = (void __iomem *)ARM_IDLECT2,
+ .enable_bit = EN_LCDCK,
+ .rate_offset = CKCTL_LCDDIV_OFFSET,
+ .recalc = &omap1_ckctl_recalc,
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+ },
+ .idlect_shift = 3,
+};
+
+static struct clk uart1_1510 = {
+ .name = "uart1_ck",
+ /* Direct from ULPD, no real parent */
+ .parent = &armper_ck.clk,
+ .rate = 12000000,
+ .flags = CLOCK_IN_OMAP1510 | ENABLE_REG_32BIT |
+ ALWAYS_ENABLED | CLOCK_NO_IDLE_PARENT,
+ .enable_reg = (void __iomem *)MOD_CONF_CTRL_0,
+ .enable_bit = 29, /* Chooses between 12MHz and 48MHz */
+ .set_rate = &omap1_set_uart_rate,
+ .recalc = &omap1_uart_recalc,
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+};
+
+static struct uart_clk uart1_16xx = {
+ .clk = {
+ .name = "uart1_ck",
+ /* Direct from ULPD, no real parent */
+ .parent = &armper_ck.clk,
+ .rate = 48000000,
+ .flags = CLOCK_IN_OMAP16XX | RATE_FIXED |
+ ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
+ .enable_reg = (void __iomem *)MOD_CONF_CTRL_0,
+ .enable_bit = 29,
+ .enable = &omap1_clk_enable_uart_functional,
+ .disable = &omap1_clk_disable_uart_functional,
+ },
+ .sysc_addr = 0xfffb0054,
+};
+
+static struct clk uart2_ck = {
+ .name = "uart2_ck",
+ /* Direct from ULPD, no real parent */
+ .parent = &armper_ck.clk,
+ .rate = 12000000,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+ ENABLE_REG_32BIT | ALWAYS_ENABLED |
+ CLOCK_NO_IDLE_PARENT,
+ .enable_reg = (void __iomem *)MOD_CONF_CTRL_0,
+ .enable_bit = 30, /* Chooses between 12MHz and 48MHz */
+ .set_rate = &omap1_set_uart_rate,
+ .recalc = &omap1_uart_recalc,
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+};
+
+static struct clk uart3_1510 = {
+ .name = "uart3_ck",
+ /* Direct from ULPD, no real parent */
+ .parent = &armper_ck.clk,
+ .rate = 12000000,
+ .flags = CLOCK_IN_OMAP1510 | ENABLE_REG_32BIT |
+ ALWAYS_ENABLED | CLOCK_NO_IDLE_PARENT,
+ .enable_reg = (void __iomem *)MOD_CONF_CTRL_0,
+ .enable_bit = 31, /* Chooses between 12MHz and 48MHz */
+ .set_rate = &omap1_set_uart_rate,
+ .recalc = &omap1_uart_recalc,
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+};
+
+static struct uart_clk uart3_16xx = {
+ .clk = {
+ .name = "uart3_ck",
+ /* Direct from ULPD, no real parent */
+ .parent = &armper_ck.clk,
+ .rate = 48000000,
+ .flags = CLOCK_IN_OMAP16XX | RATE_FIXED |
+ ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
+ .enable_reg = (void __iomem *)MOD_CONF_CTRL_0,
+ .enable_bit = 31,
+ .enable = &omap1_clk_enable_uart_functional,
+ .disable = &omap1_clk_disable_uart_functional,
+ },
+ .sysc_addr = 0xfffb9854,
+};
+
+static struct clk usb_clko = { /* 6 MHz output on W4_USB_CLKO */
+ .name = "usb_clko",
+ /* Direct from ULPD, no parent */
+ .rate = 6000000,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+ RATE_FIXED | ENABLE_REG_32BIT,
+ .enable_reg = (void __iomem *)ULPD_CLOCK_CTRL,
+ .enable_bit = USB_MCLK_EN_BIT,
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+};
+
+static struct clk usb_hhc_ck1510 = {
+ .name = "usb_hhc_ck",
+ /* Direct from ULPD, no parent */
+ .rate = 48000000, /* Actually 2 clocks, 12MHz and 48MHz */
+ .flags = CLOCK_IN_OMAP1510 |
+ RATE_FIXED | ENABLE_REG_32BIT,
+ .enable_reg = (void __iomem *)MOD_CONF_CTRL_0,
+ .enable_bit = USB_HOST_HHC_UHOST_EN,
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+};
+
+static struct clk usb_hhc_ck16xx = {
+ .name = "usb_hhc_ck",
+ /* Direct from ULPD, no parent */
+ .rate = 48000000,
+ /* OTG_SYSCON_2.OTG_PADEN == 0 (not 1510-compatible) */
+ .flags = CLOCK_IN_OMAP16XX |
+ RATE_FIXED | ENABLE_REG_32BIT,
+ .enable_reg = (void __iomem *)OTG_BASE + 0x08 /* OTG_SYSCON_2 */,
+ .enable_bit = 8 /* UHOST_EN */,
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+};
+
+static struct clk usb_dc_ck = {
+ .name = "usb_dc_ck",
+ /* Direct from ULPD, no parent */
+ .rate = 48000000,
+ .flags = CLOCK_IN_OMAP16XX | RATE_FIXED,
+ .enable_reg = (void __iomem *)SOFT_REQ_REG,
+ .enable_bit = 4,
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+};
+
+static struct clk mclk_1510 = {
+ .name = "mclk",
+ /* Direct from ULPD, no parent. May be enabled by ext hardware. */
+ .rate = 12000000,
+ .flags = CLOCK_IN_OMAP1510 | RATE_FIXED,
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+};
+
+static struct clk mclk_16xx = {
+ .name = "mclk",
+ /* Direct from ULPD, no parent. May be enabled by ext hardware. */
+ .flags = CLOCK_IN_OMAP16XX,
+ .enable_reg = (void __iomem *)COM_CLK_DIV_CTRL_SEL,
+ .enable_bit = COM_ULPD_PLL_CLK_REQ,
+ .set_rate = &omap1_set_ext_clk_rate,
+ .round_rate = &omap1_round_ext_clk_rate,
+ .init = &omap1_init_ext_clk,
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+};
+
+static struct clk bclk_1510 = {
+ .name = "bclk",
+ /* Direct from ULPD, no parent. May be enabled by ext hardware. */
+ .rate = 12000000,
+ .flags = CLOCK_IN_OMAP1510 | RATE_FIXED,
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+};
+
+static struct clk bclk_16xx = {
+ .name = "bclk",
+ /* Direct from ULPD, no parent. May be enabled by ext hardware. */
+ .flags = CLOCK_IN_OMAP16XX,
+ .enable_reg = (void __iomem *)SWD_CLK_DIV_CTRL_SEL,
+ .enable_bit = SWD_ULPD_PLL_CLK_REQ,
+ .set_rate = &omap1_set_ext_clk_rate,
+ .round_rate = &omap1_round_ext_clk_rate,
+ .init = &omap1_init_ext_clk,
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+};
+
+static struct clk mmc1_ck = {
+ .name = "mmc1_ck",
+ /* Functional clock is direct from ULPD, interface clock is ARMPER */
+ .parent = &armper_ck.clk,
+ .rate = 48000000,
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+ RATE_FIXED | ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
+ .enable_reg = (void __iomem *)MOD_CONF_CTRL_0,
+ .enable_bit = 23,
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+};
+
+static struct clk mmc2_ck = {
+ .name = "mmc2_ck",
+ /* Functional clock is direct from ULPD, interface clock is ARMPER */
+ .parent = &armper_ck.clk,
+ .rate = 48000000,
+ .flags = CLOCK_IN_OMAP16XX |
+ RATE_FIXED | ENABLE_REG_32BIT | CLOCK_NO_IDLE_PARENT,
+ .enable_reg = (void __iomem *)MOD_CONF_CTRL_0,
+ .enable_bit = 20,
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+};
+
+static struct clk virtual_ck_mpu = {
+ .name = "mpu",
+ .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
+ VIRTUAL_CLOCK | ALWAYS_ENABLED,
+ .parent = &arm_ck, /* Smarter alias for arm_ck */
+ .recalc = &followparent_recalc,
+ .set_rate = &omap1_select_table_rate,
+ .round_rate = &omap1_round_to_table_rate,
+ .enable = &omap1_clk_enable,
+ .disable = &omap1_clk_disable,
+};
+
+static struct clk * onchip_clks[] = {
+ /* non-ULPD clocks */
+ &ck_ref,
+ &ck_dpll1,
+ /* CK_GEN1 clocks */
+ &ck_dpll1out.clk,
+ &arm_ck,
+ &armper_ck.clk,
+ &arm_gpio_ck,
+ &armxor_ck.clk,
+ &armtim_ck.clk,
+ &armwdt_ck.clk,
+ &arminth_ck1510, &arminth_ck16xx,
+ /* CK_GEN2 clocks */
+ &dsp_ck,
+ &dspmmu_ck,
+ &dspper_ck,
+ &dspxor_ck,
+ &dsptim_ck,
+ /* CK_GEN3 clocks */
+ &tc_ck.clk,
+ &tipb_ck,
+ &l3_ocpi_ck,
+ &tc1_ck,
+ &tc2_ck,
+ &dma_ck,
+ &dma_lcdfree_ck,
+ &api_ck.clk,
+ &lb_ck.clk,
+ &rhea1_ck,
+ &rhea2_ck,
+ &lcd_ck_16xx,
+ &lcd_ck_1510.clk,
+ /* ULPD clocks */
+ &uart1_1510,
+ &uart1_16xx.clk,
+ &uart2_ck,
+ &uart3_1510,
+ &uart3_16xx.clk,
+ &usb_clko,
+ &usb_hhc_ck1510, &usb_hhc_ck16xx,
+ &usb_dc_ck,
+ &mclk_1510, &mclk_16xx,
+ &bclk_1510, &bclk_16xx,
+ &mmc1_ck,
+ &mmc2_ck,
+ /* Virtual clocks */
+ &virtual_ck_mpu,
+};
+
+#endif
diff --git a/arch/arm/mach-omap1/devices.c b/arch/arm/mach-omap1/devices.c
index 3c5d901efea..ecbc47514ad 100644
--- a/arch/arm/mach-omap1/devices.c
+++ b/arch/arm/mach-omap1/devices.c
@@ -25,56 +25,7 @@
#include <asm/arch/mux.h>
#include <asm/arch/gpio.h>
-
-static void omap_nop_release(struct device *dev)
-{
- /* Nothing */
-}
-
-/*-------------------------------------------------------------------------*/
-
-#if defined(CONFIG_I2C_OMAP) || defined(CONFIG_I2C_OMAP_MODULE)
-
-#define OMAP_I2C_BASE 0xfffb3800
-
-static struct resource i2c_resources[] = {
- {
- .start = OMAP_I2C_BASE,
- .end = OMAP_I2C_BASE + 0x3f,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = INT_I2C,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-/* DMA not used; works around erratum writing to non-empty i2c fifo */
-
-static struct platform_device omap_i2c_device = {
- .name = "i2c_omap",
- .id = -1,
- .dev = {
- .release = omap_nop_release,
- },
- .num_resources = ARRAY_SIZE(i2c_resources),
- .resource = i2c_resources,
-};
-
-static void omap_init_i2c(void)
-{
- /* FIXME define and use a boot tag, in case of boards that
- * either don't wire up I2C, or chips that mux it differently...
- * it can include clocking and address info, maybe more.
- */
- omap_cfg_reg(I2C_SCL);
- omap_cfg_reg(I2C_SDA);
-
- (void) platform_device_register(&omap_i2c_device);
-}
-#else
-static inline void omap_init_i2c(void) {}
-#endif
+extern void omap_nop_release(struct device *dev);
/*-------------------------------------------------------------------------*/
@@ -110,137 +61,6 @@ static inline void omap_init_irda(void) {}
/*-------------------------------------------------------------------------*/
-#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE)
-
-#define OMAP_MMC1_BASE 0xfffb7800
-#define OMAP_MMC2_BASE 0xfffb7c00 /* omap16xx only */
-
-static struct omap_mmc_conf mmc1_conf;
-
-static u64 mmc1_dmamask = 0xffffffff;
-
-static struct resource mmc1_resources[] = {
- {
- .start = IO_ADDRESS(OMAP_MMC1_BASE),
- .end = IO_ADDRESS(OMAP_MMC1_BASE) + 0x7f,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = INT_MMC,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device mmc_omap_device1 = {
- .name = "mmci-omap",
- .id = 1,
- .dev = {
- .release = omap_nop_release,
- .dma_mask = &mmc1_dmamask,
- .platform_data = &mmc1_conf,
- },
- .num_resources = ARRAY_SIZE(mmc1_resources),
- .resource = mmc1_resources,
-};
-
-#ifdef CONFIG_ARCH_OMAP16XX
-
-static struct omap_mmc_conf mmc2_conf;
-
-static u64 mmc2_dmamask = 0xffffffff;
-
-static struct resource mmc2_resources[] = {
- {
- .start = IO_ADDRESS(OMAP_MMC2_BASE),
- .end = IO_ADDRESS(OMAP_MMC2_BASE) + 0x7f,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = INT_1610_MMC2,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-static struct platform_device mmc_omap_device2 = {
- .name = "mmci-omap",
- .id = 2,
- .dev = {
- .release = omap_nop_release,
- .dma_mask = &mmc2_dmamask,
- .platform_data = &mmc2_conf,
- },
- .num_resources = ARRAY_SIZE(mmc2_resources),
- .resource = mmc2_resources,
-};
-#endif
-
-static void __init omap_init_mmc(void)
-{
- const struct omap_mmc_config *mmc_conf;
- const struct omap_mmc_conf *mmc;
-
- /* NOTE: assumes MMC was never (wrongly) enabled */
- mmc_conf = omap_get_config(OMAP_TAG_MMC, struct omap_mmc_config);
- if (!mmc_conf)
- return;
-
- /* block 1 is always available and has just one pinout option */
- mmc = &mmc_conf->mmc[0];
- if (mmc->enabled) {
- omap_cfg_reg(MMC_CMD);
- omap_cfg_reg(MMC_CLK);
- omap_cfg_reg(MMC_DAT0);
- if (cpu_is_omap1710()) {
- omap_cfg_reg(M15_1710_MMC_CLKI);
- omap_cfg_reg(P19_1710_MMC_CMDDIR);
- omap_cfg_reg(P20_1710_MMC_DATDIR0);
- }
- if (mmc->wire4) {
- omap_cfg_reg(MMC_DAT1);
- /* NOTE: DAT2 can be on W10 (here) or M15 */
- if (!mmc->nomux)
- omap_cfg_reg(MMC_DAT2);
- omap_cfg_reg(MMC_DAT3);
- }
- mmc1_conf = *mmc;
- (void) platform_device_register(&mmc_omap_device1);
- }
-
-#ifdef CONFIG_ARCH_OMAP16XX
- /* block 2 is on newer chips, and has many pinout options */
- mmc = &mmc_conf->mmc[1];
- if (mmc->enabled) {
- if (!mmc->nomux) {
- omap_cfg_reg(Y8_1610_MMC2_CMD);
- omap_cfg_reg(Y10_1610_MMC2_CLK);
- omap_cfg_reg(R18_1610_MMC2_CLKIN);
- omap_cfg_reg(W8_1610_MMC2_DAT0);
- if (mmc->wire4) {
- omap_cfg_reg(V8_1610_MMC2_DAT1);
- omap_cfg_reg(W15_1610_MMC2_DAT2);
- omap_cfg_reg(R10_1610_MMC2_DAT3);
- }
-
- /* These are needed for the level shifter */
- omap_cfg_reg(V9_1610_MMC2_CMDDIR);
- omap_cfg_reg(V5_1610_MMC2_DATDIR0);
- omap_cfg_reg(W19_1610_MMC2_DATDIR1);
- }
-
- /* Feedback clock must be set on OMAP-1710 MMC2 */
- if (cpu_is_omap1710())
- omap_writel(omap_readl(MOD_CONF_CTRL_1) | (1 << 24),
- MOD_CONF_CTRL_1);
- mmc2_conf = *mmc;
- (void) platform_device_register(&mmc_omap_device2);
- }
-#endif
- return;
-}
-#else
-static inline void omap_init_mmc(void) {}
-#endif
-
#if defined(CONFIG_OMAP_RTC) || defined(CONFIG_OMAP_RTC_MODULE)
#define OMAP_RTC_BASE 0xfffb4800
@@ -279,38 +99,6 @@ static void omap_init_rtc(void)
static inline void omap_init_rtc(void) {}
#endif
-/*-------------------------------------------------------------------------*/
-
-#if defined(CONFIG_OMAP16XX_WATCHDOG) || defined(CONFIG_OMAP16XX_WATCHDOG_MODULE)
-
-#define OMAP_WDT_BASE 0xfffeb000
-
-static struct resource wdt_resources[] = {
- {
- .start = OMAP_WDT_BASE,
- .end = OMAP_WDT_BASE + 0x4f,
- .flags = IORESOURCE_MEM,
- },
-};
-
-static struct platform_device omap_wdt_device = {
- .name = "omap1610_wdt",
- .id = -1,
- .dev = {
- .release = omap_nop_release,
- },
- .num_resources = ARRAY_SIZE(wdt_resources),
- .resource = wdt_resources,
-};
-
-static void omap_init_wdt(void)
-{
- (void) platform_device_register(&omap_wdt_device);
-}
-#else
-static inline void omap_init_wdt(void) {}
-#endif
-
/*-------------------------------------------------------------------------*/
@@ -334,18 +122,15 @@ static inline void omap_init_wdt(void) {}
* may be handled by the boot loader, and drivers should expect it will
* normally have been done by the time they're probed.
*/
-static int __init omap_init_devices(void)
+static int __init omap1_init_devices(void)
{
/* please keep these calls, and their implementations above,
* in alphabetical order so they're easier to sort through.
*/
- omap_init_i2c();
omap_init_irda();
- omap_init_mmc();
omap_init_rtc();
- omap_init_wdt();
return 0;
}
-arch_initcall(omap_init_devices);
+arch_initcall(omap1_init_devices);
diff --git a/arch/arm/mach-omap1/id.c b/arch/arm/mach-omap1/id.c
index 986c3b7e09b..5c637c04836 100644
--- a/arch/arm/mach-omap1/id.c
+++ b/arch/arm/mach-omap1/id.c
@@ -18,6 +18,13 @@
#include <asm/io.h>
+#define OMAP_DIE_ID_0 0xfffe1800
+#define OMAP_DIE_ID_1 0xfffe1804
+#define OMAP_PRODUCTION_ID_0 0xfffe2000
+#define OMAP_PRODUCTION_ID_1 0xfffe2004
+#define OMAP32_ID_0 0xfffed400
+#define OMAP32_ID_1 0xfffed404
+
struct omap_id {
u16 jtag_id; /* Used to determine OMAP type */
u8 die_rev; /* Processor revision */
@@ -27,6 +34,7 @@ struct omap_id {
/* Register values to detect the OMAP version */
static struct omap_id omap_ids[] __initdata = {
+ { .jtag_id = 0xb574, .die_rev = 0x2, .omap_id = 0x03310315, .type = 0x03100000},
{ .jtag_id = 0x355f, .die_rev = 0x0, .omap_id = 0x03320000, .type = 0x07300100},
{ .jtag_id = 0xb55f, .die_rev = 0x0, .omap_id = 0x03320000, .type = 0x07300300},
{ .jtag_id = 0xb470, .die_rev = 0x0, .omap_id = 0x03310100, .type = 0x15100000},
@@ -164,6 +172,7 @@ void __init omap_check_revision(void)
case 0x07:
system_rev |= 0x07;
break;
+ case 0x03:
case 0x15:
system_rev |= 0x15;
break;
diff --git a/arch/arm/mach-omap1/io.c b/arch/arm/mach-omap1/io.c
index 79fb86535eb..a7a19f75b9e 100644
--- a/arch/arm/mach-omap1/io.c
+++ b/arch/arm/mach-omap1/io.c
@@ -15,9 +15,10 @@
#include <asm/mach/map.h>
#include <asm/io.h>
+#include <asm/arch/mux.h>
#include <asm/arch/tc.h>
-extern int clk_init(void);
+extern int omap1_clk_init(void);
extern void omap_check_revision(void);
extern void omap_sram_init(void);
@@ -50,7 +51,7 @@ static struct map_desc omap730_io_desc[] __initdata = {
};
#endif
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
static struct map_desc omap1510_io_desc[] __initdata = {
{
.virtual = OMAP1510_DSP_BASE,
@@ -98,7 +99,7 @@ static void __init _omap_map_io(void)
iotable_init(omap730_io_desc, ARRAY_SIZE(omap730_io_desc));
}
#endif
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
if (cpu_is_omap1510()) {
iotable_init(omap1510_io_desc, ARRAY_SIZE(omap1510_io_desc));
}
@@ -119,7 +120,7 @@ static void __init _omap_map_io(void)
/* Must init clocks early to assure that timer interrupt works
*/
- clk_init();
+ omap1_clk_init();
}
/*
@@ -127,7 +128,9 @@ static void __init _omap_map_io(void)
*/
void __init omap_map_common_io(void)
{
- if (!initialized)
+ if (!initialized) {
_omap_map_io();
+ omap1_mux_init();
+ }
}
diff --git a/arch/arm/mach-omap1/irq.c b/arch/arm/mach-omap1/irq.c
index 192ce6055fa..ed65a7d2e94 100644
--- a/arch/arm/mach-omap1/irq.c
+++ b/arch/arm/mach-omap1/irq.c
@@ -47,6 +47,7 @@
#include <asm/irq.h>
#include <asm/mach/irq.h>
#include <asm/arch/gpio.h>
+#include <asm/arch/cpu.h>
#include <asm/io.h>
@@ -147,11 +148,15 @@ static struct omap_irq_bank omap730_irq_banks[] = {
};
#endif
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
static struct omap_irq_bank omap1510_irq_banks[] = {
{ .base_reg = OMAP_IH1_BASE, .trigger_map = 0xb3febfff },
{ .base_reg = OMAP_IH2_BASE, .trigger_map = 0xffbfffed },
};
+static struct omap_irq_bank omap310_irq_banks[] = {
+ { .base_reg = OMAP_IH1_BASE, .trigger_map = 0xb3faefc3 },
+ { .base_reg = OMAP_IH2_BASE, .trigger_map = 0x65b3c061 },
+};
#endif
#if defined(CONFIG_ARCH_OMAP16XX)
@@ -181,11 +186,15 @@ void __init omap_init_irq(void)
irq_bank_count = ARRAY_SIZE(omap730_irq_banks);
}
#endif
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
if (cpu_is_omap1510()) {
irq_banks = omap1510_irq_banks;
irq_bank_count = ARRAY_SIZE(omap1510_irq_banks);
}
+ if (cpu_is_omap310()) {
+ irq_banks = omap310_irq_banks;
+ irq_bank_count = ARRAY_SIZE(omap310_irq_banks);
+ }
#endif
#if defined(CONFIG_ARCH_OMAP16XX)
if (cpu_is_omap16xx()) {
@@ -226,9 +235,11 @@ void __init omap_init_irq(void)
}
/* Unmask level 2 handler */
- if (cpu_is_omap730()) {
+
+ if (cpu_is_omap730())
omap_unmask_irq(INT_730_IH2_IRQ);
- } else {
- omap_unmask_irq(INT_IH2_IRQ);
- }
+ else if (cpu_is_omap1510())
+ omap_unmask_irq(INT_1510_IH2_IRQ);
+ else if (cpu_is_omap16xx())
+ omap_unmask_irq(INT_1610_IH2_IRQ);
}
diff --git a/arch/arm/mach-omap1/leds-h2p2-debug.c b/arch/arm/mach-omap1/leds-h2p2-debug.c
index 399010c1403..65065081591 100644
--- a/arch/arm/mach-omap1/leds-h2p2-debug.c
+++ b/arch/arm/mach-omap1/leds-h2p2-debug.c
@@ -18,6 +18,7 @@
#include <asm/hardware.h>
#include <asm/leds.h>
#include <asm/system.h>
+#include <asm/mach-types.h>
#include <asm/arch/fpga.h>
#include <asm/arch/gpio.h>
@@ -63,14 +64,19 @@ void h2p2_dbg_leds_event(led_event_t evt)
case led_stop:
case led_halted:
/* all leds off during suspend or shutdown */
- omap_set_gpio_dataout(GPIO_TIMER, 0);
- omap_set_gpio_dataout(GPIO_IDLE, 0);
+
+ if (!machine_is_omap_perseus2()) {
+ omap_set_gpio_dataout(GPIO_TIMER, 0);
+ omap_set_gpio_dataout(GPIO_IDLE, 0);
+ }
+
__raw_writew(~0, &fpga->leds);
led_state &= ~LED_STATE_ENABLED;
if (evt == led_halted) {
iounmap(fpga);
fpga = NULL;
}
+
goto done;
case led_claim:
@@ -85,18 +91,37 @@ void h2p2_dbg_leds_event(led_event_t evt)
#ifdef CONFIG_LEDS_TIMER
case led_timer:
led_state ^= LED_TIMER_ON;
- omap_set_gpio_dataout(GPIO_TIMER, led_state & LED_TIMER_ON);
- goto done;
+
+ if (machine_is_omap_perseus2())
+ hw_led_state ^= H2P2_DBG_FPGA_P2_LED_TIMER;
+ else {
+ omap_set_gpio_dataout(GPIO_TIMER, led_state & LED_TIMER_ON);
+ goto done;
+ }
+
+ break;
#endif
#ifdef CONFIG_LEDS_CPU
case led_idle_start:
- omap_set_gpio_dataout(GPIO_IDLE, 1);
- goto done;
+ if (machine_is_omap_perseus2())
+ hw_led_state |= H2P2_DBG_FPGA_P2_LED_IDLE;
+ else {
+ omap_set_gpio_dataout(GPIO_IDLE, 1);
+ goto done;
+ }
+
+ break;
case led_idle_end:
- omap_set_gpio_dataout(GPIO_IDLE, 0);
- goto done;
+ if (machine_is_omap_perseus2())
+ hw_led_state &= ~H2P2_DBG_FPGA_P2_LED_IDLE;
+ else {
+ omap_set_gpio_dataout(GPIO_IDLE, 0);
+ goto done;
+ }
+
+ break;
#endif
case led_green_on:
@@ -135,7 +160,7 @@ void h2p2_dbg_leds_event(led_event_t evt)
/*
* Actually burn the LEDs
*/
- if (led_state & LED_STATE_CLAIMED)
+ if (led_state & LED_STATE_ENABLED)
__raw_writew(~hw_led_state, &fpga->leds);
done:
diff --git a/arch/arm/mach-omap1/leds.c b/arch/arm/mach-omap1/leds.c
index 5c6b1bb6e72..3f9dcac4fd4 100644
--- a/arch/arm/mach-omap1/leds.c
+++ b/arch/arm/mach-omap1/leds.c
@@ -33,7 +33,6 @@ omap_leds_init(void)
if (machine_is_omap_h2()
|| machine_is_omap_h3()
- || machine_is_omap_perseus2()
#ifdef CONFIG_OMAP_OSK_MISTRAL
|| machine_is_omap_osk()
#endif
diff --git a/arch/arm/mach-omap1/mux.c b/arch/arm/mach-omap1/mux.c
new file mode 100644
index 00000000000..d4b8d624e74
--- /dev/null
+++ b/arch/arm/mach-omap1/mux.c
@@ -0,0 +1,289 @@
+/*
+ * linux/arch/arm/mach-omap1/mux.c
+ *
+ * OMAP1 pin multiplexing configurations
+ *
+ * Copyright (C) 2003 - 2005 Nokia Corporation
+ *
+ * Written by Tony Lindgren <tony.lindgren@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <linux/spinlock.h>
+
+#include <asm/arch/mux.h>
+
+#ifdef CONFIG_OMAP_MUX
+
+#ifdef CONFIG_ARCH_OMAP730
+struct pin_config __initdata_or_module omap730_pins[] = {
+MUX_CFG_730("E2_730_KBR0", 12, 21, 0, 0, 20, 1, NA, 0, 0)
+MUX_CFG_730("J7_730_KBR1", 12, 25, 0, 0, 24, 1, NA, 0, 0)
+MUX_CFG_730("E1_730_KBR2", 12, 29, 0, 0, 28, 1, NA, 0, 0)
+MUX_CFG_730("F3_730_KBR3", 13, 1, 0, 0, 0, 1, NA, 0, 0)
+MUX_CFG_730("D2_730_KBR4", 13, 5, 0, 0, 4, 1, NA, 0, 0)
+MUX_CFG_730("C2_730_KBC0", 13, 9, 0, 0, 8, 1, NA, 0, 0)
+MUX_CFG_730("D3_730_KBC1", 13, 13, 0, 0, 12, 1, NA, 0, 0)
+MUX_CFG_730("E4_730_KBC2", 13, 17, 0, 0, 16, 1, NA, 0, 0)
+MUX_CFG_730("F4_730_KBC3", 13, 21, 0, 0, 20, 1, NA, 0, 0)
+MUX_CFG_730("E3_730_KBC4", 13, 25, 0, 0, 24, 1, NA, 0, 0)
+};
+#endif
+
+#if defined(CONFIG_ARCH_OMAP15XX) || defined(CONFIG_ARCH_OMAP16XX)
+struct pin_config __initdata_or_module omap1xxx_pins[] = {
+/*
+ * description mux mode mux pull pull pull pu_pd pu dbg
+ * reg offset mode reg bit ena reg
+ */
+MUX_CFG("UART1_TX", 9, 21, 1, 2, 3, 0, NA, 0, 0)
+MUX_CFG("UART1_RTS", 9, 12, 1, 2, 0, 0, NA, 0, 0)
+
+/* UART2 (COM_UART_GATING), conflicts with USB2 */
+MUX_CFG("UART2_TX", C, 27, 1, 3, 3, 0, NA, 0, 0)
+MUX_CFG("UART2_RX", C, 18, 0, 3, 1, 1, NA, 0, 0)
+MUX_CFG("UART2_CTS", C, 21, 0, 3, 1, 1, NA, 0, 0)
+MUX_CFG("UART2_RTS", C, 24, 1, 3, 2, 0, NA, 0, 0)
+
+/* UART3 (GIGA_UART_GATING) */
+MUX_CFG("UART3_TX", 6, 0, 1, 0, 30, 0, NA, 0, 0)
+MUX_CFG("UART3_RX", 6, 3, 0, 0, 31, 1, NA, 0, 0)
+MUX_CFG("UART3_CTS", 5, 12, 2, 0, 24, 0, NA, 0, 0)
+MUX_CFG("UART3_RTS", 5, 15, 2, 0, 25, 0, NA, 0, 0)
+MUX_CFG("UART3_CLKREQ", 9, 27, 0, 2, 5, 0, NA, 0, 0)
+MUX_CFG("UART3_BCLK", A, 0, 0, 2, 6, 0, NA, 0, 0)
+MUX_CFG("Y15_1610_UART3_RTS", A, 0, 1, 2, 6, 0, NA, 0, 0)
+
+/* PWT & PWL, conflicts with UART3 */
+MUX_CFG("PWT", 6, 0, 2, 0, 30, 0, NA, 0, 0)
+MUX_CFG("PWL", 6, 3, 1, 0, 31, 1, NA, 0, 0)
+
+/* USB internal master generic */
+MUX_CFG("R18_USB_VBUS", 7, 9, 2, 1, 11, 0, NA, 0, 1)
+MUX_CFG("R18_1510_USB_GPIO0", 7, 9, 0, 1, 11, 1, NA, 0, 1)
+/* works around erratum: W4_USB_PUEN and W4_USB_PUDIS are switched! */
+MUX_CFG("W4_USB_PUEN", D, 3, 3, 3, 5, 1, NA, 0, 1)
+MUX_CFG("W4_USB_CLKO", D, 3, 1, 3, 5, 0, NA, 0, 1)
+MUX_CFG("W4_USB_HIGHZ", D, 3, 4, 3, 5, 0, 3, 0, 1)
+MUX_CFG("W4_GPIO58", D, 3, 7, 3, 5, 0, 3, 0, 1)
+
+/* USB1 master */
+MUX_CFG("USB1_SUSP", 8, 27, 2, 1, 27, 0, NA, 0, 1)
+MUX_CFG("USB1_SE0", 9, 0, 2, 1, 28, 0, NA, 0, 1)
+MUX_CFG("W13_1610_USB1_SE0", 9, 0, 4, 1, 28, 0, NA, 0, 1)
+MUX_CFG("USB1_TXEN", 9, 3, 2, 1, 29, 0, NA, 0, 1)
+MUX_CFG("USB1_TXD", 9, 24, 1, 2, 4, 0, NA, 0, 1)
+MUX_CFG("USB1_VP", A, 3, 1, 2, 7, 0, NA, 0, 1)
+MUX_CFG("USB1_VM", A, 6, 1, 2, 8, 0, NA, 0, 1)
+MUX_CFG("USB1_RCV", A, 9, 1, 2, 9, 0, NA, 0, 1)
+MUX_CFG("USB1_SPEED", A, 12, 2, 2, 10, 0, NA, 0, 1)
+MUX_CFG("R13_1610_USB1_SPEED", A, 12, 5, 2, 10, 0, NA, 0, 1)
+MUX_CFG("R13_1710_USB1_SEO", A, 12, 5, 2, 10, 0, NA, 0, 1)
+
+/* USB2 master */
+MUX_CFG("USB2_SUSP", B, 3, 1, 2, 17, 0, NA, 0, 1)
+MUX_CFG("USB2_VP", B, 6, 1, 2, 18, 0, NA, 0, 1)
+MUX_CFG("USB2_TXEN", B, 9, 1, 2, 19, 0, NA, 0, 1)
+MUX_CFG("USB2_VM", C, 18, 1, 3, 0, 0, NA, 0, 1)
+MUX_CFG("USB2_RCV", C, 21, 1, 3, 1, 0, NA, 0, 1)
+MUX_CFG("USB2_SE0", C, 24, 2, 3, 2, 0, NA, 0, 1)
+MUX_CFG("USB2_TXD", C, 27, 2, 3, 3, 0, NA, 0, 1)
+
+/* OMAP-1510 GPIO */
+MUX_CFG("R18_1510_GPIO0", 7, 9, 0, 1, 11, 1, 0, 0, 1)
+MUX_CFG("R19_1510_GPIO1", 7, 6, 0, 1, 10, 1, 0, 0, 1)
+MUX_CFG("M14_1510_GPIO2", 7, 3, 0, 1, 9, 1, 0, 0, 1)
+
+/* OMAP1610 GPIO */
+MUX_CFG("P18_1610_GPIO3", 7, 0, 0, 1, 8, 0, NA, 0, 1)
+MUX_CFG("Y15_1610_GPIO17", A, 0, 7, 2, 6, 0, NA, 0, 1)
+
+/* OMAP-1710 GPIO */
+MUX_CFG("R18_1710_GPIO0", 7, 9, 0, 1, 11, 1, 1, 1, 1)
+MUX_CFG("V2_1710_GPIO10", F, 27, 1, 4, 3, 1, 4, 1, 1)
+MUX_CFG("N21_1710_GPIO14", 6, 9, 0, 1, 1, 1, 1, 1, 1)
+MUX_CFG("W15_1710_GPIO40", 9, 27, 7, 2, 5, 1, 2, 1, 1)
+
+/* MPUIO */
+MUX_CFG("MPUIO2", 7, 18, 0, 1, 14, 1, NA, 0, 1)
+MUX_CFG("N15_1610_MPUIO2", 7, 18, 0, 1, 14, 1, 1, 0, 1)
+MUX_CFG("MPUIO4", 7, 15, 0, 1, 13, 1, NA, 0, 1)
+MUX_CFG("MPUIO5", 7, 12, 0, 1, 12, 1, NA, 0, 1)
+
+MUX_CFG("T20_1610_MPUIO5", 7, 12, 0, 1, 12, 0, 3, 0, 1)
+MUX_CFG("W11_1610_MPUIO6", 10, 15, 2, 3, 8, 0, 3, 0, 1)
+MUX_CFG("V10_1610_MPUIO7", A, 24, 2, 2, 14, 0, 2, 0, 1)
+MUX_CFG("W11_1610_MPUIO9", 10, 15, 1, 3, 8, 0, 3, 0, 1)
+MUX_CFG("V10_1610_MPUIO10", A, 24, 1, 2, 14, 0, 2, 0, 1)
+MUX_CFG("W10_1610_MPUIO11", A, 18, 2, 2, 11, 0, 2, 0, 1)
+MUX_CFG("E20_1610_MPUIO13", 3, 21, 1, 0, 7, 0, 0, 0, 1)
+MUX_CFG("U20_1610_MPUIO14", 9, 6, 6, 0, 30, 0, 0, 0, 1)
+MUX_CFG("E19_1610_MPUIO15", 3, 18, 1, 0, 6, 0, 0, 0, 1)
+
+/* MCBSP2 */
+MUX_CFG("MCBSP2_CLKR", C, 6, 0, 2, 27, 1, NA, 0, 1)
+MUX_CFG("MCBSP2_CLKX", C, 9, 0, 2, 29, 1, NA, 0, 1)
+MUX_CFG("MCBSP2_DR", C, 0, 0, 2, 26, 1, NA, 0, 1)
+MUX_CFG("MCBSP2_DX", C, 15, 0, 2, 31, 1, NA, 0, 1)
+MUX_CFG("MCBSP2_FSR", C, 12, 0, 2, 30, 1, NA, 0, 1)
+MUX_CFG("MCBSP2_FSX", C, 3, 0, 2, 27, 1, NA, 0, 1)
+
+/* MCBSP3 NOTE: Mode must be 1 for clock */
+MUX_CFG("MCBSP3_CLKX", 9, 3, 1, 1, 29, 0, NA, 0, 1)
+
+/* Misc ballouts */
+MUX_CFG("BALLOUT_V8_ARMIO3", B, 18, 0, 2, 25, 1, NA, 0, 1)
+MUX_CFG("N20_HDQ", 6, 18, 1, 1, 4, 0, 1, 4, 0)
+
+/* OMAP-1610 MMC2 */
+MUX_CFG("W8_1610_MMC2_DAT0", B, 21, 6, 2, 23, 1, 2, 1, 1)
+MUX_CFG("V8_1610_MMC2_DAT1", B, 27, 6, 2, 25, 1, 2, 1, 1)
+MUX_CFG("W15_1610_MMC2_DAT2", 9, 12, 6, 2, 5, 1, 2, 1, 1)
+MUX_CFG("R10_1610_MMC2_DAT3", B, 18, 6, 2, 22, 1, 2, 1, 1)
+MUX_CFG("Y10_1610_MMC2_CLK", B, 3, 6, 2, 17, 0, 2, 0, 1)
+MUX_CFG("Y8_1610_MMC2_CMD", B, 24, 6, 2, 24, 1, 2, 1, 1)
+MUX_CFG("V9_1610_MMC2_CMDDIR", B, 12, 6, 2, 20, 0, 2, 1, 1)
+MUX_CFG("V5_1610_MMC2_DATDIR0", B, 15, 6, 2, 21, 0, 2, 1, 1)
+MUX_CFG("W19_1610_MMC2_DATDIR1", 8, 15, 6, 1, 23, 0, 1, 1, 1)
+MUX_CFG("R18_1610_MMC2_CLKIN", 7, 9, 6, 1, 11, 0, 1, 11, 1)
+
+/* OMAP-1610 External Trace Interface */
+MUX_CFG("M19_1610_ETM_PSTAT0", 5, 27, 1, 0, 29, 0, 0, 0, 1)
+MUX_CFG("L15_1610_ETM_PSTAT1", 5, 24, 1, 0, 28, 0, 0, 0, 1)
+MUX_CFG("L18_1610_ETM_PSTAT2", 5, 21, 1, 0, 27, 0, 0, 0, 1)
+MUX_CFG("L19_1610_ETM_D0", 5, 18, 1, 0, 26, 0, 0, 0, 1)
+MUX_CFG("J19_1610_ETM_D6", 5, 0, 1, 0, 20, 0, 0, 0, 1)
+MUX_CFG("J18_1610_ETM_D7", 5, 27, 1, 0, 19, 0, 0, 0, 1)
+
+/* OMAP16XX GPIO */
+MUX_CFG("P20_1610_GPIO4", 6, 27, 0, 1, 7, 0, 1, 1, 1)
+MUX_CFG("V9_1610_GPIO7", B, 12, 1, 2, 20, 0, 2, 1, 1)
+MUX_CFG("W8_1610_GPIO9", B, 21, 0, 2, 23, 0, 2, 1, 1)
+MUX_CFG("N20_1610_GPIO11", 6, 18, 0, 1, 4, 0, 1, 1, 1)
+MUX_CFG("N19_1610_GPIO13", 6, 12, 0, 1, 2, 0, 1, 1, 1)
+MUX_CFG("P10_1610_GPIO22", C, 0, 7, 2, 26, 0, 2, 1, 1)
+MUX_CFG("V5_1610_GPIO24", B, 15, 7, 2, 21, 0, 2, 1, 1)
+MUX_CFG("AA20_1610_GPIO_41", 9, 9, 7, 1, 31, 0, 1, 1, 1)
+MUX_CFG("W19_1610_GPIO48", 8, 15, 7, 1, 23, 1, 1, 0, 1)
+MUX_CFG("M7_1610_GPIO62", 10, 0, 0, 4, 24, 0, 4, 0, 1)
+MUX_CFG("V14_16XX_GPIO37", 9, 18, 7, 2, 2, 0, 2, 2, 0)
+MUX_CFG("R9_16XX_GPIO18", C, 18, 7, 3, 0, 0, 3, 0, 0)
+MUX_CFG("L14_16XX_GPIO49", 6, 3, 7, 0, 31, 0, 0, 31, 0)
+
+/* OMAP-1610 uWire */
+MUX_CFG("V19_1610_UWIRE_SCLK", 8, 6, 0, 1, 20, 0, 1, 1, 1)
+MUX_CFG("U18_1610_UWIRE_SDI", 8, 0, 0, 1, 18, 0, 1, 1, 1)
+MUX_CFG("W21_1610_UWIRE_SDO", 8, 3, 0, 1, 19, 0, 1, 1, 1)
+MUX_CFG("N14_1610_UWIRE_CS0", 8, 9, 1, 1, 21, 0, 1, 1, 1)
+MUX_CFG("P15_1610_UWIRE_CS3", 8, 12, 1, 1, 22, 0, 1, 1, 1)
+MUX_CFG("N15_1610_UWIRE_CS1", 7, 18, 2, 1, 14, 0, NA, 0, 1)
+
+/* OMAP-1610 Flash */
+MUX_CFG("L3_1610_FLASH_CS2B_OE",10, 6, 1, NA, 0, 0, NA, 0, 1)
+MUX_CFG("M8_1610_FLASH_CS2B_WE",10, 3, 1, NA, 0, 0, NA, 0, 1)
+
+/* First MMC interface, same on 1510, 1610 and 1710 */
+MUX_CFG("MMC_CMD", A, 27, 0, 2, 15, 1, 2, 1, 1)
+MUX_CFG("MMC_DAT1", A, 24, 0, 2, 14, 1, 2, 1, 1)
+MUX_CFG("MMC_DAT2", A, 18, 0, 2, 12, 1, 2, 1, 1)
+MUX_CFG("MMC_DAT0", B, 0, 0, 2, 16, 1, 2, 1, 1)
+MUX_CFG("MMC_CLK", A, 21, 0, NA, 0, 0, NA, 0, 1)
+MUX_CFG("MMC_DAT3", 10, 15, 0, 3, 8, 1, 3, 1, 1)
+MUX_CFG("M15_1710_MMC_CLKI", 6, 21, 2, 0, 0, 0, NA, 0, 1)
+MUX_CFG("P19_1710_MMC_CMDDIR", 6, 24, 6, 0, 0, 0, NA, 0, 1)
+MUX_CFG("P20_1710_MMC_DATDIR0", 6, 27, 5, 0, 0, 0, NA, 0, 1)
+
+/* OMAP-1610 USB0 alternate configuration */
+MUX_CFG("W9_USB0_TXEN", B, 9, 5, 2, 19, 0, 2, 0, 1)
+MUX_CFG("AA9_USB0_VP", B, 6, 5, 2, 18, 0, 2, 0, 1)
+MUX_CFG("Y5_USB0_RCV", C, 21, 5, 3, 1, 0, 1, 0, 1)
+MUX_CFG("R9_USB0_VM", C, 18, 5, 3, 0, 0, 3, 0, 1)
+MUX_CFG("V6_USB0_TXD", C, 27, 5, 3, 3, 0, 3, 0, 1)
+MUX_CFG("W5_USB0_SE0", C, 24, 5, 3, 2, 0, 3, 0, 1)
+MUX_CFG("V9_USB0_SPEED", B, 12, 5, 2, 20, 0, 2, 0, 1)
+MUX_CFG("Y10_USB0_SUSP", B, 3, 5, 2, 17, 0, 2, 0, 1)
+
+/* USB2 interface */
+MUX_CFG("W9_USB2_TXEN", B, 9, 1, NA, 0, 0, NA, 0, 1)
+MUX_CFG("AA9_USB2_VP", B, 6, 1, NA, 0, 0, NA, 0, 1)
+MUX_CFG("Y5_USB2_RCV", C, 21, 1, NA, 0, 0, NA, 0, 1)
+MUX_CFG("R9_USB2_VM", C, 18, 1, NA, 0, 0, NA, 0, 1)
+MUX_CFG("V6_USB2_TXD", C, 27, 2, NA, 0, 0, NA, 0, 1)
+MUX_CFG("W5_USB2_SE0", C, 24, 2, NA, 0, 0, NA, 0, 1)
+
+/* 16XX UART */
+MUX_CFG("R13_1610_UART1_TX", A, 12, 6, 2, 10, 0, 2, 10, 1)
+MUX_CFG("V14_16XX_UART1_RX", 9, 18, 0, 2, 2, 0, 2, 2, 1)
+MUX_CFG("R14_1610_UART1_CTS", 9, 15, 0, 2, 1, 0, 2, 1, 1)
+MUX_CFG("AA15_1610_UART1_RTS", 9, 12, 1, 2, 0, 0, 2, 0, 1)
+MUX_CFG("R9_16XX_UART2_RX", C, 18, 0, 3, 0, 0, 3, 0, 1)
+MUX_CFG("L14_16XX_UART3_RX", 6, 3, 0, 0, 31, 0, 0, 31, 1)
+
+/* I2C interface */
+MUX_CFG("I2C_SCL", 7, 24, 0, NA, 0, 0, NA, 0, 0)
+MUX_CFG("I2C_SDA", 7, 27, 0, NA, 0, 0, NA, 0, 0)
+
+/* Keypad */
+MUX_CFG("F18_1610_KBC0", 3, 15, 0, 0, 5, 1, 0, 0, 0)
+MUX_CFG("D20_1610_KBC1", 3, 12, 0, 0, 4, 1, 0, 0, 0)
+MUX_CFG("D19_1610_KBC2", 3, 9, 0, 0, 3, 1, 0, 0, 0)
+MUX_CFG("E18_1610_KBC3", 3, 6, 0, 0, 2, 1, 0, 0, 0)
+MUX_CFG("C21_1610_KBC4", 3, 3, 0, 0, 1, 1, 0, 0, 0)
+MUX_CFG("G18_1610_KBR0", 4, 0, 0, 0, 10, 1, 0, 1, 0)
+MUX_CFG("F19_1610_KBR1", 3, 27, 0, 0, 9, 1, 0, 1, 0)
+MUX_CFG("H14_1610_KBR2", 3, 24, 0, 0, 8, 1, 0, 1, 0)
+MUX_CFG("E20_1610_KBR3", 3, 21, 0, 0, 7, 1, 0, 1, 0)
+MUX_CFG("E19_1610_KBR4", 3, 18, 0, 0, 6, 1, 0, 1, 0)
+MUX_CFG("N19_1610_KBR5", 6, 12, 1, 1, 2, 1, 1, 1, 0)
+
+/* Power management */
+MUX_CFG("T20_1610_LOW_PWR", 7, 12, 1, NA, 0, 0, NA, 0, 0)
+
+/* MCLK Settings */
+MUX_CFG("V5_1710_MCLK_ON", B, 15, 0, NA, 0, 0, NA, 0, 0)
+MUX_CFG("V5_1710_MCLK_OFF", B, 15, 6, NA, 0, 0, NA, 0, 0)
+MUX_CFG("R10_1610_MCLK_ON", B, 18, 0, NA, 22, 0, NA, 1, 0)
+MUX_CFG("R10_1610_MCLK_OFF", B, 18, 6, 2, 22, 1, 2, 1, 1)
+
+/* CompactFlash controller, conflicts with MMC1 */
+MUX_CFG("P11_1610_CF_CD2", A, 27, 3, 2, 15, 1, 2, 1, 1)
+MUX_CFG("R11_1610_CF_IOIS16", B, 0, 3, 2, 16, 1, 2, 1, 1)
+MUX_CFG("V10_1610_CF_IREQ", A, 24, 3, 2, 14, 0, 2, 0, 1)
+MUX_CFG("W10_1610_CF_RESET", A, 18, 3, 2, 12, 1, 2, 1, 1)
+MUX_CFG("W11_1610_CF_CD1", 10, 15, 3, 3, 8, 1, 3, 1, 1)
+};
+#endif /* CONFIG_ARCH_OMAP15XX || CONFIG_ARCH_OMAP16XX */
+
+int __init omap1_mux_init(void)
+{
+
+#ifdef CONFIG_ARCH_OMAP730
+ omap_mux_register(omap730_pins, ARRAY_SIZE(omap730_pins));
+#endif
+
+#if defined(CONFIG_ARCH_OMAP15XX) || defined(CONFIG_ARCH_OMAP16XX)
+ omap_mux_register(omap1xxx_pins, ARRAY_SIZE(omap1xxx_pins));
+#endif
+
+ return 0;
+}
+
+#endif
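The column comment at the top of omap1xxx_pins[] is terse; as a reading aid only, one MUX_CFG() row can be pictured with the simplified, hypothetical stand-in below. It is not the real pin_config layout from <asm/arch/mux.h>; the 3-bit mode-field width and the register naming are assumptions inferred from the table itself.

#include <stdio.h>

struct mux_entry {
	const char *name;
	unsigned mux_reg;	/* FUNC_MUX_CTRL-style register index      */
	unsigned mask_offset;	/* bit offset of the 3-bit mux mode field  */
	unsigned mode;		/* mux mode to program                     */
	unsigned pull_reg;	/* pull-control register index (~0u if NA) */
	unsigned pull_bit;
	unsigned pull_ena;
};

static void show_mux(const struct mux_entry *e)
{
	unsigned mode_mask = 0x7u << e->mask_offset;

	printf("%s: mux reg %u: clear 0x%08x, set 0x%08x\n",
	       e->name, e->mux_reg, mode_mask, e->mode << e->mask_offset);
	if (e->pull_reg != ~0u)
		printf("%s: pull reg %u, bit %u <- enable=%u\n",
		       e->name, e->pull_reg, e->pull_bit, e->pull_ena);
}

int main(void)
{
	/* mirrors MUX_CFG("UART1_TX", 9, 21, 1, 2, 3, 0, NA, 0, 0) above */
	struct mux_entry uart1_tx = { "UART1_TX", 9, 21, 1, 2, 3, 0 };

	show_mux(&uart1_tx);
	return 0;
}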
diff --git a/arch/arm/mach-omap1/serial.c b/arch/arm/mach-omap1/serial.c
index 40c4f7c40e7..6810cfb8446 100644
--- a/arch/arm/mach-omap1/serial.c
+++ b/arch/arm/mach-omap1/serial.c
@@ -109,9 +109,10 @@ static struct platform_device serial_device = {
* By default UART2 does not work on Innovator-1510 if you have
* USB OHCI enabled. To use UART2, you must disable USB2 first.
*/
-void __init omap_serial_init(int ports[OMAP_MAX_NR_PORTS])
+void __init omap_serial_init(void)
{
int i;
+ const struct omap_uart_config *info;
if (cpu_is_omap730()) {
serial_platform_data[0].regshift = 0;
@@ -126,10 +127,14 @@ void __init omap_serial_init(int ports[OMAP_MAX_NR_PORTS])
serial_platform_data[2].uartclk = OMAP1510_BASE_BAUD * 16;
}
+ info = omap_get_config(OMAP_TAG_UART, struct omap_uart_config);
+ if (info == NULL)
+ return;
+
for (i = 0; i < OMAP_MAX_NR_PORTS; i++) {
unsigned char reg;
- if (ports[i] == 0) {
+ if (!((1 << i) & info->enabled_uarts)) {
serial_platform_data[i].membase = NULL;
serial_platform_data[i].mapbase = 0;
continue;
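The ports[] argument is gone: bit i of info->enabled_uarts now says whether UART i+1 should be registered, which is what the board files elsewhere in this patch encode as (1 << 0) | (1 << 1) | (1 << 2). A minimal sketch of the same bit test outside the kernel, with OMAP_MAX_NR_PORTS assumed to be 3 as the three-bit masks suggest:

#include <stdio.h>

#define OMAP_MAX_NR_PORTS 3

int main(void)
{
	/* assumed board setting: UART1 and UART3 wired up, UART2 not */
	unsigned enabled_uarts = (1 << 0) | (1 << 2);
	int i;

	for (i = 0; i < OMAP_MAX_NR_PORTS; i++) {
		if (!((1 << i) & enabled_uarts)) {
			printf("UART%d: disabled, membase cleared\n", i + 1);
			continue;
		}
		printf("UART%d: registered\n", i + 1);
	}
	return 0;
}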
diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c
index 191a9b1ee9b..cdbf4d7620c 100644
--- a/arch/arm/mach-omap1/time.c
+++ b/arch/arm/mach-omap1/time.c
@@ -226,8 +226,8 @@ unsigned long long sched_clock(void)
#ifdef CONFIG_OMAP_32K_TIMER
-#ifdef CONFIG_ARCH_OMAP1510
-#error OMAP 32KHz timer does not currently work on 1510!
+#ifdef CONFIG_ARCH_OMAP15XX
+#error OMAP 32KHz timer does not currently work on 15XX!
#endif
/*
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
new file mode 100644
index 00000000000..578880943cf
--- /dev/null
+++ b/arch/arm/mach-omap2/Kconfig
@@ -0,0 +1,22 @@
+comment "OMAP Core Type"
+ depends on ARCH_OMAP2
+
+config ARCH_OMAP24XX
+ bool "OMAP24xx Based System"
+ depends on ARCH_OMAP2
+
+config ARCH_OMAP2420
+ bool "OMAP2420 support"
+ depends on ARCH_OMAP24XX
+
+comment "OMAP Board Type"
+ depends on ARCH_OMAP2
+
+config MACH_OMAP_GENERIC
+ bool "Generic OMAP board"
+ depends on ARCH_OMAP2 && ARCH_OMAP24XX
+
+config MACH_OMAP_H4
+ bool "OMAP 2420 H4 board"
+ depends on ARCH_OMAP2 && ARCH_OMAP24XX
+
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
new file mode 100644
index 00000000000..42041166435
--- /dev/null
+++ b/arch/arm/mach-omap2/Makefile
@@ -0,0 +1,13 @@
+#
+# Makefile for the linux kernel.
+#
+
+# Common support
+obj-y := irq.o id.o io.o sram-fn.o clock.o mux.o devices.o serial.o
+
+obj-$(CONFIG_OMAP_MPU_TIMER) += timer-gp.o
+
+# Specific board support
+obj-$(CONFIG_MACH_OMAP_GENERIC) += board-generic.o
+obj-$(CONFIG_MACH_OMAP_H4) += board-h4.o
+
diff --git a/arch/arm/mach-omap2/Makefile.boot b/arch/arm/mach-omap2/Makefile.boot
new file mode 100644
index 00000000000..565aff7f37a
--- /dev/null
+++ b/arch/arm/mach-omap2/Makefile.boot
@@ -0,0 +1,3 @@
+ zreladdr-y := 0x80008000
+params_phys-y := 0x80000100
+initrd_phys-y := 0x80800000
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
new file mode 100644
index 00000000000..c602e7a3d93
--- /dev/null
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -0,0 +1,80 @@
+/*
+ * linux/arch/arm/mach-omap/omap2/board-generic.c
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Author: Paul Mundt <paul.mundt@nokia.com>
+ *
+ * Modified from mach-omap/omap1/board-generic.c
+ *
+ * Code for generic OMAP2 board. Should work on many OMAP2 systems where
+ * the bootloader passes the board-specific data to the kernel.
+ * Do not put any board specific code in this file; create a new machine
+ * type if you need custom low-level initializations.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/device.h>
+
+#include <asm/hardware.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+
+#include <asm/arch/gpio.h>
+#include <asm/arch/mux.h>
+#include <asm/arch/usb.h>
+#include <asm/arch/board.h>
+#include <asm/arch/common.h>
+
+static void __init omap_generic_init_irq(void)
+{
+ omap_init_irq();
+}
+
+static struct omap_uart_config generic_uart_config __initdata = {
+ .enabled_uarts = ((1 << 0) | (1 << 1) | (1 << 2)),
+};
+
+static struct omap_mmc_config generic_mmc_config __initdata = {
+ .mmc [0] = {
+ .enabled = 0,
+ .wire4 = 0,
+ .wp_pin = -1,
+ .power_pin = -1,
+ .switch_pin = -1,
+ },
+};
+
+static struct omap_board_config_kernel generic_config[] = {
+ { OMAP_TAG_UART, &generic_uart_config },
+ { OMAP_TAG_MMC, &generic_mmc_config },
+};
+
+static void __init omap_generic_init(void)
+{
+ omap_board_config = generic_config;
+ omap_board_config_size = ARRAY_SIZE(generic_config);
+ omap_serial_init();
+}
+
+static void __init omap_generic_map_io(void)
+{
+ omap_map_common_io();
+}
+
+MACHINE_START(OMAP_GENERIC, "Generic OMAP24xx")
+ /* Maintainer: Paul Mundt <paul.mundt@nokia.com> */
+ .phys_ram = 0x80000000,
+ .phys_io = 0x48000000,
+ .io_pg_offst = ((0xd8000000) >> 18) & 0xfffc,
+ .boot_params = 0x80000100,
+ .map_io = omap_generic_map_io,
+ .init_irq = omap_generic_init_irq,
+ .init_machine = omap_generic_init,
+ .timer = &omap_timer,
+MACHINE_END
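The board file only publishes the tag table; consumers such as omap_serial_init() above pull their entry back out with omap_get_config(OMAP_TAG_UART, ...). The sketch below models just that table walk with a simplified entry type; the real struct omap_board_config_kernel layout and tag values differ, and the real lookup may also consult bootloader-passed tags.

#include <stddef.h>
#include <stdio.h>

/* simplified stand-ins; the real tags live in <asm/arch/board.h> */
#define TAG_UART	1
#define TAG_MMC		2

struct cfg_entry {
	unsigned tag;
	const void *data;
};

static const void *get_config(const struct cfg_entry *tbl, size_t n,
			      unsigned tag)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (tbl[i].tag == tag)
			return tbl[i].data;
	return NULL;
}

int main(void)
{
	unsigned enabled_uarts = 0x7;	/* stand-in for omap_uart_config */
	const struct cfg_entry board_cfg[] = {
		{ TAG_UART, &enabled_uarts },
	};
	const unsigned *uarts = get_config(board_cfg, 1, TAG_UART);

	printf("uart mask: %#x\n", uarts ? *uarts : 0);
	return 0;
}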
diff --git a/arch/arm/mach-omap2/board-h4.c b/arch/arm/mach-omap2/board-h4.c
new file mode 100644
index 00000000000..f2554469a76
--- /dev/null
+++ b/arch/arm/mach-omap2/board-h4.c
@@ -0,0 +1,197 @@
+/*
+ * linux/arch/arm/mach-omap/omap2/board-h4.c
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Author: Paul Mundt <paul.mundt@nokia.com>
+ *
+ * Modified from mach-omap/omap1/board-generic.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
+#include <linux/delay.h>
+
+#include <asm/hardware.h>
+#include <asm/mach-types.h>
+#include <asm/mach/arch.h>
+#include <asm/mach/map.h>
+#include <asm/mach/flash.h>
+
+#include <asm/arch/gpio.h>
+#include <asm/arch/mux.h>
+#include <asm/arch/usb.h>
+#include <asm/arch/board.h>
+#include <asm/arch/common.h>
+#include <asm/arch/prcm.h>
+
+#include <asm/io.h>
+#include <asm/delay.h>
+
+static struct mtd_partition h4_partitions[] = {
+ /* bootloader (U-Boot, etc) in first sector */
+ {
+ .name = "bootloader",
+ .offset = 0,
+ .size = SZ_128K,
+ .mask_flags = MTD_WRITEABLE, /* force read-only */
+ },
+ /* bootloader params in the next sector */
+ {
+ .name = "params",
+ .offset = MTDPART_OFS_APPEND,
+ .size = SZ_128K,
+ .mask_flags = 0,
+ },
+ /* kernel */
+ {
+ .name = "kernel",
+ .offset = MTDPART_OFS_APPEND,
+ .size = SZ_2M,
+ .mask_flags = 0
+ },
+ /* file system */
+ {
+ .name = "filesystem",
+ .offset = MTDPART_OFS_APPEND,
+ .size = MTDPART_SIZ_FULL,
+ .mask_flags = 0
+ }
+};
+
+static struct flash_platform_data h4_flash_data = {
+ .map_name = "cfi_probe",
+ .width = 2,
+ .parts = h4_partitions,
+ .nr_parts = ARRAY_SIZE(h4_partitions),
+};
+
+static struct resource h4_flash_resource = {
+ .start = H4_CS0_BASE,
+ .end = H4_CS0_BASE + SZ_64M - 1,
+ .flags = IORESOURCE_MEM,
+};
+
+static struct platform_device h4_flash_device = {
+ .name = "omapflash",
+ .id = 0,
+ .dev = {
+ .platform_data = &h4_flash_data,
+ },
+ .num_resources = 1,
+ .resource = &h4_flash_resource,
+};
+
+static struct resource h4_smc91x_resources[] = {
+ [0] = {
+ .start = OMAP24XX_ETHR_START, /* Physical */
+ .end = OMAP24XX_ETHR_START + 0xf,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = OMAP_GPIO_IRQ(OMAP24XX_ETHR_GPIO_IRQ),
+ .end = OMAP_GPIO_IRQ(OMAP24XX_ETHR_GPIO_IRQ),
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device h4_smc91x_device = {
+ .name = "smc91x",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(h4_smc91x_resources),
+ .resource = h4_smc91x_resources,
+};
+
+static struct platform_device *h4_devices[] __initdata = {
+ &h4_smc91x_device,
+ &h4_flash_device,
+};
+
+static inline void __init h4_init_smc91x(void)
+{
+ /* Make sure CS1 timings are correct */
+ GPMC_CONFIG1_1 = 0x00011200;
+ GPMC_CONFIG2_1 = 0x001f1f01;
+ GPMC_CONFIG3_1 = 0x00080803;
+ GPMC_CONFIG4_1 = 0x1c091c09;
+ GPMC_CONFIG5_1 = 0x041f1f1f;
+ GPMC_CONFIG6_1 = 0x000004c4;
+ GPMC_CONFIG7_1 = 0x00000f40 | (0x08000000 >> 24);
+ udelay(100);
+
+ omap_cfg_reg(M15_24XX_GPIO92);
+ if (omap_request_gpio(OMAP24XX_ETHR_GPIO_IRQ) < 0) {
+ printk(KERN_ERR "Failed to request GPIO%d for smc91x IRQ\n",
+ OMAP24XX_ETHR_GPIO_IRQ);
+ return;
+ }
+ omap_set_gpio_direction(OMAP24XX_ETHR_GPIO_IRQ, 1);
+}
+
+static void __init omap_h4_init_irq(void)
+{
+ omap_init_irq();
+ omap_gpio_init();
+ h4_init_smc91x();
+}
+
+static struct omap_uart_config h4_uart_config __initdata = {
+ .enabled_uarts = ((1 << 0) | (1 << 1) | (1 << 2)),
+};
+
+static struct omap_mmc_config h4_mmc_config __initdata = {
+ .mmc [0] = {
+ .enabled = 1,
+ .wire4 = 1,
+ .wp_pin = -1,
+ .power_pin = -1,
+ .switch_pin = -1,
+ },
+};
+
+static struct omap_lcd_config h4_lcd_config __initdata = {
+ .panel_name = "h4",
+ .ctrl_name = "internal",
+};
+
+static struct omap_board_config_kernel h4_config[] = {
+ { OMAP_TAG_UART, &h4_uart_config },
+ { OMAP_TAG_MMC, &h4_mmc_config },
+ { OMAP_TAG_LCD, &h4_lcd_config },
+};
+
+static void __init omap_h4_init(void)
+{
+ /*
+ * Make sure the serial ports are muxed on at this point.
+ * You have to mux them off in device drivers later on
+ * if not needed.
+ */
+ platform_add_devices(h4_devices, ARRAY_SIZE(h4_devices));
+ omap_board_config = h4_config;
+ omap_board_config_size = ARRAY_SIZE(h4_config);
+ omap_serial_init();
+}
+
+static void __init omap_h4_map_io(void)
+{
+ omap_map_common_io();
+}
+
+MACHINE_START(OMAP_H4, "OMAP2420 H4 board")
+ /* Maintainer: Paul Mundt <paul.mundt@nokia.com> */
+ .phys_ram = 0x80000000,
+ .phys_io = 0x48000000,
+ .io_pg_offst = ((0xd8000000) >> 18) & 0xfffc,
+ .boot_params = 0x80000100,
+ .map_io = omap_h4_map_io,
+ .init_irq = omap_h4_init_irq,
+ .init_machine = omap_h4_init,
+ .timer = &omap_timer,
+MACHINE_END
diff --git a/arch/arm/mach-omap2/clock.c b/arch/arm/mach-omap2/clock.c
new file mode 100644
index 00000000000..85818d9f263
--- /dev/null
+++ b/arch/arm/mach-omap2/clock.c
@@ -0,0 +1,1129 @@
+/*
+ * linux/arch/arm/mach-omap2/clock.c
+ *
+ * Copyright (C) 2005 Texas Instruments Inc.
+ * Richard Woodruff <r-woodruff2@ti.com>
+ * Created for OMAP2.
+ *
+ * Cleaned up and modified to use omap shared clock framework by
+ * Tony Lindgren <tony@atomide.com>
+ *
+ * Based on omap1 clock.c, Copyright (C) 2004 - 2005 Nokia corporation
+ * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/list.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+
+#include <asm/io.h>
+
+#include <asm/hardware/clock.h>
+#include <asm/arch/clock.h>
+#include <asm/arch/sram.h>
+#include <asm/arch/prcm.h>
+
+#include "clock.h"
+
+//#define DOWN_VARIABLE_DPLL 1 /* Experimental */
+
+static struct prcm_config *curr_prcm_set;
+static struct memory_timings mem_timings;
+static u32 curr_perf_level = PRCM_FULL_SPEED;
+
+/*-------------------------------------------------------------------------
+ * Omap2 specific clock functions
+ *-------------------------------------------------------------------------*/
+
+/* Recalculate SYST_CLK */
+static void omap2_sys_clk_recalc(struct clk * clk)
+{
+ u32 div = PRCM_CLKSRC_CTRL;
+ div &= (1 << 7) | (1 << 6); /* Test if ext clk divided by 1 or 2 */
+ div >>= clk->rate_offset;
+ clk->rate = (clk->parent->rate / div);
+ propagate_rate(clk);
+}
+
+static u32 omap2_get_dpll_rate(struct clk * tclk)
+{
+ int dpll_clk, dpll_mult, dpll_div, amult;
+
+ dpll_mult = (CM_CLKSEL1_PLL >> 12) & 0x03ff; /* 10 bits */
+ dpll_div = (CM_CLKSEL1_PLL >> 8) & 0x0f; /* 4 bits */
+ dpll_clk = (tclk->parent->rate * dpll_mult) / (dpll_div + 1);
+ amult = CM_CLKSEL2_PLL & 0x3;
+ dpll_clk *= amult;
+
+ return dpll_clk;
+}
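The rate computed above reduces to parent * M / (N + 1), optionally doubled by the CM_CLKSEL2_PLL output multiplier. A worked example of just that arithmetic, with assumed field values rather than a validated PRCM setting:

#include <stdio.h>

int main(void)
{
	unsigned long parent = 12000000;	/* sys_ck, assumed 12 MHz      */
	unsigned dpll_mult = 50;		/* CM_CLKSEL1_PLL bits 21:12   */
	unsigned dpll_div = 1;			/* CM_CLKSEL1_PLL bits 11:8    */
	unsigned amult = 2;			/* CM_CLKSEL2_PLL bits 1:0, x2 */
	unsigned long rate;

	rate = parent * dpll_mult / (dpll_div + 1);
	rate *= amult;
	printf("dpll rate = %lu Hz\n", rate);	/* 600000000 */
	return 0;
}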
+
+static void omap2_followparent_recalc(struct clk *clk)
+{
+ followparent_recalc(clk);
+}
+
+static void omap2_propagate_rate(struct clk * clk)
+{
+ if (!(clk->flags & RATE_FIXED))
+ clk->rate = clk->parent->rate;
+
+ propagate_rate(clk);
+}
+
+/* Enable an APLL if off */
+static void omap2_clk_fixed_enable(struct clk *clk)
+{
+ u32 cval, i=0;
+
+ if (clk->enable_bit == 0xff) /* Parent will do it */
+ return;
+
+ cval = CM_CLKEN_PLL;
+
+ if ((cval & (0x3 << clk->enable_bit)) == (0x3 << clk->enable_bit))
+ return;
+
+ cval &= ~(0x3 << clk->enable_bit);
+ cval |= (0x3 << clk->enable_bit);
+ CM_CLKEN_PLL = cval;
+
+ if (clk == &apll96_ck)
+ cval = (1 << 8);
+ else if (clk == &apll54_ck)
+ cval = (1 << 6);
+
+ while (!(CM_IDLEST_CKGEN & cval)) { /* Wait for lock */
+ ++i;
+ udelay(1);
+ if (i == 100000)
+ break;
+ }
+}
+
+/* Enables clock without considering parent dependencies or use count
+ * REVISIT: Maybe change this to use clk->enable like on omap1?
+ */
+static int omap2_clk_enable(struct clk * clk)
+{
+ u32 regval32;
+
+ if (clk->flags & ALWAYS_ENABLED)
+ return 0;
+
+ if (unlikely(clk->enable_reg == 0)) {
+ printk(KERN_ERR "clock.c: Enable for %s without enable code\n",
+ clk->name);
+ return 0;
+ }
+
+ if (clk->enable_reg == (void __iomem *)&CM_CLKEN_PLL) {
+ omap2_clk_fixed_enable(clk);
+ return 0;
+ }
+
+ regval32 = __raw_readl(clk->enable_reg);
+ regval32 |= (1 << clk->enable_bit);
+ __raw_writel(regval32, clk->enable_reg);
+
+ return 0;
+}
+
+/* Stop APLL */
+static void omap2_clk_fixed_disable(struct clk *clk)
+{
+ u32 cval;
+
+ if(clk->enable_bit == 0xff) /* let parent off do it */
+ return;
+
+ cval = CM_CLKEN_PLL;
+ cval &= ~(0x3 << clk->enable_bit);
+ CM_CLKEN_PLL = cval;
+}
+
+/* Disables clock without considering parent dependencies or use count */
+static void omap2_clk_disable(struct clk *clk)
+{
+ u32 regval32;
+
+ if (clk->enable_reg == 0)
+ return;
+
+ if (clk->enable_reg == (void __iomem *)&CM_CLKEN_PLL) {
+ omap2_clk_fixed_disable(clk);
+ return;
+ }
+
+ regval32 = __raw_readl(clk->enable_reg);
+ regval32 &= ~(1 << clk->enable_bit);
+ __raw_writel(regval32, clk->enable_reg);
+}
+
+static int omap2_clk_use(struct clk *clk)
+{
+ int ret = 0;
+
+ if (clk->usecount++ == 0) {
+ if (likely((u32)clk->parent))
+ ret = omap2_clk_use(clk->parent);
+
+ if (unlikely(ret != 0)) {
+ clk->usecount--;
+ return ret;
+ }
+
+ ret = omap2_clk_enable(clk);
+
+ if (unlikely(ret != 0) && clk->parent) {
+ omap2_clk_unuse(clk->parent);
+ clk->usecount--;
+ }
+ }
+
+ return ret;
+}
+
+static void omap2_clk_unuse(struct clk *clk)
+{
+ if (clk->usecount > 0 && !(--clk->usecount)) {
+ omap2_clk_disable(clk);
+ if (likely((u32)clk->parent))
+ omap2_clk_unuse(clk->parent);
+ }
+}
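omap2_clk_use()/omap2_clk_unuse() only touch the enable machinery on the 0->1 and 1->0 transitions of usecount, recursing into the parent so that parents are enabled first and disabled last. The toy model below shows that ordering with plain structs and no hardware access; the real function's error unwinding is omitted.

#include <stdio.h>

struct clk {
	const char *name;
	struct clk *parent;
	int usecount;
	int enabled;
};

static void clk_use(struct clk *c)
{
	if (c->usecount++ == 0) {
		if (c->parent)
			clk_use(c->parent);
		c->enabled = 1;
		printf("enable %s\n", c->name);
	}
}

static void clk_unuse(struct clk *c)
{
	if (c->usecount > 0 && !(--c->usecount)) {
		c->enabled = 0;
		printf("disable %s\n", c->name);
		if (c->parent)
			clk_unuse(c->parent);
	}
}

int main(void)
{
	struct clk core = { "core_ck", NULL, 0, 0 };
	struct clk dss1 = { "dss1_fck", &core, 0, 0 };

	clk_use(&dss1);		/* enables core_ck, then dss1_fck      */
	clk_use(&dss1);		/* count only, no further enables      */
	clk_unuse(&dss1);
	clk_unuse(&dss1);	/* disables dss1_fck, then core_ck     */
	return 0;
}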
+
+/*
+ * Uses the current prcm set to tell if a rate is valid.
+ * You can go slower, but not faster within a given rate set.
+ */
+static u32 omap2_dpll_round_rate(unsigned long target_rate)
+{
+ u32 high, low;
+
+ if ((CM_CLKSEL2_PLL & 0x3) == 1) { /* DPLL clockout */
+ high = curr_prcm_set->dpll_speed * 2;
+ low = curr_prcm_set->dpll_speed;
+ } else { /* DPLL clockout x 2 */
+ high = curr_prcm_set->dpll_speed;
+ low = curr_prcm_set->dpll_speed / 2;
+ }
+
+#ifdef DOWN_VARIABLE_DPLL
+ if (target_rate > high)
+ return high;
+ else
+ return target_rate;
+#else
+ if (target_rate > low)
+ return high;
+ else
+ return low;
+#endif
+
+}
+
+/*
+ * Used for clocks whose rate is governed by one of the CLKSEL_xyz registers.
+ * REVISIT: Maybe change to use clk->enable() functions like on omap1?
+ */
+static void omap2_clksel_recalc(struct clk * clk)
+{
+ u32 fixed = 0, div = 0;
+
+ if (clk == &dpll_ck) {
+ clk->rate = omap2_get_dpll_rate(clk);
+ fixed = 1;
+ div = 0;
+ }
+
+ if (clk == &iva1_mpu_int_ifck) {
+ div = 2;
+ fixed = 1;
+ }
+
+ if ((clk == &dss1_fck) && ((CM_CLKSEL1_CORE & (0x1f << 8)) == 0)) {
+ clk->rate = sys_ck.rate;
+ return;
+ }
+
+ if (!fixed) {
+ div = omap2_clksel_get_divisor(clk);
+ if (div == 0)
+ return;
+ }
+
+ if (div != 0) {
+ if (unlikely(clk->rate == clk->parent->rate / div))
+ return;
+ clk->rate = clk->parent->rate / div;
+ }
+
+ if (unlikely(clk->flags & RATE_PROPAGATES))
+ propagate_rate(clk);
+}
+
+/*
+ * Finds best divider value in an array based on the source and target
+ * rates. The divider array must be sorted with smallest divider first.
+ */
+static inline u32 omap2_divider_from_table(u32 size, u32 *div_array,
+ u32 src_rate, u32 tgt_rate)
+{
+ int i, test_rate;
+
+ if (div_array == NULL)
+ return ~1;
+
+ for (i=0; i < size; i++) {
+ test_rate = src_rate / *div_array;
+ if (test_rate <= tgt_rate)
+ return *div_array;
+ ++div_array;
+ }
+
+ return ~0; /* No acceptable divider */
+}
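Given a sorted divider list, the helper above returns the first divider that brings the source rate at or below the target, with ~0 meaning nothing fits. A standalone equivalent, fed with the dss1 divider list from the function below and an assumed 96 MHz source:

#include <stdio.h>

static unsigned pick_div(unsigned n, const unsigned *divs,
			 unsigned long src, unsigned long tgt)
{
	unsigned i;

	for (i = 0; i < n; i++)
		if (src / divs[i] <= tgt)
			return divs[i];
	return ~0u;	/* no acceptable divider */
}

int main(void)
{
	const unsigned dss1_div[] = {1, 2, 3, 4, 5, 6, 8, 9, 12, 16};

	/* 96 MHz source, want <= 48 MHz: first fit is /2 */
	printf("div = %u\n", pick_div(10, dss1_div, 96000000, 48000000));
	return 0;
}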
+
+/*
+ * Find divisor for the given clock and target rate.
+ *
+ * Note that this will not work for clocks which are part of CONFIG_PARTICIPANT;
+ * they are only settable as part of the virtual_prcm set.
+ */
+static u32 omap2_clksel_round_rate(struct clk *tclk, u32 target_rate,
+ u32 *new_div)
+{
+ u32 gfx_div[] = {2, 3, 4};
+ u32 sysclkout_div[] = {1, 2, 4, 8, 16};
+ u32 dss1_div[] = {1, 2, 3, 4, 5, 6, 8, 9, 12, 16};
+ u32 vlynq_div[] = {1, 2, 3, 4, 6, 8, 9, 12, 16, 18};
+ u32 best_div = ~0, asize = 0;
+ u32 *div_array = NULL;
+
+ switch (tclk->flags & SRC_RATE_SEL_MASK) {
+ case CM_GFX_SEL1:
+ asize = 3;
+ div_array = gfx_div;
+ break;
+ case CM_PLL_SEL1:
+ return omap2_dpll_round_rate(target_rate);
+ case CM_SYSCLKOUT_SEL1:
+ asize = 5;
+ div_array = sysclkout_div;
+ break;
+ case CM_CORE_SEL1:
+ if(tclk == &dss1_fck){
+ if(tclk->parent == &core_ck){
+ asize = 10;
+ div_array = dss1_div;
+ } else {
+ *new_div = 0; /* fixed clk */
+ return(tclk->parent->rate);
+ }
+ } else if((tclk == &vlynq_fck) && cpu_is_omap2420()){
+ if(tclk->parent == &core_ck){
+ asize = 10;
+ div_array = vlynq_div;
+ } else {
+ *new_div = 0; /* fixed clk */
+ return(tclk->parent->rate);
+ }
+ }
+ break;
+ }
+
+ best_div = omap2_divider_from_table(asize, div_array,
+ tclk->parent->rate, target_rate);
+ if (best_div == ~0){
+ *new_div = 1;
+ return best_div; /* signal error */
+ }
+
+ *new_div = best_div;
+ return (tclk->parent->rate / best_div);
+}
+
+/* Given a clock and a rate apply a clock specific rounding function */
+static long omap2_clk_round_rate(struct clk *clk, unsigned long rate)
+{
+ u32 new_div = 0;
+ int valid_rate;
+
+ if (clk->flags & RATE_FIXED)
+ return clk->rate;
+
+ if (clk->flags & RATE_CKCTL) {
+ valid_rate = omap2_clksel_round_rate(clk, rate, &new_div);
+ return valid_rate;
+ }
+
+ if (clk->round_rate != 0)
+ return clk->round_rate(clk, rate);
+
+ return clk->rate;
+}
+
+/*
+ * Check the DLL lock state, and return true if running in unlock mode.
+ * This is needed to compensate for the shifted DLL value in unlock mode.
+ */
+static u32 omap2_dll_force_needed(void)
+{
+ u32 dll_state = SDRC_DLLA_CTRL; /* dlla and dllb are a set */
+
+ if ((dll_state & (1 << 2)) == (1 << 2))
+ return 1;
+ else
+ return 0;
+}
+
+static void omap2_init_memory_params(u32 force_lock_to_unlock_mode)
+{
+ unsigned long dll_cnt;
+ u32 fast_dll = 0;
+
+ mem_timings.m_type = !((SDRC_MR_0 & 0x3) == 0x1); /* DDR = 1, SDR = 0 */
+
+ /* 2422 es2.05 and beyond has a single SIP DDR instead of 2 like others.
+ * In the case of 2422, its ok to use CS1 instead of CS0.
+ */
+
+#if 0 /* FIXME: Enable after 24xx cpu detection works */
+ ctype = get_cpu_type();
+ if (cpu_is_omap2422())
+ mem_timings.base_cs = 1;
+ else
+#endif
+ mem_timings.base_cs = 0;
+
+ if (mem_timings.m_type != M_DDR)
+ return;
+
+ /* With DDR we need to determine the low frequency DLL value */
+ if (((mem_timings.fast_dll_ctrl & (1 << 2)) == M_LOCK_CTRL))
+ mem_timings.dll_mode = M_UNLOCK;
+ else
+ mem_timings.dll_mode = M_LOCK;
+
+ if (mem_timings.base_cs == 0) {
+ fast_dll = SDRC_DLLA_CTRL;
+ dll_cnt = SDRC_DLLA_STATUS & 0xff00;
+ } else {
+ fast_dll = SDRC_DLLB_CTRL;
+ dll_cnt = SDRC_DLLB_STATUS & 0xff00;
+ }
+ if (force_lock_to_unlock_mode) {
+ fast_dll &= ~0xff00;
+ fast_dll |= dll_cnt; /* Current lock mode */
+ }
+ mem_timings.fast_dll_ctrl = fast_dll;
+
+ /* No disruptions, DDR will be offline & C-ABI not followed */
+ omap2_sram_ddr_init(&mem_timings.slow_dll_ctrl,
+ mem_timings.fast_dll_ctrl,
+ mem_timings.base_cs,
+ force_lock_to_unlock_mode);
+ mem_timings.slow_dll_ctrl &= 0xff00; /* Keep lock value */
+
+ /* Turn status into unlock ctrl */
+ mem_timings.slow_dll_ctrl |=
+ ((mem_timings.fast_dll_ctrl & 0xF) | (1 << 2));
+
+ /* 90 degree phase for anything below 133Mhz */
+ mem_timings.slow_dll_ctrl |= (1 << 1);
+}
+
+static u32 omap2_reprogram_sdrc(u32 level, u32 force)
+{
+ u32 prev = curr_perf_level, flags;
+
+ if ((curr_perf_level == level) && !force)
+ return prev;
+
+ if (level == PRCM_HALF_SPEED) {
+ local_irq_save(flags);
+ PRCM_VOLTSETUP = 0xffff;
+ omap2_sram_reprogram_sdrc(PRCM_HALF_SPEED,
+ mem_timings.slow_dll_ctrl,
+ mem_timings.m_type);
+ curr_perf_level = PRCM_HALF_SPEED;
+ local_irq_restore(flags);
+ }
+ if (level == PRCM_FULL_SPEED) {
+ local_irq_save(flags);
+ PRCM_VOLTSETUP = 0xffff;
+ omap2_sram_reprogram_sdrc(PRCM_FULL_SPEED,
+ mem_timings.fast_dll_ctrl,
+ mem_timings.m_type);
+ curr_perf_level = PRCM_FULL_SPEED;
+ local_irq_restore(flags);
+ }
+
+ return prev;
+}
+
+static int omap2_reprogram_dpll(struct clk * clk, unsigned long rate)
+{
+ u32 flags, cur_rate, low, mult, div, valid_rate, done_rate;
+ u32 bypass = 0;
+ struct prcm_config tmpset;
+ int ret = -EINVAL;
+
+ local_irq_save(flags);
+ cur_rate = omap2_get_dpll_rate(&dpll_ck);
+ mult = CM_CLKSEL2_PLL & 0x3;
+
+ if ((rate == (cur_rate / 2)) && (mult == 2)) {
+ omap2_reprogram_sdrc(PRCM_HALF_SPEED, 1);
+ } else if ((rate == (cur_rate * 2)) && (mult == 1)) {
+ omap2_reprogram_sdrc(PRCM_FULL_SPEED, 1);
+ } else if (rate != cur_rate) {
+ valid_rate = omap2_dpll_round_rate(rate);
+ if (valid_rate != rate)
+ goto dpll_exit;
+
+ if ((CM_CLKSEL2_PLL & 0x3) == 1)
+ low = curr_prcm_set->dpll_speed;
+ else
+ low = curr_prcm_set->dpll_speed / 2;
+
+ tmpset.cm_clksel1_pll = CM_CLKSEL1_PLL;
+ tmpset.cm_clksel1_pll &= ~(0x3FFF << 8);
+ div = ((curr_prcm_set->xtal_speed / 1000000) - 1);
+ tmpset.cm_clksel2_pll = CM_CLKSEL2_PLL;
+ tmpset.cm_clksel2_pll &= ~0x3;
+ if (rate > low) {
+ tmpset.cm_clksel2_pll |= 0x2;
+ mult = ((rate / 2) / 1000000);
+ done_rate = PRCM_FULL_SPEED;
+ } else {
+ tmpset.cm_clksel2_pll |= 0x1;
+ mult = (rate / 1000000);
+ done_rate = PRCM_HALF_SPEED;
+ }
+ tmpset.cm_clksel1_pll |= ((div << 8) | (mult << 12));
+
+ /* Worst case */
+ tmpset.base_sdrc_rfr = V24XX_SDRC_RFR_CTRL_BYPASS;
+
+ if (rate == curr_prcm_set->xtal_speed) /* If asking for 1-1 */
+ bypass = 1;
+
+ omap2_reprogram_sdrc(PRCM_FULL_SPEED, 1); /* For init_mem */
+
+ /* Force dll lock mode */
+ omap2_set_prcm(tmpset.cm_clksel1_pll, tmpset.base_sdrc_rfr,
+ bypass);
+
+ /* Errata: ret dll entry state */
+ omap2_init_memory_params(omap2_dll_force_needed());
+ omap2_reprogram_sdrc(done_rate, 0);
+ }
+ omap2_clksel_recalc(&dpll_ck);
+ ret = 0;
+
+dpll_exit:
+ local_irq_restore(flags);
+ return(ret);
+}
+
+/* Just return the MPU speed */
+static void omap2_mpu_recalc(struct clk * clk)
+{
+ clk->rate = curr_prcm_set->mpu_speed;
+}
+
+/*
+ * Look for a rate equal to or less than the target rate, given a configuration set.
+ *
+ * What's not entirely clear is "which" field represents the key field.
+ * Some might argue L3-DDR, others ARM, others IVA. This code is simple and
+ * just uses the ARM rates.
+ */
+static long omap2_round_to_table_rate(struct clk * clk, unsigned long rate)
+{
+ struct prcm_config * ptr;
+ long highest_rate;
+
+ if (clk != &virt_prcm_set)
+ return -EINVAL;
+
+ highest_rate = -EINVAL;
+
+ for (ptr = rate_table; ptr->mpu_speed; ptr++) {
+ if (ptr->xtal_speed != sys_ck.rate)
+ continue;
+
+ highest_rate = ptr->mpu_speed;
+
+ /* Can check only after xtal frequency check */
+ if (ptr->mpu_speed <= rate)
+ break;
+ }
+ return highest_rate;
+}
+
+/*
+ * omap2_convert_field_to_div() - turn field value into integer divider
+ */
+static u32 omap2_clksel_to_divisor(u32 div_sel, u32 field_val)
+{
+ u32 i;
+ u32 clkout_array[] = {1, 2, 4, 8, 16};
+
+ if ((div_sel & SRC_RATE_SEL_MASK) == CM_SYSCLKOUT_SEL1) {
+ for (i = 0; i < 5; i++) {
+ if (field_val == i)
+ return clkout_array[i];
+ }
+ return ~0;
+ } else
+ return field_val;
+}
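Only the SYSCLKOUT field is an index rather than the divider itself: field values 0..4 select dividers 1, 2, 4, 8 and 16, i.e. 1 << field; every other CLKSEL field already holds the divider. A compact equivalent of that lookup, for illustration only:

/* SYSCLKOUT: the register field is log2 of the divider */
static inline unsigned sysclkout_field_to_div(unsigned field_val)
{
	return field_val <= 4 ? 1u << field_val : ~0u;	/* ~0 = invalid */
}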
+
+/*
+ * Returns the CLKSEL divider register value
+ * REVISIT: This should be cleaned up to work nicely with void __iomem *
+ */
+static u32 omap2_get_clksel(u32 *div_sel, u32 *field_mask,
+ struct clk *clk)
+{
+ int ret = ~0;
+ u32 reg_val, div_off;
+ u32 div_addr = 0;
+ u32 mask = ~0;
+
+ div_off = clk->rate_offset;
+
+ switch ((*div_sel & SRC_RATE_SEL_MASK)) {
+ case CM_MPU_SEL1:
+ div_addr = (u32)&CM_CLKSEL_MPU;
+ mask = 0x1f;
+ break;
+ case CM_DSP_SEL1:
+ div_addr = (u32)&CM_CLKSEL_DSP;
+ if (cpu_is_omap2420()) {
+ if ((div_off == 0) || (div_off == 8))
+ mask = 0x1f;
+ else if (div_off == 5)
+ mask = 0x3;
+ } else if (cpu_is_omap2430()) {
+ if (div_off == 0)
+ mask = 0x1f;
+ else if (div_off == 5)
+ mask = 0x3;
+ }
+ break;
+ case CM_GFX_SEL1:
+ div_addr = (u32)&CM_CLKSEL_GFX;
+ if (div_off == 0)
+ mask = 0x7;
+ break;
+ case CM_MODEM_SEL1:
+ div_addr = (u32)&CM_CLKSEL_MDM;
+ if (div_off == 0)
+ mask = 0xf;
+ break;
+ case CM_SYSCLKOUT_SEL1:
+ div_addr = (u32)&PRCM_CLKOUT_CTRL;
+ if ((div_off == 3) || (div_off == 11))
+ mask = 0x3;
+ break;
+ case CM_CORE_SEL1:
+ div_addr = (u32)&CM_CLKSEL1_CORE;
+ switch (div_off) {
+ case 0: /* l3 */
+ case 8: /* dss1 */
+ case 15: /* vlynq-2420 */
+ case 20: /* ssi */
+ mask = 0x1f; break;
+ case 5: /* l4 */
+ mask = 0x3; break;
+ case 13: /* dss2 */
+ mask = 0x1; break;
+ case 25: /* usb */
+ mask = 0xf; break;
+ }
+ }
+
+ *field_mask = mask;
+
+ if (unlikely(mask == ~0))
+ div_addr = 0;
+
+ *div_sel = div_addr;
+
+ if (unlikely(div_addr == 0))
+ return ret;
+
+ /* Isolate field */
+ reg_val = __raw_readl((void __iomem *)div_addr) & (mask << div_off);
+
+ /* Normalize back to divider value */
+ reg_val >>= div_off;
+
+ return reg_val;
+}
+
+/*
+ * Return divider to be applied to parent clock.
+ * Return 0 on error.
+ */
+static u32 omap2_clksel_get_divisor(struct clk *clk)
+{
+ int ret = 0;
+ u32 div, div_sel, div_off, field_mask, field_val;
+
+ /* isolate control register */
+ div_sel = (SRC_RATE_SEL_MASK & clk->flags);
+
+ div_off = clk->rate_offset;
+ field_val = omap2_get_clksel(&div_sel, &field_mask, clk);
+ if (div_sel == 0)
+ return ret;
+
+ div_sel = (SRC_RATE_SEL_MASK & clk->flags);
+ div = omap2_clksel_to_divisor(div_sel, field_val);
+
+ return div;
+}
+
+/* Set the clock rate for a clock source */
+static int omap2_clk_set_rate(struct clk *clk, unsigned long rate)
+
+{
+ int ret = -EINVAL;
+ void __iomem * reg;
+ u32 div_sel, div_off, field_mask, field_val, reg_val, validrate;
+ u32 new_div = 0;
+
+ if (!(clk->flags & CONFIG_PARTICIPANT) && (clk->flags & RATE_CKCTL)) {
+ if (clk == &dpll_ck)
+ return omap2_reprogram_dpll(clk, rate);
+
+ /* Isolate control register */
+ div_sel = (SRC_RATE_SEL_MASK & clk->flags);
+ div_off = clk->src_offset;
+
+ validrate = omap2_clksel_round_rate(clk, rate, &new_div);
+ if(validrate != rate)
+ return(ret);
+
+ field_val = omap2_get_clksel(&div_sel, &field_mask, clk);
+ if (div_sel == 0)
+ return ret;
+
+ if(clk->flags & CM_SYSCLKOUT_SEL1){
+ switch(new_div){
+ case 16: field_val = 4; break;
+ case 8: field_val = 3; break;
+ case 4: field_val = 2; break;
+ case 2: field_val = 1; break;
+ case 1: field_val = 0; break;
+ }
+ }
+ else
+ field_val = new_div;
+
+ reg = (void __iomem *)div_sel;
+
+ reg_val = __raw_readl(reg);
+ reg_val &= ~(field_mask << div_off);
+ reg_val |= (field_val << div_off);
+
+ __raw_writel(reg_val, reg);
+ clk->rate = clk->parent->rate / field_val;
+
+ if (clk->flags & DELAYED_APP)
+ __raw_writel(0x1, (void __iomem *)&PRCM_CLKCFG_CTRL);
+ ret = 0;
+ } else if (clk->set_rate != 0)
+ ret = clk->set_rate(clk, rate);
+
+ if (unlikely(ret == 0 && (clk->flags & RATE_PROPAGATES)))
+ propagate_rate(clk);
+
+ return ret;
+}
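The register update in omap2_clk_set_rate() is a plain read-modify-write of one bit-field: clear mask << offset, OR in the new value, write back. The same step in isolation, with an ordinary variable standing in for the CLKSEL register and made-up contents:

#include <stdio.h>

static unsigned set_field(unsigned reg, unsigned mask,
			  unsigned offset, unsigned val)
{
	reg &= ~(mask << offset);
	reg |= (val & mask) << offset;
	return reg;
}

int main(void)
{
	unsigned clksel = 0x0000250a;	/* made-up CLKSEL register contents */

	/* program a 5-bit divider field at bit offset 8 to divide-by-6 */
	clksel = set_field(clksel, 0x1f, 8, 6);
	printf("CLKSEL = 0x%08x\n", clksel);	/* 0x0000260a */
	return 0;
}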
+
+/* Converts encoded control register address into a full address */
+static u32 omap2_get_src_field(u32 *type_to_addr, u32 reg_offset,
+ struct clk *src_clk, u32 *field_mask)
+{
+ u32 val = ~0, src_reg_addr = 0, mask = 0;
+
+ /* Find target control register.*/
+ switch ((*type_to_addr & SRC_RATE_SEL_MASK)) {
+ case CM_CORE_SEL1:
+ src_reg_addr = (u32)&CM_CLKSEL1_CORE;
+ if (reg_offset == 13) { /* DSS2_fclk */
+ mask = 0x1;
+ if (src_clk == &sys_ck)
+ val = 0;
+ if (src_clk == &func_48m_ck)
+ val = 1;
+ } else if (reg_offset == 8) { /* DSS1_fclk */
+ mask = 0x1f;
+ if (src_clk == &sys_ck)
+ val = 0;
+ else if (src_clk == &core_ck) /* divided clock */
+ val = 0x10; /* rate needs fixing */
+ } else if ((reg_offset == 15) && cpu_is_omap2420()) { /* vlynq */
+ mask = 0x1F;
+ if(src_clk == &func_96m_ck)
+ val = 0;
+ else if (src_clk == &core_ck)
+ val = 0x10;
+ }
+ break;
+ case CM_CORE_SEL2:
+ src_reg_addr = (u32)&CM_CLKSEL2_CORE;
+ mask = 0x3;
+ if (src_clk == &func_32k_ck)
+ val = 0x0;
+ if (src_clk == &sys_ck)
+ val = 0x1;
+ if (src_clk == &alt_ck)
+ val = 0x2;
+ break;
+ case CM_WKUP_SEL1:
+ src_reg_addr = (u32)&CM_CLKSEL2_CORE;
+ mask = 0x3;
+ if (src_clk == &func_32k_ck)
+ val = 0x0;
+ if (src_clk == &sys_ck)
+ val = 0x1;
+ if (src_clk == &alt_ck)
+ val = 0x2;
+ break;
+ case CM_PLL_SEL1:
+ src_reg_addr = (u32)&CM_CLKSEL1_PLL;
+ mask = 0x1;
+ if (reg_offset == 0x3) {
+ if (src_clk == &apll96_ck)
+ val = 0;
+ if (src_clk == &alt_ck)
+ val = 1;
+ }
+ else if (reg_offset == 0x5) {
+ if (src_clk == &apll54_ck)
+ val = 0;
+ if (src_clk == &alt_ck)
+ val = 1;
+ }
+ break;
+ case CM_PLL_SEL2:
+ src_reg_addr = (u32)&CM_CLKSEL2_PLL;
+ mask = 0x3;
+ if (src_clk == &func_32k_ck)
+ val = 0x0;
+ if (src_clk == &dpll_ck)
+ val = 0x2;
+ break;
+ case CM_SYSCLKOUT_SEL1:
+ src_reg_addr = (u32)&PRCM_CLKOUT_CTRL;
+ mask = 0x3;
+ if (src_clk == &dpll_ck)
+ val = 0;
+ if (src_clk == &sys_ck)
+ val = 1;
+ if (src_clk == &func_54m_ck)
+ val = 2;
+ if (src_clk == &func_96m_ck)
+ val = 3;
+ break;
+ }
+
+ if (val == ~0) /* Catch errors in offset */
+ *type_to_addr = 0;
+ else
+ *type_to_addr = src_reg_addr;
+ *field_mask = mask;
+
+ return val;
+}
+
+static int omap2_clk_set_parent(struct clk *clk, struct clk *new_parent)
+{
+ void __iomem * reg;
+ u32 src_sel, src_off, field_val, field_mask, reg_val, rate;
+ int ret = -EINVAL;
+
+ if (unlikely(clk->flags & CONFIG_PARTICIPANT))
+ return ret;
+
+ if (clk->flags & SRC_SEL_MASK) { /* On-chip SEL collection */
+ src_sel = (SRC_RATE_SEL_MASK & clk->flags);
+ src_off = clk->src_offset;
+
+ if (src_sel == 0)
+ goto set_parent_error;
+
+ field_val = omap2_get_src_field(&src_sel, src_off, new_parent,
+ &field_mask);
+
+ reg = (void __iomem *)src_sel;
+
+ if (clk->usecount > 0)
+ omap2_clk_disable(clk);
+
+ /* Set new source value (previous dividers if any in effect) */
+ reg_val = __raw_readl(reg) & ~(field_mask << src_off);
+ reg_val |= (field_val << src_off);
+ __raw_writel(reg_val, reg);
+
+ if (clk->flags & DELAYED_APP)
+ __raw_writel(0x1, (void __iomem *)&PRCM_CLKCFG_CTRL);
+
+ if (clk->usecount > 0)
+ omap2_clk_enable(clk);
+
+ clk->parent = new_parent;
+
+ /* SRC_RATE_SEL_MASK clocks follow their parents rates.*/
+ if ((new_parent == &core_ck) && (clk == &dss1_fck))
+ clk->rate = new_parent->rate / 0x10;
+ else
+ clk->rate = new_parent->rate;
+
+ if (unlikely(clk->flags & RATE_PROPAGATES))
+ propagate_rate(clk);
+
+ return 0;
+ } else {
+ clk->parent = new_parent;
+ rate = new_parent->rate;
+ omap2_clk_set_rate(clk, rate);
+ ret = 0;
+ }
+
+ set_parent_error:
+ return ret;
+}
+
+/* Sets basic clocks based on the specified rate */
+static int omap2_select_table_rate(struct clk * clk, unsigned long rate)
+{
+ u32 flags, cur_rate, done_rate, bypass = 0;
+ u8 cpu_mask = 0;
+ struct prcm_config *prcm;
+ unsigned long found_speed = 0;
+
+ if (clk != &virt_prcm_set)
+ return -EINVAL;
+
+ /* FIXME: Change cpu_is_omap2420() to cpu_is_omap242x() */
+ if (cpu_is_omap2420())
+ cpu_mask = RATE_IN_242X;
+ else if (cpu_is_omap2430())
+ cpu_mask = RATE_IN_243X;
+
+ for (prcm = rate_table; prcm->mpu_speed; prcm++) {
+ if (!(prcm->flags & cpu_mask))
+ continue;
+
+ if (prcm->xtal_speed != sys_ck.rate)
+ continue;
+
+ if (prcm->mpu_speed <= rate) {
+ found_speed = prcm->mpu_speed;
+ break;
+ }
+ }
+
+ if (!found_speed) {
+ printk(KERN_INFO "Could not set MPU rate to %luMHz\n",
+ rate / 1000000);
+ return -EINVAL;
+ }
+
+ curr_prcm_set = prcm;
+ cur_rate = omap2_get_dpll_rate(&dpll_ck);
+
+ if (prcm->dpll_speed == cur_rate / 2) {
+ omap2_reprogram_sdrc(PRCM_HALF_SPEED, 1);
+ } else if (prcm->dpll_speed == cur_rate * 2) {
+ omap2_reprogram_sdrc(PRCM_FULL_SPEED, 1);
+ } else if (prcm->dpll_speed != cur_rate) {
+ local_irq_save(flags);
+
+ if (prcm->dpll_speed == prcm->xtal_speed)
+ bypass = 1;
+
+ if ((prcm->cm_clksel2_pll & 0x3) == 2)
+ done_rate = PRCM_FULL_SPEED;
+ else
+ done_rate = PRCM_HALF_SPEED;
+
+ /* MPU divider */
+ CM_CLKSEL_MPU = prcm->cm_clksel_mpu;
+
+ /* dsp + iva1 div(2420), iva2.1(2430) */
+ CM_CLKSEL_DSP = prcm->cm_clksel_dsp;
+
+ CM_CLKSEL_GFX = prcm->cm_clksel_gfx;
+
+ /* Major subsystem dividers */
+ CM_CLKSEL1_CORE = prcm->cm_clksel1_core;
+ if (cpu_is_omap2430())
+ CM_CLKSEL_MDM = prcm->cm_clksel_mdm;
+
+ /* x2 to enter init_mem */
+ omap2_reprogram_sdrc(PRCM_FULL_SPEED, 1);
+
+ omap2_set_prcm(prcm->cm_clksel1_pll, prcm->base_sdrc_rfr,
+ bypass);
+
+ omap2_init_memory_params(omap2_dll_force_needed());
+ omap2_reprogram_sdrc(done_rate, 0);
+
+ local_irq_restore(flags);
+ }
+ omap2_clksel_recalc(&dpll_ck);
+
+ return 0;
+}
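Both omap2_round_to_table_rate() and the function above walk the same rate_table: entries for a different crystal speed (and, here, a different chip) are skipped, and the first remaining entry at or below the requested MPU speed wins, which is why the table must be ordered fastest first. A stripped-down model of that scan with made-up entries:

#include <stdio.h>

struct prcm_set {
	unsigned long xtal_speed;
	unsigned long mpu_speed;
};

static unsigned long pick_mpu_rate(const struct prcm_set *tbl,
				   unsigned long xtal, unsigned long want)
{
	for (; tbl->mpu_speed; tbl++) {
		if (tbl->xtal_speed != xtal)
			continue;
		if (tbl->mpu_speed <= want)
			return tbl->mpu_speed;
	}
	return 0;	/* nothing suitable */
}

int main(void)
{
	/* fastest first, terminated by mpu_speed == 0; values are made up */
	const struct prcm_set table[] = {
		{ 13000000, 660000000 },
		{ 13000000, 330000000 },
		{ 12000000, 600000000 },
		{ 0, 0 },
	};

	/* 13 MHz crystal, ask for 400 MHz: prints 330000000 */
	printf("%lu\n", pick_mpu_rate(table, 13000000, 400000000));
	return 0;
}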
+
+/*-------------------------------------------------------------------------
+ * Omap2 clock reset and init functions
+ *-------------------------------------------------------------------------*/
+
+static struct clk_functions omap2_clk_functions = {
+ .clk_enable = omap2_clk_enable,
+ .clk_disable = omap2_clk_disable,
+ .clk_use = omap2_clk_use,
+ .clk_unuse = omap2_clk_unuse,
+ .clk_round_rate = omap2_clk_round_rate,
+ .clk_set_rate = omap2_clk_set_rate,
+ .clk_set_parent = omap2_clk_set_parent,
+};
+
+static void __init omap2_get_crystal_rate(struct clk *osc, struct clk *sys)
+{
+ u32 div, aplls, sclk = 13000000;
+
+ aplls = CM_CLKSEL1_PLL;
+ aplls &= ((1 << 23) | (1 << 24) | (1 << 25));
+ aplls >>= 23; /* Isolate field, 0,2,3 */
+
+ if (aplls == 0)
+ sclk = 19200000;
+ else if (aplls == 2)
+ sclk = 13000000;
+ else if (aplls == 3)
+ sclk = 12000000;
+
+ div = PRCM_CLKSRC_CTRL;
+ div &= ((1 << 7) | (1 << 6));
+ div >>= sys->rate_offset;
+
+ osc->rate = sclk * div;
+ sys->rate = sclk;
+}
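The function above derives both clock roots from two fields: bits 25:23 of CM_CLKSEL1_PLL select the crystal rate (0 = 19.2 MHz, 2 = 13 MHz, 3 = 12 MHz, as coded above) and the PRCM_CLKSRC_CTRL divider field then gives osc_ck = sys_ck * div. A sketch of that arithmetic with assumed field values:

#include <stdio.h>

int main(void)
{
	/* crystal select per bits 25:23 of CM_CLKSEL1_PLL (0, 2 or 3 used) */
	static const unsigned long sclk_tab[4] = {
		19200000, 0 /* unused */, 13000000, 12000000
	};
	unsigned apll_sel = 2;		/* assumed: 13 MHz crystal           */
	unsigned div = 2;		/* assumed PRCM_CLKSRC_CTRL divider  */
	unsigned long sys = sclk_tab[apll_sel];

	printf("sys_ck = %lu Hz, osc_ck = %lu Hz\n", sys, sys * div);
	return 0;
}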
+
+#ifdef CONFIG_OMAP_RESET_CLOCKS
+static void __init omap2_disable_unused_clocks(void)
+{
+ struct clk *ck;
+ u32 regval32;
+
+ list_for_each_entry(ck, &clocks, node) {
+ if (ck->usecount > 0 || (ck->flags & ALWAYS_ENABLED) ||
+ ck->enable_reg == 0)
+ continue;
+
+ regval32 = __raw_readl(ck->enable_reg);
+ if ((regval32 & (1 << ck->enable_bit)) == 0)
+ continue;
+
+ printk(KERN_INFO "Disabling unused clock \"%s\"\n", ck->name);
+ omap2_clk_disable(ck);
+ }
+}
+late_initcall(omap2_disable_unused_clocks);
+#endif
+
+/*
+ * Switch the MPU rate if specified on cmdline.
+ * We cannot do this early until cmdline is parsed.
+ */
+static int __init omap2_clk_arch_init(void)
+{
+ if (!mpurate)
+ return -EINVAL;
+
+ if (omap2_select_table_rate(&virt_prcm_set, mpurate))
+ printk(KERN_ERR "Could not find matching MPU rate\n");
+
+ propagate_rate(&osc_ck); /* update main root fast */
+ propagate_rate(&func_32k_ck); /* update main root slow */
+
+ printk(KERN_INFO "Switched to new clocking rate (Crystal/DPLL/MPU): "
+ "%ld.%01ld/%ld/%ld MHz\n",
+ (sys_ck.rate / 1000000), (sys_ck.rate / 100000) % 10,
+ (dpll_ck.rate / 1000000), (mpu_ck.rate / 1000000)) ;
+
+ return 0;
+}
+arch_initcall(omap2_clk_arch_init);
+
+int __init omap2_clk_init(void)
+{
+ struct prcm_config *prcm;
+ struct clk ** clkp;
+ u32 clkrate;
+
+ clk_init(&omap2_clk_functions);
+ omap2_get_crystal_rate(&osc_ck, &sys_ck);
+
+ for (clkp = onchip_clks; clkp < onchip_clks + ARRAY_SIZE(onchip_clks);
+ clkp++) {
+
+ if ((*clkp)->flags & CLOCK_IN_OMAP242X && cpu_is_omap2420()) {
+ clk_register(*clkp);
+ continue;
+ }
+
+ if ((*clkp)->flags & CLOCK_IN_OMAP243X && cpu_is_omap2430()) {
+ clk_register(*clkp);
+ continue;
+ }
+ }
+
+ /* Check the MPU rate set by bootloader */
+ clkrate = omap2_get_dpll_rate(&dpll_ck);
+ for (prcm = rate_table; prcm->mpu_speed; prcm++) {
+ if (prcm->xtal_speed != sys_ck.rate)
+ continue;
+ if (prcm->dpll_speed <= clkrate)
+ break;
+ }
+ curr_prcm_set = prcm;
+
+ propagate_rate(&osc_ck); /* update main root fast */
+ propagate_rate(&func_32k_ck); /* update main root slow */
+
+ printk(KERN_INFO "Clocking rate (Crystal/DPLL/MPU): "
+ "%ld.%01ld/%ld/%ld MHz\n",
+ (sys_ck.rate / 1000000), (sys_ck.rate / 100000) % 10,
+ (dpll_ck.rate / 1000000), (mpu_ck.rate / 1000000));
+
+ /*
+ * Only enable those clocks we will need, let the drivers
+ * enable other clocks as necessary
+ */
+ clk_use(&sync_32k_ick);
+ clk_use(&omapctrl_ick);
+ if (cpu_is_omap2430())
+ clk_use(&sdrc_ick);
+
+ return 0;
+}
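+
+/*
+ * Example (illustrative sketch only) of how a driver picks up one of the
+ * clocks registered above; apart from the few clocks enabled at the end of
+ * omap2_clk_init(), everything is left for the drivers to enable.  The
+ * device pointer, the uart1 choice and the error handling are assumptions
+ * made for the example.
+ *
+ *	struct clk *ick = clk_get(dev, "uart1_ick");
+ *
+ *	if (!IS_ERR(ick)) {
+ *		clk_use(ick);		keep the clock enabled while in use
+ *		...
+ *		clk_unuse(ick);
+ *		clk_put(ick);
+ *	}
+ */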
diff --git a/arch/arm/mach-omap2/clock.h b/arch/arm/mach-omap2/clock.h
new file mode 100644
index 00000000000..4aeab5591bd
--- /dev/null
+++ b/arch/arm/mach-omap2/clock.h
@@ -0,0 +1,2103 @@
+/*
+ * linux/arch/arm/mach-omap24xx/clock.h
+ *
+ * Copyright (C) 2005 Texas Instruments Inc.
+ * Richard Woodruff <r-woodruff2@ti.com>
+ * Created for OMAP2.
+ *
+ * Copyright (C) 2004 Nokia corporation
+ * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
+ * Based on clocks.h by Tony Lindgren, Gordon McNutt and RidgeRun, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_MACH_OMAP2_CLOCK_H
+#define __ARCH_ARM_MACH_OMAP2_CLOCK_H
+
+static void omap2_sys_clk_recalc(struct clk *clk);
+static void omap2_clksel_recalc(struct clk *clk);
+static void omap2_followparent_recalc(struct clk *clk);
+static void omap2_propagate_rate(struct clk *clk);
+static void omap2_mpu_recalc(struct clk *clk);
+static int omap2_select_table_rate(struct clk *clk, unsigned long rate);
+static long omap2_round_to_table_rate(struct clk *clk, unsigned long rate);
+static void omap2_clk_unuse(struct clk *clk);
+static u32 omap2_clksel_to_divisor(u32 div_sel, u32 field_val);
+static u32 omap2_clksel_get_divisor(struct clk *clk);
+
+
+#define RATE_IN_242X (1 << 0)
+#define RATE_IN_243X (1 << 1)
+
+/* Memory timings */
+#define M_DDR 1
+#define M_LOCK_CTRL (1 << 2)
+#define M_UNLOCK 0
+#define M_LOCK 1
+
+struct memory_timings {
+ u32 m_type; /* ddr = 1, sdr = 0 */
+ u32 dll_mode; /* use lock mode = 1, unlock mode = 0 */
+ u32 slow_dll_ctrl; /* unlock mode, dll value for slow speed */
+ u32 fast_dll_ctrl; /* unlock mode, dll value for fast speed */
+ u32 base_cs; /* base chip select to use for calculations */
+};
+
+/* Key dividers which make up a PRCM set. Ratios for a PRCM are mandated:
+ * xtal_speed, dpll_speed, mpu_speed, CM_CLKSEL_MPU, CM_CLKSEL_DSP,
+ * CM_CLKSEL_GFX, CM_CLKSEL1_CORE, CM_CLKSEL1_PLL, CM_CLKSEL2_PLL, CM_CLKSEL_MDM
+ */
+struct prcm_config {
+ unsigned long xtal_speed; /* crystal rate */
+ unsigned long dpll_speed; /* dpll output: 2*xtal*M/(N+1), used by table recalc */
+ unsigned long mpu_speed; /* speed of MPU */
+ unsigned long cm_clksel_mpu; /* mpu divider */
+ unsigned long cm_clksel_dsp; /* dsp+iva1 div(2420), iva2.1(2430) */
+ unsigned long cm_clksel_gfx; /* gfx dividers */
+ unsigned long cm_clksel1_core; /* major subsystem dividers */
+ unsigned long cm_clksel1_pll; /* m,n */
+ unsigned long cm_clksel2_pll; /* dpllx1 or x2 out */
+ unsigned long cm_clksel_mdm; /* modem dividers 2430 only */
+ unsigned long base_sdrc_rfr; /* base refresh timing for a set */
+ unsigned char flags;
+};
+
+/* Mask for clksels which support parent setting in set_rate */
+#define SRC_SEL_MASK (CM_CORE_SEL1 | CM_CORE_SEL2 | CM_WKUP_SEL1 | \
+ CM_PLL_SEL1 | CM_PLL_SEL2 | CM_SYSCLKOUT_SEL1)
+
+/* Mask for clksel regs which support rate operations */
+#define SRC_RATE_SEL_MASK (CM_MPU_SEL1 | CM_DSP_SEL1 | CM_GFX_SEL1 | \
+ CM_MODEM_SEL1 | CM_CORE_SEL1 | CM_CORE_SEL2 | \
+ CM_WKUP_SEL1 | CM_PLL_SEL1 | CM_PLL_SEL2 | \
+ CM_SYSCLKOUT_SEL1)
+
+/*
+ * The OMAP2 processor can be run at several discrete 'PRCM configurations'.
+ * These configurations are characterized by voltage and speed for clocks.
+ * The device is only validated for certain combinations. One way to express
+ * these combinations is via the 'ratios' at which the clocks operate with
+ * respect to each other. These ratio sets are for a given voltage/DPLL
+ * setting. All configurations can be described by a DPLL setting and a ratio.
+ * There are 3 ratio sets for the 2430 and X ratio sets for the 2420.
+ *
+ * The 2430 differs from the 2420 in that it no longer uses phase
+ * synchronizers, and the two have slightly different clock domain setups:
+ * 2420 (iva1, dsp) vs 2430 (iva2.1, no dsp, mdm).
+ */
+
+/* Core fields for cm_clksel, not ratio governed */
+#define RX_CLKSEL_DSS1 (0x10 << 8)
+#define RX_CLKSEL_DSS2 (0x0 << 13)
+#define RX_CLKSEL_SSI (0x5 << 20)
+
+/*-------------------------------------------------------------------------
+ * Voltage/DPLL ratios
+ *-------------------------------------------------------------------------*/
+
+/* 2430 Ratios, 2430-Ratio Config 1 */
+#define R1_CLKSEL_L3 (4 << 0)
+#define R1_CLKSEL_L4 (2 << 5)
+#define R1_CLKSEL_USB (4 << 25)
+#define R1_CM_CLKSEL1_CORE_VAL R1_CLKSEL_USB | RX_CLKSEL_SSI | \
+ RX_CLKSEL_DSS2 | RX_CLKSEL_DSS1 | \
+ R1_CLKSEL_L4 | R1_CLKSEL_L3
+#define R1_CLKSEL_MPU (2 << 0)
+#define R1_CM_CLKSEL_MPU_VAL R1_CLKSEL_MPU
+#define R1_CLKSEL_DSP (2 << 0)
+#define R1_CLKSEL_DSP_IF (2 << 5)
+#define R1_CM_CLKSEL_DSP_VAL R1_CLKSEL_DSP | R1_CLKSEL_DSP_IF
+#define R1_CLKSEL_GFX (2 << 0)
+#define R1_CM_CLKSEL_GFX_VAL R1_CLKSEL_GFX
+#define R1_CLKSEL_MDM (4 << 0)
+#define R1_CM_CLKSEL_MDM_VAL R1_CLKSEL_MDM
+
+/* 2430-Ratio Config 2 */
+#define R2_CLKSEL_L3 (6 << 0)
+#define R2_CLKSEL_L4 (2 << 5)
+#define R2_CLKSEL_USB (2 << 25)
+#define R2_CM_CLKSEL1_CORE_VAL R2_CLKSEL_USB | RX_CLKSEL_SSI | \
+ RX_CLKSEL_DSS2 | RX_CLKSEL_DSS1 | \
+ R2_CLKSEL_L4 | R2_CLKSEL_L3
+#define R2_CLKSEL_MPU (2 << 0)
+#define R2_CM_CLKSEL_MPU_VAL R2_CLKSEL_MPU
+#define R2_CLKSEL_DSP (2 << 0)
+#define R2_CLKSEL_DSP_IF (3 << 5)
+#define R2_CM_CLKSEL_DSP_VAL R2_CLKSEL_DSP | R2_CLKSEL_DSP_IF
+#define R2_CLKSEL_GFX (2 << 0)
+#define R2_CM_CLKSEL_GFX_VAL R2_CLKSEL_GFX
+#define R2_CLKSEL_MDM (6 << 0)
+#define R2_CM_CLKSEL_MDM_VAL R2_CLKSEL_MDM
+
+/* 2430-Ratio Boot (BYPASS) */
+#define RB_CLKSEL_L3 (1 << 0)
+#define RB_CLKSEL_L4 (1 << 5)
+#define RB_CLKSEL_USB (1 << 25)
+#define RB_CM_CLKSEL1_CORE_VAL RB_CLKSEL_USB | RX_CLKSEL_SSI | \
+ RX_CLKSEL_DSS2 | RX_CLKSEL_DSS1 | \
+ RB_CLKSEL_L4 | RB_CLKSEL_L3
+#define RB_CLKSEL_MPU (1 << 0)
+#define RB_CM_CLKSEL_MPU_VAL RB_CLKSEL_MPU
+#define RB_CLKSEL_DSP (1 << 0)
+#define RB_CLKSEL_DSP_IF (1 << 5)
+#define RB_CM_CLKSEL_DSP_VAL RB_CLKSEL_DSP | RB_CLKSEL_DSP_IF
+#define RB_CLKSEL_GFX (1 << 0)
+#define RB_CM_CLKSEL_GFX_VAL RB_CLKSEL_GFX
+#define RB_CLKSEL_MDM (1 << 0)
+#define RB_CM_CLKSEL_MDM_VAL RB_CLKSEL_MDM
+
+/* 2420 Ratio Equivalents */
+#define RXX_CLKSEL_VLYNQ (0x12 << 15)
+#define RXX_CLKSEL_SSI (0x8 << 20)
+
+/* 2420-PRCM III 532MHz core */
+#define RIII_CLKSEL_L3 (4 << 0) /* 133MHz */
+#define RIII_CLKSEL_L4 (2 << 5) /* 66.5MHz */
+#define RIII_CLKSEL_USB (4 << 25) /* 33.25MHz */
+#define RIII_CM_CLKSEL1_CORE_VAL RIII_CLKSEL_USB | RXX_CLKSEL_SSI | \
+ RXX_CLKSEL_VLYNQ | RX_CLKSEL_DSS2 | \
+ RX_CLKSEL_DSS1 | RIII_CLKSEL_L4 | \
+ RIII_CLKSEL_L3
+#define RIII_CLKSEL_MPU (2 << 0) /* 266MHz */
+#define RIII_CM_CLKSEL_MPU_VAL RIII_CLKSEL_MPU
+#define RIII_CLKSEL_DSP (3 << 0) /* c5x - 177.3MHz */
+#define RIII_CLKSEL_DSP_IF (2 << 5) /* c5x - 88.67MHz */
+#define RIII_SYNC_DSP (1 << 7) /* Enable sync */
+#define RIII_CLKSEL_IVA (6 << 8) /* iva1 - 88.67MHz */
+#define RIII_SYNC_IVA (1 << 13) /* Enable sync */
+#define RIII_CM_CLKSEL_DSP_VAL RIII_SYNC_IVA | RIII_CLKSEL_IVA | \
+ RIII_SYNC_DSP | RIII_CLKSEL_DSP_IF | \
+ RIII_CLKSEL_DSP
+#define RIII_CLKSEL_GFX (2 << 0) /* 66.5MHz */
+#define RIII_CM_CLKSEL_GFX_VAL RIII_CLKSEL_GFX
+
+/* 2420-PRCM II 600MHz core */
+#define RII_CLKSEL_L3 (6 << 0) /* 100MHz */
+#define RII_CLKSEL_L4 (2 << 5) /* 50MHz */
+#define RII_CLKSEL_USB (2 << 25) /* 50MHz */
+#define RII_CM_CLKSEL1_CORE_VAL RII_CLKSEL_USB | \
+ RXX_CLKSEL_SSI | RXX_CLKSEL_VLYNQ | \
+ RX_CLKSEL_DSS2 | RX_CLKSEL_DSS1 | \
+ RII_CLKSEL_L4 | RII_CLKSEL_L3
+#define RII_CLKSEL_MPU (2 << 0) /* 300MHz */
+#define RII_CM_CLKSEL_MPU_VAL RII_CLKSEL_MPU
+#define RII_CLKSEL_DSP (3 << 0) /* c5x - 200MHz */
+#define RII_CLKSEL_DSP_IF (2 << 5) /* c5x - 100MHz */
+#define RII_SYNC_DSP (0 << 7) /* Bypass sync */
+#define RII_CLKSEL_IVA (6 << 8) /* iva1 - 200MHz */
+#define RII_SYNC_IVA (0 << 13) /* Bypass sync */
+#define RII_CM_CLKSEL_DSP_VAL RII_SYNC_IVA | RII_CLKSEL_IVA | \
+ RII_SYNC_DSP | RII_CLKSEL_DSP_IF | \
+ RII_CLKSEL_DSP
+#define RII_CLKSEL_GFX (2 << 0) /* 50MHz */
+#define RII_CM_CLKSEL_GFX_VAL RII_CLKSEL_GFX
+
+/* 2420-PRCM VII (boot) */
+#define RVII_CLKSEL_L3 (1 << 0)
+#define RVII_CLKSEL_L4 (1 << 5)
+#define RVII_CLKSEL_DSS1 (1 << 8)
+#define RVII_CLKSEL_DSS2 (0 << 13)
+#define RVII_CLKSEL_VLYNQ (1 << 15)
+#define RVII_CLKSEL_SSI (1 << 20)
+#define RVII_CLKSEL_USB (1 << 25)
+
+#define RVII_CM_CLKSEL1_CORE_VAL RVII_CLKSEL_USB | RVII_CLKSEL_SSI | \
+ RVII_CLKSEL_VLYNQ | RVII_CLKSEL_DSS2 | \
+ RVII_CLKSEL_DSS1 | RVII_CLKSEL_L4 | RVII_CLKSEL_L3
+
+#define RVII_CLKSEL_MPU (1 << 0) /* all divide by 1 */
+#define RVII_CM_CLKSEL_MPU_VAL RVII_CLKSEL_MPU
+
+#define RVII_CLKSEL_DSP (1 << 0)
+#define RVII_CLKSEL_DSP_IF (1 << 5)
+#define RVII_SYNC_DSP (0 << 7)
+#define RVII_CLKSEL_IVA (1 << 8)
+#define RVII_SYNC_IVA (0 << 13)
+#define RVII_CM_CLKSEL_DSP_VAL RVII_SYNC_IVA | RVII_CLKSEL_IVA | RVII_SYNC_DSP | \
+ RVII_CLKSEL_DSP_IF | RVII_CLKSEL_DSP
+
+#define RVII_CLKSEL_GFX (1 << 0)
+#define RVII_CM_CLKSEL_GFX_VAL RVII_CLKSEL_GFX
+
+/*-------------------------------------------------------------------------
+ * 2430 Target modes: Along with each configuration the CPU has several
+ * modes which go along with it. Modes are mainly the addition of specific
+ * DPLL combinations to go along with a ratio.
+ *-------------------------------------------------------------------------*/
+
+/* Hardware governed */
+#define MX_48M_SRC (0 << 3)
+#define MX_54M_SRC (0 << 5)
+#define MX_APLLS_CLIKIN_12 (3 << 23)
+#define MX_APLLS_CLIKIN_13 (2 << 23)
+#define MX_APLLS_CLIKIN_19_2 (0 << 23)
+
+/*
+ * 2430 - standalone, 2*ref*M/(n+1), M/N is for exactness not relock speed
+ * #2 (ratio1) baseport-target
+ * #5a (ratio1) baseport-target, target DPLL = 266*2 = 532MHz
+ */
+#define M5A_DPLL_MULT_12 (133 << 12)
+#define M5A_DPLL_DIV_12 (5 << 8)
+#define M5A_CM_CLKSEL1_PLL_12_VAL MX_48M_SRC | MX_54M_SRC | \
+ M5A_DPLL_DIV_12 | M5A_DPLL_MULT_12 | \
+ MX_APLLS_CLIKIN_12
+#define M5A_DPLL_MULT_13 (266 << 12)
+#define M5A_DPLL_DIV_13 (12 << 8)
+#define M5A_CM_CLKSEL1_PLL_13_VAL MX_48M_SRC | MX_54M_SRC | \
+ M5A_DPLL_DIV_13 | M5A_DPLL_MULT_13 | \
+ MX_APLLS_CLIKIN_13
+#define M5A_DPLL_MULT_19 (180 << 12)
+#define M5A_DPLL_DIV_19 (12 << 8)
+#define M5A_CM_CLKSEL1_PLL_19_VAL MX_48M_SRC | MX_54M_SRC | \
+ M5A_DPLL_DIV_19 | M5A_DPLL_MULT_19 | \
+ MX_APLLS_CLIKIN_19_2
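+
+/*
+ * Worked example for the M/N values above, using the 2*ref*M/(N+1)
+ * relation quoted at the top of this block:
+ *
+ *	13MHz ref:  2 * 13MHz * 266 / (12 + 1) = 532MHz
+ *	12MHz ref:  2 * 12MHz * 133 / (5 + 1)  = 532MHz
+ *
+ * so both reference clocks land on the same 532MHz target DPLL rate.
+ */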
+/* #5b (ratio1) target DPLL = 200*2 = 400MHz */
+#define M5B_DPLL_MULT_12 (50 << 12)
+#define M5B_DPLL_DIV_12 (2 << 8)
+#define M5B_CM_CLKSEL1_PLL_12_VAL MX_48M_SRC | MX_54M_SRC | \
+ M5B_DPLL_DIV_12 | M5B_DPLL_MULT_12 | \
+ MX_APLLS_CLIKIN_12
+#define M5B_DPLL_MULT_13 (200 << 12)
+#define M5B_DPLL_DIV_13 (12 << 8)
+
+#define M5B_CM_CLKSEL1_PLL_13_VAL MX_48M_SRC | MX_54M_SRC | \
+ M5B_DPLL_DIV_13 | M5B_DPLL_MULT_13 | \
+ MX_APLLS_CLIKIN_13
+#define M5B_DPLL_MULT_19 (125 << 12)
+#define M5B_DPLL_DIV_19 (31 << 8)
+#define M5B_CM_CLKSEL1_PLL_19_VAL MX_48M_SRC | MX_54M_SRC | \
+ M5B_DPLL_DIV_19 | M5B_DPLL_MULT_19 | \
+ MX_APLLS_CLIKIN_19_2
+/*
+ * #4 (ratio2)
+ * #3 (ratio2) baseport-target, target DPLL = 330*2 = 660MHz
+ */
+#define M3_DPLL_MULT_12 (55 << 12)
+#define M3_DPLL_DIV_12 (1 << 8)
+#define M3_CM_CLKSEL1_PLL_12_VAL MX_48M_SRC | MX_54M_SRC | \
+ M3_DPLL_DIV_12 | M3_DPLL_MULT_12 | \
+ MX_APLLS_CLIKIN_12
+#define M3_DPLL_MULT_13 (330 << 12)
+#define M3_DPLL_DIV_13 (12 << 8)
+#define M3_CM_CLKSEL1_PLL_13_VAL MX_48M_SRC | MX_54M_SRC | \
+ M3_DPLL_DIV_13 | M3_DPLL_MULT_13 | \
+ MX_APLLS_CLIKIN_13
+#define M3_DPLL_MULT_19 (275 << 12)
+#define M3_DPLL_DIV_19 (15 << 8)
+#define M3_CM_CLKSEL1_PLL_19_VAL MX_48M_SRC | MX_54M_SRC | \
+ M3_DPLL_DIV_19 | M3_DPLL_MULT_19 | \
+ MX_APLLS_CLIKIN_19_2
+/* boot (boot) */
+#define MB_DPLL_MULT (1 << 12)
+#define MB_DPLL_DIV (0 << 8)
+#define MB_CM_CLKSEL1_PLL_12_VAL MX_48M_SRC | MX_54M_SRC | MB_DPLL_DIV |\
+ MB_DPLL_MULT | MX_APLLS_CLIKIN_12
+
+#define MB_CM_CLKSEL1_PLL_13_VAL MX_48M_SRC | MX_54M_SRC | MB_DPLL_DIV |\
+ MB_DPLL_MULT | MX_APLLS_CLIKIN_13
+
+#define MB_CM_CLKSEL1_PLL_19_VAL MX_48M_SRC | MX_54M_SRC | MB_DPLL_DIV |\
+ MB_DPLL_MULT | MX_APLLS_CLIKIN_19_2
+
+/*
+ * 2430 - chassis (sedna)
+ * 165 (ratio1) same as above #2
+ * 150 (ratio1)
+ * 133 (ratio2) same as above #4
+ * 110 (ratio2) same as above #3
+ * 104 (ratio2)
+ * boot (boot)
+ */
+
+/*
+ * 2420 Equivalent - mode registers
+ * PRCM II , target DPLL = 2*300MHz = 600MHz
+ */
+#define MII_DPLL_MULT_12 (50 << 12)
+#define MII_DPLL_DIV_12 (1 << 8)
+#define MII_CM_CLKSEL1_PLL_12_VAL MX_48M_SRC | MX_54M_SRC | \
+ MII_DPLL_DIV_12 | MII_DPLL_MULT_12 | \
+ MX_APLLS_CLIKIN_12
+#define MII_DPLL_MULT_13 (300 << 12)
+#define MII_DPLL_DIV_13 (12 << 8)
+#define MII_CM_CLKSEL1_PLL_13_VAL MX_48M_SRC | MX_54M_SRC | \
+ MII_DPLL_DIV_13 | MII_DPLL_MULT_13 | \
+ MX_APLLS_CLIKIN_13
+
+/* PRCM III target DPLL = 2*266 = 532MHz*/
+#define MIII_DPLL_MULT_12 (133 << 12)
+#define MIII_DPLL_DIV_12 (5 << 8)
+#define MIII_CM_CLKSEL1_PLL_12_VAL MX_48M_SRC | MX_54M_SRC | \
+ MIII_DPLL_DIV_12 | MIII_DPLL_MULT_12 | \
+ MX_APLLS_CLIKIN_12
+#define MIII_DPLL_MULT_13 (266 << 12)
+#define MIII_DPLL_DIV_13 (12 << 8)
+#define MIII_CM_CLKSEL1_PLL_13_VAL MX_48M_SRC | MX_54M_SRC | \
+ MIII_DPLL_DIV_13 | MIII_DPLL_MULT_13 | \
+ MX_APLLS_CLIKIN_13
+
+/* PRCM VII (boot bypass) */
+#define MVII_CM_CLKSEL1_PLL_12_VAL MB_CM_CLKSEL1_PLL_12_VAL
+#define MVII_CM_CLKSEL1_PLL_13_VAL MB_CM_CLKSEL1_PLL_13_VAL
+
+/* High and low operation value */
+#define MX_CLKSEL2_PLL_2x_VAL (2 << 0)
+#define MX_CLKSEL2_PLL_1x_VAL (1 << 0)
+
+/*
+ * These represent optimal values for common parts; they won't work for all.
+ * As long as you scale down, most parameters still work, they just become
+ * sub-optimal. The RFR value goes in the opposite direction: if you don't
+ * adjust it down as your clock period increases, the refresh interval will
+ * not be met. Setting all parameters for the complete worst case may work,
+ * but may cut memory performance by 2x. Due to errata the DLLs need to be
+ * unlocked and their value needs run-time calibration. A dynamic call is
+ * needed for that, as no single right value exists across production samples.
+ *
+ * Only the FULL speed values are given. Current code is such that rate
+ * changes must be made at DPLLoutx2. The actual value adjustment for low
+ * frequency operation will be handled by omap_set_performance().
+ *
+ * Having the boot loader come up at the fastest L4 speed available will
+ * likely result in something you can switch between.
+ */
+#define V24XX_SDRC_RFR_CTRL_133MHz (0x0003de00 | 1)
+#define V24XX_SDRC_RFR_CTRL_100MHz (0x0002da01 | 1)
+#define V24XX_SDRC_RFR_CTRL_110MHz (0x0002da01 | 1) /* Need to calc */
+#define V24XX_SDRC_RFR_CTRL_BYPASS (0x00005000 | 1) /* Need to calc */
+
+/* MPU speed defines */
+#define S12M 12000000
+#define S13M 13000000
+#define S19M 19200000
+#define S26M 26000000
+#define S100M 100000000
+#define S133M 133000000
+#define S150M 150000000
+#define S165M 165000000
+#define S200M 200000000
+#define S266M 266000000
+#define S300M 300000000
+#define S330M 330000000
+#define S400M 400000000
+#define S532M 532000000
+#define S600M 600000000
+#define S660M 660000000
+
+/*-------------------------------------------------------------------------
+ * Key dividers which make up a PRCM set. Ratios for a PRCM are mandated:
+ * xtal_speed, dpll_speed, mpu_speed, CM_CLKSEL_MPU,
+ * CM_CLKSEL_DSP, CM_CLKSEL_GFX, CM_CLKSEL1_CORE, CM_CLKSEL1_PLL,
+ * CM_CLKSEL2_PLL, CM_CLKSEL_MDM
+ *
+ * Filled in based on the H4 board and the 2430-SDP variants available.
+ * There are quite a few more rate combinations which could be defined.
+ *
+ * When multiple values are defined, startup will try to choose the
+ * fastest one. If a 'fast' value is defined, then automatically the /2
+ * one should be included as it can be used. Generally having more than
+ * one fast set does not make sense, as static timings need to be changed
+ * to change the set. The exception is the bypass setting, which is
+ * available for low-power bypass.
+ *
+ * Note: This table needs to be sorted, fastest to slowest. The first
+ * entry is unpacked field by field in the note after the table.
+ *-------------------------------------------------------------------------*/
+static struct prcm_config rate_table[] = {
+ /* PRCM II - FAST */
+ {S12M, S600M, S300M, RII_CM_CLKSEL_MPU_VAL, /* 300MHz ARM */
+ RII_CM_CLKSEL_DSP_VAL, RII_CM_CLKSEL_GFX_VAL,
+ RII_CM_CLKSEL1_CORE_VAL, MII_CM_CLKSEL1_PLL_12_VAL,
+ MX_CLKSEL2_PLL_2x_VAL, 0, V24XX_SDRC_RFR_CTRL_100MHz,
+ RATE_IN_242X},
+
+ {S13M, S600M, S300M, RII_CM_CLKSEL_MPU_VAL, /* 300MHz ARM */
+ RII_CM_CLKSEL_DSP_VAL, RII_CM_CLKSEL_GFX_VAL,
+ RII_CM_CLKSEL1_CORE_VAL, MII_CM_CLKSEL1_PLL_13_VAL,
+ MX_CLKSEL2_PLL_2x_VAL, 0, V24XX_SDRC_RFR_CTRL_100MHz,
+ RATE_IN_242X},
+
+ /* PRCM III - FAST */
+ {S12M, S532M, S266M, RIII_CM_CLKSEL_MPU_VAL, /* 266MHz ARM */
+ RIII_CM_CLKSEL_DSP_VAL, RIII_CM_CLKSEL_GFX_VAL,
+ RIII_CM_CLKSEL1_CORE_VAL, MIII_CM_CLKSEL1_PLL_12_VAL,
+ MX_CLKSEL2_PLL_2x_VAL, 0, V24XX_SDRC_RFR_CTRL_133MHz,
+ RATE_IN_242X},
+
+ {S13M, S532M, S266M, RIII_CM_CLKSEL_MPU_VAL, /* 266MHz ARM */
+ RIII_CM_CLKSEL_DSP_VAL, RIII_CM_CLKSEL_GFX_VAL,
+ RIII_CM_CLKSEL1_CORE_VAL, MIII_CM_CLKSEL1_PLL_13_VAL,
+ MX_CLKSEL2_PLL_2x_VAL, 0, V24XX_SDRC_RFR_CTRL_133MHz,
+ RATE_IN_242X},
+
+ /* PRCM II - SLOW */
+ {S12M, S300M, S150M, RII_CM_CLKSEL_MPU_VAL, /* 150MHz ARM */
+ RII_CM_CLKSEL_DSP_VAL, RII_CM_CLKSEL_GFX_VAL,
+ RII_CM_CLKSEL1_CORE_VAL, MII_CM_CLKSEL1_PLL_12_VAL,
+ MX_CLKSEL2_PLL_2x_VAL, 0, V24XX_SDRC_RFR_CTRL_100MHz,
+ RATE_IN_242X},
+
+ {S13M, S300M, S150M, RII_CM_CLKSEL_MPU_VAL, /* 150MHz ARM */
+ RII_CM_CLKSEL_DSP_VAL, RII_CM_CLKSEL_GFX_VAL,
+ RII_CM_CLKSEL1_CORE_VAL, MII_CM_CLKSEL1_PLL_13_VAL,
+ MX_CLKSEL2_PLL_2x_VAL, 0, V24XX_SDRC_RFR_CTRL_100MHz,
+ RATE_IN_242X},
+
+ /* PRCM III - SLOW */
+ {S12M, S266M, S133M, RIII_CM_CLKSEL_MPU_VAL, /* 133MHz ARM */
+ RIII_CM_CLKSEL_DSP_VAL, RIII_CM_CLKSEL_GFX_VAL,
+ RIII_CM_CLKSEL1_CORE_VAL, MIII_CM_CLKSEL1_PLL_12_VAL,
+ MX_CLKSEL2_PLL_2x_VAL, 0, V24XX_SDRC_RFR_CTRL_133MHz,
+ RATE_IN_242X},
+
+ {S13M, S266M, S133M, RIII_CM_CLKSEL_MPU_VAL, /* 133MHz ARM */
+ RIII_CM_CLKSEL_DSP_VAL, RIII_CM_CLKSEL_GFX_VAL,
+ RIII_CM_CLKSEL1_CORE_VAL, MIII_CM_CLKSEL1_PLL_13_VAL,
+ MX_CLKSEL2_PLL_2x_VAL, 0, V24XX_SDRC_RFR_CTRL_133MHz,
+ RATE_IN_242X},
+
+ /* PRCM-VII (boot-bypass) */
+ {S12M, S12M, S12M, RVII_CM_CLKSEL_MPU_VAL, /* 12MHz ARM*/
+ RVII_CM_CLKSEL_DSP_VAL, RVII_CM_CLKSEL_GFX_VAL,
+ RVII_CM_CLKSEL1_CORE_VAL, MVII_CM_CLKSEL1_PLL_12_VAL,
+ MX_CLKSEL2_PLL_2x_VAL, 0, V24XX_SDRC_RFR_CTRL_BYPASS,
+ RATE_IN_242X},
+
+ /* PRCM-VII (boot-bypass) */
+ {S13M, S13M, S13M, RVII_CM_CLKSEL_MPU_VAL, /* 13MHz ARM */
+ RVII_CM_CLKSEL_DSP_VAL, RVII_CM_CLKSEL_GFX_VAL,
+ RVII_CM_CLKSEL1_CORE_VAL, MVII_CM_CLKSEL1_PLL_13_VAL,
+ MX_CLKSEL2_PLL_2x_VAL, 0, V24XX_SDRC_RFR_CTRL_BYPASS,
+ RATE_IN_242X},
+
+ /* PRCM #3 - ratio2 (ES2) - FAST */
+ {S13M, S660M, S330M, R2_CM_CLKSEL_MPU_VAL, /* 330MHz ARM */
+ R2_CM_CLKSEL_DSP_VAL, R2_CM_CLKSEL_GFX_VAL,
+ R2_CM_CLKSEL1_CORE_VAL, M3_CM_CLKSEL1_PLL_13_VAL,
+ MX_CLKSEL2_PLL_2x_VAL, R2_CM_CLKSEL_MDM_VAL,
+ V24XX_SDRC_RFR_CTRL_110MHz,
+ RATE_IN_243X},
+
+ /* PRCM #5a - ratio1 - FAST */
+ {S13M, S532M, S266M, R1_CM_CLKSEL_MPU_VAL, /* 266MHz ARM */
+ R1_CM_CLKSEL_DSP_VAL, R1_CM_CLKSEL_GFX_VAL,
+ R1_CM_CLKSEL1_CORE_VAL, M5A_CM_CLKSEL1_PLL_13_VAL,
+ MX_CLKSEL2_PLL_2x_VAL, R1_CM_CLKSEL_MDM_VAL,
+ V24XX_SDRC_RFR_CTRL_133MHz,
+ RATE_IN_243X},
+
+ /* PRCM #5b - ratio1 - FAST */
+ {S13M, S400M, S200M, R1_CM_CLKSEL_MPU_VAL, /* 200MHz ARM */
+ R1_CM_CLKSEL_DSP_VAL, R1_CM_CLKSEL_GFX_VAL,
+ R1_CM_CLKSEL1_CORE_VAL, M5B_CM_CLKSEL1_PLL_13_VAL,
+ MX_CLKSEL2_PLL_2x_VAL, R1_CM_CLKSEL_MDM_VAL,
+ V24XX_SDRC_RFR_CTRL_100MHz,
+ RATE_IN_243X},
+
+ /* PRCM #3 - ratio2 (ES2) - SLOW */
+ {S13M, S330M, S165M, R2_CM_CLKSEL_MPU_VAL, /* 165MHz ARM */
+ R2_CM_CLKSEL_DSP_VAL, R2_CM_CLKSEL_GFX_VAL,
+ R2_CM_CLKSEL1_CORE_VAL, M3_CM_CLKSEL1_PLL_13_VAL,
+ MX_CLKSEL2_PLL_1x_VAL, R2_CM_CLKSEL_MDM_VAL,
+ V24XX_SDRC_RFR_CTRL_110MHz,
+ RATE_IN_243X},
+
+ /* PRCM #5a - ratio1 - SLOW */
+ {S13M, S266M, S133M, R1_CM_CLKSEL_MPU_VAL, /* 133MHz ARM */
+ R1_CM_CLKSEL_DSP_VAL, R1_CM_CLKSEL_GFX_VAL,
+ R1_CM_CLKSEL1_CORE_VAL, M5A_CM_CLKSEL1_PLL_13_VAL,
+ MX_CLKSEL2_PLL_1x_VAL, R1_CM_CLKSEL_MDM_VAL,
+ V24XX_SDRC_RFR_CTRL_133MHz,
+ RATE_IN_243X},
+
+ /* PRCM #5b - ratio1 - SLOW*/
+ {S13M, S200M, S100M, R1_CM_CLKSEL_MPU_VAL, /* 100MHz ARM */
+ R1_CM_CLKSEL_DSP_VAL, R1_CM_CLKSEL_GFX_VAL,
+ R1_CM_CLKSEL1_CORE_VAL, M5B_CM_CLKSEL1_PLL_13_VAL,
+ MX_CLKSEL2_PLL_1x_VAL, R1_CM_CLKSEL_MDM_VAL,
+ V24XX_SDRC_RFR_CTRL_100MHz,
+ RATE_IN_243X},
+
+ /* PRCM-boot/bypass */
+ {S13M, S13M, S13M, RB_CM_CLKSEL_MPU_VAL, /* 13MHz */
+ RB_CM_CLKSEL_DSP_VAL, RB_CM_CLKSEL_GFX_VAL,
+ RB_CM_CLKSEL1_CORE_VAL, MB_CM_CLKSEL1_PLL_13_VAL,
+ MX_CLKSEL2_PLL_2x_VAL, RB_CM_CLKSEL_MDM_VAL,
+ V24XX_SDRC_RFR_CTRL_BYPASS,
+ RATE_IN_243X},
+
+ /* PRCM-boot/bypass */
+ {S12M, S12M, S12M, RB_CM_CLKSEL_MPU_VAL, /* 12MHz */
+ RB_CM_CLKSEL_DSP_VAL, RB_CM_CLKSEL_GFX_VAL,
+ RB_CM_CLKSEL1_CORE_VAL, MB_CM_CLKSEL1_PLL_12_VAL,
+ MX_CLKSEL2_PLL_2x_VAL, RB_CM_CLKSEL_MDM_VAL,
+ V24XX_SDRC_RFR_CTRL_BYPASS,
+ RATE_IN_243X},
+
+ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0},
+};
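+
+/*
+ * Reading the table: each row is a positional initializer for
+ * struct prcm_config above.  Unpacking the first PRCM II entry:
+ *
+ *	.xtal_speed	= S12M			12MHz crystal
+ *	.dpll_speed	= S600M			DPLL output (x2)
+ *	.mpu_speed	= S300M			300MHz ARM
+ *	.cm_clksel_mpu	= RII_CM_CLKSEL_MPU_VAL
+ *	...				(dsp, gfx, core and pll dividers)
+ *	.cm_clksel_mdm	= 0			no modem block on 2420
+ *	.base_sdrc_rfr	= V24XX_SDRC_RFR_CTRL_100MHz
+ *	.flags		= RATE_IN_242X		valid on 2420 only
+ */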
+
+/*-------------------------------------------------------------------------
+ * 24xx clock tree.
+ *
+ * NOTE: In many cases here we are assigning a 'default' parent. In many
+ * cases the parent is selectable. The get/set parent calls will also
+ * switch sources.
+ *
+ * Some clocks say always_enabled, but they can be auto-idled for
+ * power savings. They will always be available upon clock request.
+ *
+ * Several sources are given initial rates which may be wrong; this will
+ * be fixed up in the init func.
+ *
+ * Things are broadly separated below by clock domains. It is
+ * noteworthy that most peripherals have dependencies on multiple clock
+ * domains. Many get their interface clocks from the L4 domain, but get
+ * functional clocks from fixed sources or other core-domain-derived
+ * clocks.
+ *-------------------------------------------------------------------------*/
+
+/* Base external input clocks */
+static struct clk func_32k_ck = {
+ .name = "func_32k_ck",
+ .rate = 32000,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+ RATE_FIXED | ALWAYS_ENABLED,
+};
+
+/* Typical 12/13MHz in standalone mode, will be 26MHz in chassis mode */
+static struct clk osc_ck = { /* (*12, *13, 19.2, *26, 38.4)MHz */
+ .name = "osc_ck",
+ .rate = 26000000, /* fixed up in clock init */
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+ RATE_FIXED | ALWAYS_ENABLED | RATE_PROPAGATES,
+};
+
+/* Without modem likely 12MHz, with modem likely 13MHz */
+static struct clk sys_ck = { /* (*12, *13, 19.2, 26, 38.4)MHz */
+ .name = "sys_ck", /* ~ ref_clk also */
+ .parent = &osc_ck,
+ .rate = 13000000,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+ RATE_FIXED | ALWAYS_ENABLED | RATE_PROPAGATES,
+ .rate_offset = 6, /* sysclkdiv 1 or 2, already handled or no boot */
+ .recalc = &omap2_sys_clk_recalc,
+};
+
+static struct clk alt_ck = { /* Typical 54M or 48M, may not exist */
+ .name = "alt_ck",
+ .rate = 54000000,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+ RATE_FIXED | ALWAYS_ENABLED | RATE_PROPAGATES,
+ .recalc = &omap2_propagate_rate,
+};
+
+/*
+ * Analog domain root source clocks
+ */
+
+/* dpll_ck is broken out into special cases through clksel */
+static struct clk dpll_ck = {
+ .name = "dpll_ck",
+ .parent = &sys_ck, /* Can be func_32k also */
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+ RATE_PROPAGATES | RATE_CKCTL | CM_PLL_SEL1,
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk apll96_ck = {
+ .name = "apll96_ck",
+ .parent = &sys_ck,
+ .rate = 96000000,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+ RATE_FIXED | RATE_PROPAGATES,
+ .enable_reg = (void __iomem *)&CM_CLKEN_PLL,
+ .enable_bit = 0x2,
+ .recalc = &omap2_propagate_rate,
+};
+
+static struct clk apll54_ck = {
+ .name = "apll54_ck",
+ .parent = &sys_ck,
+ .rate = 54000000,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+ RATE_FIXED | RATE_PROPAGATES,
+ .enable_reg = (void __iomem *)&CM_CLKEN_PLL,
+ .enable_bit = 0x6,
+ .recalc = &omap2_propagate_rate,
+};
+
+/*
+ * PRCM digital base sources
+ */
+static struct clk func_54m_ck = {
+ .name = "func_54m_ck",
+ .parent = &apll54_ck, /* can also be alt_clk */
+ .rate = 54000000,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+ RATE_FIXED | CM_PLL_SEL1 | RATE_PROPAGATES,
+ .src_offset = 5,
+ .enable_reg = (void __iomem *)&CM_CLKEN_PLL,
+ .enable_bit = 0xff,
+ .recalc = &omap2_propagate_rate,
+};
+
+static struct clk core_ck = {
+ .name = "core_ck",
+ .parent = &dpll_ck, /* can also be 32k */
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+ ALWAYS_ENABLED | RATE_PROPAGATES,
+ .recalc = &omap2_propagate_rate,
+};
+
+static struct clk sleep_ck = { /* sys_clk or 32k */
+ .name = "sleep_ck",
+ .parent = &func_32k_ck,
+ .rate = 32000,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .recalc = &omap2_propagate_rate,
+};
+
+static struct clk func_96m_ck = {
+ .name = "func_96m_ck",
+ .parent = &apll96_ck,
+ .rate = 96000000,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+ RATE_FIXED | RATE_PROPAGATES,
+ .enable_reg = (void __iomem *)&CM_CLKEN_PLL,
+ .enable_bit = 0xff,
+ .recalc = &omap2_propagate_rate,
+};
+
+static struct clk func_48m_ck = {
+ .name = "func_48m_ck",
+ .parent = &apll96_ck, /* 96M or Alt */
+ .rate = 48000000,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+ RATE_FIXED | CM_PLL_SEL1 | RATE_PROPAGATES,
+ .src_offset = 3,
+ .enable_reg = (void __iomem *)&CM_CLKEN_PLL,
+ .enable_bit = 0xff,
+ .recalc = &omap2_propagate_rate,
+};
+
+static struct clk func_12m_ck = {
+ .name = "func_12m_ck",
+ .parent = &func_48m_ck,
+ .rate = 12000000,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+ RATE_FIXED | RATE_PROPAGATES,
+ .recalc = &omap2_propagate_rate,
+ .enable_reg = (void __iomem *)&CM_CLKEN_PLL,
+ .enable_bit = 0xff,
+};
+
+/* Secure timer, only available in secure mode */
+static struct clk wdt1_osc_ck = {
+ .name = "ck_wdt1_osc",
+ .parent = &osc_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk sys_clkout = {
+ .name = "sys_clkout",
+ .parent = &func_54m_ck,
+ .rate = 54000000,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+ CM_SYSCLKOUT_SEL1 | RATE_CKCTL,
+ .src_offset = 0,
+ .enable_reg = (void __iomem *)&PRCM_CLKOUT_CTRL,
+ .enable_bit = 7,
+ .rate_offset = 3,
+ .recalc = &omap2_clksel_recalc,
+};
+
+/* In 2430, new in 2420 ES2 */
+static struct clk sys_clkout2 = {
+ .name = "sys_clkout2",
+ .parent = &func_54m_ck,
+ .rate = 54000000,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+ CM_SYSCLKOUT_SEL1 | RATE_CKCTL,
+ .src_offset = 8,
+ .enable_reg = (void __iomem *)&PRCM_CLKOUT_CTRL,
+ .enable_bit = 15,
+ .rate_offset = 11,
+ .recalc = &omap2_clksel_recalc,
+};
+
+/*
+ * MPU clock domain
+ * Clocks:
+ * MPU_FCLK, MPU_ICLK
+ * INT_M_FCLK, INT_M_I_CLK
+ *
+ * - Individual clocks are hardware managed.
+ * - Base divider comes from: CM_CLKSEL_MPU
+ *
+ */
+static struct clk mpu_ck = { /* Control cpu */
+ .name = "mpu_ck",
+ .parent = &core_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | RATE_CKCTL |
+ ALWAYS_ENABLED | CM_MPU_SEL1 | DELAYED_APP |
+ CONFIG_PARTICIPANT | RATE_PROPAGATES,
+ .rate_offset = 0, /* bits 0-4 */
+ .recalc = &omap2_clksel_recalc,
+};
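+
+/*
+ * Worked example: in the PRCM II fast set the DPLL output feeding core_ck
+ * is 600MHz and RII_CLKSEL_MPU programs a divide-by-2 into CM_CLKSEL_MPU
+ * bits 0-4, giving the 300MHz MPU rate listed in rate_table[].
+ */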
+
+/*
+ * DSP (2430-IVA2.1) (2420-UMA+IVA1) clock domain
+ * Clocks:
+ * 2430: IVA2.1_FCLK, IVA2.1_ICLK
+ * 2420: UMA_FCLK, UMA_ICLK, IVA_MPU, IVA_COP
+ */
+static struct clk iva2_1_fck = {
+ .name = "iva2_1_fck",
+ .parent = &core_ck,
+ .flags = CLOCK_IN_OMAP243X | RATE_CKCTL | CM_DSP_SEL1 |
+ DELAYED_APP | RATE_PROPAGATES |
+ CONFIG_PARTICIPANT,
+ .rate_offset = 0,
+ .enable_reg = (void __iomem *)&CM_FCLKEN_DSP,
+ .enable_bit = 0,
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk iva2_1_ick = {
+ .name = "iva2_1_ick",
+ .parent = &iva2_1_fck,
+ .flags = CLOCK_IN_OMAP243X | RATE_CKCTL | CM_DSP_SEL1 |
+ DELAYED_APP | CONFIG_PARTICIPANT,
+ .rate_offset = 5,
+ .recalc = &omap2_clksel_recalc,
+};
+
+/*
+ * Won't be too specific here. The core clock comes into this block,
+ * is divided, and is then tee'ed: one branch goes directly to the xyz
+ * enable controls, while the other branch gets further divided by 2 and
+ * then possibly routed into a synchronizer and out as clocks abc.
+ */
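+
+/*
+ * Worked example for the PRCM II set: core comes in at 600MHz,
+ * RII_CLKSEL_DSP divides it by 3 for the 200MHz c5x clock, and
+ * RII_CLKSEL_DSP_IF halves that again to the 100MHz interface clock
+ * (the rates noted next to those defines).
+ */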
+static struct clk dsp_fck = {
+ .name = "dsp_fck",
+ .parent = &core_ck,
+ .flags = CLOCK_IN_OMAP242X | RATE_CKCTL | CM_DSP_SEL1 |
+ DELAYED_APP | CONFIG_PARTICIPANT | RATE_PROPAGATES,
+ .rate_offset = 0,
+ .enable_reg = (void __iomem *)&CM_FCLKEN_DSP,
+ .enable_bit = 0,
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk dsp_ick = {
+ .name = "dsp_ick", /* apparently ipi and isp */
+ .parent = &dsp_fck,
+ .flags = CLOCK_IN_OMAP242X | RATE_CKCTL | CM_DSP_SEL1 |
+ DELAYED_APP | CONFIG_PARTICIPANT,
+ .rate_offset = 5,
+ .enable_reg = (void __iomem *)&CM_ICLKEN_DSP,
+ .enable_bit = 1, /* for ipi */
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk iva1_ifck = {
+ .name = "iva1_ifck",
+ .parent = &core_ck,
+ .flags = CLOCK_IN_OMAP242X | CM_DSP_SEL1 | RATE_CKCTL |
+ CONFIG_PARTICIPANT | RATE_PROPAGATES | DELAYED_APP,
+ .rate_offset = 8,
+ .enable_reg = (void __iomem *)&CM_FCLKEN_DSP,
+ .enable_bit = 10,
+ .recalc = &omap2_clksel_recalc,
+};
+
+/* IVA1 mpu/int/i/f clocks are /2 of parent */
+static struct clk iva1_mpu_int_ifck = {
+ .name = "iva1_mpu_int_ifck",
+ .parent = &iva1_ifck,
+ .flags = CLOCK_IN_OMAP242X | RATE_CKCTL | CM_DSP_SEL1,
+ .enable_reg = (void __iomem *)&CM_FCLKEN_DSP,
+ .enable_bit = 8,
+ .recalc = &omap2_clksel_recalc,
+};
+
+/*
+ * L3 clock domain
+ * L3 clocks are used for both interface and functional clocks to
+ * multiple entities. Some of these clocks are completely managed
+ * by hardware, and some others allow software control. Hardware-
+ * managed ones are generally based directly on CLK_REQ signals and
+ * various auto-idle settings. The functional spec sets many of these
+ * as 'tie-high' for their enables.
+ *
+ * I-CLOCKS:
+ * L3-Interconnect, SMS, GPMC, SDRC, OCM_RAM, OCM_ROM, SDMA
+ * CAM, HS-USB.
+ * F-CLOCK
+ * SSI.
+ *
+ * GPMC memories and SDRC have timing and clock sensitive registers which
+ * may very well need notification when the clock changes. Currently for low
+ * operating points, these are taken care of in sleep.S.
+ */
+static struct clk core_l3_ck = { /* Used for ick and fck, interconnect */
+ .name = "core_l3_ck",
+ .parent = &core_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+ RATE_CKCTL | ALWAYS_ENABLED | CM_CORE_SEL1 |
+ DELAYED_APP | CONFIG_PARTICIPANT |
+ RATE_PROPAGATES,
+ .rate_offset = 0,
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk usb_l4_ick = { /* FS-USB interface clock */
+ .name = "usb_l4_ick",
+ .parent = &core_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+ RATE_CKCTL | CM_CORE_SEL1 | DELAYED_APP |
+ CONFIG_PARTICIPANT,
+ .enable_reg = (void __iomem *)&CM_ICLKEN2_CORE,
+ .enable_bit = 0,
+ .rate_offset = 25,
+ .recalc = &omap2_clksel_recalc,
+};
+
+/*
+ * SSI is in the L3 management domain; its direct parent is core, not l3,
+ * as many core power domain entities are grouped into the L3 clock
+ * domain.
+ * SSI_SSR_FCLK, SSI_SST_FCLK, SSI_L4_CLIK
+ *
+ * ssr = core/1/2/3/4/5, sst = 1/2 ssr.
+ */
+static struct clk ssi_ssr_sst_fck = {
+ .name = "ssi_fck",
+ .parent = &core_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+ RATE_CKCTL | CM_CORE_SEL1 | DELAYED_APP,
+ .enable_reg = (void __iomem *)&CM_FCLKEN2_CORE, /* bit 1 */
+ .enable_bit = 1,
+ .rate_offset = 20,
+ .recalc = &omap2_clksel_recalc,
+};
+
+/*
+ * GFX clock domain
+ * Clocks:
+ * GFX_FCLK, GFX_ICLK
+ * GFX_CG1(2d), GFX_CG2(3d)
+ *
+ * GFX_FCLK runs from L3 and is divided by (1, 2, 3, 4).
+ * The 2d and 3d clocks run at a hardware-determined
+ * divided value of fclk.
+ *
+ */
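+
+/*
+ * Worked example for the PRCM III set: L3 runs at 133MHz and
+ * RIII_CLKSEL_GFX selects a divide-by-2, so GFX_FCLK comes out at the
+ * 66.5MHz noted next to that define.
+ */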
+static struct clk gfx_3d_fck = {
+ .name = "gfx_3d_fck",
+ .parent = &core_l3_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+ RATE_CKCTL | CM_GFX_SEL1,
+ .enable_reg = (void __iomem *)&CM_FCLKEN_GFX,
+ .enable_bit = 2,
+ .rate_offset = 0,
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk gfx_2d_fck = {
+ .name = "gfx_2d_fck",
+ .parent = &core_l3_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+ RATE_CKCTL | CM_GFX_SEL1,
+ .enable_reg = (void __iomem *)&CM_FCLKEN_GFX,
+ .enable_bit = 1,
+ .rate_offset = 0,
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk gfx_ick = {
+ .name = "gfx_ick", /* From l3 */
+ .parent = &core_l3_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+ RATE_CKCTL,
+ .enable_reg = (void __iomem *)&CM_ICLKEN_GFX, /* bit 0 */
+ .enable_bit = 0,
+ .recalc = &omap2_followparent_recalc,
+};
+
+/*
+ * Modem clock domain (2430)
+ * CLOCKS:
+ * MDM_OSC_CLK
+ * MDM_ICLK
+ */
+static struct clk mdm_ick = { /* used both as a ick and fck */
+ .name = "mdm_ick",
+ .parent = &core_ck,
+ .flags = CLOCK_IN_OMAP243X | RATE_CKCTL | CM_MODEM_SEL1 |
+ DELAYED_APP | CONFIG_PARTICIPANT,
+ .rate_offset = 0,
+ .enable_reg = (void __iomem *)&CM_ICLKEN_MDM,
+ .enable_bit = 0,
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk mdm_osc_ck = {
+ .name = "mdm_osc_ck",
+ .rate = 26000000,
+ .parent = &osc_ck,
+ .flags = CLOCK_IN_OMAP243X | RATE_FIXED,
+ .enable_reg = (void __iomem *)&CM_FCLKEN_MDM,
+ .enable_bit = 1,
+ .recalc = &omap2_followparent_recalc,
+};
+
+/*
+ * L4 clock management domain
+ *
+ * This domain contains lots of interface clocks from the L4 interface, and
+ * some functional clocks. Fixed APLL functional source clocks are managed in
+ * this domain.
+ */
+static struct clk l4_ck = { /* used both as an ick and fck */
+ .name = "l4_ck",
+ .parent = &core_l3_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+ RATE_CKCTL | ALWAYS_ENABLED | CM_CORE_SEL1 |
+ DELAYED_APP | RATE_PROPAGATES,
+ .rate_offset = 5,
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk ssi_l4_ick = {
+ .name = "ssi_l4_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | RATE_CKCTL,
+ .enable_reg = (void __iomem *)&CM_ICLKEN2_CORE, /* bit 1 */
+ .enable_bit = 1,
+ .recalc = &omap2_followparent_recalc,
+};
+
+/*
+ * DSS clock domain
+ * CLOCKs:
+ * DSS_L4_ICLK, DSS_L3_ICLK,
+ * DSS_CLK1, DSS_CLK2, DSS_54MHz_CLK
+ *
+ * DSS is both initiator and target.
+ */
+static struct clk dss_ick = { /* Enables both L3,L4 ICLK's */
+ .name = "dss_ick",
+ .parent = &l4_ck, /* really both l3 and l4 */
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X | RATE_CKCTL,
+ .enable_reg = (void __iomem *)&CM_ICLKEN1_CORE,
+ .enable_bit = 0,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk dss1_fck = {
+ .name = "dss1_fck",
+ .parent = &core_ck, /* Core or sys */
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+ RATE_CKCTL | CM_CORE_SEL1 | DELAYED_APP,
+ .enable_reg = (void __iomem *)&CM_FCLKEN1_CORE,
+ .enable_bit = 0,
+ .rate_offset = 8,
+ .src_offset = 8,
+ .recalc = &omap2_clksel_recalc,
+};
+
+static struct clk dss2_fck = { /* Alt clk used in power management */
+ .name = "dss2_fck",
+ .parent = &sys_ck, /* fixed at sys_ck or 48MHz */
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+ RATE_CKCTL | CM_CORE_SEL1 | RATE_FIXED,
+ .enable_reg = (void __iomem *)&CM_FCLKEN1_CORE,
+ .enable_bit = 1,
+ .src_offset = 13,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk dss_54m_fck = { /* Alt clk used in power management */
+ .name = "dss_54m_fck", /* 54m tv clk */
+ .parent = &func_54m_ck,
+ .rate = 54000000,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+ RATE_FIXED | RATE_PROPAGATES,
+ .enable_reg = (void __iomem *)&CM_FCLKEN1_CORE,
+ .enable_bit = 2,
+ .recalc = &omap2_propagate_rate,
+};
+
+/*
+ * CORE power domain ICLK & FCLK defines.
+ * Many of these can have more than one possible parent. Entries
+ * here will likely have an L4 interface parent, and may have multiple
+ * functional clock parents.
+ */
+static struct clk gpt1_ick = {
+ .name = "gpt1_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN_WKUP, /* bit0 */
+ .enable_bit = 0,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk gpt1_fck = {
+ .name = "gpt1_fck",
+ .parent = &func_32k_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+ CM_WKUP_SEL1,
+ .enable_reg = (void __iomem *)&CM_FCLKEN_WKUP,
+ .enable_bit = 0,
+ .src_offset = 0,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk gpt2_ick = {
+ .name = "gpt2_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN1_CORE, /* bit4 */
+ .enable_bit = 4,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk gpt2_fck = {
+ .name = "gpt2_fck",
+ .parent = &func_32k_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+ CM_CORE_SEL2,
+ .enable_reg = (void __iomem *)&CM_FCLKEN1_CORE,
+ .enable_bit = 4,
+ .src_offset = 2,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk gpt3_ick = {
+ .name = "gpt3_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN1_CORE, /* Bit5 */
+ .enable_bit = 5,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk gpt3_fck = {
+ .name = "gpt3_fck",
+ .parent = &func_32k_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+ CM_CORE_SEL2,
+ .enable_reg = (void __iomem *)&CM_FCLKEN1_CORE,
+ .enable_bit = 5,
+ .src_offset = 4,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk gpt4_ick = {
+ .name = "gpt4_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN1_CORE, /* Bit6 */
+ .enable_bit = 6,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk gpt4_fck = {
+ .name = "gpt4_fck",
+ .parent = &func_32k_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+ CM_CORE_SEL2,
+ .enable_reg = (void __iomem *)&CM_FCLKEN1_CORE,
+ .enable_bit = 6,
+ .src_offset = 6,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk gpt5_ick = {
+ .name = "gpt5_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN1_CORE, /* Bit7 */
+ .enable_bit = 7,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk gpt5_fck = {
+ .name = "gpt5_fck",
+ .parent = &func_32k_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+ CM_CORE_SEL2,
+ .enable_reg = (void __iomem *)&CM_FCLKEN1_CORE,
+ .enable_bit = 7,
+ .src_offset = 8,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk gpt6_ick = {
+ .name = "gpt6_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_bit = 8,
+ .enable_reg = (void __iomem *)&CM_ICLKEN1_CORE, /* bit8 */
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk gpt6_fck = {
+ .name = "gpt6_fck",
+ .parent = &func_32k_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+ CM_CORE_SEL2,
+ .enable_reg = (void __iomem *)&CM_FCLKEN1_CORE,
+ .enable_bit = 8,
+ .src_offset = 10,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk gpt7_ick = {
+ .name = "gpt7_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN1_CORE, /* bit9 */
+ .enable_bit = 9,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk gpt7_fck = {
+ .name = "gpt7_fck",
+ .parent = &func_32k_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+ CM_CORE_SEL2,
+ .enable_reg = (void __iomem *)&CM_FCLKEN1_CORE,
+ .enable_bit = 9,
+ .src_offset = 12,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk gpt8_ick = {
+ .name = "gpt8_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN1_CORE, /* bit10 */
+ .enable_bit = 10,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk gpt8_fck = {
+ .name = "gpt8_fck",
+ .parent = &func_32k_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+ CM_CORE_SEL2,
+ .enable_reg = (void __iomem *)&CM_FCLKEN1_CORE,
+ .enable_bit = 10,
+ .src_offset = 14,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk gpt9_ick = {
+ .name = "gpt9_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN1_CORE,
+ .enable_bit = 11,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk gpt9_fck = {
+ .name = "gpt9_fck",
+ .parent = &func_32k_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+ CM_CORE_SEL2,
+ .enable_reg = (void __iomem *)&CM_FCLKEN1_CORE,
+ .enable_bit = 11,
+ .src_offset = 16,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk gpt10_ick = {
+ .name = "gpt10_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN1_CORE,
+ .enable_bit = 12,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk gpt10_fck = {
+ .name = "gpt10_fck",
+ .parent = &func_32k_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+ CM_CORE_SEL2,
+ .enable_reg = (void __iomem *)&CM_FCLKEN1_CORE,
+ .enable_bit = 12,
+ .src_offset = 18,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk gpt11_ick = {
+ .name = "gpt11_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN1_CORE,
+ .enable_bit = 13,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk gpt11_fck = {
+ .name = "gpt11_fck",
+ .parent = &func_32k_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+ CM_CORE_SEL2,
+ .enable_reg = (void __iomem *)&CM_FCLKEN1_CORE,
+ .enable_bit = 13,
+ .src_offset = 20,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk gpt12_ick = {
+ .name = "gpt12_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN1_CORE, /* bit14 */
+ .enable_bit = 14,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk gpt12_fck = {
+ .name = "gpt12_fck",
+ .parent = &func_32k_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+ CM_CORE_SEL2,
+ .enable_reg = (void __iomem *)&CM_FCLKEN1_CORE,
+ .enable_bit = 14,
+ .src_offset = 22,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk mcbsp1_ick = {
+ .name = "mcbsp1_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_bit = 15,
+ .enable_reg = (void __iomem *)&CM_ICLKEN1_CORE, /* bit15 */
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk mcbsp1_fck = {
+ .name = "mcbsp1_fck",
+ .parent = &func_96m_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_bit = 15,
+ .enable_reg = (void __iomem *)&CM_FCLKEN1_CORE,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk mcbsp2_ick = {
+ .name = "mcbsp2_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_bit = 16,
+ .enable_reg = (void __iomem *)&CM_ICLKEN1_CORE,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk mcbsp2_fck = {
+ .name = "mcbsp2_fck",
+ .parent = &func_96m_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_bit = 16,
+ .enable_reg = (void __iomem *)&CM_FCLKEN1_CORE,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk mcbsp3_ick = {
+ .name = "mcbsp3_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN2_CORE,
+ .enable_bit = 3,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk mcbsp3_fck = {
+ .name = "mcbsp3_fck",
+ .parent = &func_96m_ck,
+ .flags = CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_FCLKEN2_CORE,
+ .enable_bit = 3,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk mcbsp4_ick = {
+ .name = "mcbsp4_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN2_CORE,
+ .enable_bit = 4,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk mcbsp4_fck = {
+ .name = "mcbsp4_fck",
+ .parent = &func_96m_ck,
+ .flags = CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_FCLKEN2_CORE,
+ .enable_bit = 4,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk mcbsp5_ick = {
+ .name = "mcbsp5_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN2_CORE,
+ .enable_bit = 5,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk mcbsp5_fck = {
+ .name = "mcbsp5_fck",
+ .parent = &func_96m_ck,
+ .flags = CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_FCLKEN2_CORE,
+ .enable_bit = 5,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk mcspi1_ick = {
+ .name = "mcspi1_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN1_CORE,
+ .enable_bit = 17,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk mcspi1_fck = {
+ .name = "mcspi1_fck",
+ .parent = &func_48m_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_FCLKEN1_CORE,
+ .enable_bit = 17,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk mcspi2_ick = {
+ .name = "mcspi2_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN1_CORE,
+ .enable_bit = 18,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk mcspi2_fck = {
+ .name = "mcspi2_fck",
+ .parent = &func_48m_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_FCLKEN1_CORE,
+ .enable_bit = 18,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk mcspi3_ick = {
+ .name = "mcspi3_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN2_CORE,
+ .enable_bit = 9,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk mcspi3_fck = {
+ .name = "mcspi3_fck",
+ .parent = &func_48m_ck,
+ .flags = CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_FCLKEN2_CORE,
+ .enable_bit = 9,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk uart1_ick = {
+ .name = "uart1_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN1_CORE,
+ .enable_bit = 21,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk uart1_fck = {
+ .name = "uart1_fck",
+ .parent = &func_48m_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_FCLKEN1_CORE,
+ .enable_bit = 21,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk uart2_ick = {
+ .name = "uart2_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN1_CORE,
+ .enable_bit = 22,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk uart2_fck = {
+ .name = "uart2_fck",
+ .parent = &func_48m_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_FCLKEN1_CORE,
+ .enable_bit = 22,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk uart3_ick = {
+ .name = "uart3_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN2_CORE,
+ .enable_bit = 2,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk uart3_fck = {
+ .name = "uart3_fck",
+ .parent = &func_48m_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_FCLKEN2_CORE,
+ .enable_bit = 2,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk gpios_ick = {
+ .name = "gpios_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN_WKUP,
+ .enable_bit = 2,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk gpios_fck = {
+ .name = "gpios_fck",
+ .parent = &func_32k_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_FCLKEN_WKUP,
+ .enable_bit = 2,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk mpu_wdt_ick = {
+ .name = "mpu_wdt_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN_WKUP,
+ .enable_bit = 3,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk mpu_wdt_fck = {
+ .name = "mpu_wdt_fck",
+ .parent = &func_32k_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_FCLKEN_WKUP,
+ .enable_bit = 3,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk sync_32k_ick = {
+ .name = "sync_32k_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN_WKUP,
+ .enable_bit = 1,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk wdt1_ick = {
+ .name = "wdt1_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN_WKUP,
+ .enable_bit = 4,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk omapctrl_ick = {
+ .name = "omapctrl_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN_WKUP,
+ .enable_bit = 5,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk icr_ick = {
+ .name = "icr_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN_WKUP,
+ .enable_bit = 6,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk cam_ick = {
+ .name = "cam_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN1_CORE,
+ .enable_bit = 31,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk cam_fck = {
+ .name = "cam_fck",
+ .parent = &func_96m_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_FCLKEN1_CORE,
+ .enable_bit = 31,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk mailboxes_ick = {
+ .name = "mailboxes_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN1_CORE,
+ .enable_bit = 30,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk wdt4_ick = {
+ .name = "wdt4_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN1_CORE,
+ .enable_bit = 29,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk wdt4_fck = {
+ .name = "wdt4_fck",
+ .parent = &func_32k_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_FCLKEN1_CORE,
+ .enable_bit = 29,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk wdt3_ick = {
+ .name = "wdt3_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP242X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN1_CORE,
+ .enable_bit = 28,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk wdt3_fck = {
+ .name = "wdt3_fck",
+ .parent = &func_32k_ck,
+ .flags = CLOCK_IN_OMAP242X,
+ .enable_reg = (void __iomem *)&CM_FCLKEN1_CORE,
+ .enable_bit = 28,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk mspro_ick = {
+ .name = "mspro_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN1_CORE,
+ .enable_bit = 27,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk mspro_fck = {
+ .name = "mspro_fck",
+ .parent = &func_96m_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_FCLKEN1_CORE,
+ .enable_bit = 27,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk mmc_ick = {
+ .name = "mmc_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP242X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN1_CORE,
+ .enable_bit = 26,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk mmc_fck = {
+ .name = "mmc_fck",
+ .parent = &func_96m_ck,
+ .flags = CLOCK_IN_OMAP242X,
+ .enable_reg = (void __iomem *)&CM_FCLKEN1_CORE,
+ .enable_bit = 26,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk fac_ick = {
+ .name = "fac_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN1_CORE,
+ .enable_bit = 25,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk fac_fck = {
+ .name = "fac_fck",
+ .parent = &func_12m_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_FCLKEN1_CORE,
+ .enable_bit = 25,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk eac_ick = {
+ .name = "eac_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP242X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN1_CORE,
+ .enable_bit = 24,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk eac_fck = {
+ .name = "eac_fck",
+ .parent = &func_96m_ck,
+ .flags = CLOCK_IN_OMAP242X,
+ .enable_reg = (void __iomem *)&CM_FCLKEN1_CORE,
+ .enable_bit = 24,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk hdq_ick = {
+ .name = "hdq_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN1_CORE,
+ .enable_bit = 23,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk hdq_fck = {
+ .name = "hdq_fck",
+ .parent = &func_12m_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_FCLKEN1_CORE,
+ .enable_bit = 23,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk i2c2_ick = {
+ .name = "i2c2_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN1_CORE,
+ .enable_bit = 20,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk i2c2_fck = {
+ .name = "i2c2_fck",
+ .parent = &func_12m_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_FCLKEN1_CORE,
+ .enable_bit = 20,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk i2chs2_fck = {
+ .name = "i2chs2_fck",
+ .parent = &func_96m_ck,
+ .flags = CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_FCLKEN2_CORE,
+ .enable_bit = 20,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk i2c1_ick = {
+ .name = "i2c1_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN1_CORE,
+ .enable_bit = 19,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk i2c1_fck = {
+ .name = "i2c1_fck",
+ .parent = &func_12m_ck,
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_FCLKEN1_CORE,
+ .enable_bit = 19,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk i2chs1_fck = {
+ .name = "i2chs1_fck",
+ .parent = &func_96m_ck,
+ .flags = CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_FCLKEN2_CORE,
+ .enable_bit = 19,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk vlynq_ick = {
+ .name = "vlynq_ick",
+ .parent = &core_l3_ck,
+ .flags = CLOCK_IN_OMAP242X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN1_CORE,
+ .enable_bit = 3,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk vlynq_fck = {
+ .name = "vlynq_fck",
+ .parent = &func_96m_ck,
+ .flags = CLOCK_IN_OMAP242X | RATE_CKCTL | CM_CORE_SEL1 | DELAYED_APP,
+ .enable_reg = (void __iomem *)&CM_FCLKEN1_CORE,
+ .enable_bit = 3,
+ .src_offset = 15,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk sdrc_ick = {
+ .name = "sdrc_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN3_CORE,
+ .enable_bit = 2,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk des_ick = {
+ .name = "des_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP243X | CLOCK_IN_OMAP242X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN4_CORE,
+ .enable_bit = 0,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk sha_ick = {
+ .name = "sha_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP243X | CLOCK_IN_OMAP242X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN4_CORE,
+ .enable_bit = 1,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk rng_ick = {
+ .name = "rng_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP243X | CLOCK_IN_OMAP242X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN4_CORE,
+ .enable_bit = 2,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk aes_ick = {
+ .name = "aes_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP243X | CLOCK_IN_OMAP242X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN4_CORE,
+ .enable_bit = 3,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk pka_ick = {
+ .name = "pka_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP243X | CLOCK_IN_OMAP242X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN4_CORE,
+ .enable_bit = 4,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk usb_fck = {
+ .name = "usb_fck",
+ .parent = &func_48m_ck,
+ .flags = CLOCK_IN_OMAP243X | CLOCK_IN_OMAP242X,
+ .enable_reg = (void __iomem *)&CM_FCLKEN2_CORE,
+ .enable_bit = 0,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk usbhs_ick = {
+ .name = "usbhs_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN2_CORE,
+ .enable_bit = 6,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk mmchs1_ick = {
+ .name = "mmchs1_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN2_CORE,
+ .enable_bit = 7,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk mmchs1_fck = {
+ .name = "mmchs1_fck",
+ .parent = &func_96m_ck,
+ .flags = CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_FCLKEN2_CORE,
+ .enable_bit = 7,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk mmchs2_ick = {
+ .name = "mmchs2_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN2_CORE,
+ .enable_bit = 8,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk mmchs2_fck = {
+ .name = "mmchs2_fck",
+ .parent = &func_96m_ck,
+ .flags = CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_FCLKEN2_CORE,
+ .enable_bit = 8,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk gpio5_ick = {
+ .name = "gpio5_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN2_CORE,
+ .enable_bit = 10,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk gpio5_fck = {
+ .name = "gpio5_fck",
+ .parent = &func_32k_ck,
+ .flags = CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_FCLKEN2_CORE,
+ .enable_bit = 10,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk mdm_intc_ick = {
+ .name = "mdm_intc_ick",
+ .parent = &l4_ck,
+ .flags = CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_ICLKEN2_CORE,
+ .enable_bit = 11,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk mmchsdb1_fck = {
+ .name = "mmchsdb1_fck",
+ .parent = &func_32k_ck,
+ .flags = CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_FCLKEN2_CORE,
+ .enable_bit = 16,
+ .recalc = &omap2_followparent_recalc,
+};
+
+static struct clk mmchsdb2_fck = {
+ .name = "mmchsdb2_fck",
+ .parent = &func_32k_ck,
+ .flags = CLOCK_IN_OMAP243X,
+ .enable_reg = (void __iomem *)&CM_FCLKEN2_CORE,
+ .enable_bit = 17,
+ .recalc = &omap2_followparent_recalc,
+};
+
+/*
+ * This clock is a composite clock that changes an entire active rate set
+ * and then forces a rebalance. It keys on the MPU speed, but any key speed
+ * that is part of a set in the rate table could be used.
+ *
+ * To really change a set you need memory timing tables that are switched
+ * from SRAM, pre- and post-change notifiers, and a top-level set change
+ * that does not trigger low-level display recalcs. This is why DPM
+ * notifiers work: interrupts are off, and they walk a list of clocks that
+ * are already _off_, so nothing touches the bus mid-change.
+ *
+ * This clock should have no parent. It embodies the entire upper-level
+ * active set. A parent would also interfere with some of the init.
+ */
+static struct clk virt_prcm_set = {
+ .name = "virt_prcm_set",
+ .flags = CLOCK_IN_OMAP242X | CLOCK_IN_OMAP243X |
+ VIRTUAL_CLOCK | ALWAYS_ENABLED | DELAYED_APP,
+ .parent = &mpu_ck, /* Indexed by mpu speed, no parent */
+ .recalc = &omap2_mpu_recalc, /* sets are keyed on mpu rate */
+ .set_rate = &omap2_select_table_rate,
+ .round_rate = &omap2_round_to_table_rate,
+};
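The intended use of this virtual clock is simply the generic rate-change calls on the "virt_prcm_set" name; a minimal sketch, assuming the clk API used elsewhere in this patch (the 300 MHz target is only an illustrative value, not taken from the rate table):

/* Illustrative sketch: switch the active rate set by requesting a new
 * MPU-keyed rate on the virtual clock. The 300 MHz target is a made-up
 * example value. */
static int example_select_rate_set(void)
{
	struct clk *vclk;
	long rate;
	int ret = 0;

	vclk = clk_get(NULL, "virt_prcm_set");
	if (IS_ERR(vclk))
		return PTR_ERR(vclk);

	rate = clk_round_rate(vclk, 300000000);	/* snap to a table rate */
	if (rate > 0)
		ret = clk_set_rate(vclk, rate);

	clk_put(vclk);
	return ret;
}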
+
+static struct clk *onchip_clks[] = {
+ /* external root sources */
+ &func_32k_ck,
+ &osc_ck,
+ &sys_ck,
+ &alt_ck,
+ /* internal analog sources */
+ &dpll_ck,
+ &apll96_ck,
+ &apll54_ck,
+ /* internal prcm root sources */
+ &func_54m_ck,
+ &core_ck,
+ &sleep_ck,
+ &func_96m_ck,
+ &func_48m_ck,
+ &func_12m_ck,
+ &wdt1_osc_ck,
+ &sys_clkout,
+ &sys_clkout2,
+ /* mpu domain clocks */
+ &mpu_ck,
+ /* dsp domain clocks */
+ &iva2_1_fck, /* 2430 */
+ &iva2_1_ick,
+ &dsp_ick, /* 2420 */
+ &dsp_fck,
+ &iva1_ifck,
+ &iva1_mpu_int_ifck,
+ /* GFX domain clocks */
+ &gfx_3d_fck,
+ &gfx_2d_fck,
+ &gfx_ick,
+ /* Modem domain clocks */
+ &mdm_ick,
+ &mdm_osc_ck,
+ /* DSS domain clocks */
+ &dss_ick,
+ &dss1_fck,
+ &dss2_fck,
+ &dss_54m_fck,
+ /* L3 domain clocks */
+ &core_l3_ck,
+ &ssi_ssr_sst_fck,
+ &usb_l4_ick,
+ /* L4 domain clocks */
+ &l4_ck, /* used as both core_l4 and wu_l4 */
+ &ssi_l4_ick,
+ /* virtual meta-group clock */
+ &virt_prcm_set,
+ /* general l4 interface ck, multi-parent functional clk */
+ &gpt1_ick,
+ &gpt1_fck,
+ &gpt2_ick,
+ &gpt2_fck,
+ &gpt3_ick,
+ &gpt3_fck,
+ &gpt4_ick,
+ &gpt4_fck,
+ &gpt5_ick,
+ &gpt5_fck,
+ &gpt6_ick,
+ &gpt6_fck,
+ &gpt7_ick,
+ &gpt7_fck,
+ &gpt8_ick,
+ &gpt8_fck,
+ &gpt9_ick,
+ &gpt9_fck,
+ &gpt10_ick,
+ &gpt10_fck,
+ &gpt11_ick,
+ &gpt11_fck,
+ &gpt12_ick,
+ &gpt12_fck,
+ &mcbsp1_ick,
+ &mcbsp1_fck,
+ &mcbsp2_ick,
+ &mcbsp2_fck,
+ &mcbsp3_ick,
+ &mcbsp3_fck,
+ &mcbsp4_ick,
+ &mcbsp4_fck,
+ &mcbsp5_ick,
+ &mcbsp5_fck,
+ &mcspi1_ick,
+ &mcspi1_fck,
+ &mcspi2_ick,
+ &mcspi2_fck,
+ &mcspi3_ick,
+ &mcspi3_fck,
+ &uart1_ick,
+ &uart1_fck,
+ &uart2_ick,
+ &uart2_fck,
+ &uart3_ick,
+ &uart3_fck,
+ &gpios_ick,
+ &gpios_fck,
+ &mpu_wdt_ick,
+ &mpu_wdt_fck,
+ &sync_32k_ick,
+ &wdt1_ick,
+ &omapctrl_ick,
+ &icr_ick,
+ &cam_fck,
+ &cam_ick,
+ &mailboxes_ick,
+ &wdt4_ick,
+ &wdt4_fck,
+ &wdt3_ick,
+ &wdt3_fck,
+ &mspro_ick,
+ &mspro_fck,
+ &mmc_ick,
+ &mmc_fck,
+ &fac_ick,
+ &fac_fck,
+ &eac_ick,
+ &eac_fck,
+ &hdq_ick,
+ &hdq_fck,
+ &i2c1_ick,
+ &i2c1_fck,
+ &i2chs1_fck,
+ &i2c2_ick,
+ &i2c2_fck,
+ &i2chs2_fck,
+ &vlynq_ick,
+ &vlynq_fck,
+ &sdrc_ick,
+ &des_ick,
+ &sha_ick,
+ &rng_ick,
+ &aes_ick,
+ &pka_ick,
+ &usb_fck,
+ &usbhs_ick,
+ &mmchs1_ick,
+ &mmchs1_fck,
+ &mmchs2_ick,
+ &mmchs2_fck,
+ &gpio5_ick,
+ &gpio5_fck,
+ &mdm_intc_ick,
+ &mmchsdb1_fck,
+ &mmchsdb2_fck,
+};
+
+#endif
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
new file mode 100644
index 00000000000..7181edb8935
--- /dev/null
+++ b/arch/arm/mach-omap2/devices.c
@@ -0,0 +1,89 @@
+/*
+ * linux/arch/arm/mach-omap2/devices.c
+ *
+ * OMAP2 platform device setup/initialization
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+
+#include <asm/hardware.h>
+#include <asm/io.h>
+#include <asm/mach-types.h>
+#include <asm/mach/map.h>
+
+#include <asm/arch/tc.h>
+#include <asm/arch/board.h>
+#include <asm/arch/mux.h>
+#include <asm/arch/gpio.h>
+
+extern void omap_nop_release(struct device *dev);
+
+/*-------------------------------------------------------------------------*/
+
+#if defined(CONFIG_I2C_OMAP) || defined(CONFIG_I2C_OMAP_MODULE)
+
+#define OMAP2_I2C_BASE2 0x48072000
+#define OMAP2_I2C_INT2 57
+
+static struct resource i2c_resources2[] = {
+ {
+ .start = OMAP2_I2C_BASE2,
+ .end = OMAP2_I2C_BASE2 + 0x3f,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = OMAP2_I2C_INT2,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device omap_i2c_device2 = {
+ .name = "i2c_omap",
+ .id = 2,
+ .dev = {
+ .release = omap_nop_release,
+ },
+ .num_resources = ARRAY_SIZE(i2c_resources2),
+ .resource = i2c_resources2,
+};
+
+/* See also arch/arm/plat-omap/devices.c for first I2C on 24xx */
+static void omap_init_i2c(void)
+{
+ /* REVISIT: Second I2C not in use on H4? */
+ if (machine_is_omap_h4())
+ return;
+
+ omap_cfg_reg(J15_24XX_I2C2_SCL);
+ omap_cfg_reg(H19_24XX_I2C2_SDA);
+ (void) platform_device_register(&omap_i2c_device2);
+}
+
+#else
+
+static void omap_init_i2c(void) {}
+
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+static int __init omap2_init_devices(void)
+{
+ /* please keep these calls, and their implementations above,
+ * in alphabetical order so they're easier to sort through.
+ */
+ omap_init_i2c();
+
+ return 0;
+}
+arch_initcall(omap2_init_devices);
+
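Each peripheral in this file follows the same pattern: a resource table, a platform_device, and an omap_init_*() helper called from omap2_init_devices() in alphabetical order. A hedged sketch of how a further on-chip device would slot in; the base address, IRQ and device name are placeholders, not real OMAP2 values:

#define OMAP2_FOO_BASE		0x480aa000	/* placeholder address */
#define OMAP2_FOO_INT		99		/* placeholder IRQ */

static struct resource foo_resources[] = {
	{
		.start	= OMAP2_FOO_BASE,
		.end	= OMAP2_FOO_BASE + 0xff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= OMAP2_FOO_INT,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device omap_foo_device = {
	.name		= "omap-foo",		/* placeholder driver name */
	.id		= -1,
	.dev = {
		.release	= omap_nop_release,
	},
	.num_resources	= ARRAY_SIZE(foo_resources),
	.resource	= foo_resources,
};

static void omap_init_foo(void)
{
	(void) platform_device_register(&omap_foo_device);
}

The matching omap_init_foo() call would then be added to omap2_init_devices() next to omap_init_i2c(), keeping the alphabetical order the comment there asks for.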
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
new file mode 100644
index 00000000000..76187300f2b
--- /dev/null
+++ b/arch/arm/mach-omap2/id.c
@@ -0,0 +1,124 @@
+/*
+ * linux/arch/arm/mach-omap2/id.c
+ *
+ * OMAP2 CPU identification code
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Written by Tony Lindgren <tony@atomide.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <asm/io.h>
+
+#define OMAP24XX_TAP_BASE io_p2v(0x48014000)
+
+#define OMAP_TAP_IDCODE 0x0204
+#define OMAP_TAP_PROD_ID 0x0208
+
+#define OMAP_TAP_DIE_ID_0 0x0218
+#define OMAP_TAP_DIE_ID_1 0x021C
+#define OMAP_TAP_DIE_ID_2 0x0220
+#define OMAP_TAP_DIE_ID_3 0x0224
+
+/* system_rev fields for OMAP2 processors:
+ * CPU id bits [31:16]
+ * CPU device type [15:12] (unprg, normal, POP)
+ * CPU revision [11:08]
+ * CPU class bits [07:00]
+ */
+
+struct omap_id {
+ u16 hawkeye; /* Silicon type (Hawkeye id) */
+ u8 dev; /* Device type from production_id reg */
+ u32 type; /* combined type id copied to system_rev */
+};
+
+/* Register values to detect the OMAP version */
+static struct omap_id omap_ids[] __initdata = {
+ { .hawkeye = 0xb5d9, .dev = 0x0, .type = 0x24200000 },
+ { .hawkeye = 0xb5d9, .dev = 0x1, .type = 0x24201000 },
+ { .hawkeye = 0xb5d9, .dev = 0x2, .type = 0x24202000 },
+ { .hawkeye = 0xb5d9, .dev = 0x4, .type = 0x24220000 },
+ { .hawkeye = 0xb5d9, .dev = 0x8, .type = 0x24230000 },
+ { .hawkeye = 0xb68a, .dev = 0x0, .type = 0x24300000 },
+};
+
+static u32 __init read_tap_reg(int reg)
+{
+ return __raw_readl(OMAP24XX_TAP_BASE + reg);
+}
+
+void __init omap2_check_revision(void)
+{
+ int i, j;
+ u32 idcode;
+ u32 prod_id;
+ u16 hawkeye;
+ u8 dev_type;
+ u8 rev;
+
+ idcode = read_tap_reg(OMAP_TAP_IDCODE);
+ prod_id = read_tap_reg(OMAP_TAP_PROD_ID);
+ hawkeye = (idcode >> 12) & 0xffff;
+ rev = (idcode >> 28) & 0x0f;
+ dev_type = (prod_id >> 16) & 0x0f;
+
+#ifdef DEBUG
+ printk(KERN_DEBUG "OMAP_TAP_IDCODE 0x%08x REV %i HAWKEYE 0x%04x MANF %03x\n",
+ idcode, rev, hawkeye, (idcode >> 1) & 0x7ff);
+ printk(KERN_DEBUG "OMAP_TAP_DIE_ID_0: 0x%08x\n",
+ read_tap_reg(OMAP_TAP_DIE_ID_0));
+ printk(KERN_DEBUG "OMAP_TAP_DIE_ID_1: 0x%08x DEV_REV: %i\n",
+ read_tap_reg(OMAP_TAP_DIE_ID_1),
+ (read_tap_reg(OMAP_TAP_DIE_ID_1) >> 28) & 0xf);
+ printk(KERN_DEBUG "OMAP_TAP_DIE_ID_2: 0x%08x\n",
+ read_tap_reg(OMAP_TAP_DIE_ID_2));
+ printk(KERN_DEBUG "OMAP_TAP_DIE_ID_3: 0x%08x\n",
+ read_tap_reg(OMAP_TAP_DIE_ID_3));
+ printk(KERN_DEBUG "OMAP_TAP_PROD_ID_0: 0x%08x DEV_TYPE: %i\n",
+ prod_id, dev_type);
+#endif
+
+ /* Check hawkeye ids */
+ for (i = 0; i < ARRAY_SIZE(omap_ids); i++) {
+ if (hawkeye == omap_ids[i].hawkeye)
+ break;
+ }
+
+ if (i == ARRAY_SIZE(omap_ids)) {
+ printk(KERN_ERR "Unknown OMAP CPU id\n");
+ return;
+ }
+
+ for (j = i; j < ARRAY_SIZE(omap_ids); j++) {
+ if (dev_type == omap_ids[j].dev)
+ break;
+ }
+
+ if (j == ARRAY_SIZE(omap_ids)) {
+ printk(KERN_ERR "Unknown OMAP device type. "
+ "Handling it as OMAP%04x\n",
+ omap_ids[i].type >> 16);
+ j = i;
+ }
+ system_rev = omap_ids[j].type;
+
+ system_rev |= rev << 8;
+
+ /* Add the cpu class info (24xx) */
+ system_rev |= 0x24;
+
+ pr_info("OMAP%04x", system_rev >> 16);
+ if ((system_rev >> 8) & 0x0f)
+ printk("%x", (system_rev >> 8) & 0x0f);
+ printk("\n");
+}
+
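The layout documented at the top of the file (CPU id in bits [31:16], device type in [15:12], revision in [11:8], class in [7:0]) means later code can unpack system_rev with plain shifts and masks; a minimal illustrative sketch:

/* Illustrative only: unpack the fields that omap2_check_revision()
 * packs into system_rev. */
static void omap2_show_system_rev(u32 rev)
{
	unsigned int cpu_id   = rev >> 16;		/* e.g. 0x2420, 0x2430 */
	unsigned int dev_type = (rev >> 12) & 0x0f;	/* unprg/normal/POP */
	unsigned int cpu_rev  = (rev >> 8) & 0x0f;	/* silicon revision */
	unsigned int class    = rev & 0xff;		/* 0x24 for all 24xx */

	printk(KERN_DEBUG "OMAP%04x rev %x, type %x, class %02x\n",
	       cpu_id, cpu_rev, dev_type, class);
}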
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
new file mode 100644
index 00000000000..8ea67bf196a
--- /dev/null
+++ b/arch/arm/mach-omap2/io.c
@@ -0,0 +1,53 @@
+/*
+ * linux/arch/arm/mach-omap2/io.c
+ *
+ * OMAP2 I/O mapping code
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Author: Juha Yrjölä <juha.yrjola@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <asm/mach/map.h>
+#include <asm/io.h>
+#include <asm/arch/mux.h>
+
+extern void omap_sram_init(void);
+extern int omap2_clk_init(void);
+extern void omap2_check_revision(void);
+
+/*
+ * The machine-specific code may provide extra mappings in addition to the
+ * default mapping provided here.
+ */
+static struct map_desc omap2_io_desc[] __initdata = {
+ {
+ .virtual = L3_24XX_VIRT,
+ .pfn = __phys_to_pfn(L3_24XX_PHYS),
+ .length = L3_24XX_SIZE,
+ .type = MT_DEVICE
+ },
+ {
+ .virtual = L4_24XX_VIRT,
+ .pfn = __phys_to_pfn(L4_24XX_PHYS),
+ .length = L4_24XX_SIZE,
+ .type = MT_DEVICE
+ }
+};
+
+void __init omap_map_common_io(void)
+{
+ iotable_init(omap2_io_desc, ARRAY_SIZE(omap2_io_desc));
+ omap2_check_revision();
+ omap_sram_init();
+ omap2_mux_init();
+ omap2_clk_init();
+}
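As the comment above notes, board code can layer extra static mappings on top of this default table; a hedged sketch of what such an addition looks like in a board file (the virtual and physical addresses and the size are placeholders chosen only for illustration):

/* Hypothetical board-level mapping; addresses and size are placeholders. */
static struct map_desc board_io_desc[] __initdata = {
	{
		.virtual	= 0xe2000000,			/* placeholder */
		.pfn		= __phys_to_pfn(0x08000000),	/* placeholder */
		.length		= SZ_1M,
		.type		= MT_DEVICE
	}
};

static void __init board_map_io(void)
{
	omap_map_common_io();
	iotable_init(board_io_desc, ARRAY_SIZE(board_io_desc));
}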
diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c
new file mode 100644
index 00000000000..d7baff675cf
--- /dev/null
+++ b/arch/arm/mach-omap2/irq.c
@@ -0,0 +1,149 @@
+/*
+ * linux/arch/arm/mach-omap2/irq.c
+ *
+ * Interrupt handler for OMAP2 boards.
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Author: Paul Mundt <paul.mundt@nokia.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/config.h>
+#include <linux/interrupt.h>
+#include <asm/hardware.h>
+#include <asm/mach/irq.h>
+#include <asm/irq.h>
+#include <asm/io.h>
+
+#define INTC_REVISION 0x0000
+#define INTC_SYSCONFIG 0x0010
+#define INTC_SYSSTATUS 0x0014
+#define INTC_CONTROL 0x0048
+#define INTC_MIR_CLEAR0 0x0088
+#define INTC_MIR_SET0 0x008c
+
+/*
+ * OMAP2 has a number of different interrupt controllers, and each one is
+ * treated as its own "bank". Register definitions are fairly consistent
+ * across banks, but not every register is implemented in every bank; when
+ * in doubt, consult the TRM.
+ */
+static struct omap_irq_bank {
+ unsigned long base_reg;
+ unsigned int nr_irqs;
+} __attribute__ ((aligned(4))) irq_banks[] = {
+ {
+ /* MPU INTC */
+ .base_reg = OMAP24XX_IC_BASE,
+ .nr_irqs = 96,
+ }, {
+ /* XXX: DSP INTC */
+
+#if 0
+ /*
+ * Commented out for now until we fix the IVA clocking
+ */
+#ifdef CONFIG_ARCH_OMAP2420
+ }, {
+ /* IVA INTC (2420 only) */
+ .base_reg = OMAP24XX_IVA_INTC_BASE,
+ .nr_irqs = 16, /* Actually 32, but only 16 are used */
+#endif
+#endif
+ }
+};
+
+/* XXX: FIQ and additional INTC support (only MPU at the moment) */
+static void omap_ack_irq(unsigned int irq)
+{
+ omap_writel(0x1, irq_banks[0].base_reg + INTC_CONTROL);
+}
+
+static void omap_mask_irq(unsigned int irq)
+{
+ int offset = (irq >> 5) << 5;
+
+ if (irq >= 64) {
+ irq %= 64;
+ } else if (irq >= 32) {
+ irq %= 32;
+ }
+
+ omap_writel(1 << irq, irq_banks[0].base_reg + INTC_MIR_SET0 + offset);
+}
+
+static void omap_unmask_irq(unsigned int irq)
+{
+ int offset = (irq >> 5) << 5;
+
+ if (irq >= 64) {
+ irq %= 64;
+ } else if (irq >= 32) {
+ irq %= 32;
+ }
+
+ omap_writel(1 << irq, irq_banks[0].base_reg + INTC_MIR_CLEAR0 + offset);
+}
+
+static void omap_mask_ack_irq(unsigned int irq)
+{
+ omap_mask_irq(irq);
+ omap_ack_irq(irq);
+}
+
+static struct irqchip omap_irq_chip = {
+ .ack = omap_mask_ack_irq,
+ .mask = omap_mask_irq,
+ .unmask = omap_unmask_irq,
+};
+
+static void __init omap_irq_bank_init_one(struct omap_irq_bank *bank)
+{
+ unsigned long tmp;
+
+ tmp = omap_readl(bank->base_reg + INTC_REVISION) & 0xff;
+ printk(KERN_INFO "IRQ: Found an INTC at 0x%08lx "
+ "(revision %ld.%ld) with %d interrupts\n",
+ bank->base_reg, tmp >> 4, tmp & 0xf, bank->nr_irqs);
+
+ tmp = omap_readl(bank->base_reg + INTC_SYSCONFIG);
+ tmp |= 1 << 1; /* soft reset */
+ omap_writel(tmp, bank->base_reg + INTC_SYSCONFIG);
+
+ while (!(omap_readl(bank->base_reg + INTC_SYSSTATUS) & 0x1))
+ /* Wait for reset to complete */;
+}
+
+void __init omap_init_irq(void)
+{
+ unsigned long nr_irqs = 0;
+ unsigned int nr_banks = 0;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(irq_banks); i++) {
+ struct omap_irq_bank *bank = irq_banks + i;
+
+ /* XXX */
+ if (!bank->base_reg)
+ continue;
+
+ omap_irq_bank_init_one(bank);
+
+ nr_irqs += bank->nr_irqs;
+ nr_banks++;
+ }
+
+ printk(KERN_INFO "Total of %ld interrupts on %d active controller%s\n",
+ nr_irqs, nr_banks, nr_banks > 1 ? "s" : "");
+
+ for (i = 0; i < nr_irqs; i++) {
+ set_irq_chip(i, &omap_irq_chip);
+ set_irq_handler(i, do_level_IRQ);
+ set_irq_flags(i, IRQF_VALID);
+ }
+}
+
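The offset arithmetic in omap_mask_irq() and omap_unmask_irq() above relies on the MIR_SET/MIR_CLEAR registers repeating every 0x20 bytes, one pair per 32 interrupts (this matches the 0x088/0x0A8/0x0C8 spacing of the MIR_CLEAR registers in prcm.h). A worked example:

/* omap_unmask_irq(37):
 *   offset = (37 >> 5) << 5 = 0x20    -> INTC_MIR_CLEAR0 + 0x20 = MIR_CLEAR1
 *   37 >= 32, so irq %= 32            -> bit 5
 * i.e. the call writes (1 << 5) to the second bank's mask-clear register.
 */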
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
new file mode 100644
index 00000000000..ea4654815dd
--- /dev/null
+++ b/arch/arm/mach-omap2/mux.c
@@ -0,0 +1,65 @@
+/*
+ * linux/arch/arm/mach-omap2/mux.c
+ *
+ * OMAP2 pin multiplexing configurations
+ *
+ * Copyright (C) 2003 - 2005 Nokia Corporation
+ *
+ * Written by Tony Lindgren <tony.lindgren@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ */
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <linux/spinlock.h>
+
+#include <asm/arch/mux.h>
+
+#ifdef CONFIG_OMAP_MUX
+
+/* NOTE: See mux.h for the enumeration */
+
+struct pin_config __initdata_or_module omap24xx_pins[] = {
+/*
+ * description mux mux pull pull debug
+ * offset mode ena type
+ */
+
+/* 24xx I2C */
+MUX_CFG_24XX("M19_24XX_I2C1_SCL", 0x111, 0, 0, 0, 1)
+MUX_CFG_24XX("L15_24XX_I2C1_SDA", 0x112, 0, 0, 0, 1)
+MUX_CFG_24XX("J15_24XX_I2C2_SCL", 0x113, 0, 0, 0, 1)
+MUX_CFG_24XX("H19_24XX_I2C2_SDA", 0x114, 0, 0, 0, 1)
+
+/* Menelaus interrupt */
+MUX_CFG_24XX("W19_24XX_SYS_NIRQ", 0x12c, 0, 1, 1, 1)
+
+/* 24xx GPIO */
+MUX_CFG_24XX("Y20_24XX_GPIO60", 0x12c, 3, 0, 0, 1)
+MUX_CFG_24XX("M15_24XX_GPIO92", 0x10a, 3, 0, 0, 1)
+
+};
+
+int __init omap2_mux_init(void)
+{
+ omap_mux_register(omap24xx_pins, ARRAY_SIZE(omap24xx_pins));
+ return 0;
+}
+
+#endif
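Adding a pin means adding one MUX_CFG_24XX() line to the table above plus the matching enumerator in mux.h, and then calling omap_cfg_reg() at init time the way devices.c does. A hedged sketch; the name, register offset and mode below are placeholders, not a real 24xx pad configuration:

/* Hypothetical new table entry (offset 0x0e8 and mode 3 are placeholders) */
MUX_CFG_24XX("P20_24XX_GPIO93",		0x0e8,	3,	0,	0,	1)

/* ...and in board init code: */
omap_cfg_reg(P20_24XX_GPIO93);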
diff --git a/arch/arm/mach-omap2/prcm.h b/arch/arm/mach-omap2/prcm.h
new file mode 100644
index 00000000000..2eb89b936c8
--- /dev/null
+++ b/arch/arm/mach-omap2/prcm.h
@@ -0,0 +1,419 @@
+/*
+ * prcm.h - Access definitions for use in OMAP24XX clock and power management
+ *
+ * Copyright (C) 2005 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ASM_ARM_ARCH_DPM_PRCM_H
+#define __ASM_ARM_ARCH_DPM_PRCM_H
+
+/* SET_PERFORMANCE_LEVEL PARAMETERS */
+#define PRCM_HALF_SPEED 1
+#define PRCM_FULL_SPEED 2
+
+#ifndef __ASSEMBLER__
+
+#define PRCM_REG32(offset) __REG32(OMAP24XX_PRCM_BASE + (offset))
+
+#define PRCM_REVISION PRCM_REG32(0x000)
+#define PRCM_SYSCONFIG PRCM_REG32(0x010)
+#define PRCM_IRQSTATUS_MPU PRCM_REG32(0x018)
+#define PRCM_IRQENABLE_MPU PRCM_REG32(0x01C)
+#define PRCM_VOLTCTRL PRCM_REG32(0x050)
+#define PRCM_VOLTST PRCM_REG32(0x054)
+#define PRCM_CLKSRC_CTRL PRCM_REG32(0x060)
+#define PRCM_CLKOUT_CTRL PRCM_REG32(0x070)
+#define PRCM_CLKEMUL_CTRL PRCM_REG32(0x078)
+#define PRCM_CLKCFG_CTRL PRCM_REG32(0x080)
+#define PRCM_CLKCFG_STATUS PRCM_REG32(0x084)
+#define PRCM_VOLTSETUP PRCM_REG32(0x090)
+#define PRCM_CLKSSETUP PRCM_REG32(0x094)
+#define PRCM_POLCTRL PRCM_REG32(0x098)
+
+/* GENERAL PURPOSE */
+#define GENERAL_PURPOSE1 PRCM_REG32(0x0B0)
+#define GENERAL_PURPOSE2 PRCM_REG32(0x0B4)
+#define GENERAL_PURPOSE3 PRCM_REG32(0x0B8)
+#define GENERAL_PURPOSE4 PRCM_REG32(0x0BC)
+#define GENERAL_PURPOSE5 PRCM_REG32(0x0C0)
+#define GENERAL_PURPOSE6 PRCM_REG32(0x0C4)
+#define GENERAL_PURPOSE7 PRCM_REG32(0x0C8)
+#define GENERAL_PURPOSE8 PRCM_REG32(0x0CC)
+#define GENERAL_PURPOSE9 PRCM_REG32(0x0D0)
+#define GENERAL_PURPOSE10 PRCM_REG32(0x0D4)
+#define GENERAL_PURPOSE11 PRCM_REG32(0x0D8)
+#define GENERAL_PURPOSE12 PRCM_REG32(0x0DC)
+#define GENERAL_PURPOSE13 PRCM_REG32(0x0E0)
+#define GENERAL_PURPOSE14 PRCM_REG32(0x0E4)
+#define GENERAL_PURPOSE15 PRCM_REG32(0x0E8)
+#define GENERAL_PURPOSE16 PRCM_REG32(0x0EC)
+#define GENERAL_PURPOSE17 PRCM_REG32(0x0F0)
+#define GENERAL_PURPOSE18 PRCM_REG32(0x0F4)
+#define GENERAL_PURPOSE19 PRCM_REG32(0x0F8)
+#define GENERAL_PURPOSE20 PRCM_REG32(0x0FC)
+
+/* MPU */
+#define CM_CLKSEL_MPU PRCM_REG32(0x140)
+#define CM_CLKSTCTRL_MPU PRCM_REG32(0x148)
+#define RM_RSTST_MPU PRCM_REG32(0x158)
+#define PM_WKDEP_MPU PRCM_REG32(0x1C8)
+#define PM_EVGENCTRL_MPU PRCM_REG32(0x1D4)
+#define PM_EVEGENONTIM_MPU PRCM_REG32(0x1D8)
+#define PM_EVEGENOFFTIM_MPU PRCM_REG32(0x1DC)
+#define PM_PWSTCTRL_MPU PRCM_REG32(0x1E0)
+#define PM_PWSTST_MPU PRCM_REG32(0x1E4)
+
+/* CORE */
+#define CM_FCLKEN1_CORE PRCM_REG32(0x200)
+#define CM_FCLKEN2_CORE PRCM_REG32(0x204)
+#define CM_FCLKEN3_CORE PRCM_REG32(0x208)
+#define CM_ICLKEN1_CORE PRCM_REG32(0x210)
+#define CM_ICLKEN2_CORE PRCM_REG32(0x214)
+#define CM_ICLKEN3_CORE PRCM_REG32(0x218)
+#define CM_ICLKEN4_CORE PRCM_REG32(0x21C)
+#define CM_IDLEST1_CORE PRCM_REG32(0x220)
+#define CM_IDLEST2_CORE PRCM_REG32(0x224)
+#define CM_IDLEST3_CORE PRCM_REG32(0x228)
+#define CM_IDLEST4_CORE PRCM_REG32(0x22C)
+#define CM_AUTOIDLE1_CORE PRCM_REG32(0x230)
+#define CM_AUTOIDLE2_CORE PRCM_REG32(0x234)
+#define CM_AUTOIDLE3_CORE PRCM_REG32(0x238)
+#define CM_AUTOIDLE4_CORE PRCM_REG32(0x23C)
+#define CM_CLKSEL1_CORE PRCM_REG32(0x240)
+#define CM_CLKSEL2_CORE PRCM_REG32(0x244)
+#define CM_CLKSTCTRL_CORE PRCM_REG32(0x248)
+#define PM_WKEN1_CORE PRCM_REG32(0x2A0)
+#define PM_WKEN2_CORE PRCM_REG32(0x2A4)
+#define PM_WKST1_CORE PRCM_REG32(0x2B0)
+#define PM_WKST2_CORE PRCM_REG32(0x2B4)
+#define PM_WKDEP_CORE PRCM_REG32(0x2C8)
+#define PM_PWSTCTRL_CORE PRCM_REG32(0x2E0)
+#define PM_PWSTST_CORE PRCM_REG32(0x2E4)
+
+/* GFX */
+#define CM_FCLKEN_GFX PRCM_REG32(0x300)
+#define CM_ICLKEN_GFX PRCM_REG32(0x310)
+#define CM_IDLEST_GFX PRCM_REG32(0x320)
+#define CM_CLKSEL_GFX PRCM_REG32(0x340)
+#define CM_CLKSTCTRL_GFX PRCM_REG32(0x348)
+#define RM_RSTCTRL_GFX PRCM_REG32(0x350)
+#define RM_RSTST_GFX PRCM_REG32(0x358)
+#define PM_WKDEP_GFX PRCM_REG32(0x3C8)
+#define PM_PWSTCTRL_GFX PRCM_REG32(0x3E0)
+#define PM_PWSTST_GFX PRCM_REG32(0x3E4)
+
+/* WAKE-UP */
+#define CM_FCLKEN_WKUP PRCM_REG32(0x400)
+#define CM_ICLKEN_WKUP PRCM_REG32(0x410)
+#define CM_IDLEST_WKUP PRCM_REG32(0x420)
+#define CM_AUTOIDLE_WKUP PRCM_REG32(0x430)
+#define CM_CLKSEL_WKUP PRCM_REG32(0x440)
+#define RM_RSTCTRL_WKUP PRCM_REG32(0x450)
+#define RM_RSTTIME_WKUP PRCM_REG32(0x454)
+#define RM_RSTST_WKUP PRCM_REG32(0x458)
+#define PM_WKEN_WKUP PRCM_REG32(0x4A0)
+#define PM_WKST_WKUP PRCM_REG32(0x4B0)
+
+/* CLOCKS */
+#define CM_CLKEN_PLL PRCM_REG32(0x500)
+#define CM_IDLEST_CKGEN PRCM_REG32(0x520)
+#define CM_AUTOIDLE_PLL PRCM_REG32(0x530)
+#define CM_CLKSEL1_PLL PRCM_REG32(0x540)
+#define CM_CLKSEL2_PLL PRCM_REG32(0x544)
+
+/* DSP */
+#define CM_FCLKEN_DSP PRCM_REG32(0x800)
+#define CM_ICLKEN_DSP PRCM_REG32(0x810)
+#define CM_IDLEST_DSP PRCM_REG32(0x820)
+#define CM_AUTOIDLE_DSP PRCM_REG32(0x830)
+#define CM_CLKSEL_DSP PRCM_REG32(0x840)
+#define CM_CLKSTCTRL_DSP PRCM_REG32(0x848)
+#define RM_RSTCTRL_DSP PRCM_REG32(0x850)
+#define RM_RSTST_DSP PRCM_REG32(0x858)
+#define PM_WKEN_DSP PRCM_REG32(0x8A0)
+#define PM_WKDEP_DSP PRCM_REG32(0x8C8)
+#define PM_PWSTCTRL_DSP PRCM_REG32(0x8E0)
+#define PM_PWSTST_DSP PRCM_REG32(0x8E4)
+#define PRCM_IRQSTATUS_DSP PRCM_REG32(0x8F0)
+#define PRCM_IRQENABLE_DSP PRCM_REG32(0x8F4)
+
+/* IVA */
+#define PRCM_IRQSTATUS_IVA PRCM_REG32(0x8F8)
+#define PRCM_IRQENABLE_IVA PRCM_REG32(0x8FC)
+
+/* Modem on 2430 */
+#define CM_FCLKEN_MDM PRCM_REG32(0xC00)
+#define CM_ICLKEN_MDM PRCM_REG32(0xC10)
+#define CM_IDLEST_MDM PRCM_REG32(0xC20)
+#define CM_CLKSEL_MDM PRCM_REG32(0xC40)
+
+/* FIXME: Move to header for 2430 */
+#define DISP_BASE (OMAP24XX_L4_IO_BASE+0x50000)
+#define DISP_REG32(offset) __REG32(DISP_BASE + (offset))
+
+#define GPMC_BASE (OMAP24XX_GPMC_BASE)
+#define GPMC_REG32(offset) __REG32(GPMC_BASE + (offset))
+
+#define GPT1_BASE (OMAP24XX_GPT1)
+#define GPT1_REG32(offset) __REG32(GPT1_BASE + (offset))
+
+/* Misc sysconfig */
+#define DISPC_SYSCONFIG DISP_REG32(0x410)
+#define SPI_BASE (OMAP24XX_L4_IO_BASE+0x98000)
+#define MCSPI1_SYSCONFIG __REG32(SPI_BASE + 0x10)
+#define MCSPI2_SYSCONFIG __REG32(SPI_BASE+0x2000 + 0x10)
+
+//#define DSP_MMU_SYSCONFIG 0x5A000010
+#define CAMERA_MMU_SYSCONFIG __REG32(DISP_BASE+0x2C10)
+//#define IVA_MMU_SYSCONFIG 0x5D000010
+//#define DSP_DMA_SYSCONFIG 0x00FCC02C
+#define CAMERA_DMA_SYSCONFIG __REG32(DISP_BASE+0x282C)
+#define SYSTEM_DMA_SYSCONFIG __REG32(DISP_BASE+0x602C)
+#define GPMC_SYSCONFIG GPMC_REG32(0x010)
+#define MAILBOXES_SYSCONFIG __REG32(OMAP24XX_L4_IO_BASE+0x94010)
+#define UART1_SYSCONFIG __REG32(OMAP24XX_L4_IO_BASE+0x6A054)
+#define UART2_SYSCONFIG __REG32(OMAP24XX_L4_IO_BASE+0x6C054)
+#define UART3_SYSCONFIG __REG32(OMAP24XX_L4_IO_BASE+0x6E054)
+//#define IVA_SYSCONFIG 0x5C060010
+#define SDRC_SYSCONFIG __REG32(OMAP24XX_SDRC_BASE+0x10)
+#define SMS_SYSCONFIG __REG32(OMAP24XX_SMS_BASE+0x10)
+#define SSI_SYSCONFIG __REG32(DISP_BASE+0x8010)
+//#define VLYNQ_SYSCONFIG 0x67FFFE10
+
+/* rkw - good candidates for PM_ to start what nm was trying */
+#define OMAP24XX_GPT2 (OMAP24XX_L4_IO_BASE+0x2A000)
+#define OMAP24XX_GPT3 (OMAP24XX_L4_IO_BASE+0x78000)
+#define OMAP24XX_GPT4 (OMAP24XX_L4_IO_BASE+0x7A000)
+#define OMAP24XX_GPT5 (OMAP24XX_L4_IO_BASE+0x7C000)
+#define OMAP24XX_GPT6 (OMAP24XX_L4_IO_BASE+0x7E000)
+#define OMAP24XX_GPT7 (OMAP24XX_L4_IO_BASE+0x80000)
+#define OMAP24XX_GPT8 (OMAP24XX_L4_IO_BASE+0x82000)
+#define OMAP24XX_GPT9 (OMAP24XX_L4_IO_BASE+0x84000)
+#define OMAP24XX_GPT10 (OMAP24XX_L4_IO_BASE+0x86000)
+#define OMAP24XX_GPT11 (OMAP24XX_L4_IO_BASE+0x88000)
+#define OMAP24XX_GPT12 (OMAP24XX_L4_IO_BASE+0x8A000)
+
+#define GPTIMER1_SYSCONFIG GPT1_REG32(0x010)
+#define GPTIMER2_SYSCONFIG __REG32(OMAP24XX_GPT2 + 0x10)
+#define GPTIMER3_SYSCONFIG __REG32(OMAP24XX_GPT3 + 0x10)
+#define GPTIMER4_SYSCONFIG __REG32(OMAP24XX_GPT4 + 0x10)
+#define GPTIMER5_SYSCONFIG __REG32(OMAP24XX_GPT5 + 0x10)
+#define GPTIMER6_SYSCONFIG __REG32(OMAP24XX_GPT6 + 0x10)
+#define GPTIMER7_SYSCONFIG __REG32(OMAP24XX_GPT7 + 0x10)
+#define GPTIMER8_SYSCONFIG __REG32(OMAP24XX_GPT8 + 0x10)
+#define GPTIMER9_SYSCONFIG __REG32(OMAP24XX_GPT9 + 0x10)
+#define GPTIMER10_SYSCONFIG __REG32(OMAP24XX_GPT10 + 0x10)
+#define GPTIMER11_SYSCONFIG __REG32(OMAP24XX_GPT11 + 0x10)
+#define GPTIMER12_SYSCONFIG __REG32(OMAP24XX_GPT12 + 0x10)
+
+#define GPIOX_BASE(X) (OMAP24XX_GPIO_BASE+(0x2000*((X)-1)))
+
+#define GPIO1_SYSCONFIG __REG32((GPIOX_BASE(1)+0x10))
+#define GPIO2_SYSCONFIG __REG32((GPIOX_BASE(2)+0x10))
+#define GPIO3_SYSCONFIG __REG32((GPIOX_BASE(3)+0x10))
+#define GPIO4_SYSCONFIG __REG32((GPIOX_BASE(4)+0x10))
+
+/* GP TIMER 1 */
+#define GPTIMER1_TISTAT GPT1_REG32(0x014)
+#define GPTIMER1_TISR GPT1_REG32(0x018)
+#define GPTIMER1_TIER GPT1_REG32(0x01C)
+#define GPTIMER1_TWER GPT1_REG32(0x020)
+#define GPTIMER1_TCLR GPT1_REG32(0x024)
+#define GPTIMER1_TCRR GPT1_REG32(0x028)
+#define GPTIMER1_TLDR GPT1_REG32(0x02C)
+#define GPTIMER1_TTGR GPT1_REG32(0x030)
+#define GPTIMER1_TWPS GPT1_REG32(0x034)
+#define GPTIMER1_TMAR GPT1_REG32(0x038)
+#define GPTIMER1_TCAR1 GPT1_REG32(0x03C)
+#define GPTIMER1_TSICR GPT1_REG32(0x040)
+#define GPTIMER1_TCAR2 GPT1_REG32(0x044)
+
+/* rkw -- base fix up please... */
+#define GPTIMER3_TISR __REG32(OMAP24XX_L4_IO_BASE+0x78018)
+
+/* SDRC */
+#define SDRC_DLLA_CTRL __REG32(OMAP24XX_SDRC_BASE+0x060)
+#define SDRC_DLLA_STATUS __REG32(OMAP24XX_SDRC_BASE+0x064)
+#define SDRC_DLLB_CTRL __REG32(OMAP24XX_SDRC_BASE+0x068)
+#define SDRC_DLLB_STATUS __REG32(OMAP24XX_SDRC_BASE+0x06C)
+#define SDRC_POWER __REG32(OMAP24XX_SDRC_BASE+0x070)
+#define SDRC_MR_0 __REG32(OMAP24XX_SDRC_BASE+0x084)
+
+/* GPIO 1 */
+#define GPIO1_BASE GPIOX_BASE(1)
+#define GPIO1_REG32(offset) __REG32(GPIO1_BASE + (offset))
+#define GPIO1_IRQENABLE1 GPIO1_REG32(0x01C)
+#define GPIO1_IRQSTATUS1 GPIO1_REG32(0x018)
+#define GPIO1_IRQENABLE2 GPIO1_REG32(0x02C)
+#define GPIO1_IRQSTATUS2 GPIO1_REG32(0x028)
+#define GPIO1_WAKEUPENABLE GPIO1_REG32(0x020)
+#define GPIO1_RISINGDETECT GPIO1_REG32(0x048)
+#define GPIO1_DATAIN GPIO1_REG32(0x038)
+#define GPIO1_OE GPIO1_REG32(0x034)
+#define GPIO1_DATAOUT GPIO1_REG32(0x03C)
+
+/* GPIO2 */
+#define GPIO2_BASE GPIOX_BASE(2)
+#define GPIO2_REG32(offset) __REG32(GPIO2_BASE + (offset))
+#define GPIO2_IRQENABLE1 GPIO2_REG32(0x01C)
+#define GPIO2_IRQSTATUS1 GPIO2_REG32(0x018)
+#define GPIO2_IRQENABLE2 GPIO2_REG32(0x02C)
+#define GPIO2_IRQSTATUS2 GPIO2_REG32(0x028)
+#define GPIO2_WAKEUPENABLE GPIO2_REG32(0x020)
+#define GPIO2_RISINGDETECT GPIO2_REG32(0x048)
+#define GPIO2_DATAIN GPIO2_REG32(0x038)
+#define GPIO2_OE GPIO2_REG32(0x034)
+#define GPIO2_DATAOUT GPIO2_REG32(0x03C)
+
+/* GPIO 3 */
+#define GPIO3_BASE GPIOX_BASE(3)
+#define GPIO3_REG32(offset) __REG32(GPIO3_BASE + (offset))
+#define GPIO3_IRQENABLE1 GPIO3_REG32(0x01C)
+#define GPIO3_IRQSTATUS1 GPIO3_REG32(0x018)
+#define GPIO3_IRQENABLE2 GPIO3_REG32(0x02C)
+#define GPIO3_IRQSTATUS2 GPIO3_REG32(0x028)
+#define GPIO3_WAKEUPENABLE GPIO3_REG32(0x020)
+#define GPIO3_RISINGDETECT GPIO3_REG32(0x048)
+#define GPIO3_FALLINGDETECT GPIO3_REG32(0x04C)
+#define GPIO3_DATAIN GPIO3_REG32(0x038)
+#define GPIO3_OE GPIO3_REG32(0x034)
+#define GPIO3_DATAOUT GPIO3_REG32(0x03C)
+#define GPIO3_DEBOUNCENABLE GPIO3_REG32(0x050)
+#define GPIO3_DEBOUNCINGTIME GPIO3_REG32(0x054)
+
+/* GPIO 4 */
+#define GPIO4_BASE GPIOX_BASE(4)
+#define GPIO4_REG32(offset) __REG32(GPIO4_BASE + (offset))
+#define GPIO4_IRQENABLE1 GPIO4_REG32(0x01C)
+#define GPIO4_IRQSTATUS1 GPIO4_REG32(0x018)
+#define GPIO4_IRQENABLE2 GPIO4_REG32(0x02C)
+#define GPIO4_IRQSTATUS2 GPIO4_REG32(0x028)
+#define GPIO4_WAKEUPENABLE GPIO4_REG32(0x020)
+#define GPIO4_RISINGDETECT GPIO4_REG32(0x048)
+#define GPIO4_FALLINGDETECT GPIO4_REG32(0x04C)
+#define GPIO4_DATAIN GPIO4_REG32(0x038)
+#define GPIO4_OE GPIO4_REG32(0x034)
+#define GPIO4_DATAOUT GPIO4_REG32(0x03C)
+#define GPIO4_DEBOUNCENABLE GPIO4_REG32(0x050)
+#define GPIO4_DEBOUNCINGTIME GPIO4_REG32(0x054)
+
+
+/* IO CONFIG */
+#define CONTROL_BASE (OMAP24XX_CTRL_BASE)
+#define CONTROL_REG32(offset) __REG32(CONTROL_BASE + (offset))
+
+#define CONTROL_PADCONF_SPI1_NCS2 CONTROL_REG32(0x104)
+#define CONTROL_PADCONF_SYS_XTALOUT CONTROL_REG32(0x134)
+#define CONTROL_PADCONF_UART1_RX CONTROL_REG32(0x0C8)
+#define CONTROL_PADCONF_MCBSP1_DX CONTROL_REG32(0x10C)
+#define CONTROL_PADCONF_GPMC_NCS4 CONTROL_REG32(0x090)
+#define CONTROL_PADCONF_DSS_D5 CONTROL_REG32(0x0B8)
+#define CONTROL_PADCONF_DSS_D9 CONTROL_REG32(0x0BC)
+#define CONTROL_PADCONF_DSS_D13 CONTROL_REG32(0x0C0)
+#define CONTROL_PADCONF_DSS_VSYNC CONTROL_REG32(0x0CC)
+
+/* CONTROL */
+#define CONTROL_DEVCONF CONTROL_REG32(0x274)
+
+/* INTERRUPT CONTROLLER */
+#define INTC_BASE (OMAP24XX_L4_IO_BASE+0xfe000)
+#define INTC_REG32(offset) __REG32(INTC_BASE + (offset))
+
+#define INTC1_U_BASE INTC_REG32(0x000)
+#define INTC_MIR0 INTC_REG32(0x084)
+#define INTC_MIR_SET0 INTC_REG32(0x08C)
+#define INTC_MIR_CLEAR0 INTC_REG32(0x088)
+#define INTC_ISR_CLEAR0 INTC_REG32(0x094)
+#define INTC_MIR1 INTC_REG32(0x0A4)
+#define INTC_MIR_SET1 INTC_REG32(0x0AC)
+#define INTC_MIR_CLEAR1 INTC_REG32(0x0A8)
+#define INTC_ISR_CLEAR1 INTC_REG32(0x0B4)
+#define INTC_MIR2 INTC_REG32(0x0C4)
+#define INTC_MIR_SET2 INTC_REG32(0x0CC)
+#define INTC_MIR_CLEAR2 INTC_REG32(0x0C8)
+#define INTC_ISR_CLEAR2 INTC_REG32(0x0D4)
+#define INTC_SIR_IRQ INTC_REG32(0x040)
+#define INTC_CONTROL INTC_REG32(0x048)
+#define INTC_ILR11 INTC_REG32(0x12C)
+#define INTC_ILR32 INTC_REG32(0x180)
+#define INTC_ILR37 INTC_REG32(0x194)
+#define INTC_SYSCONFIG INTC_REG32(0x010)
+
+/* RAM FIREWALL */
+#define RAMFW_BASE (0x68005000)
+#define RAMFW_REG32(offset) __REG32(RAMFW_BASE + (offset))
+
+#define RAMFW_REQINFOPERM0 RAMFW_REG32(0x048)
+#define RAMFW_READPERM0 RAMFW_REG32(0x050)
+#define RAMFW_WRITEPERM0 RAMFW_REG32(0x058)
+
+/* GPMC CS1 FPGA ON USER INTERFACE MODULE */
+//#define DEBUG_BOARD_LED_REGISTER 0x04000014
+
+/* GPMC CS0 */
+#define GPMC_CONFIG1_0 GPMC_REG32(0x060)
+#define GPMC_CONFIG2_0 GPMC_REG32(0x064)
+#define GPMC_CONFIG3_0 GPMC_REG32(0x068)
+#define GPMC_CONFIG4_0 GPMC_REG32(0x06C)
+#define GPMC_CONFIG5_0 GPMC_REG32(0x070)
+#define GPMC_CONFIG6_0 GPMC_REG32(0x074)
+#define GPMC_CONFIG7_0 GPMC_REG32(0x078)
+
+/* DSS */
+#define DSS_CONTROL DISP_REG32(0x040)
+#define DISPC_CONTROL DISP_REG32(0x440)
+#define DISPC_SYSSTATUS DISP_REG32(0x414)
+#define DISPC_IRQSTATUS DISP_REG32(0x418)
+#define DISPC_IRQENABLE DISP_REG32(0x41C)
+#define DISPC_CONFIG DISP_REG32(0x444)
+#define DISPC_DEFAULT_COLOR0 DISP_REG32(0x44C)
+#define DISPC_DEFAULT_COLOR1 DISP_REG32(0x450)
+#define DISPC_TRANS_COLOR0 DISP_REG32(0x454)
+#define DISPC_TRANS_COLOR1 DISP_REG32(0x458)
+#define DISPC_LINE_NUMBER DISP_REG32(0x460)
+#define DISPC_TIMING_H DISP_REG32(0x464)
+#define DISPC_TIMING_V DISP_REG32(0x468)
+#define DISPC_POL_FREQ DISP_REG32(0x46C)
+#define DISPC_DIVISOR DISP_REG32(0x470)
+#define DISPC_SIZE_DIG DISP_REG32(0x478)
+#define DISPC_SIZE_LCD DISP_REG32(0x47C)
+#define DISPC_GFX_BA0 DISP_REG32(0x480)
+#define DISPC_GFX_BA1 DISP_REG32(0x484)
+#define DISPC_GFX_POSITION DISP_REG32(0x488)
+#define DISPC_GFX_SIZE DISP_REG32(0x48C)
+#define DISPC_GFX_ATTRIBUTES DISP_REG32(0x4A0)
+#define DISPC_GFX_FIFO_THRESHOLD DISP_REG32(0x4A4)
+#define DISPC_GFX_ROW_INC DISP_REG32(0x4AC)
+#define DISPC_GFX_PIXEL_INC DISP_REG32(0x4B0)
+#define DISPC_GFX_WINDOW_SKIP DISP_REG32(0x4B4)
+#define DISPC_GFX_TABLE_BA DISP_REG32(0x4B8)
+#define DISPC_DATA_CYCLE1 DISP_REG32(0x5D4)
+#define DISPC_DATA_CYCLE2 DISP_REG32(0x5D8)
+#define DISPC_DATA_CYCLE3 DISP_REG32(0x5DC)
+
+/* Wake up define for board */
+#define GPIO97 (1 << 1)
+#define GPIO88 (1 << 24)
+
+#endif /* __ASSEMBLER__ */
+
+#endif
+
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
new file mode 100644
index 00000000000..f4df04fe1dd
--- /dev/null
+++ b/arch/arm/mach-omap2/serial.c
@@ -0,0 +1,180 @@
+/*
+ * arch/arm/mach-omap2/serial.c
+ *
+ * OMAP2 serial support.
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Author: Paul Mundt <paul.mundt@nokia.com>
+ *
+ * Based on arch/arm/mach-omap1/serial.c
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/serial_8250.h>
+#include <linux/serial_reg.h>
+
+#include <asm/io.h>
+#include <asm/hardware/clock.h>
+
+#include <asm/arch/common.h>
+#include <asm/arch/board.h>
+
+static struct clk * uart1_ick = NULL;
+static struct clk * uart1_fck = NULL;
+static struct clk * uart2_ick = NULL;
+static struct clk * uart2_fck = NULL;
+static struct clk * uart3_ick = NULL;
+static struct clk * uart3_fck = NULL;
+
+static struct plat_serial8250_port serial_platform_data[] = {
+ {
+ .membase = (char *)IO_ADDRESS(OMAP_UART1_BASE),
+ .mapbase = (unsigned long)OMAP_UART1_BASE,
+ .irq = 72,
+ .flags = UPF_BOOT_AUTOCONF,
+ .iotype = UPIO_MEM,
+ .regshift = 2,
+ .uartclk = OMAP16XX_BASE_BAUD * 16,
+ }, {
+ .membase = (char *)IO_ADDRESS(OMAP_UART2_BASE),
+ .mapbase = (unsigned long)OMAP_UART2_BASE,
+ .irq = 73,
+ .flags = UPF_BOOT_AUTOCONF,
+ .iotype = UPIO_MEM,
+ .regshift = 2,
+ .uartclk = OMAP16XX_BASE_BAUD * 16,
+ }, {
+ .membase = (char *)IO_ADDRESS(OMAP_UART3_BASE),
+ .mapbase = (unsigned long)OMAP_UART3_BASE,
+ .irq = 74,
+ .flags = UPF_BOOT_AUTOCONF,
+ .iotype = UPIO_MEM,
+ .regshift = 2,
+ .uartclk = OMAP16XX_BASE_BAUD * 16,
+ }, {
+ .flags = 0
+ }
+};
+
+static inline unsigned int serial_read_reg(struct plat_serial8250_port *up,
+ int offset)
+{
+ offset <<= up->regshift;
+ return (unsigned int)__raw_readb(up->membase + offset);
+}
+
+static inline void serial_write_reg(struct plat_serial8250_port *p, int offset,
+ int value)
+{
+ offset <<= p->regshift;
+ __raw_writeb(value, (unsigned long)(p->membase + offset));
+}
+
+/*
+ * Internal UARTs need to be initialized for the 8250 autoconfig to work
+ * properly. Note that the TX watermark initialization may not be needed
+ * once the 8250.c watermark handling code is merged.
+ */
+static inline void __init omap_serial_reset(struct plat_serial8250_port *p)
+{
+ serial_write_reg(p, UART_OMAP_MDR1, 0x07);
+ serial_write_reg(p, UART_OMAP_SCR, 0x08);
+ serial_write_reg(p, UART_OMAP_MDR1, 0x00);
+ serial_write_reg(p, UART_OMAP_SYSC, 0x01);
+}
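For reference, and stated as an assumption about the OMAP UART register semantics rather than something spelled out in this patch: MDR1 = 0x07 selects the disabled mode and MDR1 = 0x00 selects plain UART 16x mode, SCR = 0x08 sets the TX-empty interrupt control bit that gives the TX watermark behaviour mentioned above, and SYSC = 0x01 enables autoidle.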
+
+void __init omap_serial_init(void)
+{
+ int i;
+ const struct omap_uart_config *info;
+
+ /*
+ * Make sure the serial ports are muxed on at this point.
+ * You have to mux them off in device drivers later on
+ * if not needed.
+ */
+
+ info = omap_get_config(OMAP_TAG_UART,
+ struct omap_uart_config);
+
+ if (info == NULL)
+ return;
+
+ for (i = 0; i < OMAP_MAX_NR_PORTS; i++) {
+ struct plat_serial8250_port *p = serial_platform_data + i;
+
+ if (!(info->enabled_uarts & (1 << i))) {
+ p->membase = 0;
+ p->mapbase = 0;
+ continue;
+ }
+
+ switch (i) {
+ case 0:
+ uart1_ick = clk_get(NULL, "uart1_ick");
+ if (IS_ERR(uart1_ick))
+ printk("Could not get uart1_ick\n");
+ else {
+ clk_use(uart1_ick);
+ }
+
+ uart1_fck = clk_get(NULL, "uart1_fck");
+ if (IS_ERR(uart1_fck))
+ printk("Could not get uart1_fck\n");
+ else {
+ clk_use(uart1_fck);
+ }
+ break;
+ case 1:
+ uart2_ick = clk_get(NULL, "uart2_ick");
+ if (IS_ERR(uart2_ick))
+ printk("Could not get uart2_ick\n");
+ else {
+ clk_use(uart2_ick);
+ }
+
+ uart2_fck = clk_get(NULL, "uart2_fck");
+ if (IS_ERR(uart2_fck))
+ printk("Could not get uart2_fck\n");
+ else {
+ clk_use(uart2_fck);
+ }
+ break;
+ case 2:
+ uart3_ick = clk_get(NULL, "uart3_ick");
+ if (IS_ERR(uart3_ick))
+ printk("Could not get uart3_ick\n");
+ else {
+ clk_use(uart3_ick);
+ }
+
+ uart3_fck = clk_get(NULL, "uart3_fck");
+ if (IS_ERR(uart3_fck))
+ printk("Could not get uart3_fck\n");
+ else {
+ clk_use(uart3_fck);
+ }
+ break;
+ }
+
+ omap_serial_reset(p);
+ }
+}
+
+static struct platform_device serial_device = {
+ .name = "serial8250",
+ .id = 0,
+ .dev = {
+ .platform_data = serial_platform_data,
+ },
+};
+
+static int __init omap_init(void)
+{
+ return platform_device_register(&serial_device);
+}
+arch_initcall(omap_init);
diff --git a/arch/arm/mach-omap2/sram-fn.S b/arch/arm/mach-omap2/sram-fn.S
new file mode 100644
index 00000000000..2a869e20334
--- /dev/null
+++ b/arch/arm/mach-omap2/sram-fn.S
@@ -0,0 +1,333 @@
+/*
+ * linux/arch/arm/mach-omap2/sram-fn.S
+ *
+ * Omap2 specific functions that need to be run in internal SRAM
+ *
+ * (C) Copyright 2004
+ * Texas Instruments, <www.ti.com>
+ * Richard Woodruff <r-woodruff2@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of
+ * the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
+ * MA 02111-1307 USA
+ */
+#include <linux/config.h>
+#include <linux/linkage.h>
+#include <asm/assembler.h>
+#include <asm/arch/io.h>
+#include <asm/hardware.h>
+
+#include <asm/arch/prcm.h>
+
+#define TIMER_32KSYNCT_CR_V IO_ADDRESS(OMAP24XX_32KSYNCT_BASE + 0x010)
+
+#define CM_CLKSEL2_PLL_V IO_ADDRESS(OMAP24XX_PRCM_BASE + 0x544)
+#define PRCM_VOLTCTRL_V IO_ADDRESS(OMAP24XX_PRCM_BASE + 0x050)
+#define PRCM_CLKCFG_CTRL_V IO_ADDRESS(OMAP24XX_PRCM_BASE + 0x080)
+#define CM_CLKEN_PLL_V IO_ADDRESS(OMAP24XX_PRCM_BASE + 0x500)
+#define CM_IDLEST_CKGEN_V IO_ADDRESS(OMAP24XX_PRCM_BASE + 0x520)
+#define CM_CLKSEL1_PLL_V IO_ADDRESS(OMAP24XX_PRCM_BASE + 0x540)
+
+#define SDRC_DLLA_CTRL_V IO_ADDRESS(OMAP24XX_SDRC_BASE + 0x060)
+#define SDRC_RFR_CTRL_V IO_ADDRESS(OMAP24XX_SDRC_BASE + 0x0a4)
+
+ .text
+
+ENTRY(sram_ddr_init)
+ stmfd sp!, {r0 - r12, lr} @ save registers on stack
+
+ mov r12, r2 @ capture CS1 vs CS0
+ mov r8, r3 @ capture force parameter
+
+ /* frequency shift down */
+ ldr r2, cm_clksel2_pll @ get address of dpllout reg
+ mov r3, #0x1 @ value for 1x operation
+ str r3, [r2] @ go to L1-freq operation
+
+ /* voltage shift down */
+ mov r9, #0x1 @ set up for L1 voltage call
+ bl voltage_shift @ go drop voltage
+
+ /* dll lock mode */
+ ldr r11, sdrc_dlla_ctrl @ addr of dlla ctrl
+ ldr r10, [r11] @ get current val
+ cmp r12, #0x1 @ cs1 base (2422 es2.05/1)
+ addeq r11, r11, #0x8 @ if cs1 base, move to DLLB
+ mvn r9, #0x4 @ mask to get clear bit2
+ and r10, r10, r9 @ clear bit2 for lock mode.
+ orr r10, r10, #0x8 @ make sure DLL on (es2 bit pos)
+ orr r10, r10, #0x2 @ 90 degree phase for all below 133Mhz
+ str r10, [r11] @ commit to DLLA_CTRL
+ bl i_dll_wait @ wait for dll to lock
+
+ /* get dll value */
+ add r11, r11, #0x4 @ get addr of status reg
+ ldr r10, [r11] @ get locked value
+
+ /* voltage shift up */
+ mov r9, #0x0 @ shift back to L0-voltage
+ bl voltage_shift @ go raise voltage
+
+ /* frequency shift up */
+ mov r3, #0x2 @ value for 2x operation
+ str r3, [r2] @ go to L0-freq operation
+
+ /* reset entry mode for dllctrl */
+ sub r11, r11, #0x4 @ move from status to ctrl
+ cmp r12, #0x1 @ normalize if cs1 based
+ subeq r11, r11, #0x8 @ possibly back to DLLA
+ cmp r8, #0x1 @ if forced unlock exit
+ orreq r1, r1, #0x4 @ make sure exit with unlocked value
+ str r1, [r11] @ restore DLLA_CTRL high value
+ add r11, r11, #0x8 @ move to DLLB_CTRL addr
+ str r1, [r11] @ set value DLLB_CTRL
+ bl i_dll_wait @ wait for possible lock
+
+ /* set up for return, DDR should be good */
+ str r10, [r0] @ write dll_status and return counter
+ ldmfd sp!, {r0 - r12, pc} @ restore regs and return
+
+ /* ensure the DLL has relocked */
+i_dll_wait:
+ mov r4, #0x800 @ delay DLL relock, min 0x400 L3 clocks
+i_dll_delay:
+ subs r4, r4, #0x1
+ bne i_dll_delay
+ mov pc, lr
+
+ /*
+ * shift up or down voltage, use R9 as input to tell level.
+ * wait for it to finish, use 32k sync counter, 1tick=31uS.
+ */
+voltage_shift:
+ ldr r4, prcm_voltctrl @ get addr of volt ctrl.
+ ldr r5, [r4] @ get value.
+ ldr r6, prcm_mask_val @ get value of mask
+ and r5, r5, r6 @ apply mask to clear bits
+ orr r5, r5, r9 @ build value for L0/L1-volt operation.
+ str r5, [r4] @ set up for change.
+ mov r3, #0x4000 @ get val for force
+ orr r5, r5, r3 @ build value for force
+ str r5, [r4] @ Force transition to L1
+
+ ldr r3, timer_32ksynct_cr @ get addr of counter
+ ldr r5, [r3] @ get value
+ add r5, r5, #0x3 @ give it at most 93uS
+volt_delay:
+ ldr r7, [r3] @ get timer value
+ cmp r5, r7 @ time up?
+ bhi volt_delay @ not yet->branch
+ mov pc, lr @ back to caller.
+
+/* relative load constants */
+cm_clksel2_pll:
+ .word CM_CLKSEL2_PLL_V
+sdrc_dlla_ctrl:
+ .word SDRC_DLLA_CTRL_V
+prcm_voltctrl:
+ .word PRCM_VOLTCTRL_V
+prcm_mask_val:
+ .word 0xFFFF3FFC
+timer_32ksynct_cr:
+ .word TIMER_32KSYNCT_CR_V
+ENTRY(sram_ddr_init_sz)
+ .word . - sram_ddr_init
+
+/*
+ * Reprograms memory timings.
+ * r0 = [PRCM_FULL | PRCM_HALF] r1 = SDRC_DLLA_CTRL value r2 = [DDR | SDR]
+ * PRCM_FULL = 2, PRCM_HALF = 1, DDR = 1, SDR = 0
+ */
+ENTRY(sram_reprogram_sdrc)
+ stmfd sp!, {r0 - r10, lr} @ save registers on stack
+ mov r3, #0x0 @ clear for mrc call
+ mcr p15, 0, r3, c7, c10, 4 @ memory barrier, finish ARM SDR/DDR
+ nop
+ nop
+ ldr r6, ddr_sdrc_rfr_ctrl @ get addr of refresh reg
+ ldr r5, [r6] @ get value
+ mov r5, r5, lsr #8 @ isolate rfr field and drop burst
+
+ cmp r0, #0x1 @ going to half speed?
+ movne r9, #0x0 @ if up set flag up for pre up, hi volt
+
+ blne voltage_shift_c @ adjust voltage
+
+ cmp r0, #0x1 @ going to half speed (post branch link)
+ moveq r5, r5, lsr #1 @ divide by 2 if to half
+ movne r5, r5, lsl #1 @ mult by 2 if to full
+ mov r5, r5, lsl #8 @ put rfr field back into place
+ add r5, r5, #0x1 @ turn on burst of 1
+ ldr r4, ddr_cm_clksel2_pll @ get address of out reg
+ ldr r3, [r4] @ get curr value
+ orr r3, r3, #0x3
+ bic r3, r3, #0x3 @ clear lower bits
+ orr r3, r3, r0 @ new state value
+ str r3, [r4] @ set new state (pll/x, x=1 or 2)
+ nop
+ nop
+
+ moveq r9, #0x1 @ if speed down, post down, drop volt
+ bleq voltage_shift_c
+
+ mcr p15, 0, r3, c7, c10, 4 @ memory barrier
+ str r5, [r6] @ set new RFR_1 value
+ add r6, r6, #0x30 @ get RFR_2 addr
+ str r5, [r6] @ set RFR_2
+ nop
+ cmp r2, #0x1 @ (SDR or DDR) do we need to adjust DLL
+ bne freq_out @ leave if SDR, no DLL function
+
+ /* With DDR, we need to take care of the DLL for the frequency change */
+ ldr r2, ddr_sdrc_dlla_ctrl @ addr of dlla ctrl
+ str r1, [r2] @ write out new SDRC_DLLA_CTRL
+ add r2, r2, #0x8 @ addr to SDRC_DLLB_CTRL
+ str r1, [r2] @ commit to SDRC_DLLB_CTRL
+ mov r1, #0x2000 @ wait DLL relock, min 0x400 L3 clocks
+dll_wait:
+ subs r1, r1, #0x1
+ bne dll_wait
+freq_out:
+ ldmfd sp!, {r0 - r10, pc} @ restore regs and return
+
+ /*
+ * shift up or down voltage, use R9 as input to tell level.
+ * wait for it to finish, use 32k sync counter, 1tick=31uS.
+ */
+voltage_shift_c:
+ ldr r10, ddr_prcm_voltctrl @ get addr of volt ctrl
+ ldr r8, [r10] @ get value
+ ldr r7, ddr_prcm_mask_val @ get value of mask
+ and r8, r8, r7 @ apply mask to clear bits
+ orr r8, r8, r9 @ build value for L0/L1-volt operation.
+ str r8, [r10] @ set up for change.
+ mov r7, #0x4000 @ get val for force
+ orr r8, r8, r7 @ build value for force
+ str r8, [r10] @ Force transition to L1
+
+ ldr r10, ddr_timer_32ksynct @ get addr of counter
+ ldr r8, [r10] @ get value
+ add r8, r8, #0x2 @ give it at most 62uS (min 31+)
+volt_delay_c:
+ ldr r7, [r10] @ get timer value
+ cmp r8, r7 @ time up?
+ bhi volt_delay_c @ not yet->branch
+ mov pc, lr @ back to caller
+
+ddr_cm_clksel2_pll:
+ .word CM_CLKSEL2_PLL_V
+ddr_sdrc_dlla_ctrl:
+ .word SDRC_DLLA_CTRL_V
+ddr_sdrc_rfr_ctrl:
+ .word SDRC_RFR_CTRL_V
+ddr_prcm_voltctrl:
+ .word PRCM_VOLTCTRL_V
+ddr_prcm_mask_val:
+ .word 0xFFFF3FFC
+ddr_timer_32ksynct:
+ .word TIMER_32KSYNCT_CR_V
+
+ENTRY(sram_reprogram_sdrc_sz)
+ .word . - sram_reprogram_sdrc
+
+/*
+ * Set dividers and pll. Also recalculate DLL value for DDR and unlock mode.
+ */
+ENTRY(sram_set_prcm)
+ stmfd sp!, {r0-r12, lr} @ regs to stack
+ adr r4, pbegin @ addr of preload start
+ adr r8, pend @ addr of preload end
+ mcrr p15, 1, r8, r4, c12 @ preload into icache
+pbegin:
+ /* move into fast relock bypass */
+ ldr r8, pll_ctl @ get addr
+ ldr r5, [r8] @ get val
+ mvn r6, #0x3 @ clear mask
+ and r5, r5, r6 @ clear field
+ orr r7, r5, #0x2 @ fast relock val
+ str r7, [r8] @ go to fast relock
+ ldr r4, pll_stat @ addr of stat
+block:
+ /* wait for bypass */
+ ldr r8, [r4] @ stat value
+ and r8, r8, #0x3 @ mask for stat
+ cmp r8, #0x1 @ there yet
+ bne block @ loop if not
+
+ /* set new dpll dividers _after_ in bypass */
+ ldr r4, pll_div @ get addr
+ str r0, [r4] @ set dpll ctrl val
+
+ ldr r4, set_config @ get addr
+ mov r8, #1 @ valid cfg msk
+ str r8, [r4] @ make dividers take
+
+ mov r4, #100 @ dead spin a bit
+wait_a_bit:
+ subs r4, r4, #1 @ dec loop
+ bne wait_a_bit @ delay done?
+
+ /* check if staying in bypass */
+ cmp r2, #0x1 @ stay in bypass?
+ beq pend @ jump over dpll relock
+
+ /* relock DPLL with new vals */
+ ldr r5, pll_stat @ get addr
+ ldr r4, pll_ctl @ get addr
+ orr r8, r7, #0x3 @ val for lock dpll
+ str r8, [r4] @ set val
+ mov r0, #1000 @ dead spin a bit
+wait_more:
+ subs r0, r0, #1 @ dec loop
+ bne wait_more @ delay done?
+wait_lock:
+ ldr r8, [r5] @ get lock val
+ and r8, r8, #3 @ isolate field
+ cmp r8, #2 @ locked?
+ bne wait_lock @ wait if not
+pend:
+ /* update memory timings & briefly lock dll */
+ ldr r4, sdrc_rfr @ get addr
+ str r1, [r4] @ update refresh timing
+ ldr r11, dlla_ctrl @ get addr of DLLA ctrl
+ ldr r10, [r11] @ get current val
+ mvn r9, #0x4 @ mask to get clear bit2
+ and r10, r10, r9 @ clear bit2 for lock mode
+ orr r10, r10, #0x8 @ make sure DLL on (es2 bit pos)
+ str r10, [r11] @ commit to DLLA_CTRL
+ add r11, r11, #0x8 @ move to dllb
+ str r10, [r11] @ hit DLLB also
+
+ mov r4, #0x800 @ relock time (min 0x400 L3 clocks)
+wait_dll_lock:
+ subs r4, r4, #0x1
+ bne wait_dll_lock
+ nop
+ ldmfd sp!, {r0-r12, pc} @ restore regs and return
+
+set_config:
+ .word PRCM_CLKCFG_CTRL_V
+pll_ctl:
+ .word CM_CLKEN_PLL_V
+pll_stat:
+ .word CM_IDLEST_CKGEN_V
+pll_div:
+ .word CM_CLKSEL1_PLL_V
+sdrc_rfr:
+ .word SDRC_RFR_CTRL_V
+dlla_ctrl:
+ .word SDRC_DLLA_CTRL_V
+
+ENTRY(sram_set_prcm_sz)
+ .word . - sram_set_prcm
diff --git a/arch/arm/mach-omap2/timer-gp.c b/arch/arm/mach-omap2/timer-gp.c
new file mode 100644
index 00000000000..9ec11443200
--- /dev/null
+++ b/arch/arm/mach-omap2/timer-gp.c
@@ -0,0 +1,126 @@
+/*
+ * linux/arch/arm/mach-omap2/timer-gp.c
+ *
+ * OMAP2 GP timer support.
+ *
+ * Copyright (C) 2005 Nokia Corporation
+ * Author: Paul Mundt <paul.mundt@nokia.com>
+ * Juha Yrjölä <juha.yrjola@nokia.com>
+ *
+ * Some parts based off of TI's 24xx code:
+ *
+ * Copyright (C) 2004 Texas Instruments, Inc.
+ *
+ * Roughly modelled after the OMAP1 MPU timer code.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/time.h>
+#include <linux/interrupt.h>
+#include <linux/err.h>
+#include <asm/mach/time.h>
+#include <asm/delay.h>
+#include <asm/io.h>
+#include <asm/hardware/clock.h>
+
+#define OMAP2_GP_TIMER1_BASE 0x48028000
+#define OMAP2_GP_TIMER2_BASE 0x4802a000
+#define OMAP2_GP_TIMER3_BASE 0x48078000
+#define OMAP2_GP_TIMER4_BASE 0x4807a000
+
+#define GP_TIMER_TIDR 0x00
+#define GP_TIMER_TISR 0x18
+#define GP_TIMER_TIER 0x1c
+#define GP_TIMER_TCLR 0x24
+#define GP_TIMER_TCRR 0x28
+#define GP_TIMER_TLDR 0x2c
+#define GP_TIMER_TSICR 0x40
+
+#define OS_TIMER_NR 1 /* GP timer 2 */
+
+static unsigned long timer_base[] = {
+ IO_ADDRESS(OMAP2_GP_TIMER1_BASE),
+ IO_ADDRESS(OMAP2_GP_TIMER2_BASE),
+ IO_ADDRESS(OMAP2_GP_TIMER3_BASE),
+ IO_ADDRESS(OMAP2_GP_TIMER4_BASE),
+};
+
+static inline unsigned int timer_read_reg(int nr, unsigned int reg)
+{
+ return __raw_readl(timer_base[nr] + reg);
+}
+
+static inline void timer_write_reg(int nr, unsigned int reg, unsigned int val)
+{
+ __raw_writel(val, timer_base[nr] + reg);
+}
+
+/* Note that we always enable the clock prescale divider bit */
+static inline void omap2_gp_timer_start(int nr, unsigned long load_val)
+{
+ unsigned int tmp;
+
+ tmp = 0xffffffff - load_val;
+
+ timer_write_reg(nr, GP_TIMER_TLDR, tmp);
+ timer_write_reg(nr, GP_TIMER_TCRR, tmp);
+ timer_write_reg(nr, GP_TIMER_TIER, 1 << 1);
+ timer_write_reg(nr, GP_TIMER_TCLR, (1 << 5) | (1 << 1) | 1);
+}
+
+static irqreturn_t omap2_gp_timer_interrupt(int irq, void *dev_id,
+ struct pt_regs *regs)
+{
+ write_seqlock(&xtime_lock);
+
+ timer_write_reg(OS_TIMER_NR, GP_TIMER_TISR, 1 << 1);
+ timer_tick(regs);
+
+ write_sequnlock(&xtime_lock);
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction omap2_gp_timer_irq = {
+ .name = "gp timer",
+ .flags = SA_INTERRUPT,
+ .handler = omap2_gp_timer_interrupt,
+};
+
+static void __init omap2_gp_timer_init(void)
+{
+ struct clk * sys_ck;
+ u32 tick_period = 120000;
+ u32 l;
+
+ /* Reset clock and prescale value */
+ timer_write_reg(OS_TIMER_NR, GP_TIMER_TCLR, 0);
+
+ sys_ck = clk_get(NULL, "sys_ck");
+ if (IS_ERR(sys_ck))
+ printk(KERN_ERR "Could not get sys_ck\n");
+ else {
+ clk_use(sys_ck);
+ tick_period = clk_get_rate(sys_ck) / 100;
+ clk_put(sys_ck);
+ }
+
+ tick_period /= 2; /* Minimum prescale divider is 2 */
+ tick_period -= 1;
+
+ l = timer_read_reg(OS_TIMER_NR, GP_TIMER_TIDR);
+ printk(KERN_INFO "OMAP2 GP timer (HW version %d.%d)\n",
+ (l >> 4) & 0x0f, l & 0x0f);
+
+ setup_irq(38, &omap2_gp_timer_irq);
+
+ omap2_gp_timer_start(OS_TIMER_NR, tick_period);
+}
+
+struct sys_timer omap_timer = {
+ .init = omap2_gp_timer_init,
+};
+
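The reload value computed in omap2_gp_timer_init() works out to sys_ck / HZ / 2 - 1. A worked example, assuming a 12 MHz sys_ck (the rate that the 120000 fallback above corresponds to at HZ = 100):

/* 12 MHz sys_ck, HZ = 100:
 *   tick_period = 12000000 / 100 = 120000
 *   tick_period /= 2  ->  60000   (prescale divider of 2 is always enabled)
 *   tick_period -= 1  ->  59999   value programmed into TLDR and TCRR
 * The counter then overflows every 60000 cycles of the 6 MHz prescaled
 * clock, i.e. every 10 ms.
 */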
diff --git a/arch/arm/mach-pxa/Kconfig b/arch/arm/mach-pxa/Kconfig
index b380a438e68..e201aa9765b 100644
--- a/arch/arm/mach-pxa/Kconfig
+++ b/arch/arm/mach-pxa/Kconfig
@@ -60,6 +60,7 @@ config MACH_CORGI
bool "Enable Sharp SL-C700 (Corgi) Support"
depends PXA_SHARPSL_25x
select PXA_SHARP_C7xx
+ select PXA_SSP
config MACH_SHEPHERD
bool "Enable Sharp SL-C750 (Shepherd) Support"
@@ -102,12 +103,18 @@ config IWMMXT
config PXA_SHARP_C7xx
bool
+ select PXA_SSP
help
Enable support for all Sharp C7xx models
config PXA_SHARP_Cxx00
bool
+ select PXA_SSP
help
Enable common support for Sharp Cxx00 models
+config PXA_SSP
+ tristate
+ help
+ Enable support for PXA2xx SSP ports
endif
diff --git a/arch/arm/mach-pxa/Makefile b/arch/arm/mach-pxa/Makefile
index 8bc72d07cea..d210bd5032c 100644
--- a/arch/arm/mach-pxa/Makefile
+++ b/arch/arm/mach-pxa/Makefile
@@ -11,8 +11,8 @@ obj-$(CONFIG_PXA27x) += pxa27x.o
obj-$(CONFIG_ARCH_LUBBOCK) += lubbock.o
obj-$(CONFIG_MACH_MAINSTONE) += mainstone.o
obj-$(CONFIG_ARCH_PXA_IDP) += idp.o
-obj-$(CONFIG_PXA_SHARP_C7xx) += corgi.o corgi_ssp.o corgi_lcd.o ssp.o
-obj-$(CONFIG_PXA_SHARP_Cxx00) += spitz.o corgi_ssp.o corgi_lcd.o ssp.o
+obj-$(CONFIG_PXA_SHARP_C7xx) += corgi.o corgi_ssp.o corgi_lcd.o
+obj-$(CONFIG_PXA_SHARP_Cxx00) += spitz.o corgi_ssp.o corgi_lcd.o
obj-$(CONFIG_MACH_POODLE) += poodle.o
obj-$(CONFIG_MACH_TOSA) += tosa.o
@@ -26,6 +26,7 @@ obj-$(CONFIG_LEDS) += $(led-y)
# Misc features
obj-$(CONFIG_PM) += pm.o sleep.o
+obj-$(CONFIG_PXA_SSP) += ssp.o
ifeq ($(CONFIG_PXA27x),y)
obj-$(CONFIG_PM) += standby.o
diff --git a/arch/arm/mach-pxa/corgi_ssp.c b/arch/arm/mach-pxa/corgi_ssp.c
index 591e5f32dbe..bdf10cfa944 100644
--- a/arch/arm/mach-pxa/corgi_ssp.c
+++ b/arch/arm/mach-pxa/corgi_ssp.c
@@ -203,7 +203,7 @@ static int __init corgi_ssp_probe(struct device *dev)
GPDR(ssp_machinfo->cs_ads7846) |= GPIO_bit(ssp_machinfo->cs_ads7846); /* output */
GPSR(ssp_machinfo->cs_ads7846) = GPIO_bit(ssp_machinfo->cs_ads7846); /* High - Disable ADS7846*/
- ret = ssp_init(&corgi_ssp_dev,ssp_machinfo->port);
+ ret = ssp_init(&corgi_ssp_dev, ssp_machinfo->port, 0);
if (ret)
printk(KERN_ERR "Unable to register SSP handler!\n");
diff --git a/arch/arm/mach-pxa/sharpsl.h b/arch/arm/mach-pxa/sharpsl.h
index 3977a77aacd..4879c0f7da7 100644
--- a/arch/arm/mach-pxa/sharpsl.h
+++ b/arch/arm/mach-pxa/sharpsl.h
@@ -32,3 +32,90 @@ void corgi_put_hsync(void);
void spitz_put_hsync(void);
void corgi_wait_hsync(void);
void spitz_wait_hsync(void);
+
+/*
+ * SharpSL Battery/PM Driver
+ */
+
+struct sharpsl_charger_machinfo {
+ void (*init)(void);
+ int gpio_acin;
+ int gpio_batfull;
+ int gpio_batlock;
+ int gpio_fatal;
+ int (*status_acin)(void);
+ void (*discharge)(int);
+ void (*discharge1)(int);
+ void (*charge)(int);
+ void (*chargeled)(int);
+ void (*measure_temp)(int);
+ void (*presuspend)(void);
+ void (*postsuspend)(void);
+ unsigned long (*charger_wakeup)(void);
+ int (*should_wakeup)(unsigned int resume_on_alarm);
+ int bat_levels;
+ struct battery_thresh *bat_levels_noac;
+ struct battery_thresh *bat_levels_acin;
+ int status_high_acin;
+ int status_low_acin;
+ int status_high_noac;
+ int status_low_noac;
+};
+
+struct battery_thresh {
+ int voltage;
+ int percentage;
+};
+
+struct battery_stat {
+ int ac_status; /* APM AC Present/Not Present */
+ int mainbat_status; /* APM Main Battery Status */
+ int mainbat_percent; /* Main Battery Percentage Charge */
+ int mainbat_voltage; /* Main Battery Voltage */
+};
+
+struct sharpsl_pm_status {
+ struct device *dev;
+ struct timer_list ac_timer;
+ struct timer_list chrg_full_timer;
+
+ int charge_mode;
+#define CHRG_ERROR (-1)
+#define CHRG_OFF (0)
+#define CHRG_ON (1)
+#define CHRG_DONE (2)
+
+ unsigned int flags;
+#define SHARPSL_SUSPENDED (1 << 0) /* Device is Suspended */
+#define SHARPSL_ALARM_ACTIVE (1 << 1) /* Alarm is for charging event (not user) */
+#define SHARPSL_BL_LIMIT (1 << 2) /* Backlight Intensity Limited */
+#define SHARPSL_APM_QUEUED (1 << 3) /* APM Event Queued */
+#define SHARPSL_DO_OFFLINE_CHRG (1 << 4) /* Trigger the offline charger */
+
+ int full_count;
+ unsigned long charge_start_time;
+ struct sharpsl_charger_machinfo *machinfo;
+ struct battery_stat battstat;
+};
+
+extern struct sharpsl_pm_status sharpsl_pm;
+extern struct battery_thresh spitz_battery_levels_acin[];
+extern struct battery_thresh spitz_battery_levels_noac[];
+
+#define READ_GPIO_BIT(x) (GPLR(x) & GPIO_bit(x))
+
+#define SHARPSL_LED_ERROR 2
+#define SHARPSL_LED_ON 1
+#define SHARPSL_LED_OFF 0
+
+#define CHARGE_ON() sharpsl_pm.machinfo->charge(1)
+#define CHARGE_OFF() sharpsl_pm.machinfo->charge(0)
+#define CHARGE_LED_ON() sharpsl_pm.machinfo->chargeled(SHARPSL_LED_ON)
+#define CHARGE_LED_OFF() sharpsl_pm.machinfo->chargeled(SHARPSL_LED_OFF)
+#define CHARGE_LED_ERR() sharpsl_pm.machinfo->chargeled(SHARPSL_LED_ERROR)
+#define DISCHARGE_ON() sharpsl_pm.machinfo->discharge(1)
+#define DISCHARGE_OFF() sharpsl_pm.machinfo->discharge(0)
+#define STATUS_AC_IN sharpsl_pm.machinfo->status_acin()
+#define STATUS_BATT_LOCKED READ_GPIO_BIT(sharpsl_pm.machinfo->gpio_batlock)
+#define STATUS_CHRG_FULL READ_GPIO_BIT(sharpsl_pm.machinfo->gpio_batfull)
+#define STATUS_FATAL READ_GPIO_BIT(sharpsl_pm.machinfo->gpio_fatal)
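The header above defines the contract between a board file and the sharpsl_pm driver that follows. A hypothetical machinfo instance (not from this patch, with invented GPIO numbers and thresholds) showing roughly how a board would wire it up:

/* Hypothetical example only: GPIO numbers, thresholds and the helper
 * functions below are invented for illustration. Assumes "sharpsl.h" above. */
#include "sharpsl.h"

static void example_charge(int on)      /* assumed board-specific charge control */
{
        /* drive the charger-enable GPIO here */
}

static int example_status_acin(void)    /* assumed AC-adapter detect helper */
{
        return 0;                       /* 0 = no AC adapter present */
}

static struct sharpsl_charger_machinfo example_pm_machinfo = {
        .gpio_acin        = 1,          /* hypothetical GPIO numbers */
        .gpio_batfull     = 2,
        .gpio_batlock     = 3,
        .status_acin      = example_status_acin,
        .charge           = example_charge,
        .bat_levels       = 40,         /* matches the 40-entry tables below */
        .bat_levels_acin  = spitz_battery_levels_acin,
        .bat_levels_noac  = spitz_battery_levels_noac,
        .status_high_acin = 188,        /* invented APM thresholds */
        .status_low_acin  = 178,
        .status_high_noac = 185,
        .status_low_noac  = 175,
        /* A real board must also provide init, discharge, chargeled,
         * measure_temp, presuspend, postsuspend, charger_wakeup and
         * should_wakeup: the driver calls them unconditionally. */
};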
diff --git a/arch/arm/mach-pxa/sharpsl_pm.c b/arch/arm/mach-pxa/sharpsl_pm.c
new file mode 100644
index 00000000000..6c9e871c53d
--- /dev/null
+++ b/arch/arm/mach-pxa/sharpsl_pm.c
@@ -0,0 +1,992 @@
+/*
+ * Battery and Power Management code for the Sharp SL-C7xx and SL-Cxx00
+ * series of PDAs
+ *
+ * Copyright (c) 2004-2005 Richard Purdie
+ *
+ * Based on code written by Sharp for 2.4 kernels
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#undef DEBUG
+
+#include <linux/module.h>
+#include <linux/timer.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/apm_bios.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/device.h>
+
+#include <asm/hardware.h>
+#include <asm/hardware/scoop.h>
+#include <asm/mach-types.h>
+#include <asm/irq.h>
+#include <asm/apm.h>
+
+#include <asm/arch/pm.h>
+#include <asm/arch/pxa-regs.h>
+#include <asm/arch/sharpsl.h>
+#include "sharpsl.h"
+
+/*
+ * Constants
+ */
+#define SHARPSL_CHARGE_ON_TIME_INTERVAL (msecs_to_jiffies(1*60*1000)) /* 1 min */
+#define SHARPSL_CHARGE_FINISH_TIME (msecs_to_jiffies(10*60*1000)) /* 10 min */
+#define SHARPSL_BATCHK_TIME (msecs_to_jiffies(15*1000)) /* 15 sec */
+#define SHARPSL_BATCHK_TIME_SUSPEND (60*10) /* 10 min */
+#define SHARPSL_WAIT_CO_TIME 15 /* 15 sec */
+#define SHARPSL_WAIT_DISCHARGE_ON 100 /* 100 msec */
+#define SHARPSL_CHECK_BATTERY_WAIT_TIME_TEMP 10 /* 10 msec */
+#define SHARPSL_CHECK_BATTERY_WAIT_TIME_VOLT 10 /* 10 msec */
+#define SHARPSL_CHECK_BATTERY_WAIT_TIME_JKVAD 10 /* 10 msec */
+#define SHARPSL_CHARGE_WAIT_TIME 15 /* 15 msec */
+#define SHARPSL_CHARGE_CO_CHECK_TIME 5 /* 5 msec */
+#define SHARPSL_CHARGE_RETRY_CNT 1 /* eqv. 10 min */
+
+#define SHARPSL_CHARGE_ON_VOLT 0x99 /* 2.9V */
+#define SHARPSL_CHARGE_ON_TEMP 0xe0 /* 2.9V */
+#define SHARPSL_CHARGE_ON_JKVAD_HIGH 0x9b /* 6V */
+#define SHARPSL_CHARGE_ON_JKVAD_LOW 0x34 /* 2V */
+#define SHARPSL_FATAL_ACIN_VOLT 182 /* 3.45V */
+#define SHARPSL_FATAL_NOACIN_VOLT 170 /* 3.40V */
+
+struct battery_thresh spitz_battery_levels_acin[] = {
+ { 213, 100},
+ { 212, 98},
+ { 211, 95},
+ { 210, 93},
+ { 209, 90},
+ { 208, 88},
+ { 207, 85},
+ { 206, 83},
+ { 205, 80},
+ { 204, 78},
+ { 203, 75},
+ { 202, 73},
+ { 201, 70},
+ { 200, 68},
+ { 199, 65},
+ { 198, 63},
+ { 197, 60},
+ { 196, 58},
+ { 195, 55},
+ { 194, 53},
+ { 193, 50},
+ { 192, 48},
+ { 192, 45},
+ { 191, 43},
+ { 191, 40},
+ { 190, 38},
+ { 190, 35},
+ { 189, 33},
+ { 188, 30},
+ { 187, 28},
+ { 186, 25},
+ { 185, 23},
+ { 184, 20},
+ { 183, 18},
+ { 182, 15},
+ { 181, 13},
+ { 180, 10},
+ { 179, 8},
+ { 178, 5},
+ { 0, 0},
+};
+
+struct battery_thresh spitz_battery_levels_noac[] = {
+ { 213, 100},
+ { 212, 98},
+ { 211, 95},
+ { 210, 93},
+ { 209, 90},
+ { 208, 88},
+ { 207, 85},
+ { 206, 83},
+ { 205, 80},
+ { 204, 78},
+ { 203, 75},
+ { 202, 73},
+ { 201, 70},
+ { 200, 68},
+ { 199, 65},
+ { 198, 63},
+ { 197, 60},
+ { 196, 58},
+ { 195, 55},
+ { 194, 53},
+ { 193, 50},
+ { 192, 48},
+ { 191, 45},
+ { 190, 43},
+ { 189, 40},
+ { 188, 38},
+ { 187, 35},
+ { 186, 33},
+ { 185, 30},
+ { 184, 28},
+ { 183, 25},
+ { 182, 23},
+ { 181, 20},
+ { 180, 18},
+ { 179, 15},
+ { 178, 13},
+ { 177, 10},
+ { 176, 8},
+ { 175, 5},
+ { 0, 0},
+};
+
+/* MAX1111 Commands */
+#define MAXCTRL_PD0 1u << 0
+#define MAXCTRL_PD1 1u << 1
+#define MAXCTRL_SGL 1u << 2
+#define MAXCTRL_UNI 1u << 3
+#define MAXCTRL_SEL_SH 4
+#define MAXCTRL_STR 1u << 7
+
+/* MAX1111 Channel Definitions */
+#define BATT_AD 4u
+#define BATT_THM 2u
+#define JK_VAD 6u
+
+
+/*
+ * Prototypes
+ */
+static int sharpsl_read_MainBattery(void);
+static int sharpsl_off_charge_battery(void);
+static int sharpsl_check_battery(int mode);
+static int sharpsl_ac_check(void);
+static int sharpsl_fatal_check(void);
+static int sharpsl_average_value(int ad);
+static void sharpsl_average_clear(void);
+static void sharpsl_charge_toggle(void *private_);
+static void sharpsl_battery_thread(void *private_);
+
+
+/*
+ * Variables
+ */
+struct sharpsl_pm_status sharpsl_pm;
+DECLARE_WORK(toggle_charger, sharpsl_charge_toggle, NULL);
+DECLARE_WORK(sharpsl_bat, sharpsl_battery_thread, NULL);
+
+
+static int get_percentage(int voltage)
+{
+ int i = sharpsl_pm.machinfo->bat_levels - 1;
+ struct battery_thresh *thresh;
+
+ if (sharpsl_pm.charge_mode == CHRG_ON)
+ thresh=sharpsl_pm.machinfo->bat_levels_acin;
+ else
+ thresh=sharpsl_pm.machinfo->bat_levels_noac;
+
+ while (i > 0 && (voltage > thresh[i].voltage))
+ i--;
+
+ return thresh[i].percentage;
+}
+
+static int get_apm_status(int voltage)
+{
+ int low_thresh, high_thresh;
+
+ if (sharpsl_pm.charge_mode == CHRG_ON) {
+ high_thresh = sharpsl_pm.machinfo->status_high_acin;
+ low_thresh = sharpsl_pm.machinfo->status_low_acin;
+ } else {
+ high_thresh = sharpsl_pm.machinfo->status_high_noac;
+ low_thresh = sharpsl_pm.machinfo->status_low_noac;
+ }
+
+ if (voltage >= high_thresh)
+ return APM_BATTERY_STATUS_HIGH;
+ if (voltage >= low_thresh)
+ return APM_BATTERY_STATUS_LOW;
+ return APM_BATTERY_STATUS_CRITICAL;
+}
+
+void sharpsl_battery_kick(void)
+{
+ schedule_delayed_work(&sharpsl_bat, msecs_to_jiffies(125));
+}
+EXPORT_SYMBOL(sharpsl_battery_kick);
+
+
+static void sharpsl_battery_thread(void *private_)
+{
+ int voltage, percent, apm_status, i = 0;
+
+ if (!sharpsl_pm.machinfo)
+ return;
+
+ sharpsl_pm.battstat.ac_status = (!(STATUS_AC_IN) ? APM_AC_OFFLINE : APM_AC_ONLINE);
+
+ /* Corgi cannot confirm when the battery is fully charged, so kick the charger periodically. */
+ if (machine_is_corgi() && (sharpsl_pm.charge_mode == CHRG_ON)
+ && time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_ON_TIME_INTERVAL))
+ schedule_work(&toggle_charger);
+
+ while(1) {
+ voltage = sharpsl_read_MainBattery();
+ if (voltage > 0) break;
+ if (i++ > 5) {
+ voltage = sharpsl_pm.machinfo->bat_levels_noac[0].voltage;
+ dev_warn(sharpsl_pm.dev, "Warning: Cannot read main battery!\n");
+ break;
+ }
+ }
+
+ voltage = sharpsl_average_value(voltage);
+ apm_status = get_apm_status(voltage);
+ percent = get_percentage(voltage);
+
+ /* At low battery voltages, the voltage has a tendency to start
+ creeping back up so we try to avoid this here */
+ if ((sharpsl_pm.battstat.ac_status == APM_AC_ONLINE) || (apm_status == APM_BATTERY_STATUS_HIGH) || percent <= sharpsl_pm.battstat.mainbat_percent) {
+ sharpsl_pm.battstat.mainbat_voltage = voltage;
+ sharpsl_pm.battstat.mainbat_status = apm_status;
+ sharpsl_pm.battstat.mainbat_percent = percent;
+ }
+
+ dev_dbg(sharpsl_pm.dev, "Battery: voltage: %d, status: %d, percentage: %d, time: %d\n", voltage,
+ sharpsl_pm.battstat.mainbat_status, sharpsl_pm.battstat.mainbat_percent, jiffies);
+
+ /* If battery is low, limit backlight intensity to save power. */
+ if ((sharpsl_pm.battstat.ac_status != APM_AC_ONLINE)
+ && ((sharpsl_pm.battstat.mainbat_status == APM_BATTERY_STATUS_LOW) ||
+ (sharpsl_pm.battstat.mainbat_status == APM_BATTERY_STATUS_CRITICAL))) {
+ if (!(sharpsl_pm.flags & SHARPSL_BL_LIMIT)) {
+ corgibl_limit_intensity(1);
+ sharpsl_pm.flags |= SHARPSL_BL_LIMIT;
+ }
+ } else if (sharpsl_pm.flags & SHARPSL_BL_LIMIT) {
+ corgibl_limit_intensity(0);
+ sharpsl_pm.flags &= ~SHARPSL_BL_LIMIT;
+ }
+
+ /* Suspend if critical battery level */
+ if ((sharpsl_pm.battstat.ac_status != APM_AC_ONLINE)
+ && (sharpsl_pm.battstat.mainbat_status == APM_BATTERY_STATUS_CRITICAL)
+ && !(sharpsl_pm.flags & SHARPSL_APM_QUEUED)) {
+ sharpsl_pm.flags |= SHARPSL_APM_QUEUED;
+ dev_err(sharpsl_pm.dev, "Fatal Off\n");
+ apm_queue_event(APM_CRITICAL_SUSPEND);
+ }
+
+ schedule_delayed_work(&sharpsl_bat, SHARPSL_BATCHK_TIME);
+}
+
+static void sharpsl_charge_on(void)
+{
+ dev_dbg(sharpsl_pm.dev, "Turning Charger On\n");
+
+ sharpsl_pm.full_count = 0;
+ sharpsl_pm.charge_mode = CHRG_ON;
+ schedule_delayed_work(&toggle_charger, msecs_to_jiffies(250));
+ schedule_delayed_work(&sharpsl_bat, msecs_to_jiffies(500));
+}
+
+static void sharpsl_charge_off(void)
+{
+ dev_dbg(sharpsl_pm.dev, "Turning Charger Off\n");
+
+ CHARGE_OFF();
+ CHARGE_LED_OFF();
+ sharpsl_pm.charge_mode = CHRG_OFF;
+
+ schedule_work(&sharpsl_bat);
+}
+
+static void sharpsl_charge_error(void)
+{
+ CHARGE_LED_ERR();
+ CHARGE_OFF();
+ sharpsl_pm.charge_mode = CHRG_ERROR;
+}
+
+static void sharpsl_charge_toggle(void *private_)
+{
+ dev_dbg(sharpsl_pm.dev, "Toogling Charger at time: %lx\n", jiffies);
+
+ if (STATUS_AC_IN == 0) {
+ sharpsl_charge_off();
+ return;
+ } else if ((sharpsl_check_battery(1) < 0) || (sharpsl_ac_check() < 0)) {
+ sharpsl_charge_error();
+ return;
+ }
+
+ CHARGE_LED_ON();
+ CHARGE_OFF();
+ mdelay(SHARPSL_CHARGE_WAIT_TIME);
+ CHARGE_ON();
+
+ sharpsl_pm.charge_start_time = jiffies;
+}
+
+static void sharpsl_ac_timer(unsigned long data)
+{
+ int acin = STATUS_AC_IN;
+
+ dev_dbg(sharpsl_pm.dev, "AC Status: %d\n",acin);
+
+ sharpsl_average_clear();
+ if (acin && (sharpsl_pm.charge_mode != CHRG_ON))
+ sharpsl_charge_on();
+ else if (sharpsl_pm.charge_mode == CHRG_ON)
+ sharpsl_charge_off();
+
+ schedule_work(&sharpsl_bat);
+}
+
+
+static irqreturn_t sharpsl_ac_isr(int irq, void *dev_id, struct pt_regs *fp)
+{
+ /* Delay the event slightly to debounce */
+ /* Must be a smaller delay than the chrg_full_isr below */
+ mod_timer(&sharpsl_pm.ac_timer, jiffies + msecs_to_jiffies(250));
+
+ return IRQ_HANDLED;
+}
+
+static void sharpsl_chrg_full_timer(unsigned long data)
+{
+ dev_dbg(sharpsl_pm.dev, "Charge Full at time: %lx\n", jiffies);
+
+ sharpsl_pm.full_count++;
+
+ if (STATUS_AC_IN == 0) {
+ dev_dbg(sharpsl_pm.dev, "Charge Full: AC removed - stop charging!\n");
+ if (sharpsl_pm.charge_mode == CHRG_ON)
+ sharpsl_charge_off();
+ } else if (sharpsl_pm.full_count < 2) {
+ dev_dbg(sharpsl_pm.dev, "Charge Full: Count too low\n");
+ schedule_work(&toggle_charger);
+ } else if (time_after(jiffies, sharpsl_pm.charge_start_time + SHARPSL_CHARGE_FINISH_TIME)) {
+ dev_dbg(sharpsl_pm.dev, "Charge Full: Interrupt generated too slowly - retry.\n");
+ schedule_work(&toggle_charger);
+ } else {
+ sharpsl_charge_off();
+ sharpsl_pm.charge_mode = CHRG_DONE;
+ dev_dbg(sharpsl_pm.dev, "Charge Full: Charging Finished\n");
+ }
+}
+
+/* Charging Finished Interrupt (Not present on Corgi) */
+/* Can trigger at the same time as an AC status change so
+ delay until after that has been processed */
+static irqreturn_t sharpsl_chrg_full_isr(int irq, void *dev_id, struct pt_regs *fp)
+{
+ if (sharpsl_pm.flags & SHARPSL_SUSPENDED)
+ return IRQ_HANDLED;
+
+ /* delay until after any ac interrupt */
+ mod_timer(&sharpsl_pm.chrg_full_timer, jiffies + msecs_to_jiffies(500));
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t sharpsl_fatal_isr(int irq, void *dev_id, struct pt_regs *fp)
+{
+ int is_fatal = 0;
+
+ if (STATUS_BATT_LOCKED == 0) {
+ dev_err(sharpsl_pm.dev, "Battery now Unlocked! Suspending.\n");
+ is_fatal = 1;
+ }
+
+ if (sharpsl_pm.machinfo->gpio_fatal && (STATUS_FATAL == 0)) {
+ dev_err(sharpsl_pm.dev, "Fatal Batt Error! Suspending.\n");
+ is_fatal = 1;
+ }
+
+ if (!(sharpsl_pm.flags & SHARPSL_APM_QUEUED) && is_fatal) {
+ sharpsl_pm.flags |= SHARPSL_APM_QUEUED;
+ apm_queue_event(APM_CRITICAL_SUSPEND);
+ }
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Maintain an average of the last 10 readings
+ */
+#define SHARPSL_CNV_VALUE_NUM 10
+static int sharpsl_ad_index;
+
+static void sharpsl_average_clear(void)
+{
+ sharpsl_ad_index = 0;
+}
+
+static int sharpsl_average_value(int ad)
+{
+ int i, ad_val = 0;
+ static int sharpsl_ad[SHARPSL_CNV_VALUE_NUM+1];
+
+ if (sharpsl_pm.battstat.mainbat_status != APM_BATTERY_STATUS_HIGH) {
+ sharpsl_ad_index = 0;
+ return ad;
+ }
+
+ sharpsl_ad[sharpsl_ad_index] = ad;
+ sharpsl_ad_index++;
+ if (sharpsl_ad_index >= SHARPSL_CNV_VALUE_NUM) {
+ for (i=0; i < (SHARPSL_CNV_VALUE_NUM-1); i++)
+ sharpsl_ad[i] = sharpsl_ad[i+1];
+ sharpsl_ad_index = SHARPSL_CNV_VALUE_NUM - 1;
+ }
+ for (i=0; i < sharpsl_ad_index; i++)
+ ad_val += sharpsl_ad[i];
+
+ return (ad_val / sharpsl_ad_index);
+}
+
+
+/*
+ * Read MAX1111 ADC
+ */
+static int read_max1111(int channel)
+{
+ return corgi_ssp_max1111_get((channel << MAXCTRL_SEL_SH) | MAXCTRL_PD0 | MAXCTRL_PD1
+ | MAXCTRL_SGL | MAXCTRL_UNI | MAXCTRL_STR);
+}
+
+static int sharpsl_read_MainBattery(void)
+{
+ return read_max1111(BATT_AD);
+}
+
+static int sharpsl_read_Temp(void)
+{
+ int temp;
+
+ sharpsl_pm.machinfo->measure_temp(1);
+
+ mdelay(SHARPSL_CHECK_BATTERY_WAIT_TIME_TEMP);
+ temp = read_max1111(BATT_THM);
+
+ sharpsl_pm.machinfo->measure_temp(0);
+
+ return temp;
+}
+
+static int sharpsl_read_jkvad(void)
+{
+ return read_max1111(JK_VAD);
+}
+
+/*
+ * Take an array of 5 integers, remove the maximum and minimum values
+ * and return the average.
+ */
+static int get_select_val(int *val)
+{
+ int i, j, k, temp, sum = 0;
+
+ /* Find MAX val */
+ temp = val[0];
+ j=0;
+ for (i=1; i<5; i++) {
+ if (temp < val[i]) {
+ temp = val[i];
+ j = i;
+ }
+ }
+
+ /* Find MIN val */
+ temp = val[4];
+ k=4;
+ for (i=3; i>=0; i--) {
+ if (temp > val[i]) {
+ temp = val[i];
+ k = i;
+ }
+ }
+
+ for (i=0; i<5; i++)
+ if (i != j && i != k )
+ sum += val[i];
+
+ dev_dbg(sharpsl_pm.dev, "Average: %d from values: %d, %d, %d, %d, %d\n", sum/3, val[0], val[1], val[2], val[3], val[4]);
+
+ return (sum/3);
+}
+
+/* mode 0 - Check temperature and voltage
+ * 1 - Check temperature only */
+static int sharpsl_check_battery(int mode)
+{
+ int val, i, buff[5];
+
+ /* Check battery temperature */
+ for (i=0; i<5; i++) {
+ mdelay(SHARPSL_CHECK_BATTERY_WAIT_TIME_TEMP);
+ buff[i] = sharpsl_read_Temp();
+ }
+
+ val = get_select_val(buff);
+
+ dev_dbg(sharpsl_pm.dev, "Temperature: %d\n", val);
+ if (val > SHARPSL_CHARGE_ON_TEMP)
+ return -1;
+ if (mode == 1)
+ return 0;
+
+ /* disable charge, enable discharge */
+ CHARGE_OFF();
+ DISCHARGE_ON();
+ mdelay(SHARPSL_WAIT_DISCHARGE_ON);
+
+ if (sharpsl_pm.machinfo->discharge1)
+ sharpsl_pm.machinfo->discharge1(1);
+
+ /* Check battery voltage */
+ for (i=0; i<5; i++) {
+ buff[i] = sharpsl_read_MainBattery();
+ mdelay(SHARPSL_CHECK_BATTERY_WAIT_TIME_VOLT);
+ }
+
+ if (sharpsl_pm.machinfo->discharge1)
+ sharpsl_pm.machinfo->discharge1(0);
+
+ DISCHARGE_OFF();
+
+ val = get_select_val(buff);
+ dev_dbg(sharpsl_pm.dev, "Battery Voltage: %d\n", val);
+
+ if (val < SHARPSL_CHARGE_ON_VOLT)
+ return -1;
+
+ return 0;
+}
+
+static int sharpsl_ac_check(void)
+{
+ int temp, i, buff[5];
+
+ for (i=0; i<5; i++) {
+ buff[i] = sharpsl_read_jkvad();
+ mdelay(SHARPSL_CHECK_BATTERY_WAIT_TIME_JKVAD);
+ }
+
+ temp = get_select_val(buff);
+ dev_dbg(sharpsl_pm.dev, "AC Voltage: %d\n",temp);
+
+ if ((temp > SHARPSL_CHARGE_ON_JKVAD_HIGH) || (temp < SHARPSL_CHARGE_ON_JKVAD_LOW)) {
+ dev_err(sharpsl_pm.dev, "Error: AC check failed.\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int sharpsl_pm_suspend(struct device *dev, pm_message_t state)
+{
+ sharpsl_pm.flags |= SHARPSL_SUSPENDED;
+ flush_scheduled_work();
+
+ if (sharpsl_pm.charge_mode == CHRG_ON)
+ sharpsl_pm.flags |= SHARPSL_DO_OFFLINE_CHRG;
+ else
+ sharpsl_pm.flags &= ~SHARPSL_DO_OFFLINE_CHRG;
+
+ return 0;
+}
+
+static int sharpsl_pm_resume(struct device *dev)
+{
+ /* Clear the reset source indicators as they break the bootloader upon reboot */
+ RCSR = 0x0f;
+ sharpsl_average_clear();
+ sharpsl_pm.flags &= ~SHARPSL_APM_QUEUED;
+ sharpsl_pm.flags &= ~SHARPSL_SUSPENDED;
+
+ return 0;
+}
+
+static void corgi_goto_sleep(unsigned long alarm_time, unsigned int alarm_enable, suspend_state_t state)
+{
+ dev_dbg(sharpsl_pm.dev, "Time is: %08x\n",RCNR);
+
+ dev_dbg(sharpsl_pm.dev, "Offline Charge Activate = %d\n",sharpsl_pm.flags & SHARPSL_DO_OFFLINE_CHRG);
+ /* not charging and AC-IN! */
+
+ if ((sharpsl_pm.flags & SHARPSL_DO_OFFLINE_CHRG) && (STATUS_AC_IN != 0)) {
+ dev_dbg(sharpsl_pm.dev, "Activating Offline Charger...\n");
+ sharpsl_pm.charge_mode = CHRG_OFF;
+ sharpsl_pm.flags &= ~SHARPSL_DO_OFFLINE_CHRG;
+ sharpsl_off_charge_battery();
+ }
+
+ sharpsl_pm.machinfo->presuspend();
+
+ PEDR = 0xffffffff; /* clear it */
+
+ sharpsl_pm.flags &= ~SHARPSL_ALARM_ACTIVE;
+ if ((sharpsl_pm.charge_mode == CHRG_ON) && ((alarm_enable && ((alarm_time - RCNR) > (SHARPSL_BATCHK_TIME_SUSPEND + 30))) || !alarm_enable)) {
+ RTSR &= RTSR_ALE;
+ RTAR = RCNR + SHARPSL_BATCHK_TIME_SUSPEND;
+ dev_dbg(sharpsl_pm.dev, "Charging alarm at: %08x\n",RTAR);
+ sharpsl_pm.flags |= SHARPSL_ALARM_ACTIVE;
+ } else if (alarm_enable) {
+ RTSR &= RTSR_ALE;
+ RTAR = alarm_time;
+ dev_dbg(sharpsl_pm.dev, "User alarm at: %08x\n",RTAR);
+ } else {
+ dev_dbg(sharpsl_pm.dev, "No alarms set.\n");
+ }
+
+ pxa_pm_enter(state);
+
+ sharpsl_pm.machinfo->postsuspend();
+
+ dev_dbg(sharpsl_pm.dev, "Corgi woken up from suspend: %08x\n",PEDR);
+}
+
+static int corgi_enter_suspend(unsigned long alarm_time, unsigned int alarm_enable, suspend_state_t state)
+{
+ if (!sharpsl_pm.machinfo->should_wakeup(!(sharpsl_pm.flags & SHARPSL_ALARM_ACTIVE) && alarm_enable) )
+ {
+ if (!(sharpsl_pm.flags & SHARPSL_ALARM_ACTIVE)) {
+ dev_dbg(sharpsl_pm.dev, "No user triggered wakeup events and not charging. Strange. Suspend.\n");
+ corgi_goto_sleep(alarm_time, alarm_enable, state);
+ return 1;
+ }
+ if(sharpsl_off_charge_battery()) {
+ dev_dbg(sharpsl_pm.dev, "Charging. Suspend...\n");
+ corgi_goto_sleep(alarm_time, alarm_enable, state);
+ return 1;
+ }
+ dev_dbg(sharpsl_pm.dev, "User triggered wakeup in offline charger.\n");
+ }
+
+ if ((STATUS_BATT_LOCKED == 0) || (sharpsl_fatal_check() < 0) )
+ {
+ dev_err(sharpsl_pm.dev, "Fatal condition. Suspend.\n");
+ corgi_goto_sleep(alarm_time, alarm_enable, state);
+ return 1;
+ }
+
+ return 0;
+}
+
+static int corgi_pxa_pm_enter(suspend_state_t state)
+{
+ unsigned long alarm_time = RTAR;
+ unsigned int alarm_status = ((RTSR & RTSR_ALE) != 0);
+
+ dev_dbg(sharpsl_pm.dev, "SharpSL suspending for first time.\n");
+
+ corgi_goto_sleep(alarm_time, alarm_status, state);
+
+ while (corgi_enter_suspend(alarm_time,alarm_status,state))
+ {}
+
+ dev_dbg(sharpsl_pm.dev, "SharpSL resuming...\n");
+
+ return 0;
+}
+#endif
+
+
+/*
+ * Check for fatal battery errors
+ * Fatal returns -1
+ */
+static int sharpsl_fatal_check(void)
+{
+ int buff[5], temp, i, acin;
+
+ dev_dbg(sharpsl_pm.dev, "sharpsl_fatal_check entered\n");
+
+ /* Check AC-Adapter */
+ acin = STATUS_AC_IN;
+
+ if (acin && (sharpsl_pm.charge_mode == CHRG_ON)) {
+ CHARGE_OFF();
+ udelay(100);
+ DISCHARGE_ON(); /* enable discharge */
+ mdelay(SHARPSL_WAIT_DISCHARGE_ON);
+ }
+
+ if (sharpsl_pm.machinfo->discharge1)
+ sharpsl_pm.machinfo->discharge1(1);
+
+ /* Check battery voltage: is a battery inserted? */
+ for (i=0; i<5; i++) {
+ buff[i] = sharpsl_read_MainBattery();
+ mdelay(SHARPSL_CHECK_BATTERY_WAIT_TIME_VOLT);
+ }
+
+ if (sharpsl_pm.machinfo->discharge1)
+ sharpsl_pm.machinfo->discharge1(0);
+
+ if (acin && (sharpsl_pm.charge_mode == CHRG_ON)) {
+ udelay(100);
+ CHARGE_ON();
+ DISCHARGE_OFF();
+ }
+
+ temp = get_select_val(buff);
+ dev_dbg(sharpsl_pm.dev, "sharpsl_fatal_check: acin: %d, discharge voltage: %d, no discharge: %d\n", acin, temp, sharpsl_read_MainBattery());
+
+ if ((acin && (temp < SHARPSL_FATAL_ACIN_VOLT)) ||
+ (!acin && (temp < SHARPSL_FATAL_NOACIN_VOLT)))
+ return -1;
+ return 0;
+}
+
+static int sharpsl_off_charge_error(void)
+{
+ dev_err(sharpsl_pm.dev, "Offline Charger: Error occured.\n");
+ CHARGE_OFF();
+ CHARGE_LED_ERR();
+ sharpsl_pm.charge_mode = CHRG_ERROR;
+ return 1;
+}
+
+/*
+ * Charging Control while suspended
+ * Return 1 - go straight to sleep
+ * Return 0 - sleep or wakeup depending on other factors
+ */
+static int sharpsl_off_charge_battery(void)
+{
+ int time;
+
+ dev_dbg(sharpsl_pm.dev, "Charge Mode: %d\n", sharpsl_pm.charge_mode);
+
+ if (sharpsl_pm.charge_mode == CHRG_OFF) {
+ dev_dbg(sharpsl_pm.dev, "Offline Charger: Step 1\n");
+
+ /* AC Check */
+ if ((sharpsl_ac_check() < 0) || (sharpsl_check_battery(1) < 0))
+ return sharpsl_off_charge_error();
+
+ /* Start Charging */
+ CHARGE_LED_ON();
+ CHARGE_OFF();
+ mdelay(SHARPSL_CHARGE_WAIT_TIME);
+ CHARGE_ON();
+
+ sharpsl_pm.charge_mode = CHRG_ON;
+ sharpsl_pm.full_count = 0;
+
+ return 1;
+ } else if (sharpsl_pm.charge_mode != CHRG_ON) {
+ return 1;
+ }
+
+ if (sharpsl_pm.full_count == 0) {
+ int time;
+
+ dev_dbg(sharpsl_pm.dev, "Offline Charger: Step 2\n");
+
+ if (sharpsl_check_battery(0) < 0)
+ return sharpsl_off_charge_error();
+
+ CHARGE_OFF();
+ mdelay(SHARPSL_CHARGE_WAIT_TIME);
+ CHARGE_ON();
+ sharpsl_pm.charge_mode = CHRG_ON;
+
+ mdelay(SHARPSL_CHARGE_CO_CHECK_TIME);
+
+ time = RCNR;
+ while(1) {
+ /* Check if any wakeup event has occurred */
+ if (sharpsl_pm.machinfo->charger_wakeup() != 0)
+ return 0;
+ /* Check for timeout */
+ if ((RCNR - time) > SHARPSL_WAIT_CO_TIME)
+ return 1;
+ if (STATUS_CHRG_FULL) {
+ dev_dbg(sharpsl_pm.dev, "Offline Charger: Charge full occured. Retrying to check\n");
+ sharpsl_pm.full_count++;
+ CHARGE_OFF();
+ mdelay(SHARPSL_CHARGE_WAIT_TIME);
+ CHARGE_ON();
+ return 1;
+ }
+ }
+ }
+
+ dev_dbg(sharpsl_pm.dev, "Offline Charger: Step 3\n");
+
+ mdelay(SHARPSL_CHARGE_CO_CHECK_TIME);
+
+ time = RCNR;
+ while(1) {
+ /* Check if any wakeup event has occurred */
+ if (sharpsl_pm.machinfo->charger_wakeup() != 0)
+ return 0;
+ /* Check for timeout */
+ if ((RCNR-time) > SHARPSL_WAIT_CO_TIME) {
+ if (sharpsl_pm.full_count > SHARPSL_CHARGE_RETRY_CNT) {
+ dev_dbg(sharpsl_pm.dev, "Offline Charger: Not charged sufficiently. Retrying.\n");
+ sharpsl_pm.full_count = 0;
+ }
+ sharpsl_pm.full_count++;
+ return 1;
+ }
+ if (STATUS_CHRG_FULL) {
+ dev_dbg(sharpsl_pm.dev, "Offline Charger: Charging complete.\n");
+ CHARGE_LED_OFF();
+ CHARGE_OFF();
+ sharpsl_pm.charge_mode = CHRG_DONE;
+ return 1;
+ }
+ }
+}
+
+
+static ssize_t battery_percentage_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n",sharpsl_pm.battstat.mainbat_percent);
+}
+
+static ssize_t battery_voltage_show(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n",sharpsl_pm.battstat.mainbat_voltage);
+}
+
+static DEVICE_ATTR(battery_percentage, 0444, battery_percentage_show, NULL);
+static DEVICE_ATTR(battery_voltage, 0444, battery_voltage_show, NULL);
+
+extern void (*apm_get_power_status)(struct apm_power_info *);
+
+static void sharpsl_apm_get_power_status(struct apm_power_info *info)
+{
+ info->ac_line_status = sharpsl_pm.battstat.ac_status;
+
+ if (sharpsl_pm.charge_mode == CHRG_ON)
+ info->battery_status = APM_BATTERY_STATUS_CHARGING;
+ else
+ info->battery_status = sharpsl_pm.battstat.mainbat_status;
+
+ info->battery_flag = (1 << info->battery_status);
+ info->battery_life = sharpsl_pm.battstat.mainbat_percent;
+}
+
+static struct pm_ops sharpsl_pm_ops = {
+ .pm_disk_mode = PM_DISK_FIRMWARE,
+ .prepare = pxa_pm_prepare,
+ .enter = corgi_pxa_pm_enter,
+ .finish = pxa_pm_finish,
+};
+
+static int __init sharpsl_pm_probe(struct device *dev)
+{
+ if (!dev->platform_data)
+ return -EINVAL;
+
+ sharpsl_pm.dev = dev;
+ sharpsl_pm.machinfo = dev->platform_data;
+ sharpsl_pm.charge_mode = CHRG_OFF;
+ sharpsl_pm.flags = 0;
+
+ sharpsl_pm.machinfo->init();
+
+ init_timer(&sharpsl_pm.ac_timer);
+ sharpsl_pm.ac_timer.function = sharpsl_ac_timer;
+
+ init_timer(&sharpsl_pm.chrg_full_timer);
+ sharpsl_pm.chrg_full_timer.function = sharpsl_chrg_full_timer;
+
+ pxa_gpio_mode(sharpsl_pm.machinfo->gpio_acin | GPIO_IN);
+ pxa_gpio_mode(sharpsl_pm.machinfo->gpio_batfull | GPIO_IN);
+ pxa_gpio_mode(sharpsl_pm.machinfo->gpio_batlock | GPIO_IN);
+
+ /* Register interrupt handlers */
+ if (request_irq(IRQ_GPIO(sharpsl_pm.machinfo->gpio_acin), sharpsl_ac_isr, SA_INTERRUPT, "AC Input Detect", sharpsl_ac_isr)) {
+ dev_err(sharpsl_pm.dev, "Could not get irq %d.\n", IRQ_GPIO(sharpsl_pm.machinfo->gpio_acin));
+ }
+ else set_irq_type(IRQ_GPIO(sharpsl_pm.machinfo->gpio_acin),IRQT_BOTHEDGE);
+
+ if (request_irq(IRQ_GPIO(sharpsl_pm.machinfo->gpio_batlock), sharpsl_fatal_isr, SA_INTERRUPT, "Battery Cover", sharpsl_fatal_isr)) {
+ dev_err(sharpsl_pm.dev, "Could not get irq %d.\n", IRQ_GPIO(sharpsl_pm.machinfo->gpio_batlock));
+ }
+ else set_irq_type(IRQ_GPIO(sharpsl_pm.machinfo->gpio_batlock),IRQT_FALLING);
+
+ if (sharpsl_pm.machinfo->gpio_fatal) {
+ if (request_irq(IRQ_GPIO(sharpsl_pm.machinfo->gpio_fatal), sharpsl_fatal_isr, SA_INTERRUPT, "Fatal Battery", sharpsl_fatal_isr)) {
+ dev_err(sharpsl_pm.dev, "Could not get irq %d.\n", IRQ_GPIO(sharpsl_pm.machinfo->gpio_fatal));
+ }
+ else set_irq_type(IRQ_GPIO(sharpsl_pm.machinfo->gpio_fatal),IRQT_FALLING);
+ }
+
+ if (!machine_is_corgi())
+ {
+ /* Register interrupt handler. */
+ if (request_irq(IRQ_GPIO(sharpsl_pm.machinfo->gpio_batfull), sharpsl_chrg_full_isr, SA_INTERRUPT, "CO", sharpsl_chrg_full_isr)) {
+ dev_err(sharpsl_pm.dev, "Could not get irq %d.\n", IRQ_GPIO(sharpsl_pm.machinfo->gpio_batfull));
+ }
+ else set_irq_type(IRQ_GPIO(sharpsl_pm.machinfo->gpio_batfull),IRQT_RISING);
+ }
+
+ device_create_file(dev, &dev_attr_battery_percentage);
+ device_create_file(dev, &dev_attr_battery_voltage);
+
+ apm_get_power_status = sharpsl_apm_get_power_status;
+
+ pm_set_ops(&sharpsl_pm_ops);
+
+ mod_timer(&sharpsl_pm.ac_timer, jiffies + msecs_to_jiffies(250));
+
+ return 0;
+}
+
+static int sharpsl_pm_remove(struct device *dev)
+{
+ pm_set_ops(NULL);
+
+ device_remove_file(dev, &dev_attr_battery_percentage);
+ device_remove_file(dev, &dev_attr_battery_voltage);
+
+ free_irq(IRQ_GPIO(sharpsl_pm.machinfo->gpio_acin), sharpsl_ac_isr);
+ free_irq(IRQ_GPIO(sharpsl_pm.machinfo->gpio_batlock), sharpsl_fatal_isr);
+
+ if (sharpsl_pm.machinfo->gpio_fatal)
+ free_irq(IRQ_GPIO(sharpsl_pm.machinfo->gpio_fatal), sharpsl_fatal_isr);
+
+ if (!machine_is_corgi())
+ free_irq(IRQ_GPIO(sharpsl_pm.machinfo->gpio_batfull), sharpsl_chrg_full_isr);
+
+ del_timer_sync(&sharpsl_pm.chrg_full_timer);
+ del_timer_sync(&sharpsl_pm.ac_timer);
+
+ return 0;
+}
+
+static struct device_driver sharpsl_pm_driver = {
+ .name = "sharpsl-pm",
+ .bus = &platform_bus_type,
+ .probe = sharpsl_pm_probe,
+ .remove = sharpsl_pm_remove,
+ .suspend = sharpsl_pm_suspend,
+ .resume = sharpsl_pm_resume,
+};
+
+static int __devinit sharpsl_pm_init(void)
+{
+ return driver_register(&sharpsl_pm_driver);
+}
+
+static void sharpsl_pm_exit(void)
+{
+ driver_unregister(&sharpsl_pm_driver);
+}
+
+late_initcall(sharpsl_pm_init);
+module_exit(sharpsl_pm_exit);
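To make the measurement path above concrete: five readings are taken, get_select_val() drops the extremes and averages the rest, and get_percentage() maps the result through the threshold table. A standalone illustration of that logic (not part of the patch) with made-up readings and a shortened table:

/* Standalone sketch of the sampling logic used above; sample values and the
 * shortened table are invented for illustration. */
#include <stdio.h>

struct battery_thresh { int voltage; int percentage; };

static const struct battery_thresh levels[] = {
        { 213, 100 }, { 195, 55 }, { 178, 5 }, { 0, 0 },        /* shortened */
};

static int select_val(const int *val)
{
        int i, min = val[0], max = val[0], sum = 0;

        for (i = 0; i < 5; i++) {
                if (val[i] < min) min = val[i];
                if (val[i] > max) max = val[i];
                sum += val[i];
        }
        return (sum - min - max) / 3;   /* average with the extremes removed */
}

static int percentage(int voltage)
{
        int i = sizeof(levels) / sizeof(levels[0]) - 1;

        while (i > 0 && voltage > levels[i].voltage)
                i--;
        return levels[i].percentage;
}

int main(void)
{
        int samples[5] = { 196, 194, 251, 102, 195 };   /* hypothetical ADC readings */
        int v = select_val(samples);

        printf("filtered voltage %d -> %d%%\n", v, percentage(v));
        return 0;
}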
diff --git a/arch/arm/mach-pxa/ssp.c b/arch/arm/mach-pxa/ssp.c
index 4d826c02131..a68b30eff4d 100644
--- a/arch/arm/mach-pxa/ssp.c
+++ b/arch/arm/mach-pxa/ssp.c
@@ -19,6 +19,8 @@
* 22nd Aug 2003 Initial version.
* 20th Dec 2004 Added ssp_config for changing port config without
* closing the port.
+ * 4th Aug 2005 Added option to disable irq handler registration and
+ * cleaned up irq and clock detection.
*/
#include <linux/module.h>
@@ -37,6 +39,26 @@
#define PXA_SSP_PORTS 3
+struct ssp_info_ {
+ int irq;
+ u32 clock;
+};
+
+/*
+ * SSP port clock and IRQ settings
+ */
+static const struct ssp_info_ ssp_info[PXA_SSP_PORTS] = {
+#if defined (CONFIG_PXA27x)
+ {IRQ_SSP, CKEN23_SSP1},
+ {IRQ_SSP2, CKEN3_SSP2},
+ {IRQ_SSP3, CKEN4_SSP3},
+#else
+ {IRQ_SSP, CKEN3_SSP},
+ {IRQ_NSSP, CKEN9_NSSP},
+ {IRQ_ASSP, CKEN10_ASSP},
+#endif
+};
+
static DECLARE_MUTEX(sem);
static int use_count[PXA_SSP_PORTS] = {0, 0, 0};
@@ -210,9 +232,9 @@ int ssp_config(struct ssp_dev *dev, u32 mode, u32 flags, u32 psp_flags, u32 spee
* %-EBUSY if the resources are already in use
* %0 on success
*/
-int ssp_init(struct ssp_dev *dev, u32 port)
+int ssp_init(struct ssp_dev *dev, u32 port, u32 init_flags)
{
- int ret, irq;
+ int ret;
if (port > PXA_SSP_PORTS || port == 0)
return -ENODEV;
@@ -229,61 +251,20 @@ int ssp_init(struct ssp_dev *dev, u32 port)
up(&sem);
return -EBUSY;
}
-
- switch (port) {
- case 1:
- irq = IRQ_SSP;
- break;
-#if defined (CONFIG_PXA27x)
- case 2:
- irq = IRQ_SSP2;
- break;
- case 3:
- irq = IRQ_SSP3;
- break;
-#else
- case 2:
- irq = IRQ_NSSP;
- break;
- case 3:
- irq = IRQ_ASSP;
- break;
-#endif
- default:
- return -ENODEV;
- }
-
dev->port = port;
- ret = request_irq(irq, ssp_interrupt, 0, "SSP", dev);
- if (ret)
- goto out_region;
+ /* Do we need to register an IRQ handler? */
+ if (!(init_flags & SSP_NO_IRQ)) {
+ ret = request_irq(ssp_info[port-1].irq, ssp_interrupt,
+ 0, "SSP", dev);
+ if (ret)
+ goto out_region;
+ dev->irq = ssp_info[port-1].irq;
+ } else
+ dev->irq = 0;
/* turn on SSP port clock */
- switch (dev->port) {
-#if defined (CONFIG_PXA27x)
- case 1:
- pxa_set_cken(CKEN23_SSP1, 1);
- break;
- case 2:
- pxa_set_cken(CKEN3_SSP2, 1);
- break;
- case 3:
- pxa_set_cken(CKEN4_SSP3, 1);
- break;
-#else
- case 1:
- pxa_set_cken(CKEN3_SSP, 1);
- break;
- case 2:
- pxa_set_cken(CKEN9_NSSP, 1);
- break;
- case 3:
- pxa_set_cken(CKEN10_ASSP, 1);
- break;
-#endif
- }
-
+ pxa_set_cken(ssp_info[port-1].clock, 1);
up(&sem);
return 0;
@@ -301,46 +282,17 @@ out_region:
*/
void ssp_exit(struct ssp_dev *dev)
{
- int irq;
-
down(&sem);
SSCR0_P(dev->port) &= ~SSCR0_SSE;
- /* find irq, save power and turn off SSP port clock */
- switch (dev->port) {
-#if defined (CONFIG_PXA27x)
- case 1:
- irq = IRQ_SSP;
- pxa_set_cken(CKEN23_SSP1, 0);
- break;
- case 2:
- irq = IRQ_SSP2;
- pxa_set_cken(CKEN3_SSP2, 0);
- break;
- case 3:
- irq = IRQ_SSP3;
- pxa_set_cken(CKEN4_SSP3, 0);
- break;
-#else
- case 1:
- irq = IRQ_SSP;
- pxa_set_cken(CKEN3_SSP, 0);
- break;
- case 2:
- irq = IRQ_NSSP;
- pxa_set_cken(CKEN9_NSSP, 0);
- break;
- case 3:
- irq = IRQ_ASSP;
- pxa_set_cken(CKEN10_ASSP, 0);
- break;
-#endif
- default:
- printk(KERN_WARNING "SSP: tried to close invalid port\n");
- return;
+ if (dev->port > PXA_SSP_PORTS || dev->port == 0) {
+ printk(KERN_WARNING "SSP: tried to close invalid port\n");
+ return;
}
- free_irq(irq, dev);
+ pxa_set_cken(ssp_info[dev->port-1].clock, 0);
+ if (dev->irq)
+ free_irq(dev->irq, dev);
release_mem_region(__PREG(SSCR0_P(dev->port)), 0x2c);
use_count[dev->port - 1]--;
up(&sem);
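With the table-driven rework above, ssp_init() gains an init_flags argument; the corgi_ssp.c hunk earlier simply passes 0 to keep the old behaviour. A usage sketch, assuming only the patch's SSP_NO_IRQ flag, for a client that polls the port and does not want the shared interrupt handler:

/* Usage sketch only; example_* names are invented. Assumes "ssp.h" declares
 * struct ssp_dev, ssp_init(), ssp_exit() and SSP_NO_IRQ. */
#include "ssp.h"

static struct ssp_dev example_ssp_dev;          /* hypothetical client state */

static int example_ssp_attach(void)
{
        int ret;

        /* Port 1, no IRQ handler: the caller will poll the port itself. */
        ret = ssp_init(&example_ssp_dev, 1, SSP_NO_IRQ);
        if (ret)
                return ret;

        /* ... configure and use the port ... */

        ssp_exit(&example_ssp_dev);             /* turns the port clock off again */
        return 0;
}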
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index e3c14d6b432..e84fdde6edf 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -102,8 +102,8 @@ config CPU_ARM922T
# ARM925T
config CPU_ARM925T
bool "Support ARM925T processor" if ARCH_OMAP1
- depends on ARCH_OMAP1510
- default y if ARCH_OMAP1510
+ depends on ARCH_OMAP15XX
+ default y if ARCH_OMAP15XX
select CPU_32v4
select CPU_ABRT_EV4T
select CPU_CACHE_V4WT
@@ -242,7 +242,7 @@ config CPU_XSCALE
# ARMv6
config CPU_V6
bool "Support ARM V6 processor"
- depends on ARCH_INTEGRATOR || MACH_REALVIEW_EB
+ depends on ARCH_INTEGRATOR || MACH_REALVIEW_EB || ARCH_OMAP2
select CPU_32v6
select CPU_ABRT_EV6
select CPU_CACHE_V6
diff --git a/arch/arm/plat-omap/Makefile b/arch/arm/plat-omap/Makefile
index 7e144f9cad1..9ccf1943fc9 100644
--- a/arch/arm/plat-omap/Makefile
+++ b/arch/arm/plat-omap/Makefile
@@ -3,7 +3,7 @@
#
# Common support
-obj-y := common.o sram.o sram-fn.o clock.o dma.o mux.o gpio.o mcbsp.o usb.o
+obj-y := common.o sram.o sram-fn.o clock.o devices.o dma.o mux.o gpio.o mcbsp.o usb.o
obj-m :=
obj-n :=
obj- :=
diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c
index a020fe16428..7ce39b986e2 100644
--- a/arch/arm/plat-omap/clock.c
+++ b/arch/arm/plat-omap/clock.c
@@ -1,15 +1,20 @@
/*
* linux/arch/arm/plat-omap/clock.c
*
- * Copyright (C) 2004 Nokia corporation
+ * Copyright (C) 2004 - 2005 Nokia corporation
* Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
*
+ * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
-#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/config.h>
#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/module.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
@@ -18,562 +23,20 @@
#include <asm/io.h>
#include <asm/semaphore.h>
#include <asm/hardware/clock.h>
-#include <asm/arch/board.h>
-#include <asm/arch/usb.h>
-#include "clock.h"
-#include "sram.h"
+#include <asm/arch/clock.h>
-static LIST_HEAD(clocks);
+LIST_HEAD(clocks);
static DECLARE_MUTEX(clocks_sem);
-static DEFINE_SPINLOCK(clockfw_lock);
-static void propagate_rate(struct clk * clk);
-/* UART clock function */
-static int set_uart_rate(struct clk * clk, unsigned long rate);
-/* External clock (MCLK & BCLK) functions */
-static int set_ext_clk_rate(struct clk * clk, unsigned long rate);
-static long round_ext_clk_rate(struct clk * clk, unsigned long rate);
-static void init_ext_clk(struct clk * clk);
-/* MPU virtual clock functions */
-static int select_table_rate(struct clk * clk, unsigned long rate);
-static long round_to_table_rate(struct clk * clk, unsigned long rate);
-void clk_setdpll(__u16, __u16);
-
-static struct mpu_rate rate_table[] = {
- /* MPU MHz, xtal MHz, dpll1 MHz, CKCTL, DPLL_CTL
- * armdiv, dspdiv, dspmmu, tcdiv, perdiv, lcddiv
- */
-#if defined(CONFIG_OMAP_ARM_216MHZ)
- { 216000000, 12000000, 216000000, 0x050d, 0x2910 }, /* 1/1/2/2/2/8 */
-#endif
-#if defined(CONFIG_OMAP_ARM_195MHZ)
- { 195000000, 13000000, 195000000, 0x050e, 0x2790 }, /* 1/1/2/2/4/8 */
-#endif
-#if defined(CONFIG_OMAP_ARM_192MHZ)
- { 192000000, 19200000, 192000000, 0x050f, 0x2510 }, /* 1/1/2/2/8/8 */
- { 192000000, 12000000, 192000000, 0x050f, 0x2810 }, /* 1/1/2/2/8/8 */
- { 96000000, 12000000, 192000000, 0x055f, 0x2810 }, /* 2/2/2/2/8/8 */
- { 48000000, 12000000, 192000000, 0x0baf, 0x2810 }, /* 4/8/4/4/8/8 */
- { 24000000, 12000000, 192000000, 0x0fff, 0x2810 }, /* 8/8/8/8/8/8 */
-#endif
-#if defined(CONFIG_OMAP_ARM_182MHZ)
- { 182000000, 13000000, 182000000, 0x050e, 0x2710 }, /* 1/1/2/2/4/8 */
-#endif
-#if defined(CONFIG_OMAP_ARM_168MHZ)
- { 168000000, 12000000, 168000000, 0x010f, 0x2710 }, /* 1/1/1/2/8/8 */
-#endif
-#if defined(CONFIG_OMAP_ARM_150MHZ)
- { 150000000, 12000000, 150000000, 0x010a, 0x2cb0 }, /* 1/1/1/2/4/4 */
-#endif
-#if defined(CONFIG_OMAP_ARM_120MHZ)
- { 120000000, 12000000, 120000000, 0x010a, 0x2510 }, /* 1/1/1/2/4/4 */
-#endif
-#if defined(CONFIG_OMAP_ARM_96MHZ)
- { 96000000, 12000000, 96000000, 0x0005, 0x2410 }, /* 1/1/1/1/2/2 */
-#endif
-#if defined(CONFIG_OMAP_ARM_60MHZ)
- { 60000000, 12000000, 60000000, 0x0005, 0x2290 }, /* 1/1/1/1/2/2 */
-#endif
-#if defined(CONFIG_OMAP_ARM_30MHZ)
- { 30000000, 12000000, 60000000, 0x0555, 0x2290 }, /* 2/2/2/2/2/2 */
-#endif
- { 0, 0, 0, 0, 0 },
-};
-
-
-static void ckctl_recalc(struct clk * clk);
-int __clk_enable(struct clk *clk);
-void __clk_disable(struct clk *clk);
-void __clk_unuse(struct clk *clk);
-int __clk_use(struct clk *clk);
-
-
-static void followparent_recalc(struct clk * clk)
-{
- clk->rate = clk->parent->rate;
-}
-
-
-static void watchdog_recalc(struct clk * clk)
-{
- clk->rate = clk->parent->rate / 14;
-}
-
-static void uart_recalc(struct clk * clk)
-{
- unsigned int val = omap_readl(clk->enable_reg);
- if (val & clk->enable_bit)
- clk->rate = 48000000;
- else
- clk->rate = 12000000;
-}
-
-static struct clk ck_ref = {
- .name = "ck_ref",
- .rate = 12000000,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
- ALWAYS_ENABLED,
-};
-
-static struct clk ck_dpll1 = {
- .name = "ck_dpll1",
- .parent = &ck_ref,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
- RATE_PROPAGATES | ALWAYS_ENABLED,
-};
-
-static struct clk ck_dpll1out = {
- .name = "ck_dpll1out",
- .parent = &ck_dpll1,
- .flags = CLOCK_IN_OMAP16XX,
- .enable_reg = ARM_IDLECT2,
- .enable_bit = EN_CKOUT_ARM,
- .recalc = &followparent_recalc,
-};
-
-static struct clk arm_ck = {
- .name = "arm_ck",
- .parent = &ck_dpll1,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
- RATE_CKCTL | RATE_PROPAGATES | ALWAYS_ENABLED,
- .rate_offset = CKCTL_ARMDIV_OFFSET,
- .recalc = &ckctl_recalc,
-};
-
-static struct clk armper_ck = {
- .name = "armper_ck",
- .parent = &ck_dpll1,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
- RATE_CKCTL,
- .enable_reg = ARM_IDLECT2,
- .enable_bit = EN_PERCK,
- .rate_offset = CKCTL_PERDIV_OFFSET,
- .recalc = &ckctl_recalc,
-};
-
-static struct clk arm_gpio_ck = {
- .name = "arm_gpio_ck",
- .parent = &ck_dpll1,
- .flags = CLOCK_IN_OMAP1510,
- .enable_reg = ARM_IDLECT2,
- .enable_bit = EN_GPIOCK,
- .recalc = &followparent_recalc,
-};
-
-static struct clk armxor_ck = {
- .name = "armxor_ck",
- .parent = &ck_ref,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX,
- .enable_reg = ARM_IDLECT2,
- .enable_bit = EN_XORPCK,
- .recalc = &followparent_recalc,
-};
-
-static struct clk armtim_ck = {
- .name = "armtim_ck",
- .parent = &ck_ref,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX,
- .enable_reg = ARM_IDLECT2,
- .enable_bit = EN_TIMCK,
- .recalc = &followparent_recalc,
-};
-
-static struct clk armwdt_ck = {
- .name = "armwdt_ck",
- .parent = &ck_ref,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX,
- .enable_reg = ARM_IDLECT2,
- .enable_bit = EN_WDTCK,
- .recalc = &watchdog_recalc,
-};
-
-static struct clk arminth_ck16xx = {
- .name = "arminth_ck",
- .parent = &arm_ck,
- .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
- .recalc = &followparent_recalc,
- /* Note: On 16xx the frequency can be divided by 2 by programming
- * ARM_CKCTL:ARM_INTHCK_SEL(14) to 1
- *
- * 1510 version is in TC clocks.
- */
-};
-
-static struct clk dsp_ck = {
- .name = "dsp_ck",
- .parent = &ck_dpll1,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
- RATE_CKCTL,
- .enable_reg = ARM_CKCTL,
- .enable_bit = EN_DSPCK,
- .rate_offset = CKCTL_DSPDIV_OFFSET,
- .recalc = &ckctl_recalc,
-};
-
-static struct clk dspmmu_ck = {
- .name = "dspmmu_ck",
- .parent = &ck_dpll1,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
- RATE_CKCTL | ALWAYS_ENABLED,
- .rate_offset = CKCTL_DSPMMUDIV_OFFSET,
- .recalc = &ckctl_recalc,
-};
-
-static struct clk dspper_ck = {
- .name = "dspper_ck",
- .parent = &ck_dpll1,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
- RATE_CKCTL | DSP_DOMAIN_CLOCK | VIRTUAL_IO_ADDRESS,
- .enable_reg = DSP_IDLECT2,
- .enable_bit = EN_PERCK,
- .rate_offset = CKCTL_PERDIV_OFFSET,
- .recalc = &followparent_recalc,
- //.recalc = &ckctl_recalc,
-};
-
-static struct clk dspxor_ck = {
- .name = "dspxor_ck",
- .parent = &ck_ref,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
- DSP_DOMAIN_CLOCK | VIRTUAL_IO_ADDRESS,
- .enable_reg = DSP_IDLECT2,
- .enable_bit = EN_XORPCK,
- .recalc = &followparent_recalc,
-};
-
-static struct clk dsptim_ck = {
- .name = "dsptim_ck",
- .parent = &ck_ref,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
- DSP_DOMAIN_CLOCK | VIRTUAL_IO_ADDRESS,
- .enable_reg = DSP_IDLECT2,
- .enable_bit = EN_DSPTIMCK,
- .recalc = &followparent_recalc,
-};
-
-static struct clk tc_ck = {
- .name = "tc_ck",
- .parent = &ck_dpll1,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP730 |
- RATE_CKCTL | RATE_PROPAGATES | ALWAYS_ENABLED,
- .rate_offset = CKCTL_TCDIV_OFFSET,
- .recalc = &ckctl_recalc,
-};
-
-static struct clk arminth_ck1510 = {
- .name = "arminth_ck",
- .parent = &tc_ck,
- .flags = CLOCK_IN_OMAP1510 | ALWAYS_ENABLED,
- .recalc = &followparent_recalc,
- /* Note: On 1510 the frequency follows TC_CK
- *
- * 16xx version is in MPU clocks.
- */
-};
-
-static struct clk tipb_ck = {
- .name = "tibp_ck",
- .parent = &tc_ck,
- .flags = CLOCK_IN_OMAP1510 | ALWAYS_ENABLED,
- .recalc = &followparent_recalc,
-};
-
-static struct clk l3_ocpi_ck = {
- .name = "l3_ocpi_ck",
- .parent = &tc_ck,
- .flags = CLOCK_IN_OMAP16XX,
- .enable_reg = ARM_IDLECT3,
- .enable_bit = EN_OCPI_CK,
- .recalc = &followparent_recalc,
-};
+DEFINE_SPINLOCK(clockfw_lock);
-static struct clk tc1_ck = {
- .name = "tc1_ck",
- .parent = &tc_ck,
- .flags = CLOCK_IN_OMAP16XX,
- .enable_reg = ARM_IDLECT3,
- .enable_bit = EN_TC1_CK,
- .recalc = &followparent_recalc,
-};
+static struct clk_functions *arch_clock;
-static struct clk tc2_ck = {
- .name = "tc2_ck",
- .parent = &tc_ck,
- .flags = CLOCK_IN_OMAP16XX,
- .enable_reg = ARM_IDLECT3,
- .enable_bit = EN_TC2_CK,
- .recalc = &followparent_recalc,
-};
+/*-------------------------------------------------------------------------
+ * Standard clock functions defined in asm/hardware/clock.h
+ *-------------------------------------------------------------------------*/
-static struct clk dma_ck = {
- .name = "dma_ck",
- .parent = &tc_ck,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
- ALWAYS_ENABLED,
- .recalc = &followparent_recalc,
-};
-
-static struct clk dma_lcdfree_ck = {
- .name = "dma_lcdfree_ck",
- .parent = &tc_ck,
- .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
- .recalc = &followparent_recalc,
-};
-
-static struct clk api_ck = {
- .name = "api_ck",
- .parent = &tc_ck,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX,
- .enable_reg = ARM_IDLECT2,
- .enable_bit = EN_APICK,
- .recalc = &followparent_recalc,
-};
-
-static struct clk lb_ck = {
- .name = "lb_ck",
- .parent = &tc_ck,
- .flags = CLOCK_IN_OMAP1510,
- .enable_reg = ARM_IDLECT2,
- .enable_bit = EN_LBCK,
- .recalc = &followparent_recalc,
-};
-
-static struct clk rhea1_ck = {
- .name = "rhea1_ck",
- .parent = &tc_ck,
- .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
- .recalc = &followparent_recalc,
-};
-
-static struct clk rhea2_ck = {
- .name = "rhea2_ck",
- .parent = &tc_ck,
- .flags = CLOCK_IN_OMAP16XX | ALWAYS_ENABLED,
- .recalc = &followparent_recalc,
-};
-
-static struct clk lcd_ck = {
- .name = "lcd_ck",
- .parent = &ck_dpll1,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | CLOCK_IN_OMAP730 |
- RATE_CKCTL,
- .enable_reg = ARM_IDLECT2,
- .enable_bit = EN_LCDCK,
- .rate_offset = CKCTL_LCDDIV_OFFSET,
- .recalc = &ckctl_recalc,
-};
-
-static struct clk uart1_1510 = {
- .name = "uart1_ck",
- /* Direct from ULPD, no parent */
- .rate = 12000000,
- .flags = CLOCK_IN_OMAP1510 | ENABLE_REG_32BIT | ALWAYS_ENABLED,
- .enable_reg = MOD_CONF_CTRL_0,
- .enable_bit = 29, /* Chooses between 12MHz and 48MHz */
- .set_rate = &set_uart_rate,
- .recalc = &uart_recalc,
-};
-
-static struct clk uart1_16xx = {
- .name = "uart1_ck",
- /* Direct from ULPD, no parent */
- .rate = 48000000,
- .flags = CLOCK_IN_OMAP16XX | RATE_FIXED | ENABLE_REG_32BIT,
- .enable_reg = MOD_CONF_CTRL_0,
- .enable_bit = 29,
-};
-
-static struct clk uart2_ck = {
- .name = "uart2_ck",
- /* Direct from ULPD, no parent */
- .rate = 12000000,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX | ENABLE_REG_32BIT |
- ALWAYS_ENABLED,
- .enable_reg = MOD_CONF_CTRL_0,
- .enable_bit = 30, /* Chooses between 12MHz and 48MHz */
- .set_rate = &set_uart_rate,
- .recalc = &uart_recalc,
-};
-
-static struct clk uart3_1510 = {
- .name = "uart3_ck",
- /* Direct from ULPD, no parent */
- .rate = 12000000,
- .flags = CLOCK_IN_OMAP1510 | ENABLE_REG_32BIT | ALWAYS_ENABLED,
- .enable_reg = MOD_CONF_CTRL_0,
- .enable_bit = 31, /* Chooses between 12MHz and 48MHz */
- .set_rate = &set_uart_rate,
- .recalc = &uart_recalc,
-};
-
-static struct clk uart3_16xx = {
- .name = "uart3_ck",
- /* Direct from ULPD, no parent */
- .rate = 48000000,
- .flags = CLOCK_IN_OMAP16XX | RATE_FIXED | ENABLE_REG_32BIT,
- .enable_reg = MOD_CONF_CTRL_0,
- .enable_bit = 31,
-};
-
-static struct clk usb_clko = { /* 6 MHz output on W4_USB_CLKO */
- .name = "usb_clko",
- /* Direct from ULPD, no parent */
- .rate = 6000000,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
- RATE_FIXED | ENABLE_REG_32BIT,
- .enable_reg = ULPD_CLOCK_CTRL,
- .enable_bit = USB_MCLK_EN_BIT,
-};
-
-static struct clk usb_hhc_ck1510 = {
- .name = "usb_hhc_ck",
- /* Direct from ULPD, no parent */
- .rate = 48000000, /* Actually 2 clocks, 12MHz and 48MHz */
- .flags = CLOCK_IN_OMAP1510 |
- RATE_FIXED | ENABLE_REG_32BIT,
- .enable_reg = MOD_CONF_CTRL_0,
- .enable_bit = USB_HOST_HHC_UHOST_EN,
-};
-
-static struct clk usb_hhc_ck16xx = {
- .name = "usb_hhc_ck",
- /* Direct from ULPD, no parent */
- .rate = 48000000,
- /* OTG_SYSCON_2.OTG_PADEN == 0 (not 1510-compatible) */
- .flags = CLOCK_IN_OMAP16XX |
- RATE_FIXED | ENABLE_REG_32BIT,
- .enable_reg = OTG_BASE + 0x08 /* OTG_SYSCON_2 */,
- .enable_bit = 8 /* UHOST_EN */,
-};
-
-static struct clk usb_dc_ck = {
- .name = "usb_dc_ck",
- /* Direct from ULPD, no parent */
- .rate = 48000000,
- .flags = CLOCK_IN_OMAP16XX | RATE_FIXED,
- .enable_reg = SOFT_REQ_REG,
- .enable_bit = 4,
-};
-
-static struct clk mclk_1510 = {
- .name = "mclk",
- /* Direct from ULPD, no parent. May be enabled by ext hardware. */
- .rate = 12000000,
- .flags = CLOCK_IN_OMAP1510 | RATE_FIXED,
-};
-
-static struct clk mclk_16xx = {
- .name = "mclk",
- /* Direct from ULPD, no parent. May be enabled by ext hardware. */
- .flags = CLOCK_IN_OMAP16XX,
- .enable_reg = COM_CLK_DIV_CTRL_SEL,
- .enable_bit = COM_ULPD_PLL_CLK_REQ,
- .set_rate = &set_ext_clk_rate,
- .round_rate = &round_ext_clk_rate,
- .init = &init_ext_clk,
-};
-
-static struct clk bclk_1510 = {
- .name = "bclk",
- /* Direct from ULPD, no parent. May be enabled by ext hardware. */
- .rate = 12000000,
- .flags = CLOCK_IN_OMAP1510 | RATE_FIXED,
-};
-
-static struct clk bclk_16xx = {
- .name = "bclk",
- /* Direct from ULPD, no parent. May be enabled by ext hardware. */
- .flags = CLOCK_IN_OMAP16XX,
- .enable_reg = SWD_CLK_DIV_CTRL_SEL,
- .enable_bit = SWD_ULPD_PLL_CLK_REQ,
- .set_rate = &set_ext_clk_rate,
- .round_rate = &round_ext_clk_rate,
- .init = &init_ext_clk,
-};
-
-static struct clk mmc1_ck = {
- .name = "mmc1_ck",
- /* Functional clock is direct from ULPD, interface clock is ARMPER */
- .parent = &armper_ck,
- .rate = 48000000,
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
- RATE_FIXED | ENABLE_REG_32BIT,
- .enable_reg = MOD_CONF_CTRL_0,
- .enable_bit = 23,
-};
-
-static struct clk mmc2_ck = {
- .name = "mmc2_ck",
- /* Functional clock is direct from ULPD, interface clock is ARMPER */
- .parent = &armper_ck,
- .rate = 48000000,
- .flags = CLOCK_IN_OMAP16XX |
- RATE_FIXED | ENABLE_REG_32BIT,
- .enable_reg = MOD_CONF_CTRL_0,
- .enable_bit = 20,
-};
-
-static struct clk virtual_ck_mpu = {
- .name = "mpu",
- .flags = CLOCK_IN_OMAP1510 | CLOCK_IN_OMAP16XX |
- VIRTUAL_CLOCK | ALWAYS_ENABLED,
- .parent = &arm_ck, /* Is smarter alias for */
- .recalc = &followparent_recalc,
- .set_rate = &select_table_rate,
- .round_rate = &round_to_table_rate,
-};
-
-
-static struct clk * onchip_clks[] = {
- /* non-ULPD clocks */
- &ck_ref,
- &ck_dpll1,
- /* CK_GEN1 clocks */
- &ck_dpll1out,
- &arm_ck,
- &armper_ck,
- &arm_gpio_ck,
- &armxor_ck,
- &armtim_ck,
- &armwdt_ck,
- &arminth_ck1510, &arminth_ck16xx,
- /* CK_GEN2 clocks */
- &dsp_ck,
- &dspmmu_ck,
- &dspper_ck,
- &dspxor_ck,
- &dsptim_ck,
- /* CK_GEN3 clocks */
- &tc_ck,
- &tipb_ck,
- &l3_ocpi_ck,
- &tc1_ck,
- &tc2_ck,
- &dma_ck,
- &dma_lcdfree_ck,
- &api_ck,
- &lb_ck,
- &rhea1_ck,
- &rhea2_ck,
- &lcd_ck,
- /* ULPD clocks */
- &uart1_1510,
- &uart1_16xx,
- &uart2_ck,
- &uart3_1510,
- &uart3_16xx,
- &usb_clko,
- &usb_hhc_ck1510, &usb_hhc_ck16xx,
- &usb_dc_ck,
- &mclk_1510, &mclk_16xx,
- &bclk_1510, &bclk_16xx,
- &mmc1_ck,
- &mmc2_ck,
- /* Virtual clocks */
- &virtual_ck_mpu,
-};
-
-struct clk *clk_get(struct device *dev, const char *id)
+struct clk * clk_get(struct device *dev, const char *id)
{
struct clk *p, *clk = ERR_PTR(-ENOENT);
@@ -590,534 +53,200 @@ struct clk *clk_get(struct device *dev, const char *id)
}
EXPORT_SYMBOL(clk_get);
-
-void clk_put(struct clk *clk)
-{
- if (clk && !IS_ERR(clk))
- module_put(clk->owner);
-}
-EXPORT_SYMBOL(clk_put);
-
-
-int __clk_enable(struct clk *clk)
-{
- __u16 regval16;
- __u32 regval32;
-
- if (clk->flags & ALWAYS_ENABLED)
- return 0;
-
- if (unlikely(clk->enable_reg == 0)) {
- printk(KERN_ERR "clock.c: Enable for %s without enable code\n",
- clk->name);
- return 0;
- }
-
- if (clk->flags & DSP_DOMAIN_CLOCK) {
- __clk_use(&api_ck);
- }
-
- if (clk->flags & ENABLE_REG_32BIT) {
- if (clk->flags & VIRTUAL_IO_ADDRESS) {
- regval32 = __raw_readl(clk->enable_reg);
- regval32 |= (1 << clk->enable_bit);
- __raw_writel(regval32, clk->enable_reg);
- } else {
- regval32 = omap_readl(clk->enable_reg);
- regval32 |= (1 << clk->enable_bit);
- omap_writel(regval32, clk->enable_reg);
- }
- } else {
- if (clk->flags & VIRTUAL_IO_ADDRESS) {
- regval16 = __raw_readw(clk->enable_reg);
- regval16 |= (1 << clk->enable_bit);
- __raw_writew(regval16, clk->enable_reg);
- } else {
- regval16 = omap_readw(clk->enable_reg);
- regval16 |= (1 << clk->enable_bit);
- omap_writew(regval16, clk->enable_reg);
- }
- }
-
- if (clk->flags & DSP_DOMAIN_CLOCK) {
- __clk_unuse(&api_ck);
- }
-
- return 0;
-}
-
-
-void __clk_disable(struct clk *clk)
-{
- __u16 regval16;
- __u32 regval32;
-
- if (clk->enable_reg == 0)
- return;
-
- if (clk->flags & DSP_DOMAIN_CLOCK) {
- __clk_use(&api_ck);
- }
-
- if (clk->flags & ENABLE_REG_32BIT) {
- if (clk->flags & VIRTUAL_IO_ADDRESS) {
- regval32 = __raw_readl(clk->enable_reg);
- regval32 &= ~(1 << clk->enable_bit);
- __raw_writel(regval32, clk->enable_reg);
- } else {
- regval32 = omap_readl(clk->enable_reg);
- regval32 &= ~(1 << clk->enable_bit);
- omap_writel(regval32, clk->enable_reg);
- }
- } else {
- if (clk->flags & VIRTUAL_IO_ADDRESS) {
- regval16 = __raw_readw(clk->enable_reg);
- regval16 &= ~(1 << clk->enable_bit);
- __raw_writew(regval16, clk->enable_reg);
- } else {
- regval16 = omap_readw(clk->enable_reg);
- regval16 &= ~(1 << clk->enable_bit);
- omap_writew(regval16, clk->enable_reg);
- }
- }
-
- if (clk->flags & DSP_DOMAIN_CLOCK) {
- __clk_unuse(&api_ck);
- }
-}
-
-
-void __clk_unuse(struct clk *clk)
-{
- if (clk->usecount > 0 && !(--clk->usecount)) {
- __clk_disable(clk);
- if (likely(clk->parent))
- __clk_unuse(clk->parent);
- }
-}
-
-
-int __clk_use(struct clk *clk)
-{
- int ret = 0;
- if (clk->usecount++ == 0) {
- if (likely(clk->parent))
- ret = __clk_use(clk->parent);
-
- if (unlikely(ret != 0)) {
- clk->usecount--;
- return ret;
- }
-
- ret = __clk_enable(clk);
-
- if (unlikely(ret != 0) && clk->parent) {
- __clk_unuse(clk->parent);
- clk->usecount--;
- }
- }
-
- return ret;
-}
-
-
int clk_enable(struct clk *clk)
{
unsigned long flags;
- int ret;
+ int ret = 0;
spin_lock_irqsave(&clockfw_lock, flags);
- ret = __clk_enable(clk);
+ if (clk->enable)
+ ret = clk->enable(clk);
+ else if (arch_clock->clk_enable)
+ ret = arch_clock->clk_enable(clk);
+ else
+ printk(KERN_ERR "Could not enable clock %s\n", clk->name);
spin_unlock_irqrestore(&clockfw_lock, flags);
+
return ret;
}
EXPORT_SYMBOL(clk_enable);
-
void clk_disable(struct clk *clk)
{
unsigned long flags;
spin_lock_irqsave(&clockfw_lock, flags);
- __clk_disable(clk);
+ if (clk->disable)
+ clk->disable(clk);
+ else if (arch_clock->clk_disable)
+ arch_clock->clk_disable(clk);
+ else
+ printk(KERN_ERR "Could not disable clock %s\n", clk->name);
spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_disable);
-
int clk_use(struct clk *clk)
{
unsigned long flags;
int ret = 0;
spin_lock_irqsave(&clockfw_lock, flags);
- ret = __clk_use(clk);
+ if (arch_clock->clk_use)
+ ret = arch_clock->clk_use(clk);
spin_unlock_irqrestore(&clockfw_lock, flags);
+
return ret;
}
EXPORT_SYMBOL(clk_use);
-
void clk_unuse(struct clk *clk)
{
unsigned long flags;
spin_lock_irqsave(&clockfw_lock, flags);
- __clk_unuse(clk);
+ if (arch_clock->clk_unuse)
+ arch_clock->clk_unuse(clk);
spin_unlock_irqrestore(&clockfw_lock, flags);
}
EXPORT_SYMBOL(clk_unuse);
-
int clk_get_usecount(struct clk *clk)
{
- return clk->usecount;
-}
-EXPORT_SYMBOL(clk_get_usecount);
-
-
-unsigned long clk_get_rate(struct clk *clk)
-{
- return clk->rate;
-}
-EXPORT_SYMBOL(clk_get_rate);
-
-
-static __u16 verify_ckctl_value(__u16 newval)
-{
- /* This function checks for following limitations set
- * by the hardware (all conditions must be true):
- * DSPMMU_CK == DSP_CK or DSPMMU_CK == DSP_CK/2
- * ARM_CK >= TC_CK
- * DSP_CK >= TC_CK
- * DSPMMU_CK >= TC_CK
- *
- * In addition following rules are enforced:
- * LCD_CK <= TC_CK
- * ARMPER_CK <= TC_CK
- *
- * However, maximum frequencies are not checked for!
- */
- __u8 per_exp;
- __u8 lcd_exp;
- __u8 arm_exp;
- __u8 dsp_exp;
- __u8 tc_exp;
- __u8 dspmmu_exp;
-
- per_exp = (newval >> CKCTL_PERDIV_OFFSET) & 3;
- lcd_exp = (newval >> CKCTL_LCDDIV_OFFSET) & 3;
- arm_exp = (newval >> CKCTL_ARMDIV_OFFSET) & 3;
- dsp_exp = (newval >> CKCTL_DSPDIV_OFFSET) & 3;
- tc_exp = (newval >> CKCTL_TCDIV_OFFSET) & 3;
- dspmmu_exp = (newval >> CKCTL_DSPMMUDIV_OFFSET) & 3;
-
- if (dspmmu_exp < dsp_exp)
- dspmmu_exp = dsp_exp;
- if (dspmmu_exp > dsp_exp+1)
- dspmmu_exp = dsp_exp+1;
- if (tc_exp < arm_exp)
- tc_exp = arm_exp;
- if (tc_exp < dspmmu_exp)
- tc_exp = dspmmu_exp;
- if (tc_exp > lcd_exp)
- lcd_exp = tc_exp;
- if (tc_exp > per_exp)
- per_exp = tc_exp;
+ unsigned long flags;
+ int ret = 0;
- newval &= 0xf000;
- newval |= per_exp << CKCTL_PERDIV_OFFSET;
- newval |= lcd_exp << CKCTL_LCDDIV_OFFSET;
- newval |= arm_exp << CKCTL_ARMDIV_OFFSET;
- newval |= dsp_exp << CKCTL_DSPDIV_OFFSET;
- newval |= tc_exp << CKCTL_TCDIV_OFFSET;
- newval |= dspmmu_exp << CKCTL_DSPMMUDIV_OFFSET;
+ spin_lock_irqsave(&clockfw_lock, flags);
+ ret = clk->usecount;
+ spin_unlock_irqrestore(&clockfw_lock, flags);
- return newval;
+ return ret;
}
+EXPORT_SYMBOL(clk_get_usecount);
-
-static int calc_dsor_exp(struct clk *clk, unsigned long rate)
+unsigned long clk_get_rate(struct clk *clk)
{
- /* Note: If target frequency is too low, this function will return 4,
- * which is invalid value. Caller must check for this value and act
- * accordingly.
- *
- * Note: This function does not check for following limitations set
- * by the hardware (all conditions must be true):
- * DSPMMU_CK == DSP_CK or DSPMMU_CK == DSP_CK/2
- * ARM_CK >= TC_CK
- * DSP_CK >= TC_CK
- * DSPMMU_CK >= TC_CK
- */
- unsigned long realrate;
- struct clk * parent;
- unsigned dsor_exp;
-
- if (unlikely(!(clk->flags & RATE_CKCTL)))
- return -EINVAL;
-
- parent = clk->parent;
- if (unlikely(parent == 0))
- return -EIO;
-
- realrate = parent->rate;
- for (dsor_exp=0; dsor_exp<4; dsor_exp++) {
- if (realrate <= rate)
- break;
+ unsigned long flags;
+ unsigned long ret = 0;
- realrate /= 2;
- }
+ spin_lock_irqsave(&clockfw_lock, flags);
+ ret = clk->rate;
+ spin_unlock_irqrestore(&clockfw_lock, flags);
- return dsor_exp;
+ return ret;
}
+EXPORT_SYMBOL(clk_get_rate);
-
-static void ckctl_recalc(struct clk * clk)
+void clk_put(struct clk *clk)
{
- int dsor;
-
- /* Calculate divisor encoded as 2-bit exponent */
- if (clk->flags & DSP_DOMAIN_CLOCK) {
- /* The clock control bits are in DSP domain,
- * so api_ck is needed for access.
- * Note that DSP_CKCTL virt addr = phys addr, so
- * we must use __raw_readw() instead of omap_readw().
- */
- __clk_use(&api_ck);
- dsor = 1 << (3 & (__raw_readw(DSP_CKCTL) >> clk->rate_offset));
- __clk_unuse(&api_ck);
- } else {
- dsor = 1 << (3 & (omap_readw(ARM_CKCTL) >> clk->rate_offset));
- }
- if (unlikely(clk->rate == clk->parent->rate / dsor))
- return; /* No change, quick exit */
- clk->rate = clk->parent->rate / dsor;
-
- if (unlikely(clk->flags & RATE_PROPAGATES))
- propagate_rate(clk);
+ if (clk && !IS_ERR(clk))
+ module_put(clk->owner);
}
+EXPORT_SYMBOL(clk_put);
+/*-------------------------------------------------------------------------
+ * Optional clock functions defined in asm/hardware/clock.h
+ *-------------------------------------------------------------------------*/
long clk_round_rate(struct clk *clk, unsigned long rate)
{
- int dsor_exp;
-
- if (clk->flags & RATE_FIXED)
- return clk->rate;
-
- if (clk->flags & RATE_CKCTL) {
- dsor_exp = calc_dsor_exp(clk, rate);
- if (dsor_exp < 0)
- return dsor_exp;
- if (dsor_exp > 3)
- dsor_exp = 3;
- return clk->parent->rate / (1 << dsor_exp);
- }
+ unsigned long flags;
+ long ret = 0;
- if(clk->round_rate != 0)
- return clk->round_rate(clk, rate);
+ spin_lock_irqsave(&clockfw_lock, flags);
+ if (arch_clock->clk_round_rate)
+ ret = arch_clock->clk_round_rate(clk, rate);
+ spin_unlock_irqrestore(&clockfw_lock, flags);
- return clk->rate;
+ return ret;
}
EXPORT_SYMBOL(clk_round_rate);
-
-static void propagate_rate(struct clk * clk)
-{
- struct clk ** clkp;
-
- for (clkp = onchip_clks; clkp < onchip_clks+ARRAY_SIZE(onchip_clks); clkp++) {
- if (likely((*clkp)->parent != clk)) continue;
- if (likely((*clkp)->recalc))
- (*clkp)->recalc(*clkp);
- }
-}
-
-
-static int select_table_rate(struct clk * clk, unsigned long rate)
+int clk_set_rate(struct clk *clk, unsigned long rate)
{
- /* Find the highest supported frequency <= rate and switch to it */
- struct mpu_rate * ptr;
-
- if (clk != &virtual_ck_mpu)
- return -EINVAL;
-
- for (ptr = rate_table; ptr->rate; ptr++) {
- if (ptr->xtal != ck_ref.rate)
- continue;
-
- /* DPLL1 cannot be reprogrammed without risking system crash */
- if (likely(ck_dpll1.rate!=0) && ptr->pll_rate != ck_dpll1.rate)
- continue;
-
- /* Can check only after xtal frequency check */
- if (ptr->rate <= rate)
- break;
- }
-
- if (!ptr->rate)
- return -EINVAL;
+ unsigned long flags;
+ int ret = 0;
- /*
- * In most cases we should not need to reprogram DPLL.
- * Reprogramming the DPLL is tricky, it must be done from SRAM.
- */
- omap_sram_reprogram_clock(ptr->dpllctl_val, ptr->ckctl_val);
+ spin_lock_irqsave(&clockfw_lock, flags);
+ if (arch_clock->clk_set_rate)
+ ret = arch_clock->clk_set_rate(clk, rate);
+ spin_unlock_irqrestore(&clockfw_lock, flags);
- ck_dpll1.rate = ptr->pll_rate;
- propagate_rate(&ck_dpll1);
- return 0;
+ return ret;
}
+EXPORT_SYMBOL(clk_set_rate);
-
-static long round_to_table_rate(struct clk * clk, unsigned long rate)
+int clk_set_parent(struct clk *clk, struct clk *parent)
{
- /* Find the highest supported frequency <= rate */
- struct mpu_rate * ptr;
- long highest_rate;
-
- if (clk != &virtual_ck_mpu)
- return -EINVAL;
-
- highest_rate = -EINVAL;
-
- for (ptr = rate_table; ptr->rate; ptr++) {
- if (ptr->xtal != ck_ref.rate)
- continue;
-
- highest_rate = ptr->rate;
+ unsigned long flags;
+ int ret = 0;
- /* Can check only after xtal frequency check */
- if (ptr->rate <= rate)
- break;
- }
+ spin_lock_irqsave(&clockfw_lock, flags);
+ if (arch_clock->clk_set_parent)
+ ret = arch_clock->clk_set_parent(clk, parent);
+ spin_unlock_irqrestore(&clockfw_lock, flags);
- return highest_rate;
+ return ret;
}
+EXPORT_SYMBOL(clk_set_parent);
-
-int clk_set_rate(struct clk *clk, unsigned long rate)
+struct clk *clk_get_parent(struct clk *clk)
{
- int ret = -EINVAL;
- int dsor_exp;
- __u16 regval;
- unsigned long flags;
-
- if (clk->flags & RATE_CKCTL) {
- dsor_exp = calc_dsor_exp(clk, rate);
- if (dsor_exp > 3)
- dsor_exp = -EINVAL;
- if (dsor_exp < 0)
- return dsor_exp;
-
- spin_lock_irqsave(&clockfw_lock, flags);
- regval = omap_readw(ARM_CKCTL);
- regval &= ~(3 << clk->rate_offset);
- regval |= dsor_exp << clk->rate_offset;
- regval = verify_ckctl_value(regval);
- omap_writew(regval, ARM_CKCTL);
- clk->rate = clk->parent->rate / (1 << dsor_exp);
- spin_unlock_irqrestore(&clockfw_lock, flags);
- ret = 0;
- } else if(clk->set_rate != 0) {
- spin_lock_irqsave(&clockfw_lock, flags);
- ret = clk->set_rate(clk, rate);
- spin_unlock_irqrestore(&clockfw_lock, flags);
- }
+ unsigned long flags;
+	struct clk *ret = NULL;
- if (unlikely(ret == 0 && (clk->flags & RATE_PROPAGATES)))
- propagate_rate(clk);
+ spin_lock_irqsave(&clockfw_lock, flags);
+ if (arch_clock->clk_get_parent)
+ ret = arch_clock->clk_get_parent(clk);
+ spin_unlock_irqrestore(&clockfw_lock, flags);
return ret;
}
-EXPORT_SYMBOL(clk_set_rate);
+EXPORT_SYMBOL(clk_get_parent);
+/*-------------------------------------------------------------------------
+ * OMAP specific clock functions shared between omap1 and omap2
+ *-------------------------------------------------------------------------*/
-static unsigned calc_ext_dsor(unsigned long rate)
-{
- unsigned dsor;
+unsigned int __initdata mpurate;
- /* MCLK and BCLK divisor selection is not linear:
- * freq = 96MHz / dsor
- *
- * RATIO_SEL range: dsor <-> RATIO_SEL
- * 0..6: (RATIO_SEL+2) <-> (dsor-2)
- * 6..48: (8+(RATIO_SEL-6)*2) <-> ((dsor-8)/2+6)
- * Minimum dsor is 2 and maximum is 96. Odd divisors starting from 9
- * can not be used.
- */
- for (dsor = 2; dsor < 96; ++dsor) {
- if ((dsor & 1) && dsor > 8)
- continue;
- if (rate >= 96000000 / dsor)
- break;
- }
- return dsor;
-}
-
-/* Only needed on 1510 */
-static int set_uart_rate(struct clk * clk, unsigned long rate)
-{
- unsigned int val;
-
- val = omap_readl(clk->enable_reg);
- if (rate == 12000000)
- val &= ~(1 << clk->enable_bit);
- else if (rate == 48000000)
- val |= (1 << clk->enable_bit);
- else
- return -EINVAL;
- omap_writel(val, clk->enable_reg);
- clk->rate = rate;
-
- return 0;
-}
-
-static int set_ext_clk_rate(struct clk * clk, unsigned long rate)
+/*
+ * By default we use the rate set by the bootloader.
+ * You can override this with the mpurate= cmdline option.
+ */
+static int __init omap_clk_setup(char *str)
{
- unsigned dsor;
- __u16 ratio_bits;
+ get_option(&str, &mpurate);
- dsor = calc_ext_dsor(rate);
- clk->rate = 96000000 / dsor;
- if (dsor > 8)
- ratio_bits = ((dsor - 8) / 2 + 6) << 2;
- else
- ratio_bits = (dsor - 2) << 2;
+ if (!mpurate)
+ return 1;
- ratio_bits |= omap_readw(clk->enable_reg) & ~0xfd;
- omap_writew(ratio_bits, clk->enable_reg);
+ if (mpurate < 1000)
+ mpurate *= 1000000;
- return 0;
+ return 1;
}
+__setup("mpurate=", omap_clk_setup);
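A brief worked example of the option handled above (illustrative values only): booting with "mpurate=192" on the kernel command line leaves mpurate at 192 after get_option(), and the "< 1000" check scales it to 192000000, i.e. 192 MHz. A value of 1000 or more is taken as a rate in Hz and left unscaled.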
-
-static long round_ext_clk_rate(struct clk * clk, unsigned long rate)
+/* Used for clocks that always have the same value as the parent clock */
+void followparent_recalc(struct clk *clk)
{
- return 96000000 / calc_ext_dsor(rate);
+ clk->rate = clk->parent->rate;
}
-
-static void init_ext_clk(struct clk * clk)
+/* Propagate rate to children */
+void propagate_rate(struct clk *tclk)
{
- unsigned dsor;
- __u16 ratio_bits;
+ struct clk *clkp;
- /* Determine current rate and ensure clock is based on 96MHz APLL */
- ratio_bits = omap_readw(clk->enable_reg) & ~1;
- omap_writew(ratio_bits, clk->enable_reg);
-
- ratio_bits = (ratio_bits & 0xfc) >> 2;
- if (ratio_bits > 6)
- dsor = (ratio_bits - 6) * 2 + 8;
- else
- dsor = ratio_bits + 2;
-
- clk-> rate = 96000000 / dsor;
+ list_for_each_entry(clkp, &clocks, node) {
+ if (likely(clkp->parent != tclk))
+ continue;
+ if (likely((u32)clkp->recalc))
+ clkp->recalc(clkp);
+ }
}
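As a purely illustrative sketch (not part of this patch), a clock that simply tracks its parent would wire followparent_recalc() above into its struct clk. The .name/.parent/.recalc fields follow the struct clk layout this patch moves into the arch headers (not shown here), and example_parent_ck is an assumed placeholder:

	static struct clk example_child_ck = {
		.name	= "example_child_ck",
		.parent	= &example_parent_ck,	/* assumed to exist elsewhere */
		.recalc	= &followparent_recalc,
	};

A later propagate_rate(&example_parent_ck) then updates example_child_ck->rate through its recalc hook.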
-
int clk_register(struct clk *clk)
{
down(&clocks_sem);
@@ -1125,6 +254,7 @@ int clk_register(struct clk *clk)
if (clk->init)
clk->init(clk);
up(&clocks_sem);
+
return 0;
}
EXPORT_SYMBOL(clk_register);
@@ -1137,203 +267,38 @@ void clk_unregister(struct clk *clk)
}
EXPORT_SYMBOL(clk_unregister);
-#ifdef CONFIG_OMAP_RESET_CLOCKS
-/*
- * Resets some clocks that may be left on from bootloader,
- * but leaves serial clocks on. See also omap_late_clk_reset().
- */
-static inline void omap_early_clk_reset(void)
+void clk_deny_idle(struct clk *clk)
{
- //omap_writel(0x3 << 29, MOD_CONF_CTRL_0);
+ unsigned long flags;
+
+ spin_lock_irqsave(&clockfw_lock, flags);
+ if (arch_clock->clk_deny_idle)
+ arch_clock->clk_deny_idle(clk);
+ spin_unlock_irqrestore(&clockfw_lock, flags);
}
-#else
-#define omap_early_clk_reset() {}
-#endif
+EXPORT_SYMBOL(clk_deny_idle);
-int __init clk_init(void)
+void clk_allow_idle(struct clk *clk)
{
- struct clk ** clkp;
- const struct omap_clock_config *info;
- int crystal_type = 0; /* Default 12 MHz */
-
- omap_early_clk_reset();
-
- for (clkp = onchip_clks; clkp < onchip_clks+ARRAY_SIZE(onchip_clks); clkp++) {
- if (((*clkp)->flags &CLOCK_IN_OMAP1510) && cpu_is_omap1510()) {
- clk_register(*clkp);
- continue;
- }
-
- if (((*clkp)->flags &CLOCK_IN_OMAP16XX) && cpu_is_omap16xx()) {
- clk_register(*clkp);
- continue;
- }
-
- if (((*clkp)->flags &CLOCK_IN_OMAP730) && cpu_is_omap730()) {
- clk_register(*clkp);
- continue;
- }
- }
-
- info = omap_get_config(OMAP_TAG_CLOCK, struct omap_clock_config);
- if (info != NULL) {
- if (!cpu_is_omap1510())
- crystal_type = info->system_clock_type;
- }
-
-#if defined(CONFIG_ARCH_OMAP730)
- ck_ref.rate = 13000000;
-#elif defined(CONFIG_ARCH_OMAP16XX)
- if (crystal_type == 2)
- ck_ref.rate = 19200000;
-#endif
-
- printk("Clocks: ARM_SYSST: 0x%04x DPLL_CTL: 0x%04x ARM_CKCTL: 0x%04x\n",
- omap_readw(ARM_SYSST), omap_readw(DPLL_CTL),
- omap_readw(ARM_CKCTL));
-
- /* We want to be in syncronous scalable mode */
- omap_writew(0x1000, ARM_SYSST);
-
-#ifdef CONFIG_OMAP_CLOCKS_SET_BY_BOOTLOADER
- /* Use values set by bootloader. Determine PLL rate and recalculate
- * dependent clocks as if kernel had changed PLL or divisors.
- */
- {
- unsigned pll_ctl_val = omap_readw(DPLL_CTL);
-
- ck_dpll1.rate = ck_ref.rate; /* Base xtal rate */
- if (pll_ctl_val & 0x10) {
- /* PLL enabled, apply multiplier and divisor */
- if (pll_ctl_val & 0xf80)
- ck_dpll1.rate *= (pll_ctl_val & 0xf80) >> 7;
- ck_dpll1.rate /= ((pll_ctl_val & 0x60) >> 5) + 1;
- } else {
- /* PLL disabled, apply bypass divisor */
- switch (pll_ctl_val & 0xc) {
- case 0:
- break;
- case 0x4:
- ck_dpll1.rate /= 2;
- break;
- default:
- ck_dpll1.rate /= 4;
- break;
- }
- }
- }
- propagate_rate(&ck_dpll1);
-#else
- /* Find the highest supported frequency and enable it */
- if (select_table_rate(&virtual_ck_mpu, ~0)) {
- printk(KERN_ERR "System frequencies not set. Check your config.\n");
- /* Guess sane values (60MHz) */
- omap_writew(0x2290, DPLL_CTL);
- omap_writew(0x1005, ARM_CKCTL);
- ck_dpll1.rate = 60000000;
- propagate_rate(&ck_dpll1);
- }
-#endif
- /* Cache rates for clocks connected to ck_ref (not dpll1) */
- propagate_rate(&ck_ref);
- printk(KERN_INFO "Clocking rate (xtal/DPLL1/MPU): "
- "%ld.%01ld/%ld.%01ld/%ld.%01ld MHz\n",
- ck_ref.rate / 1000000, (ck_ref.rate / 100000) % 10,
- ck_dpll1.rate / 1000000, (ck_dpll1.rate / 100000) % 10,
- arm_ck.rate / 1000000, (arm_ck.rate / 100000) % 10);
-
-#ifdef CONFIG_MACH_OMAP_PERSEUS2
- /* Select slicer output as OMAP input clock */
- omap_writew(omap_readw(OMAP730_PCC_UPLD_CTRL) & ~0x1, OMAP730_PCC_UPLD_CTRL);
-#endif
-
- /* Turn off DSP and ARM_TIMXO. Make sure ARM_INTHCK is not divided */
- omap_writew(omap_readw(ARM_CKCTL) & 0x0fff, ARM_CKCTL);
-
- /* Put DSP/MPUI into reset until needed */
- omap_writew(0, ARM_RSTCT1);
- omap_writew(1, ARM_RSTCT2);
- omap_writew(0x400, ARM_IDLECT1);
-
- /*
- * According to OMAP5910 Erratum SYS_DMA_1, bit DMACK_REQ (bit 8)
- * of the ARM_IDLECT2 register must be set to zero. The power-on
- * default value of this bit is one.
- */
- omap_writew(0x0000, ARM_IDLECT2); /* Turn LCD clock off also */
-
- /*
- * Only enable those clocks we will need, let the drivers
- * enable other clocks as necessary
- */
- clk_use(&armper_ck);
- clk_use(&armxor_ck);
- clk_use(&armtim_ck);
-
- if (cpu_is_omap1510())
- clk_enable(&arm_gpio_ck);
+ unsigned long flags;
- return 0;
+ spin_lock_irqsave(&clockfw_lock, flags);
+ if (arch_clock->clk_allow_idle)
+ arch_clock->clk_allow_idle(clk);
+ spin_unlock_irqrestore(&clockfw_lock, flags);
}
+EXPORT_SYMBOL(clk_allow_idle);
+/*-------------------------------------------------------------------------*/
-#ifdef CONFIG_OMAP_RESET_CLOCKS
-
-static int __init omap_late_clk_reset(void)
+int __init clk_init(struct clk_functions *custom_clocks)
{
- /* Turn off all unused clocks */
- struct clk *p;
- __u32 regval32;
-
- /* USB_REQ_EN will be disabled later if necessary (usb_dc_ck) */
- regval32 = omap_readw(SOFT_REQ_REG) & (1 << 4);
- omap_writew(regval32, SOFT_REQ_REG);
- omap_writew(0, SOFT_REQ_REG2);
-
- list_for_each_entry(p, &clocks, node) {
- if (p->usecount > 0 || (p->flags & ALWAYS_ENABLED) ||
- p->enable_reg == 0)
- continue;
-
- /* Assume no DSP clocks have been activated by bootloader */
- if (p->flags & DSP_DOMAIN_CLOCK)
- continue;
-
- /* Is the clock already disabled? */
- if (p->flags & ENABLE_REG_32BIT) {
- if (p->flags & VIRTUAL_IO_ADDRESS)
- regval32 = __raw_readl(p->enable_reg);
- else
- regval32 = omap_readl(p->enable_reg);
- } else {
- if (p->flags & VIRTUAL_IO_ADDRESS)
- regval32 = __raw_readw(p->enable_reg);
- else
- regval32 = omap_readw(p->enable_reg);
- }
-
- if ((regval32 & (1 << p->enable_bit)) == 0)
- continue;
-
- /* FIXME: This clock seems to be necessary but no-one
- * has asked for its activation. */
- if (p == &tc2_ck // FIX: pm.c (SRAM), CCP, Camera
- || p == &ck_dpll1out // FIX: SoSSI, SSR
- || p == &arm_gpio_ck // FIX: GPIO code for 1510
- ) {
- printk(KERN_INFO "FIXME: Clock \"%s\" seems unused\n",
- p->name);
- continue;
- }
-
- printk(KERN_INFO "Disabling unused clock \"%s\"... ", p->name);
- __clk_disable(p);
- printk(" done\n");
+ if (!custom_clocks) {
+ printk(KERN_ERR "No custom clock functions registered\n");
+ BUG();
}
+ arch_clock = custom_clocks;
+
return 0;
}
-
-late_initcall(omap_late_clk_reset);
-
-#endif
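The hunks above turn the plat-omap clock code into a thin dispatcher around arch_clock while keeping the driver-facing API (clk_use/clk_unuse, clk_get_rate, clk_set_rate, clk_round_rate, clk_put and friends). A minimal, illustrative consumer-side sketch follows; it is not part of the patch. clk_get() and the "example_ck" name are assumptions here (clk_get() comes from the generic interface in <asm/hardware/clock.h>, which this diff does not show), and <linux/err.h> is assumed for IS_ERR()/PTR_ERR():

	static int example_clk_consumer(struct device *dev)
	{
		struct clk *ck;
		long rounded;

		ck = clk_get(dev, "example_ck");	/* "example_ck" is a made-up name */
		if (IS_ERR(ck))
			return PTR_ERR(ck);

		clk_use(ck);				/* enable with use counting */

		rounded = clk_round_rate(ck, 48000000);	/* dispatched to arch_clock above */
		if (rounded > 0)
			clk_set_rate(ck, rounded);

		printk(KERN_DEBUG "example_ck now runs at %lu Hz\n",
		       clk_get_rate(ck));

		/* ... use the device ... */

		clk_unuse(ck);				/* disable again */
		clk_put(ck);
		return 0;
	}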
diff --git a/arch/arm/plat-omap/clock.h b/arch/arm/plat-omap/clock.h
deleted file mode 100644
index a89e1e8c251..00000000000
--- a/arch/arm/plat-omap/clock.h
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * linux/arch/arm/plat-omap/clock.h
- *
- * Copyright (C) 2004 Nokia corporation
- * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
- * Based on clocks.h by Tony Lindgren, Gordon McNutt and RidgeRun, Inc
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ARCH_ARM_OMAP_CLOCK_H
-#define __ARCH_ARM_OMAP_CLOCK_H
-
-struct module;
-
-struct clk {
- struct list_head node;
- struct module *owner;
- const char *name;
- struct clk *parent;
- unsigned long rate;
- __s8 usecount;
- __u16 flags;
- __u32 enable_reg;
- __u8 enable_bit;
- __u8 rate_offset;
- void (*recalc)(struct clk *);
- int (*set_rate)(struct clk *, unsigned long);
- long (*round_rate)(struct clk *, unsigned long);
- void (*init)(struct clk *);
-};
-
-
-struct mpu_rate {
- unsigned long rate;
- unsigned long xtal;
- unsigned long pll_rate;
- __u16 ckctl_val;
- __u16 dpllctl_val;
-};
-
-
-/* Clock flags */
-#define RATE_CKCTL 1
-#define RATE_FIXED 2
-#define RATE_PROPAGATES 4
-#define VIRTUAL_CLOCK 8
-#define ALWAYS_ENABLED 16
-#define ENABLE_REG_32BIT 32
-#define CLOCK_IN_OMAP16XX 64
-#define CLOCK_IN_OMAP1510 128
-#define CLOCK_IN_OMAP730 256
-#define DSP_DOMAIN_CLOCK 512
-#define VIRTUAL_IO_ADDRESS 1024
-
-/* ARM_CKCTL bit shifts */
-#define CKCTL_PERDIV_OFFSET 0
-#define CKCTL_LCDDIV_OFFSET 2
-#define CKCTL_ARMDIV_OFFSET 4
-#define CKCTL_DSPDIV_OFFSET 6
-#define CKCTL_TCDIV_OFFSET 8
-#define CKCTL_DSPMMUDIV_OFFSET 10
-/*#define ARM_TIMXO 12*/
-#define EN_DSPCK 13
-/*#define ARM_INTHCK_SEL 14*/ /* Divide-by-2 for mpu inth_ck */
-/* DSP_CKCTL bit shifts */
-#define CKCTL_DSPPERDIV_OFFSET 0
-
-/* ARM_IDLECT1 bit shifts */
-/*#define IDLWDT_ARM 0*/
-/*#define IDLXORP_ARM 1*/
-/*#define IDLPER_ARM 2*/
-/*#define IDLLCD_ARM 3*/
-/*#define IDLLB_ARM 4*/
-/*#define IDLHSAB_ARM 5*/
-/*#define IDLIF_ARM 6*/
-/*#define IDLDPLL_ARM 7*/
-/*#define IDLAPI_ARM 8*/
-/*#define IDLTIM_ARM 9*/
-/*#define SETARM_IDLE 11*/
-
-/* ARM_IDLECT2 bit shifts */
-#define EN_WDTCK 0
-#define EN_XORPCK 1
-#define EN_PERCK 2
-#define EN_LCDCK 3
-#define EN_LBCK 4 /* Not on 1610/1710 */
-/*#define EN_HSABCK 5*/
-#define EN_APICK 6
-#define EN_TIMCK 7
-#define DMACK_REQ 8
-#define EN_GPIOCK 9 /* Not on 1610/1710 */
-/*#define EN_LBFREECK 10*/
-#define EN_CKOUT_ARM 11
-
-/* ARM_IDLECT3 bit shifts */
-#define EN_OCPI_CK 0
-#define EN_TC1_CK 2
-#define EN_TC2_CK 4
-
-/* DSP_IDLECT2 bit shifts (0,1,2 are same as for ARM_IDLECT2) */
-#define EN_DSPTIMCK 5
-
-/* Various register defines for clock controls scattered around OMAP chip */
-#define USB_MCLK_EN_BIT 4 /* In ULPD_CLKC_CTRL */
-#define USB_HOST_HHC_UHOST_EN 9 /* In MOD_CONF_CTRL_0 */
-#define SWD_ULPD_PLL_CLK_REQ 1 /* In SWD_CLK_DIV_CTRL_SEL */
-#define COM_ULPD_PLL_CLK_REQ 1 /* In COM_CLK_DIV_CTRL_SEL */
-#define SWD_CLK_DIV_CTRL_SEL 0xfffe0874
-#define COM_CLK_DIV_CTRL_SEL 0xfffe0878
-#define SOFT_REQ_REG 0xfffe0834
-#define SOFT_REQ_REG2 0xfffe0880
-
-int clk_register(struct clk *clk);
-void clk_unregister(struct clk *clk);
-int clk_init(void);
-
-#endif
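With plat-omap/clock.h gone, clk_init() now takes a struct clk_functions pointer and the per-SoC code supplies the actual operations. The sketch below is illustrative only: the field names come straight from the arch_clock-> dispatch calls in the clock.c hunks above, but the omap1_clk_* implementation names and omap1_clk_init() are hypothetical placeholders for whatever the SoC-specific clock file actually provides (that file is not part of this excerpt):

	static struct clk_functions example_clk_functions = {
		.clk_use	= omap1_clk_use,
		.clk_unuse	= omap1_clk_unuse,
		.clk_disable	= omap1_clk_disable,
		.clk_round_rate	= omap1_clk_round_rate,
		.clk_set_rate	= omap1_clk_set_rate,
		.clk_set_parent	= omap1_clk_set_parent,
		.clk_get_parent	= omap1_clk_get_parent,
	};

	int __init omap1_clk_init(void)
	{
		/* Hand the SoC-specific hooks to the common framework */
		clk_init(&example_clk_functions);
		/* ... then register struct clk instances with clk_register() ... */
		return 0;
	}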
diff --git a/arch/arm/plat-omap/common.c b/arch/arm/plat-omap/common.c
index 02bcc6c1cd1..ccdb452630c 100644
--- a/arch/arm/plat-omap/common.c
+++ b/arch/arm/plat-omap/common.c
@@ -31,7 +31,7 @@
#include <asm/arch/mux.h>
#include <asm/arch/fpga.h>
-#include "clock.h"
+#include <asm/arch/clock.h>
#define NO_LENGTH_CHECK 0xffffffff
@@ -117,19 +117,43 @@ EXPORT_SYMBOL(omap_get_var_config);
static int __init omap_add_serial_console(void)
{
- const struct omap_serial_console_config *info;
-
- info = omap_get_config(OMAP_TAG_SERIAL_CONSOLE,
- struct omap_serial_console_config);
- if (info != NULL && info->console_uart) {
- static char speed[11], *opt = NULL;
+ const struct omap_serial_console_config *con_info;
+ const struct omap_uart_config *uart_info;
+ static char speed[11], *opt = NULL;
+ int line, i, uart_idx;
+
+ uart_info = omap_get_config(OMAP_TAG_UART, struct omap_uart_config);
+ con_info = omap_get_config(OMAP_TAG_SERIAL_CONSOLE,
+ struct omap_serial_console_config);
+ if (uart_info == NULL || con_info == NULL)
+ return 0;
+
+ if (con_info->console_uart == 0)
+ return 0;
+
+ if (con_info->console_speed) {
+ snprintf(speed, sizeof(speed), "%u", con_info->console_speed);
+ opt = speed;
+ }
- if (info->console_speed) {
- snprintf(speed, sizeof(speed), "%u", info->console_speed);
- opt = speed;
- }
- return add_preferred_console("ttyS", info->console_uart - 1, opt);
+ uart_idx = con_info->console_uart - 1;
+ if (uart_idx >= OMAP_MAX_NR_PORTS) {
+ printk(KERN_INFO "Console: external UART#%d. "
+ "Not adding it as console this time.\n",
+ uart_idx + 1);
+ return 0;
+ }
+ if (!(uart_info->enabled_uarts & (1 << uart_idx))) {
+ printk(KERN_ERR "Console: Selected UART#%d is "
+ "not enabled for this platform\n",
+ uart_idx + 1);
+ return -1;
+ }
+ line = 0;
+ for (i = 0; i < uart_idx; i++) {
+ if (uart_info->enabled_uarts & (1 << i))
+ line++;
}
- return 0;
+ return add_preferred_console("ttyS", line, opt);
}
console_initcall(omap_add_serial_console);
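To make the mapping above concrete (illustrative numbers only): with enabled_uarts = 0x5 (UART1 and UART3 enabled) and console_uart = 3, uart_idx is 2, the loop counts a single enabled UART below it, and the console ends up registered as ttyS1 rather than ttyS2.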
diff --git a/arch/arm/plat-omap/devices.c b/arch/arm/plat-omap/devices.c
new file mode 100644
index 00000000000..9dcce904b60
--- /dev/null
+++ b/arch/arm/plat-omap/devices.c
@@ -0,0 +1,381 @@
+/*
+ * linux/arch/arm/plat-omap/devices.c
+ *
+ * Common platform device setup/initialization for OMAP1 and OMAP2
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+
+#include <asm/hardware.h>
+#include <asm/io.h>
+#include <asm/mach-types.h>
+#include <asm/mach/map.h>
+
+#include <asm/arch/tc.h>
+#include <asm/arch/board.h>
+#include <asm/arch/mux.h>
+#include <asm/arch/gpio.h>
+
+
+void omap_nop_release(struct device *dev)
+{
+ /* Nothing */
+}
+
+/*-------------------------------------------------------------------------*/
+
+#if defined(CONFIG_I2C_OMAP) || defined(CONFIG_I2C_OMAP_MODULE)
+
+#define OMAP1_I2C_BASE 0xfffb3800
+#define OMAP2_I2C_BASE1 0x48070000
+#define OMAP_I2C_SIZE 0x3f
+#define OMAP1_I2C_INT INT_I2C
+#define OMAP2_I2C_INT1 56
+
+static struct resource i2c_resources1[] = {
+ {
+ .start = 0,
+ .end = 0,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = 0,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+/* DMA not used; works around erratum writing to non-empty i2c fifo */
+
+static struct platform_device omap_i2c_device1 = {
+ .name = "i2c_omap",
+ .id = 1,
+ .dev = {
+ .release = omap_nop_release,
+ },
+ .num_resources = ARRAY_SIZE(i2c_resources1),
+ .resource = i2c_resources1,
+};
+
+/* See also arch/arm/mach-omap2/devices.c for second I2C on 24xx */
+static void omap_init_i2c(void)
+{
+ if (cpu_is_omap24xx()) {
+ i2c_resources1[0].start = OMAP2_I2C_BASE1;
+ i2c_resources1[0].end = OMAP2_I2C_BASE1 + OMAP_I2C_SIZE;
+ i2c_resources1[1].start = OMAP2_I2C_INT1;
+ } else {
+ i2c_resources1[0].start = OMAP1_I2C_BASE;
+ i2c_resources1[0].end = OMAP1_I2C_BASE + OMAP_I2C_SIZE;
+ i2c_resources1[1].start = OMAP1_I2C_INT;
+ }
+
+ /* FIXME define and use a boot tag, in case of boards that
+ * either don't wire up I2C, or chips that mux it differently...
+ * it can include clocking and address info, maybe more.
+ */
+ if (cpu_is_omap24xx()) {
+ omap_cfg_reg(M19_24XX_I2C1_SCL);
+ omap_cfg_reg(L15_24XX_I2C1_SDA);
+ } else {
+ omap_cfg_reg(I2C_SCL);
+ omap_cfg_reg(I2C_SDA);
+ }
+
+ (void) platform_device_register(&omap_i2c_device1);
+}
+
+#else
+static inline void omap_init_i2c(void) {}
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE)
+
+#ifdef CONFIG_ARCH_OMAP24XX
+#define OMAP_MMC1_BASE 0x4809c000
+#define OMAP_MMC1_INT 83
+#else
+#define OMAP_MMC1_BASE 0xfffb7800
+#define OMAP_MMC1_INT INT_MMC
+#endif
+#define OMAP_MMC2_BASE 0xfffb7c00 /* omap16xx only */
+
+static struct omap_mmc_conf mmc1_conf;
+
+static u64 mmc1_dmamask = 0xffffffff;
+
+static struct resource mmc1_resources[] = {
+ {
+ .start = IO_ADDRESS(OMAP_MMC1_BASE),
+ .end = IO_ADDRESS(OMAP_MMC1_BASE) + 0x7f,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = OMAP_MMC1_INT,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device mmc_omap_device1 = {
+ .name = "mmci-omap",
+ .id = 1,
+ .dev = {
+ .release = omap_nop_release,
+ .dma_mask = &mmc1_dmamask,
+ .platform_data = &mmc1_conf,
+ },
+ .num_resources = ARRAY_SIZE(mmc1_resources),
+ .resource = mmc1_resources,
+};
+
+#ifdef CONFIG_ARCH_OMAP16XX
+
+static struct omap_mmc_conf mmc2_conf;
+
+static u64 mmc2_dmamask = 0xffffffff;
+
+static struct resource mmc2_resources[] = {
+ {
+ .start = IO_ADDRESS(OMAP_MMC2_BASE),
+ .end = IO_ADDRESS(OMAP_MMC2_BASE) + 0x7f,
+ .flags = IORESOURCE_MEM,
+ },
+ {
+ .start = INT_1610_MMC2,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+static struct platform_device mmc_omap_device2 = {
+ .name = "mmci-omap",
+ .id = 2,
+ .dev = {
+ .release = omap_nop_release,
+ .dma_mask = &mmc2_dmamask,
+ .platform_data = &mmc2_conf,
+ },
+ .num_resources = ARRAY_SIZE(mmc2_resources),
+ .resource = mmc2_resources,
+};
+#endif
+
+static void __init omap_init_mmc(void)
+{
+ const struct omap_mmc_config *mmc_conf;
+ const struct omap_mmc_conf *mmc;
+
+ /* NOTE: assumes MMC was never (wrongly) enabled */
+ mmc_conf = omap_get_config(OMAP_TAG_MMC, struct omap_mmc_config);
+ if (!mmc_conf)
+ return;
+
+ /* block 1 is always available and has just one pinout option */
+ mmc = &mmc_conf->mmc[0];
+ if (mmc->enabled) {
+ if (!cpu_is_omap24xx()) {
+ omap_cfg_reg(MMC_CMD);
+ omap_cfg_reg(MMC_CLK);
+ omap_cfg_reg(MMC_DAT0);
+ if (cpu_is_omap1710()) {
+ omap_cfg_reg(M15_1710_MMC_CLKI);
+ omap_cfg_reg(P19_1710_MMC_CMDDIR);
+ omap_cfg_reg(P20_1710_MMC_DATDIR0);
+ }
+ }
+ if (mmc->wire4) {
+ if (!cpu_is_omap24xx()) {
+ omap_cfg_reg(MMC_DAT1);
+ /* NOTE: DAT2 can be on W10 (here) or M15 */
+ if (!mmc->nomux)
+ omap_cfg_reg(MMC_DAT2);
+ omap_cfg_reg(MMC_DAT3);
+ }
+ }
+ mmc1_conf = *mmc;
+ (void) platform_device_register(&mmc_omap_device1);
+ }
+
+#ifdef CONFIG_ARCH_OMAP16XX
+ /* block 2 is on newer chips, and has many pinout options */
+ mmc = &mmc_conf->mmc[1];
+ if (mmc->enabled) {
+ if (!mmc->nomux) {
+ omap_cfg_reg(Y8_1610_MMC2_CMD);
+ omap_cfg_reg(Y10_1610_MMC2_CLK);
+ omap_cfg_reg(R18_1610_MMC2_CLKIN);
+ omap_cfg_reg(W8_1610_MMC2_DAT0);
+ if (mmc->wire4) {
+ omap_cfg_reg(V8_1610_MMC2_DAT1);
+ omap_cfg_reg(W15_1610_MMC2_DAT2);
+ omap_cfg_reg(R10_1610_MMC2_DAT3);
+ }
+
+ /* These are needed for the level shifter */
+ omap_cfg_reg(V9_1610_MMC2_CMDDIR);
+ omap_cfg_reg(V5_1610_MMC2_DATDIR0);
+ omap_cfg_reg(W19_1610_MMC2_DATDIR1);
+ }
+
+ /* Feedback clock must be set on OMAP-1710 MMC2 */
+ if (cpu_is_omap1710())
+ omap_writel(omap_readl(MOD_CONF_CTRL_1) | (1 << 24),
+ MOD_CONF_CTRL_1);
+ mmc2_conf = *mmc;
+ (void) platform_device_register(&mmc_omap_device2);
+ }
+#endif
+ return;
+}
+#else
+static inline void omap_init_mmc(void) {}
+#endif
+
+#if defined(CONFIG_OMAP_WATCHDOG) || defined(CONFIG_OMAP_WATCHDOG_MODULE)
+
+#ifdef CONFIG_ARCH_OMAP24XX
+#define OMAP_WDT_BASE 0x48022000
+#else
+#define OMAP_WDT_BASE 0xfffeb000
+#endif
+
+static struct resource wdt_resources[] = {
+ {
+ .start = OMAP_WDT_BASE,
+ .end = OMAP_WDT_BASE + 0x4f,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device omap_wdt_device = {
+ .name = "omap_wdt",
+ .id = -1,
+ .dev = {
+ .release = omap_nop_release,
+ },
+ .num_resources = ARRAY_SIZE(wdt_resources),
+ .resource = wdt_resources,
+};
+
+static void omap_init_wdt(void)
+{
+ (void) platform_device_register(&omap_wdt_device);
+}
+#else
+static inline void omap_init_wdt(void) {}
+#endif
+
+/*-------------------------------------------------------------------------*/
+
+#if defined(CONFIG_OMAP_RNG) || defined(CONFIG_OMAP_RNG_MODULE)
+
+#ifdef CONFIG_ARCH_OMAP24XX
+#define OMAP_RNG_BASE 0x480A0000
+#else
+#define OMAP_RNG_BASE 0xfffe5000
+#endif
+
+static struct resource rng_resources[] = {
+ {
+ .start = OMAP_RNG_BASE,
+ .end = OMAP_RNG_BASE + 0x4f,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+static struct platform_device omap_rng_device = {
+ .name = "omap_rng",
+ .id = -1,
+ .dev = {
+ .release = omap_nop_release,
+ },
+ .num_resources = ARRAY_SIZE(rng_resources),
+ .resource = rng_resources,
+};
+
+static void omap_init_rng(void)
+{
+ (void) platform_device_register(&omap_rng_device);
+}
+#else
+static inline void omap_init_rng(void) {}
+#endif
+
+#if defined(CONFIG_FB_OMAP) || defined(CONFIG_FB_OMAP_MODULE)
+
+static struct omap_lcd_config omap_fb_conf;
+
+static u64 omap_fb_dma_mask = ~(u32)0;
+
+static struct platform_device omap_fb_device = {
+ .name = "omapfb",
+ .id = -1,
+ .dev = {
+ .release = omap_nop_release,
+ .dma_mask = &omap_fb_dma_mask,
+ .coherent_dma_mask = ~(u32)0,
+ .platform_data = &omap_fb_conf,
+ },
+ .num_resources = 0,
+};
+
+static inline void omap_init_fb(void)
+{
+ const struct omap_lcd_config *conf;
+
+ conf = omap_get_config(OMAP_TAG_LCD, struct omap_lcd_config);
+ if (conf != NULL)
+ omap_fb_conf = *conf;
+ platform_device_register(&omap_fb_device);
+}
+
+#else
+
+static inline void omap_init_fb(void) {}
+
+#endif
+
+/*
+ * This gets called after board-specific INIT_MACHINE, and initializes most
+ * on-chip peripherals accessible on this board (except for a few like USB):
+ *
+ * (a) Does any "standard config" pin muxing that is needed. Board-specific
+ *     code will have muxed GPIO pins and done "nonstandard" setup;
+ *     that code could live in the boot loader.
+ * (b) Populates board-specific platform_data with the data drivers
+ *     rely on to handle wiring variations.
+ * (c) Creates platform devices as meaningful on this board and
+ *     with this kernel configuration.
+ *
+ * Claiming GPIOs, and setting their direction and initial values, is the
+ * responsibility of the device drivers. So is responding to probe().
+ *
+ * Board-specific knowledge like creating devices or pin setup is to be
+ * kept out of drivers as much as possible. In particular, pin setup
+ * may be handled by the boot loader, and drivers should expect it will
+ * normally have been done by the time they're probed.
+ */
+static int __init omap_init_devices(void)
+{
+ /* please keep these calls, and their implementations above,
+ * in alphabetical order so they're easier to sort through.
+ */
+ omap_init_fb();
+ omap_init_i2c();
+ omap_init_mmc();
+ omap_init_rng();
+ omap_init_wdt();
+
+ return 0;
+}
+arch_initcall(omap_init_devices);
+
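The devices.c helpers above pull their wiring information from board tags via omap_get_config(). As a purely illustrative sketch (struct and field names as used in omap_init_mmc() above; the full definitions live in <asm/arch/board.h>, which is not shown here), a board file might describe a single 4-wire MMC slot like this:

	static struct omap_mmc_config example_mmc_config = {
		.mmc[0] = {
			.enabled	= 1,	/* slot 1 wired up on this board */
			.wire4		= 1,	/* 4-bit data bus */
		},
	};

omap_init_mmc() would pick this data up through the OMAP_TAG_MMC tag, mux the MMC pins and register the "mmci-omap" platform device with it as platform_data.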
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index da7b6514565..f5cc21ad095 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -6,6 +6,8 @@
* DMA channel linking for 1610 by Samuel Ortiz <samuel.ortiz@nokia.com>
* Graphics DMA and LCD DMA graphics tranformations
* by Imre Deak <imre.deak@nokia.com>
+ * OMAP2 support Copyright (C) 2004-2005 Texas Instruments, Inc.
+ * Merged to support both OMAP1 and OMAP2 by Tony Lindgren <tony@atomide.com>
* Some functions based on earlier dma-omap.c Copyright (C) 2001 RidgeRun, Inc.
*
* Support functions for the OMAP internal DMA channels.
@@ -31,8 +33,15 @@
#include <asm/arch/tc.h>
-#define OMAP_DMA_ACTIVE 0x01
+#define DEBUG_PRINTS
+#undef DEBUG_PRINTS
+#ifdef DEBUG_PRINTS
+#define debug_printk(x) printk x
+#else
+#define debug_printk(x)
+#endif
+#define OMAP_DMA_ACTIVE 0x01
#define OMAP_DMA_CCR_EN (1 << 7)
#define OMAP_FUNC_MUX_ARM_BASE (0xfffe1000 + 0xec)
@@ -55,7 +64,7 @@ static int dma_chan_count;
static spinlock_t dma_chan_lock;
static struct omap_dma_lch dma_chan[OMAP_LOGICAL_DMA_CH_COUNT];
-const static u8 dma_irq[OMAP_LOGICAL_DMA_CH_COUNT] = {
+const static u8 omap1_dma_irq[OMAP_LOGICAL_DMA_CH_COUNT] = {
INT_DMA_CH0_6, INT_DMA_CH1_7, INT_DMA_CH2_8, INT_DMA_CH3,
INT_DMA_CH4, INT_DMA_CH5, INT_1610_DMA_CH6, INT_1610_DMA_CH7,
INT_1610_DMA_CH8, INT_1610_DMA_CH9, INT_1610_DMA_CH10,
@@ -63,6 +72,20 @@ const static u8 dma_irq[OMAP_LOGICAL_DMA_CH_COUNT] = {
INT_1610_DMA_CH14, INT_1610_DMA_CH15, INT_DMA_LCD
};
+#define REVISIT_24XX() printk(KERN_ERR "FIXME: no %s on 24xx\n", \
+ __FUNCTION__);
+
+#ifdef CONFIG_ARCH_OMAP15XX
+/* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */
+int omap_dma_in_1510_mode(void)
+{
+ return enable_1510_mode;
+}
+#else
+#define omap_dma_in_1510_mode() 0
+#endif
+
+#ifdef CONFIG_ARCH_OMAP1
static inline int get_gdma_dev(int req)
{
u32 reg = OMAP_FUNC_MUX_ARM_BASE + ((req - 1) / 5) * 4;
@@ -82,6 +105,9 @@ static inline void set_gdma_dev(int req, int dev)
l |= (dev - 1) << shift;
omap_writel(l, reg);
}
+#else
+#define set_gdma_dev(req, dev) do {} while (0)
+#endif
static void clear_lch_regs(int lch)
{
@@ -121,38 +147,62 @@ void omap_set_dma_priority(int dst_port, int priority)
}
void omap_set_dma_transfer_params(int lch, int data_type, int elem_count,
- int frame_count, int sync_mode)
+ int frame_count, int sync_mode,
+ int dma_trigger, int src_or_dst_synch)
{
- u16 w;
+ OMAP_DMA_CSDP_REG(lch) &= ~0x03;
+ OMAP_DMA_CSDP_REG(lch) |= data_type;
- w = omap_readw(OMAP_DMA_CSDP(lch));
- w &= ~0x03;
- w |= data_type;
- omap_writew(w, OMAP_DMA_CSDP(lch));
+ if (cpu_class_is_omap1()) {
+ OMAP_DMA_CCR_REG(lch) &= ~(1 << 5);
+ if (sync_mode == OMAP_DMA_SYNC_FRAME)
+ OMAP_DMA_CCR_REG(lch) |= 1 << 5;
+
+ OMAP1_DMA_CCR2_REG(lch) &= ~(1 << 2);
+ if (sync_mode == OMAP_DMA_SYNC_BLOCK)
+ OMAP1_DMA_CCR2_REG(lch) |= 1 << 2;
+ }
+
+ if (cpu_is_omap24xx() && dma_trigger) {
+ u32 val = OMAP_DMA_CCR_REG(lch);
+
+ if (dma_trigger > 63)
+ val |= 1 << 20;
+ if (dma_trigger > 31)
+ val |= 1 << 19;
- w = omap_readw(OMAP_DMA_CCR(lch));
- w &= ~(1 << 5);
- if (sync_mode == OMAP_DMA_SYNC_FRAME)
- w |= 1 << 5;
- omap_writew(w, OMAP_DMA_CCR(lch));
+ val |= (dma_trigger & 0x1f);
- w = omap_readw(OMAP_DMA_CCR2(lch));
- w &= ~(1 << 2);
- if (sync_mode == OMAP_DMA_SYNC_BLOCK)
- w |= 1 << 2;
- omap_writew(w, OMAP_DMA_CCR2(lch));
+ if (sync_mode & OMAP_DMA_SYNC_FRAME)
+ val |= 1 << 5;
- omap_writew(elem_count, OMAP_DMA_CEN(lch));
- omap_writew(frame_count, OMAP_DMA_CFN(lch));
+ if (sync_mode & OMAP_DMA_SYNC_BLOCK)
+ val |= 1 << 18;
+ if (src_or_dst_synch)
+ val |= 1 << 24; /* source synch */
+ else
+ val &= ~(1 << 24); /* dest synch */
+
+ OMAP_DMA_CCR_REG(lch) = val;
+ }
+
+ OMAP_DMA_CEN_REG(lch) = elem_count;
+ OMAP_DMA_CFN_REG(lch) = frame_count;
}
+
void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color)
{
u16 w;
BUG_ON(omap_dma_in_1510_mode());
- w = omap_readw(OMAP_DMA_CCR2(lch)) & ~0x03;
+ if (cpu_is_omap24xx()) {
+ REVISIT_24XX();
+ return;
+ }
+
+ w = OMAP1_DMA_CCR2_REG(lch) & ~0x03;
switch (mode) {
case OMAP_DMA_CONSTANT_FILL:
w |= 0x01;
@@ -165,63 +215,84 @@ void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode, u32 color)
default:
BUG();
}
- omap_writew(w, OMAP_DMA_CCR2(lch));
+ OMAP1_DMA_CCR2_REG(lch) = w;
- w = omap_readw(OMAP_DMA_LCH_CTRL(lch)) & ~0x0f;
+ w = OMAP1_DMA_LCH_CTRL_REG(lch) & ~0x0f;
/* Default is channel type 2D */
if (mode) {
- omap_writew((u16)color, OMAP_DMA_COLOR_L(lch));
- omap_writew((u16)(color >> 16), OMAP_DMA_COLOR_U(lch));
+ OMAP1_DMA_COLOR_L_REG(lch) = (u16)color;
+ OMAP1_DMA_COLOR_U_REG(lch) = (u16)(color >> 16);
w |= 1; /* Channel type G */
}
- omap_writew(w, OMAP_DMA_LCH_CTRL(lch));
+ OMAP1_DMA_LCH_CTRL_REG(lch) = w;
}
-
+/* Note that src_port is only for omap1 */
void omap_set_dma_src_params(int lch, int src_port, int src_amode,
- unsigned long src_start)
+ unsigned long src_start,
+ int src_ei, int src_fi)
{
- u16 w;
+ if (cpu_class_is_omap1()) {
+ OMAP_DMA_CSDP_REG(lch) &= ~(0x1f << 2);
+ OMAP_DMA_CSDP_REG(lch) |= src_port << 2;
+ }
+
+ OMAP_DMA_CCR_REG(lch) &= ~(0x03 << 12);
+ OMAP_DMA_CCR_REG(lch) |= src_amode << 12;
+
+ if (cpu_class_is_omap1()) {
+ OMAP1_DMA_CSSA_U_REG(lch) = src_start >> 16;
+ OMAP1_DMA_CSSA_L_REG(lch) = src_start;
+ }
- w = omap_readw(OMAP_DMA_CSDP(lch));
- w &= ~(0x1f << 2);
- w |= src_port << 2;
- omap_writew(w, OMAP_DMA_CSDP(lch));
+ if (cpu_is_omap24xx())
+ OMAP2_DMA_CSSA_REG(lch) = src_start;
- w = omap_readw(OMAP_DMA_CCR(lch));
- w &= ~(0x03 << 12);
- w |= src_amode << 12;
- omap_writew(w, OMAP_DMA_CCR(lch));
+ OMAP_DMA_CSEI_REG(lch) = src_ei;
+ OMAP_DMA_CSFI_REG(lch) = src_fi;
+}
- omap_writew(src_start >> 16, OMAP_DMA_CSSA_U(lch));
- omap_writew(src_start, OMAP_DMA_CSSA_L(lch));
+void omap_set_dma_params(int lch, struct omap_dma_channel_params *params)
+{
+ omap_set_dma_transfer_params(lch, params->data_type,
+ params->elem_count, params->frame_count,
+ params->sync_mode, params->trigger,
+ params->src_or_dst_synch);
+ omap_set_dma_src_params(lch, params->src_port,
+ params->src_amode, params->src_start,
+ params->src_ei, params->src_fi);
+
+ omap_set_dma_dest_params(lch, params->dst_port,
+ params->dst_amode, params->dst_start,
+ params->dst_ei, params->dst_fi);
}
void omap_set_dma_src_index(int lch, int eidx, int fidx)
{
- omap_writew(eidx, OMAP_DMA_CSEI(lch));
- omap_writew(fidx, OMAP_DMA_CSFI(lch));
+ if (cpu_is_omap24xx()) {
+ REVISIT_24XX();
+ return;
+ }
+ OMAP_DMA_CSEI_REG(lch) = eidx;
+ OMAP_DMA_CSFI_REG(lch) = fidx;
}
void omap_set_dma_src_data_pack(int lch, int enable)
{
- u16 w;
-
- w = omap_readw(OMAP_DMA_CSDP(lch)) & ~(1 << 6);
- w |= enable ? (1 << 6) : 0;
- omap_writew(w, OMAP_DMA_CSDP(lch));
+ OMAP_DMA_CSDP_REG(lch) &= ~(1 << 6);
+ if (enable)
+ OMAP_DMA_CSDP_REG(lch) |= (1 << 6);
}
void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
{
- u16 w;
+ OMAP_DMA_CSDP_REG(lch) &= ~(0x03 << 7);
- w = omap_readw(OMAP_DMA_CSDP(lch)) & ~(0x03 << 7);
switch (burst_mode) {
case OMAP_DMA_DATA_BURST_DIS:
break;
case OMAP_DMA_DATA_BURST_4:
- w |= (0x01 << 7);
+ OMAP_DMA_CSDP_REG(lch) |= (0x02 << 7);
break;
case OMAP_DMA_DATA_BURST_8:
/* not supported by current hardware
@@ -231,110 +302,283 @@ void omap_set_dma_src_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
default:
BUG();
}
- omap_writew(w, OMAP_DMA_CSDP(lch));
}
+/* Note that dest_port is only for OMAP1 */
void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
- unsigned long dest_start)
+ unsigned long dest_start,
+ int dst_ei, int dst_fi)
{
- u16 w;
+ if (cpu_class_is_omap1()) {
+ OMAP_DMA_CSDP_REG(lch) &= ~(0x1f << 9);
+ OMAP_DMA_CSDP_REG(lch) |= dest_port << 9;
+ }
- w = omap_readw(OMAP_DMA_CSDP(lch));
- w &= ~(0x1f << 9);
- w |= dest_port << 9;
- omap_writew(w, OMAP_DMA_CSDP(lch));
+ OMAP_DMA_CCR_REG(lch) &= ~(0x03 << 14);
+ OMAP_DMA_CCR_REG(lch) |= dest_amode << 14;
+
+ if (cpu_class_is_omap1()) {
+ OMAP1_DMA_CDSA_U_REG(lch) = dest_start >> 16;
+ OMAP1_DMA_CDSA_L_REG(lch) = dest_start;
+ }
- w = omap_readw(OMAP_DMA_CCR(lch));
- w &= ~(0x03 << 14);
- w |= dest_amode << 14;
- omap_writew(w, OMAP_DMA_CCR(lch));
+ if (cpu_is_omap24xx())
+ OMAP2_DMA_CDSA_REG(lch) = dest_start;
- omap_writew(dest_start >> 16, OMAP_DMA_CDSA_U(lch));
- omap_writew(dest_start, OMAP_DMA_CDSA_L(lch));
+ OMAP_DMA_CDEI_REG(lch) = dst_ei;
+ OMAP_DMA_CDFI_REG(lch) = dst_fi;
}
void omap_set_dma_dest_index(int lch, int eidx, int fidx)
{
- omap_writew(eidx, OMAP_DMA_CDEI(lch));
- omap_writew(fidx, OMAP_DMA_CDFI(lch));
+ if (cpu_is_omap24xx()) {
+ REVISIT_24XX();
+ return;
+ }
+ OMAP_DMA_CDEI_REG(lch) = eidx;
+ OMAP_DMA_CDFI_REG(lch) = fidx;
}
void omap_set_dma_dest_data_pack(int lch, int enable)
{
- u16 w;
-
- w = omap_readw(OMAP_DMA_CSDP(lch)) & ~(1 << 13);
- w |= enable ? (1 << 13) : 0;
- omap_writew(w, OMAP_DMA_CSDP(lch));
+ OMAP_DMA_CSDP_REG(lch) &= ~(1 << 13);
+ if (enable)
+ OMAP_DMA_CSDP_REG(lch) |= 1 << 13;
}
void omap_set_dma_dest_burst_mode(int lch, enum omap_dma_burst_mode burst_mode)
{
- u16 w;
+ OMAP_DMA_CSDP_REG(lch) &= ~(0x03 << 14);
- w = omap_readw(OMAP_DMA_CSDP(lch)) & ~(0x03 << 14);
switch (burst_mode) {
case OMAP_DMA_DATA_BURST_DIS:
break;
case OMAP_DMA_DATA_BURST_4:
- w |= (0x01 << 14);
+ OMAP_DMA_CSDP_REG(lch) |= (0x02 << 14);
break;
case OMAP_DMA_DATA_BURST_8:
- w |= (0x03 << 14);
+ OMAP_DMA_CSDP_REG(lch) |= (0x03 << 14);
break;
default:
printk(KERN_ERR "Invalid DMA burst mode\n");
BUG();
return;
}
- omap_writew(w, OMAP_DMA_CSDP(lch));
}
-static inline void init_intr(int lch)
+static inline void omap_enable_channel_irq(int lch)
{
- u16 w;
+ u32 status;
/* Read CSR to make sure it's cleared. */
- w = omap_readw(OMAP_DMA_CSR(lch));
+ status = OMAP_DMA_CSR_REG(lch);
+
/* Enable some nice interrupts. */
- omap_writew(dma_chan[lch].enabled_irqs, OMAP_DMA_CICR(lch));
+ OMAP_DMA_CICR_REG(lch) = dma_chan[lch].enabled_irqs;
+
dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
}
-static inline void enable_lnk(int lch)
+static void omap_disable_channel_irq(int lch)
{
- u16 w;
+ if (cpu_is_omap24xx())
+ OMAP_DMA_CICR_REG(lch) = 0;
+}
+
+void omap_enable_dma_irq(int lch, u16 bits)
+{
+ dma_chan[lch].enabled_irqs |= bits;
+}
- /* Clear the STOP_LNK bits */
- w = omap_readw(OMAP_DMA_CLNK_CTRL(lch));
- w &= ~(1 << 14);
- omap_writew(w, OMAP_DMA_CLNK_CTRL(lch));
+void omap_disable_dma_irq(int lch, u16 bits)
+{
+ dma_chan[lch].enabled_irqs &= ~bits;
+}
+
+static inline void enable_lnk(int lch)
+{
+ if (cpu_class_is_omap1())
+ OMAP_DMA_CLNK_CTRL_REG(lch) &= ~(1 << 14);
- /* And set the ENABLE_LNK bits */
+ /* Set the ENABLE_LNK bits */
if (dma_chan[lch].next_lch != -1)
- omap_writew(dma_chan[lch].next_lch | (1 << 15),
- OMAP_DMA_CLNK_CTRL(lch));
+ OMAP_DMA_CLNK_CTRL_REG(lch) =
+ dma_chan[lch].next_lch | (1 << 15);
}
static inline void disable_lnk(int lch)
{
- u16 w;
-
/* Disable interrupts */
- omap_writew(0, OMAP_DMA_CICR(lch));
+ if (cpu_class_is_omap1()) {
+ OMAP_DMA_CICR_REG(lch) = 0;
+ /* Set the STOP_LNK bit */
+ OMAP_DMA_CLNK_CTRL_REG(lch) |= 1 << 14;
+ }
- /* Set the STOP_LNK bit */
- w = omap_readw(OMAP_DMA_CLNK_CTRL(lch));
- w |= (1 << 14);
- w = omap_writew(w, OMAP_DMA_CLNK_CTRL(lch));
+ if (cpu_is_omap24xx()) {
+ omap_disable_channel_irq(lch);
+ /* Clear the ENABLE_LNK bit */
+ OMAP_DMA_CLNK_CTRL_REG(lch) &= ~(1 << 15);
+ }
dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
}
-void omap_start_dma(int lch)
+static inline void omap2_enable_irq_lch(int lch)
{
- u16 w;
+ u32 val;
+
+ if (!cpu_is_omap24xx())
+ return;
+
+ val = omap_readl(OMAP_DMA4_IRQENABLE_L0);
+ val |= 1 << lch;
+ omap_writel(val, OMAP_DMA4_IRQENABLE_L0);
+}
+
+int omap_request_dma(int dev_id, const char *dev_name,
+ void (* callback)(int lch, u16 ch_status, void *data),
+ void *data, int *dma_ch_out)
+{
+ int ch, free_ch = -1;
+ unsigned long flags;
+ struct omap_dma_lch *chan;
+
+ spin_lock_irqsave(&dma_chan_lock, flags);
+ for (ch = 0; ch < dma_chan_count; ch++) {
+ if (free_ch == -1 && dma_chan[ch].dev_id == -1) {
+ free_ch = ch;
+ if (dev_id == 0)
+ break;
+ }
+ }
+ if (free_ch == -1) {
+ spin_unlock_irqrestore(&dma_chan_lock, flags);
+ return -EBUSY;
+ }
+ chan = dma_chan + free_ch;
+ chan->dev_id = dev_id;
+
+ if (cpu_class_is_omap1())
+ clear_lch_regs(free_ch);
+ if (cpu_is_omap24xx())
+ omap_clear_dma(free_ch);
+
+ spin_unlock_irqrestore(&dma_chan_lock, flags);
+
+ chan->dev_name = dev_name;
+ chan->callback = callback;
+ chan->data = data;
+ chan->enabled_irqs = OMAP_DMA_TOUT_IRQ | OMAP_DMA_DROP_IRQ |
+ OMAP_DMA_BLOCK_IRQ;
+
+ if (cpu_is_omap24xx())
+ chan->enabled_irqs |= OMAP2_DMA_TRANS_ERR_IRQ;
+
+ if (cpu_is_omap16xx()) {
+ /* If the sync device is set, configure it dynamically. */
+ if (dev_id != 0) {
+ set_gdma_dev(free_ch + 1, dev_id);
+ dev_id = free_ch + 1;
+ }
+ /* Disable the 1510 compatibility mode and set the sync device
+ * id. */
+ OMAP_DMA_CCR_REG(free_ch) = dev_id | (1 << 10);
+ } else if (cpu_is_omap730() || cpu_is_omap15xx()) {
+ OMAP_DMA_CCR_REG(free_ch) = dev_id;
+ }
+
+ if (cpu_is_omap24xx()) {
+ omap2_enable_irq_lch(free_ch);
+
+ omap_enable_channel_irq(free_ch);
+ /* Clear the CSR register and IRQ status register */
+ OMAP_DMA_CSR_REG(free_ch) = 0x0;
+ omap_writel(~0x0, OMAP_DMA4_IRQSTATUS_L0);
+ }
+
+ *dma_ch_out = free_ch;
+
+ return 0;
+}
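For reference, a minimal and purely illustrative user of the merged DMA API might look like the sketch below. The prototypes match the functions in this file, but OMAP_DMA_DATA_TYPE_S16, OMAP_DMA_AMODE_POST_INC, OMAP_DMA_PORT_EMIFF and the dev_id value are assumptions standing in for whatever a real driver uses, and error handling is trimmed:

	static void example_dma_callback(int lch, u16 ch_status, void *data)
	{
		/* Runs from the DMA interrupt handler on block completion etc. */
	}

	static int example_dma_user(int dev_id, dma_addr_t src, dma_addr_t dst,
				    int elems, int frames)
	{
		int lch, ret;

		ret = omap_request_dma(dev_id, "example", example_dma_callback,
				       NULL, &lch);
		if (ret)
			return ret;

		/* dma_trigger is only used on 24xx (see above); omap1 takes the
		 * sync device from dev_id at request time instead. */
		omap_set_dma_transfer_params(lch, OMAP_DMA_DATA_TYPE_S16,
					     elems, frames, OMAP_DMA_SYNC_FRAME,
					     dev_id, 0);
		omap_set_dma_src_params(lch, OMAP_DMA_PORT_EMIFF,
					OMAP_DMA_AMODE_POST_INC, src, 0, 0);
		omap_set_dma_dest_params(lch, OMAP_DMA_PORT_EMIFF,
					 OMAP_DMA_AMODE_POST_INC, dst, 0, 0);

		omap_start_dma(lch);
		/* ... wait for the callback to signal completion ... */
		omap_stop_dma(lch);
		omap_free_dma(lch);
		return 0;
	}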
+
+void omap_free_dma(int lch)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&dma_chan_lock, flags);
+ if (dma_chan[lch].dev_id == -1) {
+ printk("omap_dma: trying to free nonallocated DMA channel %d\n",
+ lch);
+ spin_unlock_irqrestore(&dma_chan_lock, flags);
+ return;
+ }
+ dma_chan[lch].dev_id = -1;
+ dma_chan[lch].next_lch = -1;
+ dma_chan[lch].callback = NULL;
+ spin_unlock_irqrestore(&dma_chan_lock, flags);
+
+ if (cpu_class_is_omap1()) {
+ /* Disable all DMA interrupts for the channel. */
+ OMAP_DMA_CICR_REG(lch) = 0;
+ /* Make sure the DMA transfer is stopped. */
+ OMAP_DMA_CCR_REG(lch) = 0;
+ }
+
+ if (cpu_is_omap24xx()) {
+ u32 val;
+ /* Disable interrupts */
+ val = omap_readl(OMAP_DMA4_IRQENABLE_L0);
+ val &= ~(1 << lch);
+ omap_writel(val, OMAP_DMA4_IRQENABLE_L0);
+
+ /* Clear the CSR register and IRQ status register */
+ OMAP_DMA_CSR_REG(lch) = 0x0;
+
+ val = omap_readl(OMAP_DMA4_IRQSTATUS_L0);
+ val |= 1 << lch;
+ omap_writel(val, OMAP_DMA4_IRQSTATUS_L0);
+
+ /* Disable all DMA interrupts for the channel. */
+ OMAP_DMA_CICR_REG(lch) = 0;
+
+ /* Make sure the DMA transfer is stopped. */
+ OMAP_DMA_CCR_REG(lch) = 0;
+ omap_clear_dma(lch);
+ }
+}
+
+/*
+ * Clears any DMA state so the DMA engine is ready to restart with new buffers
+ * through omap_start_dma(). Any buffers in flight are discarded.
+ */
+void omap_clear_dma(int lch)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ if (cpu_class_is_omap1()) {
+ int status;
+ OMAP_DMA_CCR_REG(lch) &= ~OMAP_DMA_CCR_EN;
+
+ /* Clear pending interrupts */
+ status = OMAP_DMA_CSR_REG(lch);
+ }
+
+ if (cpu_is_omap24xx()) {
+ int i;
+ u32 lch_base = OMAP24XX_DMA_BASE + lch * 0x60 + 0x80;
+ for (i = 0; i < 0x44; i += 4)
+ omap_writel(0, lch_base + i);
+ }
+
+ local_irq_restore(flags);
+}
+
+void omap_start_dma(int lch)
+{
if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
int next_lch, cur_lch;
char dma_chan_link_map[OMAP_LOGICAL_DMA_CH_COUNT];
@@ -348,31 +592,37 @@ void omap_start_dma(int lch)
do {
next_lch = dma_chan[cur_lch].next_lch;
- /* The loop case: we've been here already */
+ /* The loop case: we've been here already */
if (dma_chan_link_map[cur_lch])
break;
/* Mark the current channel */
dma_chan_link_map[cur_lch] = 1;
enable_lnk(cur_lch);
- init_intr(cur_lch);
+ omap_enable_channel_irq(cur_lch);
cur_lch = next_lch;
} while (next_lch != -1);
+ } else if (cpu_is_omap24xx()) {
+ /* Errata: Need to write lch even if not using chaining */
+ OMAP_DMA_CLNK_CTRL_REG(lch) = lch;
}
- init_intr(lch);
+ omap_enable_channel_irq(lch);
+
+ /* Errata: On ES2.0 BUFFERING disable must be set.
+ * This will always fail on ES1.0 */
+ if (cpu_is_omap24xx()) {
+ OMAP_DMA_CCR_REG(lch) |= OMAP_DMA_CCR_EN;
+ }
+
+ OMAP_DMA_CCR_REG(lch) |= OMAP_DMA_CCR_EN;
- w = omap_readw(OMAP_DMA_CCR(lch));
- w |= OMAP_DMA_CCR_EN;
- omap_writew(w, OMAP_DMA_CCR(lch));
dma_chan[lch].flags |= OMAP_DMA_ACTIVE;
}
void omap_stop_dma(int lch)
{
- u16 w;
-
if (!omap_dma_in_1510_mode() && dma_chan[lch].next_lch != -1) {
int next_lch, cur_lch = lch;
char dma_chan_link_map[OMAP_LOGICAL_DMA_CH_COUNT];
@@ -393,146 +643,83 @@ void omap_stop_dma(int lch)
return;
}
+
/* Disable all interrupts on the channel */
- omap_writew(0, OMAP_DMA_CICR(lch));
+ if (cpu_class_is_omap1())
+ OMAP_DMA_CICR_REG(lch) = 0;
- w = omap_readw(OMAP_DMA_CCR(lch));
- w &= ~OMAP_DMA_CCR_EN;
- omap_writew(w, OMAP_DMA_CCR(lch));
+ OMAP_DMA_CCR_REG(lch) &= ~OMAP_DMA_CCR_EN;
dma_chan[lch].flags &= ~OMAP_DMA_ACTIVE;
}
-void omap_enable_dma_irq(int lch, u16 bits)
+/*
+ * Returns the current physical source address for the given DMA channel.
+ * If the channel is running, the caller must disable interrupts prior to
+ * calling this function and process the returned value before re-enabling
+ * interrupts to prevent races with the interrupt handler. Note that in
+ * continuous mode there is a chance of the CSSA_L register overflowing in
+ * between the two reads, resulting in an incorrect return value.
+ */
+dma_addr_t omap_get_dma_src_pos(int lch)
{
- dma_chan[lch].enabled_irqs |= bits;
-}
+ dma_addr_t offset;
-void omap_disable_dma_irq(int lch, u16 bits)
-{
- dma_chan[lch].enabled_irqs &= ~bits;
-}
+ if (cpu_class_is_omap1())
+ offset = (dma_addr_t) (OMAP1_DMA_CSSA_L_REG(lch) |
+ (OMAP1_DMA_CSSA_U_REG(lch) << 16));
-static int dma_handle_ch(int ch)
-{
- u16 csr;
+ if (cpu_is_omap24xx())
+ offset = OMAP_DMA_CSAC_REG(lch);
- if (enable_1510_mode && ch >= 6) {
- csr = dma_chan[ch].saved_csr;
- dma_chan[ch].saved_csr = 0;
- } else
- csr = omap_readw(OMAP_DMA_CSR(ch));
- if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) {
- dma_chan[ch + 6].saved_csr = csr >> 7;
- csr &= 0x7f;
- }
- if ((csr & 0x3f) == 0)
- return 0;
- if (unlikely(dma_chan[ch].dev_id == -1)) {
- printk(KERN_WARNING "Spurious interrupt from DMA channel %d (CSR %04x)\n",
- ch, csr);
- return 0;
- }
- if (unlikely(csr & OMAP_DMA_TOUT_IRQ))
- printk(KERN_WARNING "DMA timeout with device %d\n", dma_chan[ch].dev_id);
- if (unlikely(csr & OMAP_DMA_DROP_IRQ))
- printk(KERN_WARNING "DMA synchronization event drop occurred with device %d\n",
- dma_chan[ch].dev_id);
- if (likely(csr & OMAP_DMA_BLOCK_IRQ))
- dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
- if (likely(dma_chan[ch].callback != NULL))
- dma_chan[ch].callback(ch, csr, dma_chan[ch].data);
- return 1;
+ return offset;
}
-static irqreturn_t dma_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
+/*
+ * Returns the current physical destination address for the given DMA channel.
+ * If the channel is running, the caller must disable interrupts prior to
+ * calling this function and process the returned value before re-enabling
+ * interrupts to prevent races with the interrupt handler. Note that in
+ * continuous mode there is a chance of the CDSA_L register overflowing in
+ * between the two reads, resulting in an incorrect return value.
+ */
+dma_addr_t omap_get_dma_dst_pos(int lch)
{
- int ch = ((int) dev_id) - 1;
- int handled = 0;
+ dma_addr_t offset;
- for (;;) {
- int handled_now = 0;
+ if (cpu_class_is_omap1())
+ offset = (dma_addr_t) (OMAP1_DMA_CDSA_L_REG(lch) |
+ (OMAP1_DMA_CDSA_U_REG(lch) << 16));
- handled_now += dma_handle_ch(ch);
- if (enable_1510_mode && dma_chan[ch + 6].saved_csr)
- handled_now += dma_handle_ch(ch + 6);
- if (!handled_now)
- break;
- handled += handled_now;
- }
+ if (cpu_is_omap24xx())
+ offset = OMAP2_DMA_CDSA_REG(lch);
- return handled ? IRQ_HANDLED : IRQ_NONE;
+ return offset;
}
-int omap_request_dma(int dev_id, const char *dev_name,
- void (* callback)(int lch, u16 ch_status, void *data),
- void *data, int *dma_ch_out)
+/*
+ * Returns the current source transfer count for the given DMA channel.
+ * Can be used to monitor the progress of a transfer inside a block.
+ * It must be called with interrupts disabled.
+ */
+int omap_get_dma_src_addr_counter(int lch)
{
- int ch, free_ch = -1;
- unsigned long flags;
- struct omap_dma_lch *chan;
-
- spin_lock_irqsave(&dma_chan_lock, flags);
- for (ch = 0; ch < dma_chan_count; ch++) {
- if (free_ch == -1 && dma_chan[ch].dev_id == -1) {
- free_ch = ch;
- if (dev_id == 0)
- break;
- }
- }
- if (free_ch == -1) {
- spin_unlock_irqrestore(&dma_chan_lock, flags);
- return -EBUSY;
- }
- chan = dma_chan + free_ch;
- chan->dev_id = dev_id;
- clear_lch_regs(free_ch);
- spin_unlock_irqrestore(&dma_chan_lock, flags);
-
- chan->dev_id = dev_id;
- chan->dev_name = dev_name;
- chan->callback = callback;
- chan->data = data;
- chan->enabled_irqs = OMAP_DMA_TOUT_IRQ | OMAP_DMA_DROP_IRQ | OMAP_DMA_BLOCK_IRQ;
-
- if (cpu_is_omap16xx()) {
- /* If the sync device is set, configure it dynamically. */
- if (dev_id != 0) {
- set_gdma_dev(free_ch + 1, dev_id);
- dev_id = free_ch + 1;
- }
- /* Disable the 1510 compatibility mode and set the sync device
- * id. */
- omap_writew(dev_id | (1 << 10), OMAP_DMA_CCR(free_ch));
- } else {
- omap_writew(dev_id, OMAP_DMA_CCR(free_ch));
- }
- *dma_ch_out = free_ch;
-
- return 0;
+ return (dma_addr_t) OMAP_DMA_CSAC_REG(lch);
}
-void omap_free_dma(int ch)
+int omap_dma_running(void)
{
- unsigned long flags;
+ int lch;
- spin_lock_irqsave(&dma_chan_lock, flags);
- if (dma_chan[ch].dev_id == -1) {
- printk("omap_dma: trying to free nonallocated DMA channel %d\n", ch);
- spin_unlock_irqrestore(&dma_chan_lock, flags);
- return;
- }
- dma_chan[ch].dev_id = -1;
- spin_unlock_irqrestore(&dma_chan_lock, flags);
+ /* Check if LCD DMA is running */
+ if (cpu_is_omap16xx())
+ if (omap_readw(OMAP1610_DMA_LCD_CCR) & OMAP_DMA_CCR_EN)
+ return 1;
- /* Disable all DMA interrupts for the channel. */
- omap_writew(0, OMAP_DMA_CICR(ch));
- /* Make sure the DMA transfer is stopped. */
- omap_writew(0, OMAP_DMA_CCR(ch));
-}
+ for (lch = 0; lch < dma_chan_count; lch++)
+ if (OMAP_DMA_CCR_REG(lch) & OMAP_DMA_CCR_EN)
+ return 1;
-int omap_dma_in_1510_mode(void)
-{
- return enable_1510_mode;
+ return 0;
}
/*
@@ -550,7 +737,8 @@ void omap_dma_link_lch (int lch_head, int lch_queue)
if ((dma_chan[lch_head].dev_id == -1) ||
(dma_chan[lch_queue].dev_id == -1)) {
- printk(KERN_ERR "omap_dma: trying to link non requested channels\n");
+ printk(KERN_ERR "omap_dma: trying to link "
+ "non requested channels\n");
dump_stack();
}
@@ -570,20 +758,149 @@ void omap_dma_unlink_lch (int lch_head, int lch_queue)
if (dma_chan[lch_head].next_lch != lch_queue ||
dma_chan[lch_head].next_lch == -1) {
- printk(KERN_ERR "omap_dma: trying to unlink non linked channels\n");
+ printk(KERN_ERR "omap_dma: trying to unlink "
+ "non linked channels\n");
dump_stack();
}
if ((dma_chan[lch_head].flags & OMAP_DMA_ACTIVE) ||
(dma_chan[lch_head].flags & OMAP_DMA_ACTIVE)) {
- printk(KERN_ERR "omap_dma: You need to stop the DMA channels before unlinking\n");
+ printk(KERN_ERR "omap_dma: You need to stop the DMA channels "
+ "before unlinking\n");
dump_stack();
}
dma_chan[lch_head].next_lch = -1;
}
+/*----------------------------------------------------------------------------*/
+
+#ifdef CONFIG_ARCH_OMAP1
+
+static int omap1_dma_handle_ch(int ch)
+{
+ u16 csr;
+
+ if (enable_1510_mode && ch >= 6) {
+ csr = dma_chan[ch].saved_csr;
+ dma_chan[ch].saved_csr = 0;
+ } else
+ csr = OMAP_DMA_CSR_REG(ch);
+ if (enable_1510_mode && ch <= 2 && (csr >> 7) != 0) {
+ dma_chan[ch + 6].saved_csr = csr >> 7;
+ csr &= 0x7f;
+ }
+ if ((csr & 0x3f) == 0)
+ return 0;
+ if (unlikely(dma_chan[ch].dev_id == -1)) {
+ printk(KERN_WARNING "Spurious interrupt from DMA channel "
+ "%d (CSR %04x)\n", ch, csr);
+ return 0;
+ }
+ if (unlikely(csr & OMAP_DMA_TOUT_IRQ))
+ printk(KERN_WARNING "DMA timeout with device %d\n",
+ dma_chan[ch].dev_id);
+ if (unlikely(csr & OMAP_DMA_DROP_IRQ))
+ printk(KERN_WARNING "DMA synchronization event drop occurred "
+ "with device %d\n", dma_chan[ch].dev_id);
+ if (likely(csr & OMAP_DMA_BLOCK_IRQ))
+ dma_chan[ch].flags &= ~OMAP_DMA_ACTIVE;
+ if (likely(dma_chan[ch].callback != NULL))
+ dma_chan[ch].callback(ch, csr, dma_chan[ch].data);
+ return 1;
+}
+
+static irqreturn_t omap1_dma_irq_handler(int irq, void *dev_id,
+ struct pt_regs *regs)
+{
+ int ch = ((int) dev_id) - 1;
+ int handled = 0;
+
+ for (;;) {
+ int handled_now = 0;
+
+ handled_now += omap1_dma_handle_ch(ch);
+ if (enable_1510_mode && dma_chan[ch + 6].saved_csr)
+ handled_now += omap1_dma_handle_ch(ch + 6);
+ if (!handled_now)
+ break;
+ handled += handled_now;
+ }
+
+ return handled ? IRQ_HANDLED : IRQ_NONE;
+}
+
+#else
+#define omap1_dma_irq_handler NULL
+#endif
+
+#ifdef CONFIG_ARCH_OMAP2
+
+static int omap2_dma_handle_ch(int ch)
+{
+ u32 status = OMAP_DMA_CSR_REG(ch);
+ u32 val;
+
+ if (!status)
+ return 0;
+ if (unlikely(dma_chan[ch].dev_id == -1))
+ return 0;
+ /* REVISIT: According to 24xx TRM, there's no TOUT_IE */
+ if (unlikely(status & OMAP_DMA_TOUT_IRQ))
+ printk(KERN_INFO "DMA timeout with device %d\n",
+ dma_chan[ch].dev_id);
+ if (unlikely(status & OMAP_DMA_DROP_IRQ))
+ printk(KERN_INFO
+ "DMA synchronization event drop occurred with device "
+ "%d\n", dma_chan[ch].dev_id);
+
+ if (unlikely(status & OMAP2_DMA_TRANS_ERR_IRQ))
+ printk(KERN_INFO "DMA transaction error with device %d\n",
+ dma_chan[ch].dev_id);
+
+ OMAP_DMA_CSR_REG(ch) = 0x20;
+
+ val = omap_readl(OMAP_DMA4_IRQSTATUS_L0);
+ /* ch in this function is from 0-31 while in register it is 1-32 */
+ val = 1 << (ch);
+ omap_writel(val, OMAP_DMA4_IRQSTATUS_L0);
+
+ if (likely(dma_chan[ch].callback != NULL))
+ dma_chan[ch].callback(ch, status, dma_chan[ch].data);
+
+ return 0;
+}
+
+/* STATUS register count is from 1-32 while ours is 0-31 */
+static irqreturn_t omap2_dma_irq_handler(int irq, void *dev_id,
+ struct pt_regs *regs)
+{
+ u32 val;
+ int i;
+
+ val = omap_readl(OMAP_DMA4_IRQSTATUS_L0);
+
+ for (i = 1; i <= OMAP_LOGICAL_DMA_CH_COUNT; i++) {
+ int active = val & (1 << (i - 1));
+ if (active)
+ omap2_dma_handle_ch(i - 1);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static struct irqaction omap24xx_dma_irq = {
+ .name = "DMA",
+ .handler = omap2_dma_irq_handler,
+ .flags = SA_INTERRUPT
+};
+
+#else
+static struct irqaction omap24xx_dma_irq;
+#endif
+
+/*----------------------------------------------------------------------------*/
static struct lcd_dma_info {
spinlock_t lock;
@@ -795,7 +1112,7 @@ static void set_b1_regs(void)
/* Always set the source port as SDRAM for now*/
w &= ~(0x03 << 6);
if (lcd_dma.callback != NULL)
- w |= 1 << 1; /* Block interrupt enable */
+ w |= 1 << 1; /* Block interrupt enable */
else
w &= ~(1 << 1);
omap_writew(w, OMAP1610_DMA_LCD_CTRL);
@@ -814,7 +1131,8 @@ static void set_b1_regs(void)
omap_writew(fi, OMAP1610_DMA_LCD_SRC_FI_B1_L);
}
-static irqreturn_t lcd_dma_irq_handler(int irq, void *dev_id, struct pt_regs *regs)
+static irqreturn_t lcd_dma_irq_handler(int irq, void *dev_id,
+ struct pt_regs *regs)
{
u16 w;
@@ -870,7 +1188,8 @@ void omap_free_lcd_dma(void)
return;
}
if (!enable_1510_mode)
- omap_writew(omap_readw(OMAP1610_DMA_LCD_CCR) & ~1, OMAP1610_DMA_LCD_CCR);
+ omap_writew(omap_readw(OMAP1610_DMA_LCD_CCR) & ~1,
+ OMAP1610_DMA_LCD_CCR);
lcd_dma.reserved = 0;
spin_unlock(&lcd_dma.lock);
}
@@ -939,93 +1258,24 @@ void omap_stop_lcd_dma(void)
omap_writew(w, OMAP1610_DMA_LCD_CTRL);
}
-/*
- * Clears any DMA state so the DMA engine is ready to restart with new buffers
- * through omap_start_dma(). Any buffers in flight are discarded.
- */
-void omap_clear_dma(int lch)
-{
- unsigned long flags;
- int status;
-
- local_irq_save(flags);
- omap_writew(omap_readw(OMAP_DMA_CCR(lch)) & ~OMAP_DMA_CCR_EN,
- OMAP_DMA_CCR(lch));
- status = OMAP_DMA_CSR(lch); /* clear pending interrupts */
- local_irq_restore(flags);
-}
-
-/*
- * Returns current physical source address for the given DMA channel.
- * If the channel is running the caller must disable interrupts prior calling
- * this function and process the returned value before re-enabling interrupt to
- * prevent races with the interrupt handler. Note that in continuous mode there
- * is a chance for CSSA_L register overflow inbetween the two reads resulting
- * in incorrect return value.
- */
-dma_addr_t omap_get_dma_src_pos(int lch)
-{
- return (dma_addr_t) (omap_readw(OMAP_DMA_CSSA_L(lch)) |
- (omap_readw(OMAP_DMA_CSSA_U(lch)) << 16));
-}
-
-/*
- * Returns current physical destination address for the given DMA channel.
- * If the channel is running the caller must disable interrupts prior calling
- * this function and process the returned value before re-enabling interrupt to
- * prevent races with the interrupt handler. Note that in continuous mode there
- * is a chance for CDSA_L register overflow inbetween the two reads resulting
- * in incorrect return value.
- */
-dma_addr_t omap_get_dma_dst_pos(int lch)
-{
- return (dma_addr_t) (omap_readw(OMAP_DMA_CDSA_L(lch)) |
- (omap_readw(OMAP_DMA_CDSA_U(lch)) << 16));
-}
-
-/*
- * Returns current source transfer counting for the given DMA channel.
- * Can be used to monitor the progress of a transfer inside a block.
- * It must be called with disabled interrupts.
- */
-int omap_get_dma_src_addr_counter(int lch)
-{
- return (dma_addr_t) omap_readw(OMAP_DMA_CSAC(lch));
-}
-
-int omap_dma_running(void)
-{
- int lch;
-
- /* Check if LCD DMA is running */
- if (cpu_is_omap16xx())
- if (omap_readw(OMAP1610_DMA_LCD_CCR) & OMAP_DMA_CCR_EN)
- return 1;
-
- for (lch = 0; lch < dma_chan_count; lch++) {
- u16 w;
-
- w = omap_readw(OMAP_DMA_CCR(lch));
- if (w & OMAP_DMA_CCR_EN)
- return 1;
- }
- return 0;
-}
+/*----------------------------------------------------------------------------*/
static int __init omap_init_dma(void)
{
int ch, r;
- if (cpu_is_omap1510()) {
- printk(KERN_INFO "DMA support for OMAP1510 initialized\n");
+ if (cpu_is_omap15xx()) {
+ printk(KERN_INFO "DMA support for OMAP15xx initialized\n");
dma_chan_count = 9;
enable_1510_mode = 1;
} else if (cpu_is_omap16xx() || cpu_is_omap730()) {
printk(KERN_INFO "OMAP DMA hardware version %d\n",
omap_readw(OMAP_DMA_HW_ID));
printk(KERN_INFO "DMA capabilities: %08x:%08x:%04x:%04x:%04x\n",
- (omap_readw(OMAP_DMA_CAPS_0_U) << 16) | omap_readw(OMAP_DMA_CAPS_0_L),
- (omap_readw(OMAP_DMA_CAPS_1_U) << 16) | omap_readw(OMAP_DMA_CAPS_1_L),
+ (omap_readw(OMAP_DMA_CAPS_0_U) << 16) |
+ omap_readw(OMAP_DMA_CAPS_0_L),
+ (omap_readw(OMAP_DMA_CAPS_1_U) << 16) |
+ omap_readw(OMAP_DMA_CAPS_1_L),
omap_readw(OMAP_DMA_CAPS_2), omap_readw(OMAP_DMA_CAPS_3),
omap_readw(OMAP_DMA_CAPS_4));
if (!enable_1510_mode) {
@@ -1038,6 +1288,11 @@ static int __init omap_init_dma(void)
dma_chan_count = 16;
} else
dma_chan_count = 9;
+ } else if (cpu_is_omap24xx()) {
+ u8 revision = omap_readb(OMAP_DMA4_REVISION);
+ printk(KERN_INFO "OMAP DMA hardware revision %d.%d\n",
+ revision >> 4, revision & 0xf);
+ dma_chan_count = OMAP_LOGICAL_DMA_CH_COUNT;
} else {
dma_chan_count = 0;
return 0;
@@ -1049,41 +1304,56 @@ static int __init omap_init_dma(void)
memset(&dma_chan, 0, sizeof(dma_chan));
for (ch = 0; ch < dma_chan_count; ch++) {
+ omap_clear_dma(ch);
dma_chan[ch].dev_id = -1;
dma_chan[ch].next_lch = -1;
if (ch >= 6 && enable_1510_mode)
continue;
- /* request_irq() doesn't like dev_id (ie. ch) being zero,
- * so we have to kludge around this. */
- r = request_irq(dma_irq[ch], dma_irq_handler, 0, "DMA",
- (void *) (ch + 1));
+ if (cpu_class_is_omap1()) {
+ /* request_irq() doesn't like dev_id (ie. ch) being
+ * zero, so we have to kludge around this. */
+ r = request_irq(omap1_dma_irq[ch],
+ omap1_dma_irq_handler, 0, "DMA",
+ (void *) (ch + 1));
+ if (r != 0) {
+ int i;
+
+ printk(KERN_ERR "unable to request IRQ %d "
+ "for DMA (error %d)\n",
+ omap1_dma_irq[ch], r);
+ for (i = 0; i < ch; i++)
+ free_irq(omap1_dma_irq[i],
+ (void *) (i + 1));
+ return r;
+ }
+ }
+ }
+
+ if (cpu_is_omap24xx())
+ setup_irq(INT_24XX_SDMA_IRQ0, &omap24xx_dma_irq);
+
+ /* FIXME: Update LCD DMA to work on 24xx */
+ if (cpu_class_is_omap1()) {
+ r = request_irq(INT_DMA_LCD, lcd_dma_irq_handler, 0,
+ "LCD DMA", NULL);
if (r != 0) {
int i;
- printk(KERN_ERR "unable to request IRQ %d for DMA (error %d)\n",
- dma_irq[ch], r);
- for (i = 0; i < ch; i++)
- free_irq(dma_irq[i], (void *) (i + 1));
+ printk(KERN_ERR "unable to request IRQ for LCD DMA "
+ "(error %d)\n", r);
+ for (i = 0; i < dma_chan_count; i++)
+ free_irq(omap1_dma_irq[i], (void *) (i + 1));
return r;
}
}
- r = request_irq(INT_DMA_LCD, lcd_dma_irq_handler, 0, "LCD DMA", NULL);
- if (r != 0) {
- int i;
- printk(KERN_ERR "unable to request IRQ for LCD DMA (error %d)\n", r);
- for (i = 0; i < dma_chan_count; i++)
- free_irq(dma_irq[i], (void *) (i + 1));
- return r;
- }
return 0;
}
arch_initcall(omap_init_dma);
-
EXPORT_SYMBOL(omap_get_dma_src_pos);
EXPORT_SYMBOL(omap_get_dma_dst_pos);
EXPORT_SYMBOL(omap_get_dma_src_addr_counter);
@@ -1109,6 +1379,8 @@ EXPORT_SYMBOL(omap_set_dma_dest_index);
EXPORT_SYMBOL(omap_set_dma_dest_data_pack);
EXPORT_SYMBOL(omap_set_dma_dest_burst_mode);
+EXPORT_SYMBOL(omap_set_dma_params);
+
EXPORT_SYMBOL(omap_dma_link_lch);
EXPORT_SYMBOL(omap_dma_unlink_lch);
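Both interrupt handlers above finish by invoking the callback stored in dma_chan[ch], passing the channel number and the raw status word. A minimal sketch of a client of that path follows; the callback prototype matches the call sites above and the declaration in <asm/arch/dma.h> of this tree, while the request line, names and error handling are placeholders rather than anything taken from this patch.

/* Hypothetical consumer of the DMA callback path above (sketch only). */
#include <linux/kernel.h>
#include <linux/init.h>
#include <asm/arch/dma.h>

static int example_lch;				/* logical channel from omap_request_dma() */

static void example_dma_callback(int lch, u16 ch_status, void *data)
{
	/* ch_status is the CSR value forwarded by the handlers above */
	if (ch_status & OMAP_DMA_DROP_IRQ)
		printk(KERN_WARNING "example: sync event drop on ch %d\n", lch);
}

static int __init example_dma_init(void)
{
	/* OMAP_DMA_MCBSP1_RX is used only as an example request line */
	return omap_request_dma(OMAP_DMA_MCBSP1_RX, "example DMA",
				example_dma_callback, NULL, &example_lch);
}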
diff --git a/arch/arm/plat-omap/gpio.c b/arch/arm/plat-omap/gpio.c
index 55059a24ad4..76f721d8513 100644
--- a/arch/arm/plat-omap/gpio.c
+++ b/arch/arm/plat-omap/gpio.c
@@ -140,7 +140,7 @@ static struct gpio_bank gpio_bank_1610[5] = {
};
#endif
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
static struct gpio_bank gpio_bank_1510[2] = {
{ OMAP_MPUIO_BASE, INT_MPUIO, IH_MPUIO_BASE, METHOD_MPUIO },
{ OMAP1510_GPIO_BASE, INT_GPIO_BANK1, IH_GPIO_BASE, METHOD_GPIO_1510 }
@@ -173,7 +173,7 @@ static int gpio_bank_count;
static inline struct gpio_bank *get_gpio_bank(int gpio)
{
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
if (cpu_is_omap1510()) {
if (OMAP_GPIO_IS_MPUIO(gpio))
return &gpio_bank[0];
@@ -222,7 +222,7 @@ static inline int gpio_valid(int gpio)
return -1;
return 0;
}
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
if (cpu_is_omap1510() && gpio < 16)
return 0;
#endif
@@ -654,7 +654,7 @@ int omap_request_gpio(int gpio)
/* Set trigger to none. You need to enable the trigger after request_irq */
_set_gpio_triggering(bank, get_gpio_index(gpio), IRQT_NOEDGE);
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
if (bank->method == METHOD_GPIO_1510) {
void __iomem *reg;
@@ -739,7 +739,7 @@ static void gpio_irq_handler(unsigned int irq, struct irqdesc *desc,
bank = (struct gpio_bank *) desc->data;
if (bank->method == METHOD_MPUIO)
isr_reg = bank->base + OMAP_MPUIO_GPIO_INT;
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
if (bank->method == METHOD_GPIO_1510)
isr_reg = bank->base + OMAP1510_GPIO_INT_STATUS;
#endif
@@ -774,7 +774,7 @@ static void gpio_irq_handler(unsigned int irq, struct irqdesc *desc,
d = irq_desc + gpio_irq;
desc_handle_irq(gpio_irq, d, regs);
}
- }
+ }
}
static void gpio_ack_irq(unsigned int irq)
@@ -837,8 +837,9 @@ static struct irqchip mpuio_irq_chip = {
.unmask = mpuio_unmask_irq
};
-static int initialized = 0;
-static struct clk * gpio_ck = NULL;
+static int initialized;
+static struct clk * gpio_ick;
+static struct clk * gpio_fck;
static int __init _omap_gpio_init(void)
{
@@ -848,14 +849,26 @@ static int __init _omap_gpio_init(void)
initialized = 1;
if (cpu_is_omap1510()) {
- gpio_ck = clk_get(NULL, "arm_gpio_ck");
- if (IS_ERR(gpio_ck))
+ gpio_ick = clk_get(NULL, "arm_gpio_ck");
+ if (IS_ERR(gpio_ick))
printk("Could not get arm_gpio_ck\n");
else
- clk_use(gpio_ck);
+ clk_use(gpio_ick);
+ }
+ if (cpu_is_omap24xx()) {
+ gpio_ick = clk_get(NULL, "gpios_ick");
+ if (IS_ERR(gpio_ick))
+ printk("Could not get gpios_ick\n");
+ else
+ clk_use(gpio_ick);
+ gpio_fck = clk_get(NULL, "gpios_fck");
+ if (IS_ERR(gpio_fck))
+ printk("Could not get gpios_fck\n");
+ else
+ clk_use(gpio_fck);
}
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
if (cpu_is_omap1510()) {
printk(KERN_INFO "OMAP1510 GPIO hardware\n");
gpio_bank_count = 2;
@@ -901,7 +914,7 @@ static int __init _omap_gpio_init(void)
if (bank->method == METHOD_MPUIO) {
omap_writew(0xFFFF, OMAP_MPUIO_BASE + OMAP_MPUIO_GPIO_MASKIT);
}
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
if (bank->method == METHOD_GPIO_1510) {
__raw_writew(0xffff, bank->base + OMAP1510_GPIO_INT_MASK);
__raw_writew(0x0000, bank->base + OMAP1510_GPIO_INT_STATUS);
@@ -1038,6 +1051,7 @@ static struct sys_device omap_gpio_device = {
/*
* This may get called early from board specific init
+ * for boards that have interrupts routed via FPGA.
*/
int omap_gpio_init(void)
{
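The 24xx branch added above follows the usual clock idiom: look the clock up, check the handle just obtained with IS_ERR(), then clk_use() it. A small helper in the same style, with the function name and message being illustrative only:

/* Sketch of the clk_get()/clk_use() idiom used for gpios_ick/gpios_fck above. */
static struct clk *example_get_clock(const char *name)
{
	struct clk *ck = clk_get(NULL, name);

	if (IS_ERR(ck)) {
		printk(KERN_WARNING "Could not get %s\n", name);
		return NULL;
	}
	clk_use(ck);		/* mark the clock in use, as done above */
	return ck;
}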
diff --git a/arch/arm/plat-omap/mcbsp.c b/arch/arm/plat-omap/mcbsp.c
index 9c9b7df3faf..ea9475c8665 100644
--- a/arch/arm/plat-omap/mcbsp.c
+++ b/arch/arm/plat-omap/mcbsp.c
@@ -491,17 +491,20 @@ int omap_mcbsp_xmit_buffer(unsigned int id, dma_addr_t buffer, unsigned int leng
omap_set_dma_transfer_params(mcbsp[id].dma_tx_lch,
OMAP_DMA_DATA_TYPE_S16,
length >> 1, 1,
- OMAP_DMA_SYNC_ELEMENT);
+ OMAP_DMA_SYNC_ELEMENT,
+ 0, 0);
omap_set_dma_dest_params(mcbsp[id].dma_tx_lch,
OMAP_DMA_PORT_TIPB,
OMAP_DMA_AMODE_CONSTANT,
- mcbsp[id].io_base + OMAP_MCBSP_REG_DXR1);
+ mcbsp[id].io_base + OMAP_MCBSP_REG_DXR1,
+ 0, 0);
omap_set_dma_src_params(mcbsp[id].dma_tx_lch,
OMAP_DMA_PORT_EMIFF,
OMAP_DMA_AMODE_POST_INC,
- buffer);
+ buffer,
+ 0, 0);
omap_start_dma(mcbsp[id].dma_tx_lch);
wait_for_completion(&(mcbsp[id].tx_dma_completion));
@@ -531,17 +534,20 @@ int omap_mcbsp_recv_buffer(unsigned int id, dma_addr_t buffer, unsigned int leng
omap_set_dma_transfer_params(mcbsp[id].dma_rx_lch,
OMAP_DMA_DATA_TYPE_S16,
length >> 1, 1,
- OMAP_DMA_SYNC_ELEMENT);
+ OMAP_DMA_SYNC_ELEMENT,
+ 0, 0);
omap_set_dma_src_params(mcbsp[id].dma_rx_lch,
OMAP_DMA_PORT_TIPB,
OMAP_DMA_AMODE_CONSTANT,
- mcbsp[id].io_base + OMAP_MCBSP_REG_DRR1);
+ mcbsp[id].io_base + OMAP_MCBSP_REG_DRR1,
+ 0, 0);
omap_set_dma_dest_params(mcbsp[id].dma_rx_lch,
OMAP_DMA_PORT_EMIFF,
OMAP_DMA_AMODE_POST_INC,
- buffer);
+ buffer,
+ 0, 0);
omap_start_dma(mcbsp[id].dma_rx_lch);
wait_for_completion(&(mcbsp[id].rx_dma_completion));
@@ -643,7 +649,7 @@ static const struct omap_mcbsp_info mcbsp_730[] = {
};
#endif
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
static const struct omap_mcbsp_info mcbsp_1510[] = {
[0] = { .virt_base = OMAP1510_MCBSP1_BASE,
.dma_rx_sync = OMAP_DMA_MCBSP1_RX,
@@ -712,7 +718,7 @@ static int __init omap_mcbsp_init(void)
mcbsp_count = ARRAY_SIZE(mcbsp_730);
}
#endif
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
if (cpu_is_omap1510()) {
mcbsp_info = mcbsp_1510;
mcbsp_count = ARRAY_SIZE(mcbsp_1510);
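Every omap_set_dma_*() call in the hunks above gains two trailing arguments in this patch, which McBSP simply passes as 0. A hedged sketch of one complete transmit setup in the new form follows; the wrapper function, lch, io_base and buf are placeholders, and the meaning of the extra parameters is defined by <asm/arch/dma.h> rather than by these hunks.

/* Sketch of a transmit setup using the extended DMA calls (names are placeholders). */
static void example_mcbsp_tx(int lch, unsigned long io_base,
			     dma_addr_t buf, unsigned int length)
{
	omap_set_dma_transfer_params(lch, OMAP_DMA_DATA_TYPE_S16,
				     length >> 1, 1,
				     OMAP_DMA_SYNC_ELEMENT,
				     0, 0);	/* new trailing args, unused here */
	omap_set_dma_dest_params(lch, OMAP_DMA_PORT_TIPB,
				 OMAP_DMA_AMODE_CONSTANT,
				 io_base + OMAP_MCBSP_REG_DXR1,
				 0, 0);		/* new trailing args, unused here */
	omap_set_dma_src_params(lch, OMAP_DMA_PORT_EMIFF,
				OMAP_DMA_AMODE_POST_INC,
				buf,
				0, 0);		/* new trailing args, unused here */
	omap_start_dma(lch);
}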
diff --git a/arch/arm/plat-omap/mux.c b/arch/arm/plat-omap/mux.c
index 64482040f89..8c1c016aa68 100644
--- a/arch/arm/plat-omap/mux.c
+++ b/arch/arm/plat-omap/mux.c
@@ -3,7 +3,7 @@
*
* Utility to set the Omap MUX and PULL_DWN registers from a table in mux.h
*
- * Copyright (C) 2003 Nokia Corporation
+ * Copyright (C) 2003 - 2005 Nokia Corporation
*
* Written by Tony Lindgren <tony.lindgren@nokia.com>
*
@@ -25,38 +25,74 @@
#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
+#include <linux/kernel.h>
#include <asm/system.h>
#include <asm/io.h>
#include <linux/spinlock.h>
-
-#define __MUX_C__
#include <asm/arch/mux.h>
#ifdef CONFIG_OMAP_MUX
+#define OMAP24XX_L4_BASE 0x48000000
+#define OMAP24XX_PULL_ENA (1 << 3)
+#define OMAP24XX_PULL_UP (1 << 4)
+
+static struct pin_config * pin_table;
+static unsigned long pin_table_sz;
+
+extern struct pin_config * omap730_pins;
+extern struct pin_config * omap1xxx_pins;
+extern struct pin_config * omap24xx_pins;
+
+int __init omap_mux_register(struct pin_config * pins, unsigned long size)
+{
+ pin_table = pins;
+ pin_table_sz = size;
+
+ return 0;
+}
+
/*
* Sets the Omap MUX and PULL_DWN registers based on the table
*/
-int __init_or_module
-omap_cfg_reg(const reg_cfg_t reg_cfg)
+int __init_or_module omap_cfg_reg(const unsigned long index)
{
static DEFINE_SPINLOCK(mux_spin_lock);
unsigned long flags;
- reg_cfg_set *cfg;
+ struct pin_config *cfg;
unsigned int reg_orig = 0, reg = 0, pu_pd_orig = 0, pu_pd = 0,
pull_orig = 0, pull = 0;
unsigned int mask, warn = 0;
- if (cpu_is_omap7xx())
- return 0;
+ if (!pin_table)
+ BUG();
- if (reg_cfg > ARRAY_SIZE(reg_cfg_table)) {
- printk(KERN_ERR "MUX: reg_cfg %d\n", reg_cfg);
- return -EINVAL;
+ if (index >= pin_table_sz) {
+ printk(KERN_ERR "Invalid pin mux index: %lu (%lu)\n",
+ index, pin_table_sz);
+ dump_stack();
+ return -ENODEV;
}
- cfg = (reg_cfg_set *)&reg_cfg_table[reg_cfg];
+ cfg = (struct pin_config *)&pin_table[index];
+ if (cpu_is_omap24xx()) {
+ u8 reg = 0;
+
+ reg |= cfg->mask & 0x7;
+ if (cfg->pull_val)
+ reg |= OMAP24XX_PULL_ENA;
+ if (cfg->pu_pd_val)
+ reg |= OMAP24XX_PULL_UP;
+#ifdef CONFIG_OMAP_MUX_DEBUG
+ printk("Muxing %s (0x%08x): 0x%02x -> 0x%02x\n",
+ cfg->name, OMAP24XX_L4_BASE + cfg->mux_reg,
+ omap_readb(OMAP24XX_L4_BASE + cfg->mux_reg), reg);
+#endif
+ omap_writeb(reg, OMAP24XX_L4_BASE + cfg->mux_reg);
+
+ return 0;
+ }
/* Check the mux register in question */
if (cfg->mux_reg) {
@@ -157,7 +193,8 @@ omap_cfg_reg(const reg_cfg_t reg_cfg)
return 0;
#endif
}
-
EXPORT_SYMBOL(omap_cfg_reg);
-
+#else
+#define omap_mux_init() do {} while(0)
+#define omap_cfg_reg(x) do {} while(0)
#endif /* CONFIG_OMAP_MUX */
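On 24xx, omap_cfg_reg() above programs one byte per pad: the low three bits come from cfg->mask and select the pad mode, and the two pull flags are OR'd on top. The same encoding, pulled out into a helper for readability; the helper name is invented here and does not exist in this patch.

/* How the 24xx mux byte above is composed (illustrative helper, not in the patch). */
static u8 omap24xx_mux_byte(const struct pin_config *cfg)
{
	u8 reg = cfg->mask & 0x7;		/* pad function/mode bits */

	if (cfg->pull_val)
		reg |= OMAP24XX_PULL_ENA;	/* (1 << 3): enable the pull */
	if (cfg->pu_pd_val)
		reg |= OMAP24XX_PULL_UP;	/* (1 << 4): pull up instead of down */

	return reg;	/* written to OMAP24XX_L4_BASE + cfg->mux_reg */
}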
diff --git a/arch/arm/plat-omap/pm.c b/arch/arm/plat-omap/pm.c
index e15c6c1ddec..966cca031ca 100644
--- a/arch/arm/plat-omap/pm.c
+++ b/arch/arm/plat-omap/pm.c
@@ -54,11 +54,12 @@
#include <asm/arch/tps65010.h>
#include <asm/arch/dsp_common.h>
-#include "clock.h"
-#include "sram.h"
+#include <asm/arch/clock.h>
+#include <asm/arch/sram.h>
static unsigned int arm_sleep_save[ARM_SLEEP_SAVE_SIZE];
static unsigned short ulpd_sleep_save[ULPD_SLEEP_SAVE_SIZE];
+static unsigned int mpui730_sleep_save[MPUI730_SLEEP_SAVE_SIZE];
static unsigned int mpui1510_sleep_save[MPUI1510_SLEEP_SAVE_SIZE];
static unsigned int mpui1610_sleep_save[MPUI1610_SLEEP_SAVE_SIZE];
@@ -120,8 +121,8 @@ void omap_pm_idle(void)
*/
static void omap_pm_wakeup_setup(void)
{
- u32 level1_wake = OMAP_IRQ_BIT(INT_IH2_IRQ);
- u32 level2_wake = OMAP_IRQ_BIT(INT_UART2) | OMAP_IRQ_BIT(INT_KEYBOARD);
+ u32 level1_wake = 0;
+ u32 level2_wake = OMAP_IRQ_BIT(INT_UART2);
/*
* Turn off all interrupts except GPIO bank 1, L1-2nd level cascade,
@@ -129,19 +130,29 @@ static void omap_pm_wakeup_setup(void)
* drivers must still separately call omap_set_gpio_wakeup() to
* wake up to a GPIO interrupt.
*/
- if (cpu_is_omap1510() || cpu_is_omap16xx())
- level1_wake |= OMAP_IRQ_BIT(INT_GPIO_BANK1);
- else if (cpu_is_omap730())
- level1_wake |= OMAP_IRQ_BIT(INT_730_GPIO_BANK1);
+ if (cpu_is_omap730())
+ level1_wake = OMAP_IRQ_BIT(INT_730_GPIO_BANK1) |
+ OMAP_IRQ_BIT(INT_730_IH2_IRQ);
+ else if (cpu_is_omap1510())
+ level1_wake = OMAP_IRQ_BIT(INT_GPIO_BANK1) |
+ OMAP_IRQ_BIT(INT_1510_IH2_IRQ);
+ else if (cpu_is_omap16xx())
+ level1_wake = OMAP_IRQ_BIT(INT_GPIO_BANK1) |
+ OMAP_IRQ_BIT(INT_1610_IH2_IRQ);
omap_writel(~level1_wake, OMAP_IH1_MIR);
- if (cpu_is_omap1510())
+ if (cpu_is_omap730()) {
+ omap_writel(~level2_wake, OMAP_IH2_0_MIR);
+ omap_writel(~(OMAP_IRQ_BIT(INT_730_WAKE_UP_REQ) | OMAP_IRQ_BIT(INT_730_MPUIO_KEYPAD)), OMAP_IH2_1_MIR);
+ } else if (cpu_is_omap1510()) {
+ level2_wake |= OMAP_IRQ_BIT(INT_KEYBOARD);
omap_writel(~level2_wake, OMAP_IH2_MIR);
-
- /* INT_1610_WAKE_UP_REQ is needed for GPIO wakeup... */
- if (cpu_is_omap16xx()) {
+ } else if (cpu_is_omap16xx()) {
+ level2_wake |= OMAP_IRQ_BIT(INT_KEYBOARD);
omap_writel(~level2_wake, OMAP_IH2_0_MIR);
+
+ /* INT_1610_WAKE_UP_REQ is needed for GPIO wakeup... */
omap_writel(~OMAP_IRQ_BIT(INT_1610_WAKE_UP_REQ), OMAP_IH2_1_MIR);
omap_writel(~0x0, OMAP_IH2_2_MIR);
omap_writel(~0x0, OMAP_IH2_3_MIR);
@@ -185,7 +196,17 @@ void omap_pm_suspend(void)
* Save interrupt, MPUI, ARM and UPLD control registers.
*/
- if (cpu_is_omap1510()) {
+ if (cpu_is_omap730()) {
+ MPUI730_SAVE(OMAP_IH1_MIR);
+ MPUI730_SAVE(OMAP_IH2_0_MIR);
+ MPUI730_SAVE(OMAP_IH2_1_MIR);
+ MPUI730_SAVE(MPUI_CTRL);
+ MPUI730_SAVE(MPUI_DSP_BOOT_CONFIG);
+ MPUI730_SAVE(MPUI_DSP_API_CONFIG);
+ MPUI730_SAVE(EMIFS_CONFIG);
+ MPUI730_SAVE(EMIFF_SDRAM_CONFIG);
+
+ } else if (cpu_is_omap1510()) {
MPUI1510_SAVE(OMAP_IH1_MIR);
MPUI1510_SAVE(OMAP_IH2_MIR);
MPUI1510_SAVE(MPUI_CTRL);
@@ -280,7 +301,13 @@ void omap_pm_suspend(void)
ULPD_RESTORE(ULPD_CLOCK_CTRL);
ULPD_RESTORE(ULPD_STATUS_REQ);
- if (cpu_is_omap1510()) {
+ if (cpu_is_omap730()) {
+ MPUI730_RESTORE(EMIFS_CONFIG);
+ MPUI730_RESTORE(EMIFF_SDRAM_CONFIG);
+ MPUI730_RESTORE(OMAP_IH1_MIR);
+ MPUI730_RESTORE(OMAP_IH2_0_MIR);
+ MPUI730_RESTORE(OMAP_IH2_1_MIR);
+ } else if (cpu_is_omap1510()) {
MPUI1510_RESTORE(MPUI_CTRL);
MPUI1510_RESTORE(MPUI_DSP_BOOT_CONFIG);
MPUI1510_RESTORE(MPUI_DSP_API_CONFIG);
@@ -355,7 +382,14 @@ static int omap_pm_read_proc(
ULPD_SAVE(ULPD_DPLL_CTRL);
ULPD_SAVE(ULPD_POWER_CTRL);
- if (cpu_is_omap1510()) {
+ if (cpu_is_omap730()) {
+ MPUI730_SAVE(MPUI_CTRL);
+ MPUI730_SAVE(MPUI_DSP_STATUS);
+ MPUI730_SAVE(MPUI_DSP_BOOT_CONFIG);
+ MPUI730_SAVE(MPUI_DSP_API_CONFIG);
+ MPUI730_SAVE(EMIFF_SDRAM_CONFIG);
+ MPUI730_SAVE(EMIFS_CONFIG);
+ } else if (cpu_is_omap1510()) {
MPUI1510_SAVE(MPUI_CTRL);
MPUI1510_SAVE(MPUI_DSP_STATUS);
MPUI1510_SAVE(MPUI_DSP_BOOT_CONFIG);
@@ -404,7 +438,21 @@ static int omap_pm_read_proc(
ULPD_SHOW(ULPD_STATUS_REQ),
ULPD_SHOW(ULPD_POWER_CTRL));
- if (cpu_is_omap1510()) {
+ if (cpu_is_omap730()) {
+ my_buffer_offset += sprintf(my_base + my_buffer_offset,
+ "MPUI730_CTRL_REG 0x%-8x \n"
+ "MPUI730_DSP_STATUS_REG: 0x%-8x \n"
+ "MPUI730_DSP_BOOT_CONFIG_REG: 0x%-8x \n"
+ "MPUI730_DSP_API_CONFIG_REG: 0x%-8x \n"
+ "MPUI730_SDRAM_CONFIG_REG: 0x%-8x \n"
+ "MPUI730_EMIFS_CONFIG_REG: 0x%-8x \n",
+ MPUI730_SHOW(MPUI_CTRL),
+ MPUI730_SHOW(MPUI_DSP_STATUS),
+ MPUI730_SHOW(MPUI_DSP_BOOT_CONFIG),
+ MPUI730_SHOW(MPUI_DSP_API_CONFIG),
+ MPUI730_SHOW(EMIFF_SDRAM_CONFIG),
+ MPUI730_SHOW(EMIFS_CONFIG));
+ } else if (cpu_is_omap1510()) {
my_buffer_offset += sprintf(my_base + my_buffer_offset,
"MPUI1510_CTRL_REG 0x%-8x \n"
"MPUI1510_DSP_STATUS_REG: 0x%-8x \n"
@@ -553,7 +601,12 @@ static int __init omap_pm_init(void)
* These routines need to be in SRAM as that's the only
* memory the MPU can see when it wakes up.
*/
- if (cpu_is_omap1510()) {
+ if (cpu_is_omap730()) {
+ omap_sram_idle = omap_sram_push(omap730_idle_loop_suspend,
+ omap730_idle_loop_suspend_sz);
+ omap_sram_suspend = omap_sram_push(omap730_cpu_suspend,
+ omap730_cpu_suspend_sz);
+ } else if (cpu_is_omap1510()) {
omap_sram_idle = omap_sram_push(omap1510_idle_loop_suspend,
omap1510_idle_loop_suspend_sz);
omap_sram_suspend = omap_sram_push(omap1510_cpu_suspend,
@@ -572,7 +625,11 @@ static int __init omap_pm_init(void)
pm_idle = omap_pm_idle;
- setup_irq(INT_1610_WAKE_UP_REQ, &omap_wakeup_irq);
+ if (cpu_is_omap730())
+ setup_irq(INT_730_WAKE_UP_REQ, &omap_wakeup_irq);
+ else if (cpu_is_omap16xx())
+ setup_irq(INT_1610_WAKE_UP_REQ, &omap_wakeup_irq);
+
#if 0
/* --- BEGIN BOARD-DEPENDENT CODE --- */
/* Sleepx mask direction */
@@ -591,7 +648,9 @@ static int __init omap_pm_init(void)
omap_writew(ULPD_POWER_CTRL_REG_VAL, ULPD_POWER_CTRL);
/* Configure IDLECT3 */
- if (cpu_is_omap16xx())
+ if (cpu_is_omap730())
+ omap_writel(OMAP730_IDLECT3_VAL, OMAP730_IDLECT3);
+ else if (cpu_is_omap16xx())
omap_writel(OMAP1610_IDLECT3_VAL, OMAP1610_IDLECT3);
pm_set_ops(&omap_pm_ops);
@@ -600,8 +659,10 @@ static int __init omap_pm_init(void)
omap_pm_init_proc();
#endif
- /* configure LOW_PWR pin */
- omap_cfg_reg(T20_1610_LOW_PWR);
+ if (cpu_is_omap16xx()) {
+ /* configure LOW_PWR pin */
+ omap_cfg_reg(T20_1610_LOW_PWR);
+ }
return 0;
}
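omap_pm_wakeup_setup() above writes the complement of the wanted wake bits into the MIR (mask) registers, so only the listed sources can wake the CPU. A fragment showing how one more level-2 source is added in the 16xx branch; both lines mirror code already present above and are shown only to make the active-low convention explicit.

	/* inside the 16xx branch of omap_pm_wakeup_setup(), for illustration */
	level2_wake |= OMAP_IRQ_BIT(INT_KEYBOARD);	/* keypad as a wake source */
	omap_writel(~level2_wake, OMAP_IH2_0_MIR);	/* MIR: 0 = unmasked, may wake */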
diff --git a/arch/arm/plat-omap/sleep.S b/arch/arm/plat-omap/sleep.S
index 9f745836f6a..4cd7d292f85 100644
--- a/arch/arm/plat-omap/sleep.S
+++ b/arch/arm/plat-omap/sleep.S
@@ -1,7 +1,7 @@
/*
* linux/arch/arm/plat-omap/sleep.S
*
- * Low-level OMAP1510/1610 sleep/wakeUp support
+ * Low-level OMAP730/1510/1610 sleep/wakeUp support
*
* Initial SA1110 code:
* Copyright (c) 2001 Cliff Brake <cbrake@accelent.com>
@@ -52,7 +52,57 @@
* processor specific functions here.
*/
-#ifdef CONFIG_ARCH_OMAP1510
+#if defined(CONFIG_ARCH_OMAP730)
+ENTRY(omap730_idle_loop_suspend)
+
+ stmfd sp!, {r0 - r12, lr} @ save registers on stack
+
+ @ load base address of ARM_IDLECT1 and ARM_IDLECT2
+ mov r4, #CLKGEN_REG_ASM_BASE & 0xff000000
+ orr r4, r4, #CLKGEN_REG_ASM_BASE & 0x00ff0000
+ orr r4, r4, #CLKGEN_REG_ASM_BASE & 0x0000ff00
+
+ @ turn off clock domains
+ @ get ARM_IDLECT2 into r2
+ ldrh r2, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
+ mov r5, #OMAP730_IDLECT2_SLEEP_VAL & 0xff
+ orr r5, r5, #OMAP730_IDLECT2_SLEEP_VAL & 0xff00
+ strh r5, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
+
+ @ request ARM idle
+ @ get ARM_IDLECT1 into r1
+ ldrh r1, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff]
+ orr r3, r1, #OMAP730_IDLE_LOOP_REQUEST & 0xffff
+ strh r3, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff]
+
+ mov r5, #IDLE_WAIT_CYCLES & 0xff
+ orr r5, r5, #IDLE_WAIT_CYCLES & 0xff00
+l_730: subs r5, r5, #1
+ bne l_730
+/*
+ * Let's wait for the next clock tick to wake us up.
+ */
+ mov r0, #0
+ mcr p15, 0, r0, c7, c0, 4 @ wait for interrupt
+/*
+ * omap730_idle_loop_suspend()'s resume point.
+ *
+ * It will just start executing here, so we'll restore stuff from the
+ * stack, reset the ARM_IDLECT1 and ARM_IDLECT2.
+ */
+
+ @ restore ARM_IDLECT1 and ARM_IDLECT2 and return
+ @ r1 has ARM_IDLECT1 and r2 still has ARM_IDLECT2
+ strh r2, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
+ strh r1, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff]
+
+ ldmfd sp!, {r0 - r12, pc} @ restore regs and return
+
+ENTRY(omap730_idle_loop_suspend_sz)
+ .word . - omap730_idle_loop_suspend
+#endif /* CONFIG_ARCH_OMAP730 */
+
+#ifdef CONFIG_ARCH_OMAP15XX
ENTRY(omap1510_idle_loop_suspend)
stmfd sp!, {r0 - r12, lr} @ save registers on stack
@@ -100,7 +150,7 @@ l_1510: subs r5, r5, #1
ENTRY(omap1510_idle_loop_suspend_sz)
.word . - omap1510_idle_loop_suspend
-#endif /* CONFIG_ARCH_OMAP1510 */
+#endif /* CONFIG_ARCH_OMAP15XX */
#if defined(CONFIG_ARCH_OMAP16XX)
ENTRY(omap1610_idle_loop_suspend)
@@ -169,7 +219,86 @@ ENTRY(omap1610_idle_loop_suspend_sz)
*
*/
-#ifdef CONFIG_ARCH_OMAP1510
+#if defined(CONFIG_ARCH_OMAP730)
+ENTRY(omap730_cpu_suspend)
+
+ @ save registers on stack
+ stmfd sp!, {r0 - r12, lr}
+
+ @ Drain write cache
+ mov r4, #0
+ mcr p15, 0, r0, c7, c10, 4
+ nop
+
+ @ load base address of Traffic Controller
+ mov r6, #TCMIF_ASM_BASE & 0xff000000
+ orr r6, r6, #TCMIF_ASM_BASE & 0x00ff0000
+ orr r6, r6, #TCMIF_ASM_BASE & 0x0000ff00
+
+ @ prepare to put SDRAM into self-refresh manually
+ ldr r7, [r6, #EMIFF_SDRAM_CONFIG_ASM_OFFSET & 0xff]
+ orr r9, r7, #SELF_REFRESH_MODE & 0xff000000
+ orr r9, r9, #SELF_REFRESH_MODE & 0x000000ff
+ str r9, [r6, #EMIFF_SDRAM_CONFIG_ASM_OFFSET & 0xff]
+
+ @ prepare to put EMIFS to Sleep
+ ldr r8, [r6, #EMIFS_CONFIG_ASM_OFFSET & 0xff]
+ orr r9, r8, #IDLE_EMIFS_REQUEST & 0xff
+ str r9, [r6, #EMIFS_CONFIG_ASM_OFFSET & 0xff]
+
+ @ load base address of ARM_IDLECT1 and ARM_IDLECT2
+ mov r4, #CLKGEN_REG_ASM_BASE & 0xff000000
+ orr r4, r4, #CLKGEN_REG_ASM_BASE & 0x00ff0000
+ orr r4, r4, #CLKGEN_REG_ASM_BASE & 0x0000ff00
+
+ @ turn off clock domains
+ @ do not disable PERCK (0x04)
+ mov r5, #OMAP730_IDLECT2_SLEEP_VAL & 0xff
+ orr r5, r5, #OMAP730_IDLECT2_SLEEP_VAL & 0xff00
+ strh r5, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
+
+ @ request ARM idle
+ mov r3, #OMAP730_IDLECT1_SLEEP_VAL & 0xff
+ orr r3, r3, #OMAP730_IDLECT1_SLEEP_VAL & 0xff00
+ strh r3, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff]
+
+ @ disable instruction cache
+ mrc p15, 0, r9, c1, c0, 0
+ bic r2, r9, #0x1000
+ mcr p15, 0, r2, c1, c0, 0
+ nop
+
+/*
+ * Let's wait for the next wake up event to wake us up. r0 can't be
+ * used here because r0 holds ARM_IDLECT1
+ */
+ mov r2, #0
+ mcr p15, 0, r2, c7, c0, 4 @ wait for interrupt
+/*
+ * omap730_cpu_suspend()'s resume point.
+ *
+ * It will just start executing here, so we'll restore stuff from the
+ * stack.
+ */
+ @ re-enable Icache
+ mcr p15, 0, r9, c1, c0, 0
+
+ @ reset the ARM_IDLECT1 and ARM_IDLECT2.
+ strh r1, [r4, #ARM_IDLECT2_ASM_OFFSET & 0xff]
+ strh r0, [r4, #ARM_IDLECT1_ASM_OFFSET & 0xff]
+
+ @ Restore EMIFF controls
+ str r7, [r6, #EMIFF_SDRAM_CONFIG_ASM_OFFSET & 0xff]
+ str r8, [r6, #EMIFS_CONFIG_ASM_OFFSET & 0xff]
+
+ @ restore regs and return
+ ldmfd sp!, {r0 - r12, pc}
+
+ENTRY(omap730_cpu_suspend_sz)
+ .word . - omap730_cpu_suspend
+#endif /* CONFIG_ARCH_OMAP730 */
+
+#ifdef CONFIG_ARCH_OMAP15XX
ENTRY(omap1510_cpu_suspend)
@ save registers on stack
@@ -241,7 +370,7 @@ l_1510_2:
ENTRY(omap1510_cpu_suspend_sz)
.word . - omap1510_cpu_suspend
-#endif /* CONFIG_ARCH_OMAP1510 */
+#endif /* CONFIG_ARCH_OMAP15XX */
#if defined(CONFIG_ARCH_OMAP16XX)
ENTRY(omap1610_cpu_suspend)
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index 7ad69f14a3e..792f6637583 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -20,10 +20,13 @@
#include <asm/io.h>
#include <asm/cacheflush.h>
-#include "sram.h"
+#include <asm/arch/sram.h>
+
+#define OMAP1_SRAM_PA 0x20000000
+#define OMAP1_SRAM_VA 0xd0000000
+#define OMAP2_SRAM_PA 0x40200000
+#define OMAP2_SRAM_VA 0xd0000000
-#define OMAP1_SRAM_BASE 0xd0000000
-#define OMAP1_SRAM_START 0x20000000
#define SRAM_BOOTLOADER_SZ 0x80
static unsigned long omap_sram_base;
@@ -31,37 +34,40 @@ static unsigned long omap_sram_size;
static unsigned long omap_sram_ceil;
/*
- * The amount of SRAM depends on the core type:
- * 730 = 200K, 1510 = 512K, 5912 = 256K, 1610 = 16K, 1710 = 16K
+ * The amount of SRAM depends on the core type.
* Note that we cannot try to test for SRAM here because writes
* to secure SRAM will hang the system. Also the SRAM is not
* yet mapped at this point.
*/
void __init omap_detect_sram(void)
{
- omap_sram_base = OMAP1_SRAM_BASE;
+ if (!cpu_is_omap24xx())
+ omap_sram_base = OMAP1_SRAM_VA;
+ else
+ omap_sram_base = OMAP2_SRAM_VA;
if (cpu_is_omap730())
- omap_sram_size = 0x32000;
- else if (cpu_is_omap1510())
- omap_sram_size = 0x80000;
+ omap_sram_size = 0x32000; /* 200K */
+ else if (cpu_is_omap15xx())
+ omap_sram_size = 0x30000; /* 192K */
else if (cpu_is_omap1610() || cpu_is_omap1621() || cpu_is_omap1710())
- omap_sram_size = 0x4000;
+ omap_sram_size = 0x4000; /* 16K */
else if (cpu_is_omap1611())
- omap_sram_size = 0x3e800;
+ omap_sram_size = 0x3e800; /* 250K */
+ else if (cpu_is_omap2420())
+ omap_sram_size = 0xa0014; /* 640K */
else {
printk(KERN_ERR "Could not detect SRAM size\n");
omap_sram_size = 0x4000;
}
- printk(KERN_INFO "SRAM size: 0x%lx\n", omap_sram_size);
omap_sram_ceil = omap_sram_base + omap_sram_size;
}
static struct map_desc omap_sram_io_desc[] __initdata = {
{ /* .length gets filled in at runtime */
- .virtual = OMAP1_SRAM_BASE,
- .pfn = __phys_to_pfn(OMAP1_SRAM_START),
+ .virtual = OMAP1_SRAM_VA,
+ .pfn = __phys_to_pfn(OMAP1_SRAM_PA),
.type = MT_DEVICE
}
};
@@ -76,10 +82,19 @@ void __init omap_map_sram(void)
if (omap_sram_size == 0)
return;
+ if (cpu_is_omap24xx()) {
+ omap_sram_io_desc[0].virtual = OMAP2_SRAM_VA;
+ omap_sram_io_desc[0].pfn = __phys_to_pfn(OMAP2_SRAM_PA);
+ }
+
omap_sram_io_desc[0].length = (omap_sram_size + PAGE_SIZE-1)/PAGE_SIZE;
omap_sram_io_desc[0].length *= PAGE_SIZE;
iotable_init(omap_sram_io_desc, ARRAY_SIZE(omap_sram_io_desc));
+ printk(KERN_INFO "SRAM: Mapped pa 0x%08lx to va 0x%08lx size: 0x%lx\n",
+ omap_sram_io_desc[0].pfn, omap_sram_io_desc[0].virtual,
+ omap_sram_io_desc[0].length);
+
/*
* Looks like we need to preserve some bootloader code at the
* beginning of SRAM for jumping to flash for reboot to work...
@@ -88,16 +103,6 @@ void __init omap_map_sram(void)
omap_sram_size - SRAM_BOOTLOADER_SZ);
}
-static void (*_omap_sram_reprogram_clock)(u32 dpllctl, u32 ckctl) = NULL;
-
-void omap_sram_reprogram_clock(u32 dpllctl, u32 ckctl)
-{
- if (_omap_sram_reprogram_clock == NULL)
- panic("Cannot use SRAM");
-
- return _omap_sram_reprogram_clock(dpllctl, ckctl);
-}
-
void * omap_sram_push(void * start, unsigned long size)
{
if (size > (omap_sram_ceil - (omap_sram_base + SRAM_BOOTLOADER_SZ))) {
@@ -111,10 +116,94 @@ void * omap_sram_push(void * start, unsigned long size)
return (void *)omap_sram_ceil;
}
-void __init omap_sram_init(void)
+static void omap_sram_error(void)
+{
+ panic("Uninitialized SRAM function\n");
+}
+
+#ifdef CONFIG_ARCH_OMAP1
+
+static void (*_omap_sram_reprogram_clock)(u32 dpllctl, u32 ckctl);
+
+void omap_sram_reprogram_clock(u32 dpllctl, u32 ckctl)
+{
+ if (!_omap_sram_reprogram_clock)
+ omap_sram_error();
+
+ return _omap_sram_reprogram_clock(dpllctl, ckctl);
+}
+
+int __init omap1_sram_init(void)
{
- omap_detect_sram();
- omap_map_sram();
_omap_sram_reprogram_clock = omap_sram_push(sram_reprogram_clock,
sram_reprogram_clock_sz);
+
+ return 0;
+}
+
+#else
+#define omap1_sram_init() do {} while (0)
+#endif
+
+#ifdef CONFIG_ARCH_OMAP2
+
+static void (*_omap2_sram_ddr_init)(u32 *slow_dll_ctrl, u32 fast_dll_ctrl,
+ u32 base_cs, u32 force_unlock);
+
+void omap2_sram_ddr_init(u32 *slow_dll_ctrl, u32 fast_dll_ctrl,
+ u32 base_cs, u32 force_unlock)
+{
+ if (!_omap2_sram_ddr_init)
+ omap_sram_error();
+
+ return _omap2_sram_ddr_init(slow_dll_ctrl, fast_dll_ctrl,
+ base_cs, force_unlock);
+}
+
+static void (*_omap2_sram_reprogram_sdrc)(u32 perf_level, u32 dll_val,
+ u32 mem_type);
+
+void omap2_sram_reprogram_sdrc(u32 perf_level, u32 dll_val, u32 mem_type)
+{
+ if (!_omap2_sram_reprogram_sdrc)
+ omap_sram_error();
+
+ return _omap2_sram_reprogram_sdrc(perf_level, dll_val, mem_type);
+}
+
+static u32 (*_omap2_set_prcm)(u32 dpll_ctrl_val, u32 sdrc_rfr_val, int bypass);
+
+u32 omap2_set_prcm(u32 dpll_ctrl_val, u32 sdrc_rfr_val, int bypass)
+{
+ if (!_omap2_set_prcm)
+ omap_sram_error();
+
+ return _omap2_set_prcm(dpll_ctrl_val, sdrc_rfr_val, bypass);
+}
+
+int __init omap2_sram_init(void)
+{
+ _omap2_sram_ddr_init = omap_sram_push(sram_ddr_init, sram_ddr_init_sz);
+
+ _omap2_sram_reprogram_sdrc = omap_sram_push(sram_reprogram_sdrc,
+ sram_reprogram_sdrc_sz);
+ _omap2_set_prcm = omap_sram_push(sram_set_prcm, sram_set_prcm_sz);
+
+ return 0;
+}
+#else
+#define omap2_sram_init() do {} while (0)
+#endif
+
+int __init omap_sram_init(void)
+{
+ omap_detect_sram();
+ omap_map_sram();
+
+ if (!cpu_is_omap24xx())
+ omap1_sram_init();
+ else
+ omap2_sram_init();
+
+ return 0;
}
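The pattern introduced above for SRAM-resident routines is: copy the routine into SRAM with omap_sram_push() at init time, keep the returned address in a static function pointer, and have the public wrapper trap calls made before that happened. A minimal sketch of adding one more routine in the same style; every name here is hypothetical.

/* Sketch of the omap_sram_push() trampoline pattern (all names are hypothetical). */
extern void sram_example_fn(u32 arg);		/* code that must execute from SRAM */
extern unsigned long sram_example_fn_sz;	/* its size, exported alongside it */

static void (*_omap_sram_example_fn)(u32 arg);

void omap_sram_example_fn(u32 arg)
{
	if (!_omap_sram_example_fn)
		omap_sram_error();		/* panics like the wrappers above */
	_omap_sram_example_fn(arg);
}

static int __init omap_example_sram_init(void)
{
	_omap_sram_example_fn = omap_sram_push(sram_example_fn,
					       sram_example_fn_sz);
	return 0;
}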
diff --git a/arch/arm/plat-omap/sram.h b/arch/arm/plat-omap/sram.h
deleted file mode 100644
index 71984efa6ae..00000000000
--- a/arch/arm/plat-omap/sram.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/*
- * linux/arch/arm/plat-omap/sram.h
- *
- * Interface for functions that need to be run in internal SRAM
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __ARCH_ARM_OMAP_SRAM_H
-#define __ARCH_ARM_OMAP_SRAM_H
-
-extern void * omap_sram_push(void * start, unsigned long size);
-extern void omap_sram_reprogram_clock(u32 dpllctl, u32 ckctl);
-
-/* Do not use these */
-extern void sram_reprogram_clock(u32 ckctl, u32 dpllctl);
-extern unsigned long sram_reprogram_clock_sz;
-
-#endif
diff --git a/arch/arm/plat-omap/usb.c b/arch/arm/plat-omap/usb.c
index 205e2d0b826..00afc7a8c2a 100644
--- a/arch/arm/plat-omap/usb.c
+++ b/arch/arm/plat-omap/usb.c
@@ -91,6 +91,8 @@ EXPORT_SYMBOL(otg_set_transceiver);
/*-------------------------------------------------------------------------*/
+#if defined(CONFIG_ARCH_OMAP_OTG) || defined(CONFIG_ARCH_OMAP15XX)
+
static u32 __init omap_usb0_init(unsigned nwires, unsigned is_device)
{
u32 syscon1 = 0;
@@ -271,6 +273,8 @@ static u32 __init omap_usb2_init(unsigned nwires, unsigned alt_pingroup)
return syscon1 << 24;
}
+#endif
+
/*-------------------------------------------------------------------------*/
#if defined(CONFIG_USB_GADGET_OMAP) || \
@@ -494,7 +498,7 @@ static inline void omap_otg_init(struct omap_usb_config *config) {}
/*-------------------------------------------------------------------------*/
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
#define ULPD_DPLL_CTRL_REG __REG16(ULPD_DPLL_CTRL)
#define DPLL_IOB (1 << 13)
@@ -507,7 +511,6 @@ static inline void omap_otg_init(struct omap_usb_config *config) {}
static void __init omap_1510_usb_init(struct omap_usb_config *config)
{
- int status;
unsigned int val;
omap_usb0_init(config->pins[0], is_usb0_device(config));
@@ -539,6 +542,8 @@ static void __init omap_1510_usb_init(struct omap_usb_config *config)
#ifdef CONFIG_USB_GADGET_OMAP
if (config->register_dev) {
+ int status;
+
udc_device.dev.platform_data = config;
status = platform_device_register(&udc_device);
if (status)
@@ -549,6 +554,8 @@ static void __init omap_1510_usb_init(struct omap_usb_config *config)
#if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE)
if (config->register_host) {
+ int status;
+
ohci_device.dev.platform_data = config;
status = platform_device_register(&ohci_device);
if (status)
diff --git a/arch/i386/pci/fixup.c b/arch/i386/pci/fixup.c
index 3984226a8b9..eeb1b1f2d54 100644
--- a/arch/i386/pci/fixup.c
+++ b/arch/i386/pci/fixup.c
@@ -433,9 +433,8 @@ static void __devinit pci_post_fixup_toshiba_ohci1394(struct pci_dev *dev)
return; /* only applies to certain Toshibas (so far) */
/* Restore config space on Toshiba laptops */
- mdelay(10);
pci_write_config_word(dev, PCI_CACHE_LINE_SIZE, toshiba_line_size);
- pci_write_config_word(dev, PCI_INTERRUPT_LINE, dev->irq);
+ pci_read_config_byte(dev, PCI_INTERRUPT_LINE, (u8 *)&dev->irq);
pci_write_config_dword(dev, PCI_BASE_ADDRESS_0,
pci_resource_start(dev, 0));
pci_write_config_dword(dev, PCI_BASE_ADDRESS_1,
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index f72ea6aebcb..a3aa45cbcfa 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -987,7 +987,7 @@ efi_initialize_iomem_resources(struct resource *code_resource,
break;
}
- if ((res = kcalloc(1, sizeof(struct resource), GFP_KERNEL)) == NULL) {
+ if ((res = kzalloc(sizeof(struct resource), GFP_KERNEL)) == NULL) {
printk(KERN_ERR "failed to alocate resource for iomem\n");
return;
}
diff --git a/arch/ia64/kernel/kprobes.c b/arch/ia64/kernel/kprobes.c
index 96736a119c9..801eeaeaf3d 100644
--- a/arch/ia64/kernel/kprobes.c
+++ b/arch/ia64/kernel/kprobes.c
@@ -347,7 +347,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
((struct fnptr *)kretprobe_trampoline)->ip;
spin_lock_irqsave(&kretprobe_lock, flags);
- head = kretprobe_inst_table_head(current);
+ head = kretprobe_inst_table_head(current);
/*
* It is possible to have multiple instances associated with a given
@@ -363,9 +363,9 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
* kretprobe_trampoline
*/
hlist_for_each_entry_safe(ri, node, tmp, head, hlist) {
- if (ri->task != current)
+ if (ri->task != current)
/* another task is sharing our hash bucket */
- continue;
+ continue;
if (ri->rp && ri->rp->handler)
ri->rp->handler(ri, regs);
@@ -394,7 +394,7 @@ int __kprobes trampoline_probe_handler(struct kprobe *p, struct pt_regs *regs)
* kprobe_handler() that we don't want the post_handler
* to run (and have re-enabled preemption)
*/
- return 1;
+ return 1;
}
/* Called with kretprobe_lock held */
@@ -739,12 +739,16 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
switch(val) {
case DIE_BREAK:
- if (pre_kprobes_handler(args))
- ret = NOTIFY_STOP;
+ /* err is break number from ia64_bad_break() */
+ if (args->err == 0x80200 || args->err == 0x80300)
+ if (pre_kprobes_handler(args))
+ ret = NOTIFY_STOP;
break;
- case DIE_SS:
- if (post_kprobes_handler(args->regs))
- ret = NOTIFY_STOP;
+ case DIE_FAULT:
+ /* err is vector number from ia64_fault() */
+ if (args->err == 36)
+ if (post_kprobes_handler(args->regs))
+ ret = NOTIFY_STOP;
break;
case DIE_PAGE_FAULT:
/* kprobe_running() needs smp_processor_id() */
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index 52c47da1724..355af15287c 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -51,6 +51,9 @@
*
* 2005-08-12 Keith Owens <kaos@sgi.com>
* Convert MCA/INIT handlers to use per event stacks and SAL/OS state.
+ *
+ * 2005-10-07 Keith Owens <kaos@sgi.com>
+ * Add notify_die() hooks.
*/
#include <linux/config.h>
#include <linux/types.h>
@@ -58,7 +61,6 @@
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/kallsyms.h>
#include <linux/smp_lock.h>
#include <linux/bootmem.h>
#include <linux/acpi.h>
@@ -69,6 +71,7 @@
#include <linux/workqueue.h>
#include <asm/delay.h>
+#include <asm/kdebug.h>
#include <asm/machvec.h>
#include <asm/meminit.h>
#include <asm/page.h>
@@ -132,6 +135,14 @@ extern void salinfo_log_wakeup(int type, u8 *buffer, u64 size, int irqsafe);
static int mca_init;
+
+static inline void
+ia64_mca_spin(const char *func)
+{
+ printk(KERN_EMERG "%s: spinning here, not returning to SAL\n", func);
+ while (1)
+ cpu_relax();
+}
/*
* IA64_MCA log support
*/
@@ -526,13 +537,16 @@ ia64_mca_wakeup_all(void)
* Outputs : None
*/
static irqreturn_t
-ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs)
+ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *regs)
{
unsigned long flags;
int cpu = smp_processor_id();
/* Mask all interrupts */
local_irq_save(flags);
+ if (notify_die(DIE_MCA_RENDZVOUS_ENTER, "MCA", regs, 0, 0, 0)
+ == NOTIFY_STOP)
+ ia64_mca_spin(__FUNCTION__);
ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_DONE;
/* Register with the SAL monarch that the slave has
@@ -540,10 +554,18 @@ ia64_mca_rendez_int_handler(int rendez_irq, void *arg, struct pt_regs *ptregs)
*/
ia64_sal_mc_rendez();
+ if (notify_die(DIE_MCA_RENDZVOUS_PROCESS, "MCA", regs, 0, 0, 0)
+ == NOTIFY_STOP)
+ ia64_mca_spin(__FUNCTION__);
+
/* Wait for the monarch cpu to exit. */
while (monarch_cpu != -1)
cpu_relax(); /* spin until monarch leaves */
+ if (notify_die(DIE_MCA_RENDZVOUS_LEAVE, "MCA", regs, 0, 0, 0)
+ == NOTIFY_STOP)
+ ia64_mca_spin(__FUNCTION__);
+
/* Enable all interrupts */
local_irq_restore(flags);
return IRQ_HANDLED;
@@ -933,6 +955,9 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
oops_in_progress = 1; /* FIXME: make printk NMI/MCA/INIT safe */
previous_current = ia64_mca_modify_original_stack(regs, sw, sos, "MCA");
monarch_cpu = cpu;
+ if (notify_die(DIE_MCA_MONARCH_ENTER, "MCA", regs, 0, 0, 0)
+ == NOTIFY_STOP)
+ ia64_mca_spin(__FUNCTION__);
ia64_wait_for_slaves(cpu);
/* Wakeup all the processors which are spinning in the rendezvous loop.
@@ -942,6 +967,9 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
* spinning in SAL does not work.
*/
ia64_mca_wakeup_all();
+ if (notify_die(DIE_MCA_MONARCH_PROCESS, "MCA", regs, 0, 0, 0)
+ == NOTIFY_STOP)
+ ia64_mca_spin(__FUNCTION__);
/* Get the MCA error record and log it */
ia64_mca_log_sal_error_record(SAL_INFO_TYPE_MCA);
@@ -960,6 +988,9 @@ ia64_mca_handler(struct pt_regs *regs, struct switch_stack *sw,
ia64_sal_clear_state_info(SAL_INFO_TYPE_MCA);
sos->os_status = IA64_MCA_CORRECTED;
}
+ if (notify_die(DIE_MCA_MONARCH_LEAVE, "MCA", regs, 0, 0, recover)
+ == NOTIFY_STOP)
+ ia64_mca_spin(__FUNCTION__);
set_curr_task(cpu, previous_current);
monarch_cpu = -1;
@@ -1188,6 +1219,37 @@ ia64_mca_cpe_poll (unsigned long dummy)
#endif /* CONFIG_ACPI */
+static int
+default_monarch_init_process(struct notifier_block *self, unsigned long val, void *data)
+{
+ int c;
+ struct task_struct *g, *t;
+ if (val != DIE_INIT_MONARCH_PROCESS)
+ return NOTIFY_DONE;
+ printk(KERN_ERR "Processes interrupted by INIT -");
+ for_each_online_cpu(c) {
+ struct ia64_sal_os_state *s;
+ t = __va(__per_cpu_mca[c] + IA64_MCA_CPU_INIT_STACK_OFFSET);
+ s = (struct ia64_sal_os_state *)((char *)t + MCA_SOS_OFFSET);
+ g = s->prev_task;
+ if (g) {
+ if (g->pid)
+ printk(" %d", g->pid);
+ else
+ printk(" %d (cpu %d task 0x%p)", g->pid, task_cpu(g), g);
+ }
+ }
+ printk("\n\n");
+ if (read_trylock(&tasklist_lock)) {
+ do_each_thread (g, t) {
+ printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
+ show_stack(t, NULL);
+ } while_each_thread (g, t);
+ read_unlock(&tasklist_lock);
+ }
+ return NOTIFY_DONE;
+}
+
/*
* C portion of the OS INIT handler
*
@@ -1212,8 +1274,7 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
static atomic_t slaves;
static atomic_t monarchs;
task_t *previous_current;
- int cpu = smp_processor_id(), c;
- struct task_struct *g, *t;
+ int cpu = smp_processor_id();
oops_in_progress = 1; /* FIXME: make printk NMI/MCA/INIT safe */
console_loglevel = 15; /* make sure printks make it to console */
@@ -1253,8 +1314,17 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_INIT;
while (monarch_cpu == -1)
cpu_relax(); /* spin until monarch enters */
+ if (notify_die(DIE_INIT_SLAVE_ENTER, "INIT", regs, 0, 0, 0)
+ == NOTIFY_STOP)
+ ia64_mca_spin(__FUNCTION__);
+ if (notify_die(DIE_INIT_SLAVE_PROCESS, "INIT", regs, 0, 0, 0)
+ == NOTIFY_STOP)
+ ia64_mca_spin(__FUNCTION__);
while (monarch_cpu != -1)
cpu_relax(); /* spin until monarch leaves */
+ if (notify_die(DIE_INIT_SLAVE_LEAVE, "INIT", regs, 0, 0, 0)
+ == NOTIFY_STOP)
+ ia64_mca_spin(__FUNCTION__);
printk("Slave on cpu %d returning to normal service.\n", cpu);
set_curr_task(cpu, previous_current);
ia64_mc_info.imi_rendez_checkin[cpu] = IA64_MCA_RENDEZ_CHECKIN_NOTDONE;
@@ -1263,6 +1333,9 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
}
monarch_cpu = cpu;
+ if (notify_die(DIE_INIT_MONARCH_ENTER, "INIT", regs, 0, 0, 0)
+ == NOTIFY_STOP)
+ ia64_mca_spin(__FUNCTION__);
/*
* Wait for a bit. On some machines (e.g., HP's zx2000 and zx6000, INIT can be
@@ -1273,27 +1346,16 @@ ia64_init_handler(struct pt_regs *regs, struct switch_stack *sw,
printk("Delaying for 5 seconds...\n");
udelay(5*1000000);
ia64_wait_for_slaves(cpu);
- printk(KERN_ERR "Processes interrupted by INIT -");
- for_each_online_cpu(c) {
- struct ia64_sal_os_state *s;
- t = __va(__per_cpu_mca[c] + IA64_MCA_CPU_INIT_STACK_OFFSET);
- s = (struct ia64_sal_os_state *)((char *)t + MCA_SOS_OFFSET);
- g = s->prev_task;
- if (g) {
- if (g->pid)
- printk(" %d", g->pid);
- else
- printk(" %d (cpu %d task 0x%p)", g->pid, task_cpu(g), g);
- }
- }
- printk("\n\n");
- if (read_trylock(&tasklist_lock)) {
- do_each_thread (g, t) {
- printk("\nBacktrace of pid %d (%s)\n", t->pid, t->comm);
- show_stack(t, NULL);
- } while_each_thread (g, t);
- read_unlock(&tasklist_lock);
- }
+ /* If nobody intercepts DIE_INIT_MONARCH_PROCESS then we drop through
+ * to default_monarch_init_process() above and just print all the
+ * tasks.
+ */
+ if (notify_die(DIE_INIT_MONARCH_PROCESS, "INIT", regs, 0, 0, 0)
+ == NOTIFY_STOP)
+ ia64_mca_spin(__FUNCTION__);
+ if (notify_die(DIE_INIT_MONARCH_LEAVE, "INIT", regs, 0, 0, 0)
+ == NOTIFY_STOP)
+ ia64_mca_spin(__FUNCTION__);
printk("\nINIT dump complete. Monarch on cpu %d returning to normal service.\n", cpu);
atomic_dec(&monarchs);
set_curr_task(cpu, previous_current);
@@ -1462,6 +1524,10 @@ ia64_mca_init(void)
s64 rc;
struct ia64_sal_retval isrv;
u64 timeout = IA64_MCA_RENDEZ_TIMEOUT; /* platform specific */
+ static struct notifier_block default_init_monarch_nb = {
+ .notifier_call = default_monarch_init_process,
+ .priority = 0 /* we need to be notified last */
+ };
IA64_MCA_DEBUG("%s: begin\n", __FUNCTION__);
@@ -1555,6 +1621,10 @@ ia64_mca_init(void)
"(status %ld)\n", rc);
return;
}
+ if (register_die_notifier(&default_init_monarch_nb)) {
+ printk(KERN_ERR "Failed to register default monarch INIT process\n");
+ return;
+ }
IA64_MCA_DEBUG("%s: registered OS INIT handler with SAL\n", __FUNCTION__);
diff --git a/arch/ia64/kernel/mca_drv.c b/arch/ia64/kernel/mca_drv.c
index eb39bc9c133..3492e3211a4 100644
--- a/arch/ia64/kernel/mca_drv.c
+++ b/arch/ia64/kernel/mca_drv.c
@@ -547,9 +547,20 @@ recover_from_processor_error(int platform, slidx_table_t *slidx,
(pal_processor_state_info_t*)peidx_psp(peidx);
/*
- * We cannot recover errors with other than bus_check.
+ * Processor recovery status must key off of the PAL recovery
+ * status in the Processor State Parameter.
*/
- if (psp->cc || psp->rc || psp->uc)
+
+ /*
+ * The machine check is corrected.
+ */
+ if (psp->cm == 1)
+ return 1;
+
+ /*
+ * The error was not contained. Software must be reset.
+ */
+ if (psp->us || psp->ci == 0)
return 0;
/*
@@ -570,8 +581,6 @@ recover_from_processor_error(int platform, slidx_table_t *slidx,
return 0;
if (pbci->eb && pbci->bsi > 0)
return 0;
- if (psp->ci == 0)
- return 0;
/*
 * This is a local MCA and estimated as a recoverable external bus error.
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 640d6908f8e..e92ea64d804 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -4,6 +4,9 @@
* Copyright (C) 1998-2003 Hewlett-Packard Co
* David Mosberger-Tang <davidm@hpl.hp.com>
* 04/11/17 Ashok Raj <ashok.raj@intel.com> Added CPU Hotplug Support
+ *
+ * 2005-10-07 Keith Owens <kaos@sgi.com>
+ * Add notify_die() hooks.
*/
#define __KERNEL_SYSCALLS__ /* see <asm/unistd.h> */
#include <linux/config.h>
@@ -34,6 +37,7 @@
#include <asm/elf.h>
#include <asm/ia32.h>
#include <asm/irq.h>
+#include <asm/kdebug.h>
#include <asm/pgalloc.h>
#include <asm/processor.h>
#include <asm/sal.h>
@@ -808,12 +812,14 @@ cpu_halt (void)
void
machine_restart (char *restart_cmd)
{
+ (void) notify_die(DIE_MACHINE_RESTART, restart_cmd, NULL, 0, 0, 0);
(*efi.reset_system)(EFI_RESET_WARM, 0, 0, NULL);
}
void
machine_halt (void)
{
+ (void) notify_die(DIE_MACHINE_HALT, "", NULL, 0, 0, 0);
cpu_halt();
}
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 3af6de36a48..5add0bcf87a 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -461,6 +461,7 @@ setup_arch (char **cmdline_p)
#endif
cpu_init(); /* initialize the bootstrap CPU */
+ mmu_context_init(); /* initialize context_id bitmap */
#ifdef CONFIG_ACPI
acpi_boot_init();
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index 774f34b675c..58ce07efc56 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -387,15 +387,14 @@ setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set,
struct sigscratch *scr)
{
extern char __kernel_sigtramp[];
- unsigned long tramp_addr, new_rbs = 0;
+ unsigned long tramp_addr, new_rbs = 0, new_sp;
struct sigframe __user *frame;
long err;
- frame = (void __user *) scr->pt.r12;
+ new_sp = scr->pt.r12;
tramp_addr = (unsigned long) __kernel_sigtramp;
- if ((ka->sa.sa_flags & SA_ONSTACK) && sas_ss_flags((unsigned long) frame) == 0) {
- frame = (void __user *) ((current->sas_ss_sp + current->sas_ss_size)
- & ~(STACK_ALIGN - 1));
+ if ((ka->sa.sa_flags & SA_ONSTACK) && sas_ss_flags(new_sp) == 0) {
+ new_sp = current->sas_ss_sp + current->sas_ss_size;
/*
* We need to check for the register stack being on the signal stack
* separately, because it's switched separately (memory stack is switched
@@ -404,7 +403,7 @@ setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set,
if (!rbs_on_sig_stack(scr->pt.ar_bspstore))
new_rbs = (current->sas_ss_sp + sizeof(long) - 1) & ~(sizeof(long) - 1);
}
- frame = (void __user *) frame - ((sizeof(*frame) + STACK_ALIGN - 1) & ~(STACK_ALIGN - 1));
+ frame = (void __user *) ((new_sp - sizeof(*frame)) & -STACK_ALIGN);
if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
return force_sigsegv_info(sig, frame);
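The new frame computation rounds down with '& -STACK_ALIGN', which for a power-of-two STACK_ALIGN gives the same alignment as the '& ~(STACK_ALIGN - 1)' form used elsewhere in this function. Taking STACK_ALIGN = 16 purely for illustration:

/* -16 == ~15 == ...fffffff0, so the AND clears the low four bits:
 *   0x1f3c4 & -16  ==  0x1f3c4 & ~0xf  ==  0x1f3c0
 * i.e. new_sp - sizeof(*frame) rounded down to a 16-byte boundary.
 */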
diff --git a/arch/ia64/kernel/traps.c b/arch/ia64/kernel/traps.c
index f970359e7ed..fba5fdd1f96 100644
--- a/arch/ia64/kernel/traps.c
+++ b/arch/ia64/kernel/traps.c
@@ -30,17 +30,20 @@ fpswa_interface_t *fpswa_interface;
EXPORT_SYMBOL(fpswa_interface);
struct notifier_block *ia64die_chain;
-static DEFINE_SPINLOCK(die_notifier_lock);
-int register_die_notifier(struct notifier_block *nb)
+int
+register_die_notifier(struct notifier_block *nb)
{
- int err = 0;
- unsigned long flags;
- spin_lock_irqsave(&die_notifier_lock, flags);
- err = notifier_chain_register(&ia64die_chain, nb);
- spin_unlock_irqrestore(&die_notifier_lock, flags);
- return err;
+ return notifier_chain_register(&ia64die_chain, nb);
}
+EXPORT_SYMBOL_GPL(register_die_notifier);
+
+int
+unregister_die_notifier(struct notifier_block *nb)
+{
+ return notifier_chain_unregister(&ia64die_chain, nb);
+}
+EXPORT_SYMBOL_GPL(unregister_die_notifier);
void __init
trap_init (void)
@@ -105,6 +108,7 @@ die (const char *str, struct pt_regs *regs, long err)
if (++die.lock_owner_depth < 3) {
printk("%s[%d]: %s %ld [%d]\n",
current->comm, current->pid, str, err, ++die_counter);
+ (void) notify_die(DIE_OOPS, (char *)str, regs, err, 255, SIGSEGV);
show_regs(regs);
} else
printk(KERN_ERR "Recursive die() failure, output suppressed\n");
@@ -155,9 +159,8 @@ __kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
switch (break_num) {
case 0: /* unknown error (used by GCC for __builtin_abort()) */
if (notify_die(DIE_BREAK, "break 0", regs, break_num, TRAP_BRKPT, SIGTRAP)
- == NOTIFY_STOP) {
+ == NOTIFY_STOP)
return;
- }
die_if_kernel("bugcheck!", regs, break_num);
sig = SIGILL; code = ILL_ILLOPC;
break;
@@ -210,15 +213,6 @@ __kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
sig = SIGILL; code = __ILL_BNDMOD;
break;
- case 0x80200:
- case 0x80300:
- if (notify_die(DIE_BREAK, "kprobe", regs, break_num, TRAP_BRKPT, SIGTRAP)
- == NOTIFY_STOP) {
- return;
- }
- sig = SIGTRAP; code = TRAP_BRKPT;
- break;
-
default:
if (break_num < 0x40000 || break_num > 0x100000)
die_if_kernel("Bad break", regs, break_num);
@@ -226,6 +220,9 @@ __kprobes ia64_bad_break (unsigned long break_num, struct pt_regs *regs)
if (break_num < 0x80000) {
sig = SIGILL; code = __ILL_BREAK;
} else {
+ if (notify_die(DIE_BREAK, "bad break", regs, break_num, TRAP_BRKPT, SIGTRAP)
+ == NOTIFY_STOP)
+ return;
sig = SIGTRAP; code = TRAP_BRKPT;
}
}
@@ -578,12 +575,11 @@ ia64_fault (unsigned long vector, unsigned long isr, unsigned long ifa,
#endif
break;
case 35: siginfo.si_code = TRAP_BRANCH; ifa = 0; break;
- case 36:
- if (notify_die(DIE_SS, "ss", &regs, vector,
- vector, SIGTRAP) == NOTIFY_STOP)
- return;
- siginfo.si_code = TRAP_TRACE; ifa = 0; break;
+ case 36: siginfo.si_code = TRAP_TRACE; ifa = 0; break;
}
+ if (notify_die(DIE_FAULT, "ia64_fault", &regs, vector, siginfo.si_code, SIGTRAP)
+ == NOTIFY_STOP)
+ return;
siginfo.si_signo = SIGTRAP;
siginfo.si_errno = 0;
siginfo.si_addr = (void __user *) ifa;
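With register_die_notifier() reduced to a plain wrapper and unregister_die_notifier() added and exported, other code can hook the notify_die() points introduced in mca.c and traps.c above. The sketch below is modelled on default_monarch_init_process() from the mca.c hunk; the event tested and the message are illustrative only.

/* Sketch of a die-chain client (event and message are illustrative). */
#include <asm/kdebug.h>

static int example_die_handler(struct notifier_block *self,
			       unsigned long val, void *data)
{
	if (val != DIE_OOPS)			/* only interested in oopses here */
		return NOTIFY_DONE;
	printk(KERN_ERR "example: oops notification received\n");
	return NOTIFY_DONE;			/* never stop the chain unless handled */
}

static struct notifier_block example_die_nb = {
	.notifier_call	= example_die_handler,
	.priority	= 0,
};

static int __init example_die_init(void)
{
	return register_die_notifier(&example_die_nb);
}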
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index a88cdb7232f..0f776b032d3 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -350,14 +350,12 @@ static void __init initialize_pernode_data(void)
* for best.
* @nid: node id
* @pernodesize: size of this node's pernode data
- * @align: alignment to use for this node's pernode data
*/
-static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize,
- unsigned long align)
+static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize)
{
void *ptr = NULL;
u8 best = 0xff;
- int bestnode = -1, node;
+ int bestnode = -1, node, anynode = 0;
for_each_online_node(node) {
if (node_isset(node, memory_less_mask))
@@ -366,13 +364,15 @@ static void __init *memory_less_node_alloc(int nid, unsigned long pernodesize,
best = node_distance(nid, node);
bestnode = node;
}
+ anynode = node;
}
- ptr = __alloc_bootmem_node(mem_data[bestnode].pgdat,
- pernodesize, align, __pa(MAX_DMA_ADDRESS));
+ if (bestnode == -1)
+ bestnode = anynode;
+
+ ptr = __alloc_bootmem_node(mem_data[bestnode].pgdat, pernodesize,
+ PERCPU_PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
- if (!ptr)
- panic("NO memory for memory less node\n");
return ptr;
}
@@ -413,8 +413,7 @@ static void __init memory_less_nodes(void)
for_each_node_mask(node, memory_less_mask) {
pernodesize = compute_pernodesize(node);
- pernode = memory_less_node_alloc(node, pernodesize,
- (node) ? (node * PERCPU_PAGE_SIZE) : (1024*1024));
+ pernode = memory_less_node_alloc(node, pernodesize);
fill_pernode(node, __pa(pernode), pernodesize);
}
diff --git a/arch/ia64/mm/tlb.c b/arch/ia64/mm/tlb.c
index c79a9b96d02..41105d45442 100644
--- a/arch/ia64/mm/tlb.c
+++ b/arch/ia64/mm/tlb.c
@@ -8,6 +8,8 @@
* Modified RID allocation for SMP
* Goutham Rao <goutham.rao@intel.com>
* IPI based ptc implementation and A-step IPI implementation.
+ * Rohit Seth <rohit.seth@intel.com>
+ * Ken Chen <kenneth.w.chen@intel.com>
*/
#include <linux/config.h>
#include <linux/module.h>
@@ -16,78 +18,75 @@
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
+#include <linux/bootmem.h>
#include <asm/delay.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/pal.h>
#include <asm/tlbflush.h>
+#include <asm/dma.h>
static struct {
unsigned long mask; /* mask of supported purge page-sizes */
- unsigned long max_bits; /* log2() of largest supported purge page-size */
+ unsigned long max_bits; /* log2 of largest supported purge page-size */
} purge;
struct ia64_ctx ia64_ctx = {
.lock = SPIN_LOCK_UNLOCKED,
.next = 1,
- .limit = (1 << 15) - 1, /* start out with the safe (architected) limit */
.max_ctx = ~0U
};
DEFINE_PER_CPU(u8, ia64_need_tlb_flush);
/*
+ * Initializes the ia64_ctx.bitmap array based on max_ctx+1.
+ * Called after cpu_init() has set up ia64_ctx.max_ctx based on
+ * the maximum RID that is supported by the boot CPU.
+ */
+void __init
+mmu_context_init (void)
+{
+ ia64_ctx.bitmap = alloc_bootmem((ia64_ctx.max_ctx+1)>>3);
+ ia64_ctx.flushmap = alloc_bootmem((ia64_ctx.max_ctx+1)>>3);
+}
+
+/*
* Acquire the ia64_ctx.lock before calling this function!
*/
void
wrap_mmu_context (struct mm_struct *mm)
{
- unsigned long tsk_context, max_ctx = ia64_ctx.max_ctx;
- struct task_struct *tsk;
- int i;
+ int i, cpu;
+ unsigned long flush_bit;
- if (ia64_ctx.next > max_ctx)
- ia64_ctx.next = 300; /* skip daemons */
- ia64_ctx.limit = max_ctx + 1;
+ for (i=0; i <= ia64_ctx.max_ctx / BITS_PER_LONG; i++) {
+ flush_bit = xchg(&ia64_ctx.flushmap[i], 0);
+ ia64_ctx.bitmap[i] ^= flush_bit;
+ }
+
+ /* use offset at 300 to skip daemons */
+ ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
+ ia64_ctx.max_ctx, 300);
+ ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
+ ia64_ctx.max_ctx, ia64_ctx.next);
/*
- * Scan all the task's mm->context and set proper safe range
+ * can't call flush_tlb_all() here because of race condition
+ * with O(1) scheduler [EF]
*/
-
- read_lock(&tasklist_lock);
- repeat:
- for_each_process(tsk) {
- if (!tsk->mm)
- continue;
- tsk_context = tsk->mm->context;
- if (tsk_context == ia64_ctx.next) {
- if (++ia64_ctx.next >= ia64_ctx.limit) {
- /* empty range: reset the range limit and start over */
- if (ia64_ctx.next > max_ctx)
- ia64_ctx.next = 300;
- ia64_ctx.limit = max_ctx + 1;
- goto repeat;
- }
- }
- if ((tsk_context > ia64_ctx.next) && (tsk_context < ia64_ctx.limit))
- ia64_ctx.limit = tsk_context;
- }
- read_unlock(&tasklist_lock);
- /* can't call flush_tlb_all() here because of race condition with O(1) scheduler [EF] */
- {
- int cpu = get_cpu(); /* prevent preemption/migration */
- for_each_online_cpu(i) {
- if (i != cpu)
- per_cpu(ia64_need_tlb_flush, i) = 1;
- }
- put_cpu();
- }
+ cpu = get_cpu(); /* prevent preemption/migration */
+ for_each_online_cpu(i)
+ if (i != cpu)
+ per_cpu(ia64_need_tlb_flush, i) = 1;
+ put_cpu();
local_flush_tlb_all();
}
void
-ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long nbits)
+ia64_global_tlb_purge (struct mm_struct *mm, unsigned long start,
+ unsigned long end, unsigned long nbits)
{
static DEFINE_SPINLOCK(ptcg_lock);
@@ -135,7 +134,8 @@ local_flush_tlb_all (void)
}
void
-flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long end)
+flush_tlb_range (struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
{
struct mm_struct *mm = vma->vm_mm;
unsigned long size = end - start;
@@ -149,7 +149,8 @@ flush_tlb_range (struct vm_area_struct *vma, unsigned long start, unsigned long
#endif
nbits = ia64_fls(size + 0xfff);
- while (unlikely (((1UL << nbits) & purge.mask) == 0) && (nbits < purge.max_bits))
+ while (unlikely (((1UL << nbits) & purge.mask) == 0) &&
+ (nbits < purge.max_bits))
++nbits;
if (nbits > purge.max_bits)
nbits = purge.max_bits;
@@ -191,5 +192,5 @@ ia64_tlb_init (void)
local_cpu_data->ptce_stride[0] = ptce_info.stride[0];
local_cpu_data->ptce_stride[1] = ptce_info.stride[1];
- local_flush_tlb_all(); /* nuke left overs from bootstrapping... */
+ local_flush_tlb_all(); /* nuke left overs from bootstrapping... */
}
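wrap_mmu_context() above now works on two bitmaps: bits set in ia64_ctx.bitmap mark contexts in use, and contexts released since the last wrap accumulate in ia64_ctx.flushmap. The xchg()/XOR loop drains flushmap and clears exactly those bits from bitmap before the next/limit window is rebuilt with find_next_zero_bit()/find_next_bit(). One word of that loop, with invented values:

/* Worked example of the wrap loop above (values are invented):
 *   bitmap[i]   = 1111 0110   contexts currently marked in use
 *   flushmap[i] = 0010 0100   contexts freed since the last wrap
 */
flush_bit = xchg(&ia64_ctx.flushmap[i], 0);	/* take 0010 0100, leave flushmap[i] = 0 */
ia64_ctx.bitmap[i] ^= flush_bit;		/* 1111 0110 ^ 0010 0100 = 1101 0010 */
/* the two freed contexts become visible again to find_next_zero_bit() */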
diff --git a/arch/ia64/pci/pci.c b/arch/ia64/pci/pci.c
index 017cfc3f478..20d76fae24e 100644
--- a/arch/ia64/pci/pci.c
+++ b/arch/ia64/pci/pci.c
@@ -95,7 +95,7 @@ pci_sal_write (unsigned int seg, unsigned int bus, unsigned int devfn,
}
static struct pci_raw_ops pci_sal_ops = {
- .read = pci_sal_read,
+ .read = pci_sal_read,
.write = pci_sal_write
};
@@ -137,35 +137,98 @@ alloc_pci_controller (int seg)
return controller;
}
-static u64 __devinit
-add_io_space (struct acpi_resource_address64 *addr)
+struct pci_root_info {
+ struct pci_controller *controller;
+ char *name;
+};
+
+static unsigned int
+new_space (u64 phys_base, int sparse)
{
- u64 offset;
- int sparse = 0;
+ u64 mmio_base;
int i;
- if (addr->address_translation_offset == 0)
- return IO_SPACE_BASE(0); /* part of legacy IO space */
-
- if (addr->attribute.io.translation_attribute == ACPI_SPARSE_TRANSLATION)
- sparse = 1;
+ if (phys_base == 0)
+ return 0; /* legacy I/O port space */
- offset = (u64) ioremap(addr->address_translation_offset, 0);
+ mmio_base = (u64) ioremap(phys_base, 0);
for (i = 0; i < num_io_spaces; i++)
- if (io_space[i].mmio_base == offset &&
+ if (io_space[i].mmio_base == mmio_base &&
io_space[i].sparse == sparse)
- return IO_SPACE_BASE(i);
+ return i;
if (num_io_spaces == MAX_IO_SPACES) {
- printk("Too many IO port spaces\n");
+ printk(KERN_ERR "PCI: Too many IO port spaces "
+ "(MAX_IO_SPACES=%lu)\n", MAX_IO_SPACES);
return ~0;
}
i = num_io_spaces++;
- io_space[i].mmio_base = offset;
+ io_space[i].mmio_base = mmio_base;
io_space[i].sparse = sparse;
- return IO_SPACE_BASE(i);
+ return i;
+}
+
+static u64 __devinit
+add_io_space (struct pci_root_info *info, struct acpi_resource_address64 *addr)
+{
+ struct resource *resource;
+ char *name;
+ u64 base, min, max, base_port;
+ unsigned int sparse = 0, space_nr, len;
+
+ resource = kzalloc(sizeof(*resource), GFP_KERNEL);
+ if (!resource) {
+ printk(KERN_ERR "PCI: No memory for %s I/O port space\n",
+ info->name);
+ goto out;
+ }
+
+ len = strlen(info->name) + 32;
+ name = kzalloc(len, GFP_KERNEL);
+ if (!name) {
+ printk(KERN_ERR "PCI: No memory for %s I/O port space name\n",
+ info->name);
+ goto free_resource;
+ }
+
+ min = addr->min_address_range;
+ max = min + addr->address_length - 1;
+ if (addr->attribute.io.translation_attribute == ACPI_SPARSE_TRANSLATION)
+ sparse = 1;
+
+ space_nr = new_space(addr->address_translation_offset, sparse);
+ if (space_nr == ~0)
+ goto free_name;
+
+ base = __pa(io_space[space_nr].mmio_base);
+ base_port = IO_SPACE_BASE(space_nr);
+ snprintf(name, len, "%s I/O Ports %08lx-%08lx", info->name,
+ base_port + min, base_port + max);
+
+ /*
+ * The SDM guarantees the legacy 0-64K space is sparse, but if the
+ * mapping is done by the processor (not the bridge), ACPI may not
+ * mark it as sparse.
+ */
+ if (space_nr == 0)
+ sparse = 1;
+
+ resource->name = name;
+ resource->flags = IORESOURCE_MEM;
+ resource->start = base + (sparse ? IO_SPACE_SPARSE_ENCODING(min) : min);
+ resource->end = base + (sparse ? IO_SPACE_SPARSE_ENCODING(max) : max);
+ insert_resource(&iomem_resource, resource);
+
+ return base_port;
+
+free_name:
+ kfree(name);
+free_resource:
+ kfree(resource);
+out:
+ return ~0;
}
static acpi_status __devinit resource_to_window(struct acpi_resource *resource,
@@ -205,11 +268,6 @@ count_window (struct acpi_resource *resource, void *data)
return AE_OK;
}
-struct pci_root_info {
- struct pci_controller *controller;
- char *name;
-};
-
static __devinit acpi_status add_window(struct acpi_resource *res, void *data)
{
struct pci_root_info *info = data;
@@ -231,7 +289,7 @@ static __devinit acpi_status add_window(struct acpi_resource *res, void *data)
} else if (addr.resource_type == ACPI_IO_RANGE) {
flags = IORESOURCE_IO;
root = &ioport_resource;
- offset = add_io_space(&addr);
+ offset = add_io_space(info, &addr);
if (offset == ~0)
return AE_OK;
} else
@@ -241,7 +299,7 @@ static __devinit acpi_status add_window(struct acpi_resource *res, void *data)
window->resource.name = info->name;
window->resource.flags = flags;
window->resource.start = addr.min_address_range + offset;
- window->resource.end = addr.max_address_range + offset;
+ window->resource.end = window->resource.start + addr.address_length - 1;
window->resource.child = NULL;
window->offset = offset;
@@ -739,7 +797,7 @@ int pci_vector_resources(int last, int nr_released)
{
int count = nr_released;
- count += (IA64_LAST_DEVICE_VECTOR - last);
+ count += (IA64_LAST_DEVICE_VECTOR - last);
return count;
}
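
add_io_space() now records each translated I/O port window as a named entry in iomem_resource. On ia64 the CPU reaches such ports through a memory-mapped window, and for a "sparse" space each 4-byte port group lands on its own page, which is why resource->start/end are computed with IO_SPACE_SPARSE_ENCODING(). The sketch below shows the port-to-MMIO-offset arithmetic in user space; the encoding formula follows the usual ia64 convention but is written here as an assumption rather than quoted from asm/io.h, and the base address is made up.

#include <stdio.h>
#include <stdint.h>

/*
 * Assumed sparse encoding in the usual ia64 style: every 4-byte group of
 * ports gets its own 4KB page in the bridge's MMIO window.  The kernel
 * takes the real formula from asm/io.h; this one is for illustration.
 */
static uint64_t sparse_encode(uint64_t port)
{
    return ((port >> 2) << 12) | (port & 0xfff);
}

static uint64_t port_to_mmio(uint64_t mmio_base, uint64_t port, int sparse)
{
    return mmio_base + (sparse ? sparse_encode(port) : port);
}

int main(void)
{
    uint64_t base = 0xf0000000ULL;          /* hypothetical MMIO base */

    printf("dense  port 0x1f0 -> %#llx\n",
           (unsigned long long)port_to_mmio(base, 0x1f0, 0));
    printf("sparse port 0x1f0 -> %#llx\n",
           (unsigned long long)port_to_mmio(base, 0x1f0, 1));
    return 0;
}
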
diff --git a/arch/ia64/sn/kernel/io_init.c b/arch/ia64/sn/kernel/io_init.c
index b4f5053f5e1..05e4ea88998 100644
--- a/arch/ia64/sn/kernel/io_init.c
+++ b/arch/ia64/sn/kernel/io_init.c
@@ -349,7 +349,7 @@ void sn_pci_controller_fixup(int segment, int busnum, struct pci_bus *bus)
return; /*bus # does not exist */
prom_bussoft_ptr = __va(prom_bussoft_ptr);
- controller = kcalloc(1,sizeof(struct pci_controller), GFP_KERNEL);
+ controller = kzalloc(sizeof(struct pci_controller), GFP_KERNEL);
controller->segment = segment;
if (!controller)
BUG();
diff --git a/arch/ia64/sn/kernel/xpc.h b/arch/ia64/sn/kernel/xpc.h
index fbcedc7c27f..5483a9f227d 100644
--- a/arch/ia64/sn/kernel/xpc.h
+++ b/arch/ia64/sn/kernel/xpc.h
@@ -163,7 +163,7 @@ struct xpc_vars {
u8 version;
u64 heartbeat;
u64 heartbeating_to_mask;
- u64 kdb_status; /* 0 = machine running */
+ u64 heartbeat_offline; /* if 0, heartbeat should be changing */
int act_nasid;
int act_phys_cpuid;
u64 vars_part_pa;
diff --git a/arch/ia64/sn/kernel/xpc_main.c b/arch/ia64/sn/kernel/xpc_main.c
index cece3c7c69b..b617236524c 100644
--- a/arch/ia64/sn/kernel/xpc_main.c
+++ b/arch/ia64/sn/kernel/xpc_main.c
@@ -57,6 +57,7 @@
#include <linux/reboot.h>
#include <asm/sn/intr.h>
#include <asm/sn/sn_sal.h>
+#include <asm/kdebug.h>
#include <asm/uaccess.h>
#include "xpc.h"
@@ -188,6 +189,11 @@ static struct notifier_block xpc_reboot_notifier = {
.notifier_call = xpc_system_reboot,
};
+static int xpc_system_die(struct notifier_block *, unsigned long, void *);
+static struct notifier_block xpc_die_notifier = {
+ .notifier_call = xpc_system_die,
+};
+
/*
* Timer function to enforce the timelimit on the partition disengage request.
@@ -997,6 +1003,9 @@ xpc_do_exit(enum xpc_retval reason)
/* take ourselves off of the reboot_notifier_list */
(void) unregister_reboot_notifier(&xpc_reboot_notifier);
+ /* take ourselves off of the die_notifier list */
+ (void) unregister_die_notifier(&xpc_die_notifier);
+
/* close down protections for IPI operations */
xpc_restrict_IPI_ops();
@@ -1011,6 +1020,63 @@ xpc_do_exit(enum xpc_retval reason)
/*
+ * Called when the system is about to be either restarted or halted.
+ */
+static void
+xpc_die_disengage(void)
+{
+ struct xpc_partition *part;
+ partid_t partid;
+ unsigned long engaged;
+ long time, print_time, disengage_request_timeout;
+
+
+ /* keep xpc_hb_checker thread from doing anything (just in case) */
+ xpc_exiting = 1;
+
+ xpc_vars->heartbeating_to_mask = 0; /* indicate we're deactivated */
+
+ for (partid = 1; partid < XP_MAX_PARTITIONS; partid++) {
+ part = &xpc_partitions[partid];
+
+ if (!XPC_SUPPORTS_DISENGAGE_REQUEST(part->
+ remote_vars_version)) {
+
+ /* just in case it was left set by an earlier XPC */
+ xpc_clear_partition_engaged(1UL << partid);
+ continue;
+ }
+
+ if (xpc_partition_engaged(1UL << partid) ||
+ part->act_state != XPC_P_INACTIVE) {
+ xpc_request_partition_disengage(part);
+ xpc_mark_partition_disengaged(part);
+ xpc_IPI_send_disengage(part);
+ }
+ }
+
+ print_time = rtc_time();
+ disengage_request_timeout = print_time +
+ (xpc_disengage_request_timelimit * sn_rtc_cycles_per_second);
+
+ /* wait for all other partitions to disengage from us */
+
+ while ((engaged = xpc_partition_engaged(-1UL)) &&
+ (time = rtc_time()) < disengage_request_timeout) {
+
+ if (time >= print_time) {
+ dev_info(xpc_part, "waiting for remote partitions to "
+ "disengage, engaged=0x%lx\n", engaged);
+ print_time = time + (XPC_DISENGAGE_PRINTMSG_INTERVAL *
+ sn_rtc_cycles_per_second);
+ }
+ }
+ dev_info(xpc_part, "finished waiting for remote partitions to "
+ "disengage, engaged=0x%lx\n", engaged);
+}
+
+
+/*
* This function is called when the system is being rebooted.
*/
static int
@@ -1038,6 +1104,33 @@ xpc_system_reboot(struct notifier_block *nb, unsigned long event, void *unused)
}
+/*
+ * This function is called when the system is being restarted or halted,
+ * or when it is entering or leaving MCA/INIT processing.
+ */
+static int
+xpc_system_die(struct notifier_block *nb, unsigned long event, void *unused)
+{
+ switch (event) {
+ case DIE_MACHINE_RESTART:
+ case DIE_MACHINE_HALT:
+ xpc_die_disengage();
+ break;
+ case DIE_MCA_MONARCH_ENTER:
+ case DIE_INIT_MONARCH_ENTER:
+ xpc_vars->heartbeat++;
+ xpc_vars->heartbeat_offline = 1;
+ break;
+ case DIE_MCA_MONARCH_LEAVE:
+ case DIE_INIT_MONARCH_LEAVE:
+ xpc_vars->heartbeat++;
+ xpc_vars->heartbeat_offline = 0;
+ break;
+ }
+
+ return NOTIFY_DONE;
+}
+
+
int __init
xpc_init(void)
{
@@ -1154,6 +1247,12 @@ xpc_init(void)
dev_warn(xpc_part, "can't register reboot notifier\n");
}
+ /* add ourselves to the die_notifier list (i.e., ia64die_chain) */
+ ret = register_die_notifier(&xpc_die_notifier);
+ if (ret != 0) {
+ dev_warn(xpc_part, "can't register die notifier\n");
+ }
+
/*
* Set the beating to other partitions into motion. This is
@@ -1179,6 +1278,9 @@ xpc_init(void)
/* take ourselves off of the reboot_notifier_list */
(void) unregister_reboot_notifier(&xpc_reboot_notifier);
+ /* take ourselves off of the die_notifier list */
+ (void) unregister_die_notifier(&xpc_die_notifier);
+
del_timer_sync(&xpc_hb_timer);
free_irq(SGI_XPC_ACTIVATE, NULL);
xpc_restrict_IPI_ops();
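
xpc_die_disengage() above asks every still-engaged partition to let go and then polls the engagement mask until it clears or the disengage-request time limit expires, logging progress once per print interval. The stand-alone model below reproduces that poll-with-progress loop with a fake clock and a fake engagement mask; none of the helper names are the XPC API.

#include <stdio.h>

static unsigned long fake_time;              /* stand-in for rtc_time()    */
static unsigned long engaged_mask = 0x6;     /* partitions 1 and 2 engaged */

static unsigned long rtc_now(void)
{
    return fake_time++;
}

static unsigned long still_engaged(void)     /* ~ xpc_partition_engaged()  */
{
    if (fake_time > 40)
        engaged_mask &= ~0x2UL;              /* pretend they drop off      */
    if (fake_time > 80)
        engaged_mask &= ~0x4UL;
    return engaged_mask;
}

int main(void)
{
    const unsigned long interval = 25, limit = 200;
    unsigned long engaged, now, print_at, deadline;

    print_at = rtc_now();
    deadline = print_at + limit;

    while ((engaged = still_engaged()) && (now = rtc_now()) < deadline) {
        if (now >= print_at) {
            printf("waiting for remote partitions, engaged=0x%lx\n", engaged);
            print_at = now + interval;
        }
    }
    printf("finished waiting, engaged=0x%lx\n", engaged);
    return 0;
}
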
diff --git a/arch/ia64/sn/kernel/xpc_partition.c b/arch/ia64/sn/kernel/xpc_partition.c
index 581e113d2d3..cdd6431853a 100644
--- a/arch/ia64/sn/kernel/xpc_partition.c
+++ b/arch/ia64/sn/kernel/xpc_partition.c
@@ -436,13 +436,13 @@ xpc_check_remote_hb(void)
}
dev_dbg(xpc_part, "partid = %d, heartbeat = %ld, last_heartbeat"
- " = %ld, kdb_status = %ld, HB_mask = 0x%lx\n", partid,
- remote_vars->heartbeat, part->last_heartbeat,
- remote_vars->kdb_status,
+ " = %ld, heartbeat_offline = %ld, HB_mask = 0x%lx\n",
+ partid, remote_vars->heartbeat, part->last_heartbeat,
+ remote_vars->heartbeat_offline,
remote_vars->heartbeating_to_mask);
if (((remote_vars->heartbeat == part->last_heartbeat) &&
- (remote_vars->kdb_status == 0)) ||
+ (remote_vars->heartbeat_offline == 0)) ||
!xpc_hb_allowed(sn_partition_id, remote_vars)) {
XPC_DEACTIVATE_PARTITION(part, xpcNoHeartbeat);
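
With kdb_status renamed to heartbeat_offline, the liveness rule becomes: a partition is declared dead only when its heartbeat has stopped advancing and it has not flagged itself as intentionally offline (the flag the die notifier sets around MCA/INIT handling). A compact restatement of that predicate follows, with hypothetical type and function names and without the additional xpc_hb_allowed() check.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct peer_vars {
    uint64_t heartbeat;          /* advances while the peer is healthy      */
    uint64_t heartbeat_offline;  /* nonzero: peer in MCA/INIT, stall is ok  */
};

/* illustrative restatement of the check in xpc_check_remote_hb() */
static bool peer_looks_dead(const struct peer_vars *now, uint64_t last_seen)
{
    return now->heartbeat == last_seen && now->heartbeat_offline == 0;
}

int main(void)
{
    struct peer_vars stalled = { .heartbeat = 42, .heartbeat_offline = 0 };
    struct peer_vars in_mca  = { .heartbeat = 42, .heartbeat_offline = 1 };

    printf("stalled: %d  in_mca: %d\n",
           peer_looks_dead(&stalled, 42), peer_looks_dead(&in_mca, 42));
    return 0;
}
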
diff --git a/arch/ia64/sn/pci/tioce_provider.c b/arch/ia64/sn/pci/tioce_provider.c
index 9f03d4e5121..dda196c9e32 100644
--- a/arch/ia64/sn/pci/tioce_provider.c
+++ b/arch/ia64/sn/pci/tioce_provider.c
@@ -218,7 +218,7 @@ tioce_alloc_map(struct tioce_kernel *ce_kern, int type, int port,
if (i > last)
return 0;
- map = kcalloc(1, sizeof(struct tioce_dmamap), GFP_ATOMIC);
+ map = kzalloc(sizeof(struct tioce_dmamap), GFP_ATOMIC);
if (!map)
return 0;
@@ -555,7 +555,7 @@ tioce_kern_init(struct tioce_common *tioce_common)
struct tioce *tioce_mmr;
struct tioce_kernel *tioce_kern;
- tioce_kern = kcalloc(1, sizeof(struct tioce_kernel), GFP_KERNEL);
+ tioce_kern = kzalloc(sizeof(struct tioce_kernel), GFP_KERNEL);
if (!tioce_kern) {
return NULL;
}
@@ -727,7 +727,7 @@ tioce_bus_fixup(struct pcibus_bussoft *prom_bussoft, struct pci_controller *cont
* Allocate kernel bus soft and copy from prom.
*/
- tioce_common = kcalloc(1, sizeof(struct tioce_common), GFP_KERNEL);
+ tioce_common = kzalloc(sizeof(struct tioce_common), GFP_KERNEL);
if (!tioce_common)
return NULL;
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 1493c7896fe..ed31062029f 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -599,6 +599,10 @@ config HAVE_ARCH_EARLY_PFN_TO_NID
def_bool y
depends on NEED_MULTIPLE_NODES
+config ARCH_MEMORY_PROBE
+ def_bool y
+ depends on MEMORY_HOTPLUG
+
# Some NUMA nodes have memory ranges that span
# other nodes. Even though a pfn is valid and
# between a node's start and end pfns, it may not
diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile
index b3ae2993efb..c04bbd32059 100644
--- a/arch/powerpc/kernel/Makefile
+++ b/arch/powerpc/kernel/Makefile
@@ -4,6 +4,7 @@
ifeq ($(CONFIG_PPC64),y)
EXTRA_CFLAGS += -mno-minimal-toc
+CFLAGS_ioctl32.o += -Ifs/
endif
ifeq ($(CONFIG_PPC32),y)
CFLAGS_prom_init.o += -fPIC
@@ -11,15 +12,21 @@ CFLAGS_btext.o += -fPIC
endif
obj-y := semaphore.o cputable.o ptrace.o syscalls.o \
- signal_32.o pmc.o
+ irq.o signal_32.o pmc.o
obj-$(CONFIG_PPC64) += setup_64.o binfmt_elf32.o sys_ppc32.o \
- signal_64.o ptrace32.o systbl.o
+ signal_64.o ptrace32.o systbl.o \
+ paca.o ioctl32.o cpu_setup_power4.o \
+ firmware.o sysfs.o
obj-$(CONFIG_ALTIVEC) += vecemu.o vector.o
obj-$(CONFIG_POWER4) += idle_power4.o
obj-$(CONFIG_PPC_OF) += of_device.o
-obj-$(CONFIG_PPC_RTAS) += rtas.o
+procfs-$(CONFIG_PPC64) := proc_ppc64.o
+obj-$(CONFIG_PROC_FS) += $(procfs-y)
+rtaspci-$(CONFIG_PPC64) := rtas_pci.o
+obj-$(CONFIG_PPC_RTAS) += rtas.o $(rtaspci-y)
obj-$(CONFIG_RTAS_FLASH) += rtas_flash.o
obj-$(CONFIG_RTAS_PROC) += rtas-proc.o
+obj-$(CONFIG_LPARCFG) += lparcfg.o
obj-$(CONFIG_IBMVIO) += vio.o
obj-$(CONFIG_GENERIC_TBSYNC) += smp-tbsync.o
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index b7575725199..8793102711a 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -106,7 +106,6 @@ int main(void)
DEFINE(ICACHEL1LINESIZE, offsetof(struct ppc64_caches, iline_size));
DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size));
DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
- DEFINE(PLATFORM, offsetof(struct systemcfg, platform));
DEFINE(PLATFORM_LPAR, PLATFORM_LPAR);
/* paca */
diff --git a/arch/ppc64/kernel/cpu_setup_power4.S b/arch/powerpc/kernel/cpu_setup_power4.S
index 1fb673c511f..cca942fe611 100644
--- a/arch/ppc64/kernel/cpu_setup_power4.S
+++ b/arch/powerpc/kernel/cpu_setup_power4.S
@@ -114,11 +114,11 @@ _GLOBAL(__setup_cpu_ppc970)
.data
.balign L1_CACHE_BYTES,0
-cpu_state_storage:
+cpu_state_storage:
.space CS_SIZE
.balign L1_CACHE_BYTES,0
.text
-
+
/* Called in normal context to backup CPU 0 state. This
* does not include cache settings. This function is also
* called for machine sleep. This does not include the MMU
@@ -151,7 +151,7 @@ _GLOBAL(__save_cpu_setup)
std r3,CS_HID4(r5)
mfspr r3,SPRN_HID5
std r3,CS_HID5(r5)
-
+
2:
mtcr r7
blr
@@ -213,7 +213,7 @@ _GLOBAL(__restore_cpu_setup)
mtspr SPRN_HID1,r3
sync
isync
-
+
/* Restore HID4 */
ld r3,CS_HID4(r5)
sync
diff --git a/arch/powerpc/kernel/cputable.c b/arch/powerpc/kernel/cputable.c
index cc4e9eb1c13..1d85cedbbb7 100644
--- a/arch/powerpc/kernel/cputable.c
+++ b/arch/powerpc/kernel/cputable.c
@@ -52,6 +52,9 @@ extern void __setup_cpu_ppc970(unsigned long offset, struct cpu_spec* spec);
#define COMMON_USER (PPC_FEATURE_32 | PPC_FEATURE_HAS_FPU | \
PPC_FEATURE_HAS_MMU)
#define COMMON_USER_PPC64 (COMMON_USER | PPC_FEATURE_64)
+#define COMMON_USER_POWER4 (COMMON_USER_PPC64 | PPC_FEATURE_POWER4)
+#define COMMON_USER_POWER5 (COMMON_USER_PPC64 | PPC_FEATURE_POWER5)
+#define COMMON_USER_POWER5_PLUS (COMMON_USER_PPC64 | PPC_FEATURE_POWER5_PLUS)
/* We only set the spe features if the kernel was compiled with
@@ -160,7 +163,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00350000,
.cpu_name = "POWER4 (gp)",
.cpu_features = CPU_FTRS_POWER4,
- .cpu_user_features = COMMON_USER_PPC64,
+ .cpu_user_features = COMMON_USER_POWER4,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -175,7 +178,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00380000,
.cpu_name = "POWER4+ (gq)",
.cpu_features = CPU_FTRS_POWER4,
- .cpu_user_features = COMMON_USER_PPC64,
+ .cpu_user_features = COMMON_USER_POWER4,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 8,
@@ -190,7 +193,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00390000,
.cpu_name = "PPC970",
.cpu_features = CPU_FTRS_PPC970,
- .cpu_user_features = COMMON_USER_PPC64 |
+ .cpu_user_features = COMMON_USER_POWER4 |
PPC_FEATURE_HAS_ALTIVEC_COMP,
.icache_bsize = 128,
.dcache_bsize = 128,
@@ -212,7 +215,7 @@ struct cpu_spec cpu_specs[] = {
#else
.cpu_features = CPU_FTRS_PPC970,
#endif
- .cpu_user_features = COMMON_USER_PPC64 |
+ .cpu_user_features = COMMON_USER_POWER4 |
PPC_FEATURE_HAS_ALTIVEC_COMP,
.icache_bsize = 128,
.dcache_bsize = 128,
@@ -230,7 +233,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x00440000,
.cpu_name = "PPC970MP",
.cpu_features = CPU_FTRS_PPC970,
- .cpu_user_features = COMMON_USER_PPC64 |
+ .cpu_user_features = COMMON_USER_POWER4 |
PPC_FEATURE_HAS_ALTIVEC_COMP,
.icache_bsize = 128,
.dcache_bsize = 128,
@@ -245,7 +248,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x003a0000,
.cpu_name = "POWER5 (gr)",
.cpu_features = CPU_FTRS_POWER5,
- .cpu_user_features = COMMON_USER_PPC64,
+ .cpu_user_features = COMMON_USER_POWER5,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
@@ -260,7 +263,7 @@ struct cpu_spec cpu_specs[] = {
.pvr_value = 0x003b0000,
.cpu_name = "POWER5 (gs)",
.cpu_features = CPU_FTRS_POWER5,
- .cpu_user_features = COMMON_USER_PPC64,
+ .cpu_user_features = COMMON_USER_POWER5_PLUS,
.icache_bsize = 128,
.dcache_bsize = 128,
.num_pmcs = 6,
@@ -276,7 +279,7 @@ struct cpu_spec cpu_specs[] = {
.cpu_name = "Cell Broadband Engine",
.cpu_features = CPU_FTRS_CELL,
.cpu_user_features = COMMON_USER_PPC64 |
- PPC_FEATURE_HAS_ALTIVEC_COMP,
+ PPC_FEATURE_CELL | PPC_FEATURE_HAS_ALTIVEC_COMP,
.icache_bsize = 128,
.dcache_bsize = 128,
.cpu_setup = __setup_cpu_be,
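
The new COMMON_USER_POWER4/POWER5/POWER5_PLUS masks (and PPC_FEATURE_CELL) end up in the AT_HWCAP auxiliary vector, so userspace can test for a specific core family rather than just "64-bit PowerPC". A hedged userspace example using glibc's getauxval() follows; the PPC_FEATURE_* fallback values are assumptions kept only so the snippet compiles anywhere and should normally come from the kernel or libc hwcap headers.

#include <stdio.h>
#include <sys/auxv.h>

#ifndef PPC_FEATURE_POWER5
#define PPC_FEATURE_POWER5      0x00040000  /* fallback; value assumed */
#endif
#ifndef PPC_FEATURE_HAS_ALTIVEC
#define PPC_FEATURE_HAS_ALTIVEC 0x10000000  /* fallback; value assumed */
#endif

int main(void)
{
    unsigned long hwcap = getauxval(AT_HWCAP);

    printf("POWER5:  %s\n", (hwcap & PPC_FEATURE_POWER5) ? "yes" : "no");
    printf("AltiVec: %s\n", (hwcap & PPC_FEATURE_HAS_ALTIVEC) ? "yes" : "no");
    return 0;
}
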
diff --git a/arch/ppc64/kernel/firmware.c b/arch/powerpc/kernel/firmware.c
index d8432c0fb27..65eae752a52 100644
--- a/arch/ppc64/kernel/firmware.c
+++ b/arch/powerpc/kernel/firmware.c
@@ -1,6 +1,4 @@
/*
- * arch/ppc64/kernel/firmware.c
- *
* Extracted from cputable.c
*
* Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
diff --git a/arch/powerpc/kernel/fpu.S b/arch/powerpc/kernel/fpu.S
index 4d6001fa1cf..b780b42c95f 100644
--- a/arch/powerpc/kernel/fpu.S
+++ b/arch/powerpc/kernel/fpu.S
@@ -41,20 +41,20 @@ _GLOBAL(load_up_fpu)
#ifndef CONFIG_SMP
LOADBASE(r3, last_task_used_math)
toreal(r3)
- LDL r4,OFF(last_task_used_math)(r3)
- CMPI 0,r4,0
+ PPC_LL r4,OFF(last_task_used_math)(r3)
+ PPC_LCMPI 0,r4,0
beq 1f
toreal(r4)
addi r4,r4,THREAD /* want last_task_used_math->thread */
SAVE_32FPRS(0, r4)
mffs fr0
stfd fr0,THREAD_FPSCR(r4)
- LDL r5,PT_REGS(r4)
+ PPC_LL r5,PT_REGS(r4)
toreal(r5)
- LDL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+ PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
li r10,MSR_FP|MSR_FE0|MSR_FE1
andc r4,r4,r10 /* disable FP for previous task */
- STL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+ PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#endif /* CONFIG_SMP */
/* enable use of FP after return */
@@ -77,7 +77,7 @@ _GLOBAL(load_up_fpu)
#ifndef CONFIG_SMP
subi r4,r5,THREAD
fromreal(r4)
- STL r4,OFF(last_task_used_math)(r3)
+ PPC_STL r4,OFF(last_task_used_math)(r3)
#endif /* CONFIG_SMP */
/* restore registers and return */
/* we haven't used ctr or xer or lr */
@@ -97,24 +97,24 @@ _GLOBAL(giveup_fpu)
MTMSRD(r5) /* enable use of fpu now */
SYNC_601
isync
- CMPI 0,r3,0
+ PPC_LCMPI 0,r3,0
beqlr- /* if no previous owner, done */
addi r3,r3,THREAD /* want THREAD of task */
- LDL r5,PT_REGS(r3)
- CMPI 0,r5,0
+ PPC_LL r5,PT_REGS(r3)
+ PPC_LCMPI 0,r5,0
SAVE_32FPRS(0, r3)
mffs fr0
stfd fr0,THREAD_FPSCR(r3)
beq 1f
- LDL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+ PPC_LL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
li r3,MSR_FP|MSR_FE0|MSR_FE1
andc r4,r4,r3 /* disable FP for previous task */
- STL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
+ PPC_STL r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
li r5,0
LOADBASE(r4,last_task_used_math)
- STL r5,OFF(last_task_used_math)(r4)
+ PPC_STL r5,OFF(last_task_used_math)(r4)
#endif /* CONFIG_SMP */
blr
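
fpu.S is now shared between 32- and 64-bit builds, so the old LDL/STL/CMPI names give way to PPC_LL/PPC_STL/PPC_LCMPI macros that expand to the natural-word-size instruction for the build. The sketch below only illustrates the idea; the real definitions live in the powerpc asm headers and are used from assembly, not from C.

#include <stdio.h>

/*
 * Illustrative only: shared assembly can say "PPC_LL rX,OFF(rY)" and get
 * ld on a 64-bit build, lwz on a 32-bit one.  These are not the kernel's
 * actual macro definitions.
 */
#ifdef __powerpc64__
#define PPC_LL    ld     /* load natural word   */
#define PPC_STL   std    /* store natural word  */
#define PPC_LCMPI cmpdi  /* compare immediate   */
#else
#define PPC_LL    lwz
#define PPC_STL   stw
#define PPC_LCMPI cmpwi
#endif

#define STR(x)  #x
#define XSTR(x) STR(x)

int main(void)
{
    printf("load=%s store=%s cmp=%s\n",
           XSTR(PPC_LL), XSTR(PPC_STL), XSTR(PPC_LCMPI));
    return 0;
}
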
diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S
index 16ab40daa73..8a8bf79ef04 100644
--- a/arch/powerpc/kernel/head_64.S
+++ b/arch/powerpc/kernel/head_64.S
@@ -28,7 +28,6 @@
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
-#include <asm/systemcfg.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/bug.h>
@@ -1697,25 +1696,14 @@ _GLOBAL(pmac_secondary_start)
* SPRG3 = paca virtual address
*/
_GLOBAL(__secondary_start)
+ /* Set thread priority to MEDIUM */
+ HMT_MEDIUM
- HMT_MEDIUM /* Set thread priority to MEDIUM */
-
+ /* Load TOC */
ld r2,PACATOC(r13)
- li r6,0
- stb r6,PACAPROCENABLED(r13)
-
-#ifndef CONFIG_PPC_ISERIES
- /* Initialize the page table pointer register. */
- LOADADDR(r6,_SDR1)
- ld r6,0(r6) /* get the value of _SDR1 */
- mtspr SPRN_SDR1,r6 /* set the htab location */
-#endif
- /* Initialize the first segment table (or SLB) entry */
- ld r3,PACASTABVIRT(r13) /* get addr of segment table */
-BEGIN_FTR_SECTION
- bl .stab_initialize
-END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
- bl .slb_initialize
+
+ /* Do early setup for that CPU (stab, slb, hash table pointer) */
+ bl .early_setup_secondary
/* Initialize the kernel stack. Just a repeat for iSeries. */
LOADADDR(r3,current_set)
@@ -1724,37 +1712,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
std r1,PACAKSAVE(r13)
- ld r3,PACASTABREAL(r13) /* get raddr of segment table */
- ori r4,r3,1 /* turn on valid bit */
-
-#ifdef CONFIG_PPC_ISERIES
- li r0,-1 /* hypervisor call */
- li r3,1
- sldi r3,r3,63 /* 0x8000000000000000 */
- ori r3,r3,4 /* 0x8000000000000004 */
- sc /* HvCall_setASR */
-#else
- /* set the ASR */
- ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
- ld r3,0(r3)
- lwz r3,PLATFORM(r3) /* r3 = platform flags */
- andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */
- beq 98f /* branch if result is 0 */
- mfspr r3,SPRN_PVR
- srwi r3,r3,16
- cmpwi r3,0x37 /* SStar */
- beq 97f
- cmpwi r3,0x36 /* IStar */
- beq 97f
- cmpwi r3,0x34 /* Pulsar */
- bne 98f
-97: li r3,H_SET_ASR /* hcall = H_SET_ASR */
- HVSC /* Invoking hcall */
- b 99f
-98: /* !(rpa hypervisor) || !(star) */
- mtasr r4 /* set the stab location */
-99:
-#endif
+ /* Clear backchain so we get nice backtraces */
li r7,0
mtlr r7
@@ -1777,6 +1735,7 @@ _GLOBAL(start_secondary_prolog)
li r3,0
std r3,0(r1) /* Zero the stack frame pointer */
bl .start_secondary
+ b .
#endif
/*
@@ -1896,40 +1855,6 @@ _STATIC(start_here_multiplatform)
mr r3,r31
bl .early_setup
- /* set the ASR */
- ld r3,PACASTABREAL(r13)
- ori r4,r3,1 /* turn on valid bit */
- ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
- ld r3,0(r3)
- lwz r3,PLATFORM(r3) /* r3 = platform flags */
- andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */
- beq 98f /* branch if result is 0 */
- mfspr r3,SPRN_PVR
- srwi r3,r3,16
- cmpwi r3,0x37 /* SStar */
- beq 97f
- cmpwi r3,0x36 /* IStar */
- beq 97f
- cmpwi r3,0x34 /* Pulsar */
- bne 98f
-97: li r3,H_SET_ASR /* hcall = H_SET_ASR */
- HVSC /* Invoking hcall */
- b 99f
-98: /* !(rpa hypervisor) || !(star) */
- mtasr r4 /* set the stab location */
-99:
- /* Set SDR1 (hash table pointer) */
- ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
- ld r3,0(r3)
- lwz r3,PLATFORM(r3) /* r3 = platform flags */
- /* Test if bit 0 is set (LPAR bit) */
- andi. r3,r3,PLATFORM_LPAR
- bne 98f /* branch if result is !0 */
- LOADADDR(r6,_SDR1) /* Only if NOT LPAR */
- add r6,r6,r26
- ld r6,0(r6) /* get the value of _SDR1 */
- mtspr SPRN_SDR1,r6 /* set the htab location */
-98:
LOADADDR(r3,.start_here_common)
SET_REG_TO_CONST(r4, MSR_KERNEL)
mtspr SPRN_SRR0,r3
diff --git a/arch/ppc64/kernel/ioctl32.c b/arch/powerpc/kernel/ioctl32.c
index ba4a899045c..3fa6a93adbd 100644
--- a/arch/ppc64/kernel/ioctl32.c
+++ b/arch/powerpc/kernel/ioctl32.c
@@ -1,6 +1,6 @@
-/*
+/*
* ioctl32.c: Conversion between 32bit and 64bit native ioctls.
- *
+ *
* Based on sparc64 ioctl32.c by:
*
* Copyright (C) 1997-2000 Jakub Jelinek (jakub@redhat.com)
diff --git a/arch/ppc64/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 87474584033..4b7940693f3 100644
--- a/arch/ppc64/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -5,12 +5,12 @@
* Copyright (C) 1992 Linus Torvalds
* Adapted from arch/i386 by Gary Thomas
* Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- * Updated and modified by Cort Dougan (cort@cs.nmt.edu)
- * Copyright (C) 1996 Cort Dougan
+ * Updated and modified by Cort Dougan <cort@fsmlabs.com>
+ * Copyright (C) 1996-2001 Cort Dougan
* Adapted for Power Macintosh by Paul Mackerras
* Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
* Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
- *
+ *
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
@@ -21,6 +21,14 @@
* instead of just grabbing them. Thus setups with different IRQ numbers
* shouldn't result in any weird surprises, and installing new handlers
* should be easier.
+ *
+ * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
+ * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
+ * mask register (of which only 16 are defined), hence the weird shifting
+ * and complement of the cached_irq_mask. I want to be able to stuff
+ * this right into the SIU SMASK register.
+ * Many of the prep/chrp functions are conditional compiled on CONFIG_8xx
+ * to reduce code space and undefined function references.
*/
#include <linux/errno.h>
@@ -29,6 +37,7 @@
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
+#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
@@ -40,9 +49,13 @@
#include <linux/irq.h>
#include <linux/proc_fs.h>
#include <linux/random.h>
-#include <linux/kallsyms.h>
+#include <linux/seq_file.h>
+#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
+#ifdef CONFIG_PPC64
+#include <linux/kallsyms.h>
+#endif
#include <asm/uaccess.h>
#include <asm/system.h>
@@ -52,35 +65,54 @@
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
-#include <asm/iseries/it_lp_queue.h>
#include <asm/machdep.h>
+#ifdef CONFIG_PPC64
+#include <asm/iseries/it_lp_queue.h>
#include <asm/paca.h>
+#endif
-#ifdef CONFIG_SMP
-extern void iSeries_smp_message_recv( struct pt_regs * );
+static int ppc_spurious_interrupts;
+
+#if defined(CONFIG_PPC_ISERIES) && defined(CONFIG_SMP)
+extern void iSeries_smp_message_recv(struct pt_regs *);
+#endif
+
+#ifdef CONFIG_PPC32
+#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
+
+unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
+atomic_t ppc_n_lost_interrupts;
+
+#ifdef CONFIG_TAU_INT
+extern int tau_initialized;
+extern int tau_interrupts(int);
+#endif
+
+#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
+extern atomic_t ipi_recv;
+extern atomic_t ipi_sent;
#endif
+#endif /* CONFIG_PPC32 */
-extern irq_desc_t irq_desc[NR_IRQS];
+#ifdef CONFIG_PPC64
EXPORT_SYMBOL(irq_desc);
int distribute_irqs = 1;
int __irq_offset_value;
-int ppc_spurious_interrupts;
u64 ppc64_interrupt_controller;
+#endif /* CONFIG_PPC64 */
int show_interrupts(struct seq_file *p, void *v)
{
- int i = *(loff_t *) v, j;
- struct irqaction * action;
+ int i = *(loff_t *)v, j;
+ struct irqaction *action;
irq_desc_t *desc;
unsigned long flags;
if (i == 0) {
- seq_printf(p, " ");
- for (j=0; j<NR_CPUS; j++) {
- if (cpu_online(j))
- seq_printf(p, "CPU%d ",j);
- }
+ seq_puts(p, " ");
+ for_each_online_cpu(j)
+ seq_printf(p, "CPU%d ", j);
seq_putc(p, '\n');
}
@@ -92,26 +124,41 @@ int show_interrupts(struct seq_file *p, void *v)
goto skip;
seq_printf(p, "%3d: ", i);
#ifdef CONFIG_SMP
- for (j = 0; j < NR_CPUS; j++) {
- if (cpu_online(j))
- seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
- }
+ for_each_online_cpu(j)
+ seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#else
seq_printf(p, "%10u ", kstat_irqs(i));
#endif /* CONFIG_SMP */
if (desc->handler)
- seq_printf(p, " %s ", desc->handler->typename );
+ seq_printf(p, " %s ", desc->handler->typename);
else
- seq_printf(p, " None ");
+ seq_puts(p, " None ");
seq_printf(p, "%s", (desc->status & IRQ_LEVEL) ? "Level " : "Edge ");
- seq_printf(p, " %s",action->name);
- for (action=action->next; action; action = action->next)
+ seq_printf(p, " %s", action->name);
+ for (action = action->next; action; action = action->next)
seq_printf(p, ", %s", action->name);
seq_putc(p, '\n');
skip:
spin_unlock_irqrestore(&desc->lock, flags);
- } else if (i == NR_IRQS)
+ } else if (i == NR_IRQS) {
+#ifdef CONFIG_PPC32
+#ifdef CONFIG_TAU_INT
+ if (tau_initialized) {
+ seq_puts(p, "TAU: ");
+ for (j = 0; j < NR_CPUS; j++)
+ if (cpu_online(j))
+ seq_printf(p, "%10u ", tau_interrupts(j));
+ seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
+ }
+#endif
+#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
+ /* should this be per processor send/receive? */
+ seq_printf(p, "IPI (recv/sent): %10u/%u\n",
+ atomic_read(&ipi_recv), atomic_read(&ipi_sent));
+#endif
+#endif /* CONFIG_PPC32 */
seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
+ }
return 0;
}
@@ -144,126 +191,6 @@ void fixup_irqs(cpumask_t map)
}
#endif
-extern int noirqdebug;
-
-/*
- * Eventually, this should take an array of interrupts and an array size
- * so it can dispatch multiple interrupts.
- */
-void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
-{
- int status;
- struct irqaction *action;
- int cpu = smp_processor_id();
- irq_desc_t *desc = get_irq_desc(irq);
- irqreturn_t action_ret;
-#ifdef CONFIG_IRQSTACKS
- struct thread_info *curtp, *irqtp;
-#endif
-
- kstat_cpu(cpu).irqs[irq]++;
-
- if (desc->status & IRQ_PER_CPU) {
- /* no locking required for CPU-local interrupts: */
- ack_irq(irq);
- action_ret = handle_IRQ_event(irq, regs, desc->action);
- desc->handler->end(irq);
- return;
- }
-
- spin_lock(&desc->lock);
- ack_irq(irq);
- /*
- REPLAY is when Linux resends an IRQ that was dropped earlier
- WAITING is used by probe to mark irqs that are being tested
- */
- status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
- status |= IRQ_PENDING; /* we _want_ to handle it */
-
- /*
- * If the IRQ is disabled for whatever reason, we cannot
- * use the action we have.
- */
- action = NULL;
- if (likely(!(status & (IRQ_DISABLED | IRQ_INPROGRESS)))) {
- action = desc->action;
- if (!action || !action->handler) {
- ppc_spurious_interrupts++;
- printk(KERN_DEBUG "Unhandled interrupt %x, disabled\n", irq);
- /* We can't call disable_irq here, it would deadlock */
- if (!desc->depth)
- desc->depth = 1;
- desc->status |= IRQ_DISABLED;
- /* This is not a real spurrious interrupt, we
- * have to eoi it, so we jump to out
- */
- mask_irq(irq);
- goto out;
- }
- status &= ~IRQ_PENDING; /* we commit to handling */
- status |= IRQ_INPROGRESS; /* we are handling it */
- }
- desc->status = status;
-
- /*
- * If there is no IRQ handler or it was disabled, exit early.
- Since we set PENDING, if another processor is handling
- a different instance of this same irq, the other processor
- will take care of it.
- */
- if (unlikely(!action))
- goto out;
-
- /*
- * Edge triggered interrupts need to remember
- * pending events.
- * This applies to any hw interrupts that allow a second
- * instance of the same irq to arrive while we are in do_IRQ
- * or in the handler. But the code here only handles the _second_
- * instance of the irq, not the third or fourth. So it is mostly
- * useful for irq hardware that does not mask cleanly in an
- * SMP environment.
- */
- for (;;) {
- spin_unlock(&desc->lock);
-
-#ifdef CONFIG_IRQSTACKS
- /* Switch to the irq stack to handle this */
- curtp = current_thread_info();
- irqtp = hardirq_ctx[smp_processor_id()];
- if (curtp != irqtp) {
- irqtp->task = curtp->task;
- irqtp->flags = 0;
- action_ret = call_handle_IRQ_event(irq, regs, action, irqtp);
- irqtp->task = NULL;
- if (irqtp->flags)
- set_bits(irqtp->flags, &curtp->flags);
- } else
-#endif
- action_ret = handle_IRQ_event(irq, regs, action);
-
- spin_lock(&desc->lock);
- if (!noirqdebug)
- note_interrupt(irq, desc, action_ret, regs);
- if (likely(!(desc->status & IRQ_PENDING)))
- break;
- desc->status &= ~IRQ_PENDING;
- }
-out:
- desc->status &= ~IRQ_INPROGRESS;
- /*
- * The ->end() handler has to deal with interrupts which got
- * disabled while the handler was running.
- */
- if (desc->handler) {
- if (desc->handler->end)
- desc->handler->end(irq);
- else if (desc->handler->enable)
- desc->handler->enable(irq);
- }
- spin_unlock(&desc->lock);
-}
-
#ifdef CONFIG_PPC_ISERIES
void do_IRQ(struct pt_regs *regs)
{
@@ -310,8 +237,11 @@ void do_IRQ(struct pt_regs *regs)
void do_IRQ(struct pt_regs *regs)
{
int irq;
+#ifdef CONFIG_IRQSTACKS
+ struct thread_info *curtp, *irqtp;
+#endif
- irq_enter();
+ irq_enter();
#ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 2KB free? */
@@ -328,20 +258,44 @@ void do_IRQ(struct pt_regs *regs)
}
#endif
+ /*
+ * Every platform is required to implement ppc_md.get_irq.
+ * This function will either return an irq number or -1 to
+ * indicate there are no more pending.
+ * The value -2 is for buggy hardware and means that this IRQ
+ * has already been handled. -- Tom
+ */
irq = ppc_md.get_irq(regs);
- if (irq >= 0)
- ppc_irq_dispatch_handler(regs, irq);
- else
- /* That's not SMP safe ... but who cares ? */
- ppc_spurious_interrupts++;
-
- irq_exit();
+ if (irq >= 0) {
+#ifdef CONFIG_IRQSTACKS
+ /* Switch to the irq stack to handle this */
+ curtp = current_thread_info();
+ irqtp = hardirq_ctx[smp_processor_id()];
+ if (curtp != irqtp) {
+ irqtp->task = curtp->task;
+ irqtp->flags = 0;
+ call___do_IRQ(irq, regs, irqtp);
+ irqtp->task = NULL;
+ if (irqtp->flags)
+ set_bits(irqtp->flags, &curtp->flags);
+ } else
+#endif
+ __do_IRQ(irq, regs);
+ } else
+#ifdef CONFIG_PPC32
+ if (irq != -2)
+#endif
+ /* That's not SMP safe ... but who cares ? */
+ ppc_spurious_interrupts++;
+ irq_exit();
}
+
#endif /* CONFIG_PPC_ISERIES */
void __init init_IRQ(void)
{
+#ifdef CONFIG_PPC64
static int once = 0;
if (once)
@@ -349,10 +303,14 @@ void __init init_IRQ(void)
once++;
+#endif
ppc_md.init_IRQ();
+#ifdef CONFIG_PPC64
irq_ctx_init();
+#endif
}
+#ifdef CONFIG_PPC64
#ifndef CONFIG_PPC_ISERIES
/*
* Virtual IRQ mapping code, used on systems with XICS interrupt controllers.
@@ -517,3 +475,4 @@ static int __init setup_noirqdistrib(char *str)
}
__setup("noirqdistrib", setup_noirqdistrib);
+#endif /* CONFIG_PPC64 */
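
With ppc_irq_dispatch_handler() gone, do_IRQ() feeds whatever ppc_md.get_irq() returns straight into the generic __do_IRQ(): -1 means nothing is pending and, on 32-bit, -2 means buggy hardware already handled the interrupt and it should not be counted as spurious. A minimal model of that dispatch decision follows (helper names are made up, and the IRQ-stack switch is omitted).

#include <stdio.h>

#define NO_IRQ_PENDING      (-1)    /* ppc_md.get_irq(): nothing to do   */
#define IRQ_ALREADY_HANDLED (-2)    /* buggy hardware already handled it */

static unsigned int spurious;

static void handle_irq(int irq)     /* stand-in for __do_IRQ()           */
{
    printf("dispatch irq %d\n", irq);
}

/* model of the post-patch do_IRQ() decision, minus the IRQ-stack switch */
static void do_irq_model(int irq)
{
    if (irq >= 0)
        handle_irq(irq);
    else if (irq != IRQ_ALREADY_HANDLED)
        spurious++;                 /* -1 and other junk count as spurious */
}

int main(void)
{
    do_irq_model(7);
    do_irq_model(IRQ_ALREADY_HANDLED);
    do_irq_model(NO_IRQ_PENDING);
    printf("spurious=%u\n", spurious);  /* prints 1 */
    return 0;
}
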
diff --git a/arch/ppc64/kernel/lparcfg.c b/arch/powerpc/kernel/lparcfg.c
index 3e7b2f28ec8..5e954fae031 100644
--- a/arch/ppc64/kernel/lparcfg.c
+++ b/arch/powerpc/kernel/lparcfg.c
@@ -35,6 +35,7 @@
#include <asm/time.h>
#include <asm/iseries/it_exp_vpd_panel.h>
#include <asm/prom.h>
+#include <asm/systemcfg.h>
#define MODULE_VERS "1.6"
#define MODULE_NAME "lparcfg"
@@ -96,7 +97,7 @@ static unsigned long get_purr(void)
#define lparcfg_write NULL
-/*
+/*
* Methods used to fetch LPAR data when running on an iSeries platform.
*/
static int lparcfg_data(struct seq_file *m, void *v)
@@ -168,7 +169,7 @@ static int lparcfg_data(struct seq_file *m, void *v)
#endif /* CONFIG_PPC_ISERIES */
#ifdef CONFIG_PPC_PSERIES
-/*
+/*
* Methods used to fetch LPAR data when running on a pSeries platform.
*/
@@ -177,7 +178,7 @@ static int lparcfg_data(struct seq_file *m, void *v)
* entitled_capacity,unallocated_capacity,
* aggregation, resource_capability).
*
- * R4 = Entitled Processor Capacity Percentage.
+ * R4 = Entitled Processor Capacity Percentage.
* R5 = Unallocated Processor Capacity Percentage.
* R6 (AABBCCDDEEFFGGHH).
* XXXX - reserved (0)
@@ -190,7 +191,7 @@ static int lparcfg_data(struct seq_file *m, void *v)
* XX - variable processor Capacity Weight
* XX - Unallocated Variable Processor Capacity Weight.
* XXXX - Active processors in Physical Processor Pool.
- * XXXX - Processors active on platform.
+ * XXXX - Processors active on platform.
*/
static unsigned int h_get_ppp(unsigned long *entitled,
unsigned long *unallocated,
@@ -273,7 +274,7 @@ static void parse_system_parameter_string(struct seq_file *m)
if (!workbuffer) {
printk(KERN_ERR "%s %s kmalloc failure at line %d \n",
__FILE__, __FUNCTION__, __LINE__);
- kfree(local_buffer);
+ kfree(local_buffer);
return;
}
#ifdef LPARCFG_DEBUG
@@ -371,7 +372,7 @@ static int lparcfg_data(struct seq_file *m, void *v)
lrdrp = (int *)get_property(rtas_node, "ibm,lrdr-capacity", NULL);
if (lrdrp == NULL) {
- partition_potential_processors = systemcfg->processorCount;
+ partition_potential_processors = _systemcfg->processorCount;
} else {
partition_potential_processors = *(lrdrp + 4);
}
diff --git a/arch/powerpc/kernel/misc_32.S b/arch/powerpc/kernel/misc_32.S
index 3bedb532aed..f6d84a75ed2 100644
--- a/arch/powerpc/kernel/misc_32.S
+++ b/arch/powerpc/kernel/misc_32.S
@@ -519,7 +519,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
*
* flush_icache_range(unsigned long start, unsigned long stop)
*/
-_GLOBAL(flush_icache_range)
+_GLOBAL(__flush_icache_range)
BEGIN_FTR_SECTION
blr /* for 601, do nothing */
END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
@@ -607,27 +607,6 @@ _GLOBAL(invalidate_dcache_range)
sync /* wait for dcbi's to get to ram */
blr
-#ifdef CONFIG_NOT_COHERENT_CACHE
-/*
- * 40x cores have 8K or 16K dcache and 32 byte line size.
- * 44x has a 32K dcache and 32 byte line size.
- * 8xx has 1, 2, 4, 8K variants.
- * For now, cover the worst case of the 44x.
- * Must be called with external interrupts disabled.
- */
-#define CACHE_NWAYS 64
-#define CACHE_NLINES 16
-
-_GLOBAL(flush_dcache_all)
- li r4, (2 * CACHE_NWAYS * CACHE_NLINES)
- mtctr r4
- lis r5, KERNELBASE@h
-1: lwz r3, 0(r5) /* Load one word from every line */
- addi r5, r5, L1_CACHE_BYTES
- bdnz 1b
- blr
-#endif /* CONFIG_NOT_COHERENT_CACHE */
-
/*
* Flush a particular page from the data cache to RAM.
* Note: this is necessary because the instruction cache does *not*
diff --git a/arch/powerpc/kernel/misc_64.S b/arch/powerpc/kernel/misc_64.S
index ae1433da09b..ae48a002f81 100644
--- a/arch/powerpc/kernel/misc_64.S
+++ b/arch/powerpc/kernel/misc_64.S
@@ -89,12 +89,12 @@ _GLOBAL(call_do_softirq)
mtlr r0
blr
-_GLOBAL(call_handle_IRQ_event)
+_GLOBAL(call___do_IRQ)
mflr r0
std r0,16(r1)
- stdu r1,THREAD_SIZE-112(r6)
- mr r1,r6
- bl .handle_IRQ_event
+ stdu r1,THREAD_SIZE-112(r5)
+ mr r1,r5
+ bl .__do_IRQ
ld r1,0(r1)
ld r0,16(r1)
mtlr r0
diff --git a/arch/ppc64/kernel/pacaData.c b/arch/powerpc/kernel/paca.c
index 3133c72b28e..3cf2517c5f9 100644
--- a/arch/ppc64/kernel/pacaData.c
+++ b/arch/powerpc/kernel/paca.c
@@ -15,7 +15,7 @@
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/page.h>
-
+#include <asm/systemcfg.h>
#include <asm/lppaca.h>
#include <asm/iseries/it_lp_queue.h>
#include <asm/paca.h>
@@ -24,15 +24,14 @@ static union {
struct systemcfg data;
u8 page[PAGE_SIZE];
} systemcfg_store __attribute__((__section__(".data.page.aligned")));
-struct systemcfg *systemcfg = &systemcfg_store.data;
-EXPORT_SYMBOL(systemcfg);
+struct systemcfg *_systemcfg = &systemcfg_store.data;
/* This symbol is provided by the linker - let it fill in the paca
* field correctly */
extern unsigned long __toc_start;
-/* The Paca is an array with one entry per processor. Each contains an
+/* The Paca is an array with one entry per processor. Each contains an
* lppaca, which contains the information shared between the
* hypervisor and Linux. Each also contains an ItLpRegSave area which
* is used by the hypervisor to save registers.
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 47d6f7e2ea9..5dcf4ba05ee 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -44,6 +44,7 @@
#include <asm/cputable.h>
#include <asm/btext.h>
#include <asm/div64.h>
+#include <asm/signal.h>
#ifdef CONFIG_8xx
#include <asm/commproc.h>
@@ -56,7 +57,6 @@ extern void machine_check_exception(struct pt_regs *regs);
extern void alignment_exception(struct pt_regs *regs);
extern void program_check_exception(struct pt_regs *regs);
extern void single_step_exception(struct pt_regs *regs);
-extern int do_signal(sigset_t *, struct pt_regs *);
extern int pmac_newworld;
extern int sys_sigreturn(struct pt_regs *regs);
@@ -188,9 +188,6 @@ EXPORT_SYMBOL(adb_try_handler_change);
EXPORT_SYMBOL(cuda_request);
EXPORT_SYMBOL(cuda_poll);
#endif /* CONFIG_ADB_CUDA */
-#if defined(CONFIG_PPC_MULTIPLATFORM) && defined(CONFIG_PPC32)
-EXPORT_SYMBOL(_machine);
-#endif
#ifdef CONFIG_PPC_PMAC
EXPORT_SYMBOL(sys_ctrler);
#endif
diff --git a/arch/ppc64/kernel/proc_ppc64.c b/arch/powerpc/kernel/proc_ppc64.c
index 24e955ee948..a1c19502fe8 100644
--- a/arch/ppc64/kernel/proc_ppc64.c
+++ b/arch/powerpc/kernel/proc_ppc64.c
@@ -1,18 +1,16 @@
/*
- * arch/ppc64/kernel/proc_ppc64.c
- *
* Copyright (C) 2001 Mike Corrigan & Dave Engebretsen IBM Corporation
- *
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
+ *
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
+ *
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
@@ -53,7 +51,7 @@ static int __init proc_ppc64_create(void)
if (!root)
return 1;
- if (!(systemcfg->platform & (PLATFORM_PSERIES | PLATFORM_CELL)))
+ if (!(platform_is_pseries() || _machine == PLATFORM_CELL))
return 0;
if (!proc_mkdir("rtas", root))
@@ -74,7 +72,7 @@ static int __init proc_ppc64_init(void)
if (!pde)
return 1;
pde->nlink = 1;
- pde->data = systemcfg;
+ pde->data = _systemcfg;
pde->size = PAGE_SIZE;
pde->proc_fops = &page_map_fops;
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index f645adb5753..6a5b468edb4 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -48,9 +48,6 @@
#include <asm/machdep.h>
#include <asm/pSeries_reconfig.h>
#include <asm/pci-bridge.h>
-#ifdef CONFIG_PPC64
-#include <asm/systemcfg.h>
-#endif
#ifdef DEBUG
#define DBG(fmt...) printk(KERN_ERR fmt)
@@ -74,10 +71,6 @@ struct isa_reg_property {
typedef int interpret_func(struct device_node *, unsigned long *,
int, int, int);
-extern struct rtas_t rtas;
-extern struct lmb lmb;
-extern unsigned long klimit;
-
static int __initdata dt_root_addr_cells;
static int __initdata dt_root_size_cells;
@@ -391,7 +384,7 @@ static int __devinit finish_node_interrupts(struct device_node *np,
#ifdef CONFIG_PPC64
/* We offset irq numbers for the u3 MPIC by 128 in PowerMac */
- if (systemcfg->platform == PLATFORM_POWERMAC && ic && ic->parent) {
+ if (_machine == PLATFORM_POWERMAC && ic && ic->parent) {
char *name = get_property(ic->parent, "name", NULL);
if (name && !strcmp(name, "u3"))
np->intrs[intrcount].line += 128;
@@ -1087,9 +1080,9 @@ void __init unflatten_device_tree(void)
static int __init early_init_dt_scan_cpus(unsigned long node,
const char *uname, int depth, void *data)
{
- char *type = of_get_flat_dt_prop(node, "device_type", NULL);
u32 *prop;
- unsigned long size = 0;
+ unsigned long size;
+ char *type = of_get_flat_dt_prop(node, "device_type", &size);
/* We are scanning "cpu" nodes only */
if (type == NULL || strcmp(type, "cpu") != 0)
@@ -1115,7 +1108,7 @@ static int __init early_init_dt_scan_cpus(unsigned long node,
#ifdef CONFIG_ALTIVEC
/* Check if we have a VMX and eventually update CPU features */
- prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", &size);
+ prop = (u32 *)of_get_flat_dt_prop(node, "ibm,vmx", NULL);
if (prop && (*prop) > 0) {
cur_cpu_spec->cpu_features |= CPU_FTR_ALTIVEC;
cur_cpu_spec->cpu_user_features |= PPC_FEATURE_HAS_ALTIVEC;
@@ -1161,13 +1154,9 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
prop = (u32 *)of_get_flat_dt_prop(node, "linux,platform", NULL);
if (prop == NULL)
return 0;
-#ifdef CONFIG_PPC64
- systemcfg->platform = *prop;
-#else
#ifdef CONFIG_PPC_MULTIPLATFORM
_machine = *prop;
#endif
-#endif
#ifdef CONFIG_PPC64
/* check if iommu is forced on or off */
@@ -1264,7 +1253,14 @@ static int __init early_init_dt_scan_memory(unsigned long node,
unsigned long l;
/* We are scanning "memory" nodes only */
- if (type == NULL || strcmp(type, "memory") != 0)
+ if (type == NULL) {
+ /*
+ * The longtrail doesn't have a device_type on the
+ * /memory node, so look for the node called /memory@0.
+ */
+ if (depth != 1 || strcmp(uname, "memory@0") != 0)
+ return 0;
+ } else if (strcmp(type, "memory") != 0)
return 0;
reg = (cell_t *)of_get_flat_dt_prop(node, "reg", &l);
@@ -1339,9 +1335,6 @@ void __init early_init_devtree(void *params)
of_scan_flat_dt(early_init_dt_scan_memory, NULL);
lmb_enforce_memory_limit(memory_limit);
lmb_analyze();
-#ifdef CONFIG_PPC64
- systemcfg->physicalMemorySize = lmb_phys_mem_size();
-#endif
lmb_reserve(0, __pa(klimit));
DBG("Phys. mem: %lx\n", lmb_phys_mem_size());
@@ -1908,7 +1901,7 @@ static int of_finish_dynamic_node(struct device_node *node,
/* We don't support that function on PowerMac, at least
* not yet
*/
- if (systemcfg->platform == PLATFORM_POWERMAC)
+ if (_machine == PLATFORM_POWERMAC)
return -ENODEV;
/* fix up new node's linux_phandle field */
@@ -1992,9 +1985,11 @@ int prom_add_property(struct device_node* np, struct property* prop)
*next = prop;
write_unlock(&devtree_lock);
+#ifdef CONFIG_PROC_DEVICETREE
/* try to add to proc as well if it was initialized */
if (np->pde)
proc_device_tree_add_prop(np->pde, prop);
+#endif /* CONFIG_PROC_DEVICETREE */
return 0;
}
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 6dc33d19fc2..4ce0105c308 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -94,11 +94,17 @@ extern const struct linux_logo logo_linux_clut224;
#ifdef CONFIG_PPC64
#define RELOC(x) (*PTRRELOC(&(x)))
#define ADDR(x) (u32) add_reloc_offset((unsigned long)(x))
+#define OF_WORKAROUNDS 0
#else
#define RELOC(x) (x)
#define ADDR(x) (u32) (x)
+#define OF_WORKAROUNDS of_workarounds
+int of_workarounds;
#endif
+#define OF_WA_CLAIM 1 /* do phys/virt claim separately, then map */
+#define OF_WA_LONGTRAIL 2 /* work around longtrail bugs */
+
#define PROM_BUG() do { \
prom_printf("kernel BUG at %s line 0x%x!\n", \
RELOC(__FILE__), __LINE__); \
@@ -111,11 +117,6 @@ extern const struct linux_logo logo_linux_clut224;
#define prom_debug(x...)
#endif
-#ifdef CONFIG_PPC32
-#define PLATFORM_POWERMAC _MACH_Pmac
-#define PLATFORM_CHRP _MACH_chrp
-#endif
-
typedef u32 prom_arg_t;
@@ -128,10 +129,11 @@ struct prom_args {
struct prom_t {
ihandle root;
- ihandle chosen;
+ phandle chosen;
int cpu;
ihandle stdout;
ihandle mmumap;
+ ihandle memory;
};
struct mem_map_entry {
@@ -360,16 +362,36 @@ static void __init prom_printf(const char *format, ...)
static unsigned int __init prom_claim(unsigned long virt, unsigned long size,
unsigned long align)
{
- int ret;
struct prom_t *_prom = &RELOC(prom);
- ret = call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
- (prom_arg_t)align);
- if (ret != -1 && _prom->mmumap != 0)
- /* old pmacs need us to map as well */
+ if (align == 0 && (OF_WORKAROUNDS & OF_WA_CLAIM)) {
+ /*
+ * Old OF requires we claim physical and virtual separately
+ * and then map explicitly (assuming virtual mode)
+ */
+ int ret;
+ prom_arg_t result;
+
+ ret = call_prom_ret("call-method", 5, 2, &result,
+ ADDR("claim"), _prom->memory,
+ align, size, virt);
+ if (ret != 0 || result == -1)
+ return -1;
+ ret = call_prom_ret("call-method", 5, 2, &result,
+ ADDR("claim"), _prom->mmumap,
+ align, size, virt);
+ if (ret != 0) {
+ call_prom("call-method", 4, 1, ADDR("release"),
+ _prom->memory, size, virt);
+ return -1;
+ }
+ /* the 0x12 is M (coherence) + PP == read/write */
call_prom("call-method", 6, 1,
- ADDR("map"), _prom->mmumap, 0, size, virt, virt);
- return ret;
+ ADDR("map"), _prom->mmumap, 0x12, size, virt, virt);
+ return virt;
+ }
+ return call_prom("claim", 3, 1, (prom_arg_t)virt, (prom_arg_t)size,
+ (prom_arg_t)align);
}
static void __init __attribute__((noreturn)) prom_panic(const char *reason)
@@ -415,11 +437,52 @@ static int inline prom_getproplen(phandle node, const char *pname)
return call_prom("getproplen", 2, 1, node, ADDR(pname));
}
-static int inline prom_setprop(phandle node, const char *pname,
- void *value, size_t valuelen)
+static void add_string(char **str, const char *q)
{
- return call_prom("setprop", 4, 1, node, ADDR(pname),
- (u32)(unsigned long) value, (u32) valuelen);
+ char *p = *str;
+
+ while (*q)
+ *p++ = *q++;
+ *p++ = ' ';
+ *str = p;
+}
+
+static char *tohex(unsigned int x)
+{
+ static char digits[] = "0123456789abcdef";
+ static char result[9];
+ int i;
+
+ result[8] = 0;
+ i = 8;
+ do {
+ --i;
+ result[i] = digits[x & 0xf];
+ x >>= 4;
+ } while (x != 0 && i > 0);
+ return &result[i];
+}
+
+static int __init prom_setprop(phandle node, const char *nodename,
+ const char *pname, void *value, size_t valuelen)
+{
+ char cmd[256], *p;
+
+ if (!(OF_WORKAROUNDS & OF_WA_LONGTRAIL))
+ return call_prom("setprop", 4, 1, node, ADDR(pname),
+ (u32)(unsigned long) value, (u32) valuelen);
+
+ /* gah... setprop doesn't work on longtrail, have to use interpret */
+ p = cmd;
+ add_string(&p, "dev");
+ add_string(&p, nodename);
+ add_string(&p, tohex((u32)(unsigned long) value));
+ add_string(&p, tohex(valuelen));
+ add_string(&p, tohex(ADDR(pname)));
+ add_string(&p, tohex(strlen(RELOC(pname))));
+ add_string(&p, "property");
+ *p = 0;
+ return call_prom("interpret", 1, 1, (u32)(unsigned long) cmd);
}
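
On the Longtrail (FirmWorks Open Firmware) the setprop client call is broken, so the new prom_setprop() falls back to handing the firmware a Forth command through "interpret": select the node with "dev <path>", push the value address, value length, property-name address and name length in hex, then run "property". The user-space sketch below shows how add_string()/tohex() assemble that command; the addresses are made-up placeholders, not real firmware values.

#include <stdio.h>
#include <string.h>

static void add_string(char **str, const char *q)
{
    while (*q)
        *(*str)++ = *q++;
    *(*str)++ = ' ';
}

static const char *tohex(unsigned int x)
{
    static const char digits[] = "0123456789abcdef";
    static char result[9];
    int i = 8;

    result[8] = 0;
    do {
        result[--i] = digits[x & 0xf];
        x >>= 4;
    } while (x != 0 && i > 0);
    return &result[i];
}

int main(void)
{
    char cmd[256], *p = cmd;
    const char *pname = "linux,initrd-start";

    add_string(&p, "dev");
    add_string(&p, "/chosen");
    add_string(&p, tohex(0xdeadbeef));               /* value address (made up) */
    add_string(&p, tohex(8));                        /* value length            */
    add_string(&p, tohex(0x00c0ffee));               /* name address (made up)  */
    add_string(&p, tohex((unsigned int)strlen(pname)));  /* name length         */
    add_string(&p, "property");
    *p = 0;
    printf("%s\n", cmd);                             /* handed to "interpret"   */
    return 0;
}
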
/* We can't use the standard versions because of RELOC headaches. */
@@ -980,7 +1043,7 @@ static void __init prom_instantiate_rtas(void)
rtas_inst = call_prom("open", 1, 1, ADDR("/rtas"));
if (!IHANDLE_VALID(rtas_inst)) {
- prom_printf("opening rtas package failed");
+ prom_printf("opening rtas package failed (%x)\n", rtas_inst);
return;
}
@@ -988,7 +1051,7 @@ static void __init prom_instantiate_rtas(void)
if (call_prom_ret("call-method", 3, 2, &entry,
ADDR("instantiate-rtas"),
- rtas_inst, base) == PROM_ERROR
+ rtas_inst, base) != 0
|| entry == 0) {
prom_printf(" failed\n");
return;
@@ -997,8 +1060,10 @@ static void __init prom_instantiate_rtas(void)
reserve_mem(base, size);
- prom_setprop(rtas_node, "linux,rtas-base", &base, sizeof(base));
- prom_setprop(rtas_node, "linux,rtas-entry", &entry, sizeof(entry));
+ prom_setprop(rtas_node, "/rtas", "linux,rtas-base",
+ &base, sizeof(base));
+ prom_setprop(rtas_node, "/rtas", "linux,rtas-entry",
+ &entry, sizeof(entry));
prom_debug("rtas base = 0x%x\n", base);
prom_debug("rtas entry = 0x%x\n", entry);
@@ -1089,10 +1154,6 @@ static void __init prom_initialize_tce_table(void)
if (base < local_alloc_bottom)
local_alloc_bottom = base;
- /* Save away the TCE table attributes for later use. */
- prom_setprop(node, "linux,tce-base", &base, sizeof(base));
- prom_setprop(node, "linux,tce-size", &minsize, sizeof(minsize));
-
/* It seems OF doesn't null-terminate the path :-( */
memset(path, 0, sizeof(path));
/* Call OF to setup the TCE hardware */
@@ -1101,6 +1162,10 @@ static void __init prom_initialize_tce_table(void)
prom_printf("package-to-path failed\n");
}
+ /* Save away the TCE table attributes for later use. */
+ prom_setprop(node, path, "linux,tce-base", &base, sizeof(base));
+ prom_setprop(node, path, "linux,tce-size", &minsize, sizeof(minsize));
+
prom_debug("TCE table: %s\n", path);
prom_debug("\tnode = 0x%x\n", node);
prom_debug("\tbase = 0x%x\n", base);
@@ -1342,6 +1407,7 @@ static void __init prom_init_client_services(unsigned long pp)
/*
* For really old powermacs, we need to map things we claim.
* For that, we need the ihandle of the mmu.
+ * Also, on the longtrail, we need to work around other bugs.
*/
static void __init prom_find_mmu(void)
{
@@ -1355,12 +1421,19 @@ static void __init prom_find_mmu(void)
if (prom_getprop(oprom, "model", version, sizeof(version)) <= 0)
return;
version[sizeof(version) - 1] = 0;
- prom_printf("OF version is '%s'\n", version);
/* XXX might need to add other versions here */
- if (strcmp(version, "Open Firmware, 1.0.5") != 0)
+ if (strcmp(version, "Open Firmware, 1.0.5") == 0)
+ of_workarounds = OF_WA_CLAIM;
+ else if (strncmp(version, "FirmWorks,3.", 12) == 0) {
+ of_workarounds = OF_WA_CLAIM | OF_WA_LONGTRAIL;
+ call_prom("interpret", 1, 1, "dev /memory 0 to allow-reclaim");
+ } else
return;
+ _prom->memory = call_prom("open", 1, 1, ADDR("/memory"));
prom_getprop(_prom->chosen, "mmu", &_prom->mmumap,
sizeof(_prom->mmumap));
+ if (!IHANDLE_VALID(_prom->memory) || !IHANDLE_VALID(_prom->mmumap))
+ of_workarounds &= ~OF_WA_CLAIM; /* hmmm */
}
#else
#define prom_find_mmu()
@@ -1382,16 +1455,17 @@ static void __init prom_init_stdout(void)
memset(path, 0, 256);
call_prom("instance-to-path", 3, 1, _prom->stdout, path, 255);
val = call_prom("instance-to-package", 1, 1, _prom->stdout);
- prom_setprop(_prom->chosen, "linux,stdout-package", &val, sizeof(val));
+ prom_setprop(_prom->chosen, "/chosen", "linux,stdout-package",
+ &val, sizeof(val));
prom_printf("OF stdout device is: %s\n", RELOC(of_stdout_device));
- prom_setprop(_prom->chosen, "linux,stdout-path",
- RELOC(of_stdout_device), strlen(RELOC(of_stdout_device))+1);
+ prom_setprop(_prom->chosen, "/chosen", "linux,stdout-path",
+ path, strlen(path) + 1);
/* If it's a display, note it */
memset(type, 0, sizeof(type));
prom_getprop(val, "device_type", type, sizeof(type));
if (strcmp(type, RELOC("display")) == 0)
- prom_setprop(val, "linux,boot-display", NULL, 0);
+ prom_setprop(val, path, "linux,boot-display", NULL, 0);
}
static void __init prom_close_stdin(void)
@@ -1514,7 +1588,7 @@ static void __init prom_check_displays(void)
/* Success */
prom_printf("done\n");
- prom_setprop(node, "linux,opened", NULL, 0);
+ prom_setprop(node, path, "linux,opened", NULL, 0);
/* Setup a usable color table when the appropriate
* method is available. Should update this to set-colors */
@@ -1884,9 +1958,11 @@ static void __init fixup_device_tree(void)
/* interrupt on this revision of u3 is number 0 and level */
interrupts[0] = 0;
interrupts[1] = 1;
- prom_setprop(i2c, "interrupts", &interrupts, sizeof(interrupts));
+ prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupts",
+ &interrupts, sizeof(interrupts));
parent = (u32)mpic;
- prom_setprop(i2c, "interrupt-parent", &parent, sizeof(parent));
+ prom_setprop(i2c, "/u3@0,f8000000/i2c@f8001000", "interrupt-parent",
+ &parent, sizeof(parent));
#endif
}
@@ -1922,11 +1998,11 @@ static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
RELOC(prom_initrd_end) = RELOC(prom_initrd_start) + r4;
val = RELOC(prom_initrd_start);
- prom_setprop(_prom->chosen, "linux,initrd-start", &val,
- sizeof(val));
+ prom_setprop(_prom->chosen, "/chosen", "linux,initrd-start",
+ &val, sizeof(val));
val = RELOC(prom_initrd_end);
- prom_setprop(_prom->chosen, "linux,initrd-end", &val,
- sizeof(val));
+ prom_setprop(_prom->chosen, "/chosen", "linux,initrd-end",
+ &val, sizeof(val));
reserve_mem(RELOC(prom_initrd_start),
RELOC(prom_initrd_end) - RELOC(prom_initrd_start));
@@ -1969,14 +2045,15 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
prom_init_client_services(pp);
/*
- * Init prom stdout device
+ * See if this OF is old enough that we need to do explicit maps
+ * and other workarounds
*/
- prom_init_stdout();
+ prom_find_mmu();
/*
- * See if this OF is old enough that we need to do explicit maps
+ * Init prom stdout device
*/
- prom_find_mmu();
+ prom_init_stdout();
/*
* Check for an initrd
@@ -1989,14 +2066,15 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
*/
RELOC(of_platform) = prom_find_machine_type();
getprop_rval = RELOC(of_platform);
- prom_setprop(_prom->chosen, "linux,platform",
+ prom_setprop(_prom->chosen, "/chosen", "linux,platform",
&getprop_rval, sizeof(getprop_rval));
#ifdef CONFIG_PPC_PSERIES
/*
* On pSeries, inform the firmware about our capabilities
*/
- if (RELOC(of_platform) & PLATFORM_PSERIES)
+ if (RELOC(of_platform) == PLATFORM_PSERIES ||
+ RELOC(of_platform) == PLATFORM_PSERIES_LPAR)
prom_send_capabilities();
#endif
@@ -2050,21 +2128,23 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
* Fill in some infos for use by the kernel later on
*/
if (RELOC(prom_memory_limit))
- prom_setprop(_prom->chosen, "linux,memory-limit",
+ prom_setprop(_prom->chosen, "/chosen", "linux,memory-limit",
&RELOC(prom_memory_limit),
sizeof(prom_memory_limit));
#ifdef CONFIG_PPC64
if (RELOC(ppc64_iommu_off))
- prom_setprop(_prom->chosen, "linux,iommu-off", NULL, 0);
+ prom_setprop(_prom->chosen, "/chosen", "linux,iommu-off",
+ NULL, 0);
if (RELOC(iommu_force_on))
- prom_setprop(_prom->chosen, "linux,iommu-force-on", NULL, 0);
+ prom_setprop(_prom->chosen, "/chosen", "linux,iommu-force-on",
+ NULL, 0);
if (RELOC(prom_tce_alloc_start)) {
- prom_setprop(_prom->chosen, "linux,tce-alloc-start",
+ prom_setprop(_prom->chosen, "/chosen", "linux,tce-alloc-start",
&RELOC(prom_tce_alloc_start),
sizeof(prom_tce_alloc_start));
- prom_setprop(_prom->chosen, "linux,tce-alloc-end",
+ prom_setprop(_prom->chosen, "/chosen", "linux,tce-alloc-end",
&RELOC(prom_tce_alloc_end),
sizeof(prom_tce_alloc_end));
}
@@ -2081,8 +2161,13 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
prom_printf("copying OF device tree ...\n");
flatten_device_tree();
- /* in case stdin is USB and still active on IBM machines... */
- prom_close_stdin();
+ /*
+ * in case stdin is USB and still active on IBM machines...
+ * Unfortunately quiesce crashes on some powermacs if we have
+ * closed stdin already (in particular the powerbook 101).
+ */
+ if (RELOC(of_platform) != PLATFORM_POWERMAC)
+ prom_close_stdin();
/*
* Call OF "quiesce" method to shut down pending DMA's from
diff --git a/arch/powerpc/kernel/rtas-proc.c b/arch/powerpc/kernel/rtas-proc.c
index 5bdd5b079d9..ae1a36449cc 100644
--- a/arch/powerpc/kernel/rtas-proc.c
+++ b/arch/powerpc/kernel/rtas-proc.c
@@ -259,7 +259,7 @@ static int __init proc_rtas_init(void)
{
struct proc_dir_entry *entry;
- if (!(systemcfg->platform & PLATFORM_PSERIES))
+ if (_machine != PLATFORM_PSERIES && _machine != PLATFORM_PSERIES_LPAR)
return 1;
rtas_node = of_find_node_by_name(NULL, "rtas");
diff --git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c
index 9d4e07f6f1e..4283fa33f78 100644
--- a/arch/powerpc/kernel/rtas.c
+++ b/arch/powerpc/kernel/rtas.c
@@ -29,9 +29,6 @@
#include <asm/delay.h>
#include <asm/uaccess.h>
#include <asm/lmb.h>
-#ifdef CONFIG_PPC64
-#include <asm/systemcfg.h>
-#endif
struct rtas_t rtas = {
.lock = SPIN_LOCK_UNLOCKED
@@ -671,7 +668,7 @@ void __init rtas_initialize(void)
* the stop-self token if any
*/
#ifdef CONFIG_PPC64
- if (systemcfg->platform == PLATFORM_PSERIES_LPAR)
+ if (_machine == PLATFORM_PSERIES_LPAR)
rtas_region = min(lmb.rmo_size, RTAS_INSTANTIATE_MAX);
#endif
rtas_rmo_buf = lmb_alloc_base(RTAS_RMOBUF_MAX, PAGE_SIZE, rtas_region);
diff --git a/arch/ppc64/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index 3c3f19192fc..0e5a8e11665 100644
--- a/arch/ppc64/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -5,19 +5,19 @@
* Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM
*
* RTAS specific routines for PCI.
- *
+ *
* Based on code from pci.c, chrp_pci.c and pSeries_pci.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
+ *
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
+ *
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
@@ -47,7 +47,7 @@ static int write_pci_config;
static int ibm_read_pci_config;
static int ibm_write_pci_config;
-static int config_access_valid(struct pci_dn *dn, int where)
+static inline int config_access_valid(struct pci_dn *dn, int where)
{
if (where < 256)
return 1;
@@ -72,16 +72,14 @@ static int of_device_available(struct device_node * dn)
return 0;
}
-static int rtas_read_config(struct device_node *dn, int where, int size, u32 *val)
+static int rtas_read_config(struct pci_dn *pdn, int where, int size, u32 *val)
{
int returnval = -1;
unsigned long buid, addr;
int ret;
- struct pci_dn *pdn;
- if (!dn || !dn->data)
+ if (!pdn)
return PCIBIOS_DEVICE_NOT_FOUND;
- pdn = dn->data;
if (!config_access_valid(pdn, where))
return PCIBIOS_BAD_REGISTER_NUMBER;
@@ -90,7 +88,7 @@ static int rtas_read_config(struct device_node *dn, int where, int size, u32 *va
buid = pdn->phb->buid;
if (buid) {
ret = rtas_call(ibm_read_pci_config, 4, 2, &returnval,
- addr, buid >> 32, buid & 0xffffffff, size);
+ addr, BUID_HI(buid), BUID_LO(buid), size);
} else {
ret = rtas_call(read_pci_config, 2, 2, &returnval, addr, size);
}
@@ -100,7 +98,7 @@ static int rtas_read_config(struct device_node *dn, int where, int size, u32 *va
return PCIBIOS_DEVICE_NOT_FOUND;
if (returnval == EEH_IO_ERROR_VALUE(size) &&
- eeh_dn_check_failure (dn, NULL))
+ eeh_dn_check_failure (pdn->node, NULL))
return PCIBIOS_DEVICE_NOT_FOUND;
return PCIBIOS_SUCCESSFUL;
@@ -118,23 +116,23 @@ static int rtas_pci_read_config(struct pci_bus *bus,
busdn = bus->sysdata; /* must be a phb */
/* Search only direct children of the bus */
- for (dn = busdn->child; dn; dn = dn->sibling)
- if (dn->data && PCI_DN(dn)->devfn == devfn
+ for (dn = busdn->child; dn; dn = dn->sibling) {
+ struct pci_dn *pdn = PCI_DN(dn);
+ if (pdn && pdn->devfn == devfn
&& of_device_available(dn))
- return rtas_read_config(dn, where, size, val);
+ return rtas_read_config(pdn, where, size, val);
+ }
return PCIBIOS_DEVICE_NOT_FOUND;
}
-int rtas_write_config(struct device_node *dn, int where, int size, u32 val)
+int rtas_write_config(struct pci_dn *pdn, int where, int size, u32 val)
{
unsigned long buid, addr;
int ret;
- struct pci_dn *pdn;
- if (!dn || !dn->data)
+ if (!pdn)
return PCIBIOS_DEVICE_NOT_FOUND;
- pdn = dn->data;
if (!config_access_valid(pdn, where))
return PCIBIOS_BAD_REGISTER_NUMBER;
@@ -142,7 +140,8 @@ int rtas_write_config(struct device_node *dn, int where, int size, u32 val)
(pdn->devfn << 8) | (where & 0xff);
buid = pdn->phb->buid;
if (buid) {
- ret = rtas_call(ibm_write_pci_config, 5, 1, NULL, addr, buid >> 32, buid & 0xffffffff, size, (ulong) val);
+ ret = rtas_call(ibm_write_pci_config, 5, 1, NULL, addr,
+ BUID_HI(buid), BUID_LO(buid), size, (ulong) val);
} else {
ret = rtas_call(write_pci_config, 3, 1, NULL, addr, size, (ulong)val);
}
@@ -165,10 +164,12 @@ static int rtas_pci_write_config(struct pci_bus *bus,
busdn = bus->sysdata; /* must be a phb */
/* Search only direct children of the bus */
- for (dn = busdn->child; dn; dn = dn->sibling)
- if (dn->data && PCI_DN(dn)->devfn == devfn
+ for (dn = busdn->child; dn; dn = dn->sibling) {
+ struct pci_dn *pdn = PCI_DN(dn);
+ if (pdn && pdn->devfn == devfn
&& of_device_available(dn))
- return rtas_write_config(dn, where, size, val);
+ return rtas_write_config(pdn, where, size, val);
+ }
return PCIBIOS_DEVICE_NOT_FOUND;
}
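
For reference, the BUID_HI()/BUID_LO() helpers used above split the 64-bit bus unit ID into the two 32-bit cells passed to rtas_call(); their definitions are visible later in this patch where they are removed from eeh.c. A standalone sketch with a made-up BUID (casts added only so the halves print cleanly):

#include <stdio.h>

#define BUID_HI(buid) ((unsigned int)((buid) >> 32))
#define BUID_LO(buid) ((unsigned int)((buid) & 0xffffffff))

int main(void)
{
        unsigned long long buid = 0x800000020000001ULL;  /* made-up value */

        /* prints hi=08000000 lo=20000001 */
        printf("hi=%08x lo=%08x\n", BUID_HI(buid), BUID_LO(buid));
        return 0;
}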
@@ -221,7 +222,7 @@ static void python_countermeasures(struct device_node *dev,
/* Python's register file is 1 MB in size. */
chip_regs = ioremap(reg_struct.address & ~(0xfffffUL), 0x100000);
- /*
+ /*
* Firmware doesn't always clear this bit which is critical
* for good performance - Anton
*/
@@ -292,7 +293,7 @@ static int phb_set_bus_ranges(struct device_node *dev,
if (bus_range == NULL || len < 2 * sizeof(int)) {
return 1;
}
-
+
phb->first_busno = bus_range[0];
phb->last_busno = bus_range[1];
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index e22856ecb5a..bae4bff138f 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -33,6 +33,7 @@
#include <asm/io.h>
#include <asm/prom.h>
#include <asm/processor.h>
+#include <asm/systemcfg.h>
#include <asm/pgtable.h>
#include <asm/smp.h>
#include <asm/elf.h>
@@ -51,6 +52,9 @@
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/lmb.h>
+#include <asm/xmon.h>
+
+#include "setup.h"
#undef DEBUG
@@ -60,6 +64,13 @@
#define DBG(fmt...)
#endif
+#ifdef CONFIG_PPC_MULTIPLATFORM
+int _machine = 0;
+EXPORT_SYMBOL(_machine);
+#endif
+
+unsigned long klimit = (unsigned long) _end;
+
/*
* This still seems to be needed... -- paulus
*/
@@ -510,8 +521,8 @@ void __init smp_setup_cpu_maps(void)
* On pSeries LPAR, we need to know how many cpus
* could possibly be added to this partition.
*/
- if (systemcfg->platform == PLATFORM_PSERIES_LPAR &&
- (dn = of_find_node_by_path("/rtas"))) {
+ if (_machine == PLATFORM_PSERIES_LPAR &&
+ (dn = of_find_node_by_path("/rtas"))) {
int num_addr_cell, num_size_cell, maxcpus;
unsigned int *ireg;
@@ -555,7 +566,27 @@ void __init smp_setup_cpu_maps(void)
cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]);
}
- systemcfg->processorCount = num_present_cpus();
+ _systemcfg->processorCount = num_present_cpus();
#endif /* CONFIG_PPC64 */
}
#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_XMON
+static int __init early_xmon(char *p)
+{
+ /* ensure xmon is enabled */
+ if (p) {
+ if (strncmp(p, "on", 2) == 0)
+ xmon_init(1);
+ if (strncmp(p, "off", 3) == 0)
+ xmon_init(0);
+ if (strncmp(p, "early", 5) != 0)
+ return 0;
+ }
+ xmon_init(1);
+ debugger(NULL);
+
+ return 0;
+}
+early_param("xmon", early_xmon);
+#endif
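
A short usage note on the handler moved in above: it is wired up with early_param(), so it runs while the kernel command line is parsed; "xmon=on" and "xmon=off" only toggle the debugger, "xmon=early" also drops into it immediately, and a NULL argument behaves like "early". A standalone sketch of the same string handling, with the xmon calls replaced by stand-in prints:

#include <stdio.h>
#include <string.h>

/* Stand-ins for xmon_init()/debugger(), for the demo only. */
static void ex_xmon_init(int enable) { printf("xmon_init(%d)\n", enable); }
static void ex_debugger(void)        { printf("debugger()\n"); }

static int ex_early_xmon(const char *p)
{
        if (p) {
                if (strncmp(p, "on", 2) == 0)
                        ex_xmon_init(1);
                if (strncmp(p, "off", 3) == 0)
                        ex_xmon_init(0);
                if (strncmp(p, "early", 5) != 0)
                        return 0;
        }
        ex_xmon_init(1);
        ex_debugger();
        return 0;
}

int main(void)
{
        ex_early_xmon("on");    /* enables xmon, then returns */
        ex_early_xmon("early"); /* enables xmon and enters the debugger */
        return 0;
}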
diff --git a/arch/powerpc/kernel/setup.h b/arch/powerpc/kernel/setup.h
new file mode 100644
index 00000000000..2ebba755272
--- /dev/null
+++ b/arch/powerpc/kernel/setup.h
@@ -0,0 +1,6 @@
+#ifndef _POWERPC_KERNEL_SETUP_H
+#define _POWERPC_KERNEL_SETUP_H
+
+void check_for_initrd(void);
+
+#endif /* _POWERPC_KERNEL_SETUP_H */
diff --git a/arch/powerpc/kernel/setup_32.c b/arch/powerpc/kernel/setup_32.c
index 3af2631e3fa..c98cfcc9cd9 100644
--- a/arch/powerpc/kernel/setup_32.c
+++ b/arch/powerpc/kernel/setup_32.c
@@ -40,6 +40,8 @@
#include <asm/xmon.h>
#include <asm/time.h>
+#include "setup.h"
+
#define DBG(fmt...)
#if defined CONFIG_KGDB
@@ -70,8 +72,6 @@ unsigned int DMA_MODE_WRITE;
int have_of = 1;
#ifdef CONFIG_PPC_MULTIPLATFORM
-int _machine = 0;
-
extern void prep_init(void);
extern void pmac_init(void);
extern void chrp_init(void);
@@ -279,7 +279,6 @@ arch_initcall(ppc_init);
/* Warning, IO base is not yet inited */
void __init setup_arch(char **cmdline_p)
{
- extern char *klimit;
extern void do_init_bootmem(void);
/* so udelay does something sensible, assume <= 1000 bogomips */
@@ -303,14 +302,9 @@ void __init setup_arch(char **cmdline_p)
pmac_feature_init(); /* New cool way */
#endif
-#ifdef CONFIG_XMON
- xmon_map_scc();
- if (strstr(cmd_line, "xmon")) {
- xmon_init(1);
- debugger(NULL);
- }
-#endif /* CONFIG_XMON */
- if ( ppc_md.progress ) ppc_md.progress("setup_arch: enter", 0x3eab);
+#ifdef CONFIG_XMON_DEFAULT
+ xmon_init(1);
+#endif
#if defined(CONFIG_KGDB)
if (ppc_md.kgdb_map_scc)
@@ -343,7 +337,7 @@ void __init setup_arch(char **cmdline_p)
init_mm.start_code = PAGE_OFFSET;
init_mm.end_code = (unsigned long) _etext;
init_mm.end_data = (unsigned long) _edata;
- init_mm.brk = (unsigned long) klimit;
+ init_mm.brk = klimit;
/* Save unparsed command line copy for /proc/cmdline */
strlcpy(saved_command_line, cmd_line, COMMAND_LINE_SIZE);
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 0471e843b6c..6791668213e 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -61,6 +61,8 @@
#include <asm/xmon.h>
#include <asm/udbg.h>
+#include "setup.h"
+
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
@@ -94,15 +96,6 @@ extern void udbg_init_maple_realmode(void);
do { udbg_putc = call_rtas_display_status_delay; } while(0)
#endif
-/* extern void *stab; */
-extern unsigned long klimit;
-
-extern void mm_init_ppc64(void);
-extern void stab_initialize(unsigned long stab);
-extern void htab_initialize(void);
-extern void early_init_devtree(void *flat_dt);
-extern void unflatten_device_tree(void);
-
int have_of = 1;
int boot_cpuid = 0;
int boot_cpuid_phys = 0;
@@ -254,11 +247,10 @@ void __init early_setup(unsigned long dt_ptr)
* Iterate all ppc_md structures until we find the proper
* one for the current machine type
*/
- DBG("Probing machine type for platform %x...\n",
- systemcfg->platform);
+ DBG("Probing machine type for platform %x...\n", _machine);
for (mach = machines; *mach; mach++) {
- if ((*mach)->probe(systemcfg->platform))
+ if ((*mach)->probe(_machine))
break;
}
/* What can we do if we didn't find ? */
@@ -290,6 +282,28 @@ void __init early_setup(unsigned long dt_ptr)
DBG(" <- early_setup()\n");
}
+#ifdef CONFIG_SMP
+void early_setup_secondary(void)
+{
+ struct paca_struct *lpaca = get_paca();
+
+ /* Mark enabled in PACA */
+ lpaca->proc_enabled = 0;
+
+ /* Initialize hash table for that CPU */
+ htab_initialize_secondary();
+
+ /* Initialize STAB/SLB. We use a virtual address as it works
+ * in real mode on pSeries and we want a virtual address on
+ * iSeries anyway
+ */
+ if (cpu_has_feature(CPU_FTR_SLB))
+ slb_initialize();
+ else
+ stab_initialize(lpaca->stab_addr);
+}
+
+#endif /* CONFIG_SMP */
#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC)
void smp_release_cpus(void)
@@ -315,7 +329,8 @@ void smp_release_cpus(void)
#endif /* CONFIG_SMP || CONFIG_KEXEC */
/*
- * Initialize some remaining members of the ppc64_caches and systemcfg structures
+ * Initialize some remaining members of the ppc64_caches and systemcfg
+ * structures
* (at least until we get rid of them completely). This is mostly some
* cache information about the CPU that will be used by cache flush
* routines and/or provided to userland
@@ -340,7 +355,7 @@ static void __init initialize_cache_info(void)
const char *dc, *ic;
/* Then read cache information */
- if (systemcfg->platform == PLATFORM_POWERMAC) {
+ if (_machine == PLATFORM_POWERMAC) {
dc = "d-cache-block-size";
ic = "i-cache-block-size";
} else {
@@ -360,8 +375,8 @@ static void __init initialize_cache_info(void)
DBG("Argh, can't find dcache properties ! "
"sizep: %p, lsizep: %p\n", sizep, lsizep);
- systemcfg->dcache_size = ppc64_caches.dsize = size;
- systemcfg->dcache_line_size =
+ _systemcfg->dcache_size = ppc64_caches.dsize = size;
+ _systemcfg->dcache_line_size =
ppc64_caches.dline_size = lsize;
ppc64_caches.log_dline_size = __ilog2(lsize);
ppc64_caches.dlines_per_page = PAGE_SIZE / lsize;
@@ -378,8 +393,8 @@ static void __init initialize_cache_info(void)
DBG("Argh, can't find icache properties ! "
"sizep: %p, lsizep: %p\n", sizep, lsizep);
- systemcfg->icache_size = ppc64_caches.isize = size;
- systemcfg->icache_line_size =
+ _systemcfg->icache_size = ppc64_caches.isize = size;
+ _systemcfg->icache_line_size =
ppc64_caches.iline_size = lsize;
ppc64_caches.log_iline_size = __ilog2(lsize);
ppc64_caches.ilines_per_page = PAGE_SIZE / lsize;
@@ -387,10 +402,12 @@ static void __init initialize_cache_info(void)
}
/* Add an eye catcher and the systemcfg layout version number */
- strcpy(systemcfg->eye_catcher, "SYSTEMCFG:PPC64");
- systemcfg->version.major = SYSTEMCFG_MAJOR;
- systemcfg->version.minor = SYSTEMCFG_MINOR;
- systemcfg->processor = mfspr(SPRN_PVR);
+ strcpy(_systemcfg->eye_catcher, "SYSTEMCFG:PPC64");
+ _systemcfg->version.major = SYSTEMCFG_MAJOR;
+ _systemcfg->version.minor = SYSTEMCFG_MINOR;
+ _systemcfg->processor = mfspr(SPRN_PVR);
+ _systemcfg->platform = _machine;
+ _systemcfg->physicalMemorySize = lmb_phys_mem_size();
DBG(" <- initialize_cache_info()\n");
}
@@ -479,10 +496,10 @@ void __init setup_system(void)
printk("-----------------------------------------------------\n");
printk("ppc64_pft_size = 0x%lx\n", ppc64_pft_size);
printk("ppc64_interrupt_controller = 0x%ld\n", ppc64_interrupt_controller);
- printk("systemcfg = 0x%p\n", systemcfg);
- printk("systemcfg->platform = 0x%x\n", systemcfg->platform);
- printk("systemcfg->processorCount = 0x%lx\n", systemcfg->processorCount);
- printk("systemcfg->physicalMemorySize = 0x%lx\n", systemcfg->physicalMemorySize);
+ printk("systemcfg = 0x%p\n", _systemcfg);
+ printk("systemcfg->platform = 0x%x\n", _systemcfg->platform);
+ printk("systemcfg->processorCount = 0x%lx\n", _systemcfg->processorCount);
+ printk("systemcfg->physicalMemorySize = 0x%lx\n", _systemcfg->physicalMemorySize);
printk("ppc64_caches.dcache_line_size = 0x%x\n",
ppc64_caches.dline_size);
printk("ppc64_caches.icache_line_size = 0x%x\n",
@@ -564,12 +581,12 @@ void __init setup_syscall_map(void)
for (i = 0; i < __NR_syscalls; i++) {
if (sys_call_table[i*2] != sys_ni_syscall) {
count64++;
- systemcfg->syscall_map_64[i >> 5] |=
+ _systemcfg->syscall_map_64[i >> 5] |=
0x80000000UL >> (i & 0x1f);
}
if (sys_call_table[i*2+1] != sys_ni_syscall) {
count32++;
- systemcfg->syscall_map_32[i >> 5] |=
+ _systemcfg->syscall_map_32[i >> 5] |=
0x80000000UL >> (i & 0x1f);
}
}
@@ -858,26 +875,6 @@ int check_legacy_ioport(unsigned long base_port)
}
EXPORT_SYMBOL(check_legacy_ioport);
-#ifdef CONFIG_XMON
-static int __init early_xmon(char *p)
-{
- /* ensure xmon is enabled */
- if (p) {
- if (strncmp(p, "on", 2) == 0)
- xmon_init(1);
- if (strncmp(p, "off", 3) == 0)
- xmon_init(0);
- if (strncmp(p, "early", 5) != 0)
- return 0;
- }
- xmon_init(1);
- debugger(NULL);
-
- return 0;
-}
-early_param("xmon", early_xmon);
-#endif
-
void cpu_die(void)
{
if (ppc_md.cpu_die)
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 081d931eae4..a7c4515f320 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -42,6 +42,7 @@
#include <asm/uaccess.h>
#include <asm/cacheflush.h>
+#include <asm/sigcontext.h>
#ifdef CONFIG_PPC64
#include "ppc32.h"
#include <asm/unistd.h>
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 5c330c3366e..e28a139c29d 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -44,6 +44,7 @@
#include <asm/cputable.h>
#include <asm/system.h>
#include <asm/mpic.h>
+#include <asm/systemcfg.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#endif
@@ -368,9 +369,11 @@ int generic_cpu_disable(void)
if (cpu == boot_cpuid)
return -EBUSY;
- systemcfg->processorCount--;
cpu_clear(cpu, cpu_online_map);
+#ifdef CONFIG_PPC64
+ _systemcfg->processorCount--;
fixup_irqs(cpu_online_map);
+#endif
return 0;
}
@@ -388,9 +391,11 @@ int generic_cpu_enable(unsigned int cpu)
while (!cpu_online(cpu))
cpu_relax();
+#ifdef CONFIG_PPC64
fixup_irqs(cpu_online_map);
/* counter the irq disable in fixup_irqs */
local_irq_enable();
+#endif
return 0;
}
@@ -419,7 +424,9 @@ void generic_mach_cpu_die(void)
while (__get_cpu_var(cpu_state) != CPU_UP_PREPARE)
cpu_relax();
+#ifdef CONFIG_PPC64
flush_tlb_pending();
+#endif
cpu_set(cpu, cpu_online_map);
local_irq_enable();
}
@@ -510,6 +517,7 @@ int __devinit start_secondary(void *unused)
smp_store_cpu_info(cpu);
set_dec(tb_ticks_per_jiffy);
+ preempt_disable();
cpu_callin_map[cpu] = 1;
smp_ops->setup_cpu(cpu);
diff --git a/arch/powerpc/kernel/sys_ppc32.c b/arch/powerpc/kernel/sys_ppc32.c
index a8210ed5c68..9c921d1c408 100644
--- a/arch/powerpc/kernel/sys_ppc32.c
+++ b/arch/powerpc/kernel/sys_ppc32.c
@@ -52,7 +52,6 @@
#include <asm/semaphore.h>
#include <asm/time.h>
#include <asm/mmu_context.h>
-#include <asm/systemcfg.h>
#include <asm/ppc-pci.h>
/* readdir & getdents */
diff --git a/arch/ppc64/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
index e99ec62c2c5..850af198fb5 100644
--- a/arch/ppc64/kernel/sysfs.c
+++ b/arch/powerpc/kernel/sysfs.c
@@ -232,7 +232,7 @@ static void register_cpu_online(unsigned int cpu)
sysdev_create_file(s, &attr_pmc7);
if (cur_cpu_spec->num_pmcs >= 8)
sysdev_create_file(s, &attr_pmc8);
-
+
if (cpu_has_feature(CPU_FTR_SMT))
sysdev_create_file(s, &attr_purr);
}
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index a6282b625b4..260b6ecd26a 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -271,13 +271,13 @@ static inline void update_gtod(u64 new_tb_stamp, u64 new_stamp_xsec,
* tb_to_xs and stamp_xsec values are consistent. If not, then it
* loops back and reads them again until this criterion is met.
*/
- ++(systemcfg->tb_update_count);
+ ++(_systemcfg->tb_update_count);
smp_wmb();
- systemcfg->tb_orig_stamp = new_tb_stamp;
- systemcfg->stamp_xsec = new_stamp_xsec;
- systemcfg->tb_to_xs = new_tb_to_xs;
+ _systemcfg->tb_orig_stamp = new_tb_stamp;
+ _systemcfg->stamp_xsec = new_stamp_xsec;
+ _systemcfg->tb_to_xs = new_tb_to_xs;
smp_wmb();
- ++(systemcfg->tb_update_count);
+ ++(_systemcfg->tb_update_count);
#endif
}
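
The tb_update_count handling above is a sequence-count pattern: the writer bumps the counter before and after the update, so the count is odd while an update is in flight and a reader that sees a changed or odd count retries. A sketch of the reader side implied by the comment (memory barriers omitted here; real code needs them, which is what the smp_wmb() calls above provide on the writer side):

#include <stdint.h>
#include <stdio.h>

struct ex_gtod {
        volatile uint64_t tb_update_count;
        volatile uint64_t stamp_xsec;
        volatile uint64_t tb_to_xs;
};

static void ex_read_consistent(const struct ex_gtod *g,
                               uint64_t *stamp, uint64_t *to_xs)
{
        uint64_t c1, c2;

        do {
                c1 = g->tb_update_count;
                *stamp = g->stamp_xsec;
                *to_xs = g->tb_to_xs;
                c2 = g->tb_update_count;
        } while (c1 != c2 || (c1 & 1));  /* retry while an update is in flight */
}

int main(void)
{
        struct ex_gtod g = { 2, 123, 456 };  /* even count: stable snapshot */
        uint64_t stamp, to_xs;

        ex_read_consistent(&g, &stamp, &to_xs);
        printf("%llu %llu\n", (unsigned long long)stamp, (unsigned long long)to_xs);
        return 0;
}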
@@ -357,8 +357,9 @@ static void iSeries_tb_recal(void)
do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
tb_to_xs = divres.result_low;
do_gtod.varp->tb_to_xs = tb_to_xs;
- systemcfg->tb_ticks_per_sec = tb_ticks_per_sec;
- systemcfg->tb_to_xs = tb_to_xs;
+ _systemcfg->tb_ticks_per_sec =
+ tb_ticks_per_sec;
+ _systemcfg->tb_to_xs = tb_to_xs;
}
else {
printk( "Titan recalibrate: FAILED (difference > 4 percent)\n"
@@ -483,6 +484,8 @@ void __init smp_space_timers(unsigned int max_cpus)
unsigned long offset = tb_ticks_per_jiffy / max_cpus;
unsigned long previous_tb = per_cpu(last_jiffy, boot_cpuid);
+ /* make sure tb > per_cpu(last_jiffy, cpu) for all cpus always */
+ previous_tb -= tb_ticks_per_jiffy;
for_each_cpu(i) {
if (i != boot_cpuid) {
previous_tb += offset;
@@ -559,8 +562,8 @@ int do_settimeofday(struct timespec *tv)
update_gtod(tb_last_jiffy, new_xsec, do_gtod.varp->tb_to_xs);
#ifdef CONFIG_PPC64
- systemcfg->tz_minuteswest = sys_tz.tz_minuteswest;
- systemcfg->tz_dsttime = sys_tz.tz_dsttime;
+ _systemcfg->tz_minuteswest = sys_tz.tz_minuteswest;
+ _systemcfg->tz_dsttime = sys_tz.tz_dsttime;
#endif
write_sequnlock_irqrestore(&xtime_lock, flags);
@@ -711,11 +714,11 @@ void __init time_init(void)
do_gtod.varp->tb_to_xs = tb_to_xs;
do_gtod.tb_to_us = tb_to_us;
#ifdef CONFIG_PPC64
- systemcfg->tb_orig_stamp = tb_last_jiffy;
- systemcfg->tb_update_count = 0;
- systemcfg->tb_ticks_per_sec = tb_ticks_per_sec;
- systemcfg->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC;
- systemcfg->tb_to_xs = tb_to_xs;
+ _systemcfg->tb_orig_stamp = tb_last_jiffy;
+ _systemcfg->tb_update_count = 0;
+ _systemcfg->tb_ticks_per_sec = tb_ticks_per_sec;
+ _systemcfg->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC;
+ _systemcfg->tb_to_xs = tb_to_xs;
#endif
time_freq = 0;
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index 0578f838760..2020bb7648f 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -129,7 +129,7 @@ int die(const char *str, struct pt_regs *regs, long err)
nl = 1;
#endif
#ifdef CONFIG_PPC64
- switch (systemcfg->platform) {
+ switch (_machine) {
case PLATFORM_PSERIES:
printk("PSERIES ");
nl = 1;
diff --git a/arch/powerpc/lib/bitops.c b/arch/powerpc/lib/bitops.c
index b67ce3004eb..f68ad71a018 100644
--- a/arch/powerpc/lib/bitops.c
+++ b/arch/powerpc/lib/bitops.c
@@ -41,7 +41,7 @@ unsigned long find_next_bit(const unsigned long *addr, unsigned long size,
tmp = *p;
found_first:
- tmp &= (~0UL >> (64 - size));
+ tmp &= (~0UL >> (BITS_PER_LONG - size));
if (tmp == 0UL) /* Are any bits set? */
return result + size; /* Nope. */
found_middle:
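
The fix above builds the tail mask from BITS_PER_LONG instead of a hard-coded 64, presumably so the helper stays correct when unsigned long is 32 bits wide (a shift count at or beyond the word size is undefined). A standalone sketch of the mask:

#include <stdio.h>
#include <limits.h>

#define EX_BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

int main(void)
{
        unsigned long size = 10;   /* number of valid bits left in the word */
        unsigned long tmp = ~0UL;  /* pretend every bit was set */

        tmp &= ~0UL >> (EX_BITS_PER_LONG - size);
        printf("%#lx\n", tmp);     /* 0x3ff: only the low 10 bits survive */
        return 0;
}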
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 22e47487613..706e8a63ced 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -84,10 +84,11 @@
extern unsigned long dart_tablebase;
#endif /* CONFIG_U3_DART */
+static unsigned long _SDR1;
+struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
+
hpte_t *htab_address;
unsigned long htab_hash_mask;
-unsigned long _SDR1;
-struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT];
int mmu_linear_psize = MMU_PAGE_4K;
int mmu_virtual_psize = MMU_PAGE_4K;
#ifdef CONFIG_HUGETLB_PAGE
@@ -165,7 +166,7 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
* normal insert callback here.
*/
#ifdef CONFIG_PPC_ISERIES
- if (systemcfg->platform == PLATFORM_ISERIES_LPAR)
+ if (_machine == PLATFORM_ISERIES_LPAR)
ret = iSeries_hpte_insert(hpteg, va,
virt_to_abs(paddr),
tmp_mode,
@@ -174,7 +175,7 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
else
#endif
#ifdef CONFIG_PPC_PSERIES
- if (systemcfg->platform & PLATFORM_LPAR)
+ if (_machine & PLATFORM_LPAR)
ret = pSeries_lpar_hpte_insert(hpteg, va,
virt_to_abs(paddr),
tmp_mode,
@@ -293,7 +294,7 @@ static void __init htab_init_page_sizes(void)
* Not in the device-tree, let's fallback on known size
* list for 16M capable GP & GR
*/
- if ((systemcfg->platform != PLATFORM_ISERIES_LPAR) &&
+ if ((_machine != PLATFORM_ISERIES_LPAR) &&
cpu_has_feature(CPU_FTR_16M_PAGE))
memcpy(mmu_psize_defs, mmu_psize_defaults_gp,
sizeof(mmu_psize_defaults_gp));
@@ -364,7 +365,7 @@ static int __init htab_dt_scan_pftsize(unsigned long node,
static unsigned long __init htab_get_table_size(void)
{
- unsigned long rnd_mem_size, pteg_count;
+ unsigned long mem_size, rnd_mem_size, pteg_count;
/* If hash size isn't already provided by the platform, we try to
* retrieve it from the device-tree. If it's not there either, we
@@ -376,8 +377,9 @@ static unsigned long __init htab_get_table_size(void)
return 1UL << ppc64_pft_size;
/* round mem_size up to next power of 2 */
- rnd_mem_size = 1UL << __ilog2(systemcfg->physicalMemorySize);
- if (rnd_mem_size < systemcfg->physicalMemorySize)
+ mem_size = lmb_phys_mem_size();
+ rnd_mem_size = 1UL << __ilog2(mem_size);
+ if (rnd_mem_size < mem_size)
rnd_mem_size <<= 1;
/* # pages / 2 */
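
The rounding above takes mem_size up to the next power of two before the PTEG count is derived: 1UL << __ilog2(mem_size) rounds down, and the doubling corrects it when mem_size is not already a power of two. A portable sketch of the same rounding, with __ilog2 replaced by a loop:

#include <stdio.h>

/* Round x up to the next power of two (x > 0 assumed). */
static unsigned long ex_roundup_pow2(unsigned long x)
{
        unsigned long r = 1;

        while (r < x)
                r <<= 1;
        return r;
}

int main(void)
{
        /* e.g. 768 MB of memory rounds up to 1 GB */
        printf("%#lx\n", ex_roundup_pow2(0x30000000UL));  /* 0x40000000 */
        return 0;
}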
@@ -386,6 +388,15 @@ static unsigned long __init htab_get_table_size(void)
return pteg_count << 7;
}
+#ifdef CONFIG_MEMORY_HOTPLUG
+void create_section_mapping(unsigned long start, unsigned long end)
+{
+ BUG_ON(htab_bolt_mapping(start, end, start,
+ _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX,
+ mmu_linear_psize));
+}
+#endif /* CONFIG_MEMORY_HOTPLUG */
+
void __init htab_initialize(void)
{
unsigned long table, htab_size_bytes;
@@ -410,7 +421,7 @@ void __init htab_initialize(void)
htab_hash_mask = pteg_count - 1;
- if (systemcfg->platform & PLATFORM_LPAR) {
+ if (platform_is_lpar()) {
/* Using a hypervisor which owns the htab */
htab_address = NULL;
_SDR1 = 0;
@@ -431,6 +442,9 @@ void __init htab_initialize(void)
/* Initialize the HPT with no entries */
memset((void *)table, 0, htab_size_bytes);
+
+ /* Set SDR1 */
+ mtspr(SPRN_SDR1, _SDR1);
}
mode_rw = _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX;
@@ -500,6 +514,12 @@ void __init htab_initialize(void)
#undef KB
#undef MB
+void __init htab_initialize_secondary(void)
+{
+ if (!platform_is_lpar())
+ mtspr(SPRN_SDR1, _SDR1);
+}
+
/*
* Called by asm hashtable.S for doing lazy icache flush
*/
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index 4612a79dfb6..7d4b8b5f060 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -84,9 +84,6 @@ void MMU_init(void);
/* XXX should be in current.h -- paulus */
extern struct task_struct *current_set[NR_CPUS];
-char *klimit = _end;
-struct device_node *memory_node;
-
extern int init_bootmem_done;
/*
diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c
index ce974c83d88..1134f70f231 100644
--- a/arch/powerpc/mm/init_64.c
+++ b/arch/powerpc/mm/init_64.c
@@ -20,6 +20,8 @@
*
*/
+#undef DEBUG
+
#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
@@ -64,6 +66,12 @@
#include <asm/vdso.h>
#include <asm/imalloc.h>
+#ifdef DEBUG
+#define DBG(fmt...) printk(fmt)
+#else
+#define DBG(fmt...)
+#endif
+
#if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted
#endif
@@ -72,8 +80,6 @@
#warning TASK_SIZE is smaller than it needs to be.
#endif
-unsigned long klimit = (unsigned long)_end;
-
/* max amount of RAM to use */
unsigned long __max_memory;
@@ -188,14 +194,14 @@ static void zero_ctor(void *addr, kmem_cache_t *cache, unsigned long flags)
}
#ifdef CONFIG_PPC_64K_PAGES
-static const int pgtable_cache_size[2] = {
- PTE_TABLE_SIZE, PGD_TABLE_SIZE
+static const unsigned int pgtable_cache_size[3] = {
+ PTE_TABLE_SIZE, PMD_TABLE_SIZE, PGD_TABLE_SIZE
};
static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
- "pte_pmd_cache", "pgd_cache",
+ "pte_pmd_cache", "pmd_cache", "pgd_cache",
};
#else
-static const int pgtable_cache_size[2] = {
+static const unsigned int pgtable_cache_size[2] = {
PTE_TABLE_SIZE, PMD_TABLE_SIZE
};
static const char *pgtable_cache_name[ARRAY_SIZE(pgtable_cache_size)] = {
@@ -213,6 +219,8 @@ void pgtable_cache_init(void)
int size = pgtable_cache_size[i];
const char *name = pgtable_cache_name[i];
+ DBG("Allocating page table cache %s (#%d) "
+ "for size: %08x...\n", name, i, size);
pgtable_cache[i] = kmem_cache_create(name,
size, size,
SLAB_HWCACHE_ALIGN |
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 6f55efd9be9..1dd3cc69a49 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -110,6 +110,7 @@ EXPORT_SYMBOL(phys_mem_access_prot);
void online_page(struct page *page)
{
ClearPageReserved(page);
+ set_page_count(page, 0);
free_cold_page(page);
totalram_pages++;
num_physpages++;
@@ -127,6 +128,9 @@ int __devinit add_memory(u64 start, u64 size)
unsigned long start_pfn = start >> PAGE_SHIFT;
unsigned long nr_pages = size >> PAGE_SHIFT;
+ start += KERNELBASE;
+ create_section_mapping(start, start + size);
+
/* this should work for most non-highmem platforms */
zone = pgdata->node_zones;
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 900842451bd..c7f7bb6f30b 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -122,8 +122,11 @@ static int map_io_page(unsigned long ea, unsigned long pa, int flags)
*
*/
if (htab_bolt_mapping(ea, ea + PAGE_SIZE, pa, flags,
- mmu_virtual_psize))
- panic("Can't map bolted IO mapping");
+ mmu_virtual_psize)) {
+ printk(KERN_ERR "Failed to do bolted mapping IO "
+ "memory at %016lx !\n", pa);
+ return -ENOMEM;
+ }
}
return 0;
}
diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c
index fa325dbf98f..cfbb4e1f966 100644
--- a/arch/powerpc/mm/stab.c
+++ b/arch/powerpc/mm/stab.c
@@ -20,6 +20,7 @@
#include <asm/cputable.h>
#include <asm/lmb.h>
#include <asm/abs_addr.h>
+#include <asm/firmware.h>
struct stab_entry {
unsigned long esid_data;
@@ -256,7 +257,7 @@ void stabs_alloc(void)
paca[cpu].stab_addr = newstab;
paca[cpu].stab_real = virt_to_abs(newstab);
- printk(KERN_DEBUG "Segment table for CPU %d at 0x%lx "
+ printk(KERN_INFO "Segment table for CPU %d at 0x%lx "
"virtual, 0x%lx absolute\n",
cpu, paca[cpu].stab_addr, paca[cpu].stab_real);
}
@@ -270,10 +271,28 @@ void stabs_alloc(void)
void stab_initialize(unsigned long stab)
{
unsigned long vsid = get_kernel_vsid(KERNELBASE);
+ unsigned long stabreal;
asm volatile("isync; slbia; isync":::"memory");
make_ste(stab, GET_ESID(KERNELBASE), vsid);
/* Order update */
asm volatile("sync":::"memory");
+
+ /* Set ASR */
+ stabreal = get_paca()->stab_real | 0x1ul;
+
+#ifdef CONFIG_PPC_ISERIES
+ if (firmware_has_feature(FW_FEATURE_ISERIES)) {
+ HvCall1(HvCallBaseSetASR, stabreal);
+ return;
+ }
+#endif /* CONFIG_PPC_ISERIES */
+#ifdef CONFIG_PPC_PSERIES
+ if (platform_is_lpar()) {
+ plpar_hcall_norets(H_SET_ASR, stabreal);
+ return;
+ }
+#endif
+ mtspr(SPRN_ASR, stabreal);
}
diff --git a/arch/powerpc/oprofile/op_model_power4.c b/arch/powerpc/oprofile/op_model_power4.c
index c4ee5478427..e3a024e324b 100644
--- a/arch/powerpc/oprofile/op_model_power4.c
+++ b/arch/powerpc/oprofile/op_model_power4.c
@@ -233,8 +233,7 @@ static unsigned long get_pc(struct pt_regs *regs)
mmcra = mfspr(SPRN_MMCRA);
/* Were we in the hypervisor? */
- if ((systemcfg->platform == PLATFORM_PSERIES_LPAR) &&
- (mmcra & MMCRA_SIHV))
+ if (platform_is_lpar() && (mmcra & MMCRA_SIHV))
/* function descriptor madness */
return *((unsigned long *)hypervisor_bucket);
diff --git a/arch/powerpc/platforms/chrp/setup.c b/arch/powerpc/platforms/chrp/setup.c
index ecd32d5d85f..4099ddab920 100644
--- a/arch/powerpc/platforms/chrp/setup.c
+++ b/arch/powerpc/platforms/chrp/setup.c
@@ -361,7 +361,9 @@ static void __init chrp_find_openpic(void)
printk(KERN_INFO "OpenPIC at %lx\n", opaddr);
irq_count = NR_IRQS - NUM_ISA_INTERRUPTS - 4; /* leave room for IPIs */
- prom_get_irq_senses(init_senses, NUM_8259_INTERRUPTS, NR_IRQS - 4);
+ prom_get_irq_senses(init_senses, NUM_ISA_INTERRUPTS, NR_IRQS - 4);
+ /* i8259 cascade is always positive level */
+ init_senses[0] = IRQ_SENSE_LEVEL | IRQ_POLARITY_POSITIVE;
iranges = (unsigned int *) get_property(np, "interrupt-ranges", &len);
if (iranges == NULL)
diff --git a/arch/powerpc/platforms/iseries/irq.c b/arch/powerpc/platforms/iseries/irq.c
index a06603d84a4..01090e9ce0c 100644
--- a/arch/powerpc/platforms/iseries/irq.c
+++ b/arch/powerpc/platforms/iseries/irq.c
@@ -103,6 +103,9 @@ static void intReceived(struct XmPciLpEvent *eventParm,
struct pt_regs *regsParm)
{
int irq;
+#ifdef CONFIG_IRQSTACKS
+ struct thread_info *curtp, *irqtp;
+#endif
++Pci_Interrupt_Count;
@@ -110,7 +113,20 @@ static void intReceived(struct XmPciLpEvent *eventParm,
case XmPciLpEvent_SlotInterrupt:
irq = eventParm->hvLpEvent.xCorrelationToken;
/* Dispatch the interrupt handlers for this irq */
- ppc_irq_dispatch_handler(regsParm, irq);
+#ifdef CONFIG_IRQSTACKS
+ /* Switch to the irq stack to handle this */
+ curtp = current_thread_info();
+ irqtp = hardirq_ctx[smp_processor_id()];
+ if (curtp != irqtp) {
+ irqtp->task = curtp->task;
+ irqtp->flags = 0;
+ call___do_IRQ(irq, regsParm, irqtp);
+ irqtp->task = NULL;
+ if (irqtp->flags)
+ set_bits(irqtp->flags, &curtp->flags);
+ } else
+#endif
+ __do_IRQ(irq, regsParm);
HvCallPci_eoi(eventParm->eventData.slotInterrupt.busNumber,
eventParm->eventData.slotInterrupt.subBusNumber,
eventParm->eventData.slotInterrupt.deviceId);
@@ -310,10 +326,8 @@ static void iSeries_disable_IRQ(unsigned int irq)
}
/*
- * Need to define this so ppc_irq_dispatch_handler will NOT call
- * enable_IRQ at the end of interrupt handling. However, this does
- * nothing because there is not enough information provided to do
- * the EOI HvCall. This is done by XmPciLpEvent.c
+ * This does nothing because there is not enough information
+ * provided to do the EOI HvCall. This is done by XmPciLpEvent.c
*/
static void iSeries_end_IRQ(unsigned int irq)
{
diff --git a/arch/powerpc/platforms/iseries/misc.S b/arch/powerpc/platforms/iseries/misc.S
index 09f14522e17..dfe7aa1ba09 100644
--- a/arch/powerpc/platforms/iseries/misc.S
+++ b/arch/powerpc/platforms/iseries/misc.S
@@ -15,6 +15,7 @@
#include <asm/processor.h>
#include <asm/asm-offsets.h>
+#include <asm/ppc_asm.h>
.text
diff --git a/arch/powerpc/platforms/iseries/setup.c b/arch/powerpc/platforms/iseries/setup.c
index 7f8f0cda6a7..6a29f301436 100644
--- a/arch/powerpc/platforms/iseries/setup.c
+++ b/arch/powerpc/platforms/iseries/setup.c
@@ -39,7 +39,8 @@
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/firmware.h>
-
+#include <asm/systemcfg.h>
+#include <asm/system.h>
#include <asm/time.h>
#include <asm/paca.h>
#include <asm/cache.h>
@@ -71,7 +72,7 @@ extern void hvlog(char *fmt, ...);
#endif
/* Function Prototypes */
-static void build_iSeries_Memory_Map(void);
+static unsigned long build_iSeries_Memory_Map(void);
static void iseries_shared_idle(void);
static void iseries_dedicated_idle(void);
#ifdef CONFIG_PCI
@@ -84,7 +85,6 @@ static void iSeries_pci_final_fixup(void) { }
int piranha_simulator;
extern int rd_size; /* Defined in drivers/block/rd.c */
-extern unsigned long klimit;
extern unsigned long embedded_sysmap_start;
extern unsigned long embedded_sysmap_end;
@@ -403,9 +403,11 @@ void mschunks_alloc(unsigned long num_chunks)
* a table used to translate Linux's physical addresses to these
* absolute addresses. Absolute addresses are needed when
* communicating with the hypervisor (e.g. to build HPT entries)
+ *
+ * Returns the physical memory size
*/
-static void __init build_iSeries_Memory_Map(void)
+static unsigned long __init build_iSeries_Memory_Map(void)
{
u32 loadAreaFirstChunk, loadAreaLastChunk, loadAreaSize;
u32 nextPhysChunk;
@@ -538,7 +540,7 @@ static void __init build_iSeries_Memory_Map(void)
* which should be equal to
* nextPhysChunk
*/
- systemcfg->physicalMemorySize = chunk_to_addr(nextPhysChunk);
+ return chunk_to_addr(nextPhysChunk);
}
/*
@@ -564,8 +566,8 @@ static void __init iSeries_setup_arch(void)
printk("Max physical processors = %d\n",
itVpdAreas.xSlicMaxPhysicalProcs);
- systemcfg->processor = xIoHriProcessorVpd[procIx].xPVR;
- printk("Processor version = %x\n", systemcfg->processor);
+ _systemcfg->processor = xIoHriProcessorVpd[procIx].xPVR;
+ printk("Processor version = %x\n", _systemcfg->processor);
}
static void iSeries_show_cpuinfo(struct seq_file *m)
@@ -702,7 +704,6 @@ static void iseries_shared_idle(void)
static void iseries_dedicated_idle(void)
{
- long oldval;
set_thread_flag(TIF_POLLING_NRFLAG);
while (1) {
@@ -929,7 +930,7 @@ void dt_cpus(struct iseries_flat_dt *dt)
dt_end_node(dt);
}
-void build_flat_dt(struct iseries_flat_dt *dt)
+void build_flat_dt(struct iseries_flat_dt *dt, unsigned long phys_mem_size)
{
u64 tmp[2];
@@ -945,7 +946,7 @@ void build_flat_dt(struct iseries_flat_dt *dt)
dt_prop_str(dt, "name", "memory");
dt_prop_str(dt, "device_type", "memory");
tmp[0] = 0;
- tmp[1] = systemcfg->physicalMemorySize;
+ tmp[1] = phys_mem_size;
dt_prop_u64_list(dt, "reg", tmp, 2);
dt_end_node(dt);
@@ -965,13 +966,15 @@ void build_flat_dt(struct iseries_flat_dt *dt)
void * __init iSeries_early_setup(void)
{
+ unsigned long phys_mem_size;
+
iSeries_fixup_klimit();
/*
* Initialize the table which translate Linux physical addresses to
* AS/400 absolute addresses
*/
- build_iSeries_Memory_Map();
+ phys_mem_size = build_iSeries_Memory_Map();
iSeries_get_cmdline();
@@ -981,7 +984,7 @@ void * __init iSeries_early_setup(void)
/* Parse early parameters, in particular mem=x */
parse_early_param();
- build_flat_dt(&iseries_dt);
+ build_flat_dt(&iseries_dt, phys_mem_size);
return (void *) __pa(&iseries_dt);
}
diff --git a/arch/powerpc/platforms/maple/pci.c b/arch/powerpc/platforms/maple/pci.c
index 340c21caeae..895aeb3f75d 100644
--- a/arch/powerpc/platforms/maple/pci.c
+++ b/arch/powerpc/platforms/maple/pci.c
@@ -380,9 +380,6 @@ void __init maple_pcibios_fixup(void)
for_each_pci_dev(dev)
pci_read_irq_line(dev);
- /* Do the mapping of the IO space */
- phbs_remap_io();
-
DBG(" <- maple_pcibios_fixup\n");
}
diff --git a/arch/powerpc/platforms/powermac/pci.c b/arch/powerpc/platforms/powermac/pci.c
index 8f818d092e2..dfd41b9781a 100644
--- a/arch/powerpc/platforms/powermac/pci.c
+++ b/arch/powerpc/platforms/powermac/pci.c
@@ -918,9 +918,6 @@ void __init pmac_pci_init(void)
PCI_DN(np)->busno = 0xf0;
}
- /* map in PCI I/O space */
- phbs_remap_io();
-
/* pmac_check_ht_link(); */
/* Tell pci.c to not use the common resource allocation mechanism */
diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c
index 83a49e80ac2..90040c49494 100644
--- a/arch/powerpc/platforms/powermac/pic.c
+++ b/arch/powerpc/platforms/powermac/pic.c
@@ -74,6 +74,9 @@ static DEFINE_SPINLOCK(pmac_pic_lock);
#define GATWICK_IRQ_POOL_SIZE 10
static struct interrupt_info gatwick_int_pool[GATWICK_IRQ_POOL_SIZE];
+#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
+static unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
+
/*
* Mark an irq as "lost". This is only used on the pmac
* since it can lose interrupts (see pmac_set_irq_mask).
diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
index e1f9443cc87..957b0910342 100644
--- a/arch/powerpc/platforms/powermac/smp.c
+++ b/arch/powerpc/platforms/powermac/smp.c
@@ -305,9 +305,19 @@ static int __init smp_psurge_probe(void)
psurge_start = ioremap(PSURGE_START, 4);
psurge_pri_intr = ioremap(PSURGE_PRI_INTR, 4);
- /* this is not actually strictly necessary -- paulus. */
- for (i = 1; i < ncpus; ++i)
- smp_hw_index[i] = i;
+ /*
+ * This is necessary because OF doesn't know about the
+ * secondary cpu(s), and thus there aren't nodes in the
+ * device tree for them, and smp_setup_cpu_maps hasn't
+ * set their bits in cpu_possible_map and cpu_present_map.
+ */
+ if (ncpus > NR_CPUS)
+ ncpus = NR_CPUS;
+ for (i = 1; i < ncpus ; ++i) {
+ cpu_set(i, cpu_present_map);
+ cpu_set(i, cpu_possible_map);
+ set_hard_smp_processor_id(i, i);
+ }
if (ppc_md.progress) ppc_md.progress("smp_psurge_probe - done", 0x352);
@@ -348,6 +358,7 @@ static void __init psurge_dual_sync_tb(int cpu_nr)
int t;
set_dec(tb_ticks_per_jiffy);
+ /* XXX fixme */
set_tb(0, 0);
last_jiffy_stamp(cpu_nr) = 0;
@@ -363,8 +374,6 @@ static void __init psurge_dual_sync_tb(int cpu_nr)
/* now interrupt the secondary, starting both TBs */
psurge_set_ipi(1);
-
- smp_tb_synchronized = 1;
}
static struct irqaction psurge_irqaction = {
@@ -625,9 +634,8 @@ void smp_core99_give_timebase(void)
for (t = 100000; t > 0 && sec_tb_reset; --t)
udelay(10);
if (sec_tb_reset)
+ /* XXX BUG_ON here? */
printk(KERN_WARNING "Timeout waiting sync(2) on second CPU\n");
- else
- smp_tb_synchronized = 1;
/* Now, restart the timebase by leaving the GPIO to an open collector */
pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, core99_tb_gpio, 0);
@@ -810,19 +818,9 @@ static void __devinit smp_core99_setup_cpu(int cpu_nr)
}
-/* Core99 Macs (dual G4s and G5s) */
-struct smp_ops_t core99_smp_ops = {
- .message_pass = smp_mpic_message_pass,
- .probe = smp_core99_probe,
- .kick_cpu = smp_core99_kick_cpu,
- .setup_cpu = smp_core99_setup_cpu,
- .give_timebase = smp_core99_give_timebase,
- .take_timebase = smp_core99_take_timebase,
-};
-
#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32)
-int __cpu_disable(void)
+int smp_core99_cpu_disable(void)
{
cpu_clear(smp_processor_id(), cpu_online_map);
@@ -846,7 +844,7 @@ void cpu_die(void)
low_cpu_die();
}
-void __cpu_die(unsigned int cpu)
+void smp_core99_cpu_die(unsigned int cpu)
{
int timeout;
@@ -858,8 +856,21 @@ void __cpu_die(unsigned int cpu)
}
msleep(1);
}
- cpu_callin_map[cpu] = 0;
cpu_dead[cpu] = 0;
}
#endif
+
+/* Core99 Macs (dual G4s and G5s) */
+struct smp_ops_t core99_smp_ops = {
+ .message_pass = smp_mpic_message_pass,
+ .probe = smp_core99_probe,
+ .kick_cpu = smp_core99_kick_cpu,
+ .setup_cpu = smp_core99_setup_cpu,
+ .give_timebase = smp_core99_give_timebase,
+ .take_timebase = smp_core99_take_timebase,
+#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_PPC32)
+ .cpu_disable = smp_core99_cpu_disable,
+ .cpu_die = smp_core99_cpu_die,
+#endif
+};
diff --git a/arch/powerpc/platforms/pseries/Makefile b/arch/powerpc/platforms/pseries/Makefile
index b9938fece78..e7ca5b1f591 100644
--- a/arch/powerpc/platforms/pseries/Makefile
+++ b/arch/powerpc/platforms/pseries/Makefile
@@ -3,3 +3,5 @@ obj-y := pci.o lpar.o hvCall.o nvram.o reconfig.o \
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_IBMVIO) += vio.o
obj-$(CONFIG_XICS) += xics.o
+obj-$(CONFIG_SCANLOG) += scanlog.o
+obj-$(CONFIG_EEH) += eeh.o eeh_event.o
diff --git a/arch/ppc64/kernel/eeh.c b/arch/powerpc/platforms/pseries/eeh.c
index 035d1b14a20..79de2310e70 100644
--- a/arch/ppc64/kernel/eeh.c
+++ b/arch/powerpc/platforms/pseries/eeh.c
@@ -1,39 +1,37 @@
/*
* eeh.c
* Copyright (C) 2001 Dave Engebretsen & Todd Inglett IBM Corporation
- *
+ *
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
+ *
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
+ *
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#include <linux/bootmem.h>
+#include <linux/delay.h>
#include <linux/init.h>
#include <linux/list.h>
-#include <linux/mm.h>
-#include <linux/notifier.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/rbtree.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
+#include <asm/atomic.h>
#include <asm/eeh.h>
+#include <asm/eeh_event.h>
#include <asm/io.h>
#include <asm/machdep.h>
-#include <asm/rtas.h>
-#include <asm/atomic.h>
-#include <asm/systemcfg.h>
#include <asm/ppc-pci.h>
+#include <asm/rtas.h>
#undef DEBUG
@@ -49,8 +47,8 @@
* were "empty": all reads return 0xff's and all writes are silently
* ignored. EEH slot isolation events can be triggered by parity
* errors on the address or data busses (e.g. during posted writes),
- * which in turn might be caused by dust, vibration, humidity,
- * radioactivity or plain-old failed hardware.
+ * which in turn might be caused by low voltage on the bus, dust,
+ * vibration, humidity, radioactivity or plain-old failed hardware.
*
* Note, however, that one of the leading causes of EEH slot
* freeze events are buggy device drivers, buggy device microcode,
@@ -71,26 +69,15 @@
* and sent out for processing.
*/
-/** Bus Unit ID macros; get low and hi 32-bits of the 64-bit BUID */
-#define BUID_HI(buid) ((buid) >> 32)
-#define BUID_LO(buid) ((buid) & 0xffffffff)
-
-/* EEH event workqueue setup. */
-static DEFINE_SPINLOCK(eeh_eventlist_lock);
-LIST_HEAD(eeh_eventlist);
-static void eeh_event_handler(void *);
-DECLARE_WORK(eeh_event_wq, eeh_event_handler, NULL);
-
-static struct notifier_block *eeh_notifier_chain;
-
-/*
- * If a device driver keeps reading an MMIO register in an interrupt
+/* If a device driver keeps reading an MMIO register in an interrupt
* handler after a slot isolation event has occurred, we assume it
* is broken and panic. This sets the threshold for how many read
* attempts we allow before panicking.
*/
-#define EEH_MAX_FAILS 1000
-static atomic_t eeh_fail_count;
+#define EEH_MAX_FAILS 100000
+
+/* Misc forward declarations */
+static void eeh_save_bars(struct pci_dev * pdev, struct pci_dn *pdn);
/* RTAS tokens */
static int ibm_set_eeh_option;
@@ -101,12 +88,19 @@ static int ibm_slot_error_detail;
static int eeh_subsystem_enabled;
+/* Lock to avoid races due to multiple reports of an error */
+static DEFINE_SPINLOCK(confirm_error_lock);
+
/* Buffer for reporting slot-error-detail rtas calls */
static unsigned char slot_errbuf[RTAS_ERROR_LOG_MAX];
static DEFINE_SPINLOCK(slot_errbuf_lock);
static int eeh_error_buf_size;
/* System monitoring statistics */
+static DEFINE_PER_CPU(unsigned long, no_device);
+static DEFINE_PER_CPU(unsigned long, no_dn);
+static DEFINE_PER_CPU(unsigned long, no_cfg_addr);
+static DEFINE_PER_CPU(unsigned long, ignored_check);
static DEFINE_PER_CPU(unsigned long, total_mmio_ffs);
static DEFINE_PER_CPU(unsigned long, false_positives);
static DEFINE_PER_CPU(unsigned long, ignored_failures);
@@ -224,9 +218,9 @@ pci_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
while (*p) {
parent = *p;
piar = rb_entry(parent, struct pci_io_addr_range, rb_node);
- if (alo < piar->addr_lo) {
+ if (ahi < piar->addr_lo) {
p = &parent->rb_left;
- } else if (ahi > piar->addr_hi) {
+ } else if (alo > piar->addr_hi) {
p = &parent->rb_right;
} else {
if (dev != piar->pcidev ||
@@ -245,6 +239,11 @@ pci_addr_cache_insert(struct pci_dev *dev, unsigned long alo,
piar->pcidev = dev;
piar->flags = flags;
+#ifdef DEBUG
+ printk(KERN_DEBUG "PIAR: insert range=[%lx:%lx] dev=%s\n",
+ alo, ahi, pci_name (dev));
+#endif
+
rb_link_node(&piar->rb_node, parent, p);
rb_insert_color(&piar->rb_node, &pci_io_addr_cache_root.rb_root);
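
Reading the comparison change above: the insert now descends left only when the new range ends before the node's range starts, and right only when it starts after the node's range ends; anything else is treated as a conflicting (overlapping or duplicate) range. A standalone overlap check equivalent to that reading:

#include <stdio.h>

static int ex_ranges_overlap(unsigned long alo, unsigned long ahi,
                             unsigned long blo, unsigned long bhi)
{
        /* Disjoint iff one range ends before the other begins. */
        return !(ahi < blo || alo > bhi);
}

int main(void)
{
        /* prints 1 (overlap) then 0 (disjoint) */
        printf("%d\n", ex_ranges_overlap(0x1000, 0x1fff, 0x1800, 0x2fff));
        printf("%d\n", ex_ranges_overlap(0x1000, 0x1fff, 0x3000, 0x3fff));
        return 0;
}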
@@ -260,18 +259,17 @@ static void __pci_addr_cache_insert_device(struct pci_dev *dev)
dn = pci_device_to_OF_node(dev);
if (!dn) {
- printk(KERN_WARNING "PCI: no pci dn found for dev=%s\n",
- pci_name(dev));
+ printk(KERN_WARNING "PCI: no pci dn found for dev=%s\n", pci_name(dev));
return;
}
/* Skip any devices for which EEH is not enabled. */
- pdn = dn->data;
+ pdn = PCI_DN(dn);
if (!(pdn->eeh_mode & EEH_MODE_SUPPORTED) ||
pdn->eeh_mode & EEH_MODE_NOCHECK) {
#ifdef DEBUG
- printk(KERN_INFO "PCI: skip building address cache for=%s\n",
- pci_name(dev));
+ printk(KERN_INFO "PCI: skip building address cache for=%s - %s\n",
+ pci_name(dev), pdn->node->full_name);
#endif
return;
}
@@ -307,7 +305,7 @@ static void __pci_addr_cache_insert_device(struct pci_dev *dev)
* we maintain a cache of devices that can be quickly searched.
* This routine adds a device to that cache.
*/
-void pci_addr_cache_insert_device(struct pci_dev *dev)
+static void pci_addr_cache_insert_device(struct pci_dev *dev)
{
unsigned long flags;
@@ -350,7 +348,7 @@ restart:
* the tree multiple times (once per resource).
* But so what; device removal doesn't need to be that fast.
*/
-void pci_addr_cache_remove_device(struct pci_dev *dev)
+static void pci_addr_cache_remove_device(struct pci_dev *dev)
{
unsigned long flags;
@@ -370,8 +368,12 @@ void pci_addr_cache_remove_device(struct pci_dev *dev)
*/
void __init pci_addr_cache_build(void)
{
+ struct device_node *dn;
struct pci_dev *dev = NULL;
+ if (!eeh_subsystem_enabled)
+ return;
+
spin_lock_init(&pci_io_addr_cache_root.piar_lock);
while ((dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev)) != NULL) {
@@ -380,6 +382,10 @@ void __init pci_addr_cache_build(void)
continue;
}
pci_addr_cache_insert_device(dev);
+
+ /* Save the BAR's; firmware doesn't restore these after EEH reset */
+ dn = pci_device_to_OF_node(dev);
+ eeh_save_bars(dev, PCI_DN(dn));
}
#ifdef DEBUG
@@ -391,22 +397,26 @@ void __init pci_addr_cache_build(void)
/* --------------------------------------------------------------- */
/* Above lies the PCI Address Cache. Below lies the EEH event infrastructure */
-/**
- * eeh_register_notifier - Register to find out about EEH events.
- * @nb: notifier block to callback on events
- */
-int eeh_register_notifier(struct notifier_block *nb)
+void eeh_slot_error_detail (struct pci_dn *pdn, int severity)
{
- return notifier_chain_register(&eeh_notifier_chain, nb);
-}
+ unsigned long flags;
+ int rc;
-/**
- * eeh_unregister_notifier - Unregister to an EEH event notifier.
- * @nb: notifier block to callback on events
- */
-int eeh_unregister_notifier(struct notifier_block *nb)
-{
- return notifier_chain_unregister(&eeh_notifier_chain, nb);
+ /* Log the error with the rtas logger */
+ spin_lock_irqsave(&slot_errbuf_lock, flags);
+ memset(slot_errbuf, 0, eeh_error_buf_size);
+
+ rc = rtas_call(ibm_slot_error_detail,
+ 8, 1, NULL, pdn->eeh_config_addr,
+ BUID_HI(pdn->phb->buid),
+ BUID_LO(pdn->phb->buid), NULL, 0,
+ virt_to_phys(slot_errbuf),
+ eeh_error_buf_size,
+ severity);
+
+ if (rc == 0)
+ log_error(slot_errbuf, ERR_TYPE_RTAS_LOG, 0);
+ spin_unlock_irqrestore(&slot_errbuf_lock, flags);
}
/**
@@ -414,16 +424,16 @@ int eeh_unregister_notifier(struct notifier_block *nb)
* @dn: device node to read
* @rets: array to return results in
*/
-static int read_slot_reset_state(struct device_node *dn, int rets[])
+static int read_slot_reset_state(struct pci_dn *pdn, int rets[])
{
int token, outputs;
- struct pci_dn *pdn = dn->data;
if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) {
token = ibm_read_slot_reset_state2;
outputs = 4;
} else {
token = ibm_read_slot_reset_state;
+ rets[2] = 0; /* fake PE Unavailable info */
outputs = 3;
}
@@ -432,87 +442,84 @@ static int read_slot_reset_state(struct device_node *dn, int rets[])
}
/**
- * eeh_panic - call panic() for an eeh event that cannot be handled.
- * The philosophy of this routine is that it is better to panic and
- * halt the OS than it is to risk possible data corruption by
- * oblivious device drivers that don't know better.
- *
- * @dev pci device that had an eeh event
- * @reset_state current reset state of the device slot
+ * eeh_token_to_phys - convert EEH address token to phys address
+ * @token i/o token, should be address in the form 0xA....
*/
-static void eeh_panic(struct pci_dev *dev, int reset_state)
+static inline unsigned long eeh_token_to_phys(unsigned long token)
{
- /*
- * XXX We should create a separate sysctl for this.
- *
- * Since the panic_on_oops sysctl is used to halt the system
- * in light of potential corruption, we can use it here.
- */
- if (panic_on_oops)
- panic("EEH: MMIO failure (%d) on device:%s\n", reset_state,
- pci_name(dev));
- else {
- __get_cpu_var(ignored_failures)++;
- printk(KERN_INFO "EEH: Ignored MMIO failure (%d) on device:%s\n",
- reset_state, pci_name(dev));
- }
+ pte_t *ptep;
+ unsigned long pa;
+
+ ptep = find_linux_pte(init_mm.pgd, token);
+ if (!ptep)
+ return token;
+ pa = pte_pfn(*ptep) << PAGE_SHIFT;
+
+ return pa | (token & (PAGE_SIZE-1));
}
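
eeh_token_to_phys() above splits the work into a page-table lookup (find_linux_pte) and an offset merge: the page frame number is shifted into place and the low PAGE_SIZE-1 bits of the token are OR-ed back in. A sketch of that last step with made-up numbers and an assumed 4 KB page size:

#include <stdio.h>

#define EX_PAGE_SHIFT 12
#define EX_PAGE_SIZE  (1UL << EX_PAGE_SHIFT)

int main(void)
{
        unsigned long pfn = 0x1234;                  /* made-up page frame number */
        unsigned long token = 0xd00000000000fabcUL;  /* made-up I/O token */
        unsigned long pa = (pfn << EX_PAGE_SHIFT) | (token & (EX_PAGE_SIZE - 1));

        printf("%#lx\n", pa);  /* 0x1234abc */
        return 0;
}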
-/**
- * eeh_event_handler - dispatch EEH events. The detection of a frozen
- * slot can occur inside an interrupt, where it can be hard to do
- * anything about it. The goal of this routine is to pull these
- * detection events out of the context of the interrupt handler, and
- * re-dispatch them for processing at a later time in a normal context.
- *
- * @dummy - unused
+/**
+ * Return the "partitionable endpoint" (pe) under which this device lies
*/
-static void eeh_event_handler(void *dummy)
+static struct device_node * find_device_pe(struct device_node *dn)
{
- unsigned long flags;
- struct eeh_event *event;
-
- while (1) {
- spin_lock_irqsave(&eeh_eventlist_lock, flags);
- event = NULL;
- if (!list_empty(&eeh_eventlist)) {
- event = list_entry(eeh_eventlist.next, struct eeh_event, list);
- list_del(&event->list);
- }
- spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
- if (event == NULL)
- break;
-
- printk(KERN_INFO "EEH: MMIO failure (%d), notifiying device "
- "%s\n", event->reset_state,
- pci_name(event->dev));
+ while ((dn->parent) && PCI_DN(dn->parent) &&
+ (PCI_DN(dn->parent)->eeh_mode & EEH_MODE_SUPPORTED)) {
+ dn = dn->parent;
+ }
+ return dn;
+}
- atomic_set(&eeh_fail_count, 0);
- notifier_call_chain (&eeh_notifier_chain,
- EEH_NOTIFY_FREEZE, event);
+/** Mark all devices that are peers of this device as failed.
+ * Mark the device driver too, so that it can see the failure
+ * immediately; this is critical, since some drivers poll
+ * status registers in interrupts ... If a driver is polling,
+ * and the slot is frozen, then the driver can deadlock in
+ * an interrupt context, which is bad.
+ */
- __get_cpu_var(slot_resets)++;
+static void __eeh_mark_slot (struct device_node *dn, int mode_flag)
+{
+ while (dn) {
+ if (PCI_DN(dn)) {
+ PCI_DN(dn)->eeh_mode |= mode_flag;
- pci_dev_put(event->dev);
- kfree(event);
+ if (dn->child)
+ __eeh_mark_slot (dn->child, mode_flag);
+ }
+ dn = dn->sibling;
}
}
-/**
- * eeh_token_to_phys - convert EEH address token to phys address
- * @token i/o token, should be address in the form 0xE....
- */
-static inline unsigned long eeh_token_to_phys(unsigned long token)
+void eeh_mark_slot (struct device_node *dn, int mode_flag)
{
- pte_t *ptep;
- unsigned long pa;
+ dn = find_device_pe (dn);
+ PCI_DN(dn)->eeh_mode |= mode_flag;
+ __eeh_mark_slot (dn->child, mode_flag);
+}
- ptep = find_linux_pte(init_mm.pgd, token);
- if (!ptep)
- return token;
- pa = pte_pfn(*ptep) << PAGE_SHIFT;
+static void __eeh_clear_slot (struct device_node *dn, int mode_flag)
+{
+ while (dn) {
+ if (PCI_DN(dn)) {
+ PCI_DN(dn)->eeh_mode &= ~mode_flag;
+ PCI_DN(dn)->eeh_check_count = 0;
+ if (dn->child)
+ __eeh_clear_slot (dn->child, mode_flag);
+ }
+ dn = dn->sibling;
+ }
+}
- return pa | (token & (PAGE_SIZE-1));
+void eeh_clear_slot (struct device_node *dn, int mode_flag)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&confirm_error_lock, flags);
+ dn = find_device_pe (dn);
+ PCI_DN(dn)->eeh_mode &= ~mode_flag;
+ PCI_DN(dn)->eeh_check_count = 0;
+ __eeh_clear_slot (dn->child, mode_flag);
+ spin_unlock_irqrestore(&confirm_error_lock, flags);
}
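
The mark/clear helpers added above walk the device tree the classic child/sibling way: recurse into each node's first child, then step along the sibling chain. A toy rendering of the same traversal (struct device_node itself is not part of this patch, so a minimal stand-in type is used):

#include <stdio.h>
#include <stddef.h>

struct ex_node {
        int eeh_mode;
        struct ex_node *child;
        struct ex_node *sibling;
};

static void ex_mark_slot(struct ex_node *dn, int mode_flag)
{
        while (dn) {
                dn->eeh_mode |= mode_flag;
                if (dn->child)
                        ex_mark_slot(dn->child, mode_flag);  /* descend */
                dn = dn->sibling;                            /* then walk peers */
        }
}

int main(void)
{
        struct ex_node leaf = { 0, NULL, NULL };
        struct ex_node peer = { 0, NULL, NULL };
        struct ex_node root = { 0, &leaf, &peer };

        ex_mark_slot(&root, 0x4);
        printf("%d %d %d\n", root.eeh_mode, leaf.eeh_mode, peer.eeh_mode);  /* 4 4 4 */
        return 0;
}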
/**
@@ -526,7 +533,7 @@ static inline unsigned long eeh_token_to_phys(unsigned long token)
* will query firmware for the EEH status.
*
* Returns 0 if there has not been an EEH error; otherwise returns
- * a non-zero value and queues up a solt isolation event notification.
+ * a non-zero value and queues up a slot isolation event notification.
*
* It is safe to call this routine in an interrupt context.
*/
@@ -535,42 +542,59 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
int ret;
int rets[3];
unsigned long flags;
- int rc, reset_state;
- struct eeh_event *event;
struct pci_dn *pdn;
+ int rc = 0;
__get_cpu_var(total_mmio_ffs)++;
if (!eeh_subsystem_enabled)
return 0;
- if (!dn)
+ if (!dn) {
+ __get_cpu_var(no_dn)++;
return 0;
- pdn = dn->data;
+ }
+ pdn = PCI_DN(dn);
/* Access to IO BARs might get this far and still not want checking. */
- if (!pdn->eeh_capable || !(pdn->eeh_mode & EEH_MODE_SUPPORTED) ||
+ if (!(pdn->eeh_mode & EEH_MODE_SUPPORTED) ||
pdn->eeh_mode & EEH_MODE_NOCHECK) {
+ __get_cpu_var(ignored_check)++;
+#ifdef DEBUG
+ printk ("EEH:ignored check (%x) for %s %s\n",
+ pdn->eeh_mode, pci_name (dev), dn->full_name);
+#endif
return 0;
}
if (!pdn->eeh_config_addr) {
+ __get_cpu_var(no_cfg_addr)++;
return 0;
}
- /*
- * If we already have a pending isolation event for this
- * slot, we know it's bad already, we don't need to check...
+ /* If we already have a pending isolation event for this
+ * slot, we know it's bad already, we don't need to check.
+ * Do this checking under a lock; as multiple PCI devices
+ * in one slot might report errors simultaneously, and we
+ * only want one error recovery routine running.
*/
+ spin_lock_irqsave(&confirm_error_lock, flags);
+ rc = 1;
if (pdn->eeh_mode & EEH_MODE_ISOLATED) {
- atomic_inc(&eeh_fail_count);
- if (atomic_read(&eeh_fail_count) >= EEH_MAX_FAILS) {
+ pdn->eeh_check_count ++;
+ if (pdn->eeh_check_count >= EEH_MAX_FAILS) {
+ printk (KERN_ERR "EEH: Device driver ignored %d bad reads, panicing\n",
+ pdn->eeh_check_count);
+ dump_stack();
+
/* re-read the slot reset state */
- if (read_slot_reset_state(dn, rets) != 0)
+ if (read_slot_reset_state(pdn, rets) != 0)
rets[0] = -1; /* reset state unknown */
- eeh_panic(dev, rets[0]);
+
+ /* If we are here, then we hit an infinite loop. Stop. */
+ panic("EEH: MMIO halt (%d) on device:%s\n", rets[0], pci_name(dev));
}
- return 0;
+ goto dn_unlock;
}
/*
@@ -580,66 +604,69 @@ int eeh_dn_check_failure(struct device_node *dn, struct pci_dev *dev)
* function zero of a multi-function device.
* In any case they must share a common PHB.
*/
- ret = read_slot_reset_state(dn, rets);
- if (!(ret == 0 && rets[1] == 1 && (rets[0] == 2 || rets[0] == 4))) {
+ ret = read_slot_reset_state(pdn, rets);
+
+ /* If the call to firmware failed, punt */
+ if (ret != 0) {
+ printk(KERN_WARNING "EEH: read_slot_reset_state() failed; rc=%d dn=%s\n",
+ ret, dn->full_name);
__get_cpu_var(false_positives)++;
- return 0;
+ rc = 0;
+ goto dn_unlock;
}
- /* prevent repeated reports of this failure */
- pdn->eeh_mode |= EEH_MODE_ISOLATED;
-
- reset_state = rets[0];
-
- spin_lock_irqsave(&slot_errbuf_lock, flags);
- memset(slot_errbuf, 0, eeh_error_buf_size);
-
- rc = rtas_call(ibm_slot_error_detail,
- 8, 1, NULL, pdn->eeh_config_addr,
- BUID_HI(pdn->phb->buid),
- BUID_LO(pdn->phb->buid), NULL, 0,
- virt_to_phys(slot_errbuf),
- eeh_error_buf_size,
- 1 /* Temporary Error */);
-
- if (rc == 0)
- log_error(slot_errbuf, ERR_TYPE_RTAS_LOG, 0);
- spin_unlock_irqrestore(&slot_errbuf_lock, flags);
+ /* If EEH is not supported on this device, punt. */
+ if (rets[1] != 1) {
+ printk(KERN_WARNING "EEH: event on unsupported device, rc=%d dn=%s\n",
+ ret, dn->full_name);
+ __get_cpu_var(false_positives)++;
+ rc = 0;
+ goto dn_unlock;
+ }
- printk(KERN_INFO "EEH: MMIO failure (%d) on device: %s %s\n",
- rets[0], dn->name, dn->full_name);
- event = kmalloc(sizeof(*event), GFP_ATOMIC);
- if (event == NULL) {
- eeh_panic(dev, reset_state);
- return 1;
- }
+ /* If not the kind of error we know about, punt. */
+ if (rets[0] != 2 && rets[0] != 4 && rets[0] != 5) {
+ __get_cpu_var(false_positives)++;
+ rc = 0;
+ goto dn_unlock;
+ }
- event->dev = dev;
- event->dn = dn;
- event->reset_state = reset_state;
+ /* Note that config-io to empty slots may fail;
+ * we recognize an empty slot because it has no children. */
+ if ((rets[0] == 5) && (dn->child == NULL)) {
+ __get_cpu_var(false_positives)++;
+ rc = 0;
+ goto dn_unlock;
+ }
- /* We may or may not be called in an interrupt context */
- spin_lock_irqsave(&eeh_eventlist_lock, flags);
- list_add(&event->list, &eeh_eventlist);
- spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
+ __get_cpu_var(slot_resets)++;
+
+ /* Avoid repeated reports of this failure, including problems
+ * with other functions on this device, and functions under
+ * bridges. */
+ eeh_mark_slot (dn, EEH_MODE_ISOLATED);
+ spin_unlock_irqrestore(&confirm_error_lock, flags);
+ eeh_send_failure_event (dn, dev, rets[0], rets[2]);
+
/* Most EEH events are due to device driver bugs. Having
* a stack trace will help the device-driver authors figure
* out what happened. So print that out. */
- dump_stack();
- schedule_work(&eeh_event_wq);
+ if (rets[0] != 5) dump_stack();
+ return 1;
- return 0;
+dn_unlock:
+ spin_unlock_irqrestore(&confirm_error_lock, flags);
+ return rc;
}
-EXPORT_SYMBOL(eeh_dn_check_failure);
+EXPORT_SYMBOL_GPL(eeh_dn_check_failure);
/**
* eeh_check_failure - check if all 1's data is due to EEH slot freeze
* @token i/o token, should be address in the form 0xA....
* @val value, should be all 1's (XXX why do we need this arg??)
*
- * Check for an eeh failure at the given token address.
* Check for an EEH failure at the given token address. Call this
* routine if the result of a read was all 0xff's and you want to
* find out if this is due to an EEH slot freeze event. This routine
@@ -656,8 +683,10 @@ unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned lon
/* Finding the phys addr + pci device; this is pretty quick. */
addr = eeh_token_to_phys((unsigned long __force) token);
dev = pci_get_device_by_addr(addr);
- if (!dev)
+ if (!dev) {
+ __get_cpu_var(no_device)++;
return val;
+ }
dn = pci_device_to_OF_node(dev);
eeh_dn_check_failure (dn, dev);
@@ -668,6 +697,217 @@ unsigned long eeh_check_failure(const volatile void __iomem *token, unsigned lon
EXPORT_SYMBOL(eeh_check_failure);
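For context, a minimal sketch of how a driver is expected to consume this interface; the register offset, base pointer, and function name are illustrative and not part of this patch, with the declarations assumed to come from <linux/pci.h>, <asm/io.h> and <asm/eeh.h> on this platform:

	static u32 example_read_status(void __iomem *base)
	{
		u32 val = readl(base + 0x10);	/* hypothetical status register */

		/* All-ones may mean a frozen slot rather than real data; let
		 * the EEH core decide and, if so, queue a recovery event. */
		if (val == ~0U)
			val = eeh_check_failure(base + 0x10, val);

		return val;
	}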
+/* ------------------------------------------------------------- */
+/* The code below deals with error recovery */
+
+/** Return a negative value for a permanent error, otherwise return
+ * the number of milliseconds to wait until the PCI slot is
+ * ready to be used.
+ */
+static int
+eeh_slot_availability(struct pci_dn *pdn)
+{
+ int rc;
+ int rets[3];
+
+ rc = read_slot_reset_state(pdn, rets);
+
+ if (rc) return rc;
+
+ if (rets[1] == 0) return -1; /* EEH is not supported */
+ if (rets[0] == 0) return 0; /* Oll Korrect */
+ if (rets[0] == 5) {
+ if (rets[2] == 0) return -1; /* permanently unavailable */
+ return rets[2]; /* number of millisecs to wait */
+ }
+ return -1;
+}
+
+/** rtas_pci_slot_reset raises/lowers the pci #RST line
+ * state: 1/0 to raise/lower the #RST
+ *
+ * Clear the EEH-frozen condition on a slot. This routine
+ * asserts the PCI #RST line if the 'state' argument is '1',
+ * and drops the #RST line if 'state' is '0'. This routine is
+ * safe to call in an interrupt context.
+ *
+ */
+
+static void
+rtas_pci_slot_reset(struct pci_dn *pdn, int state)
+{
+ int rc;
+
+ BUG_ON (pdn==NULL);
+
+ if (!pdn->phb) {
+ printk (KERN_WARNING "EEH: in slot reset, device node %s has no phb\n",
+ pdn->node->full_name);
+ return;
+ }
+
+ rc = rtas_call(ibm_set_slot_reset,4,1, NULL,
+ pdn->eeh_config_addr,
+ BUID_HI(pdn->phb->buid),
+ BUID_LO(pdn->phb->buid),
+ state);
+ if (rc) {
+ printk (KERN_WARNING "EEH: Unable to reset the failed slot, (%d) #RST=%d dn=%s\n",
+ rc, state, pdn->node->full_name);
+ return;
+ }
+}
+
+/** rtas_set_slot_reset -- assert the pci #RST line for 1/4 second
+ * dn -- device node to be reset.
+ */
+
+void
+rtas_set_slot_reset(struct pci_dn *pdn)
+{
+ int i, rc;
+
+ rtas_pci_slot_reset (pdn, 1);
+
+ /* The PCI bus requires that the reset be held high for at least
+ * 100 milliseconds. We wait a bit longer 'just in case'. */
+
+#define PCI_BUS_RST_HOLD_TIME_MSEC 250
+ msleep (PCI_BUS_RST_HOLD_TIME_MSEC);
+
+ /* We might get hit with another EEH freeze as soon as the
+ * pci slot reset line is dropped. Make sure we don't miss
+ * these, and clear the flag now. */
+ eeh_clear_slot (pdn->node, EEH_MODE_ISOLATED);
+
+ rtas_pci_slot_reset (pdn, 0);
+
+ /* After a PCI slot has been reset, the PCI Express spec requires
+ * a 1.5 second idle time for the bus to stabilize, before starting
+ * up traffic. */
+#define PCI_BUS_SETTLE_TIME_MSEC 1800
+ msleep (PCI_BUS_SETTLE_TIME_MSEC);
+
+ /* Now double check with the firmware to make sure the device is
+ * ready to be used; if not, wait for recovery. */
+ for (i=0; i<10; i++) {
+ rc = eeh_slot_availability (pdn);
+ if (rc <= 0) break;
+
+ msleep (rc+100);
+ }
+}
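A rough sketch of how a recovery thread might chain this helper with the bridge and BAR routines added further down in this file; the function name is hypothetical, and driver notification and retry policy are deliberately omitted:

	static void example_recover_slot(struct pci_dn *pdn)
	{
		/* Hold #RST, let the bus settle, poll firmware until ready. */
		rtas_set_slot_reset(pdn);

		/* Firmware reconfigures bridges on request ... */
		rtas_configure_bridge(pdn);

		/* ... but device BARs must be rewritten from the saved copies. */
		eeh_restore_bars(pdn);
	}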
+
+/* ------------------------------------------------------- */
+/** Save and restore of PCI BARs
+ *
+ * Although firmware will set up BARs during boot, it doesn't
+ * restore device BARs after a device reset; it will, if requested,
+ * set up bridge configuration. Thus, we need to configure the
+ * PCI devices ourselves.
+ */
+
+/**
+ * __restore_bars - Restore the Base Address Registers
+ * Loads the PCI configuration space base address registers,
+ * the expansion ROM base address, the latency timer, etc.
+ * from the saved values in the device node.
+ */
+static inline void __restore_bars (struct pci_dn *pdn)
+{
+ int i;
+
+ if (NULL==pdn->phb) return;
+ for (i=4; i<10; i++) {
+ rtas_write_config(pdn, i*4, 4, pdn->config_space[i]);
+ }
+
+ /* 12 == Expansion ROM Address */
+ rtas_write_config(pdn, 12*4, 4, pdn->config_space[12]);
+
+#define BYTE_SWAP(OFF) (8*((OFF)/4)+3-(OFF))
+#define SAVED_BYTE(OFF) (((u8 *)(pdn->config_space))[BYTE_SWAP(OFF)])
+
+ rtas_write_config (pdn, PCI_CACHE_LINE_SIZE, 1,
+ SAVED_BYTE(PCI_CACHE_LINE_SIZE));
+
+ rtas_write_config (pdn, PCI_LATENCY_TIMER, 1,
+ SAVED_BYTE(PCI_LATENCY_TIMER));
+
+ /* max latency, min grant, interrupt pin and line */
+ rtas_write_config(pdn, 15*4, 4, pdn->config_space[15]);
+}
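A worked example of the SAVED_BYTE()/BYTE_SWAP() indexing above, assuming the big-endian layout these machines use for the dwords returned by pci_read_config_dword():

	/*
	 * PCI_CACHE_LINE_SIZE is config offset 0x0c, i.e. byte 0 of dword 3.
	 * BYTE_SWAP(0x0c) = 8*(12/4) + 3 - 12 = 15, so SAVED_BYTE() reads
	 * byte 3 of the stored dword 3 -- which is where the little-endian
	 * PCI byte 0 lands when that dword is stored on a big-endian host.
	 */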
+
+/**
+ * eeh_restore_bars - restore the PCI config space info
+ *
+ * This routine performs a recursive walk over the children
+ * of this device as well.
+ */
+void eeh_restore_bars(struct pci_dn *pdn)
+{
+ struct device_node *dn;
+ if (!pdn)
+ return;
+
+ if (! pdn->eeh_is_bridge)
+ __restore_bars (pdn);
+
+ dn = pdn->node->child;
+ while (dn) {
+ eeh_restore_bars (PCI_DN(dn));
+ dn = dn->sibling;
+ }
+}
+
+/**
+ * eeh_save_bars - save device bars
+ *
+ * Save the values of the device BARs. Unlike the restore
+ * routine, this routine is *not* recursive, because
+ * PCI devices are added individually, whereas a restore
+ * resets an entire slot at a time.
+ */
+static void eeh_save_bars(struct pci_dev * pdev, struct pci_dn *pdn)
+{
+ int i;
+
+ if (!pdev || !pdn )
+ return;
+
+ for (i = 0; i < 16; i++)
+ pci_read_config_dword(pdev, i * 4, &pdn->config_space[i]);
+
+ if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
+ pdn->eeh_is_bridge = 1;
+}
+
+void
+rtas_configure_bridge(struct pci_dn *pdn)
+{
+ int token = rtas_token ("ibm,configure-bridge");
+ int rc;
+
+ if (token == RTAS_UNKNOWN_SERVICE)
+ return;
+ rc = rtas_call(token,3,1, NULL,
+ pdn->eeh_config_addr,
+ BUID_HI(pdn->phb->buid),
+ BUID_LO(pdn->phb->buid));
+ if (rc) {
+ printk (KERN_WARNING "EEH: Unable to configure device bridge (%d) for %s\n",
+ rc, pdn->node->full_name);
+ }
+}
+
+/* ------------------------------------------------------------- */
+/* The code below deals with enabling EEH for devices during the
+ * early boot sequence. EEH must be enabled before any PCI probing
+ * can be done.
+ */
+
+#define EEH_ENABLE 1
+
struct eeh_early_enable_info {
unsigned int buid_hi;
unsigned int buid_lo;
@@ -684,9 +924,11 @@ static void *early_enable_eeh(struct device_node *dn, void *data)
u32 *device_id = (u32 *)get_property(dn, "device-id", NULL);
u32 *regs;
int enable;
- struct pci_dn *pdn = dn->data;
+ struct pci_dn *pdn = PCI_DN(dn);
pdn->eeh_mode = 0;
+ pdn->eeh_check_count = 0;
+ pdn->eeh_freeze_count = 0;
if (status && strcmp(status, "ok") != 0)
return NULL; /* ignore devices with bad status */
@@ -723,8 +965,9 @@ static void *early_enable_eeh(struct device_node *dn, void *data)
/* First register entry is addr (00BBSS00) */
/* Try to enable eeh */
ret = rtas_call(ibm_set_eeh_option, 4, 1, NULL,
- regs[0], info->buid_hi, info->buid_lo,
- EEH_ENABLE);
+ regs[0], info->buid_hi, info->buid_lo,
+ EEH_ENABLE);
+
if (ret == 0) {
eeh_subsystem_enabled = 1;
pdn->eeh_mode |= EEH_MODE_SUPPORTED;
@@ -736,7 +979,7 @@ static void *early_enable_eeh(struct device_node *dn, void *data)
/* This device doesn't support EEH, but it may have an
* EEH parent, in which case we mark it as supported. */
- if (dn->parent && dn->parent->data
+ if (dn->parent && PCI_DN(dn->parent)
&& (PCI_DN(dn->parent)->eeh_mode & EEH_MODE_SUPPORTED)) {
/* Parent supports EEH. */
pdn->eeh_mode |= EEH_MODE_SUPPORTED;
@@ -749,7 +992,7 @@ static void *early_enable_eeh(struct device_node *dn, void *data)
dn->full_name);
}
- return NULL;
+ return NULL;
}
/*
@@ -770,6 +1013,9 @@ void __init eeh_init(void)
struct device_node *phb, *np;
struct eeh_early_enable_info info;
+ spin_lock_init(&confirm_error_lock);
+ spin_lock_init(&slot_errbuf_lock);
+
np = of_find_node_by_path("/rtas");
if (np == NULL)
return;
@@ -797,13 +1043,11 @@ void __init eeh_init(void)
for (phb = of_find_node_by_name(NULL, "pci"); phb;
phb = of_find_node_by_name(phb, "pci")) {
unsigned long buid;
- struct pci_dn *pci;
buid = get_phb_buid(phb);
- if (buid == 0 || phb->data == NULL)
+ if (buid == 0 || PCI_DN(phb) == NULL)
continue;
- pci = phb->data;
info.buid_lo = BUID_LO(buid);
info.buid_hi = BUID_HI(buid);
traverse_pci_devices(phb, early_enable_eeh, &info);
@@ -832,11 +1076,13 @@ void eeh_add_device_early(struct device_node *dn)
struct pci_controller *phb;
struct eeh_early_enable_info info;
- if (!dn || !dn->data)
+ if (!dn || !PCI_DN(dn))
return;
phb = PCI_DN(dn)->phb;
if (NULL == phb || 0 == phb->buid) {
- printk(KERN_WARNING "EEH: Expected buid but found none\n");
+ printk(KERN_WARNING "EEH: Expected buid but found none for %s\n",
+ dn->full_name);
+ dump_stack();
return;
}
@@ -844,7 +1090,7 @@ void eeh_add_device_early(struct device_node *dn)
info.buid_lo = BUID_LO(phb->buid);
early_enable_eeh(dn, &info);
}
-EXPORT_SYMBOL(eeh_add_device_early);
+EXPORT_SYMBOL_GPL(eeh_add_device_early);
/**
* eeh_add_device_late - perform EEH initialization for the indicated pci device
@@ -855,6 +1101,9 @@ EXPORT_SYMBOL(eeh_add_device_early);
*/
void eeh_add_device_late(struct pci_dev *dev)
{
+ struct device_node *dn;
+ struct pci_dn *pdn;
+
if (!dev || !eeh_subsystem_enabled)
return;
@@ -862,9 +1111,15 @@ void eeh_add_device_late(struct pci_dev *dev)
printk(KERN_DEBUG "EEH: adding device %s\n", pci_name(dev));
#endif
+ pci_dev_get (dev);
+ dn = pci_device_to_OF_node(dev);
+ pdn = PCI_DN(dn);
+ pdn->pcidev = dev;
+
pci_addr_cache_insert_device (dev);
+ eeh_save_bars(dev, pdn);
}
-EXPORT_SYMBOL(eeh_add_device_late);
+EXPORT_SYMBOL_GPL(eeh_add_device_late);
/**
* eeh_remove_device - undo EEH setup for the indicated pci device
@@ -875,6 +1130,7 @@ EXPORT_SYMBOL(eeh_add_device_late);
*/
void eeh_remove_device(struct pci_dev *dev)
{
+ struct device_node *dn;
if (!dev || !eeh_subsystem_enabled)
return;
@@ -883,20 +1139,29 @@ void eeh_remove_device(struct pci_dev *dev)
printk(KERN_DEBUG "EEH: remove device %s\n", pci_name(dev));
#endif
pci_addr_cache_remove_device(dev);
+
+ dn = pci_device_to_OF_node(dev);
+ PCI_DN(dn)->pcidev = NULL;
+ pci_dev_put (dev);
}
-EXPORT_SYMBOL(eeh_remove_device);
+EXPORT_SYMBOL_GPL(eeh_remove_device);
static int proc_eeh_show(struct seq_file *m, void *v)
{
unsigned int cpu;
unsigned long ffs = 0, positives = 0, failures = 0;
unsigned long resets = 0;
+ unsigned long no_dev = 0, no_dn = 0, no_cfg = 0, no_check = 0;
for_each_cpu(cpu) {
ffs += per_cpu(total_mmio_ffs, cpu);
positives += per_cpu(false_positives, cpu);
failures += per_cpu(ignored_failures, cpu);
resets += per_cpu(slot_resets, cpu);
+ no_dev += per_cpu(no_device, cpu);
+ no_dn += per_cpu(no_dn, cpu);
+ no_cfg += per_cpu(no_cfg_addr, cpu);
+ no_check += per_cpu(ignored_check, cpu);
}
if (0 == eeh_subsystem_enabled) {
@@ -904,13 +1169,17 @@ static int proc_eeh_show(struct seq_file *m, void *v)
seq_printf(m, "eeh_total_mmio_ffs=%ld\n", ffs);
} else {
seq_printf(m, "EEH Subsystem is enabled\n");
- seq_printf(m, "eeh_total_mmio_ffs=%ld\n"
- "eeh_false_positives=%ld\n"
- "eeh_ignored_failures=%ld\n"
- "eeh_slot_resets=%ld\n"
- "eeh_fail_count=%d\n",
- ffs, positives, failures, resets,
- eeh_fail_count.counter);
+ seq_printf(m,
+ "no device=%ld\n"
+ "no device node=%ld\n"
+ "no config address=%ld\n"
+ "check not wanted=%ld\n"
+ "eeh_total_mmio_ffs=%ld\n"
+ "eeh_false_positives=%ld\n"
+ "eeh_ignored_failures=%ld\n"
+ "eeh_slot_resets=%ld\n",
+ no_dev, no_dn, no_cfg, no_check,
+ ffs, positives, failures, resets);
}
return 0;
@@ -932,7 +1201,7 @@ static int __init eeh_init_proc(void)
{
struct proc_dir_entry *e;
- if (systemcfg->platform & PLATFORM_PSERIES) {
+ if (platform_is_pseries()) {
e = create_proc_entry("ppc64/eeh", 0, NULL);
if (e)
e->proc_fops = &proc_eeh_operations;
diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c
new file mode 100644
index 00000000000..92497333c2b
--- /dev/null
+++ b/arch/powerpc/platforms/pseries/eeh_event.c
@@ -0,0 +1,155 @@
+/*
+ * eeh_event.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Copyright (c) 2005 Linas Vepstas <linas@linas.org>
+ */
+
+#include <linux/list.h>
+#include <linux/pci.h>
+#include <asm/eeh_event.h>
+
+/** Overview:
+ * EEH error states may be detected within exception handlers;
+ * however, the recovery processing needs to occur asynchronously
+ * in a normal kernel context and not an interrupt context.
+ * This pair of routines creates an event and queues it onto a
+ * work-queue, where a worker thread can drive recovery.
+ */
+
+/* EEH event workqueue setup. */
+static spinlock_t eeh_eventlist_lock = SPIN_LOCK_UNLOCKED;
+LIST_HEAD(eeh_eventlist);
+static void eeh_thread_launcher(void *);
+DECLARE_WORK(eeh_event_wq, eeh_thread_launcher, NULL);
+
+/**
+ * eeh_panic - call panic() for an eeh event that cannot be handled.
+ * The philosophy of this routine is that it is better to panic and
+ * halt the OS than it is to risk possible data corruption by
+ * oblivious device drivers that don't know better.
+ *
+ * @dev pci device that had an eeh event
+ * @reset_state current reset state of the device slot
+ */
+static void eeh_panic(struct pci_dev *dev, int reset_state)
+{
+ /*
+ * Since the panic_on_oops sysctl is used to halt the system
+ * in light of potential corruption, we can use it here.
+ */
+ if (panic_on_oops) {
+ panic("EEH: MMIO failure (%d) on device:%s\n", reset_state,
+ pci_name(dev));
+ }
+ else {
+ printk(KERN_INFO "EEH: Ignored MMIO failure (%d) on device:%s\n",
+ reset_state, pci_name(dev));
+ }
+}
+
+/**
+ * eeh_event_handler - dispatch EEH events. The detection of a frozen
+ * slot can occur inside an interrupt, where it can be hard to do
+ * anything about it. The goal of this routine is to pull these
+ * detection events out of the context of the interrupt handler, and
+ * re-dispatch them for processing at a later time in a normal context.
+ *
+ * @dummy - unused
+ */
+static int eeh_event_handler(void * dummy)
+{
+ unsigned long flags;
+ struct eeh_event *event;
+
+ daemonize ("eehd");
+
+ while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ spin_lock_irqsave(&eeh_eventlist_lock, flags);
+ event = NULL;
+ if (!list_empty(&eeh_eventlist)) {
+ event = list_entry(eeh_eventlist.next, struct eeh_event, list);
+ list_del(&event->list);
+ }
+ spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
+ if (event == NULL)
+ break;
+
+ printk(KERN_INFO "EEH: Detected PCI bus error on device %s\n",
+ pci_name(event->dev));
+
+ eeh_panic (event->dev, event->state);
+
+ kfree(event);
+ }
+
+ return 0;
+}
+
+/**
+ * eeh_thread_launcher
+ *
+ * @dummy - unused
+ */
+static void eeh_thread_launcher(void *dummy)
+{
+ if (kernel_thread(eeh_event_handler, NULL, CLONE_KERNEL) < 0)
+ printk(KERN_ERR "Failed to start EEH daemon\n");
+}
+
+/**
+ * eeh_send_failure_event - generate a PCI error event
+ * @dev pci device
+ *
+ * This routine can be called within an interrupt context;
+ * the actual event will be delivered in a normal context
+ * (from a workqueue).
+ */
+int eeh_send_failure_event (struct device_node *dn,
+ struct pci_dev *dev,
+ int state,
+ int time_unavail)
+{
+ unsigned long flags;
+ struct eeh_event *event;
+
+ event = kmalloc(sizeof(*event), GFP_ATOMIC);
+ if (event == NULL) {
+ printk (KERN_ERR "EEH: out of memory, event not handled\n");
+ return 1;
+ }
+
+ if (dev)
+ pci_dev_get(dev);
+
+ event->dn = dn;
+ event->dev = dev;
+ event->state = state;
+ event->time_unavail = time_unavail;
+
+ /* We may or may not be called in an interrupt context */
+ spin_lock_irqsave(&eeh_eventlist_lock, flags);
+ list_add(&event->list, &eeh_eventlist);
+ spin_unlock_irqrestore(&eeh_eventlist_lock, flags);
+
+ schedule_work(&eeh_event_wq);
+
+ return 0;
+}
+
+/********************** END OF FILE ******************************/
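A short note on the interrupt-safety claim for eeh_send_failure_event(), restating the pattern the code above follows:

	/*
	 * The routine never sleeps: it allocates with GFP_ATOMIC, protects
	 * the event list with spin_lock_irqsave(), and defers all real work
	 * via schedule_work(), so recovery runs later in the eehd thread in
	 * normal process context.
	 */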
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index fcc50bfd43f..97ba5214417 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -42,7 +42,6 @@
#include <asm/machdep.h>
#include <asm/abs_addr.h>
#include <asm/pSeries_reconfig.h>
-#include <asm/systemcfg.h>
#include <asm/firmware.h>
#include <asm/tce.h>
#include <asm/ppc-pci.h>
@@ -582,7 +581,7 @@ void iommu_init_early_pSeries(void)
return;
}
- if (systemcfg->platform & PLATFORM_LPAR) {
+ if (platform_is_lpar()) {
if (firmware_has_feature(FW_FEATURE_MULTITCE)) {
ppc_md.tce_build = tce_buildmulti_pSeriesLP;
ppc_md.tce_free = tce_freemulti_pSeriesLP;
diff --git a/arch/powerpc/platforms/pseries/pci.c b/arch/powerpc/platforms/pseries/pci.c
index c198656a3bb..999a9620b5c 100644
--- a/arch/powerpc/platforms/pseries/pci.c
+++ b/arch/powerpc/platforms/pseries/pci.c
@@ -107,7 +107,6 @@ static void __init pSeries_request_regions(void)
void __init pSeries_final_fixup(void)
{
- phbs_remap_io();
pSeries_request_regions();
pci_addr_cache_build();
@@ -123,7 +122,7 @@ static void fixup_winbond_82c105(struct pci_dev* dev)
int i;
unsigned int reg;
- if (!(systemcfg->platform & PLATFORM_PSERIES))
+ if (!platform_is_pseries())
return;
printk("Using INTC for W82c105 IDE controller.\n");
diff --git a/arch/powerpc/platforms/pseries/reconfig.c b/arch/powerpc/platforms/pseries/reconfig.c
index d7d40033945..d8864164dbe 100644
--- a/arch/powerpc/platforms/pseries/reconfig.c
+++ b/arch/powerpc/platforms/pseries/reconfig.c
@@ -408,7 +408,7 @@ static int proc_ppc64_create_ofdt(void)
{
struct proc_dir_entry *ent;
- if (!(systemcfg->platform & PLATFORM_PSERIES))
+ if (!platform_is_pseries())
return 0;
ent = create_proc_entry("ppc64/ofdt", S_IWUSR, NULL);
diff --git a/arch/powerpc/platforms/pseries/rtasd.c b/arch/powerpc/platforms/pseries/rtasd.c
index e26b0420b6d..00cf331a1dc 100644
--- a/arch/powerpc/platforms/pseries/rtasd.c
+++ b/arch/powerpc/platforms/pseries/rtasd.c
@@ -482,10 +482,12 @@ static int __init rtas_init(void)
{
struct proc_dir_entry *entry;
- /* No RTAS, only warn if we are on a pSeries box */
+ if (!platform_is_pseries())
+ return 0;
+
+ /* No RTAS */
if (rtas_token("event-scan") == RTAS_UNKNOWN_SERVICE) {
- if (systemcfg->platform & PLATFORM_PSERIES)
- printk(KERN_INFO "rtasd: no event-scan on system\n");
+ printk(KERN_INFO "rtasd: no event-scan on system\n");
return 1;
}
diff --git a/arch/ppc64/kernel/scanlog.c b/arch/powerpc/platforms/pseries/scanlog.c
index 2edc947f7c4..2edc947f7c4 100644
--- a/arch/ppc64/kernel/scanlog.c
+++ b/arch/powerpc/platforms/pseries/scanlog.c
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index a093a0d4dd6..e94247c28d4 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -249,7 +249,7 @@ static void __init pSeries_setup_arch(void)
ppc_md.idle_loop = default_idle;
}
- if (systemcfg->platform & PLATFORM_LPAR)
+ if (platform_is_lpar())
ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
else
ppc_md.enable_pmcs = power4_enable_pmcs;
@@ -378,7 +378,7 @@ static void __init pSeries_init_early(void)
fw_feature_init();
- if (systemcfg->platform & PLATFORM_LPAR)
+ if (platform_is_lpar())
hpte_init_lpar();
else {
hpte_init_native();
@@ -388,7 +388,7 @@ static void __init pSeries_init_early(void)
generic_find_legacy_serial_ports(&physport, &default_speed);
- if (systemcfg->platform & PLATFORM_LPAR)
+ if (platform_is_lpar())
find_udbg_vterm();
else if (physport) {
/* Map the uart for udbg. */
@@ -592,7 +592,7 @@ static void pseries_shared_idle(void)
static int pSeries_pci_probe_mode(struct pci_bus *bus)
{
- if (systemcfg->platform & PLATFORM_LPAR)
+ if (platform_is_lpar())
return PCI_PROBE_DEVTREE;
return PCI_PROBE_NORMAL;
}
diff --git a/arch/powerpc/platforms/pseries/smp.c b/arch/powerpc/platforms/pseries/smp.c
index 7a243e8ccd7..3ba794ca328 100644
--- a/arch/powerpc/platforms/pseries/smp.c
+++ b/arch/powerpc/platforms/pseries/smp.c
@@ -46,6 +46,7 @@
#include <asm/rtas.h>
#include <asm/pSeries_reconfig.h>
#include <asm/mpic.h>
+#include <asm/systemcfg.h>
#include "plpar_wrappers.h"
@@ -96,7 +97,7 @@ int pSeries_cpu_disable(void)
int cpu = smp_processor_id();
cpu_clear(cpu, cpu_online_map);
- systemcfg->processorCount--;
+ _systemcfg->processorCount--;
/*fix boot_cpuid here*/
if (cpu == boot_cpuid)
@@ -441,7 +442,7 @@ void __init smp_init_pSeries(void)
smp_ops->cpu_die = pSeries_cpu_die;
/* Processors can be added/removed only on LPAR */
- if (systemcfg->platform == PLATFORM_PSERIES_LPAR)
+ if (platform_is_lpar())
pSeries_reconfig_notifier_register(&pSeries_smp_nb);
#endif
diff --git a/arch/powerpc/platforms/pseries/xics.c b/arch/powerpc/platforms/pseries/xics.c
index c72c86f05cb..72ac18067ec 100644
--- a/arch/powerpc/platforms/pseries/xics.c
+++ b/arch/powerpc/platforms/pseries/xics.c
@@ -545,7 +545,9 @@ nextnode:
of_node_put(np);
}
- if (systemcfg->platform == PLATFORM_PSERIES) {
+ if (platform_is_lpar())
+ ops = &pSeriesLP_ops;
+ else {
#ifdef CONFIG_SMP
for_each_cpu(i) {
int hard_id;
@@ -561,12 +563,11 @@ nextnode:
#else
xics_per_cpu[0] = ioremap(intr_base, intr_size);
#endif /* CONFIG_SMP */
- } else if (systemcfg->platform == PLATFORM_PSERIES_LPAR) {
- ops = &pSeriesLP_ops;
}
xics_8259_pic.enable = i8259_pic.enable;
xics_8259_pic.disable = i8259_pic.disable;
+ xics_8259_pic.end = i8259_pic.end;
for (i = 0; i < 16; ++i)
get_irq_desc(i)->handler = &xics_8259_pic;
for (; i < NR_IRQS; ++i)
diff --git a/arch/powerpc/sysdev/u3_iommu.c b/arch/powerpc/sysdev/u3_iommu.c
index 543d6590981..f32baf7f469 100644
--- a/arch/powerpc/sysdev/u3_iommu.c
+++ b/arch/powerpc/sysdev/u3_iommu.c
@@ -226,7 +226,7 @@ static void iommu_table_u3_setup(void)
iommu_table_u3.it_busno = 0;
iommu_table_u3.it_offset = 0;
/* it_size is in number of entries */
- iommu_table_u3.it_size = dart_tablesize / sizeof(u32);
+ iommu_table_u3.it_size = (dart_tablesize / sizeof(u32)) >> DART_PAGE_FACTOR;
/* Initialize the common IOMMU code */
iommu_table_u3.it_base = (unsigned long)dart_vbase;
diff --git a/arch/powerpc/xmon/Makefile b/arch/powerpc/xmon/Makefile
index 79a784f0e7a..b20312e5ed2 100644
--- a/arch/powerpc/xmon/Makefile
+++ b/arch/powerpc/xmon/Makefile
@@ -8,4 +8,4 @@ obj-$(CONFIG_8xx) += start_8xx.o
obj-$(CONFIG_6xx) += start_32.o
obj-$(CONFIG_4xx) += start_32.o
obj-$(CONFIG_PPC64) += start_64.o
-obj-y += xmon.o ppc-dis.o ppc-opc.o subr_prf.o setjmp.o
+obj-y += xmon.o ppc-dis.o ppc-opc.o setjmp.o nonstdio.o
diff --git a/arch/powerpc/xmon/nonstdio.c b/arch/powerpc/xmon/nonstdio.c
new file mode 100644
index 00000000000..78765833f4c
--- /dev/null
+++ b/arch/powerpc/xmon/nonstdio.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 1996-2005 Paul Mackerras.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <linux/string.h>
+#include <asm/time.h>
+#include "nonstdio.h"
+
+int xmon_putchar(int c)
+{
+ char ch = c;
+
+ if (c == '\n')
+ xmon_putchar('\r');
+ return xmon_write(&ch, 1) == 1? c: -1;
+}
+
+static char line[256];
+static char *lineptr;
+static int lineleft;
+
+int xmon_expect(const char *str, unsigned long timeout)
+{
+ int c;
+ unsigned long t0;
+
+ /* assume 25MHz default timebase if tb_ticks_per_sec not set yet */
+ timeout *= tb_ticks_per_sec? tb_ticks_per_sec: 25000000;
+ t0 = get_tbl();
+ do {
+ lineptr = line;
+ for (;;) {
+ c = xmon_read_poll();
+ if (c == -1) {
+ if (get_tbl() - t0 > timeout)
+ return 0;
+ continue;
+ }
+ if (c == '\n')
+ break;
+ if (c != '\r' && lineptr < &line[sizeof(line) - 1])
+ *lineptr++ = c;
+ }
+ *lineptr = 0;
+ } while (strstr(line, str) == NULL);
+ return 1;
+}
+
+int xmon_getchar(void)
+{
+ int c;
+
+ if (lineleft == 0) {
+ lineptr = line;
+ for (;;) {
+ c = xmon_readchar();
+ if (c == -1 || c == 4)
+ break;
+ if (c == '\r' || c == '\n') {
+ *lineptr++ = '\n';
+ xmon_putchar('\n');
+ break;
+ }
+ switch (c) {
+ case 0177:
+ case '\b':
+ if (lineptr > line) {
+ xmon_putchar('\b');
+ xmon_putchar(' ');
+ xmon_putchar('\b');
+ --lineptr;
+ }
+ break;
+ case 'U' & 0x1F:
+ while (lineptr > line) {
+ xmon_putchar('\b');
+ xmon_putchar(' ');
+ xmon_putchar('\b');
+ --lineptr;
+ }
+ break;
+ default:
+ if (lineptr >= &line[sizeof(line) - 1])
+ xmon_putchar('\a');
+ else {
+ xmon_putchar(c);
+ *lineptr++ = c;
+ }
+ }
+ }
+ lineleft = lineptr - line;
+ lineptr = line;
+ }
+ if (lineleft == 0)
+ return -1;
+ --lineleft;
+ return *lineptr++;
+}
+
+char *xmon_gets(char *str, int nb)
+{
+ char *p;
+ int c;
+
+ for (p = str; p < str + nb - 1; ) {
+ c = xmon_getchar();
+ if (c == -1) {
+ if (p == str)
+ return NULL;
+ break;
+ }
+ *p++ = c;
+ if (c == '\n')
+ break;
+ }
+ *p = 0;
+ return str;
+}
+
+void xmon_printf(const char *format, ...)
+{
+ va_list args;
+ int n;
+ static char xmon_outbuf[1024];
+
+ va_start(args, format);
+ n = vsnprintf(xmon_outbuf, sizeof(xmon_outbuf), format, args);
+ va_end(args);
+ xmon_write(xmon_outbuf, n);
+}
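A minimal usage sketch for the console helpers above, in the style of xmon's own command loop; the function name, prompt, and echo strings are illustrative only:

	static void example_prompt(void)
	{
		char buf[64];

		xmon_printf("mon> ");
		if (xmon_gets(buf, sizeof(buf)) != NULL)
			xmon_printf("you typed: %s", buf);
	}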
diff --git a/arch/powerpc/xmon/nonstdio.h b/arch/powerpc/xmon/nonstdio.h
index 84211a21c6f..47cebbd2b1b 100644
--- a/arch/powerpc/xmon/nonstdio.h
+++ b/arch/powerpc/xmon/nonstdio.h
@@ -1,22 +1,14 @@
-typedef int FILE;
-extern FILE *xmon_stdin, *xmon_stdout;
#define EOF (-1)
-#define stdin xmon_stdin
-#define stdout xmon_stdout
+
#define printf xmon_printf
-#define fprintf xmon_fprintf
-#define fputs xmon_fputs
-#define fgets xmon_fgets
#define putchar xmon_putchar
-#define getchar xmon_getchar
-#define putc xmon_putc
-#define getc xmon_getc
-#define fopen(n, m) NULL
-#define fflush(f) do {} while (0)
-#define fclose(f) do {} while (0)
-extern char *fgets(char *, int, void *);
-extern void xmon_printf(const char *, ...);
-extern void xmon_fprintf(void *, const char *, ...);
-extern void xmon_sprintf(char *, const char *, ...);
-#define perror(s) printf("%s: no files!\n", (s))
+extern int xmon_putchar(int c);
+extern int xmon_getchar(void);
+extern char *xmon_gets(char *, int);
+extern void xmon_printf(const char *, ...);
+extern void xmon_map_scc(void);
+extern int xmon_expect(const char *str, unsigned long timeout);
+extern int xmon_write(void *ptr, int nb);
+extern int xmon_readchar(void);
+extern int xmon_read_poll(void);
diff --git a/arch/powerpc/xmon/setjmp.S b/arch/powerpc/xmon/setjmp.S
index f8e40dfd2bf..96a91f10e2e 100644
--- a/arch/powerpc/xmon/setjmp.S
+++ b/arch/powerpc/xmon/setjmp.S
@@ -14,61 +14,61 @@
_GLOBAL(xmon_setjmp)
mflr r0
- STL r0,0(r3)
- STL r1,SZL(r3)
- STL r2,2*SZL(r3)
+ PPC_STL r0,0(r3)
+ PPC_STL r1,SZL(r3)
+ PPC_STL r2,2*SZL(r3)
mfcr r0
- STL r0,3*SZL(r3)
- STL r13,4*SZL(r3)
- STL r14,5*SZL(r3)
- STL r15,6*SZL(r3)
- STL r16,7*SZL(r3)
- STL r17,8*SZL(r3)
- STL r18,9*SZL(r3)
- STL r19,10*SZL(r3)
- STL r20,11*SZL(r3)
- STL r21,12*SZL(r3)
- STL r22,13*SZL(r3)
- STL r23,14*SZL(r3)
- STL r24,15*SZL(r3)
- STL r25,16*SZL(r3)
- STL r26,17*SZL(r3)
- STL r27,18*SZL(r3)
- STL r28,19*SZL(r3)
- STL r29,20*SZL(r3)
- STL r30,21*SZL(r3)
- STL r31,22*SZL(r3)
+ PPC_STL r0,3*SZL(r3)
+ PPC_STL r13,4*SZL(r3)
+ PPC_STL r14,5*SZL(r3)
+ PPC_STL r15,6*SZL(r3)
+ PPC_STL r16,7*SZL(r3)
+ PPC_STL r17,8*SZL(r3)
+ PPC_STL r18,9*SZL(r3)
+ PPC_STL r19,10*SZL(r3)
+ PPC_STL r20,11*SZL(r3)
+ PPC_STL r21,12*SZL(r3)
+ PPC_STL r22,13*SZL(r3)
+ PPC_STL r23,14*SZL(r3)
+ PPC_STL r24,15*SZL(r3)
+ PPC_STL r25,16*SZL(r3)
+ PPC_STL r26,17*SZL(r3)
+ PPC_STL r27,18*SZL(r3)
+ PPC_STL r28,19*SZL(r3)
+ PPC_STL r29,20*SZL(r3)
+ PPC_STL r30,21*SZL(r3)
+ PPC_STL r31,22*SZL(r3)
li r3,0
blr
_GLOBAL(xmon_longjmp)
- CMPI r4,0
+ PPC_LCMPI r4,0
bne 1f
li r4,1
-1: LDL r13,4*SZL(r3)
- LDL r14,5*SZL(r3)
- LDL r15,6*SZL(r3)
- LDL r16,7*SZL(r3)
- LDL r17,8*SZL(r3)
- LDL r18,9*SZL(r3)
- LDL r19,10*SZL(r3)
- LDL r20,11*SZL(r3)
- LDL r21,12*SZL(r3)
- LDL r22,13*SZL(r3)
- LDL r23,14*SZL(r3)
- LDL r24,15*SZL(r3)
- LDL r25,16*SZL(r3)
- LDL r26,17*SZL(r3)
- LDL r27,18*SZL(r3)
- LDL r28,19*SZL(r3)
- LDL r29,20*SZL(r3)
- LDL r30,21*SZL(r3)
- LDL r31,22*SZL(r3)
- LDL r0,3*SZL(r3)
+1: PPC_LL r13,4*SZL(r3)
+ PPC_LL r14,5*SZL(r3)
+ PPC_LL r15,6*SZL(r3)
+ PPC_LL r16,7*SZL(r3)
+ PPC_LL r17,8*SZL(r3)
+ PPC_LL r18,9*SZL(r3)
+ PPC_LL r19,10*SZL(r3)
+ PPC_LL r20,11*SZL(r3)
+ PPC_LL r21,12*SZL(r3)
+ PPC_LL r22,13*SZL(r3)
+ PPC_LL r23,14*SZL(r3)
+ PPC_LL r24,15*SZL(r3)
+ PPC_LL r25,16*SZL(r3)
+ PPC_LL r26,17*SZL(r3)
+ PPC_LL r27,18*SZL(r3)
+ PPC_LL r28,19*SZL(r3)
+ PPC_LL r29,20*SZL(r3)
+ PPC_LL r30,21*SZL(r3)
+ PPC_LL r31,22*SZL(r3)
+ PPC_LL r0,3*SZL(r3)
mtcrf 0x38,r0
- LDL r0,0(r3)
- LDL r1,SZL(r3)
- LDL r2,2*SZL(r3)
+ PPC_LL r0,0(r3)
+ PPC_LL r1,SZL(r3)
+ PPC_LL r2,2*SZL(r3)
mtlr r0
mr r3,r4
blr
@@ -84,52 +84,52 @@ _GLOBAL(xmon_longjmp)
* different ABIs, though).
*/
_GLOBAL(xmon_save_regs)
- STL r0,0*SZL(r3)
- STL r2,2*SZL(r3)
- STL r3,3*SZL(r3)
- STL r4,4*SZL(r3)
- STL r5,5*SZL(r3)
- STL r6,6*SZL(r3)
- STL r7,7*SZL(r3)
- STL r8,8*SZL(r3)
- STL r9,9*SZL(r3)
- STL r10,10*SZL(r3)
- STL r11,11*SZL(r3)
- STL r12,12*SZL(r3)
- STL r13,13*SZL(r3)
- STL r14,14*SZL(r3)
- STL r15,15*SZL(r3)
- STL r16,16*SZL(r3)
- STL r17,17*SZL(r3)
- STL r18,18*SZL(r3)
- STL r19,19*SZL(r3)
- STL r20,20*SZL(r3)
- STL r21,21*SZL(r3)
- STL r22,22*SZL(r3)
- STL r23,23*SZL(r3)
- STL r24,24*SZL(r3)
- STL r25,25*SZL(r3)
- STL r26,26*SZL(r3)
- STL r27,27*SZL(r3)
- STL r28,28*SZL(r3)
- STL r29,29*SZL(r3)
- STL r30,30*SZL(r3)
- STL r31,31*SZL(r3)
+ PPC_STL r0,0*SZL(r3)
+ PPC_STL r2,2*SZL(r3)
+ PPC_STL r3,3*SZL(r3)
+ PPC_STL r4,4*SZL(r3)
+ PPC_STL r5,5*SZL(r3)
+ PPC_STL r6,6*SZL(r3)
+ PPC_STL r7,7*SZL(r3)
+ PPC_STL r8,8*SZL(r3)
+ PPC_STL r9,9*SZL(r3)
+ PPC_STL r10,10*SZL(r3)
+ PPC_STL r11,11*SZL(r3)
+ PPC_STL r12,12*SZL(r3)
+ PPC_STL r13,13*SZL(r3)
+ PPC_STL r14,14*SZL(r3)
+ PPC_STL r15,15*SZL(r3)
+ PPC_STL r16,16*SZL(r3)
+ PPC_STL r17,17*SZL(r3)
+ PPC_STL r18,18*SZL(r3)
+ PPC_STL r19,19*SZL(r3)
+ PPC_STL r20,20*SZL(r3)
+ PPC_STL r21,21*SZL(r3)
+ PPC_STL r22,22*SZL(r3)
+ PPC_STL r23,23*SZL(r3)
+ PPC_STL r24,24*SZL(r3)
+ PPC_STL r25,25*SZL(r3)
+ PPC_STL r26,26*SZL(r3)
+ PPC_STL r27,27*SZL(r3)
+ PPC_STL r28,28*SZL(r3)
+ PPC_STL r29,29*SZL(r3)
+ PPC_STL r30,30*SZL(r3)
+ PPC_STL r31,31*SZL(r3)
/* go up one stack frame for SP */
- LDL r4,0(r1)
- STL r4,1*SZL(r3)
+ PPC_LL r4,0(r1)
+ PPC_STL r4,1*SZL(r3)
/* get caller's LR */
- LDL r0,LRSAVE(r4)
- STL r0,_NIP-STACK_FRAME_OVERHEAD(r3)
- STL r0,_LINK-STACK_FRAME_OVERHEAD(r3)
+ PPC_LL r0,LRSAVE(r4)
+ PPC_STL r0,_NIP-STACK_FRAME_OVERHEAD(r3)
+ PPC_STL r0,_LINK-STACK_FRAME_OVERHEAD(r3)
mfmsr r0
- STL r0,_MSR-STACK_FRAME_OVERHEAD(r3)
+ PPC_STL r0,_MSR-STACK_FRAME_OVERHEAD(r3)
mfctr r0
- STL r0,_CTR-STACK_FRAME_OVERHEAD(r3)
+ PPC_STL r0,_CTR-STACK_FRAME_OVERHEAD(r3)
mfxer r0
- STL r0,_XER-STACK_FRAME_OVERHEAD(r3)
+ PPC_STL r0,_XER-STACK_FRAME_OVERHEAD(r3)
mfcr r0
- STL r0,_CCR-STACK_FRAME_OVERHEAD(r3)
+ PPC_STL r0,_CCR-STACK_FRAME_OVERHEAD(r3)
li r0,0
- STL r0,_TRAP-STACK_FRAME_OVERHEAD(r3)
+ PPC_STL r0,_TRAP-STACK_FRAME_OVERHEAD(r3)
blr
diff --git a/arch/powerpc/xmon/start_32.c b/arch/powerpc/xmon/start_32.c
index 69b658c0f76..c2464df4217 100644
--- a/arch/powerpc/xmon/start_32.c
+++ b/arch/powerpc/xmon/start_32.c
@@ -11,7 +11,6 @@
#include <linux/cuda.h>
#include <linux/kernel.h>
#include <linux/errno.h>
-#include <linux/sysrq.h>
#include <linux/bitops.h>
#include <asm/xmon.h>
#include <asm/prom.h>
@@ -22,10 +21,11 @@
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/btext.h>
+#include <asm/time.h>
+#include "nonstdio.h"
static volatile unsigned char __iomem *sccc, *sccd;
unsigned int TXRDY, RXRDY, DLAB;
-static int xmon_expect(const char *str, unsigned int timeout);
static int use_serial;
static int use_screen;
@@ -33,16 +33,6 @@ static int via_modem;
static int xmon_use_sccb;
static struct device_node *channel_node;
-#define TB_SPEED 25000000
-
-static inline unsigned int readtb(void)
-{
- unsigned int ret;
-
- asm volatile("mftb %0" : "=r" (ret) :);
- return ret;
-}
-
void buf_access(void)
{
if (DLAB)
@@ -91,23 +81,7 @@ static unsigned long chrp_find_phys_io_base(void)
}
#endif /* CONFIG_PPC_CHRP */
-#ifdef CONFIG_MAGIC_SYSRQ
-static void sysrq_handle_xmon(int key, struct pt_regs *regs,
- struct tty_struct *tty)
-{
- xmon(regs);
-}
-
-static struct sysrq_key_op sysrq_xmon_op =
-{
- .handler = sysrq_handle_xmon,
- .help_msg = "Xmon",
- .action_msg = "Entering xmon",
-};
-#endif
-
-void
-xmon_map_scc(void)
+void xmon_map_scc(void)
{
#ifdef CONFIG_PPC_MULTIPLATFORM
volatile unsigned char __iomem *base;
@@ -217,8 +191,6 @@ xmon_map_scc(void)
RXRDY = 1;
DLAB = 0x80;
#endif /* platform */
-
- register_sysrq_key('x', &sysrq_xmon_op);
}
static int scc_initialized = 0;
@@ -238,8 +210,7 @@ static inline void do_poll_adb(void)
#endif /* CONFIG_ADB_CUDA */
}
-int
-xmon_write(void *handle, void *ptr, int nb)
+int xmon_write(void *ptr, int nb)
{
char *p = ptr;
int i, c, ct;
@@ -311,8 +282,7 @@ static unsigned char xmon_shift_keytab[128] =
"\0.\0*\0+\0\0\0\0\0/\r\0-\0" /* 0x40 - 0x4f */
"\0\0000123456789\0\0\0"; /* 0x50 - 0x5f */
-static int
-xmon_get_adb_key(void)
+static int xmon_get_adb_key(void)
{
int k, t, on;
@@ -350,32 +320,21 @@ xmon_get_adb_key(void)
}
#endif /* CONFIG_BOOTX_TEXT */
-int
-xmon_read(void *handle, void *ptr, int nb)
+int xmon_readchar(void)
{
- char *p = ptr;
- int i;
-
#ifdef CONFIG_BOOTX_TEXT
- if (use_screen) {
- for (i = 0; i < nb; ++i)
- *p++ = xmon_get_adb_key();
- return i;
- }
+ if (use_screen)
+ return xmon_get_adb_key();
#endif
- if (!scc_initialized)
- xmon_init_scc();
- for (i = 0; i < nb; ++i) {
+ if (!scc_initialized)
+ xmon_init_scc();
while ((*sccc & RXRDY) == 0)
- do_poll_adb();
+ do_poll_adb();
buf_access();
- *p++ = *sccd;
- }
- return i;
+ return *sccd;
}
-int
-xmon_read_poll(void)
+int xmon_read_poll(void)
{
if ((*sccc & RXRDY) == 0) {
do_poll_adb();
@@ -395,8 +354,7 @@ static unsigned char scc_inittab[] = {
3, 0xc1, /* rx enable, 8 bits */
};
-void
-xmon_init_scc(void)
+void xmon_init_scc(void)
{
if ( _machine == _MACH_chrp )
{
@@ -410,6 +368,7 @@ xmon_init_scc(void)
else if ( _machine == _MACH_Pmac )
{
int i, x;
+ unsigned long timeout;
if (channel_node != 0)
pmac_call_feature(
@@ -424,8 +383,12 @@ xmon_init_scc(void)
PMAC_FTR_MODEM_ENABLE,
channel_node, 0, 1);
printk(KERN_INFO "Modem powered up by debugger !\n");
- t0 = readtb();
- while (readtb() - t0 < 3*TB_SPEED)
+ t0 = get_tbl();
+ timeout = 3 * tb_ticks_per_sec;
+ if (timeout == 0)
+ /* assume 25MHz if tb_ticks_per_sec not set */
+ timeout = 75000000;
+ while (get_tbl() - t0 < timeout)
eieio();
}
/* use the B channel if requested */
@@ -447,164 +410,19 @@ xmon_init_scc(void)
scc_initialized = 1;
if (via_modem) {
for (;;) {
- xmon_write(NULL, "ATE1V1\r", 7);
+ xmon_write("ATE1V1\r", 7);
if (xmon_expect("OK", 5)) {
- xmon_write(NULL, "ATA\r", 4);
+ xmon_write("ATA\r", 4);
if (xmon_expect("CONNECT", 40))
break;
}
- xmon_write(NULL, "+++", 3);
+ xmon_write("+++", 3);
xmon_expect("OK", 3);
}
}
}
-void *xmon_stdin;
-void *xmon_stdout;
-void *xmon_stderr;
-
-int xmon_putc(int c, void *f)
-{
- char ch = c;
-
- if (c == '\n')
- xmon_putc('\r', f);
- return xmon_write(f, &ch, 1) == 1? c: -1;
-}
-
-int xmon_putchar(int c)
-{
- return xmon_putc(c, xmon_stdout);
-}
-
-int xmon_fputs(char *str, void *f)
-{
- int n = strlen(str);
-
- return xmon_write(f, str, n) == n? 0: -1;
-}
-
-int
-xmon_readchar(void)
-{
- char ch;
-
- for (;;) {
- switch (xmon_read(xmon_stdin, &ch, 1)) {
- case 1:
- return ch;
- case -1:
- xmon_printf("read(stdin) returned -1\r\n", 0, 0);
- return -1;
- }
- }
-}
-
-static char line[256];
-static char *lineptr;
-static int lineleft;
-
-int xmon_expect(const char *str, unsigned int timeout)
-{
- int c;
- unsigned int t0;
-
- timeout *= TB_SPEED;
- t0 = readtb();
- do {
- lineptr = line;
- for (;;) {
- c = xmon_read_poll();
- if (c == -1) {
- if (readtb() - t0 > timeout)
- return 0;
- continue;
- }
- if (c == '\n')
- break;
- if (c != '\r' && lineptr < &line[sizeof(line) - 1])
- *lineptr++ = c;
- }
- *lineptr = 0;
- } while (strstr(line, str) == NULL);
- return 1;
-}
-
-int
-xmon_getchar(void)
-{
- int c;
-
- if (lineleft == 0) {
- lineptr = line;
- for (;;) {
- c = xmon_readchar();
- if (c == -1 || c == 4)
- break;
- if (c == '\r' || c == '\n') {
- *lineptr++ = '\n';
- xmon_putchar('\n');
- break;
- }
- switch (c) {
- case 0177:
- case '\b':
- if (lineptr > line) {
- xmon_putchar('\b');
- xmon_putchar(' ');
- xmon_putchar('\b');
- --lineptr;
- }
- break;
- case 'U' & 0x1F:
- while (lineptr > line) {
- xmon_putchar('\b');
- xmon_putchar(' ');
- xmon_putchar('\b');
- --lineptr;
- }
- break;
- default:
- if (lineptr >= &line[sizeof(line) - 1])
- xmon_putchar('\a');
- else {
- xmon_putchar(c);
- *lineptr++ = c;
- }
- }
- }
- lineleft = lineptr - line;
- lineptr = line;
- }
- if (lineleft == 0)
- return -1;
- --lineleft;
- return *lineptr++;
-}
-
-char *
-xmon_fgets(char *str, int nb, void *f)
-{
- char *p;
- int c;
-
- for (p = str; p < str + nb - 1; ) {
- c = xmon_getchar();
- if (c == -1) {
- if (p == str)
- return NULL;
- break;
- }
- *p++ = c;
- if (c == '\n')
- break;
- }
- *p = 0;
- return str;
-}
-
-void
-xmon_enter(void)
+void xmon_enter(void)
{
#ifdef CONFIG_ADB_PMU
if (_machine == _MACH_Pmac) {
@@ -613,8 +431,7 @@ xmon_enter(void)
#endif
}
-void
-xmon_leave(void)
+void xmon_leave(void)
{
#ifdef CONFIG_ADB_PMU
if (_machine == _MACH_Pmac) {
diff --git a/arch/powerpc/xmon/start_64.c b/arch/powerpc/xmon/start_64.c
index e50c158191e..712552c4f24 100644
--- a/arch/powerpc/xmon/start_64.c
+++ b/arch/powerpc/xmon/start_64.c
@@ -6,182 +6,29 @@
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
-#include <linux/config.h>
-#include <linux/string.h>
-#include <linux/kernel.h>
-#include <linux/errno.h>
-#include <linux/sysrq.h>
-#include <linux/init.h>
#include <asm/machdep.h>
-#include <asm/io.h>
-#include <asm/page.h>
-#include <asm/prom.h>
-#include <asm/processor.h>
#include <asm/udbg.h>
-#include <asm/system.h>
#include "nonstdio.h"
-#ifdef CONFIG_MAGIC_SYSRQ
-
-static void sysrq_handle_xmon(int key, struct pt_regs *pt_regs,
- struct tty_struct *tty)
-{
- /* ensure xmon is enabled */
- xmon_init(1);
- debugger(pt_regs);
-}
-
-static struct sysrq_key_op sysrq_xmon_op =
+void xmon_map_scc(void)
{
- .handler = sysrq_handle_xmon,
- .help_msg = "Xmon",
- .action_msg = "Entering xmon",
-};
-
-static int __init setup_xmon_sysrq(void)
-{
- register_sysrq_key('x', &sysrq_xmon_op);
- return 0;
}
-__initcall(setup_xmon_sysrq);
-#endif /* CONFIG_MAGIC_SYSRQ */
-int
-xmon_write(void *handle, void *ptr, int nb)
+int xmon_write(void *ptr, int nb)
{
return udbg_write(ptr, nb);
}
-int
-xmon_read(void *handle, void *ptr, int nb)
+int xmon_readchar(void)
{
- return udbg_read(ptr, nb);
+ if (udbg_getc)
+ return udbg_getc();
+ return -1;
}
-int
-xmon_read_poll(void)
+int xmon_read_poll(void)
{
if (udbg_getc_poll)
return udbg_getc_poll();
return -1;
}
-
-FILE *xmon_stdin;
-FILE *xmon_stdout;
-
-int
-xmon_putc(int c, void *f)
-{
- char ch = c;
-
- if (c == '\n')
- xmon_putc('\r', f);
- return xmon_write(f, &ch, 1) == 1? c: -1;
-}
-
-int
-xmon_putchar(int c)
-{
- return xmon_putc(c, xmon_stdout);
-}
-
-int
-xmon_fputs(char *str, void *f)
-{
- int n = strlen(str);
-
- return xmon_write(f, str, n) == n? 0: -1;
-}
-
-int
-xmon_readchar(void)
-{
- char ch;
-
- for (;;) {
- switch (xmon_read(xmon_stdin, &ch, 1)) {
- case 1:
- return ch;
- case -1:
- xmon_printf("read(stdin) returned -1\r\n", 0, 0);
- return -1;
- }
- }
-}
-
-static char line[256];
-static char *lineptr;
-static int lineleft;
-
-int
-xmon_getchar(void)
-{
- int c;
-
- if (lineleft == 0) {
- lineptr = line;
- for (;;) {
- c = xmon_readchar();
- if (c == -1 || c == 4)
- break;
- if (c == '\r' || c == '\n') {
- *lineptr++ = '\n';
- xmon_putchar('\n');
- break;
- }
- switch (c) {
- case 0177:
- case '\b':
- if (lineptr > line) {
- xmon_putchar('\b');
- xmon_putchar(' ');
- xmon_putchar('\b');
- --lineptr;
- }
- break;
- case 'U' & 0x1F:
- while (lineptr > line) {
- xmon_putchar('\b');
- xmon_putchar(' ');
- xmon_putchar('\b');
- --lineptr;
- }
- break;
- default:
- if (lineptr >= &line[sizeof(line) - 1])
- xmon_putchar('\a');
- else {
- xmon_putchar(c);
- *lineptr++ = c;
- }
- }
- }
- lineleft = lineptr - line;
- lineptr = line;
- }
- if (lineleft == 0)
- return -1;
- --lineleft;
- return *lineptr++;
-}
-
-char *
-xmon_fgets(char *str, int nb, void *f)
-{
- char *p;
- int c;
-
- for (p = str; p < str + nb - 1; ) {
- c = xmon_getchar();
- if (c == -1) {
- if (p == str)
- return NULL;
- break;
- }
- *p++ = c;
- if (c == '\n')
- break;
- }
- *p = 0;
- return str;
-}
diff --git a/arch/powerpc/xmon/start_8xx.c b/arch/powerpc/xmon/start_8xx.c
index a48bd594cf6..4c17b0486ad 100644
--- a/arch/powerpc/xmon/start_8xx.c
+++ b/arch/powerpc/xmon/start_8xx.c
@@ -15,273 +15,30 @@
#include <asm/8xx_immap.h>
#include <asm/mpc8xx.h>
#include <asm/commproc.h>
+#include "nonstdio.h"
-extern void xmon_printf(const char *fmt, ...);
extern int xmon_8xx_write(char *str, int nb);
extern int xmon_8xx_read_poll(void);
extern int xmon_8xx_read_char(void);
-void prom_drawhex(uint);
-void prom_drawstring(const char *str);
-static int use_screen = 1; /* default */
-
-#define TB_SPEED 25000000
-
-static inline unsigned int readtb(void)
-{
- unsigned int ret;
-
- asm volatile("mftb %0" : "=r" (ret) :);
- return ret;
-}
-
-void buf_access(void)
-{
-}
-
-void
-xmon_map_scc(void)
+void xmon_map_scc(void)
{
-
cpmp = (cpm8xx_t *)&(((immap_t *)IMAP_ADDR)->im_cpm);
- use_screen = 0;
-
- prom_drawstring("xmon uses serial port\n");
}
-static int scc_initialized = 0;
-
void xmon_init_scc(void);
-int
-xmon_write(void *handle, void *ptr, int nb)
+int xmon_write(void *ptr, int nb)
{
- char *p = ptr;
- int i, c, ct;
-
- if (!scc_initialized)
- xmon_init_scc();
-
return(xmon_8xx_write(ptr, nb));
}
-int xmon_wants_key;
-
-int
-xmon_read(void *handle, void *ptr, int nb)
+int xmon_readchar(void)
{
- char *p = ptr;
- int i;
-
- if (!scc_initialized)
- xmon_init_scc();
-
- for (i = 0; i < nb; ++i) {
- *p++ = xmon_8xx_read_char();
- }
- return i;
+ return xmon_8xx_read_char();
}
-int
-xmon_read_poll(void)
+int xmon_read_poll(void)
{
return(xmon_8xx_read_poll());
}
-
-void
-xmon_init_scc()
-{
- scc_initialized = 1;
-}
-
-#if 0
-extern int (*prom_entry)(void *);
-
-int
-xmon_exit(void)
-{
- struct prom_args {
- char *service;
- } args;
-
- for (;;) {
- args.service = "exit";
- (*prom_entry)(&args);
- }
-}
-#endif
-
-void *xmon_stdin;
-void *xmon_stdout;
-void *xmon_stderr;
-
-void
-xmon_init(void)
-{
-}
-
-int
-xmon_putc(int c, void *f)
-{
- char ch = c;
-
- if (c == '\n')
- xmon_putc('\r', f);
- return xmon_write(f, &ch, 1) == 1? c: -1;
-}
-
-int
-xmon_putchar(int c)
-{
- return xmon_putc(c, xmon_stdout);
-}
-
-int
-xmon_fputs(char *str, void *f)
-{
- int n = strlen(str);
-
- return xmon_write(f, str, n) == n? 0: -1;
-}
-
-int
-xmon_readchar(void)
-{
- char ch;
-
- for (;;) {
- switch (xmon_read(xmon_stdin, &ch, 1)) {
- case 1:
- return ch;
- case -1:
- xmon_printf("read(stdin) returned -1\r\n", 0, 0);
- return -1;
- }
- }
-}
-
-static char line[256];
-static char *lineptr;
-static int lineleft;
-
-#if 0
-int xmon_expect(const char *str, unsigned int timeout)
-{
- int c;
- unsigned int t0;
-
- timeout *= TB_SPEED;
- t0 = readtb();
- do {
- lineptr = line;
- for (;;) {
- c = xmon_read_poll();
- if (c == -1) {
- if (readtb() - t0 > timeout)
- return 0;
- continue;
- }
- if (c == '\n')
- break;
- if (c != '\r' && lineptr < &line[sizeof(line) - 1])
- *lineptr++ = c;
- }
- *lineptr = 0;
- } while (strstr(line, str) == NULL);
- return 1;
-}
-#endif
-
-int
-xmon_getchar(void)
-{
- int c;
-
- if (lineleft == 0) {
- lineptr = line;
- for (;;) {
- c = xmon_readchar();
- if (c == -1 || c == 4)
- break;
- if (c == '\r' || c == '\n') {
- *lineptr++ = '\n';
- xmon_putchar('\n');
- break;
- }
- switch (c) {
- case 0177:
- case '\b':
- if (lineptr > line) {
- xmon_putchar('\b');
- xmon_putchar(' ');
- xmon_putchar('\b');
- --lineptr;
- }
- break;
- case 'U' & 0x1F:
- while (lineptr > line) {
- xmon_putchar('\b');
- xmon_putchar(' ');
- xmon_putchar('\b');
- --lineptr;
- }
- break;
- default:
- if (lineptr >= &line[sizeof(line) - 1])
- xmon_putchar('\a');
- else {
- xmon_putchar(c);
- *lineptr++ = c;
- }
- }
- }
- lineleft = lineptr - line;
- lineptr = line;
- }
- if (lineleft == 0)
- return -1;
- --lineleft;
- return *lineptr++;
-}
-
-char *
-xmon_fgets(char *str, int nb, void *f)
-{
- char *p;
- int c;
-
- for (p = str; p < str + nb - 1; ) {
- c = xmon_getchar();
- if (c == -1) {
- if (p == str)
- return 0;
- break;
- }
- *p++ = c;
- if (c == '\n')
- break;
- }
- *p = 0;
- return str;
-}
-
-void
-prom_drawhex(uint val)
-{
- unsigned char buf[10];
-
- int i;
- for (i = 7; i >= 0; i--)
- {
- buf[i] = "0123456789abcdef"[val & 0x0f];
- val >>= 4;
- }
- buf[8] = '\0';
- xmon_fputs(buf, xmon_stdout);
-}
-
-void
-prom_drawstring(const char *str)
-{
- xmon_fputs(str, xmon_stdout);
-}
diff --git a/arch/powerpc/xmon/subr_prf.c b/arch/powerpc/xmon/subr_prf.c
deleted file mode 100644
index b48738c6dd3..00000000000
--- a/arch/powerpc/xmon/subr_prf.c
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Written by Cort Dougan to replace the version originally used
- * by Paul Mackerras, which came from NetBSD and thus had copyright
- * conflicts with Linux.
- *
- * This file makes liberal use of the standard linux utility
- * routines to reduce the size of the binary. We assume we can
- * trust some parts of Linux inside the debugger.
- * -- Cort (cort@cs.nmt.edu)
- *
- * Copyright (C) 1999 Cort Dougan.
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#include <linux/kernel.h>
-#include <linux/string.h>
-#include <linux/module.h>
-#include <stdarg.h>
-#include "nonstdio.h"
-
-extern int xmon_write(void *, void *, int);
-
-void xmon_vfprintf(void *f, const char *fmt, va_list ap)
-{
- static char xmon_buf[2048];
- int n;
-
- n = vsprintf(xmon_buf, fmt, ap);
- xmon_write(f, xmon_buf, n);
-}
-
-void xmon_printf(const char *fmt, ...)
-{
- va_list ap;
-
- va_start(ap, fmt);
- xmon_vfprintf(stdout, fmt, ap);
- va_end(ap);
-}
-EXPORT_SYMBOL(xmon_printf);
-
-void xmon_fprintf(void *f, const char *fmt, ...)
-{
- va_list ap;
-
- va_start(ap, fmt);
- xmon_vfprintf(f, fmt, ap);
- va_end(ap);
-}
-
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 1124f114620..cfcb2a56d66 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -1,7 +1,7 @@
/*
* Routines providing a simple monitor for use on the PowerMac.
*
- * Copyright (C) 1996 Paul Mackerras.
+ * Copyright (C) 1996-2005 Paul Mackerras.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -18,6 +18,7 @@
#include <linux/kallsyms.h>
#include <linux/cpumask.h>
#include <linux/module.h>
+#include <linux/sysrq.h>
#include <asm/ptrace.h>
#include <asm/string.h>
@@ -144,15 +145,10 @@ static void xmon_print_symbol(unsigned long address, const char *mid,
static const char *getvecname(unsigned long vec);
extern int print_insn_powerpc(unsigned long, unsigned long, int);
-extern void printf(const char *fmt, ...);
-extern void xmon_vfprintf(void *f, const char *fmt, va_list ap);
-extern int xmon_putc(int c, void *f);
-extern int putchar(int ch);
extern void xmon_enter(void);
extern void xmon_leave(void);
-extern int xmon_read_poll(void);
extern long setjmp(long *);
extern void longjmp(long *, long);
extern void xmon_save_regs(struct pt_regs *);
@@ -748,7 +744,6 @@ cmds(struct pt_regs *excp)
printf("%x:", smp_processor_id());
#endif /* CONFIG_SMP */
printf("mon> ");
- fflush(stdout);
flush_input();
termch = 0;
cmd = skipbl();
@@ -1797,7 +1792,7 @@ memex(void)
for(;;){
if (!mnoread)
n = mread(adrs, val, size);
- printf("%.16x%c", adrs, brev? 'r': ' ');
+ printf(REG"%c", adrs, brev? 'r': ' ');
if (!mnoread) {
if (brev)
byterev(val, size);
@@ -1976,17 +1971,18 @@ prdump(unsigned long adrs, long ndump)
nr = mread(adrs, temp, r);
adrs += nr;
for (m = 0; m < r; ++m) {
- if ((m & 7) == 0 && m > 0)
- putchar(' ');
+ if ((m & (sizeof(long) - 1)) == 0 && m > 0)
+ putchar(' ');
if (m < nr)
printf("%.2x", temp[m]);
else
printf("%s", fault_chars[fault_type]);
}
- if (m <= 8)
- printf(" ");
- for (; m < 16; ++m)
+ for (; m < 16; ++m) {
+ if ((m & (sizeof(long) - 1)) == 0)
+ putchar(' ');
printf(" ");
+ }
printf(" |");
for (m = 0; m < r; ++m) {
if (m < nr) {
@@ -2151,7 +2147,6 @@ memzcan(void)
ok = mread(a, &v, 1);
if (ok && !ook) {
printf("%.8x .. ", a);
- fflush(stdout);
} else if (!ok && ook)
printf("%.8x\n", a - mskip);
ook = ok;
@@ -2372,7 +2367,7 @@ int
inchar(void)
{
if (lineptr == NULL || *lineptr == 0) {
- if (fgets(line, sizeof(line), stdin) == NULL) {
+ if (xmon_gets(line, sizeof(line)) == NULL) {
lineptr = NULL;
return EOF;
}
@@ -2526,4 +2521,29 @@ void xmon_init(int enable)
__debugger_dabr_match = NULL;
__debugger_fault_handler = NULL;
}
+ xmon_map_scc();
+}
+
+#ifdef CONFIG_MAGIC_SYSRQ
+static void sysrq_handle_xmon(int key, struct pt_regs *pt_regs,
+ struct tty_struct *tty)
+{
+ /* ensure xmon is enabled */
+ xmon_init(1);
+ debugger(pt_regs);
+}
+
+static struct sysrq_key_op sysrq_xmon_op =
+{
+ .handler = sysrq_handle_xmon,
+ .help_msg = "Xmon",
+ .action_msg = "Entering xmon",
+};
+
+static int __init setup_xmon_sysrq(void)
+{
+ register_sysrq_key('x', &sysrq_xmon_op);
+ return 0;
}
+__initcall(setup_xmon_sysrq);
+#endif /* CONFIG_MAGIC_SYSRQ */
diff --git a/arch/ppc/boot/include/of1275.h b/arch/ppc/boot/include/of1275.h
index 69173df76db..4ed88acfa73 100644
--- a/arch/ppc/boot/include/of1275.h
+++ b/arch/ppc/boot/include/of1275.h
@@ -19,6 +19,9 @@ extern prom_entry of_prom_entry;
/* function declarations */
+int call_prom(const char *service, int nargs, int nret, ...);
+int call_prom_ret(const char *service, int nargs, int nret,
+ unsigned int *rets, ...);
void * claim(unsigned int virt, unsigned int size, unsigned int align);
int map(unsigned int phys, unsigned int virt, unsigned int size);
void enter(void);
diff --git a/arch/ppc/boot/of1275/Makefile b/arch/ppc/boot/of1275/Makefile
index 02e6f235d7c..0b979c00497 100644
--- a/arch/ppc/boot/of1275/Makefile
+++ b/arch/ppc/boot/of1275/Makefile
@@ -3,4 +3,4 @@
#
lib-y := claim.o enter.o exit.o finddevice.o getprop.o ofinit.o \
- ofstdio.o read.o release.o write.o map.o
+ ofstdio.o read.o release.o write.o map.o call_prom.o
diff --git a/arch/ppc/boot/of1275/call_prom.c b/arch/ppc/boot/of1275/call_prom.c
new file mode 100644
index 00000000000..9479a3a2b8c
--- /dev/null
+++ b/arch/ppc/boot/of1275/call_prom.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 1996-2005 Paul Mackerras.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include "of1275.h"
+#include <stdarg.h>
+
+int call_prom(const char *service, int nargs, int nret, ...)
+{
+ int i;
+ struct prom_args {
+ const char *service;
+ int nargs;
+ int nret;
+ unsigned int args[12];
+ } args;
+ va_list list;
+
+ args.service = service;
+ args.nargs = nargs;
+ args.nret = nret;
+
+ va_start(list, nret);
+ for (i = 0; i < nargs; i++)
+ args.args[i] = va_arg(list, unsigned int);
+ va_end(list);
+
+ for (i = 0; i < nret; i++)
+ args.args[nargs+i] = 0;
+
+ if (of_prom_entry(&args) < 0)
+ return -1;
+
+ return (nret > 0)? args.args[nargs]: 0;
+}
+
+int call_prom_ret(const char *service, int nargs, int nret,
+ unsigned int *rets, ...)
+{
+ int i;
+ struct prom_args {
+ const char *service;
+ int nargs;
+ int nret;
+ unsigned int args[12];
+ } args;
+ va_list list;
+
+ args.service = service;
+ args.nargs = nargs;
+ args.nret = nret;
+
+ va_start(list, rets);
+ for (i = 0; i < nargs; i++)
+ args.args[i] = va_arg(list, unsigned int);
+ va_end(list);
+
+ for (i = 0; i < nret; i++)
+ args.args[nargs+i] = 0;
+
+ if (of_prom_entry(&args) < 0)
+ return -1;
+
+ if (rets != (void *) 0)
+ for (i = 1; i < nret; ++i)
+ rets[i-1] = args.args[nargs+i];
+
+ return (nret > 0)? args.args[nargs]: 0;
+}
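A small illustration of the veneer above, assuming the standard IEEE 1275 "getprop" client service; the function name and property are only examples:

	static int example_get_clock(phandle node)
	{
		unsigned int freq = 0;

		/* getprop(phandle, name, buf, buflen) returns the property
		 * size, or -1 if the property does not exist. */
		if (call_prom("getprop", 4, 1, node, "clock-frequency",
			      &freq, sizeof(freq)) <= 0)
			return -1;
		return (int)freq;
	}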
diff --git a/arch/ppc/boot/of1275/claim.c b/arch/ppc/boot/of1275/claim.c
index 13169a5c433..1ed3aeeff8a 100644
--- a/arch/ppc/boot/of1275/claim.c
+++ b/arch/ppc/boot/of1275/claim.c
@@ -9,27 +9,84 @@
*/
#include "of1275.h"
+#include "nonstdio.h"
-void *
-claim(unsigned int virt, unsigned int size, unsigned int align)
+/*
+ * Older OF's require that when claiming a specific range of addresses,
+ * we claim the physical space in the /memory node and the virtual
+ * space in the chosen mmu node, and then do a map operation to
+ * map virtual to physical.
+ */
+static int need_map = -1;
+static ihandle chosen_mmu;
+static phandle memory;
+
+/* returns true if s2 is a prefix of s1 */
+static int string_match(const char *s1, const char *s2)
+{
+ for (; *s2; ++s2)
+ if (*s1++ != *s2)
+ return 0;
+ return 1;
+}
+
+static int check_of_version(void)
+{
+ phandle oprom, chosen;
+ char version[64];
+
+ oprom = finddevice("/openprom");
+ if (oprom == OF_INVALID_HANDLE)
+ return 0;
+ if (getprop(oprom, "model", version, sizeof(version)) <= 0)
+ return 0;
+ version[sizeof(version)-1] = 0;
+ printf("OF version = '%s'\n", version);
+ if (!string_match(version, "Open Firmware, 1.")
+ && !string_match(version, "FirmWorks,3."))
+ return 0;
+ chosen = finddevice("/chosen");
+ if (chosen == OF_INVALID_HANDLE) {
+ chosen = finddevice("/chosen@0");
+ if (chosen == OF_INVALID_HANDLE) {
+ printf("no chosen\n");
+ return 0;
+ }
+ }
+ if (getprop(chosen, "mmu", &chosen_mmu, sizeof(chosen_mmu)) <= 0) {
+ printf("no mmu\n");
+ return 0;
+ }
+ memory = (ihandle) call_prom("open", 1, 1, "/memory");
+ if (memory == OF_INVALID_HANDLE) {
+ memory = (ihandle) call_prom("open", 1, 1, "/memory@0");
+ if (memory == OF_INVALID_HANDLE) {
+ printf("no memory node\n");
+ return 0;
+ }
+ }
+ printf("old OF detected\n");
+ return 1;
+}
+
+void *claim(unsigned int virt, unsigned int size, unsigned int align)
{
- struct prom_args {
- char *service;
- int nargs;
- int nret;
- unsigned int virt;
- unsigned int size;
- unsigned int align;
- void *ret;
- } args;
+ int ret;
+ unsigned int result;
- args.service = "claim";
- args.nargs = 3;
- args.nret = 1;
- args.virt = virt;
- args.size = size;
- args.align = align;
- args.ret = (void *) 0;
- (*of_prom_entry)(&args);
- return args.ret;
+ if (need_map < 0)
+ need_map = check_of_version();
+ if (align || !need_map)
+ return (void *) call_prom("claim", 3, 1, virt, size, align);
+
+ ret = call_prom_ret("call-method", 5, 2, &result, "claim", memory,
+ align, size, virt);
+ if (ret != 0 || result == -1)
+ return (void *) -1;
+ ret = call_prom_ret("call-method", 5, 2, &result, "claim", chosen_mmu,
+ align, size, virt);
+ /* 0x12 == coherent + read/write */
+ ret = call_prom("call-method", 6, 1, "map", chosen_mmu,
+ 0x12, size, virt, virt);
+ return (void *) virt;
}
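
As a usage sketch (the address and size below are made up), a boot wrapper that needs memory at a fixed virtual address calls claim() the same way on old and new firmware; the claim-physical/claim-virtual/map dance for old OF stays hidden inside:

	/* Hypothetical caller; 0x00800000 and 4 MB are illustrative only. */
	void *buf = claim(0x00800000, 4 * 1024 * 1024, 0);
	if (buf == (void *) -1)
		printf("claim failed\n");
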
diff --git a/arch/ppc/boot/of1275/finddevice.c b/arch/ppc/boot/of1275/finddevice.c
index 2c0f7cbb793..0dcb1201b77 100644
--- a/arch/ppc/boot/of1275/finddevice.c
+++ b/arch/ppc/boot/of1275/finddevice.c
@@ -10,22 +10,7 @@
#include "of1275.h"
-phandle
-finddevice(const char *name)
+phandle finddevice(const char *name)
{
- struct prom_args {
- char *service;
- int nargs;
- int nret;
- const char *devspec;
- phandle device;
- } args;
-
- args.service = "finddevice";
- args.nargs = 1;
- args.nret = 1;
- args.devspec = name;
- args.device = OF_INVALID_HANDLE;
- (*of_prom_entry)(&args);
- return args.device;
+ return (phandle) call_prom("finddevice", 1, 1, name);
}
diff --git a/arch/ppc/boot/openfirmware/Makefile b/arch/ppc/boot/openfirmware/Makefile
index 03415238fab..83a6433459c 100644
--- a/arch/ppc/boot/openfirmware/Makefile
+++ b/arch/ppc/boot/openfirmware/Makefile
@@ -80,8 +80,7 @@ $(obj)/note: $(utils)/mknote FORCE
$(call if_changed,mknote)
-$(obj)/coffcrt0.o: EXTRA_AFLAGS := -traditional -DXCOFF
-$(obj)/crt0.o: EXTRA_AFLAGS := -traditional
+$(obj)/coffcrt0.o: EXTRA_AFLAGS := -DXCOFF
targets += coffcrt0.o crt0.o
$(obj)/coffcrt0.o $(obj)/crt0.o: $(common)/crt0.S FORCE
$(call if_changed_dep,as_o_S)
diff --git a/arch/ppc/kernel/Makefile b/arch/ppc/kernel/Makefile
index 76a55a438f2..17a4da65e27 100644
--- a/arch/ppc/kernel/Makefile
+++ b/arch/ppc/kernel/Makefile
@@ -12,7 +12,7 @@ extra-$(CONFIG_6xx) += idle_6xx.o
extra-$(CONFIG_POWER4) += idle_power4.o
extra-y += vmlinux.lds
-obj-y := entry.o traps.o irq.o idle.o time.o misc.o \
+obj-y := entry.o traps.o idle.o time.o misc.o \
process.o align.o \
setup.o \
ppc_htab.o
@@ -38,8 +38,7 @@ endif
# These are here while we do the architecture merge
else
-obj-y := irq.o idle.o \
- align.o
+obj-y := idle.o align.o
obj-$(CONFIG_6xx) += l2cr.o cpu_setup_6xx.o
obj-$(CONFIG_SOFTWARE_SUSPEND) += swsusp.o
obj-$(CONFIG_MODULES) += module.o
diff --git a/arch/ppc/kernel/head_booke.h b/arch/ppc/kernel/head_booke.h
index aeb349b47af..f3d274c6b23 100644
--- a/arch/ppc/kernel/head_booke.h
+++ b/arch/ppc/kernel/head_booke.h
@@ -358,6 +358,6 @@ label:
NORMAL_EXCEPTION_PROLOG; \
bne load_up_fpu; /* if from user, just load it up */ \
addi r3,r1,STACK_FRAME_OVERHEAD; \
- EXC_XFER_EE_LITE(0x800, KernelFP)
+ EXC_XFER_EE_LITE(0x800, kernel_fp_unavailable_exception)
#endif /* __HEAD_BOOKE_H__ */
diff --git a/arch/ppc/kernel/idle.c b/arch/ppc/kernel/idle.c
index 3c4e4cb6107..821a75e4560 100644
--- a/arch/ppc/kernel/idle.c
+++ b/arch/ppc/kernel/idle.c
@@ -63,7 +63,7 @@ void cpu_idle(void)
int cpu = smp_processor_id();
for (;;) {
- while (need_resched()) {
+ while (!need_resched()) {
if (ppc_md.idle != NULL)
ppc_md.idle();
else
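
The inverted test above is the whole fix: the CPU should sit in its low-power hook only while nothing is runnable. A simplified sketch of the intended loop shape (the real function also handles CPU hotplug and preemption around schedule()):

	for (;;) {
		while (!need_resched()) {
			if (ppc_md.idle != NULL)
				ppc_md.idle();	/* platform low-power hook */
			else
				default_idle();	/* generic fallback */
		}
		schedule();			/* a task became runnable */
	}
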
diff --git a/arch/ppc/kernel/irq.c b/arch/ppc/kernel/irq.c
deleted file mode 100644
index fbb2b9f8922..00000000000
--- a/arch/ppc/kernel/irq.c
+++ /dev/null
@@ -1,165 +0,0 @@
-/*
- * arch/ppc/kernel/irq.c
- *
- * Derived from arch/i386/kernel/irq.c
- * Copyright (C) 1992 Linus Torvalds
- * Adapted from arch/i386 by Gary Thomas
- * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
- * Updated and modified by Cort Dougan <cort@fsmlabs.com>
- * Copyright (C) 1996-2001 Cort Dougan
- * Adapted for Power Macintosh by Paul Mackerras
- * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
- * Amiga/APUS changes by Jesper Skov (jskov@cygnus.co.uk).
- *
- * This file contains the code used by various IRQ handling routines:
- * asking for different IRQ's should be done through these routines
- * instead of just grabbing them. Thus setups with different IRQ numbers
- * shouldn't result in any weird surprises, and installing new handlers
- * should be easier.
- *
- * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
- * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
- * mask register (of which only 16 are defined), hence the weird shifting
- * and complement of the cached_irq_mask. I want to be able to stuff
- * this right into the SIU SMASK register.
- * Many of the prep/chrp functions are conditional compiled on CONFIG_8xx
- * to reduce code space and undefined function references.
- */
-
-#include <linux/errno.h>
-#include <linux/module.h>
-#include <linux/threads.h>
-#include <linux/kernel_stat.h>
-#include <linux/signal.h>
-#include <linux/sched.h>
-#include <linux/ptrace.h>
-#include <linux/ioport.h>
-#include <linux/interrupt.h>
-#include <linux/timex.h>
-#include <linux/config.h>
-#include <linux/init.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/irq.h>
-#include <linux/proc_fs.h>
-#include <linux/random.h>
-#include <linux/seq_file.h>
-#include <linux/cpumask.h>
-#include <linux/profile.h>
-#include <linux/bitops.h>
-
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#include <asm/io.h>
-#include <asm/pgtable.h>
-#include <asm/irq.h>
-#include <asm/cache.h>
-#include <asm/prom.h>
-#include <asm/ptrace.h>
-#include <asm/machdep.h>
-
-#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
-
-extern atomic_t ipi_recv;
-extern atomic_t ipi_sent;
-
-#define MAXCOUNT 10000000
-
-int ppc_spurious_interrupts = 0;
-struct irqaction *ppc_irq_action[NR_IRQS];
-unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
-unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
-atomic_t ppc_n_lost_interrupts;
-
-#ifdef CONFIG_TAU_INT
-extern int tau_initialized;
-extern int tau_interrupts(int);
-#endif
-
-int show_interrupts(struct seq_file *p, void *v)
-{
- int i = *(loff_t *) v, j;
- struct irqaction * action;
- unsigned long flags;
-
- if (i == 0) {
- seq_puts(p, " ");
- for (j=0; j<NR_CPUS; j++)
- if (cpu_online(j))
- seq_printf(p, "CPU%d ", j);
- seq_putc(p, '\n');
- }
-
- if (i < NR_IRQS) {
- spin_lock_irqsave(&irq_desc[i].lock, flags);
- action = irq_desc[i].action;
- if ( !action || !action->handler )
- goto skip;
- seq_printf(p, "%3d: ", i);
-#ifdef CONFIG_SMP
- for (j = 0; j < NR_CPUS; j++)
- if (cpu_online(j))
- seq_printf(p, "%10u ",
- kstat_cpu(j).irqs[i]);
-#else
- seq_printf(p, "%10u ", kstat_irqs(i));
-#endif /* CONFIG_SMP */
- if (irq_desc[i].handler)
- seq_printf(p, " %s ", irq_desc[i].handler->typename);
- else
- seq_puts(p, " None ");
- seq_printf(p, "%s", (irq_desc[i].status & IRQ_LEVEL) ? "Level " : "Edge ");
- seq_printf(p, " %s", action->name);
- for (action = action->next; action; action = action->next)
- seq_printf(p, ", %s", action->name);
- seq_putc(p, '\n');
-skip:
- spin_unlock_irqrestore(&irq_desc[i].lock, flags);
- } else if (i == NR_IRQS) {
-#ifdef CONFIG_TAU_INT
- if (tau_initialized){
- seq_puts(p, "TAU: ");
- for (j = 0; j < NR_CPUS; j++)
- if (cpu_online(j))
- seq_printf(p, "%10u ", tau_interrupts(j));
- seq_puts(p, " PowerPC Thermal Assist (cpu temp)\n");
- }
-#endif
-#if defined(CONFIG_SMP) && !defined(CONFIG_PPC_MERGE)
- /* should this be per processor send/receive? */
- seq_printf(p, "IPI (recv/sent): %10u/%u\n",
- atomic_read(&ipi_recv), atomic_read(&ipi_sent));
-#endif
- seq_printf(p, "BAD: %10u\n", ppc_spurious_interrupts);
- }
- return 0;
-}
-
-void do_IRQ(struct pt_regs *regs)
-{
- int irq, first = 1;
- irq_enter();
-
- /*
- * Every platform is required to implement ppc_md.get_irq.
- * This function will either return an irq number or -1 to
- * indicate there are no more pending. But the first time
- * through the loop this means there wasn't an IRQ pending.
- * The value -2 is for buggy hardware and means that this IRQ
- * has already been handled. -- Tom
- */
- while ((irq = ppc_md.get_irq(regs)) >= 0) {
- __do_IRQ(irq, regs);
- first = 0;
- }
- if (irq != -2 && first)
- /* That's not SMP safe ... but who cares ? */
- ppc_spurious_interrupts++;
- irq_exit();
-}
-
-void __init init_IRQ(void)
-{
- ppc_md.init_IRQ();
-}
diff --git a/arch/ppc/kernel/misc.S b/arch/ppc/kernel/misc.S
index ae6af29938a..5e61124581d 100644
--- a/arch/ppc/kernel/misc.S
+++ b/arch/ppc/kernel/misc.S
@@ -497,9 +497,9 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
* and invalidate the corresponding instruction cache blocks.
* This is a no-op on the 601.
*
- * flush_icache_range(unsigned long start, unsigned long stop)
+ * __flush_icache_range(unsigned long start, unsigned long stop)
*/
-_GLOBAL(flush_icache_range)
+_GLOBAL(__flush_icache_range)
BEGIN_FTR_SECTION
blr /* for 601, do nothing */
END_FTR_SECTION_IFCLR(CPU_FTR_SPLIT_ID_CACHE)
diff --git a/arch/ppc/kernel/pci.c b/arch/ppc/kernel/pci.c
index e8f4e576750..48ed58f995c 100644
--- a/arch/ppc/kernel/pci.c
+++ b/arch/ppc/kernel/pci.c
@@ -62,20 +62,6 @@ struct pci_controller** hose_tail = &hose_head;
static int pci_bus_count;
static void
-fixup_rev1_53c810(struct pci_dev* dev)
-{
- /* rev 1 ncr53c810 chips don't set the class at all which means
- * they don't get their resources remapped. Fix that here.
- */
-
- if ((dev->class == PCI_CLASS_NOT_DEFINED)) {
- printk("NCR 53c810 rev 1 detected, setting PCI class.\n");
- dev->class = PCI_CLASS_STORAGE_SCSI;
- }
-}
-DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810);
-
-static void
fixup_broken_pcnet32(struct pci_dev* dev)
{
if ((dev->class>>8 == PCI_CLASS_NETWORK_ETHERNET)) {
diff --git a/arch/ppc/kernel/ppc_ksyms.c b/arch/ppc/kernel/ppc_ksyms.c
index e0ca61b37f4..66073f77519 100644
--- a/arch/ppc/kernel/ppc_ksyms.c
+++ b/arch/ppc/kernel/ppc_ksyms.c
@@ -46,6 +46,7 @@
#include <asm/btext.h>
#include <asm/div64.h>
#include <asm/xmon.h>
+#include <asm/signal.h>
#ifdef CONFIG_8xx
#include <asm/commproc.h>
@@ -57,7 +58,6 @@ extern void machine_check_exception(struct pt_regs *regs);
extern void alignment_exception(struct pt_regs *regs);
extern void program_check_exception(struct pt_regs *regs);
extern void single_step_exception(struct pt_regs *regs);
-extern int do_signal(sigset_t *, struct pt_regs *);
extern int pmac_newworld;
extern int sys_sigreturn(struct pt_regs *regs);
@@ -78,7 +78,6 @@ EXPORT_SYMBOL(program_check_exception);
EXPORT_SYMBOL(single_step_exception);
EXPORT_SYMBOL(sys_sigreturn);
EXPORT_SYMBOL(ppc_n_lost_interrupts);
-EXPORT_SYMBOL(ppc_lost_interrupts);
EXPORT_SYMBOL(ISA_DMA_THRESHOLD);
EXPORT_SYMBOL(DMA_MODE_READ);
@@ -176,6 +175,7 @@ EXPORT_SYMBOL(pci_bus_to_phys);
#endif /* CONFIG_PCI */
#ifdef CONFIG_NOT_COHERENT_CACHE
+extern void flush_dcache_all(void);
EXPORT_SYMBOL(flush_dcache_all);
#endif
@@ -217,9 +217,6 @@ EXPORT_SYMBOL(adb_try_handler_change);
EXPORT_SYMBOL(cuda_request);
EXPORT_SYMBOL(cuda_poll);
#endif /* CONFIG_ADB_CUDA */
-#ifdef CONFIG_PPC_MULTIPLATFORM
-EXPORT_SYMBOL(_machine);
-#endif
#ifdef CONFIG_PPC_PMAC
EXPORT_SYMBOL(sys_ctrler);
EXPORT_SYMBOL(pmac_newworld);
diff --git a/arch/ppc/kernel/setup.c b/arch/ppc/kernel/setup.c
index 6bcb85d2b7f..dc55e1abc45 100644
--- a/arch/ppc/kernel/setup.c
+++ b/arch/ppc/kernel/setup.c
@@ -76,6 +76,7 @@ unsigned int DMA_MODE_WRITE;
#ifdef CONFIG_PPC_MULTIPLATFORM
int _machine = 0;
+EXPORT_SYMBOL(_machine);
extern void prep_init(unsigned long r3, unsigned long r4,
unsigned long r5, unsigned long r6, unsigned long r7);
diff --git a/arch/ppc/platforms/pmac_pic.c b/arch/ppc/platforms/pmac_pic.c
index 9f2d95ea856..4742bf60935 100644
--- a/arch/ppc/platforms/pmac_pic.c
+++ b/arch/ppc/platforms/pmac_pic.c
@@ -75,6 +75,9 @@ static DEFINE_SPINLOCK(pmac_pic_lock);
#define GATWICK_IRQ_POOL_SIZE 10
static struct interrupt_info gatwick_int_pool[GATWICK_IRQ_POOL_SIZE];
+#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
+static unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
+
/*
* Mark an irq as "lost". This is only used on the pmac
* since it can lose interrupts (see pmac_set_irq_mask).
diff --git a/arch/ppc/platforms/prep_setup.c b/arch/ppc/platforms/prep_setup.c
index 067d7d53b81..4415748071d 100644
--- a/arch/ppc/platforms/prep_setup.c
+++ b/arch/ppc/platforms/prep_setup.c
@@ -61,6 +61,15 @@
#include <asm/pci-bridge.h>
#include <asm/todc.h>
+/* prep registers for L2 */
+#define CACHECRBA 0x80000823 /* Cache configuration register address */
+#define L2CACHE_MASK 0x03 /* Mask for 2 L2 Cache bits */
+#define L2CACHE_512KB 0x00 /* 512KB */
+#define L2CACHE_256KB 0x01 /* 256KB */
+#define L2CACHE_1MB 0x02 /* 1MB */
+#define L2CACHE_NONE 0x03 /* NONE */
+#define L2CACHE_PARITY 0x08 /* Mask for L2 Cache Parity Protected bit */
+
TODC_ALLOC();
unsigned char ucSystemType;
diff --git a/arch/ppc64/Kconfig b/arch/ppc64/Kconfig
index 29552348e58..c9d32db9d76 100644
--- a/arch/ppc64/Kconfig
+++ b/arch/ppc64/Kconfig
@@ -297,6 +297,10 @@ config HAVE_ARCH_EARLY_PFN_TO_NID
def_bool y
depends on NEED_MULTIPLE_NODES
+config ARCH_MEMORY_PROBE
+ def_bool y
+ depends on MEMORY_HOTPLUG
+
# Some NUMA nodes have memory ranges that span
# other nodes. Even though a pfn is valid and
# between a node's start and end pfns, it may not
diff --git a/arch/ppc64/boot/addRamDisk.c b/arch/ppc64/boot/addRamDisk.c
index 7f2c0947339..c02a99952be 100644
--- a/arch/ppc64/boot/addRamDisk.c
+++ b/arch/ppc64/boot/addRamDisk.c
@@ -5,11 +5,59 @@
#include <sys/types.h>
#include <sys/stat.h>
#include <string.h>
+#include <elf.h>
#define ElfHeaderSize (64 * 1024)
#define ElfPages (ElfHeaderSize / 4096)
#define KERNELBASE (0xc000000000000000)
+#define _ALIGN_UP(addr,size) (((addr)+((size)-1))&(~((size)-1)))
+struct addr_range {
+ unsigned long long addr;
+ unsigned long memsize;
+ unsigned long offset;
+};
+
+static int check_elf64(void *p, int size, struct addr_range *r)
+{
+ Elf64_Ehdr *elf64 = p;
+ Elf64_Phdr *elf64ph;
+
+ if (elf64->e_ident[EI_MAG0] != ELFMAG0 ||
+ elf64->e_ident[EI_MAG1] != ELFMAG1 ||
+ elf64->e_ident[EI_MAG2] != ELFMAG2 ||
+ elf64->e_ident[EI_MAG3] != ELFMAG3 ||
+ elf64->e_ident[EI_CLASS] != ELFCLASS64 ||
+ elf64->e_ident[EI_DATA] != ELFDATA2MSB ||
+ elf64->e_type != ET_EXEC || elf64->e_machine != EM_PPC64)
+ return 0;
+
+ if ((elf64->e_phoff + sizeof(Elf64_Phdr)) > size)
+ return 0;
+
+ elf64ph = (Elf64_Phdr *) ((unsigned long)elf64 +
+ (unsigned long)elf64->e_phoff);
+
+ r->memsize = (unsigned long)elf64ph->p_memsz;
+ r->offset = (unsigned long)elf64ph->p_offset;
+ r->addr = (unsigned long long)elf64ph->p_vaddr;
+
+#ifdef DEBUG
+ printf("PPC64 ELF file, ph:\n");
+ printf("p_type 0x%08x\n", elf64ph->p_type);
+ printf("p_flags 0x%08x\n", elf64ph->p_flags);
+ printf("p_offset 0x%016llx\n", elf64ph->p_offset);
+ printf("p_vaddr 0x%016llx\n", elf64ph->p_vaddr);
+ printf("p_paddr 0x%016llx\n", elf64ph->p_paddr);
+ printf("p_filesz 0x%016llx\n", elf64ph->p_filesz);
+ printf("p_memsz 0x%016llx\n", elf64ph->p_memsz);
+ printf("p_align 0x%016llx\n", elf64ph->p_align);
+ printf("... skipping 0x%08lx bytes of ELF header\n",
+ (unsigned long)elf64ph->p_offset);
+#endif
+
+ return 64;
+}
void get4k(FILE *file, char *buf )
{
unsigned j;
@@ -34,97 +82,92 @@ void death(const char *msg, FILE *fdesc, const char *fname)
int main(int argc, char **argv)
{
char inbuf[4096];
- FILE *ramDisk = NULL;
- FILE *sysmap = NULL;
- FILE *inputVmlinux = NULL;
- FILE *outputVmlinux = NULL;
-
- unsigned i = 0;
- unsigned long ramFileLen = 0;
- unsigned long ramLen = 0;
- unsigned long roundR = 0;
-
- unsigned long sysmapFileLen = 0;
- unsigned long sysmapLen = 0;
- unsigned long sysmapPages = 0;
- char* ptr_end = NULL;
- unsigned long offset_end = 0;
-
- unsigned long kernelLen = 0;
- unsigned long actualKernelLen = 0;
- unsigned long round = 0;
- unsigned long roundedKernelLen = 0;
- unsigned long ramStartOffs = 0;
- unsigned long ramPages = 0;
- unsigned long roundedKernelPages = 0;
- unsigned long hvReleaseData = 0;
+ struct addr_range vmlinux;
+ FILE *ramDisk;
+ FILE *inputVmlinux;
+ FILE *outputVmlinux;
+
+ char *rd_name, *lx_name, *out_name;
+
+ size_t i;
+ unsigned long ramFileLen;
+ unsigned long ramLen;
+ unsigned long roundR;
+ unsigned long offset_end;
+
+ unsigned long kernelLen;
+ unsigned long actualKernelLen;
+ unsigned long round;
+ unsigned long roundedKernelLen;
+ unsigned long ramStartOffs;
+ unsigned long ramPages;
+ unsigned long roundedKernelPages;
+ unsigned long hvReleaseData;
u_int32_t eyeCatcher = 0xc8a5d9c4;
- unsigned long naca = 0;
- unsigned long xRamDisk = 0;
- unsigned long xRamDiskSize = 0;
- long padPages = 0;
+ unsigned long naca;
+ unsigned long xRamDisk;
+ unsigned long xRamDiskSize;
+ long padPages;
if (argc < 2) {
fprintf(stderr, "Name of RAM disk file missing.\n");
exit(1);
}
+ rd_name = argv[1];
if (argc < 3) {
- fprintf(stderr, "Name of System Map input file is missing.\n");
- exit(1);
- }
-
- if (argc < 4) {
fprintf(stderr, "Name of vmlinux file missing.\n");
exit(1);
}
+ lx_name = argv[2];
- if (argc < 5) {
+ if (argc < 4) {
fprintf(stderr, "Name of vmlinux output file missing.\n");
exit(1);
}
+ out_name = argv[3];
- ramDisk = fopen(argv[1], "r");
+ ramDisk = fopen(rd_name, "r");
if ( ! ramDisk ) {
- fprintf(stderr, "RAM disk file \"%s\" failed to open.\n", argv[1]);
+ fprintf(stderr, "RAM disk file \"%s\" failed to open.\n", rd_name);
exit(1);
}
- sysmap = fopen(argv[2], "r");
- if ( ! sysmap ) {
- fprintf(stderr, "System Map file \"%s\" failed to open.\n", argv[2]);
- exit(1);
- }
-
- inputVmlinux = fopen(argv[3], "r");
+ inputVmlinux = fopen(lx_name, "r");
if ( ! inputVmlinux ) {
- fprintf(stderr, "vmlinux file \"%s\" failed to open.\n", argv[3]);
+ fprintf(stderr, "vmlinux file \"%s\" failed to open.\n", lx_name);
exit(1);
}
- outputVmlinux = fopen(argv[4], "w+");
+ outputVmlinux = fopen(out_name, "w+");
if ( ! outputVmlinux ) {
- fprintf(stderr, "output vmlinux file \"%s\" failed to open.\n", argv[4]);
+ fprintf(stderr, "output vmlinux file \"%s\" failed to open.\n", out_name);
exit(1);
}
-
-
-
+
+ i = fread(inbuf, 1, sizeof(inbuf), inputVmlinux);
+ if (i != sizeof(inbuf)) {
+ fprintf(stderr, "can not read vmlinux file %s: %u\n", lx_name, i);
+ exit(1);
+ }
+
+ i = check_elf64(inbuf, sizeof(inbuf), &vmlinux);
+ if (i == 0) {
+ fprintf(stderr, "You must have a linux kernel specified as argv[2]\n");
+ exit(1);
+ }
+
/* Input Vmlinux file */
fseek(inputVmlinux, 0, SEEK_END);
kernelLen = ftell(inputVmlinux);
fseek(inputVmlinux, 0, SEEK_SET);
- printf("kernel file size = %d\n", kernelLen);
- if ( kernelLen == 0 ) {
- fprintf(stderr, "You must have a linux kernel specified as argv[3]\n");
- exit(1);
- }
+ printf("kernel file size = %lu\n", kernelLen);
actualKernelLen = kernelLen - ElfHeaderSize;
- printf("actual kernel length (minus ELF header) = %d\n", actualKernelLen);
+ printf("actual kernel length (minus ELF header) = %lu\n", actualKernelLen);
round = actualKernelLen % 4096;
roundedKernelLen = actualKernelLen;
@@ -134,39 +177,7 @@ int main(int argc, char **argv)
roundedKernelPages = roundedKernelLen / 4096;
printf("Vmlinux pages to copy = %ld/0x%lx \n", roundedKernelPages, roundedKernelPages);
-
-
- /* Input System Map file */
- /* (needs to be processed simply to determine if we need to add pad pages due to the static variables not being included in the vmlinux) */
- fseek(sysmap, 0, SEEK_END);
- sysmapFileLen = ftell(sysmap);
- fseek(sysmap, 0, SEEK_SET);
- printf("%s file size = %ld/0x%lx \n", argv[2], sysmapFileLen, sysmapFileLen);
-
- sysmapLen = sysmapFileLen;
-
- roundR = 4096 - (sysmapLen % 4096);
- if (roundR) {
- printf("Rounding System Map file up to a multiple of 4096, adding %ld/0x%lx \n", roundR, roundR);
- sysmapLen += roundR;
- }
- printf("Rounded System Map size is %ld/0x%lx \n", sysmapLen, sysmapLen);
-
- /* Process the Sysmap file to determine where _end is */
- sysmapPages = sysmapLen / 4096;
- /* read the whole file line by line, expect that it doesn't fail */
- while ( fgets(inbuf, 4096, sysmap) ) ;
- /* search for _end in the last page of the system map */
- ptr_end = strstr(inbuf, " _end");
- if (!ptr_end) {
- fprintf(stderr, "Unable to find _end in the sysmap file \n");
- fprintf(stderr, "inbuf: \n");
- fprintf(stderr, "%s \n", inbuf);
- exit(1);
- }
- printf("Found _end in the last page of the sysmap - backing up 10 characters it looks like %s", ptr_end-10);
- /* convert address of _end in system map to hex offset. */
- offset_end = (unsigned int)strtol(ptr_end-10, NULL, 16);
+ offset_end = _ALIGN_UP(vmlinux.memsize, 4096);
/* calc how many pages we need to insert between the vmlinux and the start of the ram disk */
padPages = offset_end/4096 - roundedKernelPages;
@@ -194,7 +205,7 @@ int main(int argc, char **argv)
fseek(ramDisk, 0, SEEK_END);
ramFileLen = ftell(ramDisk);
fseek(ramDisk, 0, SEEK_SET);
- printf("%s file size = %ld/0x%lx \n", argv[1], ramFileLen, ramFileLen);
+ printf("%s file size = %ld/0x%lx \n", rd_name, ramFileLen, ramFileLen);
ramLen = ramFileLen;
@@ -248,19 +259,19 @@ int main(int argc, char **argv)
/* fseek to the hvReleaseData pointer */
fseek(outputVmlinux, ElfHeaderSize + 0x24, SEEK_SET);
if (fread(&hvReleaseData, 4, 1, outputVmlinux) != 1) {
- death("Could not read hvReleaseData pointer\n", outputVmlinux, argv[4]);
+ death("Could not read hvReleaseData pointer\n", outputVmlinux, out_name);
}
hvReleaseData = ntohl(hvReleaseData); /* Convert to native int */
- printf("hvReleaseData is at %08x\n", hvReleaseData);
+ printf("hvReleaseData is at %08lx\n", hvReleaseData);
/* fseek to the hvReleaseData */
fseek(outputVmlinux, ElfHeaderSize + hvReleaseData, SEEK_SET);
if (fread(inbuf, 0x40, 1, outputVmlinux) != 1) {
- death("Could not read hvReleaseData\n", outputVmlinux, argv[4]);
+ death("Could not read hvReleaseData\n", outputVmlinux, out_name);
}
/* Check hvReleaseData sanity */
if (memcmp(inbuf, &eyeCatcher, 4) != 0) {
- death("hvReleaseData is invalid\n", outputVmlinux, argv[4]);
+ death("hvReleaseData is invalid\n", outputVmlinux, out_name);
}
/* Get the naca pointer */
naca = ntohl(*((u_int32_t*) &inbuf[0x0C])) - KERNELBASE;
@@ -269,13 +280,13 @@ int main(int argc, char **argv)
/* fseek to the naca */
fseek(outputVmlinux, ElfHeaderSize + naca, SEEK_SET);
if (fread(inbuf, 0x18, 1, outputVmlinux) != 1) {
- death("Could not read naca\n", outputVmlinux, argv[4]);
+ death("Could not read naca\n", outputVmlinux, out_name);
}
xRamDisk = ntohl(*((u_int32_t *) &inbuf[0x0c]));
xRamDiskSize = ntohl(*((u_int32_t *) &inbuf[0x14]));
/* Make sure a RAM disk isn't already present */
if ((xRamDisk != 0) || (xRamDiskSize != 0)) {
- death("RAM disk is already attached to this kernel\n", outputVmlinux, argv[4]);
+ death("RAM disk is already attached to this kernel\n", outputVmlinux, out_name);
}
/* Fill in the values */
*((u_int32_t *) &inbuf[0x0c]) = htonl(ramStartOffs);
@@ -285,15 +296,15 @@ int main(int argc, char **argv)
fflush(outputVmlinux);
fseek(outputVmlinux, ElfHeaderSize + naca, SEEK_SET);
if (fwrite(inbuf, 0x18, 1, outputVmlinux) != 1) {
- death("Could not write naca\n", outputVmlinux, argv[4]);
+ death("Could not write naca\n", outputVmlinux, out_name);
}
- printf("Ram Disk of 0x%lx pages is attached to the kernel at offset 0x%08x\n",
+ printf("Ram Disk of 0x%lx pages is attached to the kernel at offset 0x%08lx\n",
ramPages, ramStartOffs);
/* Done */
fclose(outputVmlinux);
/* Set permission to executable */
- chmod(argv[4], S_IRUSR|S_IWUSR|S_IXUSR|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH);
+ chmod(out_name, S_IRUSR|S_IWUSR|S_IXUSR|S_IRGRP|S_IXGRP|S_IROTH|S_IXOTH);
return 0;
}
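
The System.map pass is gone because the kernel's in-memory size now comes straight from the ELF program header and is rounded up with the _ALIGN_UP() macro added above. A small worked example (the p_memsz value is made up):

	#define _ALIGN_UP(addr,size) (((addr)+((size)-1))&(~((size)-1)))

	unsigned long memsize    = 0x4a3c20;                 /* hypothetical p_memsz */
	unsigned long offset_end = _ALIGN_UP(memsize, 4096); /* rounds up to 0x4a4000 */
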
diff --git a/arch/ppc64/kernel/Makefile b/arch/ppc64/kernel/Makefile
index c441aebe764..58b19f10765 100644
--- a/arch/ppc64/kernel/Makefile
+++ b/arch/ppc64/kernel/Makefile
@@ -11,12 +11,11 @@ obj-y := misc.o prom.o
endif
-obj-y += irq.o idle.o dma.o \
- align.o pacaData.o \
- udbg.o ioctl32.o \
+obj-y += idle.o dma.o \
+ align.o \
+ udbg.o \
rtc.o \
- cpu_setup_power4.o \
- iommu.o sysfs.o vdso.o firmware.o
+ iommu.o vdso.o
obj-y += vdso32/ vdso64/
pci-obj-$(CONFIG_PPC_MULTIPLATFORM) += pci_dn.o pci_direct_iommu.o
@@ -31,15 +30,10 @@ endif
obj-$(CONFIG_PPC_PSERIES) += udbg_16550.o
obj-$(CONFIG_KEXEC) += machine_kexec.o
-obj-$(CONFIG_EEH) += eeh.o
-obj-$(CONFIG_PROC_FS) += proc_ppc64.o
obj-$(CONFIG_MODULES) += module.o
ifneq ($(CONFIG_PPC_MERGE),y)
obj-$(CONFIG_MODULES) += ppc_ksyms.o
endif
-obj-$(CONFIG_PPC_RTAS) += rtas_pci.o
-obj-$(CONFIG_SCANLOG) += scanlog.o
-obj-$(CONFIG_LPARCFG) += lparcfg.o
obj-$(CONFIG_HVC_CONSOLE) += hvconsole.o
ifneq ($(CONFIG_PPC_MERGE),y)
obj-$(CONFIG_BOOTX_TEXT) += btext.o
@@ -52,8 +46,6 @@ obj-$(CONFIG_PPC_MAPLE) += udbg_16550.o
obj-$(CONFIG_KPROBES) += kprobes.o
-CFLAGS_ioctl32.o += -Ifs/
-
ifneq ($(CONFIG_PPC_MERGE),y)
ifeq ($(CONFIG_PPC_ISERIES),y)
arch/ppc64/kernel/head.o: arch/powerpc/kernel/lparmap.s
diff --git a/arch/ppc64/kernel/asm-offsets.c b/arch/ppc64/kernel/asm-offsets.c
index bce9065da6c..84ab5c18ef5 100644
--- a/arch/ppc64/kernel/asm-offsets.c
+++ b/arch/ppc64/kernel/asm-offsets.c
@@ -74,7 +74,6 @@ int main(void)
DEFINE(ICACHEL1LINESIZE, offsetof(struct ppc64_caches, iline_size));
DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size));
DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
- DEFINE(PLATFORM, offsetof(struct systemcfg, platform));
DEFINE(PLATFORM_LPAR, PLATFORM_LPAR);
/* paca */
diff --git a/arch/ppc64/kernel/head.S b/arch/ppc64/kernel/head.S
index 9e8050ea122..1c869ea72d2 100644
--- a/arch/ppc64/kernel/head.S
+++ b/arch/ppc64/kernel/head.S
@@ -28,7 +28,6 @@
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
-#include <asm/systemcfg.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/bug.h>
@@ -1701,21 +1700,9 @@ _GLOBAL(__secondary_start)
HMT_MEDIUM /* Set thread priority to MEDIUM */
ld r2,PACATOC(r13)
- li r6,0
- stb r6,PACAPROCENABLED(r13)
-
-#ifndef CONFIG_PPC_ISERIES
- /* Initialize the page table pointer register. */
- LOADADDR(r6,_SDR1)
- ld r6,0(r6) /* get the value of _SDR1 */
- mtspr SPRN_SDR1,r6 /* set the htab location */
-#endif
- /* Initialize the first segment table (or SLB) entry */
- ld r3,PACASTABVIRT(r13) /* get addr of segment table */
-BEGIN_FTR_SECTION
- bl .stab_initialize
-END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
- bl .slb_initialize
+
+ /* Do early setup for that CPU */
+ bl .early_setup_secondary
/* Initialize the kernel stack. Just a repeat for iSeries. */
LOADADDR(r3,current_set)
@@ -1724,37 +1711,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_SLB)
addi r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
std r1,PACAKSAVE(r13)
- ld r3,PACASTABREAL(r13) /* get raddr of segment table */
- ori r4,r3,1 /* turn on valid bit */
-
-#ifdef CONFIG_PPC_ISERIES
- li r0,-1 /* hypervisor call */
- li r3,1
- sldi r3,r3,63 /* 0x8000000000000000 */
- ori r3,r3,4 /* 0x8000000000000004 */
- sc /* HvCall_setASR */
-#else
- /* set the ASR */
- ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
- ld r3,0(r3)
- lwz r3,PLATFORM(r3) /* r3 = platform flags */
- andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */
- beq 98f /* branch if result is 0 */
- mfspr r3,SPRN_PVR
- srwi r3,r3,16
- cmpwi r3,0x37 /* SStar */
- beq 97f
- cmpwi r3,0x36 /* IStar */
- beq 97f
- cmpwi r3,0x34 /* Pulsar */
- bne 98f
-97: li r3,H_SET_ASR /* hcall = H_SET_ASR */
- HVSC /* Invoking hcall */
- b 99f
-98: /* !(rpa hypervisor) || !(star) */
- mtasr r4 /* set the stab location */
-99:
-#endif
li r7,0
mtlr r7
@@ -1896,40 +1852,6 @@ _STATIC(start_here_multiplatform)
mr r3,r31
bl .early_setup
- /* set the ASR */
- ld r3,PACASTABREAL(r13)
- ori r4,r3,1 /* turn on valid bit */
- ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
- ld r3,0(r3)
- lwz r3,PLATFORM(r3) /* r3 = platform flags */
- andi. r3,r3,PLATFORM_LPAR /* Test if bit 0 is set (LPAR bit) */
- beq 98f /* branch if result is 0 */
- mfspr r3,SPRN_PVR
- srwi r3,r3,16
- cmpwi r3,0x37 /* SStar */
- beq 97f
- cmpwi r3,0x36 /* IStar */
- beq 97f
- cmpwi r3,0x34 /* Pulsar */
- bne 98f
-97: li r3,H_SET_ASR /* hcall = H_SET_ASR */
- HVSC /* Invoking hcall */
- b 99f
-98: /* !(rpa hypervisor) || !(star) */
- mtasr r4 /* set the stab location */
-99:
- /* Set SDR1 (hash table pointer) */
- ld r3,systemcfg@got(r2) /* r3 = ptr to systemcfg */
- ld r3,0(r3)
- lwz r3,PLATFORM(r3) /* r3 = platform flags */
- /* Test if bit 0 is set (LPAR bit) */
- andi. r3,r3,PLATFORM_LPAR
- bne 98f /* branch if result is !0 */
- LOADADDR(r6,_SDR1) /* Only if NOT LPAR */
- sub r6,r6,r26
- ld r6,0(r6) /* get the value of _SDR1 */
- mtspr SPRN_SDR1,r6 /* set the htab location */
-98:
LOADADDR(r3,.start_here_common)
SET_REG_TO_CONST(r4, MSR_KERNEL)
mtspr SPRN_SRR0,r3
diff --git a/arch/ppc64/kernel/idle.c b/arch/ppc64/kernel/idle.c
index 715bc0e71e0..b879d3057ef 100644
--- a/arch/ppc64/kernel/idle.c
+++ b/arch/ppc64/kernel/idle.c
@@ -26,7 +26,6 @@
#include <asm/processor.h>
#include <asm/cputable.h>
#include <asm/time.h>
-#include <asm/systemcfg.h>
#include <asm/machdep.h>
#include <asm/smp.h>
diff --git a/arch/ppc64/kernel/misc.S b/arch/ppc64/kernel/misc.S
index 914632ec587..492bca6137e 100644
--- a/arch/ppc64/kernel/misc.S
+++ b/arch/ppc64/kernel/misc.S
@@ -78,12 +78,12 @@ _GLOBAL(call_do_softirq)
mtlr r0
blr
-_GLOBAL(call_handle_IRQ_event)
+_GLOBAL(call___do_IRQ)
mflr r0
std r0,16(r1)
- stdu r1,THREAD_SIZE-112(r6)
- mr r1,r6
- bl .handle_IRQ_event
+ stdu r1,THREAD_SIZE-112(r5)
+ mr r1,r5
+ bl .__do_IRQ
ld r1,0(r1)
ld r0,16(r1)
mtlr r0
diff --git a/arch/ppc64/kernel/nvram.c b/arch/ppc64/kernel/nvram.c
index 4fb1a9f5060..c0fcd29918c 100644
--- a/arch/ppc64/kernel/nvram.c
+++ b/arch/ppc64/kernel/nvram.c
@@ -31,7 +31,6 @@
#include <asm/rtas.h>
#include <asm/prom.h>
#include <asm/machdep.h>
-#include <asm/systemcfg.h>
#undef DEBUG_NVRAM
@@ -167,7 +166,7 @@ static int dev_nvram_ioctl(struct inode *inode, struct file *file,
case IOC_NVRAM_GET_OFFSET: {
int part, offset;
- if (systemcfg->platform != PLATFORM_POWERMAC)
+ if (_machine != PLATFORM_POWERMAC)
return -EINVAL;
if (copy_from_user(&part, (void __user*)arg, sizeof(part)) != 0)
return -EFAULT;
@@ -450,7 +449,7 @@ static int nvram_setup_partition(void)
* in our nvram, as Apple defined partitions use pretty much
* all of the space
*/
- if (systemcfg->platform == PLATFORM_POWERMAC)
+ if (_machine == PLATFORM_POWERMAC)
return -ENOSPC;
/* see if we have an OS partition that meets our needs.
diff --git a/arch/ppc64/kernel/pci.c b/arch/ppc64/kernel/pci.c
index 30247ff7497..3cef1b8f57f 100644
--- a/arch/ppc64/kernel/pci.c
+++ b/arch/ppc64/kernel/pci.c
@@ -548,6 +548,11 @@ static int __init pcibios_init(void)
if (ppc64_isabridge_dev != NULL)
printk("ISA bridge at %s\n", pci_name(ppc64_isabridge_dev));
+#ifdef CONFIG_PPC_MULTIPLATFORM
+ /* map in PCI I/O space */
+ phbs_remap_io();
+#endif
+
printk("PCI: Probing PCI hardware done\n");
return 0;
@@ -1277,12 +1282,9 @@ long sys_pciconfig_iobase(long which, unsigned long in_bus,
* G5 machines... So when something asks for bus 0 io base
* (bus 0 is HT root), we return the AGP one instead.
*/
-#ifdef CONFIG_PPC_PMAC
- if (systemcfg->platform == PLATFORM_POWERMAC &&
- machine_is_compatible("MacRISC4"))
+ if (machine_is_compatible("MacRISC4"))
if (in_bus == 0)
in_bus = 0xf0;
-#endif /* CONFIG_PPC_PMAC */
/* That syscall isn't quite compatible with PCI domains, but it's
* used on pre-domains setup. We return the first match
diff --git a/arch/ppc64/kernel/pci_dn.c b/arch/ppc64/kernel/pci_dn.c
index 1a443a7ada4..12c4c9e9bbc 100644
--- a/arch/ppc64/kernel/pci_dn.c
+++ b/arch/ppc64/kernel/pci_dn.c
@@ -43,7 +43,7 @@ static void * __devinit update_dn_pci_info(struct device_node *dn, void *data)
u32 *regs;
struct pci_dn *pdn;
- if (phb->is_dynamic)
+ if (mem_init_done)
pdn = kmalloc(sizeof(*pdn), GFP_KERNEL);
else
pdn = alloc_bootmem(sizeof(*pdn));
@@ -120,6 +120,14 @@ void *traverse_pci_devices(struct device_node *start, traverse_func pre,
return NULL;
}
+/**
+ * pci_devs_phb_init_dynamic - setup pci devices under this PHB
+ * phb: pci-to-host bridge (top-level bridge connecting to cpu)
+ *
+ * This routine is called both during boot, (before the memory
+ * subsystem is set up, before kmalloc is valid) and during the
+ * dynamic lpar operation of adding a PHB to a running system.
+ */
void __devinit pci_devs_phb_init_dynamic(struct pci_controller *phb)
{
struct device_node * dn = (struct device_node *) phb->arch_data;
@@ -201,9 +209,14 @@ static struct notifier_block pci_dn_reconfig_nb = {
.notifier_call = pci_dn_reconfig_notifier,
};
-/*
- * Actually initialize the phbs.
- * The buswalk on this phb has not happened yet.
+/**
+ * pci_devs_phb_init - Initialize phbs and pci devs under them.
+ *
+ * This routine walks over all phb's (pci-host bridges) on the
+ * system, and sets up assorted pci-related structures
+ * (including pci info in the device node structs) for each
+ * pci device found underneath. This routine runs once,
+ * early in the boot sequence.
*/
void __init pci_devs_phb_init(void)
{
diff --git a/arch/ppc64/kernel/prom.c b/arch/ppc64/kernel/prom.c
index 3402fbee62c..fbad2c36078 100644
--- a/arch/ppc64/kernel/prom.c
+++ b/arch/ppc64/kernel/prom.c
@@ -318,7 +318,7 @@ static int __devinit finish_node_interrupts(struct device_node *np,
}
/* We offset irq numbers for the u3 MPIC by 128 in PowerMac */
- if (systemcfg->platform == PLATFORM_POWERMAC && ic && ic->parent) {
+ if (_machine == PLATFORM_POWERMAC && ic && ic->parent) {
char *name = get_property(ic->parent, "name", NULL);
if (name && !strcmp(name, "u3"))
np->intrs[intrcount].line += 128;
@@ -1065,7 +1065,7 @@ static int __init early_init_dt_scan_chosen(unsigned long node,
prop = (u32 *)of_get_flat_dt_prop(node, "linux,platform", NULL);
if (prop == NULL)
return 0;
- systemcfg->platform = *prop;
+ _machine = *prop;
/* check if iommu is forced on or off */
if (of_get_flat_dt_prop(node, "linux,iommu-off", NULL) != NULL)
@@ -1230,11 +1230,8 @@ void __init early_init_devtree(void *params)
of_scan_flat_dt(early_init_dt_scan_memory, NULL);
lmb_enforce_memory_limit(memory_limit);
lmb_analyze();
- systemcfg->physicalMemorySize = lmb_phys_mem_size();
lmb_reserve(0, __pa(klimit));
- DBG("Phys. mem: %lx\n", systemcfg->physicalMemorySize);
-
/* Reserve LMB regions used by kernel, initrd, dt, etc... */
early_reserve_mem();
@@ -1753,7 +1750,7 @@ static int of_finish_dynamic_node(struct device_node *node,
/* We don't support that function on PowerMac, at least
* not yet
*/
- if (systemcfg->platform == PLATFORM_POWERMAC)
+ if (_machine == PLATFORM_POWERMAC)
return -ENODEV;
/* fix up new node's linux_phandle field */
diff --git a/arch/ppc64/kernel/prom_init.c b/arch/ppc64/kernel/prom_init.c
index e4c880dab99..6375f40b23d 100644
--- a/arch/ppc64/kernel/prom_init.c
+++ b/arch/ppc64/kernel/prom_init.c
@@ -1934,7 +1934,8 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4, unsigned long
/*
* On pSeries, inform the firmware about our capabilities
*/
- if (RELOC(of_platform) & PLATFORM_PSERIES)
+ if (RELOC(of_platform) == PLATFORM_PSERIES ||
+ RELOC(of_platform) == PLATFORM_PSERIES_LPAR)
prom_send_capabilities();
/*
diff --git a/arch/ppc64/kernel/vdso.c b/arch/ppc64/kernel/vdso.c
index 4aacf521e3e..1bbacac4498 100644
--- a/arch/ppc64/kernel/vdso.c
+++ b/arch/ppc64/kernel/vdso.c
@@ -34,6 +34,7 @@
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/sections.h>
+#include <asm/systemcfg.h>
#include <asm/vdso.h>
#undef DEBUG
@@ -179,7 +180,7 @@ static struct page * vdso_vma_nopage(struct vm_area_struct * vma,
* Last page is systemcfg.
*/
if ((vma->vm_end - address) <= PAGE_SIZE)
- pg = virt_to_page(systemcfg);
+ pg = virt_to_page(_systemcfg);
else
pg = virt_to_page(vbase + offset);
@@ -604,7 +605,7 @@ void __init vdso_init(void)
get_page(pg);
}
- get_page(virt_to_page(systemcfg));
+ get_page(virt_to_page(_systemcfg));
}
int in_gate_area_no_task(unsigned long addr)
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index 6a4ebc62193..d7bfc61d287 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -75,7 +75,7 @@ struct cpu_fp_info linux_sparc_fpu[] = {
{ 9, 3, "Fujitsu or Weitek on-chip FPU"},
};
-#define NSPARCFPU (sizeof(linux_sparc_fpu)/sizeof(struct cpu_fp_info))
+#define NSPARCFPU ARRAY_SIZE(linux_sparc_fpu)
struct cpu_iu_info linux_sparc_chips[] = {
/* Sun4/100, 4/200, SLC */
@@ -120,7 +120,7 @@ struct cpu_iu_info linux_sparc_chips[] = {
{ 0xf, 0, "UNKNOWN CPU-VENDOR/TYPE"},
};
-#define NSPARCCHIPS (sizeof(linux_sparc_chips)/sizeof(struct cpu_iu_info))
+#define NSPARCCHIPS ARRAY_SIZE(linux_sparc_chips)
char *sparc_cpu_type;
char *sparc_fpu_type;
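
The open-coded element counts in this and the following sparc files are replaced with the kernel's standard helper from include/linux/kernel.h, which in kernels of this vintage is essentially:

	#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))
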
diff --git a/arch/sparc/kernel/pcic.c b/arch/sparc/kernel/pcic.c
index 25e31d5ec99..cccfc12802e 100644
--- a/arch/sparc/kernel/pcic.c
+++ b/arch/sparc/kernel/pcic.c
@@ -143,7 +143,7 @@ static struct pcic_ca2irq pcic_i_jk[] = {
* as several PROMs may be installed on the same physical board.
*/
#define SN2L_INIT(name, map) \
- { name, map, sizeof(map)/sizeof(struct pcic_ca2irq) }
+ { name, map, ARRAY_SIZE(map) }
static struct pcic_sn2list pcic_known_sysnames[] = {
SN2L_INIT("SUNW,JavaEngine1", pcic_i_je1a), /* JE1, PROM 2.32 */
diff --git a/arch/sparc/mm/fault.c b/arch/sparc/mm/fault.c
index 2bbd53f3caf..9eeed3347df 100644
--- a/arch/sparc/mm/fault.c
+++ b/arch/sparc/mm/fault.c
@@ -33,8 +33,6 @@
#include <asm/kdebug.h>
#include <asm/uaccess.h>
-#define ELEMENTS(arr) (sizeof (arr)/sizeof (arr[0]))
-
extern int prom_node_root;
/* At boot time we determine these two values necessary for setting
diff --git a/arch/sparc64/kernel/cpu.c b/arch/sparc64/kernel/cpu.c
index 77ef5df4e5a..00eed88ef2e 100644
--- a/arch/sparc64/kernel/cpu.c
+++ b/arch/sparc64/kernel/cpu.c
@@ -43,7 +43,7 @@ struct cpu_fp_info linux_sparc_fpu[] = {
{ 0x3e, 0x22, 0, "UltraSparc IIIi+ integrated FPU"},
};
-#define NSPARCFPU (sizeof(linux_sparc_fpu)/sizeof(struct cpu_fp_info))
+#define NSPARCFPU ARRAY_SIZE(linux_sparc_fpu)
struct cpu_iu_info linux_sparc_chips[] = {
{ 0x17, 0x10, "TI UltraSparc I (SpitFire)"},
@@ -59,7 +59,7 @@ struct cpu_iu_info linux_sparc_chips[] = {
{ 0x3e, 0x22, "TI UltraSparc IIIi+ (Serrano)"},
};
-#define NSPARCCHIPS (sizeof(linux_sparc_chips)/sizeof(struct cpu_iu_info))
+#define NSPARCCHIPS ARRAY_SIZE(linux_sparc_chips)
char *sparc_cpu_type = "cpu-oops";
char *sparc_fpu_type = "fpu-oops";
diff --git a/arch/sparc64/kernel/ioctl32.c b/arch/sparc64/kernel/ioctl32.c
index c2e96daa5ab..e62214354bb 100644
--- a/arch/sparc64/kernel/ioctl32.c
+++ b/arch/sparc64/kernel/ioctl32.c
@@ -114,8 +114,6 @@ COMPATIBLE_IOCTL(FBIOGCURPOS)
COMPATIBLE_IOCTL(FBIOGCURMAX)
/* Little k */
/* Little v, the video4linux ioctls */
-COMPATIBLE_IOCTL(_IOR('p', 20, int[7])) /* RTCGET */
-COMPATIBLE_IOCTL(_IOW('p', 21, int[7])) /* RTCSET */
/* And these ioctls need translation */
/* Note SIOCRTMSG is no longer, so this is safe and * the user would have seen just an -EINVAL anyways. */
HANDLE_IOCTL(FBIOPUTCMAP32, fbiogetputcmap)
diff --git a/arch/sparc64/kernel/sbus.c b/arch/sparc64/kernel/sbus.c
index e09ddf92765..96b82505566 100644
--- a/arch/sparc64/kernel/sbus.c
+++ b/arch/sparc64/kernel/sbus.c
@@ -790,7 +790,7 @@ static unsigned long sysio_irq_offsets[] = {
#undef bogon
-#define NUM_SYSIO_OFFSETS (sizeof(sysio_irq_offsets) / sizeof(sysio_irq_offsets[0]))
+#define NUM_SYSIO_OFFSETS ARRAY_SIZE(sysio_irq_offsets)
/* Convert Interrupt Mapping register pointer to associated
* Interrupt Clear register pointer, SYSIO specific version.
diff --git a/arch/sparc64/mm/fault.c b/arch/sparc64/mm/fault.c
index 3be278d916d..6f0539aa44d 100644
--- a/arch/sparc64/mm/fault.c
+++ b/arch/sparc64/mm/fault.c
@@ -30,8 +30,6 @@
#include <asm/sections.h>
#include <asm/kdebug.h>
-#define ELEMENTS(arr) (sizeof (arr)/sizeof (arr[0]))
-
/*
* To debug kernel to catch accesses to certain virtual/physical addresses.
* Mode = 0 selects physical watchpoints, mode = 1 selects virtual watchpoints.
diff --git a/drivers/acpi/glue.c b/drivers/acpi/glue.c
index 3937adf4e5e..aa993715d64 100644
--- a/drivers/acpi/glue.c
+++ b/drivers/acpi/glue.c
@@ -203,6 +203,7 @@ acpi_handle acpi_get_pci_rootbridge_handle(unsigned int seg, unsigned int bus)
acpi_get_devices(PCI_ROOT_HID_STRING, find_pci_rootbridge, &find, NULL);
return find.handle;
}
+EXPORT_SYMBOL_GPL(acpi_get_pci_rootbridge_handle);
/* Get device's handler per its address under its parent */
struct acpi_find_child {
diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
index 0cded046800..821c81e8cd3 100644
--- a/drivers/atm/horizon.c
+++ b/drivers/atm/horizon.c
@@ -1511,8 +1511,8 @@ static inline short setup_idle_tx_channel (hrz_dev * dev, hrz_vcc * vcc) {
// a.k.a. prepare the channel and remember that we have done so.
tx_ch_desc * tx_desc = &memmap->tx_descs[tx_channel];
- u16 rd_ptr;
- u16 wr_ptr;
+ u32 rd_ptr;
+ u32 wr_ptr;
u16 channel = vcc->channel;
unsigned long flags;
diff --git a/drivers/char/agp/ali-agp.c b/drivers/char/agp/ali-agp.c
index ba54b587257..b02fc226715 100644
--- a/drivers/char/agp/ali-agp.c
+++ b/drivers/char/agp/ali-agp.c
@@ -389,7 +389,6 @@ static struct pci_device_id agp_ali_pci_table[] = {
MODULE_DEVICE_TABLE(pci, agp_ali_pci_table);
static struct pci_driver agp_ali_pci_driver = {
- .owner = THIS_MODULE,
.name = "agpgart-ali",
.id_table = agp_ali_pci_table,
.probe = agp_ali_probe,
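
This and the .owner removals in the following AGP, serial and watchdog drivers rely on the PCI core recording the owning module during registration. A hedged sketch of the mechanism, mirroring include/linux/pci.h of this era (treat the exact form as an assumption):

	int __pci_register_driver(struct pci_driver *drv, struct module *owner);

	static inline int pci_register_driver(struct pci_driver *drv)
	{
		/* the core stores 'owner' in drv->driver.owner itself */
		return __pci_register_driver(drv, THIS_MODULE);
	}
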
diff --git a/drivers/char/agp/amd-k7-agp.c b/drivers/char/agp/amd-k7-agp.c
index 40fcd88b2ce..1f776651ac6 100644
--- a/drivers/char/agp/amd-k7-agp.c
+++ b/drivers/char/agp/amd-k7-agp.c
@@ -515,7 +515,6 @@ static struct pci_device_id agp_amdk7_pci_table[] = {
MODULE_DEVICE_TABLE(pci, agp_amdk7_pci_table);
static struct pci_driver agp_amdk7_pci_driver = {
- .owner = THIS_MODULE,
.name = "agpgart-amdk7",
.id_table = agp_amdk7_pci_table,
.probe = agp_amdk7_probe,
diff --git a/drivers/char/agp/amd64-agp.c b/drivers/char/agp/amd64-agp.c
index 8f748fddca9..78ce98a69f3 100644
--- a/drivers/char/agp/amd64-agp.c
+++ b/drivers/char/agp/amd64-agp.c
@@ -703,7 +703,6 @@ static struct pci_device_id agp_amd64_pci_table[] = {
MODULE_DEVICE_TABLE(pci, agp_amd64_pci_table);
static struct pci_driver agp_amd64_pci_driver = {
- .owner = THIS_MODULE,
.name = "agpgart-amd64",
.id_table = agp_amd64_pci_table,
.probe = agp_amd64_probe,
diff --git a/drivers/char/agp/ati-agp.c b/drivers/char/agp/ati-agp.c
index fbd41556546..53372a83b67 100644
--- a/drivers/char/agp/ati-agp.c
+++ b/drivers/char/agp/ati-agp.c
@@ -521,7 +521,6 @@ static struct pci_device_id agp_ati_pci_table[] = {
MODULE_DEVICE_TABLE(pci, agp_ati_pci_table);
static struct pci_driver agp_ati_pci_driver = {
- .owner = THIS_MODULE,
.name = "agpgart-ati",
.id_table = agp_ati_pci_table,
.probe = agp_ati_probe,
diff --git a/drivers/char/agp/backend.c b/drivers/char/agp/backend.c
index 73f333f491b..27bca34b4a6 100644
--- a/drivers/char/agp/backend.c
+++ b/drivers/char/agp/backend.c
@@ -147,6 +147,7 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
printk(KERN_ERR PFX "unable to get memory for scratch page.\n");
return -ENOMEM;
}
+ flush_agp_mappings();
bridge->scratch_page_real = virt_to_gart(addr);
bridge->scratch_page =
@@ -187,9 +188,11 @@ static int agp_backend_initialize(struct agp_bridge_data *bridge)
return 0;
err_out:
- if (bridge->driver->needs_scratch_page)
+ if (bridge->driver->needs_scratch_page) {
bridge->driver->agp_destroy_page(
gart_to_virt(bridge->scratch_page_real));
+ flush_agp_mappings();
+ }
if (got_gatt)
bridge->driver->free_gatt_table(bridge);
if (got_keylist) {
@@ -211,9 +214,11 @@ static void agp_backend_cleanup(struct agp_bridge_data *bridge)
bridge->key_list = NULL;
if (bridge->driver->agp_destroy_page &&
- bridge->driver->needs_scratch_page)
+ bridge->driver->needs_scratch_page) {
bridge->driver->agp_destroy_page(
gart_to_virt(bridge->scratch_page_real));
+ flush_agp_mappings();
+ }
}
/* When we remove the global variable agp_bridge from all drivers
diff --git a/drivers/char/agp/efficeon-agp.c b/drivers/char/agp/efficeon-agp.c
index d41e0a62e32..e7aea77a60f 100644
--- a/drivers/char/agp/efficeon-agp.c
+++ b/drivers/char/agp/efficeon-agp.c
@@ -429,7 +429,6 @@ static struct pci_device_id agp_efficeon_pci_table[] = {
MODULE_DEVICE_TABLE(pci, agp_efficeon_pci_table);
static struct pci_driver agp_efficeon_pci_driver = {
- .owner = THIS_MODULE,
.name = "agpgart-efficeon",
.id_table = agp_efficeon_pci_table,
.probe = agp_efficeon_probe,
diff --git a/drivers/char/agp/generic.c b/drivers/char/agp/generic.c
index c4a38715c6f..5567ce8d72b 100644
--- a/drivers/char/agp/generic.c
+++ b/drivers/char/agp/generic.c
@@ -57,7 +57,8 @@ int map_page_into_agp(struct page *page)
{
int i;
i = change_page_attr(page, 1, PAGE_KERNEL_NOCACHE);
- global_flush_tlb();
+ /* Caller's responsibility to call global_flush_tlb() for
+ * performance reasons */
return i;
}
EXPORT_SYMBOL_GPL(map_page_into_agp);
@@ -66,7 +67,8 @@ int unmap_page_from_agp(struct page *page)
{
int i;
i = change_page_attr(page, 1, PAGE_KERNEL);
- global_flush_tlb();
+ /* Caller's responsibility to call global_flush_tlb() for
+ * performance reasons */
return i;
}
EXPORT_SYMBOL_GPL(unmap_page_from_agp);
@@ -153,6 +155,7 @@ void agp_free_memory(struct agp_memory *curr)
for (i = 0; i < curr->page_count; i++) {
curr->bridge->driver->agp_destroy_page(gart_to_virt(curr->memory[i]));
}
+ flush_agp_mappings();
}
agp_free_key(curr->key);
vfree(curr->memory);
@@ -210,7 +213,7 @@ struct agp_memory *agp_allocate_memory(struct agp_bridge_data *bridge,
new->memory[i] = virt_to_gart(addr);
new->page_count++;
}
- new->bridge = bridge;
+ new->bridge = bridge;
flush_agp_mappings();
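
The comments added to map_page_into_agp()/unmap_page_from_agp() move the TLB flush to callers so it can be batched. An illustrative caller pattern under that convention ('pages' and 'page_count' are placeholders for an array of struct page pointers and its length):

	for (i = 0; i < page_count; i++)
		map_page_into_agp(pages[i]);	/* per-page attribute change */
	global_flush_tlb();			/* one TLB flush for the whole batch */
	flush_agp_mappings();			/* arch hook; a no-op where unneeded */
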
diff --git a/drivers/char/agp/i460-agp.c b/drivers/char/agp/i460-agp.c
index 58944cd271e..8ee19a4a6bc 100644
--- a/drivers/char/agp/i460-agp.c
+++ b/drivers/char/agp/i460-agp.c
@@ -111,8 +111,10 @@ static int i460_fetch_size (void)
if (i460.io_page_shift != I460_IO_PAGE_SHIFT) {
printk(KERN_ERR PFX
- "I/O (GART) page-size %ZuKB doesn't match expected size %ZuKB\n",
- 1UL << (i460.io_page_shift - 10), 1UL << (I460_IO_PAGE_SHIFT));
+ "I/O (GART) page-size %luKB doesn't match expected "
+ "size %luKB\n",
+ 1UL << (i460.io_page_shift - 10),
+ 1UL << (I460_IO_PAGE_SHIFT));
return 0;
}
@@ -514,9 +516,10 @@ static void *i460_alloc_page (struct agp_bridge_data *bridge)
{
void *page;
- if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT)
+ if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) {
page = agp_generic_alloc_page(agp_bridge);
- else
+ global_flush_tlb();
+ } else
/* Returning NULL would cause problems */
/* AK: really dubious code. */
page = (void *)~0UL;
@@ -525,8 +528,10 @@ static void *i460_alloc_page (struct agp_bridge_data *bridge)
static void i460_destroy_page (void *page)
{
- if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT)
+ if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) {
agp_generic_destroy_page(page);
+ global_flush_tlb();
+ }
}
#endif /* I460_LARGE_IO_PAGES */
@@ -536,7 +541,7 @@ static unsigned long i460_mask_memory (struct agp_bridge_data *bridge,
{
/* Make sure the returned address is a valid GATT entry */
return bridge->driver->masks[0].mask
- | (((addr & ~((1 << I460_IO_PAGE_SHIFT) - 1)) & 0xffffff000) >> 12);
+ | (((addr & ~((1 << I460_IO_PAGE_SHIFT) - 1)) & 0xfffff000) >> 12);
}
struct agp_bridge_driver intel_i460_driver = {
@@ -617,7 +622,6 @@ static struct pci_device_id agp_intel_i460_pci_table[] = {
MODULE_DEVICE_TABLE(pci, agp_intel_i460_pci_table);
static struct pci_driver agp_intel_i460_pci_driver = {
- .owner = THIS_MODULE,
.name = "agpgart-intel-i460",
.id_table = agp_intel_i460_pci_table,
.probe = agp_intel_i460_probe,
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index bf4cc9ffd5b..e7bed5047dc 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -270,6 +270,7 @@ static struct agp_memory *alloc_agpphysmem_i8xx(size_t pg_count, int type)
switch (pg_count) {
case 1: addr = agp_bridge->driver->agp_alloc_page(agp_bridge);
+ global_flush_tlb();
break;
case 4:
/* kludge to get 4 physical pages for ARGB cursor */
@@ -330,9 +331,11 @@ static void intel_i810_free_by_type(struct agp_memory *curr)
if(curr->type == AGP_PHYS_MEMORY) {
if (curr->page_count == 4)
i8xx_destroy_pages(gart_to_virt(curr->memory[0]));
- else
+ else {
agp_bridge->driver->agp_destroy_page(
gart_to_virt(curr->memory[0]));
+ global_flush_tlb();
+ }
vfree(curr->memory);
}
kfree(curr);
@@ -1824,7 +1827,6 @@ static struct pci_device_id agp_intel_pci_table[] = {
MODULE_DEVICE_TABLE(pci, agp_intel_pci_table);
static struct pci_driver agp_intel_pci_driver = {
- .owner = THIS_MODULE,
.name = "agpgart-intel",
.id_table = agp_intel_pci_table,
.probe = agp_intel_probe,
diff --git a/drivers/char/agp/nvidia-agp.c b/drivers/char/agp/nvidia-agp.c
index 3aed0c5e2f9..80dafa3030b 100644
--- a/drivers/char/agp/nvidia-agp.c
+++ b/drivers/char/agp/nvidia-agp.c
@@ -398,7 +398,6 @@ static struct pci_device_id agp_nvidia_pci_table[] = {
MODULE_DEVICE_TABLE(pci, agp_nvidia_pci_table);
static struct pci_driver agp_nvidia_pci_driver = {
- .owner = THIS_MODULE,
.name = "agpgart-nvidia",
.id_table = agp_nvidia_pci_table,
.probe = agp_nvidia_probe,
diff --git a/drivers/char/agp/sis-agp.c b/drivers/char/agp/sis-agp.c
index a701361a889..ebc05554045 100644
--- a/drivers/char/agp/sis-agp.c
+++ b/drivers/char/agp/sis-agp.c
@@ -332,7 +332,6 @@ static struct pci_device_id agp_sis_pci_table[] = {
MODULE_DEVICE_TABLE(pci, agp_sis_pci_table);
static struct pci_driver agp_sis_pci_driver = {
- .owner = THIS_MODULE,
.name = "agpgart-sis",
.id_table = agp_sis_pci_table,
.probe = agp_sis_probe,
diff --git a/drivers/char/agp/sworks-agp.c b/drivers/char/agp/sworks-agp.c
index 5a5392dd125..3f8f7fa6b0f 100644
--- a/drivers/char/agp/sworks-agp.c
+++ b/drivers/char/agp/sworks-agp.c
@@ -545,7 +545,6 @@ static struct pci_device_id agp_serverworks_pci_table[] = {
MODULE_DEVICE_TABLE(pci, agp_serverworks_pci_table);
static struct pci_driver agp_serverworks_pci_driver = {
- .owner = THIS_MODULE,
.name = "agpgart-serverworks",
.id_table = agp_serverworks_pci_table,
.probe = agp_serverworks_probe,
diff --git a/drivers/char/agp/uninorth-agp.c b/drivers/char/agp/uninorth-agp.c
index 183c50acab2..c8255312b8c 100644
--- a/drivers/char/agp/uninorth-agp.c
+++ b/drivers/char/agp/uninorth-agp.c
@@ -658,7 +658,6 @@ static struct pci_device_id agp_uninorth_pci_table[] = {
MODULE_DEVICE_TABLE(pci, agp_uninorth_pci_table);
static struct pci_driver agp_uninorth_pci_driver = {
- .owner = THIS_MODULE,
.name = "agpgart-uninorth",
.id_table = agp_uninorth_pci_table,
.probe = agp_uninorth_probe,
diff --git a/drivers/char/agp/via-agp.c b/drivers/char/agp/via-agp.c
index 5d9a1370007..c847df575cf 100644
--- a/drivers/char/agp/via-agp.c
+++ b/drivers/char/agp/via-agp.c
@@ -518,7 +518,6 @@ MODULE_DEVICE_TABLE(pci, agp_via_pci_table);
static struct pci_driver agp_via_pci_driver = {
- .owner = THIS_MODULE,
.name = "agpgart-via",
.id_table = agp_via_pci_table,
.probe = agp_via_probe,
diff --git a/drivers/char/epca.c b/drivers/char/epca.c
index b7a0e4d6b93..407708a001e 100644
--- a/drivers/char/epca.c
+++ b/drivers/char/epca.c
@@ -3113,7 +3113,6 @@ MODULE_DEVICE_TABLE(pci, epca_pci_tbl);
int __init init_PCI (void)
{ /* Begin init_PCI */
memset (&epca_driver, 0, sizeof (epca_driver));
- epca_driver.owner = THIS_MODULE;
epca_driver.name = "epca";
epca_driver.id_table = epca_pci_tbl;
epca_driver.probe = epca_init_one;
diff --git a/drivers/char/synclink.c b/drivers/char/synclink.c
index 5d1ffa3bd4c..82c6abde68d 100644
--- a/drivers/char/synclink.c
+++ b/drivers/char/synclink.c
@@ -912,7 +912,6 @@ MODULE_DEVICE_TABLE(pci, synclink_pci_tbl);
MODULE_LICENSE("GPL");
static struct pci_driver synclink_pci_driver = {
- .owner = THIS_MODULE,
.name = "synclink",
.id_table = synclink_pci_tbl,
.probe = synclink_init_one,
diff --git a/drivers/char/synclinkmp.c b/drivers/char/synclinkmp.c
index 7c063c5abc5..ee5a40be9f9 100644
--- a/drivers/char/synclinkmp.c
+++ b/drivers/char/synclinkmp.c
@@ -500,7 +500,6 @@ MODULE_DEVICE_TABLE(pci, synclinkmp_pci_tbl);
MODULE_LICENSE("GPL");
static struct pci_driver synclinkmp_pci_driver = {
- .owner = THIS_MODULE,
.name = "synclinkmp",
.id_table = synclinkmp_pci_tbl,
.probe = synclinkmp_init_one,
diff --git a/drivers/char/watchdog/pcwd_pci.c b/drivers/char/watchdog/pcwd_pci.c
index d9ef55bdf88..2451edbefec 100644
--- a/drivers/char/watchdog/pcwd_pci.c
+++ b/drivers/char/watchdog/pcwd_pci.c
@@ -755,7 +755,6 @@ static struct pci_device_id pcipcwd_pci_tbl[] = {
MODULE_DEVICE_TABLE(pci, pcipcwd_pci_tbl);
static struct pci_driver pcipcwd_driver = {
- .owner = THIS_MODULE,
.name = WATCHDOG_NAME,
.id_table = pcipcwd_pci_tbl,
.probe = pcipcwd_card_init,
diff --git a/drivers/char/watchdog/wdt_pci.c b/drivers/char/watchdog/wdt_pci.c
index dc9370f6c34..4b3311993d4 100644
--- a/drivers/char/watchdog/wdt_pci.c
+++ b/drivers/char/watchdog/wdt_pci.c
@@ -711,7 +711,6 @@ MODULE_DEVICE_TABLE(pci, wdtpci_pci_tbl);
static struct pci_driver wdtpci_driver = {
- .owner = THIS_MODULE,
.name = "wdt_pci",
.id_table = wdtpci_pci_tbl,
.probe = wdtpci_init_one,
diff --git a/drivers/hwmon/hdaps.c b/drivers/hwmon/hdaps.c
index 1e5dfc7805e..c8c84e0819f 100644
--- a/drivers/hwmon/hdaps.c
+++ b/drivers/hwmon/hdaps.c
@@ -60,9 +60,11 @@
#define HDAPS_POLL_PERIOD (HZ/20) /* poll for input every 1/20s */
#define HDAPS_INPUT_FUZZ 4 /* input event threshold */
+#define HDAPS_INPUT_FLAT 4
static struct timer_list hdaps_timer;
static struct platform_device *pdev;
+static struct input_dev *hdaps_idev;
static unsigned int hdaps_invert;
static u8 km_activity;
static int rest_x;
@@ -309,18 +311,6 @@ static struct device_driver hdaps_driver = {
.resume = hdaps_resume
};
-/* Input class stuff */
-
-static struct input_dev hdaps_idev = {
- .name = "hdaps",
- .evbit = { BIT(EV_ABS) },
- .absbit = { BIT(ABS_X) | BIT(ABS_Y) },
- .absmin = { [ABS_X] = -256, [ABS_Y] = -256 },
- .absmax = { [ABS_X] = 256, [ABS_Y] = 256 },
- .absfuzz = { [ABS_X] = HDAPS_INPUT_FUZZ, [ABS_Y] = HDAPS_INPUT_FUZZ },
- .absflat = { [ABS_X] = HDAPS_INPUT_FUZZ, [ABS_Y] = HDAPS_INPUT_FUZZ },
-};
-
/*
* hdaps_calibrate - Set our "resting" values. Callers must hold hdaps_sem.
*/
@@ -342,9 +332,9 @@ static void hdaps_mousedev_poll(unsigned long unused)
if (__hdaps_read_pair(HDAPS_PORT_XPOS, HDAPS_PORT_YPOS, &x, &y))
goto out;
- input_report_abs(&hdaps_idev, ABS_X, x - rest_x);
- input_report_abs(&hdaps_idev, ABS_Y, y - rest_y);
- input_sync(&hdaps_idev);
+ input_report_abs(hdaps_idev, ABS_X, x - rest_x);
+ input_report_abs(hdaps_idev, ABS_Y, y - rest_y);
+ input_sync(hdaps_idev);
mod_timer(&hdaps_timer, jiffies + HDAPS_POLL_PERIOD);
@@ -564,12 +554,25 @@ static int __init hdaps_init(void)
if (ret)
goto out_device;
+ hdaps_idev = input_allocate_device();
+ if (!hdaps_idev) {
+ ret = -ENOMEM;
+ goto out_group;
+ }
+
/* initial calibrate for the input device */
hdaps_calibrate();
/* initialize the input class */
- hdaps_idev.dev = &pdev->dev;
- input_register_device(&hdaps_idev);
+ hdaps_idev->name = "hdaps";
+ hdaps_idev->cdev.dev = &pdev->dev;
+ hdaps_idev->evbit[0] = BIT(EV_ABS);
+ input_set_abs_params(hdaps_idev, ABS_X,
+ -256, 256, HDAPS_INPUT_FUZZ, HDAPS_INPUT_FLAT);
+	input_set_abs_params(hdaps_idev, ABS_Y,
+ -256, 256, HDAPS_INPUT_FUZZ, HDAPS_INPUT_FLAT);
+
+ input_register_device(hdaps_idev);
/* start up our timer for the input device */
init_timer(&hdaps_timer);
@@ -580,6 +583,8 @@ static int __init hdaps_init(void)
printk(KERN_INFO "hdaps: driver successfully loaded.\n");
return 0;
+out_group:
+ sysfs_remove_group(&pdev->dev.kobj, &hdaps_attribute_group);
out_device:
platform_device_unregister(pdev);
out_driver:
@@ -594,7 +599,7 @@ out:
static void __exit hdaps_exit(void)
{
del_timer_sync(&hdaps_timer);
- input_unregister_device(&hdaps_idev);
+ input_unregister_device(hdaps_idev);
sysfs_remove_group(&pdev->dev.kobj, &hdaps_attribute_group);
platform_device_unregister(pdev);
driver_unregister(&hdaps_driver);
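
The hdaps conversion above is the dynamic input-device allocation pattern: allocate, configure axis parameters, register, and later unregister. A minimal sketch of that lifecycle with hypothetical example_* names; the cdev.dev assignment mirrors the hunk above (this era's input core), and input_register_device() is called without checking a return value, as in the patch.

#include <linux/errno.h>
#include <linux/device.h>
#include <linux/input.h>

#define EXAMPLE_FUZZ 4
#define EXAMPLE_FLAT 4

static struct input_dev *example_idev;

static int example_input_setup(struct device *parent)
{
	example_idev = input_allocate_device();
	if (!example_idev)
		return -ENOMEM;

	example_idev->name = "example";
	example_idev->cdev.dev = parent;	/* class device parent, as above */
	example_idev->evbit[0] = BIT(EV_ABS);
	input_set_abs_params(example_idev, ABS_X, -256, 256,
			     EXAMPLE_FUZZ, EXAMPLE_FLAT);
	input_set_abs_params(example_idev, ABS_Y, -256, 256,
			     EXAMPLE_FUZZ, EXAMPLE_FLAT);

	input_register_device(example_idev);
	return 0;
}

static void example_input_teardown(void)
{
	/* unregistering also frees the device; no input_free_device() here */
	input_unregister_device(example_idev);
}
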
diff --git a/drivers/hwmon/w83627hf.c b/drivers/hwmon/w83627hf.c
index 70ef926c3bd..4e9a04e1f08 100644
--- a/drivers/hwmon/w83627hf.c
+++ b/drivers/hwmon/w83627hf.c
@@ -180,11 +180,10 @@ superio_exit(void)
#define W83781D_REG_BANK 0x4E
#define W83781D_REG_CONFIG 0x40
-#define W83781D_REG_ALARM1 0x41
-#define W83781D_REG_ALARM2 0x42
-#define W83781D_REG_ALARM3 0x450
+#define W83781D_REG_ALARM1 0x459
+#define W83781D_REG_ALARM2 0x45A
+#define W83781D_REG_ALARM3 0x45B
-#define W83781D_REG_IRQ 0x4C
#define W83781D_REG_BEEP_CONFIG 0x4D
#define W83781D_REG_BEEP_INTS1 0x56
#define W83781D_REG_BEEP_INTS2 0x57
@@ -1370,13 +1369,6 @@ static void w83627hf_init_client(struct i2c_client *client)
W83781D_REG_TEMP3_CONFIG, tmp & 0xfe);
}
}
-
- /* enable comparator mode for temp2 and temp3 so
- alarm indication will work correctly */
- i = w83627hf_read_value(client, W83781D_REG_IRQ);
- if (!(i & 0x40))
- w83627hf_write_value(client, W83781D_REG_IRQ,
- i | 0x40);
}
/* Start monitoring */
@@ -1400,7 +1392,7 @@ static struct w83627hf_data *w83627hf_update_device(struct device *dev)
/* skip missing sensors */
if (((data->type == w83697hf) && (i == 1)) ||
((data->type == w83627thf || data->type == w83637hf)
- && (i == 4 || i == 5)))
+ && (i == 5 || i == 6)))
continue;
data->in[i] =
w83627hf_read_value(client, W83781D_REG_IN(i));
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
index ba90f5140af..3eb47890db4 100644
--- a/drivers/i2c/busses/i2c-ali1535.c
+++ b/drivers/i2c/busses/i2c-ali1535.c
@@ -513,7 +513,6 @@ static void __devexit ali1535_remove(struct pci_dev *dev)
}
static struct pci_driver ali1535_driver = {
- .owner = THIS_MODULE,
.name = "ali1535_smbus",
.id_table = ali1535_ids,
.probe = ali1535_probe,
diff --git a/drivers/i2c/busses/i2c-ali1563.c b/drivers/i2c/busses/i2c-ali1563.c
index f1a62d89242..e6f63208fc4 100644
--- a/drivers/i2c/busses/i2c-ali1563.c
+++ b/drivers/i2c/busses/i2c-ali1563.c
@@ -408,7 +408,6 @@ static struct pci_device_id __devinitdata ali1563_id_table[] = {
MODULE_DEVICE_TABLE (pci, ali1563_id_table);
static struct pci_driver ali1563_pci_driver = {
- .owner = THIS_MODULE,
.name = "ali1563_smbus",
.id_table = ali1563_id_table,
.probe = ali1563_probe,
diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c
index 400b08ed429..7a5c0941dbc 100644
--- a/drivers/i2c/busses/i2c-ali15x3.c
+++ b/drivers/i2c/busses/i2c-ali15x3.c
@@ -504,7 +504,6 @@ static void __devexit ali15x3_remove(struct pci_dev *dev)
}
static struct pci_driver ali15x3_driver = {
- .owner = THIS_MODULE,
.name = "ali15x3_smbus",
.id_table = ali15x3_ids,
.probe = ali15x3_probe,
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index de035d137c3..1750dedaf4b 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -401,7 +401,6 @@ static void __devexit amd756_remove(struct pci_dev *dev)
}
static struct pci_driver amd756_driver = {
- .owner = THIS_MODULE,
.name = "amd756_smbus",
.id_table = amd756_ids,
.probe = amd756_probe,
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index f3b79a68dbe..e5ef560e686 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -384,7 +384,6 @@ static void __devexit amd8111_remove(struct pci_dev *dev)
}
static struct pci_driver amd8111_driver = {
- .owner = THIS_MODULE,
.name = "amd8111_smbus2",
.id_table = amd8111_ids,
.probe = amd8111_probe,
diff --git a/drivers/i2c/busses/i2c-hydra.c b/drivers/i2c/busses/i2c-hydra.c
index 1b5354e24bf..e0cb3b0f92f 100644
--- a/drivers/i2c/busses/i2c-hydra.c
+++ b/drivers/i2c/busses/i2c-hydra.c
@@ -155,7 +155,6 @@ static void __devexit hydra_remove(struct pci_dev *dev)
static struct pci_driver hydra_driver = {
- .owner = THIS_MODULE,
.name = "hydra_smbus",
.id_table = hydra_ids,
.probe = hydra_probe,
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 4f63195069d..ac3eafa8aac 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -560,7 +560,6 @@ static void __devexit i801_remove(struct pci_dev *dev)
}
static struct pci_driver i801_driver = {
- .owner = THIS_MODULE,
.name = "i801_smbus",
.id_table = i801_ids,
.probe = i801_probe,
diff --git a/drivers/i2c/busses/i2c-i810.c b/drivers/i2c/busses/i2c-i810.c
index 52bc30593bd..748be30f2ba 100644
--- a/drivers/i2c/busses/i2c-i810.c
+++ b/drivers/i2c/busses/i2c-i810.c
@@ -233,7 +233,6 @@ static void __devexit i810_remove(struct pci_dev *dev)
}
static struct pci_driver i810_driver = {
- .owner = THIS_MODULE,
.name = "i810_smbus",
.id_table = i810_ids,
.probe = i810_probe,
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index fd26036e68a..4d18e6e5f15 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -347,7 +347,6 @@ static void __devexit nforce2_remove(struct pci_dev *dev)
}
static struct pci_driver nforce2_driver = {
- .owner = THIS_MODULE,
.name = "nForce2_smbus",
.id_table = nforce2_ids,
.probe = nforce2_probe,
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index 7d63eec423f..692f4734548 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -462,7 +462,6 @@ static void __devexit piix4_remove(struct pci_dev *dev)
}
static struct pci_driver piix4_driver = {
- .owner = THIS_MODULE,
.name = "piix4_smbus",
.id_table = piix4_ids,
.probe = piix4_probe,
diff --git a/drivers/i2c/busses/i2c-prosavage.c b/drivers/i2c/busses/i2c-prosavage.c
index 42cb1d8ca65..9479525892e 100644
--- a/drivers/i2c/busses/i2c-prosavage.c
+++ b/drivers/i2c/busses/i2c-prosavage.c
@@ -301,7 +301,6 @@ static struct pci_device_id prosavage_pci_tbl[] = {
MODULE_DEVICE_TABLE (pci, prosavage_pci_tbl);
static struct pci_driver prosavage_driver = {
- .owner = THIS_MODULE,
.name = "prosavage_smbus",
.id_table = prosavage_pci_tbl,
.probe = prosavage_probe,
diff --git a/drivers/i2c/busses/i2c-savage4.c b/drivers/i2c/busses/i2c-savage4.c
index aebe87ba403..0c8518298e4 100644
--- a/drivers/i2c/busses/i2c-savage4.c
+++ b/drivers/i2c/busses/i2c-savage4.c
@@ -179,7 +179,6 @@ static void __devexit savage4_remove(struct pci_dev *dev)
}
static struct pci_driver savage4_driver = {
- .owner = THIS_MODULE,
.name = "savage4_smbus",
.id_table = savage4_ids,
.probe = savage4_probe,
diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c
index 3ad27c3ba15..b57ab74d23e 100644
--- a/drivers/i2c/busses/i2c-sis5595.c
+++ b/drivers/i2c/busses/i2c-sis5595.c
@@ -398,7 +398,6 @@ static void __devexit sis5595_remove(struct pci_dev *dev)
}
static struct pci_driver sis5595_driver = {
- .owner = THIS_MODULE,
.name = "sis5595_smbus",
.id_table = sis5595_ids,
.probe = sis5595_probe,
diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
index 7f49e5fd3ff..acb75e28241 100644
--- a/drivers/i2c/busses/i2c-sis630.c
+++ b/drivers/i2c/busses/i2c-sis630.c
@@ -496,7 +496,6 @@ static void __devexit sis630_remove(struct pci_dev *dev)
static struct pci_driver sis630_driver = {
- .owner = THIS_MODULE,
.name = "sis630_smbus",
.id_table = sis630_ids,
.probe = sis630_probe,
diff --git a/drivers/i2c/busses/i2c-sis96x.c b/drivers/i2c/busses/i2c-sis96x.c
index 6a134c09132..3024907cdaf 100644
--- a/drivers/i2c/busses/i2c-sis96x.c
+++ b/drivers/i2c/busses/i2c-sis96x.c
@@ -329,7 +329,6 @@ static void __devexit sis96x_remove(struct pci_dev *dev)
}
static struct pci_driver sis96x_driver = {
- .owner = THIS_MODULE,
.name = "sis96x_smbus",
.id_table = sis96x_ids,
.probe = sis96x_probe,
diff --git a/drivers/i2c/busses/i2c-via.c b/drivers/i2c/busses/i2c-via.c
index 544a38e6439..484bbacfce6 100644
--- a/drivers/i2c/busses/i2c-via.c
+++ b/drivers/i2c/busses/i2c-via.c
@@ -159,7 +159,6 @@ static void __devexit vt586b_remove(struct pci_dev *dev)
static struct pci_driver vt586b_driver = {
- .owner = THIS_MODULE,
.name = "vt586b_smbus",
.id_table = vt586b_ids,
.probe = vt586b_probe,
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index c9366b50483..47e52bf2c5e 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -142,19 +142,18 @@ static int vt596_transaction(u8 size)
/* Make sure the SMBus host is ready to start transmitting */
if ((temp = inb_p(SMBHSTSTS)) & 0x1F) {
dev_dbg(&vt596_adapter.dev, "SMBus busy (0x%02x). "
- "Resetting... ", temp);
+ "Resetting...\n", temp);
outb_p(temp, SMBHSTSTS);
if ((temp = inb_p(SMBHSTSTS)) & 0x1F) {
- printk("Failed! (0x%02x)\n", temp);
+ dev_err(&vt596_adapter.dev, "SMBus reset failed! "
+ "(0x%02x)\n", temp);
return -1;
- } else {
- printk("Successful!\n");
}
}
/* Start the transaction by setting bit 6 */
- outb_p(0x40 | (size & 0x3C), SMBHSTCNT);
+ outb_p(0x40 | size, SMBHSTCNT);
/* We will always wait for a fraction of a second */
do {
@@ -171,7 +170,7 @@ static int vt596_transaction(u8 size)
if (temp & 0x10) {
result = -1;
dev_err(&vt596_adapter.dev, "Transaction failed (0x%02x)\n",
- inb_p(SMBHSTCNT) & 0x3C);
+ size);
}
if (temp & 0x08) {
@@ -180,11 +179,13 @@ static int vt596_transaction(u8 size)
}
if (temp & 0x04) {
+ int read = inb_p(SMBHSTADD) & 0x01;
result = -1;
- /* Quick commands are used to probe for chips, so
- errors are expected, and we don't want to frighten the
- user. */
- if ((inb_p(SMBHSTCNT) & 0x3C) != VT596_QUICK)
+ /* The quick and receive byte commands are used to probe
+ for chips, so errors are expected, and we don't want
+ to frighten the user. */
+ if (!((size == VT596_QUICK && !read) ||
+ (size == VT596_BYTE && read)))
dev_err(&vt596_adapter.dev, "Transaction error!\n");
}
@@ -439,7 +440,6 @@ static struct pci_device_id vt596_ids[] = {
MODULE_DEVICE_TABLE(pci, vt596_ids);
static struct pci_driver vt596_driver = {
- .owner = THIS_MODULE,
.name = "vt596_smbus",
.id_table = vt596_ids,
.probe = vt596_probe,
@@ -462,9 +462,9 @@ static void __exit i2c_vt596_exit(void)
}
}
-MODULE_AUTHOR(
- "Frodo Looijaard <frodol@dds.nl> and "
- "Philip Edelbrock <phil@netroedge.com>");
+MODULE_AUTHOR("Kyosti Malkki <kmalkki@cc.hut.fi>, "
+ "Mark D. Studebaker <mdsxyz123@yahoo.com> and "
+ "Jean Delvare <khali@linux-fr.org>");
MODULE_DESCRIPTION("vt82c596 SMBus driver");
MODULE_LICENSE("GPL");
diff --git a/drivers/i2c/busses/i2c-voodoo3.c b/drivers/i2c/busses/i2c-voodoo3.c
index 650c3ebde84..b675773b0cc 100644
--- a/drivers/i2c/busses/i2c-voodoo3.c
+++ b/drivers/i2c/busses/i2c-voodoo3.c
@@ -225,7 +225,6 @@ static void __devexit voodoo3_remove(struct pci_dev *dev)
}
static struct pci_driver voodoo3_driver = {
- .owner = THIS_MODULE,
.name = "voodoo3_smbus",
.id_table = voodoo3_ids,
.probe = voodoo3_probe,
diff --git a/drivers/i2c/chips/ds1337.c b/drivers/i2c/chips/ds1337.c
index 01b03700741..02682fb794c 100644
--- a/drivers/i2c/chips/ds1337.c
+++ b/drivers/i2c/chips/ds1337.c
@@ -164,9 +164,9 @@ static int ds1337_set_datetime(struct i2c_client *client, struct rtc_time *dt)
buf[1] = BIN2BCD(dt->tm_sec);
buf[2] = BIN2BCD(dt->tm_min);
buf[3] = BIN2BCD(dt->tm_hour);
- buf[4] = BIN2BCD(dt->tm_wday) + 1;
+ buf[4] = BIN2BCD(dt->tm_wday + 1);
buf[5] = BIN2BCD(dt->tm_mday);
- buf[6] = BIN2BCD(dt->tm_mon) + 1;
+ buf[6] = BIN2BCD(dt->tm_mon + 1);
val = dt->tm_year;
if (val >= 100) {
val -= 100;
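
The ds1337 change above is subtle: the +1 offset must be applied before the binary-to-BCD conversion, not after. A short worked example, assuming the classic BIN2BCD macro from <linux/bcd.h>.

/* <linux/bcd.h> defined the conversion essentially like this */
#define BIN2BCD(val)	((((val) / 10) << 4) + (val) % 10)

/*
 * tm_mon is 0-based, so October is 9 and the chip wants BCD "10":
 *
 *   BIN2BCD(9) + 1  == 0x09 + 1 == 0x0A   (not a valid BCD pair)
 *   BIN2BCD(9 + 1)  == 0x10               (correct)
 *
 * The same reasoning applies to tm_wday, which is also 0-based.
 */
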
diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig
index a737886e39d..42e5b8175cb 100644
--- a/drivers/ide/Kconfig
+++ b/drivers/ide/Kconfig
@@ -539,6 +539,15 @@ config BLK_DEV_CS5530
It is safe to say Y to this question.
+config BLK_DEV_CS5535
+ tristate "AMD CS5535 chipset support"
+ depends on X86 && !X86_64
+ help
+ Include support for UDMA on the NSC/AMD CS5535 companion chipset.
+ This will automatically be detected and configured if found.
+
+ It is safe to say Y to this question.
+
config BLK_DEV_HPT34X
tristate "HPT34X chipset support"
help
diff --git a/drivers/ide/ide-floppy.c b/drivers/ide/ide-floppy.c
index e83f54d37f9..f615ab75996 100644
--- a/drivers/ide/ide-floppy.c
+++ b/drivers/ide/ide-floppy.c
@@ -2038,11 +2038,9 @@ static int idefloppy_ioctl(struct inode *inode, struct file *file,
struct ide_floppy_obj *floppy = ide_floppy_g(bdev->bd_disk);
ide_drive_t *drive = floppy->drive;
void __user *argp = (void __user *)arg;
- int err = generic_ide_ioctl(drive, file, bdev, cmd, arg);
+ int err;
int prevent = (arg) ? 1 : 0;
idefloppy_pc_t pc;
- if (err != -EINVAL)
- return err;
switch (cmd) {
case CDROMEJECT:
@@ -2094,7 +2092,7 @@ static int idefloppy_ioctl(struct inode *inode, struct file *file,
case IDEFLOPPY_IOCTL_FORMAT_GET_PROGRESS:
return idefloppy_get_format_progress(drive, argp);
}
- return -EINVAL;
+ return generic_ide_ioctl(drive, file, bdev, cmd, arg);
}
static int idefloppy_media_changed(struct gendisk *disk)
diff --git a/drivers/ide/ide-iops.c b/drivers/ide/ide-iops.c
index 0b0aa4f5162..af7af958ab3 100644
--- a/drivers/ide/ide-iops.c
+++ b/drivers/ide/ide-iops.c
@@ -104,8 +104,6 @@ void default_hwif_iops (ide_hwif_t *hwif)
hwif->INSL = ide_insl;
}
-EXPORT_SYMBOL(default_hwif_iops);
-
/*
* MMIO operations, typically used for SATA controllers
*/
@@ -329,8 +327,6 @@ void default_hwif_transport(ide_hwif_t *hwif)
hwif->atapi_output_bytes = atapi_output_bytes;
}
-EXPORT_SYMBOL(default_hwif_transport);
-
/*
* Beginning of Taskfile OPCODE Library and feature sets.
*/
@@ -529,8 +525,6 @@ int wait_for_ready (ide_drive_t *drive, int timeout)
return 0;
}
-EXPORT_SYMBOL(wait_for_ready);
-
/*
* This routine busy-waits for the drive status to be not "busy".
* It then checks the status for all of the "good" bits and none
diff --git a/drivers/ide/ide-taskfile.c b/drivers/ide/ide-taskfile.c
index 7ec18fa3b5f..54f9639c2a8 100644
--- a/drivers/ide/ide-taskfile.c
+++ b/drivers/ide/ide-taskfile.c
@@ -161,8 +161,6 @@ ide_startstop_t do_rw_taskfile (ide_drive_t *drive, ide_task_t *task)
return ide_stopped;
}
-EXPORT_SYMBOL(do_rw_taskfile);
-
/*
* set_multmode_intr() is invoked on completion of a WIN_SETMULT cmd.
*/
diff --git a/drivers/ide/ide.c b/drivers/ide/ide.c
index 9fe19808d81..8af179b531c 100644
--- a/drivers/ide/ide.c
+++ b/drivers/ide/ide.c
@@ -803,6 +803,7 @@ found:
hwif->irq = hw->irq;
hwif->noprobe = 0;
hwif->chipset = hw->chipset;
+ hwif->gendev.parent = hw->dev;
if (!initializing) {
probe_hwif_init_with_fixup(hwif, fixup);
diff --git a/drivers/ide/legacy/ide-cs.c b/drivers/ide/legacy/ide-cs.c
index 1dafffa7e51..ef79805218e 100644
--- a/drivers/ide/legacy/ide-cs.c
+++ b/drivers/ide/legacy/ide-cs.c
@@ -182,13 +182,14 @@ static void ide_detach(dev_link_t *link)
} /* ide_detach */
-static int idecs_register(unsigned long io, unsigned long ctl, unsigned long irq)
+static int idecs_register(unsigned long io, unsigned long ctl, unsigned long irq, struct pcmcia_device *handle)
{
hw_regs_t hw;
memset(&hw, 0, sizeof(hw));
ide_init_hwif_ports(&hw, io, ctl, NULL);
hw.irq = irq;
hw.chipset = ide_pci;
+ hw.dev = &handle->dev;
return ide_register_hw_with_fixup(&hw, NULL, ide_undecoded_slave);
}
@@ -327,12 +328,12 @@ static void ide_config(dev_link_t *link)
/* retry registration in case device is still spinning up */
for (hd = -1, i = 0; i < 10; i++) {
- hd = idecs_register(io_base, ctl_base, link->irq.AssignedIRQ);
+ hd = idecs_register(io_base, ctl_base, link->irq.AssignedIRQ, handle);
if (hd >= 0) break;
if (link->io.NumPorts1 == 0x20) {
outb(0x02, ctl_base + 0x10);
hd = idecs_register(io_base + 0x10, ctl_base + 0x10,
- link->irq.AssignedIRQ);
+ link->irq.AssignedIRQ, handle);
if (hd >= 0) {
io_base += 0x10;
ctl_base += 0x10;
diff --git a/drivers/ide/pci/Makefile b/drivers/ide/pci/Makefile
index af46226c179..f35d684edc2 100644
--- a/drivers/ide/pci/Makefile
+++ b/drivers/ide/pci/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_BLK_DEV_ATIIXP) += atiixp.o
obj-$(CONFIG_BLK_DEV_CMD64X) += cmd64x.o
obj-$(CONFIG_BLK_DEV_CS5520) += cs5520.o
obj-$(CONFIG_BLK_DEV_CS5530) += cs5530.o
+obj-$(CONFIG_BLK_DEV_CS5535) += cs5535.o
obj-$(CONFIG_BLK_DEV_SC1200) += sc1200.o
obj-$(CONFIG_BLK_DEV_CY82C693) += cy82c693.o
obj-$(CONFIG_BLK_DEV_HPT34X) += hpt34x.o
diff --git a/drivers/ide/pci/amd74xx.c b/drivers/ide/pci/amd74xx.c
index 844a6c9fb94..21965e5ef25 100644
--- a/drivers/ide/pci/amd74xx.c
+++ b/drivers/ide/pci/amd74xx.c
@@ -74,6 +74,7 @@ static struct amd_ide_chip {
{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE, 0x50, AMD_UDMA_133 },
{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE, 0x50, AMD_UDMA_133 },
{ PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE, 0x50, AMD_UDMA_133 },
+ { PCI_DEVICE_ID_AMD_CS5536_IDE, 0x40, AMD_UDMA_100 },
{ 0 }
};
@@ -491,6 +492,7 @@ static ide_pci_device_t amd74xx_chipsets[] __devinitdata = {
/* 14 */ DECLARE_NV_DEV("NFORCE-MCP04"),
/* 15 */ DECLARE_NV_DEV("NFORCE-MCP51"),
/* 16 */ DECLARE_NV_DEV("NFORCE-MCP55"),
+ /* 17 */ DECLARE_AMD_DEV("AMD5536"),
};
static int __devinit amd74xx_probe(struct pci_dev *dev, const struct pci_device_id *id)
@@ -527,6 +529,7 @@ static struct pci_device_id amd74xx_pci_tbl[] = {
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP04_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 14 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP51_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 15 },
{ PCI_VENDOR_ID_NVIDIA, PCI_DEVICE_ID_NVIDIA_NFORCE_MCP55_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 16 },
+ { PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_IDE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 17 },
{ 0, },
};
MODULE_DEVICE_TABLE(pci, amd74xx_pci_tbl);
diff --git a/drivers/ide/pci/cs5535.c b/drivers/ide/pci/cs5535.c
new file mode 100644
index 00000000000..6eb305197f3
--- /dev/null
+++ b/drivers/ide/pci/cs5535.c
@@ -0,0 +1,305 @@
+/*
+ * linux/drivers/ide/pci/cs5535.c
+ *
+ * Copyright (C) 2004-2005 Advanced Micro Devices, Inc.
+ *
+ * History:
+ * 09/20/2005 - Jaya Kumar <jayakumar.ide@gmail.com>
+ * - Reworked tuneproc, set_drive, misc mods to prep for mainline
+ * - Work was sponsored by CIS (M) Sdn Bhd.
+ * Ported to Kernel 2.6.11 on June 26, 2005 by
+ * Wolfgang Zuleger <wolfgang.zuleger@gmx.de>
+ * Alexander Kiausch <alex.kiausch@t-online.de>
+ * Originally developed by AMD for 2.4/2.6
+ *
+ * Development of this chipset driver was funded
+ * by the nice folks at National Semiconductor/AMD.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * Documentation:
+ * CS5535 documentation available from AMD
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/ide.h>
+
+#include "ide-timing.h"
+
+#define MSR_ATAC_BASE 0x51300000
+#define ATAC_GLD_MSR_CAP (MSR_ATAC_BASE+0)
+#define ATAC_GLD_MSR_CONFIG (MSR_ATAC_BASE+0x01)
+#define ATAC_GLD_MSR_SMI (MSR_ATAC_BASE+0x02)
+#define ATAC_GLD_MSR_ERROR (MSR_ATAC_BASE+0x03)
+#define ATAC_GLD_MSR_PM (MSR_ATAC_BASE+0x04)
+#define ATAC_GLD_MSR_DIAG (MSR_ATAC_BASE+0x05)
+#define ATAC_IO_BAR (MSR_ATAC_BASE+0x08)
+#define ATAC_RESET (MSR_ATAC_BASE+0x10)
+#define ATAC_CH0D0_PIO (MSR_ATAC_BASE+0x20)
+#define ATAC_CH0D0_DMA (MSR_ATAC_BASE+0x21)
+#define ATAC_CH0D1_PIO (MSR_ATAC_BASE+0x22)
+#define ATAC_CH0D1_DMA (MSR_ATAC_BASE+0x23)
+#define ATAC_PCI_ABRTERR (MSR_ATAC_BASE+0x24)
+#define ATAC_BM0_CMD_PRIM 0x00
+#define ATAC_BM0_STS_PRIM 0x02
+#define ATAC_BM0_PRD 0x04
+#define CS5535_CABLE_DETECT 0x48
+
+/* Format I PIO settings. We separate out cmd and data for safer timings */
+
+static unsigned int cs5535_pio_cmd_timings[5] =
+{ 0xF7F4, 0x53F3, 0x13F1, 0x5131, 0x1131 };
+static unsigned int cs5535_pio_dta_timings[5] =
+{ 0xF7F4, 0xF173, 0x8141, 0x5131, 0x1131 };
+
+static unsigned int cs5535_mwdma_timings[3] =
+{ 0x7F0FFFF3, 0x7F035352, 0x7f024241 };
+
+static unsigned int cs5535_udma_timings[5] =
+{ 0x7F7436A1, 0x7F733481, 0x7F723261, 0x7F713161, 0x7F703061 };
+
+/* Macros to check if the register is the reset value - reset value is an
+ invalid timing and indicates the register has not been set previously */
+
+#define CS5535_BAD_PIO(timings) ( (timings&~0x80000000UL) == 0x00009172 )
+#define CS5535_BAD_DMA(timings) ( (timings & 0x000FFFFF) == 0x00077771 )
+
+/****
+ * cs5535_set_speed - Configure the chipset to the new speed
+ * @drive: Drive to set up
+ * @speed: desired speed
+ *
+ * cs5535_set_speed() configures the chipset to a new speed.
+ */
+static void cs5535_set_speed(ide_drive_t *drive, u8 speed)
+{
+
+ u32 reg = 0, dummy;
+ int unit = drive->select.b.unit;
+
+
+ /* Set the PIO timings */
+ if ((speed & XFER_MODE) == XFER_PIO) {
+ u8 pioa;
+ u8 piob;
+ u8 cmd;
+
+ pioa = speed - XFER_PIO_0;
+ piob = ide_get_best_pio_mode(&(drive->hwif->drives[!unit]),
+ 255, 4, NULL);
+ cmd = pioa < piob ? pioa : piob;
+
+ /* Write the speed of the current drive */
+ reg = (cs5535_pio_cmd_timings[cmd] << 16) |
+ cs5535_pio_dta_timings[pioa];
+ wrmsr(unit ? ATAC_CH0D1_PIO : ATAC_CH0D0_PIO, reg, 0);
+
+	/* And if necessary, change the speed of the other drive */
+ rdmsr(unit ? ATAC_CH0D0_PIO : ATAC_CH0D1_PIO, reg, dummy);
+
+ if (((reg >> 16) & cs5535_pio_cmd_timings[cmd]) !=
+ cs5535_pio_cmd_timings[cmd]) {
+ reg &= 0x0000FFFF;
+ reg |= cs5535_pio_cmd_timings[cmd] << 16;
+ wrmsr(unit ? ATAC_CH0D0_PIO : ATAC_CH0D1_PIO, reg, 0);
+ }
+
+ /* Set bit 31 of the DMA register for PIO format 1 timings */
+ rdmsr(unit ? ATAC_CH0D1_DMA : ATAC_CH0D0_DMA, reg, dummy);
+ wrmsr(unit ? ATAC_CH0D1_DMA : ATAC_CH0D0_DMA,
+ reg | 0x80000000UL, 0);
+ } else {
+ rdmsr(unit ? ATAC_CH0D1_DMA : ATAC_CH0D0_DMA, reg, dummy);
+
+ reg &= 0x80000000UL; /* Preserve the PIO format bit */
+
+ if (speed >= XFER_UDMA_0 && speed <= XFER_UDMA_7)
+ reg |= cs5535_udma_timings[speed - XFER_UDMA_0];
+ else if (speed >= XFER_MW_DMA_0 && speed <= XFER_MW_DMA_2)
+ reg |= cs5535_mwdma_timings[speed - XFER_MW_DMA_0];
+ else
+ return;
+
+ wrmsr(unit ? ATAC_CH0D1_DMA : ATAC_CH0D0_DMA, reg, 0);
+ }
+}
+
+static u8 cs5535_ratemask(ide_drive_t *drive)
+{
+	/* eighty_ninty_three() returns 1 if the cable is 80-core and
+	   capable of exceeding UDMA2, 0 otherwise.  We use the ratemask
+	   to set the maximum speed: returning 2 selects UDMA4, the
+	   5535's maximum, while returning 1 selects UDMA2, the maximum
+	   for a 40-conductor cable. */
+ if (!eighty_ninty_three(drive))
+ return 1;
+
+ return 2;
+}
+
+
+/****
+ * cs5535_set_drive - Configure the drive to the new speed
+ * @drive: Drive to set up
+ * @speed: desired speed
+ *
+ * cs5535_set_drive() configures the drive and the chipset to a
+ * new speed. It also can be called by upper layers.
+ */
+static int cs5535_set_drive(ide_drive_t *drive, u8 speed)
+{
+ speed = ide_rate_filter(cs5535_ratemask(drive), speed);
+ ide_config_drive_speed(drive, speed);
+ cs5535_set_speed(drive, speed);
+
+ return 0;
+}
+
+/****
+ * cs5535_tuneproc - PIO setup
+ * @drive: drive to set up
+ * @pio: mode to use (255 for 'best possible')
+ *
+ * A callback from the upper layers for PIO-only tuning.
+ */
+static void cs5535_tuneproc(ide_drive_t *drive, u8 xferspeed)
+{
+ u8 modes[] = { XFER_PIO_0, XFER_PIO_1, XFER_PIO_2, XFER_PIO_3,
+ XFER_PIO_4 };
+
+	/* The CS5535's maximum PIO mode is PIO 4; ide_get_best_pio_mode()
+	   also checks the blacklist.  No need to rate_filter the incoming
+	   xferspeed, since we know only a PIO mode will be chosen. */
+ xferspeed = ide_get_best_pio_mode(drive, xferspeed, 4, NULL);
+ ide_config_drive_speed(drive, modes[xferspeed]);
+ cs5535_set_speed(drive, xferspeed);
+}
+
+static int cs5535_config_drive_for_dma(ide_drive_t *drive)
+{
+ u8 speed;
+
+ speed = ide_dma_speed(drive, cs5535_ratemask(drive));
+
+ /* If no DMA speed was available then let dma_check hit pio */
+ if (!speed) {
+ return 0;
+ }
+
+ cs5535_set_drive(drive, speed);
+ return ide_dma_enable(drive);
+}
+
+static int cs5535_dma_check(ide_drive_t *drive)
+{
+ ide_hwif_t *hwif = drive->hwif;
+ struct hd_driveid *id = drive->id;
+ u8 speed;
+
+ drive->init_speed = 0;
+
+ if ((id->capability & 1) && drive->autodma) {
+ if (ide_use_dma(drive)) {
+ if (cs5535_config_drive_for_dma(drive))
+ return hwif->ide_dma_on(drive);
+ }
+
+ goto fast_ata_pio;
+
+ } else if ((id->capability & 8) || (id->field_valid & 2)) {
+fast_ata_pio:
+ speed = ide_get_best_pio_mode(drive, 255, 4, NULL);
+ cs5535_set_drive(drive, speed);
+ return hwif->ide_dma_off_quietly(drive);
+ }
+ /* IORDY not supported */
+ return 0;
+}
+
+static u8 __devinit cs5535_cable_detect(struct pci_dev *dev)
+{
+ u8 bit;
+
+	/* returns 1 if an 80-wire cable was detected */
+ pci_read_config_byte(dev, CS5535_CABLE_DETECT, &bit);
+ return (bit & 1);
+}
+
+/****
+ * init_hwif_cs5535 - Initialize one IDE channel
+ * @hwif: Channel descriptor
+ *
+ * This gets invoked by the IDE driver once for each channel. It
+ * performs channel-specific pre-initialization before drive probing.
+ *
+ */
+static void __devinit init_hwif_cs5535(ide_hwif_t *hwif)
+{
+ int i;
+
+ hwif->autodma = 0;
+
+ hwif->tuneproc = &cs5535_tuneproc;
+ hwif->speedproc = &cs5535_set_drive;
+ hwif->ide_dma_check = &cs5535_dma_check;
+
+ hwif->atapi_dma = 1;
+ hwif->ultra_mask = 0x1F;
+ hwif->mwdma_mask = 0x07;
+
+
+ hwif->udma_four = cs5535_cable_detect(hwif->pci_dev);
+
+ if (!noautodma)
+ hwif->autodma = 1;
+
+	/* Just set autotune and do not worry about the BIOS timings */
+ for (i = 0; i < 2; i++) {
+ hwif->drives[i].autotune = 1;
+ hwif->drives[i].autodma = hwif->autodma;
+ }
+}
+
+static ide_pci_device_t cs5535_chipset __devinitdata = {
+ .name = "CS5535",
+ .init_hwif = init_hwif_cs5535,
+ .channels = 1,
+ .autodma = AUTODMA,
+ .bootable = ON_BOARD,
+};
+
+static int __devinit cs5535_init_one(struct pci_dev *dev,
+ const struct pci_device_id *id)
+{
+ return ide_setup_pci_device(dev, &cs5535_chipset);
+}
+
+static struct pci_device_id cs5535_pci_tbl[] =
+{
+ { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_CS5535_IDE, PCI_ANY_ID,
+ PCI_ANY_ID, 0, 0, 0},
+ { 0, },
+};
+
+MODULE_DEVICE_TABLE(pci, cs5535_pci_tbl);
+
+static struct pci_driver driver = {
+ .name = "CS5535_IDE",
+ .id_table = cs5535_pci_tbl,
+ .probe = cs5535_init_one,
+};
+
+static int __init cs5535_ide_init(void)
+{
+ return ide_pci_register_driver(&driver);
+}
+
+module_init(cs5535_ide_init);
+
+MODULE_AUTHOR("AMD");
+MODULE_DESCRIPTION("PCI driver module for AMD/NS CS5535 IDE");
+MODULE_LICENSE("GPL");
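
The cs5535 timing code above programs per-drive timings through MSRs with a read-modify-write that preserves the PIO-format bit. A minimal sketch of that access pattern, assuming the x86 rdmsr()/wrmsr() macros from <asm/msr.h> and a hypothetical helper name.

#include <linux/types.h>
#include <asm/msr.h>

/* Update the low-word DMA timing of an ATAC timing MSR while keeping
 * bit 31, the PIO format select, untouched. */
static void example_set_dma_timing(unsigned int msr, u32 new_timing)
{
	u32 lo, hi;

	rdmsr(msr, lo, hi);		/* read the current timing */
	lo &= 0x80000000UL;		/* keep only the PIO format bit */
	lo |= new_timing;		/* merge in the new DMA timing */
	wrmsr(msr, lo, hi);		/* write back; high word unchanged */
}
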
diff --git a/drivers/ide/pci/cy82c693.c b/drivers/ide/pci/cy82c693.c
index 5a33513f3dd..9f41ecd5633 100644
--- a/drivers/ide/pci/cy82c693.c
+++ b/drivers/ide/pci/cy82c693.c
@@ -469,7 +469,7 @@ static void __devinit init_hwif_cy82c693(ide_hwif_t *hwif)
static __devinitdata ide_hwif_t *primary;
-void __devinit init_iops_cy82c693(ide_hwif_t *hwif)
+static void __devinit init_iops_cy82c693(ide_hwif_t *hwif)
{
if (PCI_FUNC(hwif->pci_dev->devfn) == 1)
primary = hwif;
diff --git a/drivers/ide/pci/siimage.c b/drivers/ide/pci/siimage.c
index 2b9961b8813..022d244f2eb 100644
--- a/drivers/ide/pci/siimage.c
+++ b/drivers/ide/pci/siimage.c
@@ -701,6 +701,7 @@ static unsigned int setup_mmio_siimage (struct pci_dev *dev, const char *name)
unsigned long barsize = pci_resource_len(dev, 5);
u8 tmpbyte = 0;
void __iomem *ioaddr;
+ u32 tmp, irq_mask;
/*
* Drop back to PIO if we can't map the mmio. Some
@@ -726,6 +727,14 @@ static unsigned int setup_mmio_siimage (struct pci_dev *dev, const char *name)
pci_set_drvdata(dev, (void *) ioaddr);
if (pdev_is_sata(dev)) {
+ /* make sure IDE0/1 interrupts are not masked */
+ irq_mask = (1 << 22) | (1 << 23);
+ tmp = readl(ioaddr + 0x48);
+ if (tmp & irq_mask) {
+ tmp &= ~irq_mask;
+ writel(tmp, ioaddr + 0x48);
+ readl(ioaddr + 0x48); /* flush */
+ }
writel(0, ioaddr + 0x148);
writel(0, ioaddr + 0x1C8);
}
diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c
index 18ed7765417..d4f2111d436 100644
--- a/drivers/ide/setup-pci.c
+++ b/drivers/ide/setup-pci.c
@@ -787,8 +787,9 @@ static int pre_init = 1; /* Before first ordered IDE scan */
static LIST_HEAD(ide_pci_drivers);
/*
- * ide_register_pci_driver - attach IDE driver
+ * __ide_register_pci_driver - attach IDE driver
* @driver: pci driver
+ * @module: owner module of the driver
*
* Registers a driver with the IDE layer. The IDE layer arranges that
* boot time setup is done in the expected device order and then
@@ -801,15 +802,16 @@ static LIST_HEAD(ide_pci_drivers);
* Returns are the same as for pci_register_driver
*/
-int ide_pci_register_driver(struct pci_driver *driver)
+int __ide_pci_register_driver(struct pci_driver *driver, struct module *module)
{
if(!pre_init)
- return pci_module_init(driver);
+ return __pci_register_driver(driver, module);
+ driver->driver.owner = module;
list_add_tail(&driver->node, &ide_pci_drivers);
return 0;
}
-EXPORT_SYMBOL_GPL(ide_pci_register_driver);
+EXPORT_SYMBOL_GPL(__ide_pci_register_driver);
/**
* ide_unregister_pci_driver - unregister an IDE driver
@@ -897,6 +899,6 @@ void __init ide_scan_pcibus (int scan_direction)
{
list_del(l);
d = list_entry(l, struct pci_driver, node);
- pci_register_driver(d);
+ __pci_register_driver(d, d->driver.owner);
}
}
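
The setup-pci.c change above defers IDE driver registration until the ordered bus scan while remembering each driver's owning module. A compact sketch of that deferred-registration pattern with hypothetical example_* names, using the two-argument __pci_register_driver() of this era; the real code also handles the post-scan path and unregistration.

#include <linux/list.h>
#include <linux/module.h>
#include <linux/pci.h>

static int pre_init = 1;		/* before the first ordered scan */
static LIST_HEAD(pending_drivers);

static int __example_register_driver(struct pci_driver *drv,
				     struct module *owner)
{
	if (!pre_init)
		return __pci_register_driver(drv, owner);

	drv->driver.owner = owner;	/* remember the owner for later */
	list_add_tail(&drv->node, &pending_drivers);
	return 0;
}

static void example_ordered_scan(void)
{
	struct list_head *l, *n;
	struct pci_driver *d;

	pre_init = 0;
	list_for_each_safe(l, n, &pending_drivers) {
		list_del(l);
		d = list_entry(l, struct pci_driver, node);
		__pci_register_driver(d, d->driver.owner);
	}
}
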
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index aed5ca23fb2..5ea741f47fc 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -31,7 +31,7 @@
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
- * $Id: user_mad.c 2814 2005-07-06 19:14:09Z halr $
+ * $Id: user_mad.c 4010 2005-11-09 23:11:56Z roland $
*/
#include <linux/module.h>
@@ -110,13 +110,13 @@ struct ib_umad_device {
};
struct ib_umad_file {
- struct ib_umad_port *port;
- struct list_head recv_list;
- struct list_head port_list;
- spinlock_t recv_lock;
- wait_queue_head_t recv_wait;
- struct ib_mad_agent *agent[IB_UMAD_MAX_AGENTS];
- struct ib_mr *mr[IB_UMAD_MAX_AGENTS];
+ struct ib_umad_port *port;
+ struct list_head recv_list;
+ struct list_head port_list;
+ spinlock_t recv_lock;
+ wait_queue_head_t recv_wait;
+ struct ib_mad_agent *agent[IB_UMAD_MAX_AGENTS];
+ int agents_dead;
};
struct ib_umad_packet {
@@ -145,6 +145,12 @@ static void ib_umad_release_dev(struct kref *ref)
kfree(dev);
}
+/* caller must hold port->mutex at least for reading */
+static struct ib_mad_agent *__get_agent(struct ib_umad_file *file, int id)
+{
+ return file->agents_dead ? NULL : file->agent[id];
+}
+
static int queue_packet(struct ib_umad_file *file,
struct ib_mad_agent *agent,
struct ib_umad_packet *packet)
@@ -152,10 +158,11 @@ static int queue_packet(struct ib_umad_file *file,
int ret = 1;
down_read(&file->port->mutex);
+
for (packet->mad.hdr.id = 0;
packet->mad.hdr.id < IB_UMAD_MAX_AGENTS;
packet->mad.hdr.id++)
- if (agent == file->agent[packet->mad.hdr.id]) {
+ if (agent == __get_agent(file, packet->mad.hdr.id)) {
spin_lock_irq(&file->recv_lock);
list_add_tail(&packet->list, &file->recv_list);
spin_unlock_irq(&file->recv_lock);
@@ -327,7 +334,7 @@ static ssize_t ib_umad_write(struct file *filp, const char __user *buf,
down_read(&file->port->mutex);
- agent = file->agent[packet->mad.hdr.id];
+ agent = __get_agent(file, packet->mad.hdr.id);
if (!agent) {
ret = -EINVAL;
goto err_up;
@@ -481,7 +488,7 @@ static int ib_umad_reg_agent(struct ib_umad_file *file, unsigned long arg)
}
for (agent_id = 0; agent_id < IB_UMAD_MAX_AGENTS; ++agent_id)
- if (!file->agent[agent_id])
+ if (!__get_agent(file, agent_id))
goto found;
ret = -ENOMEM;
@@ -505,29 +512,15 @@ found:
goto out;
}
- file->agent[agent_id] = agent;
-
- file->mr[agent_id] = ib_get_dma_mr(agent->qp->pd, IB_ACCESS_LOCAL_WRITE);
- if (IS_ERR(file->mr[agent_id])) {
- ret = -ENOMEM;
- goto err;
- }
-
if (put_user(agent_id,
(u32 __user *) (arg + offsetof(struct ib_user_mad_reg_req, id)))) {
ret = -EFAULT;
- goto err_mr;
+ ib_unregister_mad_agent(agent);
+ goto out;
}
+ file->agent[agent_id] = agent;
ret = 0;
- goto out;
-
-err_mr:
- ib_dereg_mr(file->mr[agent_id]);
-
-err:
- file->agent[agent_id] = NULL;
- ib_unregister_mad_agent(agent);
out:
up_write(&file->port->mutex);
@@ -536,27 +529,29 @@ out:
static int ib_umad_unreg_agent(struct ib_umad_file *file, unsigned long arg)
{
+ struct ib_mad_agent *agent = NULL;
u32 id;
int ret = 0;
- down_write(&file->port->mutex);
+ if (get_user(id, (u32 __user *) arg))
+ return -EFAULT;
- if (get_user(id, (u32 __user *) arg)) {
- ret = -EFAULT;
- goto out;
- }
+ down_write(&file->port->mutex);
- if (id < 0 || id >= IB_UMAD_MAX_AGENTS || !file->agent[id]) {
+ if (id < 0 || id >= IB_UMAD_MAX_AGENTS || !__get_agent(file, id)) {
ret = -EINVAL;
goto out;
}
- ib_dereg_mr(file->mr[id]);
- ib_unregister_mad_agent(file->agent[id]);
+ agent = file->agent[id];
file->agent[id] = NULL;
out:
up_write(&file->port->mutex);
+
+ if (agent)
+ ib_unregister_mad_agent(agent);
+
return ret;
}
@@ -621,23 +616,29 @@ static int ib_umad_close(struct inode *inode, struct file *filp)
struct ib_umad_file *file = filp->private_data;
struct ib_umad_device *dev = file->port->umad_dev;
struct ib_umad_packet *packet, *tmp;
+ int already_dead;
int i;
down_write(&file->port->mutex);
- for (i = 0; i < IB_UMAD_MAX_AGENTS; ++i)
- if (file->agent[i]) {
- ib_dereg_mr(file->mr[i]);
- ib_unregister_mad_agent(file->agent[i]);
- }
+
+ already_dead = file->agents_dead;
+ file->agents_dead = 1;
list_for_each_entry_safe(packet, tmp, &file->recv_list, list)
kfree(packet);
list_del(&file->port_list);
- up_write(&file->port->mutex);
- kfree(file);
+ downgrade_write(&file->port->mutex);
+
+ if (!already_dead)
+ for (i = 0; i < IB_UMAD_MAX_AGENTS; ++i)
+ if (file->agent[i])
+ ib_unregister_mad_agent(file->agent[i]);
+
+ up_read(&file->port->mutex);
+ kfree(file);
kref_put(&dev->ref, ib_umad_release_dev);
return 0;
@@ -801,7 +802,7 @@ static int ib_umad_init_port(struct ib_device *device, int port_num,
goto err_class;
port->sm_dev->owner = THIS_MODULE;
port->sm_dev->ops = &umad_sm_fops;
- kobject_set_name(&port->dev->kobj, "issm%d", port->dev_num);
+ kobject_set_name(&port->sm_dev->kobj, "issm%d", port->dev_num);
if (cdev_add(port->sm_dev, base_dev + port->dev_num + IB_UMAD_MAX_PORTS, 1))
goto err_sm_cdev;
@@ -863,14 +864,36 @@ static void ib_umad_kill_port(struct ib_umad_port *port)
port->ib_dev = NULL;
- list_for_each_entry(file, &port->file_list, port_list)
- for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id) {
- if (!file->agent[id])
- continue;
- ib_dereg_mr(file->mr[id]);
- ib_unregister_mad_agent(file->agent[id]);
- file->agent[id] = NULL;
- }
+ /*
+ * Now go through the list of files attached to this port and
+ * unregister all of their MAD agents. We need to hold
+ * port->mutex while doing this to avoid racing with
+ * ib_umad_close(), but we can't hold the mutex for writing
+ * while calling ib_unregister_mad_agent(), since that might
+ * deadlock by calling back into queue_packet(). So we
+ * downgrade our lock to a read lock, and then drop and
+ * reacquire the write lock for the next iteration.
+ *
+ * We do list_del_init() on the file's list_head so that the
+ * list_del in ib_umad_close() is still OK, even after the
+ * file is removed from the list.
+ */
+ while (!list_empty(&port->file_list)) {
+ file = list_entry(port->file_list.next, struct ib_umad_file,
+ port_list);
+
+ file->agents_dead = 1;
+ list_del_init(&file->port_list);
+
+ downgrade_write(&port->mutex);
+
+ for (id = 0; id < IB_UMAD_MAX_AGENTS; ++id)
+ if (file->agent[id])
+ ib_unregister_mad_agent(file->agent[id]);
+
+ up_read(&port->mutex);
+ down_write(&port->mutex);
+ }
up_write(&port->mutex);
@@ -913,7 +936,7 @@ static void ib_umad_add_one(struct ib_device *device)
err:
while (--i >= s)
- ib_umad_kill_port(&umad_dev->port[i]);
+ ib_umad_kill_port(&umad_dev->port[i - s]);
kref_put(&umad_dev->ref, ib_umad_release_dev);
}
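
The comment in ib_umad_kill_port() above describes the key locking trick: take the rwsem for writing to detach state, then downgrade to a read lock before calling into code that may itself need the lock for reading. A minimal sketch of that downgrade pattern with hypothetical names.

#include <linux/rwsem.h>

#define EXAMPLE_MAX_AGENTS 4

struct example_port {
	struct rw_semaphore mutex;
	void *agent[EXAMPLE_MAX_AGENTS];
};

/* may call back into paths that take port->mutex for reading */
void example_unregister_agent(void *agent);

static void example_teardown(struct example_port *port)
{
	int i;

	down_write(&port->mutex);
	/* ... detach state that must not change concurrently ... */
	downgrade_write(&port->mutex);	/* readers may proceed again */

	for (i = 0; i < EXAMPLE_MAX_AGENTS; ++i)
		if (port->agent[i])
			example_unregister_agent(port->agent[i]);

	up_read(&port->mutex);		/* pairs with downgrade_write() */
}
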
diff --git a/drivers/infiniband/core/uverbs_cmd.c b/drivers/infiniband/core/uverbs_cmd.c
index 63a74151c60..ed45da892b1 100644
--- a/drivers/infiniband/core/uverbs_cmd.c
+++ b/drivers/infiniband/core/uverbs_cmd.c
@@ -708,7 +708,7 @@ ssize_t ib_uverbs_poll_cq(struct ib_uverbs_file *file,
resp->wc[i].opcode = wc[i].opcode;
resp->wc[i].vendor_err = wc[i].vendor_err;
resp->wc[i].byte_len = wc[i].byte_len;
- resp->wc[i].imm_data = wc[i].imm_data;
+ resp->wc[i].imm_data = (__u32 __force) wc[i].imm_data;
resp->wc[i].qp_num = wc[i].qp_num;
resp->wc[i].src_qp = wc[i].src_qp;
resp->wc[i].wc_flags = wc[i].wc_flags;
@@ -908,7 +908,12 @@ retry:
if (ret)
goto err_destroy;
- resp.qp_handle = uobj->uobject.id;
+ resp.qp_handle = uobj->uobject.id;
+ resp.max_recv_sge = attr.cap.max_recv_sge;
+ resp.max_send_sge = attr.cap.max_send_sge;
+ resp.max_recv_wr = attr.cap.max_recv_wr;
+ resp.max_send_wr = attr.cap.max_send_wr;
+ resp.max_inline_data = attr.cap.max_inline_data;
if (copy_to_user((void __user *) (unsigned long) cmd.response,
&resp, sizeof resp)) {
@@ -1135,7 +1140,7 @@ ssize_t ib_uverbs_post_send(struct ib_uverbs_file *file,
next->num_sge = user_wr->num_sge;
next->opcode = user_wr->opcode;
next->send_flags = user_wr->send_flags;
- next->imm_data = user_wr->imm_data;
+ next->imm_data = (__be32 __force) user_wr->imm_data;
if (qp->qp_type == IB_QPT_UD) {
next->wr.ud.ah = idr_find(&ib_uverbs_ah_idr,
@@ -1701,7 +1706,6 @@ ssize_t ib_uverbs_modify_srq(struct ib_uverbs_file *file,
}
attr.max_wr = cmd.max_wr;
- attr.max_sge = cmd.max_sge;
attr.srq_limit = cmd.srq_limit;
ret = ib_modify_srq(srq, &attr, cmd.attr_mask);
diff --git a/drivers/infiniband/core/verbs.c b/drivers/infiniband/core/verbs.c
index 4186cc888ea..4c15e112736 100644
--- a/drivers/infiniband/core/verbs.c
+++ b/drivers/infiniband/core/verbs.c
@@ -325,16 +325,8 @@ EXPORT_SYMBOL(ib_destroy_cq);
int ib_resize_cq(struct ib_cq *cq,
int cqe)
{
- int ret;
-
- if (!cq->device->resize_cq)
- return -ENOSYS;
-
- ret = cq->device->resize_cq(cq, &cqe);
- if (!ret)
- cq->cqe = cqe;
-
- return ret;
+ return cq->device->resize_cq ?
+ cq->device->resize_cq(cq, cqe) : -ENOSYS;
}
EXPORT_SYMBOL(ib_resize_cq);
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c
index 25ebab64bc4..c3bec7490f5 100644
--- a/drivers/infiniband/hw/mthca/mthca_catas.c
+++ b/drivers/infiniband/hw/mthca/mthca_catas.c
@@ -97,7 +97,7 @@ static void poll_catas(unsigned long dev_ptr)
}
spin_lock_irqsave(&catas_lock, flags);
- if (dev->catas_err.stop)
+ if (!dev->catas_err.stop)
mod_timer(&dev->catas_err.timer,
jiffies + MTHCA_CATAS_POLL_INTERVAL);
spin_unlock_irqrestore(&catas_lock, flags);
diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
index 49f211d55df..9ed34587fc5 100644
--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
@@ -1060,6 +1060,8 @@ int mthca_QUERY_DEV_LIM(struct mthca_dev *dev,
dev_lim->hca.arbel.resize_srq = field & 1;
MTHCA_GET(field, outbox, QUERY_DEV_LIM_MAX_SG_RQ_OFFSET);
dev_lim->max_sg = min_t(int, field, dev_lim->max_sg);
+ MTHCA_GET(size, outbox, QUERY_DEV_LIM_MAX_DESC_SZ_RQ_OFFSET);
+ dev_lim->max_desc_sz = min_t(int, size, dev_lim->max_desc_sz);
MTHCA_GET(size, outbox, QUERY_DEV_LIM_MPT_ENTRY_SZ_OFFSET);
dev_lim->mpt_entry_sz = size;
MTHCA_GET(field, outbox, QUERY_DEV_LIM_PBL_SZ_OFFSET);
diff --git a/drivers/infiniband/hw/mthca/mthca_cq.c b/drivers/infiniband/hw/mthca/mthca_cq.c
index f98e2355582..4a8adcef207 100644
--- a/drivers/infiniband/hw/mthca/mthca_cq.c
+++ b/drivers/infiniband/hw/mthca/mthca_cq.c
@@ -258,7 +258,7 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
{
struct mthca_cq *cq;
struct mthca_cqe *cqe;
- int prod_index;
+ u32 prod_index;
int nfreed = 0;
spin_lock_irq(&dev->cq_table.lock);
@@ -293,19 +293,15 @@ void mthca_cq_clean(struct mthca_dev *dev, u32 cqn, u32 qpn,
* Now sweep backwards through the CQ, removing CQ entries
* that match our QP by copying older entries on top of them.
*/
- while (prod_index > cq->cons_index) {
- cqe = get_cqe(cq, (prod_index - 1) & cq->ibcq.cqe);
+ while ((int) --prod_index - (int) cq->cons_index >= 0) {
+ cqe = get_cqe(cq, prod_index & cq->ibcq.cqe);
if (cqe->my_qpn == cpu_to_be32(qpn)) {
if (srq)
mthca_free_srq_wqe(srq, be32_to_cpu(cqe->wqe));
++nfreed;
- }
- else if (nfreed)
- memcpy(get_cqe(cq, (prod_index - 1 + nfreed) &
- cq->ibcq.cqe),
- cqe,
- MTHCA_CQ_ENTRY_SIZE);
- --prod_index;
+ } else if (nfreed)
+ memcpy(get_cqe(cq, (prod_index + nfreed) & cq->ibcq.cqe),
+ cqe, MTHCA_CQ_ENTRY_SIZE);
}
if (nfreed) {
diff --git a/drivers/infiniband/hw/mthca/mthca_dev.h b/drivers/infiniband/hw/mthca/mthca_dev.h
index e7e5d3b4f00..497ff794ef6 100644
--- a/drivers/infiniband/hw/mthca/mthca_dev.h
+++ b/drivers/infiniband/hw/mthca/mthca_dev.h
@@ -131,6 +131,7 @@ struct mthca_limits {
int max_sg;
int num_qps;
int max_wqes;
+ int max_desc_sz;
int max_qp_init_rdma;
int reserved_qps;
int num_srqs;
@@ -154,6 +155,7 @@ struct mthca_limits {
int reserved_mcgs;
int num_pds;
int reserved_pds;
+ u32 page_size_cap;
u32 flags;
u8 port_width_cap;
};
diff --git a/drivers/infiniband/hw/mthca/mthca_main.c b/drivers/infiniband/hw/mthca/mthca_main.c
index 45c6328e780..6f94b25f3ac 100644
--- a/drivers/infiniband/hw/mthca/mthca_main.c
+++ b/drivers/infiniband/hw/mthca/mthca_main.c
@@ -168,6 +168,7 @@ static int __devinit mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim
mdev->limits.max_srq_wqes = dev_lim->max_srq_sz;
mdev->limits.reserved_srqs = dev_lim->reserved_srqs;
mdev->limits.reserved_eecs = dev_lim->reserved_eecs;
+ mdev->limits.max_desc_sz = dev_lim->max_desc_sz;
/*
* Subtract 1 from the limit because we need to allocate a
* spare CQE so the HCA HW can tell the difference between an
@@ -181,6 +182,7 @@ static int __devinit mthca_dev_lim(struct mthca_dev *mdev, struct mthca_dev_lim
mdev->limits.reserved_uars = dev_lim->reserved_uars;
mdev->limits.reserved_pds = dev_lim->reserved_pds;
mdev->limits.port_width_cap = dev_lim->max_port_width;
+ mdev->limits.page_size_cap = ~(u32) (dev_lim->min_page_sz - 1);
mdev->limits.flags = dev_lim->flags;
/* IB_DEVICE_RESIZE_MAX_WR not supported by driver.
@@ -1196,7 +1198,6 @@ MODULE_DEVICE_TABLE(pci, mthca_pci_table);
static struct pci_driver mthca_driver = {
.name = DRV_NAME,
- .owner = THIS_MODULE,
.id_table = mthca_pci_table,
.probe = mthca_init_one,
.remove = __devexit_p(mthca_remove_one)
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.c b/drivers/infiniband/hw/mthca/mthca_provider.c
index 6b016666826..4cc7e2846df 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.c
+++ b/drivers/infiniband/hw/mthca/mthca_provider.c
@@ -90,6 +90,7 @@ static int mthca_query_device(struct ib_device *ibdev,
memcpy(&props->node_guid, out_mad->data + 12, 8);
props->max_mr_size = ~0ull;
+ props->page_size_cap = mdev->limits.page_size_cap;
props->max_qp = mdev->limits.num_qps - mdev->limits.reserved_qps;
props->max_qp_wr = mdev->limits.max_wqes;
props->max_sge = mdev->limits.max_sg;
@@ -615,11 +616,11 @@ static struct ib_qp *mthca_create_qp(struct ib_pd *pd,
return ERR_PTR(err);
}
- init_attr->cap.max_inline_data = 0;
init_attr->cap.max_send_wr = qp->sq.max;
init_attr->cap.max_recv_wr = qp->rq.max;
init_attr->cap.max_send_sge = qp->sq.max_gs;
init_attr->cap.max_recv_sge = qp->rq.max_gs;
+ init_attr->cap.max_inline_data = qp->max_inline_data;
return &qp->ibqp;
}
diff --git a/drivers/infiniband/hw/mthca/mthca_provider.h b/drivers/infiniband/hw/mthca/mthca_provider.h
index bcd4b01a339..1e73947b470 100644
--- a/drivers/infiniband/hw/mthca/mthca_provider.h
+++ b/drivers/infiniband/hw/mthca/mthca_provider.h
@@ -251,6 +251,7 @@ struct mthca_qp {
struct mthca_wq sq;
enum ib_sig_type sq_policy;
int send_wqe_offset;
+ int max_inline_data;
u64 *wrid;
union mthca_buf queue;
diff --git a/drivers/infiniband/hw/mthca/mthca_qp.c b/drivers/infiniband/hw/mthca/mthca_qp.c
index 8852ea477c2..760c418d5bc 100644
--- a/drivers/infiniband/hw/mthca/mthca_qp.c
+++ b/drivers/infiniband/hw/mthca/mthca_qp.c
@@ -885,6 +885,48 @@ int mthca_modify_qp(struct ib_qp *ibqp, struct ib_qp_attr *attr, int attr_mask)
return err;
}
+static void mthca_adjust_qp_caps(struct mthca_dev *dev,
+ struct mthca_pd *pd,
+ struct mthca_qp *qp)
+{
+ int max_data_size;
+
+ /*
+ * Calculate the maximum size of WQE s/g segments, excluding
+ * the next segment and other non-data segments.
+ */
+ max_data_size = min(dev->limits.max_desc_sz, 1 << qp->sq.wqe_shift) -
+ sizeof (struct mthca_next_seg);
+
+ switch (qp->transport) {
+ case MLX:
+ max_data_size -= 2 * sizeof (struct mthca_data_seg);
+ break;
+
+ case UD:
+ if (mthca_is_memfree(dev))
+ max_data_size -= sizeof (struct mthca_arbel_ud_seg);
+ else
+ max_data_size -= sizeof (struct mthca_tavor_ud_seg);
+ break;
+
+ default:
+ max_data_size -= sizeof (struct mthca_raddr_seg);
+ break;
+ }
+
+ /* We don't support inline data for kernel QPs (yet). */
+ if (!pd->ibpd.uobject)
+ qp->max_inline_data = 0;
+ else
+ qp->max_inline_data = max_data_size - MTHCA_INLINE_HEADER_SIZE;
+
+ qp->sq.max_gs = max_data_size / sizeof (struct mthca_data_seg);
+ qp->rq.max_gs = (min(dev->limits.max_desc_sz, 1 << qp->rq.wqe_shift) -
+ sizeof (struct mthca_next_seg)) /
+ sizeof (struct mthca_data_seg);
+}
+
/*
* Allocate and register buffer for WQEs. qp->rq.max, sq.max,
* rq.max_gs and sq.max_gs must all be assigned.
@@ -902,27 +944,53 @@ static int mthca_alloc_wqe_buf(struct mthca_dev *dev,
size = sizeof (struct mthca_next_seg) +
qp->rq.max_gs * sizeof (struct mthca_data_seg);
+ if (size > dev->limits.max_desc_sz)
+ return -EINVAL;
+
for (qp->rq.wqe_shift = 6; 1 << qp->rq.wqe_shift < size;
qp->rq.wqe_shift++)
; /* nothing */
- size = sizeof (struct mthca_next_seg) +
- qp->sq.max_gs * sizeof (struct mthca_data_seg);
+ size = qp->sq.max_gs * sizeof (struct mthca_data_seg);
switch (qp->transport) {
case MLX:
size += 2 * sizeof (struct mthca_data_seg);
break;
+
case UD:
- if (mthca_is_memfree(dev))
- size += sizeof (struct mthca_arbel_ud_seg);
- else
- size += sizeof (struct mthca_tavor_ud_seg);
+ size += mthca_is_memfree(dev) ?
+ sizeof (struct mthca_arbel_ud_seg) :
+ sizeof (struct mthca_tavor_ud_seg);
break;
+
+ case UC:
+ size += sizeof (struct mthca_raddr_seg);
+ break;
+
+ case RC:
+ size += sizeof (struct mthca_raddr_seg);
+ /*
+ * An atomic op will require an atomic segment, a
+ * remote address segment and one scatter entry.
+ */
+ size = max_t(int, size,
+ sizeof (struct mthca_atomic_seg) +
+ sizeof (struct mthca_raddr_seg) +
+ sizeof (struct mthca_data_seg));
+ break;
+
default:
- /* bind seg is as big as atomic + raddr segs */
- size += sizeof (struct mthca_bind_seg);
+ break;
}
+ /* Make sure that we have enough space for a bind request */
+ size = max_t(int, size, sizeof (struct mthca_bind_seg));
+
+ size += sizeof (struct mthca_next_seg);
+
+ if (size > dev->limits.max_desc_sz)
+ return -EINVAL;
+
for (qp->sq.wqe_shift = 6; 1 << qp->sq.wqe_shift < size;
qp->sq.wqe_shift++)
; /* nothing */
@@ -1066,6 +1134,8 @@ static int mthca_alloc_qp_common(struct mthca_dev *dev,
return ret;
}
+ mthca_adjust_qp_caps(dev, pd, qp);
+
/*
* If this is a userspace QP, we're done now. The doorbells
* will be allocated and buffers will be initialized in
@@ -1486,8 +1556,8 @@ int mthca_tavor_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
}
wqe += sizeof (struct mthca_atomic_seg);
- size += sizeof (struct mthca_raddr_seg) / 16 +
- sizeof (struct mthca_atomic_seg);
+ size += (sizeof (struct mthca_raddr_seg) +
+ sizeof (struct mthca_atomic_seg)) / 16;
break;
case IB_WR_RDMA_WRITE:
@@ -1637,6 +1707,7 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
{
struct mthca_dev *dev = to_mdev(ibqp->device);
struct mthca_qp *qp = to_mqp(ibqp);
+ __be32 doorbell[2];
unsigned long flags;
int err = 0;
int nreq;
@@ -1654,6 +1725,22 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
ind = qp->rq.next_ind;
for (nreq = 0; wr; ++nreq, wr = wr->next) {
+ if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
+ nreq = 0;
+
+ doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
+ doorbell[1] = cpu_to_be32(qp->qpn << 8);
+
+ wmb();
+
+ mthca_write64(doorbell,
+ dev->kar + MTHCA_RECEIVE_DOORBELL,
+ MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
+
+ qp->rq.head += MTHCA_TAVOR_MAX_WQES_PER_RECV_DB;
+ size0 = 0;
+ }
+
if (mthca_wq_overflow(&qp->rq, nreq, qp->ibqp.recv_cq)) {
mthca_err(dev, "RQ %06x full (%u head, %u tail,"
" %d max, %d nreq)\n", qp->qpn,
@@ -1711,8 +1798,6 @@ int mthca_tavor_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr,
out:
if (likely(nreq)) {
- __be32 doorbell[2];
-
doorbell[0] = cpu_to_be32((qp->rq.next_ind << qp->rq.wqe_shift) | size0);
doorbell[1] = cpu_to_be32((qp->qpn << 8) | nreq);
@@ -1806,8 +1891,8 @@ int mthca_arbel_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr,
}
wqe += sizeof (struct mthca_atomic_seg);
- size += sizeof (struct mthca_raddr_seg) / 16 +
- sizeof (struct mthca_atomic_seg);
+ size += (sizeof (struct mthca_raddr_seg) +
+ sizeof (struct mthca_atomic_seg)) / 16;
break;
case IB_WR_RDMA_READ:
diff --git a/drivers/infiniband/hw/mthca/mthca_srq.c b/drivers/infiniband/hw/mthca/mthca_srq.c
index 26d5161fde0..f7d234295ef 100644
--- a/drivers/infiniband/hw/mthca/mthca_srq.c
+++ b/drivers/infiniband/hw/mthca/mthca_srq.c
@@ -417,6 +417,7 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
{
struct mthca_dev *dev = to_mdev(ibsrq->device);
struct mthca_srq *srq = to_msrq(ibsrq);
+ __be32 doorbell[2];
unsigned long flags;
int err = 0;
int first_ind;
@@ -432,6 +433,25 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
first_ind = srq->first_free;
for (nreq = 0; wr; ++nreq, wr = wr->next) {
+ if (unlikely(nreq == MTHCA_TAVOR_MAX_WQES_PER_RECV_DB)) {
+ nreq = 0;
+
+ doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift);
+ doorbell[1] = cpu_to_be32(srq->srqn << 8);
+
+ /*
+ * Make sure that descriptors are written
+ * before doorbell is rung.
+ */
+ wmb();
+
+ mthca_write64(doorbell,
+ dev->kar + MTHCA_RECEIVE_DOORBELL,
+ MTHCA_GET_DOORBELL_LOCK(&dev->doorbell_lock));
+
+ first_ind = srq->first_free;
+ }
+
ind = srq->first_free;
if (ind < 0) {
@@ -494,8 +514,6 @@ int mthca_tavor_post_srq_recv(struct ib_srq *ibsrq, struct ib_recv_wr *wr,
}
if (likely(nreq)) {
- __be32 doorbell[2];
-
doorbell[0] = cpu_to_be32(first_ind << srq->wqe_shift);
doorbell[1] = cpu_to_be32((srq->srqn << 8) | nreq);
diff --git a/drivers/infiniband/hw/mthca/mthca_wqe.h b/drivers/infiniband/hw/mthca/mthca_wqe.h
index 1f4c0ff28f7..73f1c0b9021 100644
--- a/drivers/infiniband/hw/mthca/mthca_wqe.h
+++ b/drivers/infiniband/hw/mthca/mthca_wqe.h
@@ -49,7 +49,8 @@ enum {
};
enum {
- MTHCA_INVAL_LKEY = 0x100
+ MTHCA_INVAL_LKEY = 0x100,
+ MTHCA_TAVOR_MAX_WQES_PER_RECV_DB = 256
};
struct mthca_next_seg {
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 0095acc0fbb..9923a15a999 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -179,6 +179,7 @@ struct ipoib_dev_priv {
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
struct list_head fs_list;
struct dentry *mcg_dentry;
+ struct dentry *path_dentry;
#endif
};
@@ -270,7 +271,6 @@ void ipoib_mcast_dev_flush(struct net_device *dev);
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev);
-void ipoib_mcast_iter_free(struct ipoib_mcast_iter *iter);
int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter);
void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
union ib_gid *gid,
@@ -278,6 +278,11 @@ void ipoib_mcast_iter_read(struct ipoib_mcast_iter *iter,
unsigned int *queuelen,
unsigned int *complete,
unsigned int *send_only);
+
+struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev);
+int ipoib_path_iter_next(struct ipoib_path_iter *iter);
+void ipoib_path_iter_read(struct ipoib_path_iter *iter,
+ struct ipoib_path *path);
#endif
int ipoib_mcast_attach(struct net_device *dev, u16 mlid,
@@ -299,13 +304,13 @@ void ipoib_pkey_poll(void *dev);
int ipoib_pkey_dev_delay_open(struct net_device *dev);
#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
-int ipoib_create_debug_file(struct net_device *dev);
-void ipoib_delete_debug_file(struct net_device *dev);
+void ipoib_create_debug_files(struct net_device *dev);
+void ipoib_delete_debug_files(struct net_device *dev);
int ipoib_register_debugfs(void);
void ipoib_unregister_debugfs(void);
#else
-static inline int ipoib_create_debug_file(struct net_device *dev) { return 0; }
-static inline void ipoib_delete_debug_file(struct net_device *dev) { }
+static inline void ipoib_create_debug_files(struct net_device *dev) { }
+static inline void ipoib_delete_debug_files(struct net_device *dev) { }
static inline int ipoib_register_debugfs(void) { return 0; }
static inline void ipoib_unregister_debugfs(void) { }
#endif
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_fs.c b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
index 38b150f775e..685258e3403 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_fs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_fs.c
@@ -43,6 +43,18 @@ struct file_operations;
static struct dentry *ipoib_root;
+static void format_gid(union ib_gid *gid, char *buf)
+{
+ int i, n;
+
+ for (n = 0, i = 0; i < 8; ++i) {
+ n += sprintf(buf + n, "%x",
+ be16_to_cpu(((__be16 *) gid->raw)[i]));
+ if (i < 7)
+ buf[n++] = ':';
+ }
+}
+
static void *ipoib_mcg_seq_start(struct seq_file *file, loff_t *pos)
{
struct ipoib_mcast_iter *iter;
@@ -54,7 +66,7 @@ static void *ipoib_mcg_seq_start(struct seq_file *file, loff_t *pos)
while (n--) {
if (ipoib_mcast_iter_next(iter)) {
- ipoib_mcast_iter_free(iter);
+ kfree(iter);
return NULL;
}
}
@@ -70,7 +82,7 @@ static void *ipoib_mcg_seq_next(struct seq_file *file, void *iter_ptr,
(*pos)++;
if (ipoib_mcast_iter_next(iter)) {
- ipoib_mcast_iter_free(iter);
+ kfree(iter);
return NULL;
}
@@ -87,32 +99,32 @@ static int ipoib_mcg_seq_show(struct seq_file *file, void *iter_ptr)
struct ipoib_mcast_iter *iter = iter_ptr;
char gid_buf[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"];
union ib_gid mgid;
- int i, n;
unsigned long created;
unsigned int queuelen, complete, send_only;
- if (iter) {
- ipoib_mcast_iter_read(iter, &mgid, &created, &queuelen,
- &complete, &send_only);
+ if (!iter)
+ return 0;
- for (n = 0, i = 0; i < sizeof mgid / 2; ++i) {
- n += sprintf(gid_buf + n, "%x",
- be16_to_cpu(((__be16 *) mgid.raw)[i]));
- if (i < sizeof mgid / 2 - 1)
- gid_buf[n++] = ':';
- }
- }
+ ipoib_mcast_iter_read(iter, &mgid, &created, &queuelen,
+ &complete, &send_only);
- seq_printf(file, "GID: %*s", -(1 + (int) sizeof gid_buf), gid_buf);
+ format_gid(&mgid, gid_buf);
seq_printf(file,
- " created: %10ld queuelen: %4d complete: %d send_only: %d\n",
- created, queuelen, complete, send_only);
+ "GID: %s\n"
+ " created: %10ld\n"
+ " queuelen: %9d\n"
+ " complete: %9s\n"
+ " send_only: %8s\n"
+ "\n",
+ gid_buf, created, queuelen,
+ complete ? "yes" : "no",
+ send_only ? "yes" : "no");
return 0;
}
-static struct seq_operations ipoib_seq_ops = {
+static struct seq_operations ipoib_mcg_seq_ops = {
.start = ipoib_mcg_seq_start,
.next = ipoib_mcg_seq_next,
.stop = ipoib_mcg_seq_stop,
@@ -124,7 +136,7 @@ static int ipoib_mcg_open(struct inode *inode, struct file *file)
struct seq_file *seq;
int ret;
- ret = seq_open(file, &ipoib_seq_ops);
+ ret = seq_open(file, &ipoib_mcg_seq_ops);
if (ret)
return ret;
@@ -134,7 +146,7 @@ static int ipoib_mcg_open(struct inode *inode, struct file *file)
return 0;
}
-static struct file_operations ipoib_fops = {
+static struct file_operations ipoib_mcg_fops = {
.owner = THIS_MODULE,
.open = ipoib_mcg_open,
.read = seq_read,
@@ -142,25 +154,138 @@ static struct file_operations ipoib_fops = {
.release = seq_release
};
-int ipoib_create_debug_file(struct net_device *dev)
+static void *ipoib_path_seq_start(struct seq_file *file, loff_t *pos)
+{
+ struct ipoib_path_iter *iter;
+ loff_t n = *pos;
+
+ iter = ipoib_path_iter_init(file->private);
+ if (!iter)
+ return NULL;
+
+ while (n--) {
+ if (ipoib_path_iter_next(iter)) {
+ kfree(iter);
+ return NULL;
+ }
+ }
+
+ return iter;
+}
+
+static void *ipoib_path_seq_next(struct seq_file *file, void *iter_ptr,
+ loff_t *pos)
+{
+ struct ipoib_path_iter *iter = iter_ptr;
+
+ (*pos)++;
+
+ if (ipoib_path_iter_next(iter)) {
+ kfree(iter);
+ return NULL;
+ }
+
+ return iter;
+}
+
+static void ipoib_path_seq_stop(struct seq_file *file, void *iter_ptr)
+{
+ /* nothing for now */
+}
+
+static int ipoib_path_seq_show(struct seq_file *file, void *iter_ptr)
+{
+ struct ipoib_path_iter *iter = iter_ptr;
+ char gid_buf[sizeof "ffff:ffff:ffff:ffff:ffff:ffff:ffff:ffff"];
+ struct ipoib_path path;
+ int rate;
+
+ if (!iter)
+ return 0;
+
+ ipoib_path_iter_read(iter, &path);
+
+ format_gid(&path.pathrec.dgid, gid_buf);
+
+ seq_printf(file,
+ "GID: %s\n"
+ " complete: %6s\n",
+ gid_buf, path.pathrec.dlid ? "yes" : "no");
+
+ if (path.pathrec.dlid) {
+ rate = ib_sa_rate_enum_to_int(path.pathrec.rate) * 25;
+
+ seq_printf(file,
+ " DLID: 0x%04x\n"
+ " SL: %12d\n"
+ " rate: %*d%s Gb/sec\n",
+ be16_to_cpu(path.pathrec.dlid),
+ path.pathrec.sl,
+ 10 - ((rate % 10) ? 2 : 0),
+ rate / 10, rate % 10 ? ".5" : "");
+ }
+
+ seq_putc(file, '\n');
+
+ return 0;
+}
+
+static struct seq_operations ipoib_path_seq_ops = {
+ .start = ipoib_path_seq_start,
+ .next = ipoib_path_seq_next,
+ .stop = ipoib_path_seq_stop,
+ .show = ipoib_path_seq_show,
+};
+
+static int ipoib_path_open(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq;
+ int ret;
+
+ ret = seq_open(file, &ipoib_path_seq_ops);
+ if (ret)
+ return ret;
+
+ seq = file->private_data;
+ seq->private = inode->u.generic_ip;
+
+ return 0;
+}
+
+static struct file_operations ipoib_path_fops = {
+ .owner = THIS_MODULE,
+ .open = ipoib_path_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release
+};
+
+void ipoib_create_debug_files(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
- char name[IFNAMSIZ + sizeof "_mcg"];
+ char name[IFNAMSIZ + sizeof "_path"];
snprintf(name, sizeof name, "%s_mcg", dev->name);
-
priv->mcg_dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
- ipoib_root, dev, &ipoib_fops);
-
- return priv->mcg_dentry ? 0 : -ENOMEM;
+ ipoib_root, dev, &ipoib_mcg_fops);
+ if (!priv->mcg_dentry)
+ ipoib_warn(priv, "failed to create mcg debug file\n");
+
+ snprintf(name, sizeof name, "%s_path", dev->name);
+ priv->path_dentry = debugfs_create_file(name, S_IFREG | S_IRUGO,
+ ipoib_root, dev, &ipoib_path_fops);
+ if (!priv->path_dentry)
+ ipoib_warn(priv, "failed to create path debug file\n");
}
-void ipoib_delete_debug_file(struct net_device *dev)
+void ipoib_delete_debug_files(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
if (priv->mcg_dentry)
debugfs_remove(priv->mcg_dentry);
+ if (priv->path_dentry)
+ debugfs_remove(priv->path_dentry);
}
int ipoib_register_debugfs(void)
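For reference, the new "_path" entry above follows the stock seq_file-over-debugfs pattern: start/next walk an iterator, show prints one record, and the file_operations are the generic seq_read/seq_lseek/seq_release. Below is a minimal, self-contained sketch of that pattern, not part of this patch; the demo_* names and the static string table are illustrative, and it assumes a 2.6-era kernel with debugfs enabled.

/* Toy seq_file-backed debugfs file: one line per entry of a static table. */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/stat.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

static const char *demo_items[] = { "alpha", "beta", "gamma" };

static void *demo_seq_start(struct seq_file *file, loff_t *pos)
{
	if (*pos >= ARRAY_SIZE(demo_items))
		return NULL;
	return (void *) &demo_items[*pos];
}

static void *demo_seq_next(struct seq_file *file, void *v, loff_t *pos)
{
	++*pos;
	return demo_seq_start(file, pos);
}

static void demo_seq_stop(struct seq_file *file, void *v)
{
	/* nothing to release in this sketch */
}

static int demo_seq_show(struct seq_file *file, void *v)
{
	seq_printf(file, "item: %s\n", *(const char **) v);
	return 0;
}

static struct seq_operations demo_seq_ops = {
	.start = demo_seq_start,
	.next  = demo_seq_next,
	.stop  = demo_seq_stop,
	.show  = demo_seq_show,
};

static int demo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &demo_seq_ops);
}

static struct file_operations demo_fops = {
	.owner   = THIS_MODULE,
	.open    = demo_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release
};

static struct dentry *demo_dentry;

static int __init demo_init(void)
{
	demo_dentry = debugfs_create_file("seqfile_demo", S_IFREG | S_IRUGO,
					  NULL, NULL, &demo_fops);
	return demo_dentry ? 0 : -ENOMEM;
}

static void __exit demo_exit(void)
{
	if (demo_dentry)
		debugfs_remove(demo_dentry);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

A read drives start once, then show/next in a loop until next returns NULL, which is exactly how the ipoib_path_seq_* callbacks above get invoked.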
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c
index ce0296273e7..2fa30751f36 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c
@@ -58,6 +58,11 @@ module_param_named(debug_level, ipoib_debug_level, int, 0644);
MODULE_PARM_DESC(debug_level, "Enable debug tracing if > 0");
#endif
+struct ipoib_path_iter {
+ struct net_device *dev;
+ struct ipoib_path path;
+};
+
static const u8 ipv4_bcast_addr[] = {
0x00, 0xff, 0xff, 0xff,
0xff, 0x12, 0x40, 0x1b, 0x00, 0x00, 0x00, 0x00,
@@ -250,6 +255,64 @@ static void path_free(struct net_device *dev, struct ipoib_path *path)
kfree(path);
}
+#ifdef CONFIG_INFINIBAND_IPOIB_DEBUG
+
+struct ipoib_path_iter *ipoib_path_iter_init(struct net_device *dev)
+{
+ struct ipoib_path_iter *iter;
+
+ iter = kmalloc(sizeof *iter, GFP_KERNEL);
+ if (!iter)
+ return NULL;
+
+ iter->dev = dev;
+ memset(iter->path.pathrec.dgid.raw, 0, 16);
+
+ if (ipoib_path_iter_next(iter)) {
+ kfree(iter);
+ return NULL;
+ }
+
+ return iter;
+}
+
+int ipoib_path_iter_next(struct ipoib_path_iter *iter)
+{
+ struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
+ struct rb_node *n;
+ struct ipoib_path *path;
+ int ret = 1;
+
+ spin_lock_irq(&priv->lock);
+
+ n = rb_first(&priv->path_tree);
+
+ while (n) {
+ path = rb_entry(n, struct ipoib_path, rb_node);
+
+ if (memcmp(iter->path.pathrec.dgid.raw, path->pathrec.dgid.raw,
+ sizeof (union ib_gid)) < 0) {
+ iter->path = *path;
+ ret = 0;
+ break;
+ }
+
+ n = rb_next(n);
+ }
+
+ spin_unlock_irq(&priv->lock);
+
+ return ret;
+}
+
+void ipoib_path_iter_read(struct ipoib_path_iter *iter,
+ struct ipoib_path *path)
+{
+ *path = iter->path;
+}
+
+#endif /* CONFIG_INFINIBAND_IPOIB_DEBUG */
+
void ipoib_flush_paths(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
@@ -763,7 +826,7 @@ void ipoib_dev_cleanup(struct net_device *dev)
{
struct ipoib_dev_priv *priv = netdev_priv(dev), *cpriv, *tcpriv;
- ipoib_delete_debug_file(dev);
+ ipoib_delete_debug_files(dev);
/* Delete any child interfaces first */
list_for_each_entry_safe(cpriv, tcpriv, &priv->child_intfs, list) {
@@ -972,8 +1035,7 @@ static struct net_device *ipoib_add_port(const char *format,
goto register_failed;
}
- if (ipoib_create_debug_file(priv->dev))
- goto debug_failed;
+ ipoib_create_debug_files(priv->dev);
if (ipoib_add_pkey_attr(priv->dev))
goto sysfs_failed;
@@ -987,9 +1049,7 @@ static struct net_device *ipoib_add_port(const char *format,
return priv->dev;
sysfs_failed:
- ipoib_delete_debug_file(priv->dev);
-
-debug_failed:
+ ipoib_delete_debug_files(priv->dev);
unregister_netdev(priv->dev);
register_failed:
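The path iterator added above never holds priv->lock across a seq_file read; ipoib_path_iter_next() copies out the last dgid it reported and, on the next call, rescans the rb-tree under the lock for the first entry that compares greater. A small user-space sketch of that resume-by-last-key idea, purely illustrative, with a sorted string table standing in for the GID-keyed tree:

#include <stdio.h>
#include <string.h>

static const char *table[] = { "aaaa", "bbbb", "cccc", "dddd" };
#define TABLE_LEN (sizeof(table) / sizeof(table[0]))

/* Return the index of the first entry greater than last_key, or -1. */
static int next_after(const char *last_key)
{
	size_t i;

	for (i = 0; i < TABLE_LEN; i++)
		if (strcmp(last_key, table[i]) < 0)
			return (int) i;
	return -1;
}

int main(void)
{
	char last[16] = "";	/* the "smallest" key, like the zeroed dgid */
	int idx;

	while ((idx = next_after(last)) >= 0) {
		printf("%s\n", table[idx]);
		strcpy(last, table[idx]);	/* remember where we stopped */
	}
	return 0;
}

Rescanning on every step costs a linear walk, but it keeps the iterator valid even if paths are created or flushed between reads, which is the point of not pinning the lock.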
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 3ecf78a9493..c33ed87f9df 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -120,12 +120,8 @@ static void ipoib_mcast_free(struct ipoib_mcast *mcast)
if (mcast->ah)
ipoib_put_ah(mcast->ah);
- while (!skb_queue_empty(&mcast->pkt_queue)) {
- struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
-
- skb->dev = dev;
- dev_kfree_skb_any(skb);
- }
+ while (!skb_queue_empty(&mcast->pkt_queue))
+ dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
kfree(mcast);
}
@@ -317,13 +313,8 @@ ipoib_mcast_sendonly_join_complete(int status,
IPOIB_GID_ARG(mcast->mcmember.mgid), status);
/* Flush out any queued packets */
- while (!skb_queue_empty(&mcast->pkt_queue)) {
- struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue);
-
- skb->dev = dev;
-
- dev_kfree_skb_any(skb);
- }
+ while (!skb_queue_empty(&mcast->pkt_queue))
+ dev_kfree_skb_any(skb_dequeue(&mcast->pkt_queue));
/* Clear the busy flag so we try again */
clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
@@ -928,21 +919,16 @@ struct ipoib_mcast_iter *ipoib_mcast_iter_init(struct net_device *dev)
return NULL;
iter->dev = dev;
- memset(iter->mgid.raw, 0, sizeof iter->mgid);
+ memset(iter->mgid.raw, 0, 16);
if (ipoib_mcast_iter_next(iter)) {
- ipoib_mcast_iter_free(iter);
+ kfree(iter);
return NULL;
}
return iter;
}
-void ipoib_mcast_iter_free(struct ipoib_mcast_iter *iter)
-{
- kfree(iter);
-}
-
int ipoib_mcast_iter_next(struct ipoib_mcast_iter *iter)
{
struct ipoib_dev_priv *priv = netdev_priv(iter->dev);
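The multicast cleanups above collapse the flush loops into "dequeue and free until empty" and drop the one-line ipoib_mcast_iter_free() wrapper in favor of plain kfree(). The drain shape, in a stand-alone toy form with malloc/free standing in for sk_buff handling (illustrative only):

#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int payload;
};

static struct node *dequeue(struct node **head)
{
	struct node *n = *head;

	if (n)
		*head = n->next;
	return n;
}

int main(void)
{
	struct node *head = NULL;
	int i;

	for (i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			return 1;
		n->payload = i;
		n->next = head;
		head = n;
	}

	/* Same shape as: while (!skb_queue_empty(q)) dev_kfree_skb_any(skb_dequeue(q)); */
	while (head)
		free(dequeue(&head));

	printf("queue drained\n");
	return 0;
}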
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
index 332d730e60c..d280b341a37 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_vlan.c
@@ -113,8 +113,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
priv->parent = ppriv->dev;
- if (ipoib_create_debug_file(priv->dev))
- goto debug_failed;
+ ipoib_create_debug_files(priv->dev);
if (ipoib_add_pkey_attr(priv->dev))
goto sysfs_failed;
@@ -130,9 +129,7 @@ int ipoib_vlan_add(struct net_device *pdev, unsigned short pkey)
return 0;
sysfs_failed:
- ipoib_delete_debug_file(priv->dev);
-
-debug_failed:
+ ipoib_delete_debug_files(priv->dev);
unregister_netdev(priv->dev);
register_failed:
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 8f464271664..49fa1e4413f 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -2707,7 +2707,7 @@ bnx2_init_nvram(struct bnx2 *bp)
if (j == entry_count) {
bp->flash_info = NULL;
- printk(KERN_ALERT "Unknown flash/EEPROM type.\n");
+ printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
rc = -ENODEV;
}
@@ -3903,6 +3903,8 @@ bnx2_test_loopback(struct bnx2 *bp)
pkt_size = 1514;
skb = dev_alloc_skb(pkt_size);
+ if (!skb)
+ return -ENOMEM;
packet = skb_put(skb, pkt_size);
memcpy(packet, bp->mac_addr, 6);
memset(packet + 6, 0x0, 8);
@@ -4798,11 +4800,7 @@ bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
struct bnx2 *bp = dev->priv;
int rc;
- if (eeprom->offset > bp->flash_info->total_size)
- return -EINVAL;
-
- if ((eeprom->offset + eeprom->len) > bp->flash_info->total_size)
- eeprom->len = bp->flash_info->total_size - eeprom->offset;
+ /* parameters already validated in ethtool_get_eeprom */
rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
@@ -4816,11 +4814,7 @@ bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
struct bnx2 *bp = dev->priv;
int rc;
- if (eeprom->offset > bp->flash_info->total_size)
- return -EINVAL;
-
- if ((eeprom->offset + eeprom->len) > bp->flash_info->total_size)
- eeprom->len = bp->flash_info->total_size - eeprom->offset;
+ /* parameters already validated in ethtool_set_eeprom */
rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
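The bnx2 hunks drop the driver-side EEPROM window checks on the grounds, per the new comments, that the ethtool core validates the parameters before the driver is called, and they add a missing dev_alloc_skb() failure check in the loopback test. To make the removed validation concrete, here is a stand-alone sketch of the validate-and-clamp step the driver used to perform (the sizes are made up, and this is not the ethtool core's code):

#include <stdio.h>
#include <errno.h>

static int validate_eeprom_window(unsigned int total_size,
				  unsigned int offset, unsigned int *len)
{
	if (offset > total_size)
		return -EINVAL;			/* window starts past the end */
	if (offset + *len > total_size)
		*len = total_size - offset;	/* clamp to what exists */
	return 0;
}

int main(void)
{
	unsigned int len = 512;

	/* 256-byte part, read starting at offset 192: clamps to 64 bytes. */
	if (validate_eeprom_window(256, 192, &len) == 0)
		printf("reading %u bytes\n", len);
	return 0;
}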
diff --git a/drivers/net/fs_enet/fs_enet-main.c b/drivers/net/fs_enet/fs_enet-main.c
index 9342d5bc7bb..f5d49a11065 100644
--- a/drivers/net/fs_enet/fs_enet-main.c
+++ b/drivers/net/fs_enet/fs_enet-main.c
@@ -37,6 +37,7 @@
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
+#include <linux/platform_device.h>
#include <linux/vmalloc.h>
#include <asm/pgtable.h>
diff --git a/drivers/net/fs_enet/mac-fcc.c b/drivers/net/fs_enet/mac-fcc.c
index a940b96433c..e67b1d06611 100644
--- a/drivers/net/fs_enet/mac-fcc.c
+++ b/drivers/net/fs_enet/mac-fcc.c
@@ -34,6 +34,7 @@
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
+#include <linux/platform_device.h>
#include <asm/immap_cpm2.h>
#include <asm/mpc8260.h>
diff --git a/drivers/net/fs_enet/mac-fec.c b/drivers/net/fs_enet/mac-fec.c
index 5ef4e845a38..2e8f4446969 100644
--- a/drivers/net/fs_enet/mac-fec.c
+++ b/drivers/net/fs_enet/mac-fec.c
@@ -34,6 +34,7 @@
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
+#include <linux/platform_device.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
diff --git a/drivers/net/fs_enet/mac-scc.c b/drivers/net/fs_enet/mac-scc.c
index d8c6e9cadcf..a3897fda71f 100644
--- a/drivers/net/fs_enet/mac-scc.c
+++ b/drivers/net/fs_enet/mac-scc.c
@@ -34,6 +34,7 @@
#include <linux/ethtool.h>
#include <linux/bitops.h>
#include <linux/fs.h>
+#include <linux/platform_device.h>
#include <asm/irq.h>
#include <asm/uaccess.h>
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index c796f41b4a5..0d765f1733b 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -2290,7 +2290,6 @@ spider_net_remove(struct pci_dev *pdev)
}
static struct pci_driver spider_net_driver = {
- .owner = THIS_MODULE,
.name = spider_net_driver_name,
.id_table = spider_net_pci_tbl,
.probe = spider_net_probe,
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index 2a42add7f56..ea16805a153 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -2,6 +2,8 @@
#include <linux/module.h>
#include <linux/ioport.h>
+#include "pci.h"
+
/*
* This interrupt-safe spinlock protects all accesses to PCI
* configuration space.
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 061ead21ef1..c42b68d3aa2 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -32,8 +32,6 @@
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/delay.h>
-#include <asm/semaphore.h>
-#include <asm/io.h>
#include <linux/pcieport_if.h>
#include "pci_hotplug.h"
@@ -42,6 +40,7 @@
extern int pciehp_poll_mode;
extern int pciehp_poll_time;
extern int pciehp_debug;
+extern int pciehp_force;
/*#define dbg(format, arg...) do { if (pciehp_debug) printk(KERN_DEBUG "%s: " format, MY_NAME , ## arg); } while (0)*/
#define dbg(format, arg...) do { if (pciehp_debug) printk("%s: " format, MY_NAME , ## arg); } while (0)
@@ -49,39 +48,20 @@ extern int pciehp_debug;
#define info(format, arg...) printk(KERN_INFO "%s: " format, MY_NAME , ## arg)
#define warn(format, arg...) printk(KERN_WARNING "%s: " format, MY_NAME , ## arg)
-struct pci_func {
- struct pci_func *next;
- u8 bus;
- u8 device;
- u8 function;
- u8 is_a_board;
- u16 status;
- u8 configured;
- u8 switch_save;
- u8 presence_save;
- u32 base_length[0x06];
- u8 base_type[0x06];
- u16 reserved2;
- u32 config_space[0x20];
- struct pci_resource *mem_head;
- struct pci_resource *p_mem_head;
- struct pci_resource *io_head;
- struct pci_resource *bus_head;
- struct pci_dev* pci_dev;
+struct hotplug_params {
+ u8 cache_line_size;
+ u8 latency_timer;
+ u8 enable_serr;
+ u8 enable_perr;
};
struct slot {
struct slot *next;
u8 bus;
u8 device;
+ u16 status;
u32 number;
- u8 is_a_board;
- u8 configured;
u8 state;
- u8 switch_save;
- u8 presence_save;
- u32 capabilities;
- u16 reserved2;
struct timer_list task_event;
u8 hp_slot;
struct controller *ctrl;
@@ -90,42 +70,47 @@ struct slot {
struct list_head slot_list;
};
-struct pci_resource {
- struct pci_resource * next;
- u32 base;
- u32 length;
-};
-
struct event_info {
u32 event_type;
u8 hp_slot;
};
+typedef u8(*php_intr_callback_t) (u8 hp_slot, void *instance_id);
+
+struct php_ctlr_state_s {
+ struct php_ctlr_state_s *pnext;
+ struct pci_dev *pci_dev;
+ unsigned int irq;
+ unsigned long flags; /* spinlock's */
+ u32 slot_device_offset;
+ u32 num_slots;
+ struct timer_list int_poll_timer; /* Added for poll event */
+ php_intr_callback_t attention_button_callback;
+ php_intr_callback_t switch_change_callback;
+ php_intr_callback_t presence_change_callback;
+ php_intr_callback_t power_fault_callback;
+ void *callback_instance_id;
+ struct ctrl_reg *creg; /* Ptr to controller register space */
+};
+
+#define MAX_EVENTS 10
struct controller {
struct controller *next;
struct semaphore crit_sect; /* critical section semaphore */
- void *hpc_ctlr_handle; /* HPC controller handle */
+ struct php_ctlr_state_s *hpc_ctlr_handle; /* HPC controller handle */
int num_slots; /* Number of slots on ctlr */
int slot_num_inc; /* 1 or -1 */
- struct pci_resource *mem_head;
- struct pci_resource *p_mem_head;
- struct pci_resource *io_head;
- struct pci_resource *bus_head;
struct pci_dev *pci_dev;
struct pci_bus *pci_bus;
- struct event_info event_queue[10];
+ struct event_info event_queue[MAX_EVENTS];
struct slot *slot;
struct hpc_ops *hpc_ops;
wait_queue_head_t queue; /* sleep & wake process */
u8 next_event;
- u8 seg;
u8 bus;
u8 device;
u8 function;
- u8 rev;
u8 slot_device_offset;
- u8 add_support;
- enum pci_bus_speed speed;
u32 first_slot; /* First physical slot number */ /* PCIE only has 1 slot */
u8 slot_bus; /* Bus where the slots handled by this controller sit */
u8 ctrlcap;
@@ -133,20 +118,6 @@ struct controller {
u8 cap_base;
};
-struct irq_mapping {
- u8 barber_pole;
- u8 valid_INT;
- u8 interrupt[4];
-};
-
-struct resource_lists {
- struct pci_resource *mem_head;
- struct pci_resource *p_mem_head;
- struct pci_resource *io_head;
- struct pci_resource *bus_head;
- struct irq_mapping *irqs;
-};
-
#define INT_BUTTON_IGNORE 0
#define INT_PRESENCE_ON 1
#define INT_PRESENCE_OFF 2
@@ -200,21 +171,14 @@ struct resource_lists {
* error Messages
*/
#define msg_initialization_err "Initialization failure, error=%d\n"
-#define msg_HPC_rev_error "Unsupported revision of the PCI hot plug controller found.\n"
-#define msg_HPC_non_pcie "The PCI hot plug controller is not supported by this driver.\n"
-#define msg_HPC_not_supported "This system is not supported by this version of pciephd module. Upgrade to a newer version of pciehpd\n"
-#define msg_unable_to_save "Unable to store PCI hot plug add resource information. This system must be rebooted before adding any PCI devices.\n"
#define msg_button_on "PCI slot #%d - powering on due to button press.\n"
#define msg_button_off "PCI slot #%d - powering off due to button press.\n"
#define msg_button_cancel "PCI slot #%d - action canceled due to button press.\n"
#define msg_button_ignore "PCI slot #%d - button press ignored. (action in progress...)\n"
/* controller functions */
-extern int pciehprm_find_available_resources (struct controller *ctrl);
extern int pciehp_event_start_thread (void);
extern void pciehp_event_stop_thread (void);
-extern struct pci_func *pciehp_slot_create (unsigned char busnumber);
-extern struct pci_func *pciehp_slot_find (unsigned char bus, unsigned char device, unsigned char index);
extern int pciehp_enable_slot (struct slot *slot);
extern int pciehp_disable_slot (struct slot *slot);
@@ -224,25 +188,17 @@ extern u8 pciehp_handle_presence_change (u8 hp_slot, void *inst_id);
extern u8 pciehp_handle_power_fault (u8 hp_slot, void *inst_id);
/* extern void long_delay (int delay); */
-/* resource functions */
-extern int pciehp_resource_sort_and_combine (struct pci_resource **head);
-
/* pci functions */
-extern int pciehp_set_irq (u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num);
-/*extern int pciehp_get_bus_dev (struct controller *ctrl, u8 *bus_num, u8 *dev_num, struct slot *slot);*/
-extern int pciehp_save_config (struct controller *ctrl, int busnumber, int num_ctlr_slots, int first_device_num);
-extern int pciehp_save_used_resources (struct controller *ctrl, struct pci_func * func, int flag);
-extern int pciehp_save_slot_config (struct controller *ctrl, struct pci_func * new_slot);
-extern void pciehp_destroy_board_resources (struct pci_func * func);
-extern int pciehp_return_board_resources (struct pci_func * func, struct resource_lists * resources);
-extern void pciehp_destroy_resource_list (struct resource_lists * resources);
-extern int pciehp_configure_device (struct controller* ctrl, struct pci_func* func);
-extern int pciehp_unconfigure_device (struct pci_func* func);
+extern int pciehp_configure_device (struct slot *p_slot);
+extern int pciehp_unconfigure_device (struct slot *p_slot);
+extern int pciehp_get_hp_hw_control_from_firmware(struct pci_dev *dev);
+extern void pciehp_get_hp_params_from_firmware(struct pci_dev *dev,
+ struct hotplug_params *hpp);
+
/* Global variables */
extern struct controller *pciehp_ctrl_list;
-extern struct pci_func *pciehp_slot_list[256];
/* Inline functions */
@@ -252,12 +208,9 @@ static inline struct slot *pciehp_find_slot(struct controller *ctrl, u8 device)
p_slot = ctrl->slot;
- dbg("p_slot = %p\n", p_slot);
-
while (p_slot && (p_slot->device != device)) {
tmp_slot = p_slot;
p_slot = p_slot->next;
- dbg("In while loop, p_slot = %p\n", p_slot);
}
if (p_slot == NULL) {
err("ERROR: pciehp_find_slot device=0x%x\n", device);
@@ -273,7 +226,6 @@ static inline int wait_for_ctrl_irq(struct controller *ctrl)
DECLARE_WAITQUEUE(wait, current);
- dbg("%s : start\n", __FUNCTION__);
add_wait_queue(&ctrl->queue, &wait);
if (!pciehp_poll_mode)
/* Sleep for up to 1 second */
@@ -285,19 +237,9 @@ static inline int wait_for_ctrl_irq(struct controller *ctrl)
if (signal_pending(current))
retval = -EINTR;
- dbg("%s : end\n", __FUNCTION__);
return retval;
}
-/* Puts node back in the resource list pointed to by head */
-static inline void return_resource(struct pci_resource **head, struct pci_resource *node)
-{
- if (!node || !head)
- return;
- node->next = *head;
- *head = node;
-}
-
#define SLOT_NAME_SIZE 10
static inline void make_slot_name(char *buffer, int buffer_size, struct slot *slot)
@@ -311,14 +253,7 @@ enum php_ctlr_type {
ACPI
};
-typedef u8(*php_intr_callback_t) (unsigned int change_id, void *instance_id);
-
-int pcie_init(struct controller *ctrl, struct pcie_device *dev,
- php_intr_callback_t attention_button_callback,
- php_intr_callback_t switch_change_callback,
- php_intr_callback_t presence_change_callback,
- php_intr_callback_t power_fault_callback);
-
+int pcie_init(struct controller *ctrl, struct pcie_device *dev);
/* This has no meaning for PCI Express, as there is only 1 slot per port */
int pcie_get_ctlr_slot_config(struct controller *ctrl,
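The header rework above also replaces the bare event_queue[10] with event_queue[MAX_EVENTS], which the pciehp_ctrl.c hunks later in this patch pair with next_event = (next_event + 1) % MAX_EVENTS. A tiny user-space sketch of that fixed-size, wrap-around event slot arithmetic (demo types only, not driver code):

#include <stdio.h>

#define MAX_EVENTS 10

struct event_info {
	unsigned int event_type;
	unsigned char hp_slot;
};

struct demo_ctrl {
	struct event_info event_queue[MAX_EVENTS];
	unsigned int next_event;
};

/* Claim the current slot and advance, wrapping after MAX_EVENTS entries. */
static void queue_event(struct demo_ctrl *ctrl, unsigned int type,
			unsigned char hp_slot)
{
	struct event_info *e = &ctrl->event_queue[ctrl->next_event];

	ctrl->next_event = (ctrl->next_event + 1) % MAX_EVENTS;
	e->event_type = type;
	e->hp_slot = hp_slot;
}

int main(void)
{
	struct demo_ctrl ctrl = { .next_event = 0 };
	unsigned int i;

	for (i = 0; i < 25; i++)
		queue_event(&ctrl, i, 0);
	printf("next_event wrapped to %u\n", ctrl.next_event);	/* prints 5 */
	return 0;
}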
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index cafc7eadcf8..8df70486034 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -27,27 +27,20 @@
*
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/types.h>
-#include <linux/proc_fs.h>
-#include <linux/slab.h>
-#include <linux/workqueue.h>
#include <linux/pci.h>
-#include <linux/init.h>
-#include <asm/uaccess.h>
#include "pciehp.h"
-#include "pciehprm.h"
#include <linux/interrupt.h>
/* Global variables */
int pciehp_debug;
int pciehp_poll_mode;
int pciehp_poll_time;
+int pciehp_force;
struct controller *pciehp_ctrl_list;
-struct pci_func *pciehp_slot_list[256];
#define DRIVER_VERSION "0.4"
#define DRIVER_AUTHOR "Dan Zink <dan.zink@compaq.com>, Greg Kroah-Hartman <greg@kroah.com>, Dely Sy <dely.l.sy@intel.com>"
@@ -60,9 +53,11 @@ MODULE_LICENSE("GPL");
module_param(pciehp_debug, bool, 0644);
module_param(pciehp_poll_mode, bool, 0644);
module_param(pciehp_poll_time, int, 0644);
+module_param(pciehp_force, bool, 0644);
MODULE_PARM_DESC(pciehp_debug, "Debugging mode enabled or not");
MODULE_PARM_DESC(pciehp_poll_mode, "Using polling mechanism for hot-plug events or not");
MODULE_PARM_DESC(pciehp_poll_time, "Polling mechanism frequency, in seconds");
+MODULE_PARM_DESC(pciehp_force, "Force pciehp, even if _OSC and OSHP are missing");
#define PCIE_MODULE_NAME "pciehp"
@@ -114,8 +109,6 @@ static int init_slots(struct controller *ctrl)
u32 slot_number;
int result = -ENOMEM;
- dbg("%s\n",__FUNCTION__);
-
number_of_slots = ctrl->num_slots;
slot_device = ctrl->slot_device_offset;
slot_number = ctrl->first_slot;
@@ -370,7 +363,6 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_
u8 value;
struct pci_dev *pdev;
- dbg("%s: Called by hp_drv\n", __FUNCTION__);
ctrl = kmalloc(sizeof(*ctrl), GFP_KERNEL);
if (!ctrl) {
err("%s : out of memory\n", __FUNCTION__);
@@ -378,22 +370,15 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_
}
memset(ctrl, 0, sizeof(struct controller));
- dbg("%s: DRV_thread pid = %d\n", __FUNCTION__, current->pid);
-
pdev = dev->port;
+ ctrl->pci_dev = pdev;
- rc = pcie_init(ctrl, dev,
- (php_intr_callback_t) pciehp_handle_attention_button,
- (php_intr_callback_t) pciehp_handle_switch_change,
- (php_intr_callback_t) pciehp_handle_presence_change,
- (php_intr_callback_t) pciehp_handle_power_fault);
+ rc = pcie_init(ctrl, dev);
if (rc) {
dbg("%s: controller initialization failed\n", PCIE_MODULE_NAME);
goto err_out_free_ctrl;
}
- ctrl->pci_dev = pdev;
-
pci_set_drvdata(pdev, ctrl);
ctrl->pci_bus = kmalloc(sizeof(*ctrl->pci_bus), GFP_KERNEL);
@@ -402,7 +387,6 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_
rc = -ENOMEM;
goto err_out_unmap_mmio_region;
}
- dbg("%s: ctrl->pci_bus %p\n", __FUNCTION__, ctrl->pci_bus);
memcpy (ctrl->pci_bus, pdev->bus, sizeof (*ctrl->pci_bus));
ctrl->bus = pdev->bus->number; /* ctrl bus */
ctrl->slot_bus = pdev->subordinate->number; /* bus controlled by this HPC */
@@ -424,25 +408,6 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_
first_device_num = ctrl->slot_device_offset;
num_ctlr_slots = ctrl->num_slots;
- /* Store PCI Config Space for all devices on this bus */
- dbg("%s: Before calling pciehp_save_config, ctrl->bus %x,ctrl->slot_bus %x\n",
- __FUNCTION__,ctrl->bus, ctrl->slot_bus);
- rc = pciehp_save_config(ctrl, ctrl->slot_bus, num_ctlr_slots, first_device_num);
- if (rc) {
- err("%s: unable to save PCI configuration data, error %d\n", __FUNCTION__, rc);
- goto err_out_free_ctrl_bus;
- }
-
- /* Get IO, memory, and IRQ resources for new devices */
- rc = pciehprm_find_available_resources(ctrl);
- ctrl->add_support = !rc;
-
- if (rc) {
- dbg("pciehprm_find_available_resources = %#x\n", rc);
- err("unable to locate PCI configuration resources for hot plug add.\n");
- goto err_out_free_ctrl_bus;
- }
-
/* Setup the slot information structures */
rc = init_slots(ctrl);
if (rc) {
@@ -451,7 +416,6 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_
}
t_slot = pciehp_find_slot(ctrl, first_device_num);
- dbg("%s: t_slot %p\n", __FUNCTION__, t_slot);
/* Finish setting up the hot plug ctrl device */
ctrl->next_event = 0;
@@ -468,7 +432,6 @@ static int pciehp_probe(struct pcie_device *dev, const struct pcie_port_service_
down(&ctrl->crit_sect);
t_slot->hpc_ops->get_adapter_status(t_slot, &value); /* Check if slot is occupied */
- dbg("%s: adpater value %x\n", __FUNCTION__, value);
if ((POWER_CTRL(ctrl->ctrlcap)) && !value) {
rc = t_slot->hpc_ops->power_off_slot(t_slot); /* Power off slot if not occupied*/
@@ -501,7 +464,6 @@ err_out_none:
static int pcie_start_thread(void)
{
- int loop;
int retval = 0;
dbg("Initialize + Start the notification/polling mechanism \n");
@@ -512,32 +474,11 @@ static int pcie_start_thread(void)
return retval;
}
- dbg("Initialize slot lists\n");
- /* One slot list for each bus in the system */
- for (loop = 0; loop < 256; loop++) {
- pciehp_slot_list[loop] = NULL;
- }
-
return retval;
}
-static inline void __exit
-free_pciehp_res(struct pci_resource *res)
-{
- struct pci_resource *tres;
-
- while (res) {
- tres = res;
- res = res->next;
- kfree(tres);
- }
-}
-
static void __exit unload_pciehpd(void)
{
- struct pci_func *next;
- struct pci_func *TempSlot;
- int loop;
struct controller *ctrl;
struct controller *tctrl;
@@ -546,11 +487,6 @@ static void __exit unload_pciehpd(void)
while (ctrl) {
cleanup_slots(ctrl);
- free_pciehp_res(ctrl->io_head);
- free_pciehp_res(ctrl->mem_head);
- free_pciehp_res(ctrl->p_mem_head);
- free_pciehp_res(ctrl->bus_head);
-
kfree (ctrl->pci_bus);
ctrl->hpc_ops->release_ctlr(ctrl);
@@ -561,20 +497,6 @@ static void __exit unload_pciehpd(void)
kfree(tctrl);
}
- for (loop = 0; loop < 256; loop++) {
- next = pciehp_slot_list[loop];
- while (next != NULL) {
- free_pciehp_res(next->io_head);
- free_pciehp_res(next->mem_head);
- free_pciehp_res(next->p_mem_head);
- free_pciehp_res(next->bus_head);
-
- TempSlot = next;
- next = next->next;
- kfree(TempSlot);
- }
- }
-
/* Stop the notification mechanism */
pciehp_event_stop_thread();
@@ -639,21 +561,16 @@ static int __init pcied_init(void)
if (retval)
goto error_hpc_init;
- retval = pciehprm_init(PCI);
- if (!retval) {
- retval = pcie_port_service_register(&hpdriver_portdrv);
- dbg("pcie_port_service_register = %d\n", retval);
- info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
- if (retval)
- dbg("%s: Failure to register service\n", __FUNCTION__);
- }
+ retval = pcie_port_service_register(&hpdriver_portdrv);
+ dbg("pcie_port_service_register = %d\n", retval);
+ info(DRIVER_DESC " version: " DRIVER_VERSION "\n");
+ if (retval)
+ dbg("%s: Failure to register service\n", __FUNCTION__);
error_hpc_init:
if (retval) {
- pciehprm_cleanup();
pciehp_event_stop_thread();
- } else
- pciehprm_print_pirt();
+ };
return retval;
}
@@ -663,9 +580,6 @@ static void __exit pcied_cleanup(void)
dbg("unload_pciehpd()\n");
unload_pciehpd();
- pciehprm_cleanup();
-
- dbg("pcie_port_service_unregister\n");
pcie_port_service_unregister(&hpdriver_portdrv);
info(DRIVER_DESC " version: " DRIVER_VERSION " unloaded\n");
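pciehp_core.c gains a pciehp_force parameter next to the existing debug and polling knobs; the hunk above declares it as an int and registers it with the bool parser. For readers new to the mechanism, here is a minimal module showing how such a parameter is declared and described; demo_force, the int parser and the module name are illustrative, not the driver's code:

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/kernel.h>

static int demo_force;
module_param(demo_force, int, 0644);
MODULE_PARM_DESC(demo_force, "Bind even when firmware gives no hand-off hint");

static int __init demo_param_init(void)
{
	/* Set at load time (demo_force=1) or later via sysfs, per the 0644 mode. */
	printk(KERN_INFO "demo_force=%d\n", demo_force);
	return 0;
}

static void __exit demo_param_exit(void)
{
}

module_init(demo_param_init);
module_exit(demo_param_exit);
MODULE_LICENSE("GPL");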
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 898f6da6f0d..5e582eca21d 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -27,25 +27,14 @@
*
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/workqueue.h>
-#include <linux/interrupt.h>
-#include <linux/delay.h>
-#include <linux/wait.h>
#include <linux/smp_lock.h>
#include <linux/pci.h>
#include "../pci.h"
#include "pciehp.h"
-#include "pciehprm.h"
-static u32 configure_new_device(struct controller *ctrl, struct pci_func *func,
- u8 behind_bridge, struct resource_lists *resources, u8 bridge_bus, u8 bridge_dev);
-static int configure_new_function( struct controller *ctrl, struct pci_func *func,
- u8 behind_bridge, struct resource_lists *resources, u8 bridge_bus, u8 bridge_dev);
static void interrupt_event_handler(struct controller *ctrl);
static struct semaphore event_semaphore; /* mutex for process loop (up if something to process) */
@@ -60,22 +49,18 @@ u8 pciehp_handle_attention_button(u8 hp_slot, void *inst_id)
struct slot *p_slot;
u8 rc = 0;
u8 getstatus;
- struct pci_func *func;
struct event_info *taskInfo;
/* Attention Button Change */
dbg("pciehp: Attention button interrupt received.\n");
- func = pciehp_slot_find(ctrl->slot_bus, (hp_slot + ctrl->slot_device_offset), 0);
-
/* This is the structure that tells the worker thread what to do */
taskInfo = &(ctrl->event_queue[ctrl->next_event]);
p_slot = pciehp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
- p_slot->hpc_ops->get_adapter_status(p_slot, &(func->presence_save));
p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
- ctrl->next_event = (ctrl->next_event + 1) % 10;
+ ctrl->next_event = (ctrl->next_event + 1) % MAX_EVENTS;
taskInfo->hp_slot = hp_slot;
rc++;
@@ -117,24 +102,20 @@ u8 pciehp_handle_switch_change(u8 hp_slot, void *inst_id)
struct slot *p_slot;
u8 rc = 0;
u8 getstatus;
- struct pci_func *func;
struct event_info *taskInfo;
/* Switch Change */
dbg("pciehp: Switch interrupt received.\n");
- func = pciehp_slot_find(ctrl->slot_bus, (hp_slot + ctrl->slot_device_offset), 0);
-
/* This is the structure that tells the worker thread
* what to do
*/
taskInfo = &(ctrl->event_queue[ctrl->next_event]);
- ctrl->next_event = (ctrl->next_event + 1) % 10;
+ ctrl->next_event = (ctrl->next_event + 1) % MAX_EVENTS;
taskInfo->hp_slot = hp_slot;
rc++;
p_slot = pciehp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
- p_slot->hpc_ops->get_adapter_status(p_slot, &(func->presence_save));
p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
if (getstatus) {
@@ -142,14 +123,12 @@ u8 pciehp_handle_switch_change(u8 hp_slot, void *inst_id)
* Switch opened
*/
info("Latch open on Slot(%d)\n", ctrl->first_slot + hp_slot);
- func->switch_save = 0;
taskInfo->event_type = INT_SWITCH_OPEN;
} else {
/*
* Switch closed
*/
info("Latch close on Slot(%d)\n", ctrl->first_slot + hp_slot);
- func->switch_save = 0x10;
taskInfo->event_type = INT_SWITCH_CLOSE;
}
@@ -163,20 +142,17 @@ u8 pciehp_handle_presence_change(u8 hp_slot, void *inst_id)
{
struct controller *ctrl = (struct controller *) inst_id;
struct slot *p_slot;
- u8 rc = 0;
- struct pci_func *func;
+ u8 presence_save, rc = 0;
struct event_info *taskInfo;
/* Presence Change */
dbg("pciehp: Presence/Notify input change.\n");
- func = pciehp_slot_find(ctrl->slot_bus, (hp_slot + ctrl->slot_device_offset), 0);
-
/* This is the structure that tells the worker thread
* what to do
*/
taskInfo = &(ctrl->event_queue[ctrl->next_event]);
- ctrl->next_event = (ctrl->next_event + 1) % 10;
+ ctrl->next_event = (ctrl->next_event + 1) % MAX_EVENTS;
taskInfo->hp_slot = hp_slot;
rc++;
@@ -185,8 +161,8 @@ u8 pciehp_handle_presence_change(u8 hp_slot, void *inst_id)
/* Switch is open, assume a presence change
* Save the presence state
*/
- p_slot->hpc_ops->get_adapter_status(p_slot, &(func->presence_save));
- if (func->presence_save) {
+ p_slot->hpc_ops->get_adapter_status(p_slot, &presence_save);
+ if (presence_save) {
/*
* Card Present
*/
@@ -211,19 +187,16 @@ u8 pciehp_handle_power_fault(u8 hp_slot, void *inst_id)
struct controller *ctrl = (struct controller *) inst_id;
struct slot *p_slot;
u8 rc = 0;
- struct pci_func *func;
struct event_info *taskInfo;
/* power fault */
dbg("pciehp: Power fault interrupt received.\n");
- func = pciehp_slot_find(ctrl->slot_bus, (hp_slot + ctrl->slot_device_offset), 0);
-
/* this is the structure that tells the worker thread
* what to do
*/
taskInfo = &(ctrl->event_queue[ctrl->next_event]);
- ctrl->next_event = (ctrl->next_event + 1) % 10;
+ ctrl->next_event = (ctrl->next_event + 1) % MAX_EVENTS;
taskInfo->hp_slot = hp_slot;
rc++;
@@ -234,7 +207,7 @@ u8 pciehp_handle_power_fault(u8 hp_slot, void *inst_id)
* power fault Cleared
*/
info("Power fault cleared on Slot(%d)\n", ctrl->first_slot + hp_slot);
- func->status = 0x00;
+ p_slot->status = 0x00;
taskInfo->event_type = INT_POWER_FAULT_CLEAR;
} else {
/*
@@ -243,7 +216,7 @@ u8 pciehp_handle_power_fault(u8 hp_slot, void *inst_id)
info("Power fault on Slot(%d)\n", ctrl->first_slot + hp_slot);
taskInfo->event_type = INT_POWER_FAULT;
/* set power fault status for this board */
- func->status = 0xFF;
+ p_slot->status = 0xFF;
info("power fault bit %x set\n", hp_slot);
}
if (rc)
@@ -252,810 +225,6 @@ u8 pciehp_handle_power_fault(u8 hp_slot, void *inst_id)
return rc;
}
-
-/**
- * sort_by_size: sort nodes by their length, smallest first.
- *
- * @head: list to sort
- */
-static int sort_by_size(struct pci_resource **head)
-{
- struct pci_resource *current_res;
- struct pci_resource *next_res;
- int out_of_order = 1;
-
- if (!(*head))
- return 1;
-
- if (!((*head)->next))
- return 0;
-
- while (out_of_order) {
- out_of_order = 0;
-
- /* Special case for swapping list head */
- if (((*head)->next) &&
- ((*head)->length > (*head)->next->length)) {
- out_of_order++;
- current_res = *head;
- *head = (*head)->next;
- current_res->next = (*head)->next;
- (*head)->next = current_res;
- }
-
- current_res = *head;
-
- while (current_res->next && current_res->next->next) {
- if (current_res->next->length > current_res->next->next->length) {
- out_of_order++;
- next_res = current_res->next;
- current_res->next = current_res->next->next;
- current_res = current_res->next;
- next_res->next = current_res->next;
- current_res->next = next_res;
- } else
- current_res = current_res->next;
- }
- } /* End of out_of_order loop */
-
- return 0;
-}
-
-
-/*
- * sort_by_max_size
- *
- * Sorts nodes on the list by their length.
- * Largest first.
- *
- */
-static int sort_by_max_size(struct pci_resource **head)
-{
- struct pci_resource *current_res;
- struct pci_resource *next_res;
- int out_of_order = 1;
-
- if (!(*head))
- return 1;
-
- if (!((*head)->next))
- return 0;
-
- while (out_of_order) {
- out_of_order = 0;
-
- /* Special case for swapping list head */
- if (((*head)->next) &&
- ((*head)->length < (*head)->next->length)) {
- out_of_order++;
- current_res = *head;
- *head = (*head)->next;
- current_res->next = (*head)->next;
- (*head)->next = current_res;
- }
-
- current_res = *head;
-
- while (current_res->next && current_res->next->next) {
- if (current_res->next->length < current_res->next->next->length) {
- out_of_order++;
- next_res = current_res->next;
- current_res->next = current_res->next->next;
- current_res = current_res->next;
- next_res->next = current_res->next;
- current_res->next = next_res;
- } else
- current_res = current_res->next;
- }
- } /* End of out_of_order loop */
-
- return 0;
-}
-
-
-/**
- * do_pre_bridge_resource_split: return one unused resource node
- * @head: list to scan
- *
- */
-static struct pci_resource *
-do_pre_bridge_resource_split(struct pci_resource **head,
- struct pci_resource **orig_head, u32 alignment)
-{
- struct pci_resource *prevnode = NULL;
- struct pci_resource *node;
- struct pci_resource *split_node;
- u32 rc;
- u32 temp_dword;
- dbg("do_pre_bridge_resource_split\n");
-
- if (!(*head) || !(*orig_head))
- return NULL;
-
- rc = pciehp_resource_sort_and_combine(head);
-
- if (rc)
- return NULL;
-
- if ((*head)->base != (*orig_head)->base)
- return NULL;
-
- if ((*head)->length == (*orig_head)->length)
- return NULL;
-
-
- /* If we got here, there the bridge requires some of the resource, but
- * we may be able to split some off of the front
- */
- node = *head;
-
- if (node->length & (alignment -1)) {
- /* this one isn't an aligned length, so we'll make a new entry
- * and split it up.
- */
- split_node = kmalloc(sizeof(struct pci_resource), GFP_KERNEL);
-
- if (!split_node)
- return NULL;
-
- temp_dword = (node->length | (alignment-1)) + 1 - alignment;
-
- split_node->base = node->base;
- split_node->length = temp_dword;
-
- node->length -= temp_dword;
- node->base += split_node->length;
-
- /* Put it in the list */
- *head = split_node;
- split_node->next = node;
- }
-
- if (node->length < alignment)
- return NULL;
-
- /* Now unlink it */
- if (*head == node) {
- *head = node->next;
- } else {
- prevnode = *head;
- while (prevnode->next != node)
- prevnode = prevnode->next;
-
- prevnode->next = node->next;
- }
- node->next = NULL;
-
- return node;
-}
-
-
-/**
- * do_bridge_resource_split: return one unused resource node
- * @head: list to scan
- *
- */
-static struct pci_resource *
-do_bridge_resource_split(struct pci_resource **head, u32 alignment)
-{
- struct pci_resource *prevnode = NULL;
- struct pci_resource *node;
- u32 rc;
- u32 temp_dword;
-
- if (!(*head))
- return NULL;
-
- rc = pciehp_resource_sort_and_combine(head);
-
- if (rc)
- return NULL;
-
- node = *head;
-
- while (node->next) {
- prevnode = node;
- node = node->next;
- kfree(prevnode);
- }
-
- if (node->length < alignment) {
- kfree(node);
- return NULL;
- }
-
- if (node->base & (alignment - 1)) {
- /* Short circuit if adjusted size is too small */
- temp_dword = (node->base | (alignment-1)) + 1;
- if ((node->length - (temp_dword - node->base)) < alignment) {
- kfree(node);
- return NULL;
- }
-
- node->length -= (temp_dword - node->base);
- node->base = temp_dword;
- }
-
- if (node->length & (alignment - 1)) {
- /* There's stuff in use after this node */
- kfree(node);
- return NULL;
- }
-
- return node;
-}
-
-
-/*
- * get_io_resource
- *
- * this function sorts the resource list by size and then
- * returns the first node of "size" length that is not in the
- * ISA aliasing window. If it finds a node larger than "size"
- * it will split it up.
- *
- * size must be a power of two.
- */
-static struct pci_resource *get_io_resource(struct pci_resource **head, u32 size)
-{
- struct pci_resource *prevnode;
- struct pci_resource *node;
- struct pci_resource *split_node = NULL;
- u32 temp_dword;
-
- if (!(*head))
- return NULL;
-
- if ( pciehp_resource_sort_and_combine(head) )
- return NULL;
-
- if ( sort_by_size(head) )
- return NULL;
-
- for (node = *head; node; node = node->next) {
- if (node->length < size)
- continue;
-
- if (node->base & (size - 1)) {
- /* this one isn't base aligned properly
- so we'll make a new entry and split it up */
- temp_dword = (node->base | (size-1)) + 1;
-
- /*/ Short circuit if adjusted size is too small */
- if ((node->length - (temp_dword - node->base)) < size)
- continue;
-
- split_node = kmalloc(sizeof(struct pci_resource),
- GFP_KERNEL);
-
- if (!split_node)
- return NULL;
-
- split_node->base = node->base;
- split_node->length = temp_dword - node->base;
- node->base = temp_dword;
- node->length -= split_node->length;
-
- /* Put it in the list */
- split_node->next = node->next;
- node->next = split_node;
- } /* End of non-aligned base */
-
- /* Don't need to check if too small since we already did */
- if (node->length > size) {
- /* this one is longer than we need
- so we'll make a new entry and split it up */
- split_node = kmalloc(sizeof(struct pci_resource),
- GFP_KERNEL);
-
- if (!split_node)
- return NULL;
-
- split_node->base = node->base + size;
- split_node->length = node->length - size;
- node->length = size;
-
- /* Put it in the list */
- split_node->next = node->next;
- node->next = split_node;
- } /* End of too big on top end */
-
- /* For IO make sure it's not in the ISA aliasing space */
- if (node->base & 0x300L)
- continue;
-
- /* If we got here, then it is the right size
- Now take it out of the list */
- if (*head == node) {
- *head = node->next;
- } else {
- prevnode = *head;
- while (prevnode->next != node)
- prevnode = prevnode->next;
-
- prevnode->next = node->next;
- }
- node->next = NULL;
- /* Stop looping */
- break;
- }
-
- return node;
-}
-
-
-/*
- * get_max_resource
- *
- * Gets the largest node that is at least "size" big from the
- * list pointed to by head. It aligns the node on top and bottom
- * to "size" alignment before returning it.
- * J.I. modified to put max size limits of; 64M->32M->16M->8M->4M->1M
- * This is needed to avoid allocating entire ACPI _CRS res to one child bridge/slot.
- */
-static struct pci_resource *get_max_resource(struct pci_resource **head, u32 size)
-{
- struct pci_resource *max;
- struct pci_resource *temp;
- struct pci_resource *split_node;
- u32 temp_dword;
- u32 max_size[] = { 0x4000000, 0x2000000, 0x1000000, 0x0800000, 0x0400000, 0x0200000, 0x0100000, 0x00 };
- int i;
-
- if (!(*head))
- return NULL;
-
- if (pciehp_resource_sort_and_combine(head))
- return NULL;
-
- if (sort_by_max_size(head))
- return NULL;
-
- for (max = *head;max; max = max->next) {
-
- /* If not big enough we could probably just bail,
- instead we'll continue to the next. */
- if (max->length < size)
- continue;
-
- if (max->base & (size - 1)) {
- /* this one isn't base aligned properly
- so we'll make a new entry and split it up */
- temp_dword = (max->base | (size-1)) + 1;
-
- /* Short circuit if adjusted size is too small */
- if ((max->length - (temp_dword - max->base)) < size)
- continue;
-
- split_node = kmalloc(sizeof(struct pci_resource),
- GFP_KERNEL);
-
- if (!split_node)
- return NULL;
-
- split_node->base = max->base;
- split_node->length = temp_dword - max->base;
- max->base = temp_dword;
- max->length -= split_node->length;
-
- /* Put it next in the list */
- split_node->next = max->next;
- max->next = split_node;
- }
-
- if ((max->base + max->length) & (size - 1)) {
- /* this one isn't end aligned properly at the top
- so we'll make a new entry and split it up */
- split_node = kmalloc(sizeof(struct pci_resource),
- GFP_KERNEL);
-
- if (!split_node)
- return NULL;
- temp_dword = ((max->base + max->length) & ~(size - 1));
- split_node->base = temp_dword;
- split_node->length = max->length + max->base
- - split_node->base;
- max->length -= split_node->length;
-
- /* Put it in the list */
- split_node->next = max->next;
- max->next = split_node;
- }
-
- /* Make sure it didn't shrink too much when we aligned it */
- if (max->length < size)
- continue;
-
- for ( i = 0; max_size[i] > size; i++) {
- if (max->length > max_size[i]) {
- split_node = kmalloc(sizeof(struct pci_resource),
- GFP_KERNEL);
- if (!split_node)
- break; /* return NULL; */
- split_node->base = max->base + max_size[i];
- split_node->length = max->length - max_size[i];
- max->length = max_size[i];
- /* Put it next in the list */
- split_node->next = max->next;
- max->next = split_node;
- break;
- }
- }
-
- /* Now take it out of the list */
- temp = (struct pci_resource*) *head;
- if (temp == max) {
- *head = max->next;
- } else {
- while (temp && temp->next != max) {
- temp = temp->next;
- }
-
- temp->next = max->next;
- }
-
- max->next = NULL;
- return max;
- }
-
- /* If we get here, we couldn't find one */
- return NULL;
-}
-
-
-/*
- * get_resource
- *
- * this function sorts the resource list by size and then
- * returns the first node of "size" length. If it finds a node
- * larger than "size" it will split it up.
- *
- * size must be a power of two.
- */
-static struct pci_resource *get_resource(struct pci_resource **head, u32 size)
-{
- struct pci_resource *prevnode;
- struct pci_resource *node;
- struct pci_resource *split_node;
- u32 temp_dword;
-
- if (!(*head))
- return NULL;
-
- if ( pciehp_resource_sort_and_combine(head) )
- return NULL;
-
- if ( sort_by_size(head) )
- return NULL;
-
- for (node = *head; node; node = node->next) {
- dbg("%s: req_size =0x%x node=%p, base=0x%x, length=0x%x\n",
- __FUNCTION__, size, node, node->base, node->length);
- if (node->length < size)
- continue;
-
- if (node->base & (size - 1)) {
- dbg("%s: not aligned\n", __FUNCTION__);
- /* this one isn't base aligned properly
- so we'll make a new entry and split it up */
- temp_dword = (node->base | (size-1)) + 1;
-
- /* Short circuit if adjusted size is too small */
- if ((node->length - (temp_dword - node->base)) < size)
- continue;
-
- split_node = kmalloc(sizeof(struct pci_resource),
- GFP_KERNEL);
-
- if (!split_node)
- return NULL;
-
- split_node->base = node->base;
- split_node->length = temp_dword - node->base;
- node->base = temp_dword;
- node->length -= split_node->length;
-
- /* Put it in the list */
- split_node->next = node->next;
- node->next = split_node;
- } /* End of non-aligned base */
-
- /* Don't need to check if too small since we already did */
- if (node->length > size) {
- dbg("%s: too big\n", __FUNCTION__);
- /* this one is longer than we need
- so we'll make a new entry and split it up */
- split_node = kmalloc(sizeof(struct pci_resource),
- GFP_KERNEL);
-
- if (!split_node)
- return NULL;
-
- split_node->base = node->base + size;
- split_node->length = node->length - size;
- node->length = size;
-
- /* Put it in the list */
- split_node->next = node->next;
- node->next = split_node;
- } /* End of too big on top end */
-
- dbg("%s: got one!!!\n", __FUNCTION__);
- /* If we got here, then it is the right size
- Now take it out of the list */
- if (*head == node) {
- *head = node->next;
- } else {
- prevnode = *head;
- while (prevnode->next != node)
- prevnode = prevnode->next;
-
- prevnode->next = node->next;
- }
- node->next = NULL;
- /* Stop looping */
- break;
- }
- return node;
-}
-
-
-/*
- * pciehp_resource_sort_and_combine
- *
- * Sorts all of the nodes in the list in ascending order by
- * their base addresses. Also does garbage collection by
- * combining adjacent nodes.
- *
- * returns 0 if success
- */
-int pciehp_resource_sort_and_combine(struct pci_resource **head)
-{
- struct pci_resource *node1;
- struct pci_resource *node2;
- int out_of_order = 1;
-
- dbg("%s: head = %p, *head = %p\n", __FUNCTION__, head, *head);
-
- if (!(*head))
- return 1;
-
- dbg("*head->next = %p\n",(*head)->next);
-
- if (!(*head)->next)
- return 0; /* only one item on the list, already sorted! */
-
- dbg("*head->base = 0x%x\n",(*head)->base);
- dbg("*head->next->base = 0x%x\n",(*head)->next->base);
- while (out_of_order) {
- out_of_order = 0;
-
- /* Special case for swapping list head */
- if (((*head)->next) &&
- ((*head)->base > (*head)->next->base)) {
- node1 = *head;
- (*head) = (*head)->next;
- node1->next = (*head)->next;
- (*head)->next = node1;
- out_of_order++;
- }
-
- node1 = (*head);
-
- while (node1->next && node1->next->next) {
- if (node1->next->base > node1->next->next->base) {
- out_of_order++;
- node2 = node1->next;
- node1->next = node1->next->next;
- node1 = node1->next;
- node2->next = node1->next;
- node1->next = node2;
- } else
- node1 = node1->next;
- }
- } /* End of out_of_order loop */
-
- node1 = *head;
-
- while (node1 && node1->next) {
- if ((node1->base + node1->length) == node1->next->base) {
- /* Combine */
- dbg("8..\n");
- node1->length += node1->next->length;
- node2 = node1->next;
- node1->next = node1->next->next;
- kfree(node2);
- } else
- node1 = node1->next;
- }
-
- return 0;
-}
-
-
-/**
- * pciehp_slot_create - Creates a node and adds it to the proper bus.
- * @busnumber - bus where new node is to be located
- *
- * Returns pointer to the new node or NULL if unsuccessful
- */
-struct pci_func *pciehp_slot_create(u8 busnumber)
-{
- struct pci_func *new_slot;
- struct pci_func *next;
- dbg("%s: busnumber %x\n", __FUNCTION__, busnumber);
- new_slot = kmalloc(sizeof(struct pci_func), GFP_KERNEL);
-
- if (new_slot == NULL)
- return new_slot;
-
- memset(new_slot, 0, sizeof(struct pci_func));
-
- new_slot->next = NULL;
- new_slot->configured = 1;
-
- if (pciehp_slot_list[busnumber] == NULL) {
- pciehp_slot_list[busnumber] = new_slot;
- } else {
- next = pciehp_slot_list[busnumber];
- while (next->next != NULL)
- next = next->next;
- next->next = new_slot;
- }
- return new_slot;
-}
-
-
-/**
- * slot_remove - Removes a node from the linked list of slots.
- * @old_slot: slot to remove
- *
- * Returns 0 if successful, !0 otherwise.
- */
-static int slot_remove(struct pci_func * old_slot)
-{
- struct pci_func *next;
-
- if (old_slot == NULL)
- return 1;
-
- next = pciehp_slot_list[old_slot->bus];
-
- if (next == NULL)
- return 1;
-
- if (next == old_slot) {
- pciehp_slot_list[old_slot->bus] = old_slot->next;
- pciehp_destroy_board_resources(old_slot);
- kfree(old_slot);
- return 0;
- }
-
- while ((next->next != old_slot) && (next->next != NULL)) {
- next = next->next;
- }
-
- if (next->next == old_slot) {
- next->next = old_slot->next;
- pciehp_destroy_board_resources(old_slot);
- kfree(old_slot);
- return 0;
- } else
- return 2;
-}
-
-
-/**
- * bridge_slot_remove - Removes a node from the linked list of slots.
- * @bridge: bridge to remove
- *
- * Returns 0 if successful, !0 otherwise.
- */
-static int bridge_slot_remove(struct pci_func *bridge)
-{
- u8 subordinateBus, secondaryBus;
- u8 tempBus;
- struct pci_func *next;
-
- if (bridge == NULL)
- return 1;
-
- secondaryBus = (bridge->config_space[0x06] >> 8) & 0xFF;
- subordinateBus = (bridge->config_space[0x06] >> 16) & 0xFF;
-
- for (tempBus = secondaryBus; tempBus <= subordinateBus; tempBus++) {
- next = pciehp_slot_list[tempBus];
-
- while (!slot_remove(next)) {
- next = pciehp_slot_list[tempBus];
- }
- }
-
- next = pciehp_slot_list[bridge->bus];
-
- if (next == NULL) {
- return 1;
- }
-
- if (next == bridge) {
- pciehp_slot_list[bridge->bus] = bridge->next;
- kfree(bridge);
- return 0;
- }
-
- while ((next->next != bridge) && (next->next != NULL)) {
- next = next->next;
- }
-
- if (next->next == bridge) {
- next->next = bridge->next;
- kfree(bridge);
- return 0;
- } else
- return 2;
-}
-
-
-/**
- * pciehp_slot_find - Looks for a node by bus, and device, multiple functions accessed
- * @bus: bus to find
- * @device: device to find
- * @index: is 0 for first function found, 1 for the second...
- *
- * Returns pointer to the node if successful, %NULL otherwise.
- */
-struct pci_func *pciehp_slot_find(u8 bus, u8 device, u8 index)
-{
- int found = -1;
- struct pci_func *func;
-
- func = pciehp_slot_list[bus];
- dbg("%s: bus %x device %x index %x\n",
- __FUNCTION__, bus, device, index);
- if (func != NULL) {
- dbg("%s: func-> bus %x device %x function %x pci_dev %p\n",
- __FUNCTION__, func->bus, func->device, func->function,
- func->pci_dev);
- } else
- dbg("%s: func == NULL\n", __FUNCTION__);
-
- if ((func == NULL) || ((func->device == device) && (index == 0)))
- return func;
-
- if (func->device == device)
- found++;
-
- while (func->next != NULL) {
- func = func->next;
-
- dbg("%s: In while loop, func-> bus %x device %x function %x pci_dev %p\n",
- __FUNCTION__, func->bus, func->device, func->function,
- func->pci_dev);
- if (func->device == device)
- found++;
- dbg("%s: while loop, found %d, index %d\n", __FUNCTION__,
- found, index);
-
- if ((found == index) || (func->function == index)) {
- dbg("%s: Found bus %x dev %x func %x\n", __FUNCTION__,
- func->bus, func->device, func->function);
- return func;
- }
- }
-
- return NULL;
-}
-
-static int is_bridge(struct pci_func * func)
-{
- /* Check the header type */
- if (((func->config_space[0x03] >> 16) & 0xFF) == 0x01)
- return 1;
- else
- return 0;
-}
-
-
/* The following routines constitute the bulk of the
hotplug controller logic
*/
@@ -1100,20 +269,17 @@ static void set_slot_off(struct controller *ctrl, struct slot * pslot)
* Configures board
*
*/
-static u32 board_added(struct pci_func * func, struct controller * ctrl)
+static int board_added(struct slot *p_slot)
{
u8 hp_slot;
- int index;
- u32 temp_register = 0xFFFFFFFF;
- u32 rc = 0;
- struct pci_func *new_func = NULL;
- struct slot *p_slot;
- struct resource_lists res_lists;
+ int rc = 0;
+ struct controller *ctrl = p_slot->ctrl;
- p_slot = pciehp_find_slot(ctrl, func->device);
- hp_slot = func->device - ctrl->slot_device_offset;
+ hp_slot = p_slot->device - ctrl->slot_device_offset;
- dbg("%s: func->device, slot_offset, hp_slot = %d, %d ,%d\n", __FUNCTION__, func->device, ctrl->slot_device_offset, hp_slot);
+ dbg("%s: slot device, slot offset, hp slot = %d, %d ,%d\n",
+ __FUNCTION__, p_slot->device,
+ ctrl->slot_device_offset, hp_slot);
/* Wait for exclusive access to hardware */
down(&ctrl->crit_sect);
@@ -1141,9 +307,7 @@ static u32 board_added(struct pci_func * func, struct controller * ctrl)
up(&ctrl->crit_sect);
/* Wait for ~1 second */
- dbg("%s: before long_delay\n", __FUNCTION__);
wait_for_ctrl_irq (ctrl);
- dbg("%s: afterlong_delay\n", __FUNCTION__);
/* Check link training status */
rc = p_slot->hpc_ops->check_lnk_status(ctrl);
@@ -1153,98 +317,47 @@ static u32 board_added(struct pci_func * func, struct controller * ctrl)
return rc;
}
- dbg("%s: func status = %x\n", __FUNCTION__, func->status);
+ dbg("%s: slot status = %x\n", __FUNCTION__, p_slot->status);
/* Check for a power fault */
- if (func->status == 0xFF) {
+ if (p_slot->status == 0xFF) {
/* power fault occurred, but it was benign */
- temp_register = 0xFFFFFFFF;
- dbg("%s: temp register set to %x by power fault\n", __FUNCTION__, temp_register);
rc = POWER_FAILURE;
- func->status = 0;
- } else {
- /* Get vendor/device ID u32 */
- rc = pci_bus_read_config_dword (ctrl->pci_dev->subordinate, PCI_DEVFN(func->device, func->function),
- PCI_VENDOR_ID, &temp_register);
- dbg("%s: pci_bus_read_config_dword returns %d\n", __FUNCTION__, rc);
- dbg("%s: temp_register is %x\n", __FUNCTION__, temp_register);
-
- if (rc != 0) {
- /* Something's wrong here */
- temp_register = 0xFFFFFFFF;
- dbg("%s: temp register set to %x by error\n", __FUNCTION__, temp_register);
- }
- /* Preset return code. It will be changed later if things go okay. */
- rc = NO_ADAPTER_PRESENT;
+ p_slot->status = 0;
+ goto err_exit;
}
- /* All F's is an empty slot or an invalid board */
- if (temp_register != 0xFFFFFFFF) { /* Check for a board in the slot */
- res_lists.io_head = ctrl->io_head;
- res_lists.mem_head = ctrl->mem_head;
- res_lists.p_mem_head = ctrl->p_mem_head;
- res_lists.bus_head = ctrl->bus_head;
- res_lists.irqs = NULL;
-
- rc = configure_new_device(ctrl, func, 0, &res_lists, 0, 0);
- dbg("%s: back from configure_new_device\n", __FUNCTION__);
-
- ctrl->io_head = res_lists.io_head;
- ctrl->mem_head = res_lists.mem_head;
- ctrl->p_mem_head = res_lists.p_mem_head;
- ctrl->bus_head = res_lists.bus_head;
+ rc = pciehp_configure_device(p_slot);
+ if (rc) {
+ err("Cannot add device 0x%x:%x\n", p_slot->bus,
+ p_slot->device);
+ goto err_exit;
+ }
- pciehp_resource_sort_and_combine(&(ctrl->mem_head));
- pciehp_resource_sort_and_combine(&(ctrl->p_mem_head));
- pciehp_resource_sort_and_combine(&(ctrl->io_head));
- pciehp_resource_sort_and_combine(&(ctrl->bus_head));
+ p_slot->status = 0;
- if (rc) {
- set_slot_off(ctrl, p_slot);
- return rc;
- }
- pciehp_save_slot_config(ctrl, func);
-
- func->status = 0;
- func->switch_save = 0x10;
- func->is_a_board = 0x01;
+ /*
+ * Some PCI Express root ports require fixup after hot-plug operation.
+ */
+ if (pcie_mch_quirk)
+ pci_fixup_device(pci_fixup_final, ctrl->pci_dev);
+ if (PWR_LED(ctrl->ctrlcap)) {
+ /* Wait for exclusive access to hardware */
+ down(&ctrl->crit_sect);
- /* next, we will instantiate the linux pci_dev structures
- * (with appropriate driver notification, if already present)
- */
- index = 0;
- do {
- new_func = pciehp_slot_find(ctrl->slot_bus, func->device, index++);
- if (new_func && !new_func->pci_dev) {
- dbg("%s:call pci_hp_configure_dev, func %x\n",
- __FUNCTION__, index);
- pciehp_configure_device(ctrl, new_func);
- }
- } while (new_func);
-
- /*
- * Some PCI Express root ports require fixup after hot-plug operation.
- */
- if (pcie_mch_quirk)
- pci_fixup_device(pci_fixup_final, ctrl->pci_dev);
-
- if (PWR_LED(ctrl->ctrlcap)) {
- /* Wait for exclusive access to hardware */
- down(&ctrl->crit_sect);
-
- p_slot->hpc_ops->green_led_on(p_slot);
+ p_slot->hpc_ops->green_led_on(p_slot);
- /* Wait for the command to complete */
- wait_for_ctrl_irq (ctrl);
+ /* Wait for the command to complete */
+ wait_for_ctrl_irq (ctrl);
- /* Done with exclusive hardware access */
- up(&ctrl->crit_sect);
- }
- } else {
- set_slot_off(ctrl, p_slot);
- return -1;
- }
+ /* Done with exclusive hardware access */
+ up(&ctrl->crit_sect);
+ }
return 0;
+
+err_exit:
+ set_slot_off(ctrl, p_slot);
+ return -1;
}
@@ -1252,55 +365,25 @@ static u32 board_added(struct pci_func * func, struct controller * ctrl)
* remove_board - Turns off slot and LED's
*
*/
-static u32 remove_board(struct pci_func *func, struct controller *ctrl)
+static int remove_board(struct slot *p_slot)
{
- int index;
- u8 skip = 0;
u8 device;
u8 hp_slot;
- u32 rc;
- struct resource_lists res_lists;
- struct pci_func *temp_func;
- struct slot *p_slot;
-
- if (func == NULL)
- return 1;
+ int rc;
+ struct controller *ctrl = p_slot->ctrl;
- if (pciehp_unconfigure_device(func))
+ if (pciehp_unconfigure_device(p_slot))
return 1;
- device = func->device;
+ device = p_slot->device;
- hp_slot = func->device - ctrl->slot_device_offset;
+ hp_slot = p_slot->device - ctrl->slot_device_offset;
p_slot = pciehp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
dbg("In %s, hp_slot = %d\n", __FUNCTION__, hp_slot);
- if ((ctrl->add_support) &&
- !(func->bus_head || func->mem_head || func->p_mem_head || func->io_head)) {
- /* Here we check to see if we've saved any of the board's
- * resources already. If so, we'll skip the attempt to
- * determine what's being used.
- */
- index = 0;
-
- temp_func = func;
-
- while ((temp_func = pciehp_slot_find(temp_func->bus, temp_func->device, index++))) {
- if (temp_func->bus_head || temp_func->mem_head
- || temp_func->p_mem_head || temp_func->io_head) {
- skip = 1;
- break;
- }
- }
-
- if (!skip)
- rc = pciehp_save_used_resources(ctrl, func, DISABLE_CARD);
- }
/* Change status to shutdown */
- if (func->is_a_board)
- func->status = 0x01;
- func->configured = 0;
+ p_slot->status = 0x01;
/* Wait for exclusive access to hardware */
down(&ctrl->crit_sect);
@@ -1328,56 +411,6 @@ static u32 remove_board(struct pci_func *func, struct controller *ctrl)
/* Done with exclusive hardware access */
up(&ctrl->crit_sect);
- if (ctrl->add_support) {
- while (func) {
- res_lists.io_head = ctrl->io_head;
- res_lists.mem_head = ctrl->mem_head;
- res_lists.p_mem_head = ctrl->p_mem_head;
- res_lists.bus_head = ctrl->bus_head;
-
- dbg("Returning resources to ctlr lists for (B/D/F) = (%#x/%#x/%#x)\n",
- func->bus, func->device, func->function);
-
- pciehp_return_board_resources(func, &res_lists);
-
- ctrl->io_head = res_lists.io_head;
- ctrl->mem_head = res_lists.mem_head;
- ctrl->p_mem_head = res_lists.p_mem_head;
- ctrl->bus_head = res_lists.bus_head;
-
- pciehp_resource_sort_and_combine(&(ctrl->mem_head));
- pciehp_resource_sort_and_combine(&(ctrl->p_mem_head));
- pciehp_resource_sort_and_combine(&(ctrl->io_head));
- pciehp_resource_sort_and_combine(&(ctrl->bus_head));
-
- if (is_bridge(func)) {
- dbg("PCI Bridge Hot-Remove s:b:d:f(%02x:%02x:%02x:%02x)\n",
- ctrl->seg, func->bus, func->device, func->function);
- bridge_slot_remove(func);
- } else {
- dbg("PCI Function Hot-Remove s:b:d:f(%02x:%02x:%02x:%02x)\n",
- ctrl->seg, func->bus, func->device, func->function);
- slot_remove(func);
- }
-
- func = pciehp_slot_find(ctrl->slot_bus, device, 0);
- }
-
- /* Setup slot structure with entry for empty slot */
- func = pciehp_slot_create(ctrl->slot_bus);
-
- if (func == NULL) {
- return 1;
- }
-
- func->bus = ctrl->slot_bus;
- func->device = device;
- func->function = 0;
- func->configured = 0;
- func->switch_save = 0x10;
- func->is_a_board = 0;
- }
-
return 0;
}
@@ -1411,13 +444,15 @@ static void pciehp_pushbutton_thread(unsigned long slot)
p_slot->hpc_ops->get_power_status(p_slot, &getstatus);
if (getstatus) {
p_slot->state = POWEROFF_STATE;
- dbg("In power_down_board, b:d(%x:%x)\n", p_slot->bus, p_slot->device);
+ dbg("%s: disabling bus:device(%x:%x)\n", __FUNCTION__,
+ p_slot->bus, p_slot->device);
pciehp_disable_slot(p_slot);
p_slot->state = STATIC_STATE;
} else {
p_slot->state = POWERON_STATE;
- dbg("In add_board, b:d(%x:%x)\n", p_slot->bus, p_slot->device);
+ dbg("%s: adding bus:device(%x:%x)\n", __FUNCTION__,
+ p_slot->bus, p_slot->device);
if (pciehp_enable_slot(p_slot) && PWR_LED(p_slot->ctrl->ctrlcap)) {
/* Wait for exclusive access to hardware */
@@ -1459,13 +494,15 @@ static void pciehp_surprise_rm_thread(unsigned long slot)
p_slot->hpc_ops->get_adapter_status(p_slot, &getstatus);
if (!getstatus) {
p_slot->state = POWEROFF_STATE;
- dbg("In removing board, b:d(%x:%x)\n", p_slot->bus, p_slot->device);
+ dbg("%s: removing bus:device(%x:%x)\n",
+ __FUNCTION__, p_slot->bus, p_slot->device);
pciehp_disable_slot(p_slot);
p_slot->state = STATIC_STATE;
} else {
p_slot->state = POWERON_STATE;
- dbg("In add_board, b:d(%x:%x)\n", p_slot->bus, p_slot->device);
+ dbg("%s: adding bus:device(%x:%x)\n",
+ __FUNCTION__, p_slot->bus, p_slot->device);
if (pciehp_enable_slot(p_slot) && PWR_LED(p_slot->ctrl->ctrlcap)) {
/* Wait for exclusive access to hardware */
@@ -1531,7 +568,6 @@ int pciehp_event_start_thread(void)
err ("Can't start up our event thread\n");
return -1;
}
- dbg("Our event thread pid = %d\n", pid);
return 0;
}
@@ -1539,9 +575,7 @@ int pciehp_event_start_thread(void)
void pciehp_event_stop_thread(void)
{
event_finished = 1;
- dbg("event_thread finish command given\n");
up(&event_semaphore);
- dbg("wait for event_thread to exit\n");
down(&event_exit);
}
@@ -1573,7 +607,6 @@ static void interrupt_event_handler(struct controller *ctrl)
{
int loop = 0;
int change = 1;
- struct pci_func *func;
u8 hp_slot;
u8 getstatus;
struct slot *p_slot;
@@ -1581,16 +614,12 @@ static void interrupt_event_handler(struct controller *ctrl)
while (change) {
change = 0;
- for (loop = 0; loop < 10; loop++) {
+ for (loop = 0; loop < MAX_EVENTS; loop++) {
if (ctrl->event_queue[loop].event_type != 0) {
hp_slot = ctrl->event_queue[loop].hp_slot;
- func = pciehp_slot_find(ctrl->slot_bus, (hp_slot + ctrl->slot_device_offset), 0);
-
p_slot = pciehp_find_slot(ctrl, hp_slot + ctrl->slot_device_offset);
- dbg("hp_slot %d, func %p, p_slot %p\n", hp_slot, func, p_slot);
-
if (ctrl->event_queue[loop].event_type == INT_BUTTON_CANCEL) {
dbg("button cancel\n");
del_timer(&p_slot->task_event);
@@ -1682,7 +711,6 @@ static void interrupt_event_handler(struct controller *ctrl)
p_slot->task_event.function = (void (*)(unsigned long)) pushbutton_helper_thread;
p_slot->task_event.data = (unsigned long) p_slot;
- dbg("add_timer p_slot = %p\n", (void *) p_slot);
add_timer(&p_slot->task_event);
}
}
@@ -1737,13 +765,6 @@ int pciehp_enable_slot(struct slot *p_slot)
{
u8 getstatus = 0;
int rc;
- struct pci_func *func;
-
- func = pciehp_slot_find(p_slot->bus, p_slot->device, 0);
- if (!func) {
- dbg("%s: Error! slot NULL\n", __FUNCTION__);
- return 1;
- }
/* Check to see if (latch closed, card present, power off) */
down(&p_slot->ctrl->crit_sect);
@@ -1773,45 +794,11 @@ int pciehp_enable_slot(struct slot *p_slot)
}
up(&p_slot->ctrl->crit_sect);
- slot_remove(func);
-
- func = pciehp_slot_create(p_slot->bus);
- if (func == NULL)
- return 1;
-
- func->bus = p_slot->bus;
- func->device = p_slot->device;
- func->function = 0;
- func->configured = 0;
- func->is_a_board = 1;
-
- /* We have to save the presence info for these slots */
- p_slot->hpc_ops->get_adapter_status(p_slot, &(func->presence_save));
p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
- func->switch_save = !getstatus? 0x10:0;
- rc = board_added(func, p_slot->ctrl);
+ rc = board_added(p_slot);
if (rc) {
- if (is_bridge(func))
- bridge_slot_remove(func);
- else
- slot_remove(func);
-
- /* Setup slot structure with entry for empty slot */
- func = pciehp_slot_create(p_slot->bus);
- if (func == NULL)
- return 1; /* Out of memory */
-
- func->bus = p_slot->bus;
- func->device = p_slot->device;
- func->function = 0;
- func->configured = 0;
- func->is_a_board = 1;
-
- /* We have to save the presence info for these slots */
- p_slot->hpc_ops->get_adapter_status(p_slot, &(func->presence_save));
p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
- func->switch_save = !getstatus? 0x10:0;
}
if (p_slot)
@@ -1823,14 +810,8 @@ int pciehp_enable_slot(struct slot *p_slot)
int pciehp_disable_slot(struct slot *p_slot)
{
- u8 class_code, header_type, BCR;
- u8 index = 0;
u8 getstatus = 0;
- u32 rc = 0;
int ret = 0;
- unsigned int devfn;
- struct pci_bus *pci_bus = p_slot->ctrl->pci_dev->subordinate;
- struct pci_func *func;
if (!p_slot->ctrl)
return 1;
@@ -1867,838 +848,8 @@ int pciehp_disable_slot(struct slot *p_slot)
up(&p_slot->ctrl->crit_sect);
- func = pciehp_slot_find(p_slot->bus, p_slot->device, index++);
-
- /* Make sure there are no video controllers here
- * for all func of p_slot
- */
- while (func && !rc) {
- pci_bus->number = func->bus;
- devfn = PCI_DEVFN(func->device, func->function);
-
- /* Check the Class Code */
- rc = pci_bus_read_config_byte (pci_bus, devfn, 0x0B, &class_code);
- if (rc)
- return rc;
-
- if (class_code == PCI_BASE_CLASS_DISPLAY) {
- /* Display/Video adapter (not supported) */
- rc = REMOVE_NOT_SUPPORTED;
- } else {
- /* See if it's a bridge */
- rc = pci_bus_read_config_byte (pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
- if (rc)
- return rc;
-
- /* If it's a bridge, check the VGA Enable bit */
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) {
- rc = pci_bus_read_config_byte (pci_bus, devfn, PCI_BRIDGE_CONTROL, &BCR);
- if (rc)
- return rc;
-
- /* If the VGA Enable bit is set, remove isn't supported */
- if (BCR & PCI_BRIDGE_CTL_VGA) {
- rc = REMOVE_NOT_SUPPORTED;
- }
- }
- }
-
- func = pciehp_slot_find(p_slot->bus, p_slot->device, index++);
- }
-
- func = pciehp_slot_find(p_slot->bus, p_slot->device, 0);
- if ((func != NULL) && !rc) {
- rc = remove_board(func, p_slot->ctrl);
- } else if (!rc)
- rc = 1;
-
- if (p_slot)
- update_slot_info(p_slot);
-
- return rc;
-}
-
-
-/**
- * configure_new_device - Configures the PCI header information of one board.
- *
- * @ctrl: pointer to controller structure
- * @func: pointer to function structure
- * @behind_bridge: 1 if this is a recursive call, 0 if not
- * @resources: pointer to set of resource lists
- *
- * Returns 0 if success
- *
- */
-static u32 configure_new_device(struct controller * ctrl, struct pci_func * func,
- u8 behind_bridge, struct resource_lists * resources, u8 bridge_bus, u8 bridge_dev)
-{
- u8 temp_byte, function, max_functions, stop_it;
- int rc;
- u32 ID;
- struct pci_func *new_slot;
- struct pci_bus lpci_bus, *pci_bus;
- int index;
-
- new_slot = func;
-
- dbg("%s\n", __FUNCTION__);
- memcpy(&lpci_bus, ctrl->pci_dev->subordinate, sizeof(lpci_bus));
- pci_bus = &lpci_bus;
- pci_bus->number = func->bus;
-
- /* Check for Multi-function device */
- rc = pci_bus_read_config_byte(pci_bus, PCI_DEVFN(func->device, func->function), 0x0E, &temp_byte);
- if (rc) {
- dbg("%s: rc = %d\n", __FUNCTION__, rc);
- return rc;
- }
-
- if (temp_byte & 0x80) /* Multi-function device */
- max_functions = 8;
- else
- max_functions = 1;
-
- function = 0;
-
- do {
- rc = configure_new_function(ctrl, new_slot, behind_bridge,
- resources, bridge_bus, bridge_dev);
-
- if (rc) {
- dbg("configure_new_function failed: %d\n", rc);
- index = 0;
-
- while (new_slot) {
- new_slot = pciehp_slot_find(new_slot->bus,
- new_slot->device, index++);
-
- if (new_slot)
- pciehp_return_board_resources(new_slot,
- resources);
- }
-
- return rc;
- }
-
- function++;
-
- stop_it = 0;
-
- /* The following loop skips to the next present function
- * and creates a board structure
- */
-
- while ((function < max_functions) && (!stop_it)) {
- pci_bus_read_config_dword(pci_bus, PCI_DEVFN(func->device, function), 0x00, &ID);
-
- if (ID == 0xFFFFFFFF) { /* There's nothing there. */
- function++;
- } else { /* There's something there */
- /* Setup slot structure. */
- new_slot = pciehp_slot_create(func->bus);
-
- if (new_slot == NULL) {
- /* Out of memory */
- return 1;
- }
-
- new_slot->bus = func->bus;
- new_slot->device = func->device;
- new_slot->function = function;
- new_slot->is_a_board = 1;
- new_slot->status = 0;
-
- stop_it++;
- }
- }
-
- } while (function < max_functions);
- dbg("returning from %s\n", __FUNCTION__);
-
- return 0;
-}
-
-/*
- * Configuration logic that involves the hotplug data structures and
- * their bookkeeping
- */
-
-/**
- * configure_bridge: fill bridge's registers, either configure or disable it.
- */
-static int
-configure_bridge(struct pci_bus *pci_bus, unsigned int devfn,
- struct pci_resource *mem_node,
- struct pci_resource **hold_mem_node,
- int base_addr, int limit_addr)
-{
- u16 temp_word;
- u32 rc;
-
- if (mem_node) {
- memcpy(*hold_mem_node, mem_node, sizeof(struct pci_resource));
- mem_node->next = NULL;
-
- /* set Mem base and Limit registers */
- RES_CHECK(mem_node->base, 16);
- temp_word = (u16)(mem_node->base >> 16);
- rc = pci_bus_write_config_word(pci_bus, devfn, base_addr, temp_word);
-
- RES_CHECK(mem_node->base + mem_node->length - 1, 16);
- temp_word = (u16)((mem_node->base + mem_node->length - 1) >> 16);
- rc = pci_bus_write_config_word(pci_bus, devfn, limit_addr, temp_word);
- } else {
- temp_word = 0xFFFF;
- rc = pci_bus_write_config_word(pci_bus, devfn, base_addr, temp_word);
-
- temp_word = 0x0000;
- rc = pci_bus_write_config_word(pci_bus, devfn, limit_addr, temp_word);
-
- kfree(*hold_mem_node);
- *hold_mem_node = NULL;
- }
- return rc;
-}
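The removed configure_bridge() helper above programs a bridge's memory window by writing the upper 16 bits of the 1 MB-aligned base and limit into the 16-bit PCI_MEMORY_BASE/PCI_MEMORY_LIMIT registers. A standalone sketch of that encoding arithmetic, with an invented example window:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Example window: 0xE0000000 .. 0xE00FFFFF (1 MB); values chosen for illustration. */
	uint32_t base  = 0xE0000000;
	uint32_t limit = base + 0x100000 - 1;

	/* Type 1 headers keep only address bits [31:20]; the registers hold them
	 * in bits [15:4], so a 1 MB-aligned address can simply be shifted right by 16. */
	uint16_t mem_base_reg  = (uint16_t)(base >> 16);
	uint16_t mem_limit_reg = (uint16_t)(limit >> 16);

	printf("PCI_MEMORY_BASE  = 0x%04x\n", mem_base_reg);   /* 0xe000 */
	printf("PCI_MEMORY_LIMIT = 0x%04x\n", mem_limit_reg);  /* 0xe00f */

	/* A disabled window is programmed with base above limit, e.g. 0xFFFF / 0x0000,
	 * which is what the helper does when no memory node is available. */
	printf("disabled window: base 0x%04x, limit 0x%04x\n", 0xFFFF, 0x0000);
	return 0;
}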
-
-static int
-configure_new_bridge(struct controller *ctrl, struct pci_func *func,
- u8 behind_bridge, struct resource_lists *resources,
- struct pci_bus *pci_bus)
-{
- int cloop;
- u8 temp_byte;
- u8 device;
- u16 temp_word;
- u32 rc;
- u32 ID;
- unsigned int devfn;
- struct pci_resource *mem_node;
- struct pci_resource *p_mem_node;
- struct pci_resource *io_node;
- struct pci_resource *bus_node;
- struct pci_resource *hold_mem_node;
- struct pci_resource *hold_p_mem_node;
- struct pci_resource *hold_IO_node;
- struct pci_resource *hold_bus_node;
- struct irq_mapping irqs;
- struct pci_func *new_slot;
- struct resource_lists temp_resources;
-
- devfn = PCI_DEVFN(func->device, func->function);
-
- /* set Primary bus */
- dbg("set Primary bus = 0x%x\n", func->bus);
- rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_PRIMARY_BUS, func->bus);
- if (rc)
- return rc;
-
- /* find range of busses to use */
- bus_node = get_max_resource(&resources->bus_head, 1L);
-
- /* If we don't have any busses to allocate, we can't continue */
- if (!bus_node) {
- err("Got NO bus resource to use\n");
- return -ENOMEM;
- }
- dbg("Got ranges of buses to use: base:len=0x%x:%x\n", bus_node->base, bus_node->length);
-
- /* set Secondary bus */
- temp_byte = (u8)bus_node->base;
- dbg("set Secondary bus = 0x%x\n", temp_byte);
- rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_SECONDARY_BUS, temp_byte);
- if (rc)
- return rc;
-
- /* set subordinate bus */
- temp_byte = (u8)(bus_node->base + bus_node->length - 1);
- dbg("set subordinate bus = 0x%x\n", temp_byte);
- rc = pci_bus_write_config_byte (pci_bus, devfn, PCI_SUBORDINATE_BUS, temp_byte);
- if (rc)
- return rc;
-
- /* Set HP parameters (Cache Line Size, Latency Timer) */
- rc = pciehprm_set_hpp(ctrl, func, PCI_HEADER_TYPE_BRIDGE);
- if (rc)
- return rc;
-
- /* Setup the IO, memory, and prefetchable windows */
-
- io_node = get_max_resource(&(resources->io_head), 0x1000L);
- if (io_node) {
- dbg("io_node(base, len, next) (%x, %x, %p)\n", io_node->base,
- io_node->length, io_node->next);
- }
-
- mem_node = get_max_resource(&(resources->mem_head), 0x100000L);
- if (mem_node) {
- dbg("mem_node(base, len, next) (%x, %x, %p)\n", mem_node->base,
- mem_node->length, mem_node->next);
- }
-
- if (resources->p_mem_head)
- p_mem_node = get_max_resource(&(resources->p_mem_head), 0x100000L);
- else {
- /*
- * In some platform implementation, MEM and PMEM are not
- * distinguished, and hence ACPI _CRS has only MEM entries
- * for both MEM and PMEM.
- */
- dbg("using MEM for PMEM\n");
- p_mem_node = get_max_resource(&(resources->mem_head), 0x100000L);
- }
- if (p_mem_node) {
- dbg("p_mem_node(base, len, next) (%x, %x, %p)\n", p_mem_node->base,
- p_mem_node->length, p_mem_node->next);
- }
-
- /* set up the IRQ info */
- if (!resources->irqs) {
- irqs.barber_pole = 0;
- irqs.interrupt[0] = 0;
- irqs.interrupt[1] = 0;
- irqs.interrupt[2] = 0;
- irqs.interrupt[3] = 0;
- irqs.valid_INT = 0;
- } else {
- irqs.barber_pole = resources->irqs->barber_pole;
- irqs.interrupt[0] = resources->irqs->interrupt[0];
- irqs.interrupt[1] = resources->irqs->interrupt[1];
- irqs.interrupt[2] = resources->irqs->interrupt[2];
- irqs.interrupt[3] = resources->irqs->interrupt[3];
- irqs.valid_INT = resources->irqs->valid_INT;
- }
-
- /* set up resource lists that are now aligned on top and bottom
- * for anything behind the bridge.
- */
- temp_resources.bus_head = bus_node;
- temp_resources.io_head = io_node;
- temp_resources.mem_head = mem_node;
- temp_resources.p_mem_head = p_mem_node;
- temp_resources.irqs = &irqs;
-
- /* Make copies of the nodes we are going to pass down so that
- * if there is a problem,we can just use these to free resources
- */
- hold_bus_node = kmalloc(sizeof(struct pci_resource), GFP_KERNEL);
- hold_IO_node = kmalloc(sizeof(struct pci_resource), GFP_KERNEL);
- hold_mem_node = kmalloc(sizeof(struct pci_resource), GFP_KERNEL);
- hold_p_mem_node = kmalloc(sizeof(struct pci_resource), GFP_KERNEL);
-
- if (!hold_bus_node || !hold_IO_node || !hold_mem_node || !hold_p_mem_node) {
- kfree(hold_bus_node);
- kfree(hold_IO_node);
- kfree(hold_mem_node);
- kfree(hold_p_mem_node);
-
- return 1;
- }
-
- memcpy(hold_bus_node, bus_node, sizeof(struct pci_resource));
-
- bus_node->base += 1;
- bus_node->length -= 1;
- bus_node->next = NULL;
-
- /* If we have IO resources copy them and fill in the bridge's
- * IO range registers
- */
- if (io_node) {
- memcpy(hold_IO_node, io_node, sizeof(struct pci_resource));
- io_node->next = NULL;
-
- /* set IO base and Limit registers */
- RES_CHECK(io_node->base, 8);
- temp_byte = (u8)(io_node->base >> 8);
- rc = pci_bus_write_config_byte (pci_bus, devfn, PCI_IO_BASE, temp_byte);
-
- RES_CHECK(io_node->base + io_node->length - 1, 8);
- temp_byte = (u8)((io_node->base + io_node->length - 1) >> 8);
- rc = pci_bus_write_config_byte (pci_bus, devfn, PCI_IO_LIMIT, temp_byte);
- } else {
- kfree(hold_IO_node);
- hold_IO_node = NULL;
- }
-
- /* If we have memory resources copy them and fill in the bridge's
- * memory range registers. Otherwise, fill in the range
- * registers with values that disable them.
- */
- rc = configure_bridge(pci_bus, devfn, mem_node, &hold_mem_node,
- PCI_MEMORY_BASE, PCI_MEMORY_LIMIT);
-
- /* If we have prefetchable memory resources copy them and
- * fill in the bridge's memory range registers. Otherwise,
- * fill in the range registers with values that disable them.
- */
- rc = configure_bridge(pci_bus, devfn, p_mem_node, &hold_p_mem_node,
- PCI_PREF_MEMORY_BASE, PCI_PREF_MEMORY_LIMIT);
-
- /* Adjust this to compensate for extra adjustment in first loop */
- irqs.barber_pole--;
-
- rc = 0;
-
- /* Here we actually find the devices and configure them */
- for (device = 0; (device <= 0x1F) && !rc; device++) {
- irqs.barber_pole = (irqs.barber_pole + 1) & 0x03;
-
- ID = 0xFFFFFFFF;
- pci_bus->number = hold_bus_node->base;
- pci_bus_read_config_dword (pci_bus, PCI_DEVFN(device, 0), PCI_VENDOR_ID, &ID);
- pci_bus->number = func->bus;
-
- if (ID != 0xFFFFFFFF) { /* device Present */
- /* Setup slot structure. */
- new_slot = pciehp_slot_create(hold_bus_node->base);
-
- if (new_slot == NULL) {
- /* Out of memory */
- rc = -ENOMEM;
- continue;
- }
-
- new_slot->bus = hold_bus_node->base;
- new_slot->device = device;
- new_slot->function = 0;
- new_slot->is_a_board = 1;
- new_slot->status = 0;
-
- rc = configure_new_device(ctrl, new_slot, 1,
- &temp_resources, func->bus,
- func->device);
- dbg("configure_new_device rc=0x%x\n",rc);
- } /* End of IF (device in slot?) */
- } /* End of FOR loop */
-
- if (rc) {
- pciehp_destroy_resource_list(&temp_resources);
-
- return_resource(&(resources->bus_head), hold_bus_node);
- return_resource(&(resources->io_head), hold_IO_node);
- return_resource(&(resources->mem_head), hold_mem_node);
- return_resource(&(resources->p_mem_head), hold_p_mem_node);
- return(rc);
- }
-
- /* save the interrupt routing information */
- if (resources->irqs) {
- resources->irqs->interrupt[0] = irqs.interrupt[0];
- resources->irqs->interrupt[1] = irqs.interrupt[1];
- resources->irqs->interrupt[2] = irqs.interrupt[2];
- resources->irqs->interrupt[3] = irqs.interrupt[3];
- resources->irqs->valid_INT = irqs.valid_INT;
- } else if (!behind_bridge) {
- /* We need to hook up the interrupts here */
- for (cloop = 0; cloop < 4; cloop++) {
- if (irqs.valid_INT & (0x01 << cloop)) {
- rc = pciehp_set_irq(func->bus, func->device,
- 0x0A + cloop, irqs.interrupt[cloop]);
- if (rc) {
- pciehp_destroy_resource_list (&temp_resources);
- return_resource(&(resources->bus_head), hold_bus_node);
- return_resource(&(resources->io_head), hold_IO_node);
- return_resource(&(resources->mem_head), hold_mem_node);
- return_resource(&(resources->p_mem_head), hold_p_mem_node);
- return rc;
- }
- }
- } /* end of for loop */
- }
-
- /* Return unused bus resources
- * First use the temporary node to store information for the board
- */
- if (hold_bus_node && bus_node && temp_resources.bus_head) {
- hold_bus_node->length = bus_node->base - hold_bus_node->base;
-
- hold_bus_node->next = func->bus_head;
- func->bus_head = hold_bus_node;
-
- temp_byte = (u8)(temp_resources.bus_head->base - 1);
-
- /* set subordinate bus */
- dbg("re-set subordinate bus = 0x%x\n", temp_byte);
- rc = pci_bus_write_config_byte (pci_bus, devfn, PCI_SUBORDINATE_BUS, temp_byte);
-
- if (temp_resources.bus_head->length == 0) {
- kfree(temp_resources.bus_head);
- temp_resources.bus_head = NULL;
- } else {
- dbg("return bus res of b:d(0x%x:%x) base:len(0x%x:%x)\n",
- func->bus, func->device, temp_resources.bus_head->base, temp_resources.bus_head->length);
- return_resource(&(resources->bus_head), temp_resources.bus_head);
- }
- }
-
- /* If we have IO space available and there is some left,
- * return the unused portion
- */
- if (hold_IO_node && temp_resources.io_head) {
- io_node = do_pre_bridge_resource_split(&(temp_resources.io_head),
- &hold_IO_node, 0x1000);
-
- /* Check if we were able to split something off */
- if (io_node) {
- hold_IO_node->base = io_node->base + io_node->length;
-
- RES_CHECK(hold_IO_node->base, 8);
- temp_byte = (u8)((hold_IO_node->base) >> 8);
- rc = pci_bus_write_config_byte (pci_bus, devfn, PCI_IO_BASE, temp_byte);
-
- return_resource(&(resources->io_head), io_node);
- }
-
- io_node = do_bridge_resource_split(&(temp_resources.io_head), 0x1000);
-
- /* Check if we were able to split something off */
- if (io_node) {
- /* First use the temporary node to store information for the board */
- hold_IO_node->length = io_node->base - hold_IO_node->base;
-
- /* If we used any, add it to the board's list */
- if (hold_IO_node->length) {
- hold_IO_node->next = func->io_head;
- func->io_head = hold_IO_node;
-
- RES_CHECK(io_node->base - 1, 8);
- temp_byte = (u8)((io_node->base - 1) >> 8);
- rc = pci_bus_write_config_byte (pci_bus, devfn, PCI_IO_LIMIT, temp_byte);
-
- return_resource(&(resources->io_head), io_node);
- } else {
- /* it doesn't need any IO */
- temp_byte = 0x00;
- rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_IO_LIMIT, temp_byte);
-
- return_resource(&(resources->io_head), io_node);
- kfree(hold_IO_node);
- }
- } else {
- /* it used most of the range */
- hold_IO_node->next = func->io_head;
- func->io_head = hold_IO_node;
- }
- } else if (hold_IO_node) {
- /* it used the whole range */
- hold_IO_node->next = func->io_head;
- func->io_head = hold_IO_node;
- }
-
- /* If we have memory space available and there is some left,
- * return the unused portion
- */
- if (hold_mem_node && temp_resources.mem_head) {
- mem_node = do_pre_bridge_resource_split(&(temp_resources.mem_head), &hold_mem_node, 0x100000L);
-
- /* Check if we were able to split something off */
- if (mem_node) {
- hold_mem_node->base = mem_node->base + mem_node->length;
-
- RES_CHECK(hold_mem_node->base, 16);
- temp_word = (u16)((hold_mem_node->base) >> 16);
- rc = pci_bus_write_config_word (pci_bus, devfn, PCI_MEMORY_BASE, temp_word);
-
- return_resource(&(resources->mem_head), mem_node);
- }
-
- mem_node = do_bridge_resource_split(&(temp_resources.mem_head), 0x100000L);
-
- /* Check if we were able to split something off */
- if (mem_node) {
- /* First use the temporary node to store information for the board */
- hold_mem_node->length = mem_node->base - hold_mem_node->base;
-
- if (hold_mem_node->length) {
- hold_mem_node->next = func->mem_head;
- func->mem_head = hold_mem_node;
-
- /* configure end address */
- RES_CHECK(mem_node->base - 1, 16);
- temp_word = (u16)((mem_node->base - 1) >> 16);
- rc = pci_bus_write_config_word (pci_bus, devfn, PCI_MEMORY_LIMIT, temp_word);
-
- /* Return unused resources to the pool */
- return_resource(&(resources->mem_head), mem_node);
- } else {
- /* it doesn't need any Mem */
- temp_word = 0x0000;
- rc = pci_bus_write_config_word (pci_bus, devfn, PCI_MEMORY_LIMIT, temp_word);
-
- return_resource(&(resources->mem_head), mem_node);
- kfree(hold_mem_node);
- }
- } else {
- /* it used most of the range */
- hold_mem_node->next = func->mem_head;
- func->mem_head = hold_mem_node;
- }
- } else if (hold_mem_node) {
- /* it used the whole range */
- hold_mem_node->next = func->mem_head;
- func->mem_head = hold_mem_node;
- }
-
- /* If we have prefetchable memory space available and there is some
- * left at the end, return the unused portion
- */
- if (hold_p_mem_node && temp_resources.p_mem_head) {
- p_mem_node = do_pre_bridge_resource_split(&(temp_resources.p_mem_head),
- &hold_p_mem_node, 0x100000L);
-
- /* Check if we were able to split something off */
- if (p_mem_node) {
- hold_p_mem_node->base = p_mem_node->base + p_mem_node->length;
-
- RES_CHECK(hold_p_mem_node->base, 16);
- temp_word = (u16)((hold_p_mem_node->base) >> 16);
- rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_BASE, temp_word);
-
- return_resource(&(resources->p_mem_head), p_mem_node);
- }
-
- p_mem_node = do_bridge_resource_split(&(temp_resources.p_mem_head), 0x100000L);
-
- /* Check if we were able to split something off */
- if (p_mem_node) {
- /* First use the temporary node to store information for the board */
- hold_p_mem_node->length = p_mem_node->base - hold_p_mem_node->base;
-
- /* If we used any, add it to the board's list */
- if (hold_p_mem_node->length) {
- hold_p_mem_node->next = func->p_mem_head;
- func->p_mem_head = hold_p_mem_node;
-
- RES_CHECK(p_mem_node->base - 1, 16);
- temp_word = (u16)((p_mem_node->base - 1) >> 16);
- rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, temp_word);
-
- return_resource(&(resources->p_mem_head), p_mem_node);
- } else {
- /* it doesn't need any PMem */
- temp_word = 0x0000;
- rc = pci_bus_write_config_word (pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, temp_word);
-
- return_resource(&(resources->p_mem_head), p_mem_node);
- kfree(hold_p_mem_node);
- }
- } else {
- /* it used the most of the range */
- hold_p_mem_node->next = func->p_mem_head;
- func->p_mem_head = hold_p_mem_node;
- }
- } else if (hold_p_mem_node) {
- /* it used the whole range */
- hold_p_mem_node->next = func->p_mem_head;
- func->p_mem_head = hold_p_mem_node;
- }
-
- /* We should be configuring an IRQ and the bridge's base address
- * registers if it needs them. Although we have never seen such
- * a device
- */
-
- pciehprm_enable_card(ctrl, func, PCI_HEADER_TYPE_BRIDGE);
-
- dbg("PCI Bridge Hot-Added s:b:d:f(%02x:%02x:%02x:%02x)\n", ctrl->seg, func->bus, func->device, func->function);
-
- return rc;
+ ret = remove_board(p_slot);
+ update_slot_info(p_slot);
+ return ret;
}
-/**
- * configure_new_function - Configures the PCI header information of one device
- *
- * @ctrl: pointer to controller structure
- * @func: pointer to function structure
- * @behind_bridge: 1 if this is a recursive call, 0 if not
- * @resources: pointer to set of resource lists
- *
- * Calls itself recursively for bridged devices.
- * Returns 0 if success
- *
- */
-static int
-configure_new_function(struct controller *ctrl, struct pci_func *func,
- u8 behind_bridge, struct resource_lists *resources,
- u8 bridge_bus, u8 bridge_dev)
-{
- int cloop;
- u8 temp_byte;
- u8 class_code;
- u32 rc;
- u32 temp_register;
- u32 base;
- unsigned int devfn;
- struct pci_resource *mem_node;
- struct pci_resource *io_node;
- struct pci_bus lpci_bus, *pci_bus;
-
- memcpy(&lpci_bus, ctrl->pci_dev->subordinate, sizeof(lpci_bus));
- pci_bus = &lpci_bus;
- pci_bus->number = func->bus;
- devfn = PCI_DEVFN(func->device, func->function);
-
- /* Check for Bridge */
- rc = pci_bus_read_config_byte(pci_bus, devfn, PCI_HEADER_TYPE, &temp_byte);
- if (rc)
- return rc;
- dbg("%s: bus %x dev %x func %x temp_byte = %x\n", __FUNCTION__,
- func->bus, func->device, func->function, temp_byte);
-
- if ((temp_byte & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { /* PCI-PCI Bridge */
- rc = configure_new_bridge(ctrl, func, behind_bridge, resources,
- pci_bus);
-
- if (rc)
- return rc;
- } else if ((temp_byte & 0x7F) == PCI_HEADER_TYPE_NORMAL) {
- /* Standard device */
- u64 base64;
- rc = pci_bus_read_config_byte(pci_bus, devfn, 0x0B, &class_code);
-
- if (class_code == PCI_BASE_CLASS_DISPLAY)
- return DEVICE_TYPE_NOT_SUPPORTED;
-
- /* Figure out IO and memory needs */
- for (cloop = PCI_BASE_ADDRESS_0; cloop <= PCI_BASE_ADDRESS_5; cloop += 4) {
- temp_register = 0xFFFFFFFF;
-
- rc = pci_bus_write_config_dword (pci_bus, devfn, cloop, temp_register);
- rc = pci_bus_read_config_dword(pci_bus, devfn, cloop, &temp_register);
- dbg("Bar[%x]=0x%x on bus:dev:func(0x%x:%x:%x)\n", cloop, temp_register,
- func->bus, func->device, func->function);
-
- if (!temp_register)
- continue;
-
- base64 = 0L;
- if (temp_register & PCI_BASE_ADDRESS_SPACE_IO) {
- /* Map IO */
-
- /* set base = amount of IO space */
- base = temp_register & 0xFFFFFFFC;
- base = ~base + 1;
-
- dbg("NEED IO length(0x%x)\n", base);
- io_node = get_io_resource(&(resources->io_head),(ulong)base);
-
- /* allocate the resource to the board */
- if (io_node) {
- dbg("Got IO base=0x%x(length=0x%x)\n", io_node->base, io_node->length);
- base = (u32)io_node->base;
- io_node->next = func->io_head;
- func->io_head = io_node;
- } else {
- err("Got NO IO resource(length=0x%x)\n", base);
- return -ENOMEM;
- }
- } else { /* map MEM */
- int prefetchable = 1;
- struct pci_resource **res_node = &func->p_mem_head;
- char *res_type_str = "PMEM";
- u32 temp_register2;
-
- if (!(temp_register & PCI_BASE_ADDRESS_MEM_PREFETCH)) {
- prefetchable = 0;
- res_node = &func->mem_head;
- res_type_str++;
- }
-
- base = temp_register & 0xFFFFFFF0;
- base = ~base + 1;
-
- switch (temp_register & PCI_BASE_ADDRESS_MEM_TYPE_MASK) {
- case PCI_BASE_ADDRESS_MEM_TYPE_32:
- dbg("NEED 32 %s bar=0x%x(length=0x%x)\n", res_type_str, temp_register, base);
-
- if (prefetchable && resources->p_mem_head)
- mem_node=get_resource(&(resources->p_mem_head), (ulong)base);
- else {
- if (prefetchable)
- dbg("using MEM for PMEM\n");
- mem_node = get_resource(&(resources->mem_head), (ulong)base);
- }
-
- /* allocate the resource to the board */
- if (mem_node) {
- base = (u32)mem_node->base;
- mem_node->next = *res_node;
- *res_node = mem_node;
- dbg("Got 32 %s base=0x%x(length=0x%x)\n", res_type_str, mem_node->base,
- mem_node->length);
- } else {
- err("Got NO 32 %s resource(length=0x%x)\n", res_type_str, base);
- return -ENOMEM;
- }
- break;
- case PCI_BASE_ADDRESS_MEM_TYPE_64:
- rc = pci_bus_read_config_dword(pci_bus, devfn, cloop+4, &temp_register2);
- dbg("NEED 64 %s bar=0x%x:%x(length=0x%x)\n", res_type_str, temp_register2,
- temp_register, base);
-
- if (prefetchable && resources->p_mem_head)
- mem_node = get_resource(&(resources->p_mem_head), (ulong)base);
- else {
- if (prefetchable)
- dbg("using MEM for PMEM\n");
- mem_node = get_resource(&(resources->mem_head), (ulong)base);
- }
-
- /* allocate the resource to the board */
- if (mem_node) {
- base64 = mem_node->base;
- mem_node->next = *res_node;
- *res_node = mem_node;
- dbg("Got 64 %s base=0x%x:%x(length=%x)\n", res_type_str, (u32)(base64 >> 32),
- (u32)base64, mem_node->length);
- } else {
- err("Got NO 64 %s resource(length=0x%x)\n", res_type_str, base);
- return -ENOMEM;
- }
- break;
- default:
- dbg("reserved BAR type=0x%x\n", temp_register);
- break;
- }
-
- }
-
- if (base64) {
- rc = pci_bus_write_config_dword(pci_bus, devfn, cloop, (u32)base64);
- cloop += 4;
- base64 >>= 32;
-
- if (base64) {
- dbg("%s: high dword of base64(0x%x) set to 0\n", __FUNCTION__, (u32)base64);
- base64 = 0x0L;
- }
-
- rc = pci_bus_write_config_dword(pci_bus, devfn, cloop, (u32)base64);
- } else {
- rc = pci_bus_write_config_dword(pci_bus, devfn, cloop, base);
- }
- } /* End of base register loop */
-
- /* disable ROM base Address */
- rc = pci_bus_write_config_dword (pci_bus, devfn, PCI_ROM_ADDRESS, 0x00);
-
- /* Set HP parameters (Cache Line Size, Latency Timer) */
- rc = pciehprm_set_hpp(ctrl, func, PCI_HEADER_TYPE_NORMAL);
- if (rc)
- return rc;
-
- pciehprm_enable_card(ctrl, func, PCI_HEADER_TYPE_NORMAL);
-
- dbg("PCI function Hot-Added s:b:d:f(%02x:%02x:%02x:%02x)\n", ctrl->seg, func->bus, func->device,
- func->function);
- } /* End of Not-A-Bridge else */
- else {
- /* It's some strange type of PCI adapter (Cardbus?) */
- return DEVICE_TYPE_NOT_SUPPORTED;
- }
-
- func->configured = 1;
-
- return 0;
-}
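The BAR probing loop deleted from configure_new_function() uses the standard sizing trick: write all ones to a base address register, read it back, mask off the type bits, and two's-complement the result to get the decoded size. A self-contained sketch of that arithmetic on an example read-back value (the value itself is invented):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Pretend value read back after writing 0xFFFFFFFF to a memory BAR:
	 * 32-bit, prefetchable, decoding 128 KB. */
	uint32_t readback = 0xFFFE0008;

	if (readback & 0x1) {				/* bit 0: I/O space BAR */
		uint32_t size = ~(readback & ~0x3u) + 1;
		printf("I/O BAR, size 0x%x\n", size);
	} else {
		uint32_t size = ~(readback & ~0xFu) + 1;
		int is_64bit  = ((readback >> 1) & 0x3) == 0x2;	/* type bits == 10b */
		int prefetch  = (readback >> 3) & 0x1;		/* bit 3: prefetchable */
		printf("MEM BAR, size 0x%x, 64-bit=%d, prefetchable=%d\n",
		       size, is_64bit, prefetch);
	}
	return 0;
}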
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 7a0e27f0e06..4a3cecca012 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -27,16 +27,10 @@
*
*/
-#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/vmalloc.h>
-#include <linux/interrupt.h>
-#include <linux/spinlock.h>
#include <linux/pci.h>
-#include <asm/system.h>
#include "../pci.h"
#include "pciehp.h"
@@ -217,23 +211,6 @@ static int pcie_cap_base = 0; /* Base of the PCI Express capability item struct
#define MRL_STATE 0x0020
#define PRSN_STATE 0x0040
-struct php_ctlr_state_s {
- struct php_ctlr_state_s *pnext;
- struct pci_dev *pci_dev;
- unsigned int irq;
- unsigned long flags; /* spinlock's */
- u32 slot_device_offset;
- u32 num_slots;
- struct timer_list int_poll_timer; /* Added for poll event */
- php_intr_callback_t attention_button_callback;
- php_intr_callback_t switch_change_callback;
- php_intr_callback_t presence_change_callback;
- php_intr_callback_t power_fault_callback;
- void *callback_instance_id;
- struct ctrl_reg *creg; /* Ptr to controller register space */
-};
-
-
static spinlock_t hpc_event_lock;
DEFINE_DBG_BUFFER /* Debug string buffer for entire HPC defined here */
@@ -297,7 +274,6 @@ static int pcie_write_cmd(struct slot *slot, u16 cmd)
DBG_ENTER_ROUTINE
- dbg("%s : Enter\n", __FUNCTION__);
if (!php_ctlr) {
err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
return -1;
@@ -308,7 +284,6 @@ static int pcie_write_cmd(struct slot *slot, u16 cmd)
err("%s : hp_register_read_word SLOT_STATUS failed\n", __FUNCTION__);
return retval;
}
- dbg("%s : hp_register_read_word SLOT_STATUS %x\n", __FUNCTION__, slot_status);
if ((slot_status & CMD_COMPLETED) == CMD_COMPLETED ) {
/* After 1 sec and CMD_COMPLETED still not set, just proceed forward to issue the next command according to spec. Just print out the error message */
@@ -316,14 +291,11 @@ static int pcie_write_cmd(struct slot *slot, u16 cmd)
dbg("%s : CMD_COMPLETED not clear after 1 sec.\n", __FUNCTION__);
}
- dbg("%s: Before hp_register_write_word SLOT_CTRL %x\n", __FUNCTION__, cmd);
retval = hp_register_write_word(php_ctlr->pci_dev, SLOT_CTRL(slot->ctrl->cap_base), cmd | CMD_CMPL_INTR_ENABLE);
if (retval) {
err("%s : hp_register_write_word SLOT_CTRL failed\n", __FUNCTION__);
return retval;
}
- dbg("%s : hp_register_write_word SLOT_CTRL %x\n", __FUNCTION__, cmd | CMD_CMPL_INTR_ENABLE);
- dbg("%s : Exit\n", __FUNCTION__);
DBG_LEAVE_ROUTINE
return retval;
@@ -509,7 +481,6 @@ static int hpc_query_power_fault(struct slot * slot)
u16 slot_status;
u8 pwr_fault;
int retval = 0;
- u8 status;
DBG_ENTER_ROUTINE
@@ -521,15 +492,13 @@ static int hpc_query_power_fault(struct slot * slot)
retval = hp_register_read_word(php_ctlr->pci_dev, SLOT_STATUS(slot->ctrl->cap_base), slot_status);
if (retval) {
- err("%s : hp_register_read_word SLOT_STATUS failed\n", __FUNCTION__);
+ err("%s : Cannot check for power fault\n", __FUNCTION__);
return retval;
}
pwr_fault = (u8)((slot_status & PWR_FAULT_DETECTED) >> 1);
- status = (pwr_fault != 1) ? 1 : 0;
DBG_LEAVE_ROUTINE
- /* Note: Logic 0 => fault */
- return status;
+ return pwr_fault;
}
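The hpc_query_power_fault() change above drops the inverted status variable and returns the Power Fault Detected bit directly. A sketch of that bit extraction on a made-up Slot Status value (in the PCI Express Slot Status register, Power Fault Detected is bit 1):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t slot_status = 0x0042;	/* example: presence detect state + power fault */
	uint8_t  pwr_fault   = (uint8_t)((slot_status & 0x0002) >> 1);

	printf("power fault detected: %u\n", pwr_fault);	/* 1 => fault */
	return 0;
}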
static int hpc_set_attention_status(struct slot *slot, u8 value)
@@ -539,7 +508,8 @@ static int hpc_set_attention_status(struct slot *slot, u8 value)
u16 slot_ctrl;
int rc = 0;
- dbg("%s: \n", __FUNCTION__);
+ DBG_ENTER_ROUTINE
+
if (!php_ctlr) {
err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
return -1;
@@ -555,7 +525,6 @@ static int hpc_set_attention_status(struct slot *slot, u8 value)
err("%s : hp_register_read_word SLOT_CTRL failed\n", __FUNCTION__);
return rc;
}
- dbg("%s : hp_register_read_word SLOT_CTRL %x\n", __FUNCTION__, slot_ctrl);
switch (value) {
case 0 : /* turn off */
@@ -576,6 +545,7 @@ static int hpc_set_attention_status(struct slot *slot, u8 value)
pcie_write_cmd(slot, slot_cmd);
dbg("%s: SLOT_CTRL %x write cmd %x\n", __FUNCTION__, SLOT_CTRL(slot->ctrl->cap_base), slot_cmd);
+ DBG_LEAVE_ROUTINE
return rc;
}
@@ -587,7 +557,8 @@ static void hpc_set_green_led_on(struct slot *slot)
u16 slot_ctrl;
int rc = 0;
- dbg("%s: \n", __FUNCTION__);
+ DBG_ENTER_ROUTINE
+
if (!php_ctlr) {
err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
return ;
@@ -604,7 +575,6 @@ static void hpc_set_green_led_on(struct slot *slot)
err("%s : hp_register_read_word SLOT_CTRL failed\n", __FUNCTION__);
return;
}
- dbg("%s : hp_register_read_word SLOT_CTRL %x\n", __FUNCTION__, slot_ctrl);
slot_cmd = (slot_ctrl & ~PWR_LED_CTRL) | 0x0100;
if (!pciehp_poll_mode)
slot_cmd = slot_cmd | HP_INTR_ENABLE;
@@ -612,6 +582,7 @@ static void hpc_set_green_led_on(struct slot *slot)
pcie_write_cmd(slot, slot_cmd);
dbg("%s: SLOT_CTRL %x write cmd %x\n",__FUNCTION__, SLOT_CTRL(slot->ctrl->cap_base), slot_cmd);
+ DBG_LEAVE_ROUTINE
return;
}
@@ -622,7 +593,8 @@ static void hpc_set_green_led_off(struct slot *slot)
u16 slot_ctrl;
int rc = 0;
- dbg("%s: \n", __FUNCTION__);
+ DBG_ENTER_ROUTINE
+
if (!php_ctlr) {
err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
return ;
@@ -639,7 +611,6 @@ static void hpc_set_green_led_off(struct slot *slot)
err("%s : hp_register_read_word SLOT_CTRL failed\n", __FUNCTION__);
return;
}
- dbg("%s : hp_register_read_word SLOT_CTRL %x\n", __FUNCTION__, slot_ctrl);
slot_cmd = (slot_ctrl & ~PWR_LED_CTRL) | 0x0300;
@@ -648,6 +619,7 @@ static void hpc_set_green_led_off(struct slot *slot)
pcie_write_cmd(slot, slot_cmd);
dbg("%s: SLOT_CTRL %x write cmd %x\n", __FUNCTION__, SLOT_CTRL(slot->ctrl->cap_base), slot_cmd);
+ DBG_LEAVE_ROUTINE
return;
}
@@ -658,7 +630,8 @@ static void hpc_set_green_led_blink(struct slot *slot)
u16 slot_ctrl;
int rc = 0;
- dbg("%s: \n", __FUNCTION__);
+ DBG_ENTER_ROUTINE
+
if (!php_ctlr) {
err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
return ;
@@ -675,7 +648,6 @@ static void hpc_set_green_led_blink(struct slot *slot)
err("%s : hp_register_read_word SLOT_CTRL failed\n", __FUNCTION__);
return;
}
- dbg("%s : hp_register_read_word SLOT_CTRL %x\n", __FUNCTION__, slot_ctrl);
slot_cmd = (slot_ctrl & ~PWR_LED_CTRL) | 0x0200;
@@ -684,6 +656,7 @@ static void hpc_set_green_led_blink(struct slot *slot)
pcie_write_cmd(slot, slot_cmd);
dbg("%s: SLOT_CTRL %x write cmd %x\n",__FUNCTION__, SLOT_CTRL(slot->ctrl->cap_base), slot_cmd);
+ DBG_LEAVE_ROUTINE
return;
}
@@ -780,7 +753,6 @@ static int hpc_power_on_slot(struct slot * slot)
int retval = 0;
DBG_ENTER_ROUTINE
- dbg("%s: \n", __FUNCTION__);
if (!php_ctlr) {
err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
@@ -799,8 +771,6 @@ static int hpc_power_on_slot(struct slot * slot)
err("%s : hp_register_read_word SLOT_CTRL failed\n", __FUNCTION__);
return retval;
}
- dbg("%s: SLOT_CTRL %x, value read %xn", __FUNCTION__, SLOT_CTRL(slot->ctrl->cap_base),
- slot_ctrl);
slot_cmd = (slot_ctrl & ~PWR_CTRL) | POWER_ON;
@@ -829,7 +799,6 @@ static int hpc_power_off_slot(struct slot * slot)
int retval = 0;
DBG_ENTER_ROUTINE
- dbg("%s: \n", __FUNCTION__);
if (!php_ctlr) {
err("%s: Invalid HPC controller handle!\n", __FUNCTION__);
@@ -848,8 +817,6 @@ static int hpc_power_off_slot(struct slot * slot)
err("%s : hp_register_read_word SLOT_CTRL failed\n", __FUNCTION__);
return retval;
}
- dbg("%s: SLOT_CTRL %x, value read %x\n", __FUNCTION__, SLOT_CTRL(slot->ctrl->cap_base),
- slot_ctrl);
slot_cmd = (slot_ctrl & ~PWR_CTRL) | POWER_OFF;
@@ -924,7 +891,6 @@ static irqreturn_t pcie_isr(int IRQ, void *dev_id, struct pt_regs *regs)
return IRQ_NONE;
}
- dbg("%s: Set Mask Hot-plug Interrupt Enable\n", __FUNCTION__);
dbg("%s: hp_register_read_word SLOT_CTRL with value %x\n", __FUNCTION__, temp_word);
temp_word = (temp_word & ~HP_INTR_ENABLE & ~CMD_CMPL_INTR_ENABLE) | 0x00;
@@ -933,7 +899,6 @@ static irqreturn_t pcie_isr(int IRQ, void *dev_id, struct pt_regs *regs)
err("%s : hp_register_write_word SLOT_CTRL failed\n", __FUNCTION__);
return IRQ_NONE;
}
- dbg("%s: hp_register_write_word SLOT_CTRL with value %x\n", __FUNCTION__, temp_word);
rc = hp_register_read_word(php_ctlr->pci_dev, SLOT_STATUS(ctrl->cap_base), slot_status);
if (rc) {
@@ -949,14 +914,12 @@ static irqreturn_t pcie_isr(int IRQ, void *dev_id, struct pt_regs *regs)
err("%s : hp_register_write_word SLOT_STATUS failed\n", __FUNCTION__);
return IRQ_NONE;
}
- dbg("%s: hp_register_write_word SLOT_STATUS with value %x\n", __FUNCTION__, temp_word);
}
if (intr_loc & CMD_COMPLETED) {
/*
* Command Complete Interrupt Pending
*/
- dbg("%s: In Command Complete Interrupt Pending\n", __FUNCTION__);
wake_up_interruptible(&ctrl->queue);
}
@@ -989,7 +952,6 @@ static irqreturn_t pcie_isr(int IRQ, void *dev_id, struct pt_regs *regs)
}
dbg("%s: Unmask Hot-plug Interrupt Enable\n", __FUNCTION__);
- dbg("%s: hp_register_read_word SLOT_CTRL with value %x\n", __FUNCTION__, temp_word);
temp_word = (temp_word & ~HP_INTR_ENABLE) | HP_INTR_ENABLE;
rc = hp_register_write_word(php_ctlr->pci_dev, SLOT_CTRL(ctrl->cap_base), temp_word);
@@ -997,14 +959,12 @@ static irqreturn_t pcie_isr(int IRQ, void *dev_id, struct pt_regs *regs)
err("%s : hp_register_write_word SLOT_CTRL failed\n", __FUNCTION__);
return IRQ_NONE;
}
- dbg("%s: hp_register_write_word SLOT_CTRL with value %x\n", __FUNCTION__, temp_word);
rc = hp_register_read_word(php_ctlr->pci_dev, SLOT_STATUS(ctrl->cap_base), slot_status);
if (rc) {
err("%s : hp_register_read_word SLOT_STATUS failed\n", __FUNCTION__);
return IRQ_NONE;
}
- dbg("%s: hp_register_read_word SLOT_STATUS with value %x\n", __FUNCTION__, slot_status);
/* Clear command complete interrupt caused by this write */
temp_word = 0x1F;
@@ -1248,12 +1208,7 @@ static struct hpc_ops pciehp_hpc_ops = {
.check_lnk_status = hpc_check_lnk_status,
};
-int pcie_init(struct controller * ctrl,
- struct pcie_device *dev,
- php_intr_callback_t attention_button_callback,
- php_intr_callback_t switch_change_callback,
- php_intr_callback_t presence_change_callback,
- php_intr_callback_t power_fault_callback)
+int pcie_init(struct controller * ctrl, struct pcie_device *dev)
{
struct php_ctlr_state_s *php_ctlr, *p;
void *instance_id = ctrl;
@@ -1282,8 +1237,8 @@ int pcie_init(struct controller * ctrl,
pdev = dev->port;
php_ctlr->pci_dev = pdev; /* save pci_dev in context */
- dbg("%s: pdev->vendor %x pdev->device %x\n", __FUNCTION__,
- pdev->vendor, pdev->device);
+ dbg("%s: hotplug controller vendor id 0x%x device id 0x%x\n",
+ __FUNCTION__, pdev->vendor, pdev->device);
saved_cap_base = pcie_cap_base;
@@ -1340,8 +1295,6 @@ int pcie_init(struct controller * ctrl,
first = 0;
}
- dbg("pdev = %p: b:d:f:irq=0x%x:%x:%x:%x\n", pdev, pdev->bus->number,
- PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), dev->irq);
for ( rc = 0; rc < DEVICE_COUNT_RESOURCE; rc++)
if (pci_resource_len(pdev, rc) > 0)
dbg("pci resource[%d] start=0x%lx(len=0x%lx)\n", rc,
@@ -1359,13 +1312,12 @@ int pcie_init(struct controller * ctrl,
/* find the IRQ */
php_ctlr->irq = dev->irq;
- dbg("HPC interrupt = %d\n", php_ctlr->irq);
/* Save interrupt callback info */
- php_ctlr->attention_button_callback = attention_button_callback;
- php_ctlr->switch_change_callback = switch_change_callback;
- php_ctlr->presence_change_callback = presence_change_callback;
- php_ctlr->power_fault_callback = power_fault_callback;
+ php_ctlr->attention_button_callback = pciehp_handle_attention_button;
+ php_ctlr->switch_change_callback = pciehp_handle_switch_change;
+ php_ctlr->presence_change_callback = pciehp_handle_presence_change;
+ php_ctlr->power_fault_callback = pciehp_handle_power_fault;
php_ctlr->callback_instance_id = instance_id;
/* return PCI Controller Info */
@@ -1387,15 +1339,12 @@ int pcie_init(struct controller * ctrl,
err("%s : hp_register_write_word SLOT_CTRL failed\n", __FUNCTION__);
goto abort_free_ctlr;
}
- dbg("%s : Mask HPIE hp_register_write_word SLOT_CTRL %x\n", __FUNCTION__, temp_word);
rc = hp_register_read_word(php_ctlr->pci_dev, SLOT_STATUS(ctrl->cap_base), slot_status);
if (rc) {
err("%s : hp_register_read_word SLOT_STATUS failed\n", __FUNCTION__);
goto abort_free_ctlr;
}
- dbg("%s: Mask HPIE SLOT_STATUS offset %x reads slot_status %x\n", __FUNCTION__, SLOT_STATUS(ctrl->cap_base)
- , slot_status);
temp_word = 0x1F; /* Clear all events */
rc = hp_register_write_word(php_ctlr->pci_dev, SLOT_STATUS(ctrl->cap_base), temp_word);
@@ -1403,7 +1352,6 @@ int pcie_init(struct controller * ctrl,
err("%s : hp_register_write_word SLOT_STATUS failed\n", __FUNCTION__);
goto abort_free_ctlr;
}
- dbg("%s: SLOT_STATUS offset %x writes slot_status %x\n", __FUNCTION__, SLOT_STATUS(ctrl->cap_base), temp_word);
if (pciehp_poll_mode) {/* Install interrupt polling code */
/* Install and start the interrupt polling timer */
@@ -1419,13 +1367,14 @@ int pcie_init(struct controller * ctrl,
}
}
+ dbg("pciehp ctrl b:d:f:irq=0x%x:%x:%x:%x\n", pdev->bus->number,
+ PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn), dev->irq);
+
rc = hp_register_read_word(pdev, SLOT_CTRL(ctrl->cap_base), temp_word);
if (rc) {
err("%s : hp_register_read_word SLOT_CTRL failed\n", __FUNCTION__);
goto abort_free_ctlr;
}
- dbg("%s: SLOT_CTRL %x value read %x\n", __FUNCTION__, SLOT_CTRL(ctrl->cap_base), temp_word);
- dbg("%s: slot_cap %x\n", __FUNCTION__, slot_cap);
intr_enable = intr_enable | PRSN_DETECT_ENABLE;
@@ -1445,7 +1394,6 @@ int pcie_init(struct controller * ctrl,
} else {
temp_word = (temp_word & ~HP_INTR_ENABLE) | HP_INTR_ENABLE;
}
- dbg("%s: temp_word %x\n", __FUNCTION__, temp_word);
/* Unmask Hot-plug Interrupt Enable for the interrupt notification mechanism case */
rc = hp_register_write_word(pdev, SLOT_CTRL(ctrl->cap_base), temp_word);
@@ -1453,14 +1401,11 @@ int pcie_init(struct controller * ctrl,
err("%s : hp_register_write_word SLOT_CTRL failed\n", __FUNCTION__);
goto abort_free_ctlr;
}
- dbg("%s : Unmask HPIE hp_register_write_word SLOT_CTRL with %x\n", __FUNCTION__, temp_word);
rc = hp_register_read_word(php_ctlr->pci_dev, SLOT_STATUS(ctrl->cap_base), slot_status);
if (rc) {
err("%s : hp_register_read_word SLOT_STATUS failed\n", __FUNCTION__);
goto abort_free_ctlr;
}
- dbg("%s: Unmask HPIE SLOT_STATUS offset %x reads slot_status %x\n", __FUNCTION__,
- SLOT_STATUS(ctrl->cap_base), slot_status);
temp_word = 0x1F; /* Clear all events */
rc = hp_register_write_word(php_ctlr->pci_dev, SLOT_STATUS(ctrl->cap_base), temp_word);
@@ -1468,8 +1413,16 @@ int pcie_init(struct controller * ctrl,
err("%s : hp_register_write_word SLOT_STATUS failed\n", __FUNCTION__);
goto abort_free_ctlr;
}
- dbg("%s: SLOT_STATUS offset %x writes slot_status %x\n", __FUNCTION__, SLOT_STATUS(ctrl->cap_base), temp_word);
+ if (pciehp_force) {
+ dbg("Bypassing BIOS check for pciehp use on %s\n",
+ pci_name(ctrl->pci_dev));
+ } else {
+ rc = pciehp_get_hp_hw_control_from_firmware(ctrl->pci_dev);
+ if (rc)
+ goto abort_free_ctlr;
+ }
+
/* Add this HPC instance into the HPC list */
spin_lock(&list_lock);
if (php_ctlr_list_head == 0) {
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index ff17d8e07e9..647673a7d22 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -27,801 +27,111 @@
*
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
-#include <linux/slab.h>
-#include <linux/workqueue.h>
-#include <linux/proc_fs.h>
#include <linux/pci.h>
#include "../pci.h"
#include "pciehp.h"
-#ifndef CONFIG_IA64
-#include "../../../arch/i386/pci/pci.h" /* horrible hack showing how processor dependant we are... */
-#endif
-int pciehp_configure_device (struct controller* ctrl, struct pci_func* func)
+int pciehp_configure_device(struct slot *p_slot)
{
- unsigned char bus;
- struct pci_bus *child;
- int num;
-
- if (func->pci_dev == NULL)
- func->pci_dev = pci_find_slot(func->bus, PCI_DEVFN(func->device, func->function));
-
- /* Still NULL ? Well then scan for it ! */
- if (func->pci_dev == NULL) {
- dbg("%s: pci_dev still null. do pci_scan_slot\n", __FUNCTION__);
-
- num = pci_scan_slot(ctrl->pci_dev->subordinate, PCI_DEVFN(func->device, func->function));
-
- if (num)
- pci_bus_add_devices(ctrl->pci_dev->subordinate);
-
- func->pci_dev = pci_find_slot(func->bus, PCI_DEVFN(func->device, func->function));
- if (func->pci_dev == NULL) {
- dbg("ERROR: pci_dev still null\n");
- return 0;
- }
+ struct pci_dev *dev;
+ struct pci_bus *parent = p_slot->ctrl->pci_dev->subordinate;
+ int num, fn;
+
+ dev = pci_find_slot(p_slot->bus, PCI_DEVFN(p_slot->device, 0));
+ if (dev) {
+ err("Device %s already exists at %x:%x, cannot hot-add\n",
+ pci_name(dev), p_slot->bus, p_slot->device);
+ return -EINVAL;
}
- if (func->pci_dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
- pci_read_config_byte(func->pci_dev, PCI_SECONDARY_BUS, &bus);
- child = pci_add_new_bus(func->pci_dev->bus, (func->pci_dev), bus);
- pci_do_scan_bus(child);
+ num = pci_scan_slot(parent, PCI_DEVFN(p_slot->device, 0));
+ if (num == 0) {
+ err("No new device found\n");
+ return -ENODEV;
+ }
+ for (fn = 0; fn < 8; fn++) {
+ if (!(dev = pci_find_slot(p_slot->bus,
+ PCI_DEVFN(p_slot->device, fn))))
+ continue;
+ if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
+ err("Cannot hot-add display device %s\n",
+ pci_name(dev));
+ continue;
+ }
+ if ((dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) ||
+ (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)) {
+ /* Find an unused bus number for the new bridge */
+ struct pci_bus *child;
+ unsigned char busnr, start = parent->secondary;
+ unsigned char end = parent->subordinate;
+ for (busnr = start; busnr <= end; busnr++) {
+ if (!pci_find_bus(pci_domain_nr(parent),
+ busnr))
+ break;
+ }
+ if (busnr >= end) {
+ err("No free bus for hot-added bridge\n");
+ continue;
+ }
+ child = pci_add_new_bus(parent, dev, busnr);
+ if (!child) {
+ err("Cannot add new bus for %s\n",
+ pci_name(dev));
+ continue;
+ }
+ child->subordinate = pci_do_scan_bus(child);
+ pci_bus_size_bridges(child);
+ }
+ /* TBD: program firmware provided _HPP values */
+ /* program_fw_provided_values(dev); */
}
+ pci_bus_assign_resources(parent);
+ pci_bus_add_devices(parent);
+ pci_enable_bridges(parent);
return 0;
}
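pciehp_configure_device() above walks functions 0-7 of the slot with PCI_DEVFN(). The devfn byte simply packs the 5-bit device number and the 3-bit function number; a small sketch of that encoding, with the macros redefined locally to mirror the kernel's PCI_DEVFN/PCI_SLOT/PCI_FUNC and an arbitrary example device number:

#include <stdio.h>

#define PCI_DEVFN(dev, fn)	((((dev) & 0x1f) << 3) | ((fn) & 0x07))
#define PCI_SLOT(devfn)		(((devfn) >> 3) & 0x1f)
#define PCI_FUNC(devfn)		((devfn) & 0x07)

int main(void)
{
	unsigned int dev = 0x02, fn;	/* example device (slot) number */

	for (fn = 0; fn < 8; fn++) {
		unsigned int devfn = PCI_DEVFN(dev, fn);
		printf("devfn 0x%02x -> device %u function %u\n",
		       devfn, PCI_SLOT(devfn), PCI_FUNC(devfn));
	}
	return 0;
}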
-
-int pciehp_unconfigure_device(struct pci_func* func)
+int pciehp_unconfigure_device(struct slot *p_slot)
{
int rc = 0;
int j;
- struct pci_bus *pbus;
+ u8 bctl = 0;
- dbg("%s: bus/dev/func = %x/%x/%x\n", __FUNCTION__, func->bus,
- func->device, func->function);
- pbus = func->pci_dev->bus;
+ dbg("%s: bus/dev = %x/%x\n", __FUNCTION__, p_slot->bus,
+ p_slot->device);
for (j=0; j<8 ; j++) {
- struct pci_dev* temp = pci_find_slot(func->bus,
- (func->device << 3) | j);
- if (temp) {
- pci_remove_bus_device(temp);
+ struct pci_dev* temp = pci_find_slot(p_slot->bus,
+ (p_slot->device << 3) | j);
+ if (!temp)
+ continue;
+ if ((temp->class >> 16) == PCI_BASE_CLASS_DISPLAY) {
+ err("Cannot remove display device %s\n",
+ pci_name(temp));
+ continue;
}
+ if (temp->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+ pci_read_config_byte(temp, PCI_BRIDGE_CONTROL, &bctl);
+ if (bctl & PCI_BRIDGE_CTL_VGA) {
+ err("Cannot remove display device %s\n",
+ pci_name(temp));
+ continue;
+ }
+ }
+ pci_remove_bus_device(temp);
}
/*
* Some PCI Express root ports require fixup after hot-plug operation.
*/
if (pcie_mch_quirk)
- pci_fixup_device(pci_fixup_final, pbus->self);
+ pci_fixup_device(pci_fixup_final, p_slot->ctrl->pci_dev);
return rc;
}
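The new pciehp_unconfigure_device() refuses to remove display devices and bridges with the VGA Enable bit set in their Bridge Control register. A user-space sketch of the same check, reading a device's config space through sysfs; the device path is only a placeholder:

#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	/* Placeholder sysfs path; substitute a real device address. */
	const char *path = "/sys/bus/pci/devices/0000:00:1c.0/config";
	uint8_t hdr_type = 0, bctl = 0;
	int fd = open(path, O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Header type at offset 0x0e; low 7 bits == 1 for a PCI-PCI bridge. */
	if (pread(fd, &hdr_type, 1, 0x0e) != 1) {
		perror("pread");
		close(fd);
		return 1;
	}
	if ((hdr_type & 0x7f) == 0x01) {
		/* Bridge Control at offset 0x3e; bit 3 is VGA Enable. */
		if (pread(fd, &bctl, 1, 0x3e) == 1 && (bctl & 0x08))
			puts("bridge routes VGA - not safe to hot-remove");
		else
			puts("bridge does not route VGA");
	} else {
		puts("not a PCI-PCI bridge");
	}
	close(fd);
	return 0;
}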
-/*
- * pciehp_set_irq
- *
- * @bus_num: bus number of PCI device
- * @dev_num: device number of PCI device
- * @slot: pointer to u8 where slot number will be returned
- */
-int pciehp_set_irq (u8 bus_num, u8 dev_num, u8 int_pin, u8 irq_num)
-{
-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_IO_APIC)
- int rc;
- u16 temp_word;
- struct pci_dev fakedev;
- struct pci_bus fakebus;
-
- fakedev.devfn = dev_num << 3;
- fakedev.bus = &fakebus;
- fakebus.number = bus_num;
- dbg("%s: dev %d, bus %d, pin %d, num %d\n",
- __FUNCTION__, dev_num, bus_num, int_pin, irq_num);
- rc = pcibios_set_irq_routing(&fakedev, int_pin - 0x0a, irq_num);
- dbg("%s: rc %d\n", __FUNCTION__, rc);
- if (!rc)
- return !rc;
-
- /* set the Edge Level Control Register (ELCR) */
- temp_word = inb(0x4d0);
- temp_word |= inb(0x4d1) << 8;
-
- temp_word |= 0x01 << irq_num;
-
- /* This should only be for x86 as it sets the Edge Level Control Register */
- outb((u8) (temp_word & 0xFF), 0x4d0);
- outb((u8) ((temp_word & 0xFF00) >> 8), 0x4d1);
-#endif
- return 0;
-}
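The deleted pciehp_set_irq() falls back to programming the legacy Edge/Level Control Register pair at I/O ports 0x4d0/0x4d1, where a set bit N marks IRQ N as level-triggered. A sketch of just the bit arithmetic, with example values and no actual port I/O (that needs iopl/ring 0):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int irq = 11;		/* example IRQ number */
	uint16_t elcr = 0x0400;		/* pretend current ELCR contents (IRQ 10 level) */

	elcr |= (uint16_t)(1u << irq);	/* mark this IRQ level-triggered too */

	/* Port 0x4d0 holds IRQs 0-7, port 0x4d1 holds IRQs 8-15. */
	printf("outb(0x%02x, 0x4d0); outb(0x%02x, 0x4d1);\n",
	       elcr & 0xff, (elcr >> 8) & 0xff);
	return 0;
}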
-
-/* More PCI configuration routines; this time centered around hotplug controller */
-
-
-/*
- * pciehp_save_config
- *
- * Reads configuration for all slots in a PCI bus and saves info.
- *
- * Note: For non-hot plug busses, the slot # saved is the device #
- *
- * returns 0 if success
- */
-int pciehp_save_config(struct controller *ctrl, int busnumber, int num_ctlr_slots, int first_device_num)
-{
- int rc;
- u8 class_code;
- u8 header_type;
- u32 ID;
- u8 secondary_bus;
- struct pci_func *new_slot;
- int sub_bus;
- int max_functions;
- int function;
- u8 DevError;
- int device = 0;
- int cloop = 0;
- int stop_it;
- int index;
- int is_hot_plug = num_ctlr_slots || first_device_num;
- struct pci_bus lpci_bus, *pci_bus;
- int FirstSupported, LastSupported;
-
- dbg("%s: Enter\n", __FUNCTION__);
-
- memcpy(&lpci_bus, ctrl->pci_dev->subordinate, sizeof(lpci_bus));
- pci_bus = &lpci_bus;
-
- dbg("%s: num_ctlr_slots = %d, first_device_num = %d\n", __FUNCTION__,
- num_ctlr_slots, first_device_num);
-
- /* Decide which slots are supported */
- if (is_hot_plug) {
- /*********************************
- * is_hot_plug is the slot mask
- *********************************/
- FirstSupported = first_device_num;
- LastSupported = FirstSupported + num_ctlr_slots - 1;
- } else {
- FirstSupported = 0;
- LastSupported = 0x1F;
- }
-
- dbg("FirstSupported = %d, LastSupported = %d\n", FirstSupported,
- LastSupported);
-
- /* Save PCI configuration space for all devices in supported slots */
- dbg("%s: pci_bus->number = %x\n", __FUNCTION__, pci_bus->number);
- pci_bus->number = busnumber;
- dbg("%s: bus = %x, dev = %x\n", __FUNCTION__, busnumber, device);
- for (device = FirstSupported; device <= LastSupported; device++) {
- ID = 0xFFFFFFFF;
- rc = pci_bus_read_config_dword(pci_bus, PCI_DEVFN(device, 0),
- PCI_VENDOR_ID, &ID);
-
- if (ID != 0xFFFFFFFF) { /* device in slot */
- dbg("%s: ID = %x\n", __FUNCTION__, ID);
- rc = pci_bus_read_config_byte(pci_bus, PCI_DEVFN(device, 0),
- 0x0B, &class_code);
- if (rc)
- return rc;
-
- rc = pci_bus_read_config_byte(pci_bus, PCI_DEVFN(device, 0),
- PCI_HEADER_TYPE, &header_type);
- if (rc)
- return rc;
-
- dbg("class_code = %x, header_type = %x\n", class_code, header_type);
-
- /* If multi-function device, set max_functions to 8 */
- if (header_type & 0x80)
- max_functions = 8;
- else
- max_functions = 1;
-
- function = 0;
-
- do {
- DevError = 0;
- dbg("%s: In do loop\n", __FUNCTION__);
-
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { /* P-P Bridge */
- /* Recurse the subordinate bus
- * get the subordinate bus number
- */
- rc = pci_bus_read_config_byte(pci_bus,
- PCI_DEVFN(device, function),
- PCI_SECONDARY_BUS, &secondary_bus);
- if (rc) {
- return rc;
- } else {
- sub_bus = (int) secondary_bus;
-
- /* Save secondary bus cfg spc with this recursive call. */
- rc = pciehp_save_config(ctrl, sub_bus, 0, 0);
- if (rc)
- return rc;
- }
- }
-
- index = 0;
- new_slot = pciehp_slot_find(busnumber, device, index++);
-
- dbg("%s: new_slot = %p bus %x dev %x fun %x\n",
- __FUNCTION__, new_slot, busnumber, device, index-1);
-
- while (new_slot && (new_slot->function != (u8) function)) {
- new_slot = pciehp_slot_find(busnumber, device, index++);
- dbg("%s: while loop, new_slot = %p bus %x dev %x fun %x\n",
- __FUNCTION__, new_slot, busnumber, device, index-1);
- }
- if (!new_slot) {
- /* Setup slot structure. */
- new_slot = pciehp_slot_create(busnumber);
- dbg("%s: if, new_slot = %p bus %x dev %x fun %x\n",
- __FUNCTION__, new_slot, busnumber, device, function);
-
- if (new_slot == NULL)
- return(1);
- }
-
- new_slot->bus = (u8) busnumber;
- new_slot->device = (u8) device;
- new_slot->function = (u8) function;
- new_slot->is_a_board = 1;
- new_slot->switch_save = 0x10;
- /* In case of unsupported board */
- new_slot->status = DevError;
- new_slot->pci_dev = pci_find_slot(new_slot->bus,
- (new_slot->device << 3) | new_slot->function);
- dbg("new_slot->pci_dev = %p\n", new_slot->pci_dev);
-
- for (cloop = 0; cloop < 0x20; cloop++) {
- rc = pci_bus_read_config_dword(pci_bus,
- PCI_DEVFN(device, function),
- cloop << 2,
- (u32 *) &(new_slot->config_space [cloop]));
- /* dbg("new_slot->config_space[%x] = %x\n",
- cloop, new_slot->config_space[cloop]); */
- if (rc)
- return rc;
- }
-
- function++;
-
- stop_it = 0;
-
- /* this loop skips to the next present function
- * reading in Class Code and Header type.
- */
-
- while ((function < max_functions)&&(!stop_it)) {
- dbg("%s: In while loop \n", __FUNCTION__);
- rc = pci_bus_read_config_dword(pci_bus,
- PCI_DEVFN(device, function),
- PCI_VENDOR_ID, &ID);
-
- if (ID == 0xFFFFFFFF) { /* nothing there. */
- function++;
- dbg("Nothing there\n");
- } else { /* Something there */
- rc = pci_bus_read_config_byte(pci_bus,
- PCI_DEVFN(device, function),
- 0x0B, &class_code);
- if (rc)
- return rc;
-
- rc = pci_bus_read_config_byte(pci_bus,
- PCI_DEVFN(device, function),
- PCI_HEADER_TYPE, &header_type);
- if (rc)
- return rc;
-
- dbg("class_code = %x, header_type = %x\n", class_code, header_type);
- stop_it++;
- }
- }
-
- } while (function < max_functions);
- /* End of IF (device in slot?) */
- } else if (is_hot_plug) {
- /* Setup slot structure with entry for empty slot */
- new_slot = pciehp_slot_create(busnumber);
-
- if (new_slot == NULL) {
- return(1);
- }
- dbg("new_slot = %p, bus = %x, dev = %x, fun = %x\n", new_slot,
- new_slot->bus, new_slot->device, new_slot->function);
-
- new_slot->bus = (u8) busnumber;
- new_slot->device = (u8) device;
- new_slot->function = 0;
- new_slot->is_a_board = 0;
- new_slot->presence_save = 0;
- new_slot->switch_save = 0;
- }
- } /* End of FOR loop */
-
- dbg("%s: Exit\n", __FUNCTION__);
- return(0);
-}
-
-
-/*
- * pciehp_save_slot_config
- *
- * Saves configuration info for all PCI devices in a given slot
- * including subordinate busses.
- *
- * returns 0 if success
- */
-int pciehp_save_slot_config(struct controller *ctrl, struct pci_func * new_slot)
-{
- int rc;
- u8 class_code;
- u8 header_type;
- u32 ID;
- u8 secondary_bus;
- int sub_bus;
- int max_functions;
- int function;
- int cloop = 0;
- int stop_it;
- struct pci_bus lpci_bus, *pci_bus;
- memcpy(&lpci_bus, ctrl->pci_dev->subordinate, sizeof(lpci_bus));
- pci_bus = &lpci_bus;
- pci_bus->number = new_slot->bus;
-
- ID = 0xFFFFFFFF;
-
- pci_bus_read_config_dword(pci_bus, PCI_DEVFN(new_slot->device, 0),
- PCI_VENDOR_ID, &ID);
-
- if (ID != 0xFFFFFFFF) { /* device in slot */
- pci_bus_read_config_byte(pci_bus, PCI_DEVFN(new_slot->device, 0),
- 0x0B, &class_code);
-
- pci_bus_read_config_byte(pci_bus, PCI_DEVFN(new_slot->device, 0),
- PCI_HEADER_TYPE, &header_type);
-
- if (header_type & 0x80) /* Multi-function device */
- max_functions = 8;
- else
- max_functions = 1;
-
- function = 0;
-
- do {
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { /* PCI-PCI Bridge */
- /* Recurse the subordinate bus */
- pci_bus_read_config_byte(pci_bus,
- PCI_DEVFN(new_slot->device, function),
- PCI_SECONDARY_BUS, &secondary_bus);
-
- sub_bus = (int) secondary_bus;
-
- /* Save the config headers for the secondary bus. */
- rc = pciehp_save_config(ctrl, sub_bus, 0, 0);
-
- if (rc)
- return rc;
-
- } /* End of IF */
-
- new_slot->status = 0;
-
- for (cloop = 0; cloop < 0x20; cloop++) {
- pci_bus_read_config_dword(pci_bus,
- PCI_DEVFN(new_slot->device, function),
- cloop << 2,
- (u32 *) &(new_slot->config_space [cloop]));
- }
-
- function++;
-
- stop_it = 0;
-
- /* this loop skips to the next present function
- * reading in the Class Code and the Header type.
- */
-
- while ((function < max_functions) && (!stop_it)) {
- pci_bus_read_config_dword(pci_bus,
- PCI_DEVFN(new_slot->device, function),
- PCI_VENDOR_ID, &ID);
-
- if (ID == 0xFFFFFFFF) { /* nothing there. */
- function++;
- } else { /* Something there */
- pci_bus_read_config_byte(pci_bus,
- PCI_DEVFN(new_slot->device, function),
- 0x0B, &class_code);
-
- pci_bus_read_config_byte(pci_bus,
- PCI_DEVFN(new_slot->device, function),
- PCI_HEADER_TYPE, &header_type);
-
- stop_it++;
- }
- }
-
- } while (function < max_functions);
- } /* End of IF (device in slot?) */
- else {
- return 2;
- }
-
- return 0;
-}
-
-
-/*
- * pciehp_save_used_resources
- *
- * Stores used resource information for existing boards. This is
- * for boards that were in the system when this driver was loaded.
- * This function is for hot plug ADD.
- *
- * returns 0 on success
- * if disable == 1 (DISABLE_CARD), it loops over all functions of
- * the slot and disables them; else, it just gets the resources of
- * the function and returns.
- */
-int pciehp_save_used_resources(struct controller *ctrl, struct pci_func *func, int disable)
-{
- u8 cloop;
- u8 header_type;
- u8 secondary_bus;
- u8 temp_byte;
- u16 command;
- u16 save_command;
- u16 w_base, w_length;
- u32 temp_register;
- u32 save_base;
- u32 base, length;
- u64 base64 = 0;
- int index = 0;
- unsigned int devfn;
- struct pci_resource *mem_node = NULL;
- struct pci_resource *p_mem_node = NULL;
- struct pci_resource *t_mem_node;
- struct pci_resource *io_node;
- struct pci_resource *bus_node;
- struct pci_bus lpci_bus, *pci_bus;
- memcpy(&lpci_bus, ctrl->pci_dev->subordinate, sizeof(lpci_bus));
- pci_bus = &lpci_bus;
-
- if (disable)
- func = pciehp_slot_find(func->bus, func->device, index++);
-
- while ((func != NULL) && func->is_a_board) {
- pci_bus->number = func->bus;
- devfn = PCI_DEVFN(func->device, func->function);
-
- /* Save the command register */
- pci_bus_read_config_word(pci_bus, devfn, PCI_COMMAND, &save_command);
-
- if (disable) {
- /* disable card */
- command = 0x00;
- pci_bus_write_config_word(pci_bus, devfn, PCI_COMMAND, command);
- }
-
- /* Check for Bridge */
- pci_bus_read_config_byte(pci_bus, devfn, PCI_HEADER_TYPE, &header_type);
-
- if ((header_type & 0x7F) == PCI_HEADER_TYPE_BRIDGE) { /* PCI-PCI Bridge */
- dbg("Save_used_res of PCI bridge b:d=0x%x:%x, sc=0x%x\n",
- func->bus, func->device, save_command);
- if (disable) {
- /* Clear Bridge Control Register */
- command = 0x00;
- pci_bus_write_config_word(pci_bus, devfn, PCI_BRIDGE_CONTROL, command);
- }
-
- pci_bus_read_config_byte(pci_bus, devfn, PCI_SECONDARY_BUS, &secondary_bus);
- pci_bus_read_config_byte(pci_bus, devfn, PCI_SUBORDINATE_BUS, &temp_byte);
-
- bus_node = kmalloc(sizeof(struct pci_resource),
- GFP_KERNEL);
- if (!bus_node)
- return -ENOMEM;
-
- bus_node->base = (ulong)secondary_bus;
- bus_node->length = (ulong)(temp_byte - secondary_bus + 1);
-
- bus_node->next = func->bus_head;
- func->bus_head = bus_node;
-
- /* Save IO base and Limit registers */
- pci_bus_read_config_byte(pci_bus, devfn, PCI_IO_BASE, &temp_byte);
- base = temp_byte;
- pci_bus_read_config_byte(pci_bus, devfn, PCI_IO_LIMIT, &temp_byte);
- length = temp_byte;
-
- if ((base <= length) && (!disable || (save_command & PCI_COMMAND_IO))) {
- io_node = kmalloc(sizeof(struct pci_resource),
- GFP_KERNEL);
- if (!io_node)
- return -ENOMEM;
-
- io_node->base = (ulong)(base & PCI_IO_RANGE_MASK) << 8;
- io_node->length = (ulong)(length - base + 0x10) << 8;
-
- io_node->next = func->io_head;
- func->io_head = io_node;
- }
-
- /* Save memory base and Limit registers */
- pci_bus_read_config_word(pci_bus, devfn, PCI_MEMORY_BASE, &w_base);
- pci_bus_read_config_word(pci_bus, devfn, PCI_MEMORY_LIMIT, &w_length);
-
- if ((w_base <= w_length) && (!disable || (save_command & PCI_COMMAND_MEMORY))) {
- mem_node = kmalloc(sizeof(struct pci_resource),
- GFP_KERNEL);
- if (!mem_node)
- return -ENOMEM;
-
- mem_node->base = (ulong)w_base << 16;
- mem_node->length = (ulong)(w_length - w_base + 0x10) << 16;
-
- mem_node->next = func->mem_head;
- func->mem_head = mem_node;
- }
- /* Save prefetchable memory base and Limit registers */
- pci_bus_read_config_word(pci_bus, devfn, PCI_PREF_MEMORY_BASE, &w_base);
- pci_bus_read_config_word(pci_bus, devfn, PCI_PREF_MEMORY_LIMIT, &w_length);
-
- if ((w_base <= w_length) && (!disable || (save_command & PCI_COMMAND_MEMORY))) {
- p_mem_node = kmalloc(sizeof(struct pci_resource),
- GFP_KERNEL);
- if (!p_mem_node)
- return -ENOMEM;
-
- p_mem_node->base = (ulong)w_base << 16;
- p_mem_node->length = (ulong)(w_length - w_base + 0x10) << 16;
-
- p_mem_node->next = func->p_mem_head;
- func->p_mem_head = p_mem_node;
- }
- } else if ((header_type & 0x7F) == PCI_HEADER_TYPE_NORMAL) {
- dbg("Save_used_res of PCI adapter b:d=0x%x:%x, sc=0x%x\n",
- func->bus, func->device, save_command);
-
- /* Figure out IO and memory base lengths */
- for (cloop = PCI_BASE_ADDRESS_0; cloop <= PCI_BASE_ADDRESS_5; cloop += 4) {
- pci_bus_read_config_dword(pci_bus, devfn, cloop, &save_base);
-
- temp_register = 0xFFFFFFFF;
- pci_bus_write_config_dword(pci_bus, devfn, cloop, temp_register);
- pci_bus_read_config_dword(pci_bus, devfn, cloop, &temp_register);
-
- if (!disable)
- pci_bus_write_config_dword(pci_bus, devfn, cloop, save_base);
-
- if (!temp_register)
- continue;
-
- base = temp_register;
-
- if ((base & PCI_BASE_ADDRESS_SPACE_IO) &&
- (!disable || (save_command & PCI_COMMAND_IO))) {
- /* IO base */
- /* set temp_register = amount of IO space requested */
- base = base & 0xFFFFFFFCL;
- base = (~base) + 1;
-
- io_node = kmalloc(sizeof (struct pci_resource),
- GFP_KERNEL);
- if (!io_node)
- return -ENOMEM;
-
- io_node->base = (ulong)save_base & PCI_BASE_ADDRESS_IO_MASK;
- io_node->length = (ulong)base;
- dbg("sur adapter: IO bar=0x%x(length=0x%x)\n",
- io_node->base, io_node->length);
-
- io_node->next = func->io_head;
- func->io_head = io_node;
- } else { /* map Memory */
- int prefetchable = 1;
- /* struct pci_resources **res_node; */
- char *res_type_str = "PMEM";
- u32 temp_register2;
-
- t_mem_node = kmalloc(sizeof (struct pci_resource),
- GFP_KERNEL);
- if (!t_mem_node)
- return -ENOMEM;
-
- if (!(base & PCI_BASE_ADDRESS_MEM_PREFETCH) &&
- (!disable || (save_command & PCI_COMMAND_MEMORY))) {
- prefetchable = 0;
- mem_node = t_mem_node;
- res_type_str++;
- } else
- p_mem_node = t_mem_node;
-
- base = base & 0xFFFFFFF0L;
- base = (~base) + 1;
-
- switch (temp_register & PCI_BASE_ADDRESS_MEM_TYPE_MASK) {
- case PCI_BASE_ADDRESS_MEM_TYPE_32:
- if (prefetchable) {
- p_mem_node->base = (ulong)save_base & PCI_BASE_ADDRESS_MEM_MASK;
- p_mem_node->length = (ulong)base;
- dbg("sur adapter: 32 %s bar=0x%x(length=0x%x)\n",
- res_type_str,
- p_mem_node->base,
- p_mem_node->length);
-
- p_mem_node->next = func->p_mem_head;
- func->p_mem_head = p_mem_node;
- } else {
- mem_node->base = (ulong)save_base & PCI_BASE_ADDRESS_MEM_MASK;
- mem_node->length = (ulong)base;
- dbg("sur adapter: 32 %s bar=0x%x(length=0x%x)\n",
- res_type_str,
- mem_node->base,
- mem_node->length);
-
- mem_node->next = func->mem_head;
- func->mem_head = mem_node;
- }
- break;
- case PCI_BASE_ADDRESS_MEM_TYPE_64:
- pci_bus_read_config_dword(pci_bus, devfn, cloop+4, &temp_register2);
- base64 = temp_register2;
- base64 = (base64 << 32) | save_base;
-
- if (temp_register2) {
- dbg("sur adapter: 64 %s high dword of base64(0x%x:%x) masked to 0\n",
- res_type_str, temp_register2, (u32)base64);
- base64 &= 0x00000000FFFFFFFFL;
- }
-
- if (prefetchable) {
- p_mem_node->base = base64 & PCI_BASE_ADDRESS_MEM_MASK;
- p_mem_node->length = base;
- dbg("sur adapter: 64 %s base=0x%x(len=0x%x)\n",
- res_type_str,
- p_mem_node->base,
- p_mem_node->length);
-
- p_mem_node->next = func->p_mem_head;
- func->p_mem_head = p_mem_node;
- } else {
- mem_node->base = base64 & PCI_BASE_ADDRESS_MEM_MASK;
- mem_node->length = base;
- dbg("sur adapter: 64 %s base=0x%x(len=0x%x)\n",
- res_type_str,
- mem_node->base,
- mem_node->length);
-
- mem_node->next = func->mem_head;
- func->mem_head = mem_node;
- }
- cloop += 4;
- break;
- default:
- dbg("asur: reserved BAR type=0x%x\n",
- temp_register);
- break;
- }
- }
- } /* End of base register loop */
- } else { /* Some other unknown header type */
- dbg("Save_used_res of PCI unknown type b:d=0x%x:%x. skip.\n",
- func->bus, func->device);
- }
-
- /* find the next device in this slot */
- if (!disable)
- break;
- func = pciehp_slot_find(func->bus, func->device, index++);
- }
-
- return 0;
-}
-
-
-/**
- * return_resource_list: hand every member of a function's resource list back to the free pool
- * @func: list of resources still held by the function (emptied on return)
- * @res: free-resource pool that receives the nodes
- */
-static inline void
-return_resource_list(struct pci_resource **func, struct pci_resource **res)
-{
- struct pci_resource *node;
- struct pci_resource *t_node;
-
- node = *func;
- *func = NULL;
- while (node) {
- t_node = node->next;
- return_resource(res, node);
- node = t_node;
- }
-}
-
-/*
- * pciehp_return_board_resources
- *
- * This routine returns all resources allocated to a board to
- * the available pool.
- *
- * returns 0 if success
- */
-int pciehp_return_board_resources(struct pci_func * func,
- struct resource_lists * resources)
-{
- int rc;
-
- dbg("%s\n", __FUNCTION__);
-
- if (!func)
- return 1;
-
- return_resource_list(&(func->io_head),&(resources->io_head));
- return_resource_list(&(func->mem_head),&(resources->mem_head));
- return_resource_list(&(func->p_mem_head),&(resources->p_mem_head));
- return_resource_list(&(func->bus_head),&(resources->bus_head));
-
- rc = pciehp_resource_sort_and_combine(&(resources->mem_head));
- rc |= pciehp_resource_sort_and_combine(&(resources->p_mem_head));
- rc |= pciehp_resource_sort_and_combine(&(resources->io_head));
- rc |= pciehp_resource_sort_and_combine(&(resources->bus_head));
-
- return rc;
-}
-
-/**
- * kfree_resource_list: release memory of all list members
- * @r: resource list to free
- */
-static inline void
-kfree_resource_list(struct pci_resource **r)
-{
- struct pci_resource *res, *tres;
-
- res = *r;
- *r = NULL;
-
- while (res) {
- tres = res;
- res = res->next;
- kfree(tres);
- }
-}
-
-/**
- * pciehp_destroy_resource_list: free every node in the given resource lists
- * @resources: resource lists whose members are released with kfree()
- */
-void pciehp_destroy_resource_list(struct resource_lists * resources)
-{
- kfree_resource_list(&(resources->io_head));
- kfree_resource_list(&(resources->mem_head));
- kfree_resource_list(&(resources->p_mem_head));
- kfree_resource_list(&(resources->bus_head));
-}
-
-/**
- * pciehp_destroy_board_resources: free every resource node still attached to a board
- * @func: PCI function whose io/mem/p_mem/bus resource lists are freed
- */
-void pciehp_destroy_board_resources(struct pci_func * func)
-{
- kfree_resource_list(&(func->io_head));
- kfree_resource_list(&(func->mem_head));
- kfree_resource_list(&(func->p_mem_head));
- kfree_resource_list(&(func->bus_head));
-}
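
Not part of the patch, for illustration only: the removed list helpers above, return_resource_list() and kfree_resource_list(), walk the same singly linked list of struct pci_resource nodes; one hands each node back to the free pool via return_resource(), the other kfree()s it. A minimal user-space sketch of that walk, using the hypothetical names res_node, drain_list() and dispose_free() in place of the driver's types, looks like this:

#include <stdio.h>
#include <stdlib.h>

struct res_node {                     /* simplified stand-in for struct pci_resource */
	unsigned long base;
	unsigned long length;
	struct res_node *next;
};

/* Detach every node from *head and hand each one to dispose(). */
static void drain_list(struct res_node **head, void (*dispose)(struct res_node *))
{
	struct res_node *node = *head;

	*head = NULL;                 /* the caller's list is emptied, as in the driver */
	while (node) {
		struct res_node *next = node->next;  /* save the link before disposing */

		dispose(node);        /* kfree() or "give back to the free pool" */
		node = next;
	}
}

static void dispose_free(struct res_node *node)
{
	free(node);
}

int main(void)
{
	struct res_node *head = calloc(1, sizeof(*head));

	drain_list(&head, dispose_free);
	printf("list drained: head=%p\n", (void *)head);
	return 0;
}
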
diff --git a/drivers/pci/hotplug/pciehprm.h b/drivers/pci/hotplug/pciehprm.h
deleted file mode 100644
index 05f20fbc5f5..00000000000
--- a/drivers/pci/hotplug/pciehprm.h
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * PCIEHPRM : PCIEHP Resource Manager for ACPI/non-ACPI platform
- *
- * Copyright (C) 1995,2001 Compaq Computer Corporation
- * Copyright (C) 2001,2003 Greg Kroah-Hartman (greg@kroah.com)
- * Copyright (C) 2001 IBM Corp.
- * Copyright (C) 2003-2004 Intel Corporation
- *
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
- *
- */
-
-#ifndef _PCIEHPRM_H_
-#define _PCIEHPRM_H_
-
-#ifdef CONFIG_HOTPLUG_PCI_PCIE_PHPRM_NONACPI
-#include "pciehprm_nonacpi.h"
-#endif
-
-int pciehprm_init(enum php_ctlr_type ct);
-void pciehprm_cleanup(void);
-int pciehprm_print_pirt(void);
-int pciehprm_find_available_resources(struct controller *ctrl);
-int pciehprm_set_hpp(struct controller *ctrl, struct pci_func *func, u8 card_type);
-void pciehprm_enable_card(struct controller *ctrl, struct pci_func *func, u8 card_type);
-
-#ifdef DEBUG
-#define RES_CHECK(this, bits) \
- { if (((this) & (bits - 1))) \
- printk("%s:%d ERR: potential res loss!\n", __FUNCTION__, __LINE__); }
-#else
-#define RES_CHECK(this, bits)
-#endif
-
-#endif /* _PCIEHPRM_H_ */
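
Not part of the patch, for illustration only: the RES_CHECK() macro in the header deleted above reports a value that is not aligned to "bits", since (value & (bits - 1)) is nonzero exactly when the value is not a multiple of bits, assuming bits is a power of two. A stand-alone sketch of the same check (using standard __func__ instead of the GCC __FUNCTION__ extension):

#include <stdio.h>

/* Warn when "value" is not a multiple of the power-of-two "align". */
#define RES_CHECK(value, align)						\
	do {								\
		if ((value) & ((align) - 1))				\
			printf("%s:%d ERR: potential res loss!\n",	\
			       __func__, __LINE__);			\
	} while (0)

int main(void)
{
	RES_CHECK(0x1000UL, 0x100UL);	/* aligned, stays silent */
	RES_CHECK(0x1008UL, 0x100UL);	/* misaligned, prints the warning */
	return 0;
}
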
diff --git a/drivers/pci/hotplug/pciehprm_acpi.c b/drivers/pci/hotplug/pciehprm_acpi.c
index 1406db35b08..ae244e21862 100644
--- a/drivers/pci/hotplug/pciehprm_acpi.c
+++ b/drivers/pci/hotplug/pciehprm_acpi.c
@@ -24,100 +24,20 @@
*
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/acpi.h>
-#include <linux/efi.h>
#include <linux/pci-acpi.h>
-#include <asm/uaccess.h>
-#include <asm/system.h>
-#ifdef CONFIG_IA64
-#include <asm/iosapic.h>
-#endif
-#include <acpi/acpi.h>
#include <acpi/acpi_bus.h>
#include <acpi/actypes.h>
#include "pciehp.h"
-#include "pciehprm.h"
-
-#define PCI_MAX_BUS 0x100
-#define ACPI_STA_DEVICE_PRESENT 0x01
#define METHOD_NAME__SUN "_SUN"
#define METHOD_NAME__HPP "_HPP"
#define METHOD_NAME_OSHP "OSHP"
-/* Status code for running acpi method to gain native control */
-#define NC_NOT_RUN 0
-#define OSC_NOT_EXIST 1
-#define OSC_RUN_FAILED 2
-#define OSHP_NOT_EXIST 3
-#define OSHP_RUN_FAILED 4
-#define NC_RUN_SUCCESS 5
-
-#define PHP_RES_BUS 0xA0
-#define PHP_RES_IO 0xA1
-#define PHP_RES_MEM 0xA2
-#define PHP_RES_PMEM 0xA3
-
-#define BRIDGE_TYPE_P2P 0x00
-#define BRIDGE_TYPE_HOST 0x01
-
-/* this should go to drivers/acpi/include/ */
-struct acpi__hpp {
- u8 cache_line_size;
- u8 latency_timer;
- u8 enable_serr;
- u8 enable_perr;
-};
-
-struct acpi_php_slot {
- struct acpi_php_slot *next;
- struct acpi_bridge *bridge;
- acpi_handle handle;
- int seg;
- int bus;
- int dev;
- int fun;
- u32 sun;
- struct pci_resource *mem_head;
- struct pci_resource *p_mem_head;
- struct pci_resource *io_head;
- struct pci_resource *bus_head;
- void *slot_ops; /* _STA, _EJx, etc */
- struct slot *slot;
-}; /* per func */
-
-struct acpi_bridge {
- struct acpi_bridge *parent;
- struct acpi_bridge *next;
- struct acpi_bridge *child;
- acpi_handle handle;
- int seg;
- int pbus; /* pdev->bus->number */
- int pdevice; /* PCI_SLOT(pdev->devfn) */
- int pfunction; /* PCI_DEVFN(pdev->devfn) */
- int bus; /* pdev->subordinate->number */
- struct acpi__hpp *_hpp;
- struct acpi_php_slot *slots;
- struct pci_resource *tmem_head; /* total from crs */
- struct pci_resource *tp_mem_head; /* total from crs */
- struct pci_resource *tio_head; /* total from crs */
- struct pci_resource *tbus_head; /* total from crs */
- struct pci_resource *mem_head; /* available */
- struct pci_resource *p_mem_head; /* available */
- struct pci_resource *io_head; /* available */
- struct pci_resource *bus_head; /* available */
- int scanned;
- int type;
-};
-
-static struct acpi_bridge *acpi_bridges_head;
-
static u8 * acpi_path_name( acpi_handle handle)
{
acpi_status status;
@@ -133,85 +53,43 @@ static u8 * acpi_path_name( acpi_handle handle)
return path_name;
}
-static void acpi_get__hpp ( struct acpi_bridge *ab);
-static int acpi_run_oshp ( struct acpi_bridge *ab);
-static int osc_run_status = NC_NOT_RUN;
-static int oshp_run_status = NC_NOT_RUN;
-
-static int acpi_add_slot_to_php_slots(
- struct acpi_bridge *ab,
- int bus_num,
- acpi_handle handle,
- u32 adr,
- u32 sun
- )
-{
- struct acpi_php_slot *aps;
- static long samesun = -1;
-
- aps = (struct acpi_php_slot *) kmalloc (sizeof(struct acpi_php_slot), GFP_KERNEL);
- if (!aps) {
- err ("acpi_pciehprm: alloc for aps fail\n");
- return -1;
- }
- memset(aps, 0, sizeof(struct acpi_php_slot));
-
- aps->handle = handle;
- aps->bus = bus_num;
- aps->dev = (adr >> 16) & 0xffff;
- aps->fun = adr & 0xffff;
- aps->sun = sun;
-
- aps->next = ab->slots; /* cling to the bridge */
- aps->bridge = ab;
- ab->slots = aps;
-
- ab->scanned += 1;
- if (!ab->_hpp)
- acpi_get__hpp(ab);
-
- if (osc_run_status == OSC_NOT_EXIST)
- oshp_run_status = acpi_run_oshp(ab);
-
- if (sun != samesun) {
- info("acpi_pciehprm: Slot sun(%x) at s:b:d:f=0x%02x:%02x:%02x:%02x\n",
- aps->sun, ab->seg, aps->bus, aps->dev, aps->fun);
- samesun = sun;
- }
- return 0;
-}
-
-static void acpi_get__hpp ( struct acpi_bridge *ab)
+static acpi_status
+acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp)
{
acpi_status status;
u8 nui[4];
struct acpi_buffer ret_buf = { 0, NULL};
union acpi_object *ext_obj, *package;
- u8 *path_name = acpi_path_name(ab->handle);
+ u8 *path_name = acpi_path_name(handle);
int i, len = 0;
/* get _hpp */
- status = acpi_evaluate_object(ab->handle, METHOD_NAME__HPP, NULL, &ret_buf);
+ status = acpi_evaluate_object(handle, METHOD_NAME__HPP, NULL, &ret_buf);
switch (status) {
case AE_BUFFER_OVERFLOW:
ret_buf.pointer = kmalloc (ret_buf.length, GFP_KERNEL);
if (!ret_buf.pointer) {
- err ("acpi_pciehprm:%s alloc for _HPP fail\n", path_name);
- return;
+ err ("%s:%s alloc for _HPP fail\n", __FUNCTION__,
+ path_name);
+ return AE_NO_MEMORY;
}
- status = acpi_evaluate_object(ab->handle, METHOD_NAME__HPP, NULL, &ret_buf);
+ status = acpi_evaluate_object(handle, METHOD_NAME__HPP,
+ NULL, &ret_buf);
if (ACPI_SUCCESS(status))
break;
default:
if (ACPI_FAILURE(status)) {
- err("acpi_pciehprm:%s _HPP fail=0x%x\n", path_name, status);
- return;
+ dbg("%s:%s _HPP fail=0x%x\n", __FUNCTION__,
+ path_name, status);
+ return status;
}
}
ext_obj = (union acpi_object *) ret_buf.pointer;
if (ext_obj->type != ACPI_TYPE_PACKAGE) {
- err ("acpi_pciehprm:%s _HPP obj not a package\n", path_name);
+ err ("%s:%s _HPP obj not a package\n", __FUNCTION__,
+ path_name);
+ status = AE_ERROR;
goto free_and_return;
}
@@ -224,1514 +102,153 @@ static void acpi_get__hpp ( struct acpi_bridge *ab)
nui[i] = (u8)ext_obj->integer.value;
break;
default:
- err ("acpi_pciehprm:%s _HPP obj type incorrect\n", path_name);
+ err ("%s:%s _HPP obj type incorrect\n", __FUNCTION__,
+ path_name);
+ status = AE_ERROR;
goto free_and_return;
}
}
- ab->_hpp = kmalloc (sizeof (struct acpi__hpp), GFP_KERNEL);
- if (!ab->_hpp) {
- err ("acpi_pciehprm:%s alloc for _HPP failed\n", path_name);
- goto free_and_return;
- }
- memset(ab->_hpp, 0, sizeof(struct acpi__hpp));
+ hpp->cache_line_size = nui[0];
+ hpp->latency_timer = nui[1];
+ hpp->enable_serr = nui[2];
+ hpp->enable_perr = nui[3];
- ab->_hpp->cache_line_size = nui[0];
- ab->_hpp->latency_timer = nui[1];
- ab->_hpp->enable_serr = nui[2];
- ab->_hpp->enable_perr = nui[3];
-
- dbg(" _HPP: cache_line_size=0x%x\n", ab->_hpp->cache_line_size);
- dbg(" _HPP: latency timer =0x%x\n", ab->_hpp->latency_timer);
- dbg(" _HPP: enable SERR =0x%x\n", ab->_hpp->enable_serr);
- dbg(" _HPP: enable PERR =0x%x\n", ab->_hpp->enable_perr);
+ dbg(" _HPP: cache_line_size=0x%x\n", hpp->cache_line_size);
+ dbg(" _HPP: latency timer =0x%x\n", hpp->latency_timer);
+ dbg(" _HPP: enable SERR =0x%x\n", hpp->enable_serr);
+ dbg(" _HPP: enable PERR =0x%x\n", hpp->enable_perr);
free_and_return:
kfree(ret_buf.pointer);
+ return status;
}
-static int acpi_run_oshp ( struct acpi_bridge *ab)
+static acpi_status acpi_run_oshp(acpi_handle handle)
{
acpi_status status;
- u8 *path_name = acpi_path_name(ab->handle);
+ u8 *path_name = acpi_path_name(handle);
/* run OSHP */
- status = acpi_evaluate_object(ab->handle, METHOD_NAME_OSHP, NULL, NULL);
+ status = acpi_evaluate_object(handle, METHOD_NAME_OSHP, NULL, NULL);
if (ACPI_FAILURE(status)) {
- err("acpi_pciehprm:%s OSHP fails=0x%x\n", path_name, status);
- oshp_run_status = (status == AE_NOT_FOUND) ? OSHP_NOT_EXIST : OSHP_RUN_FAILED;
+ dbg("%s:%s OSHP fails=0x%x\n", __FUNCTION__, path_name,
+ status);
} else {
- oshp_run_status = NC_RUN_SUCCESS;
- dbg("acpi_pciehprm:%s OSHP passes =0x%x\n", path_name, status);
- dbg("acpi_pciehprm:%s oshp_run_status =0x%x\n", path_name, oshp_run_status);
- }
- return oshp_run_status;
-}
-
-static acpi_status acpi_evaluate_crs(
- acpi_handle handle,
- struct acpi_resource **retbuf
- )
-{
- acpi_status status;
- struct acpi_buffer crsbuf;
- u8 *path_name = acpi_path_name(handle);
-
- crsbuf.length = 0;
- crsbuf.pointer = NULL;
-
- status = acpi_get_current_resources (handle, &crsbuf);
-
- switch (status) {
- case AE_BUFFER_OVERFLOW:
- break; /* found */
- case AE_NOT_FOUND:
- dbg("acpi_pciehprm:%s _CRS not found\n", path_name);
- return status;
- default:
- err ("acpi_pciehprm:%s _CRS fail=0x%x\n", path_name, status);
- return status;
+ dbg("%s:%s OSHP passes\n", __FUNCTION__, path_name);
}
-
- crsbuf.pointer = kmalloc (crsbuf.length, GFP_KERNEL);
- if (!crsbuf.pointer) {
- err ("acpi_pciehprm: alloc %ld bytes for %s _CRS fail\n", (ulong)crsbuf.length, path_name);
- return AE_NO_MEMORY;
- }
-
- status = acpi_get_current_resources (handle, &crsbuf);
- if (ACPI_FAILURE(status)) {
- err("acpi_pciehprm: %s _CRS fail=0x%x.\n", path_name, status);
- kfree(crsbuf.pointer);
- return status;
- }
-
- *retbuf = crsbuf.pointer;
-
return status;
}
-static void free_pci_resource ( struct pci_resource *aprh)
+static int is_root_bridge(acpi_handle handle)
{
- struct pci_resource *res, *next;
+ acpi_status status;
+ struct acpi_device_info *info;
+ struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+ int i;
- for (res = aprh; res; res = next) {
- next = res->next;
- kfree(res);
- }
-}
-
-static void print_pci_resource ( struct pci_resource *aprh)
-{
- struct pci_resource *res;
-
- for (res = aprh; res; res = res->next)
- dbg(" base= 0x%x length= 0x%x\n", res->base, res->length);
-}
-
-static void print_slot_resources( struct acpi_php_slot *aps)
-{
- if (aps->bus_head) {
- dbg(" BUS Resources:\n");
- print_pci_resource (aps->bus_head);
- }
-
- if (aps->io_head) {
- dbg(" IO Resources:\n");
- print_pci_resource (aps->io_head);
- }
-
- if (aps->mem_head) {
- dbg(" MEM Resources:\n");
- print_pci_resource (aps->mem_head);
- }
-
- if (aps->p_mem_head) {
- dbg(" PMEM Resources:\n");
- print_pci_resource (aps->p_mem_head);
- }
-}
-
-static void print_pci_resources( struct acpi_bridge *ab)
-{
- if (ab->tbus_head) {
- dbg(" Total BUS Resources:\n");
- print_pci_resource (ab->tbus_head);
- }
- if (ab->bus_head) {
- dbg(" BUS Resources:\n");
- print_pci_resource (ab->bus_head);
- }
-
- if (ab->tio_head) {
- dbg(" Total IO Resources:\n");
- print_pci_resource (ab->tio_head);
- }
- if (ab->io_head) {
- dbg(" IO Resources:\n");
- print_pci_resource (ab->io_head);
- }
-
- if (ab->tmem_head) {
- dbg(" Total MEM Resources:\n");
- print_pci_resource (ab->tmem_head);
- }
- if (ab->mem_head) {
- dbg(" MEM Resources:\n");
- print_pci_resource (ab->mem_head);
- }
-
- if (ab->tp_mem_head) {
- dbg(" Total PMEM Resources:\n");
- print_pci_resource (ab->tp_mem_head);
- }
- if (ab->p_mem_head) {
- dbg(" PMEM Resources:\n");
- print_pci_resource (ab->p_mem_head);
- }
- if (ab->_hpp) {
- dbg(" _HPP: cache_line_size=0x%x\n", ab->_hpp->cache_line_size);
- dbg(" _HPP: latency timer =0x%x\n", ab->_hpp->latency_timer);
- dbg(" _HPP: enable SERR =0x%x\n", ab->_hpp->enable_serr);
- dbg(" _HPP: enable PERR =0x%x\n", ab->_hpp->enable_perr);
- }
-}
-
-static int pciehprm_delete_resource(
- struct pci_resource **aprh,
- ulong base,
- ulong size)
-{
- struct pci_resource *res;
- struct pci_resource *prevnode;
- struct pci_resource *split_node;
- ulong tbase;
-
- pciehp_resource_sort_and_combine(aprh);
-
- for (res = *aprh; res; res = res->next) {
- if (res->base > base)
- continue;
-
- if ((res->base + res->length) < (base + size))
- continue;
-
- if (res->base < base) {
- tbase = base;
-
- if ((res->length - (tbase - res->base)) < size)
- continue;
-
- split_node = (struct pci_resource *) kmalloc(sizeof(struct pci_resource), GFP_KERNEL);
- if (!split_node)
- return -ENOMEM;
-
- split_node->base = res->base;
- split_node->length = tbase - res->base;
- res->base = tbase;
- res->length -= split_node->length;
-
- split_node->next = res->next;
- res->next = split_node;
- }
-
- if (res->length >= size) {
- split_node = (struct pci_resource*) kmalloc(sizeof(struct pci_resource), GFP_KERNEL);
- if (!split_node)
- return -ENOMEM;
-
- split_node->base = res->base + size;
- split_node->length = res->length - size;
- res->length = size;
-
- split_node->next = res->next;
- res->next = split_node;
- }
-
- if (*aprh == res) {
- *aprh = res->next;
- } else {
- prevnode = *aprh;
- while (prevnode->next != res)
- prevnode = prevnode->next;
-
- prevnode->next = res->next;
- }
- res->next = NULL;
- kfree(res);
- break;
- }
-
- return 0;
-}
-
-static int pciehprm_delete_resources(
- struct pci_resource **aprh,
- struct pci_resource *this
- )
-{
- struct pci_resource *res;
-
- for (res = this; res; res = res->next)
- pciehprm_delete_resource(aprh, res->base, res->length);
-
- return 0;
-}
-
-static int pciehprm_add_resource(
- struct pci_resource **aprh,
- ulong base,
- ulong size)
-{
- struct pci_resource *res;
-
- for (res = *aprh; res; res = res->next) {
- if ((res->base + res->length) == base) {
- res->length += size;
- size = 0L;
- break;
+ status = acpi_get_object_info(handle, &buffer);
+ if (ACPI_SUCCESS(status)) {
+ info = buffer.pointer;
+ if ((info->valid & ACPI_VALID_HID) &&
+ !strcmp(PCI_ROOT_HID_STRING,
+ info->hardware_id.value)) {
+ acpi_os_free(buffer.pointer);
+ return 1;
+ }
+ if (info->valid & ACPI_VALID_CID) {
+ for (i=0; i < info->compatibility_id.count; i++) {
+ if (!strcmp(PCI_ROOT_HID_STRING,
+ info->compatibility_id.id[i].value)) {
+ acpi_os_free(buffer.pointer);
+ return 1;
+ }
+ }
}
- if (res->next == *aprh)
- break;
}
-
- if (size) {
- res = kmalloc(sizeof(struct pci_resource), GFP_KERNEL);
- if (!res) {
- err ("acpi_pciehprm: alloc for res fail\n");
- return -ENOMEM;
- }
- memset(res, 0, sizeof (struct pci_resource));
-
- res->base = base;
- res->length = size;
- res->next = *aprh;
- *aprh = res;
- }
-
return 0;
}
-static int pciehprm_add_resources(
- struct pci_resource **aprh,
- struct pci_resource *this
- )
-{
- struct pci_resource *res;
- int rc = 0;
-
- for (res = this; res && !rc; res = res->next)
- rc = pciehprm_add_resource(aprh, res->base, res->length);
-
- return rc;
-}
-
-static void acpi_parse_io (
- struct acpi_bridge *ab,
- union acpi_resource_data *data
- )
+int pciehp_get_hp_hw_control_from_firmware(struct pci_dev *dev)
{
- struct acpi_resource_io *dataio;
- dataio = (struct acpi_resource_io *) data;
-
- dbg("Io Resource\n");
- dbg(" %d bit decode\n", ACPI_DECODE_16 == dataio->io_decode ? 16:10);
- dbg(" Range minimum base: %08X\n", dataio->min_base_address);
- dbg(" Range maximum base: %08X\n", dataio->max_base_address);
- dbg(" Alignment: %08X\n", dataio->alignment);
- dbg(" Range Length: %08X\n", dataio->range_length);
-}
-
-static void acpi_parse_fixed_io (
- struct acpi_bridge *ab,
- union acpi_resource_data *data
- )
-{
- struct acpi_resource_fixed_io *datafio;
- datafio = (struct acpi_resource_fixed_io *) data;
-
- dbg("Fixed Io Resource\n");
- dbg(" Range base address: %08X", datafio->base_address);
- dbg(" Range length: %08X", datafio->range_length);
-}
-
-static void acpi_parse_address16_32 (
- struct acpi_bridge *ab,
- union acpi_resource_data *data,
- acpi_resource_type id
- )
-{
- /*
- * acpi_resource_address16 == acpi_resource_address32
- * acpi_resource_address16 *data16 = (acpi_resource_address16 *) data;
+ acpi_status status;
+ acpi_handle chandle, handle = DEVICE_ACPI_HANDLE(&(dev->dev));
+ struct pci_dev *pdev = dev;
+ u8 *path_name;
+ /*
+ * Per PCI firmware specification, we should run the ACPI _OSC
+ * method to get control of hotplug hardware before using it.
+ * If an _OSC is missing, we look for an OSHP to do the same thing.
+ * To handle different BIOS behavior, we look for _OSC and OSHP
+ * within the scope of the hotplug controller and its parents, up to
+ * the host bridge under which this controller exists.
*/
- struct acpi_resource_address32 *data32 = (struct acpi_resource_address32 *) data;
- struct pci_resource **aprh, **tprh;
-
- if (id == ACPI_RSTYPE_ADDRESS16)
- dbg("acpi_pciehprm:16-Bit Address Space Resource\n");
- else
- dbg("acpi_pciehprm:32-Bit Address Space Resource\n");
-
- switch (data32->resource_type) {
- case ACPI_MEMORY_RANGE:
- dbg(" Resource Type: Memory Range\n");
- aprh = &ab->mem_head;
- tprh = &ab->tmem_head;
-
- switch (data32->attribute.memory.cache_attribute) {
- case ACPI_NON_CACHEABLE_MEMORY:
- dbg(" Type Specific: Noncacheable memory\n");
- break;
- case ACPI_CACHABLE_MEMORY:
- dbg(" Type Specific: Cacheable memory\n");
- break;
- case ACPI_WRITE_COMBINING_MEMORY:
- dbg(" Type Specific: Write-combining memory\n");
- break;
- case ACPI_PREFETCHABLE_MEMORY:
- aprh = &ab->p_mem_head;
- dbg(" Type Specific: Prefetchable memory\n");
- break;
- default:
- dbg(" Type Specific: Invalid cache attribute\n");
+ while (!handle) {
+ /*
+ * This hotplug controller was not listed in the ACPI name
+ * space at all. Try to get acpi handle of parent pci bus.
+ */
+ if (!pdev || !pdev->bus->parent)
break;
- }
-
- dbg(" Type Specific: Read%s\n", ACPI_READ_WRITE_MEMORY == data32->attribute.memory.read_write_attribute ? "/Write":" Only");
- break;
-
- case ACPI_IO_RANGE:
- dbg(" Resource Type: I/O Range\n");
- aprh = &ab->io_head;
- tprh = &ab->tio_head;
-
- switch (data32->attribute.io.range_attribute) {
- case ACPI_NON_ISA_ONLY_RANGES:
- dbg(" Type Specific: Non-ISA Io Addresses\n");
- break;
- case ACPI_ISA_ONLY_RANGES:
- dbg(" Type Specific: ISA Io Addresses\n");
- break;
- case ACPI_ENTIRE_RANGE:
- dbg(" Type Specific: ISA and non-ISA Io Addresses\n");
- break;
- default:
- dbg(" Type Specific: Invalid range attribute\n");
+ dbg("Could not find %s in acpi namespace, trying parent\n",
+ pci_name(pdev));
+ if (!pdev->bus->parent->self)
+ /* Parent must be a host bridge */
+ handle = acpi_get_pci_rootbridge_handle(
+ pci_domain_nr(pdev->bus->parent),
+ pdev->bus->parent->number);
+ else
+ handle = DEVICE_ACPI_HANDLE(
+ &(pdev->bus->parent->self->dev));
+ pdev = pdev->bus->parent->self;
+ }
+
+ while (handle) {
+ path_name = acpi_path_name(handle);
+ dbg("Trying to get hotplug control for %s \n", path_name);
+ status = pci_osc_control_set(handle,
+ OSC_PCI_EXPRESS_NATIVE_HP_CONTROL);
+ if (status == AE_NOT_FOUND)
+ status = acpi_run_oshp(handle);
+ if (ACPI_SUCCESS(status)) {
+ dbg("Gained control for hotplug HW for pci %s (%s)\n",
+ pci_name(dev), path_name);
+ return 0;
+ }
+ if (is_root_bridge(handle))
break;
- }
- break;
-
- case ACPI_BUS_NUMBER_RANGE:
- dbg(" Resource Type: Bus Number Range(fixed)\n");
- /* fixup to be compatible with the rest of php driver */
- data32->min_address_range++;
- data32->address_length--;
- aprh = &ab->bus_head;
- tprh = &ab->tbus_head;
- break;
- default:
- dbg(" Resource Type: Invalid resource type. Exiting.\n");
- return;
- }
-
- dbg(" Resource %s\n", ACPI_CONSUMER == data32->producer_consumer ? "Consumer":"Producer");
- dbg(" %s decode\n", ACPI_SUB_DECODE == data32->decode ? "Subtractive":"Positive");
- dbg(" Min address is %s fixed\n", ACPI_ADDRESS_FIXED == data32->min_address_fixed ? "":"not");
- dbg(" Max address is %s fixed\n", ACPI_ADDRESS_FIXED == data32->max_address_fixed ? "":"not");
- dbg(" Granularity: %08X\n", data32->granularity);
- dbg(" Address range min: %08X\n", data32->min_address_range);
- dbg(" Address range max: %08X\n", data32->max_address_range);
- dbg(" Address translation offset: %08X\n", data32->address_translation_offset);
- dbg(" Address Length: %08X\n", data32->address_length);
-
- if (0xFF != data32->resource_source.index) {
- dbg(" Resource Source Index: %X\n", data32->resource_source.index);
- /* dbg(" Resource Source: %s\n", data32->resource_source.string_ptr); */
- }
-
- pciehprm_add_resource(aprh, data32->min_address_range, data32->address_length);
-}
-
-static acpi_status acpi_parse_crs(
- struct acpi_bridge *ab,
- struct acpi_resource *crsbuf
- )
-{
- acpi_status status = AE_OK;
- struct acpi_resource *resource = crsbuf;
- u8 count = 0;
- u8 done = 0;
-
- while (!done) {
- dbg("acpi_pciehprm: PCI bus 0x%x Resource structure %x.\n", ab->bus, count++);
- switch (resource->id) {
- case ACPI_RSTYPE_IRQ:
- dbg("Irq -------- Resource\n");
- break;
- case ACPI_RSTYPE_DMA:
- dbg("DMA -------- Resource\n");
- break;
- case ACPI_RSTYPE_START_DPF:
- dbg("Start DPF -------- Resource\n");
- break;
- case ACPI_RSTYPE_END_DPF:
- dbg("End DPF -------- Resource\n");
- break;
- case ACPI_RSTYPE_IO:
- acpi_parse_io (ab, &resource->data);
- break;
- case ACPI_RSTYPE_FIXED_IO:
- acpi_parse_fixed_io (ab, &resource->data);
- break;
- case ACPI_RSTYPE_VENDOR:
- dbg("Vendor -------- Resource\n");
- break;
- case ACPI_RSTYPE_END_TAG:
- dbg("End_tag -------- Resource\n");
- done = 1;
- break;
- case ACPI_RSTYPE_MEM24:
- dbg("Mem24 -------- Resource\n");
- break;
- case ACPI_RSTYPE_MEM32:
- dbg("Mem32 -------- Resource\n");
- break;
- case ACPI_RSTYPE_FIXED_MEM32:
- dbg("Fixed Mem32 -------- Resource\n");
- break;
- case ACPI_RSTYPE_ADDRESS16:
- acpi_parse_address16_32(ab, &resource->data, ACPI_RSTYPE_ADDRESS16);
- break;
- case ACPI_RSTYPE_ADDRESS32:
- acpi_parse_address16_32(ab, &resource->data, ACPI_RSTYPE_ADDRESS32);
- break;
- case ACPI_RSTYPE_ADDRESS64:
- info("Address64 -------- Resource unparsed\n");
- break;
- case ACPI_RSTYPE_EXT_IRQ:
- dbg("Ext Irq -------- Resource\n");
- break;
- default:
- dbg("Invalid -------- resource type 0x%x\n", resource->id);
- break;
- }
-
- resource = (struct acpi_resource *) ((char *)resource + resource->length);
- }
-
- return status;
-}
-
-static acpi_status acpi_get_crs( struct acpi_bridge *ab)
-{
- acpi_status status;
- struct acpi_resource *crsbuf;
-
- status = acpi_evaluate_crs(ab->handle, &crsbuf);
- if (ACPI_SUCCESS(status)) {
- status = acpi_parse_crs(ab, crsbuf);
- kfree(crsbuf);
-
- pciehp_resource_sort_and_combine(&ab->bus_head);
- pciehp_resource_sort_and_combine(&ab->io_head);
- pciehp_resource_sort_and_combine(&ab->mem_head);
- pciehp_resource_sort_and_combine(&ab->p_mem_head);
-
- pciehprm_add_resources (&ab->tbus_head, ab->bus_head);
- pciehprm_add_resources (&ab->tio_head, ab->io_head);
- pciehprm_add_resources (&ab->tmem_head, ab->mem_head);
- pciehprm_add_resources (&ab->tp_mem_head, ab->p_mem_head);
- }
-
- return status;
-}
-
-/* find acpi_bridge downward from ab. */
-static struct acpi_bridge *
-find_acpi_bridge_by_bus(
- struct acpi_bridge *ab,
- int seg,
- int bus /* pdev->subordinate->number */
- )
-{
- struct acpi_bridge *lab = NULL;
-
- if (!ab)
- return NULL;
-
- if ((ab->bus == bus) && (ab->seg == seg))
- return ab;
-
- if (ab->child)
- lab = find_acpi_bridge_by_bus(ab->child, seg, bus);
-
- if (!lab)
- if (ab->next)
- lab = find_acpi_bridge_by_bus(ab->next, seg, bus);
-
- return lab;
-}
-
-/*
- * Build a device tree of ACPI PCI Bridges
- */
-static void pciehprm_acpi_register_a_bridge (
- struct acpi_bridge **head,
- struct acpi_bridge *pab, /* parent bridge to which child bridge is added */
- struct acpi_bridge *cab /* child bridge to add */
- )
-{
- struct acpi_bridge *lpab;
- struct acpi_bridge *lcab;
-
- lpab = find_acpi_bridge_by_bus(*head, pab->seg, pab->bus);
- if (!lpab) {
- if (!(pab->type & BRIDGE_TYPE_HOST))
- warn("PCI parent bridge s:b(%x:%x) not in list.\n", pab->seg, pab->bus);
- pab->next = *head;
- *head = pab;
- lpab = pab;
- }
-
- if ((cab->type & BRIDGE_TYPE_HOST) && (pab == cab))
- return;
-
- lcab = find_acpi_bridge_by_bus(*head, cab->seg, cab->bus);
- if (lcab) {
- if ((pab->bus != lcab->parent->bus) || (lcab->bus != cab->bus))
- err("PCI child bridge s:b(%x:%x) in list with diff parent.\n", cab->seg, cab->bus);
- return;
- } else
- lcab = cab;
-
- lcab->parent = lpab;
- lcab->next = lpab->child;
- lpab->child = lcab;
-}
-
-static acpi_status pciehprm_acpi_build_php_slots_callback(
- acpi_handle handle,
- u32 Level,
- void *context,
- void **retval
- )
-{
- ulong bus_num;
- ulong seg_num;
- ulong sun, adr;
- ulong padr = 0;
- acpi_handle phandle = NULL;
- struct acpi_bridge *pab = (struct acpi_bridge *)context;
- struct acpi_bridge *lab;
- acpi_status status;
- u8 *path_name = acpi_path_name(handle);
-
- /* get _SUN */
- status = acpi_evaluate_integer(handle, METHOD_NAME__SUN, NULL, &sun);
- switch(status) {
- case AE_NOT_FOUND:
- return AE_OK;
- default:
- if (ACPI_FAILURE(status)) {
- err("acpi_pciehprm:%s _SUN fail=0x%x\n", path_name, status);
- return status;
- }
- }
-
- /* get _ADR. _ADR must exist if _SUN exists */
- status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &adr);
- if (ACPI_FAILURE(status)) {
- err("acpi_pciehprm:%s _ADR fail=0x%x\n", path_name, status);
- return status;
- }
-
- dbg("acpi_pciehprm:%s sun=0x%08x adr=0x%08x\n", path_name, (u32)sun, (u32)adr);
-
- status = acpi_get_parent(handle, &phandle);
- if (ACPI_FAILURE(status)) {
- err("acpi_pciehprm:%s get_parent fail=0x%x\n", path_name, status);
- return (status);
- }
-
- bus_num = pab->bus;
- seg_num = pab->seg;
-
- if (pab->bus == bus_num) {
- lab = pab;
- } else {
- dbg("WARN: pab is not parent\n");
- lab = find_acpi_bridge_by_bus(pab, seg_num, bus_num);
- if (!lab) {
- dbg("acpi_pciehprm: alloc new P2P bridge(%x) for sun(%08x)\n", (u32)bus_num, (u32)sun);
- lab = (struct acpi_bridge *)kmalloc(sizeof(struct acpi_bridge), GFP_KERNEL);
- if (!lab) {
- err("acpi_pciehprm: alloc for ab fail\n");
- return AE_NO_MEMORY;
- }
- memset(lab, 0, sizeof(struct acpi_bridge));
-
- lab->handle = phandle;
- lab->pbus = pab->bus;
- lab->pdevice = (int)(padr >> 16) & 0xffff;
- lab->pfunction = (int)(padr & 0xffff);
- lab->bus = (int)bus_num;
- lab->scanned = 0;
- lab->type = BRIDGE_TYPE_P2P;
-
- pciehprm_acpi_register_a_bridge (&acpi_bridges_head, pab, lab);
- } else
- dbg("acpi_pciehprm: found P2P bridge(%x) for sun(%08x)\n", (u32)bus_num, (u32)sun);
- }
-
- acpi_add_slot_to_php_slots(lab, (int)bus_num, handle, (u32)adr, (u32)sun);
-
- return (status);
-}
-
-static int pciehprm_acpi_build_php_slots(
- struct acpi_bridge *ab,
- u32 depth
- )
-{
- acpi_status status;
- u8 *path_name = acpi_path_name(ab->handle);
-
- /* Walk down this pci bridge to get _SUNs if any behind P2P */
- status = acpi_walk_namespace ( ACPI_TYPE_DEVICE,
- ab->handle,
- depth,
- pciehprm_acpi_build_php_slots_callback,
- ab,
- NULL );
- if (ACPI_FAILURE(status)) {
- dbg("acpi_pciehprm:%s walk for _SUN on pci bridge seg:bus(%x:%x) fail=0x%x\n", path_name, ab->seg, ab->bus, status);
- return -1;
- }
-
- return 0;
-}
-
-static void build_a_bridge(
- struct acpi_bridge *pab,
- struct acpi_bridge *ab
- )
-{
- u8 *path_name = acpi_path_name(ab->handle);
-
- pciehprm_acpi_register_a_bridge (&acpi_bridges_head, pab, ab);
-
- switch (ab->type) {
- case BRIDGE_TYPE_HOST:
- dbg("acpi_pciehprm: Registered PCI HOST Bridge(%02x) on s:b:d:f(%02x:%02x:%02x:%02x) [%s]\n",
- ab->bus, ab->seg, ab->pbus, ab->pdevice, ab->pfunction, path_name);
- break;
- case BRIDGE_TYPE_P2P:
- dbg("acpi_pciehprm: Registered PCI P2P Bridge(%02x-%02x) on s:b:d:f(%02x:%02x:%02x:%02x) [%s]\n",
- ab->pbus, ab->bus, ab->seg, ab->pbus, ab->pdevice, ab->pfunction, path_name);
- break;
- };
-
- /* build any immediate PHP slots under this pci bridge */
- pciehprm_acpi_build_php_slots(ab, 1);
-}
-
-static struct acpi_bridge * add_p2p_bridge(
- acpi_handle handle,
- struct acpi_bridge *pab, /* parent */
- ulong adr
- )
-{
- struct acpi_bridge *ab;
- struct pci_dev *pdev;
- ulong devnum, funcnum;
- u8 *path_name = acpi_path_name(handle);
-
- ab = (struct acpi_bridge *) kmalloc (sizeof(struct acpi_bridge), GFP_KERNEL);
- if (!ab) {
- err("acpi_pciehprm: alloc for ab fail\n");
- return NULL;
- }
- memset(ab, 0, sizeof(struct acpi_bridge));
-
- devnum = (adr >> 16) & 0xffff;
- funcnum = adr & 0xffff;
-
- pdev = pci_find_slot(pab->bus, PCI_DEVFN(devnum, funcnum));
- if (!pdev || !pdev->subordinate) {
- err("acpi_pciehprm:%s is not a P2P Bridge\n", path_name);
- kfree(ab);
- return NULL;
- }
-
- ab->handle = handle;
- ab->seg = pab->seg;
- ab->pbus = pab->bus; /* or pdev->bus->number */
- ab->pdevice = devnum; /* or PCI_SLOT(pdev->devfn) */
- ab->pfunction = funcnum; /* or PCI_FUNC(pdev->devfn) */
- ab->bus = pdev->subordinate->number;
- ab->scanned = 0;
- ab->type = BRIDGE_TYPE_P2P;
-
- dbg("acpi_pciehprm: P2P(%x-%x) on pci=b:d:f(%x:%x:%x) acpi=b:d:f(%x:%x:%x) [%s]\n",
- pab->bus, ab->bus, pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn),
- pab->bus, (u32)devnum, (u32)funcnum, path_name);
-
- build_a_bridge(pab, ab);
-
- return ab;
-}
-
-static acpi_status scan_p2p_bridge(
- acpi_handle handle,
- u32 Level,
- void *context,
- void **retval
- )
-{
- struct acpi_bridge *pab = (struct acpi_bridge *)context;
- struct acpi_bridge *ab;
- acpi_status status;
- ulong adr = 0;
- u8 *path_name = acpi_path_name(handle);
- ulong devnum, funcnum;
- struct pci_dev *pdev;
-
- /* get device, function */
- status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &adr);
- if (ACPI_FAILURE(status)) {
- if (status != AE_NOT_FOUND)
- err("acpi_pciehprm:%s _ADR fail=0x%x\n", path_name, status);
- return AE_OK;
- }
-
- devnum = (adr >> 16) & 0xffff;
- funcnum = adr & 0xffff;
-
- pdev = pci_find_slot(pab->bus, PCI_DEVFN(devnum, funcnum));
- if (!pdev)
- return AE_OK;
- if (!pdev->subordinate)
- return AE_OK;
-
- ab = add_p2p_bridge(handle, pab, adr);
- if (ab) {
- status = acpi_walk_namespace ( ACPI_TYPE_DEVICE,
- handle,
- (u32)1,
- scan_p2p_bridge,
- ab,
- NULL);
+ chandle = handle;
+ status = acpi_get_parent(chandle, &handle);
if (ACPI_FAILURE(status))
- dbg("acpi_pciehprm:%s find_p2p fail=0x%x\n", path_name, status);
- }
-
- return AE_OK;
-}
-
-static struct acpi_bridge * add_host_bridge(
- acpi_handle handle,
- ulong segnum,
- ulong busnum
- )
-{
- ulong adr = 0;
- acpi_status status;
- struct acpi_bridge *ab;
- u8 *path_name = acpi_path_name(handle);
-
- /* get device, function: host br adr is always 0000 though. */
- status = acpi_evaluate_integer(handle, METHOD_NAME__ADR, NULL, &adr);
- if (ACPI_FAILURE(status)) {
- err("acpi_pciehprm:%s _ADR fail=0x%x\n", path_name, status);
- return NULL;
- }
- dbg("acpi_pciehprm: ROOT PCI seg(0x%x)bus(0x%x)dev(0x%x)func(0x%x) [%s]\n", (u32)segnum,
- (u32)busnum, (u32)(adr >> 16) & 0xffff, (u32)adr & 0xffff, path_name);
-
- ab = (struct acpi_bridge *) kmalloc (sizeof(struct acpi_bridge), GFP_KERNEL);
- if (!ab) {
- err("acpi_pciehprm: alloc for ab fail\n");
- return NULL;
- }
- memset(ab, 0, sizeof(struct acpi_bridge));
-
- ab->handle = handle;
- ab->seg = (int)segnum;
- ab->bus = ab->pbus = (int)busnum;
- ab->pdevice = (int)(adr >> 16) & 0xffff;
- ab->pfunction = (int)(adr & 0xffff);
- ab->scanned = 0;
- ab->type = BRIDGE_TYPE_HOST;
-
- /* get root pci bridge's current resources */
- status = acpi_get_crs(ab);
- if (ACPI_FAILURE(status)) {
- err("acpi_pciehprm:%s evaluate _CRS fail=0x%x\n", path_name, status);
- kfree(ab);
- return NULL;
- }
-
- status = pci_osc_control_set (OSC_PCI_EXPRESS_NATIVE_HP_CONTROL);
- if (ACPI_FAILURE(status)) {
- err("%s: status %x\n", __FUNCTION__, status);
- osc_run_status = (status == AE_NOT_FOUND) ? OSC_NOT_EXIST : OSC_RUN_FAILED;
- } else {
- osc_run_status = NC_RUN_SUCCESS;
- }
- dbg("%s: osc_run_status %x\n", __FUNCTION__, osc_run_status);
-
- build_a_bridge(ab, ab);
-
- return ab;
-}
-
-static acpi_status acpi_scan_from_root_pci_callback (
- acpi_handle handle,
- u32 Level,
- void *context,
- void **retval
- )
-{
- ulong segnum = 0;
- ulong busnum = 0;
- acpi_status status;
- struct acpi_bridge *ab;
- u8 *path_name = acpi_path_name(handle);
-
- /* get bus number of this pci root bridge */
- status = acpi_evaluate_integer(handle, METHOD_NAME__SEG, NULL, &segnum);
- if (ACPI_FAILURE(status)) {
- if (status != AE_NOT_FOUND) {
- err("acpi_pciehprm:%s evaluate _SEG fail=0x%x\n", path_name, status);
- return status;
- }
- segnum = 0;
- }
-
- /* get bus number of this pci root bridge */
- status = acpi_evaluate_integer(handle, METHOD_NAME__BBN, NULL, &busnum);
- if (ACPI_FAILURE(status)) {
- err("acpi_pciehprm:%s evaluate _BBN fail=0x%x\n", path_name, status);
- return (status);
- }
-
- ab = add_host_bridge(handle, segnum, busnum);
- if (ab) {
- status = acpi_walk_namespace ( ACPI_TYPE_DEVICE,
- handle,
- 1,
- scan_p2p_bridge,
- ab,
- NULL);
- if (ACPI_FAILURE(status))
- dbg("acpi_pciehprm:%s find_p2p fail=0x%x\n", path_name, status);
+ break;
}
- return AE_OK;
+ err("Cannot get control of hotplug hardware for pci %s\n",
+ pci_name(dev));
+ return -1;
}
-static int pciehprm_acpi_scan_pci (void)
+void pciehp_get_hp_params_from_firmware(struct pci_dev *dev,
+ struct hotplug_params *hpp)
{
- acpi_status status;
+ acpi_status status = AE_NOT_FOUND;
+ struct pci_dev *pdev = dev;
/*
- * TBD: traverse LDM device tree with the help of
- * unified ACPI augmented for php device population.
+ * _HPP settings apply to all child buses, until another _HPP is
+ * encountered. If we don't find an _HPP for the input pci dev,
+ * look for it in the parent device scope since that would apply to
+ * this pci dev. If we don't find any _HPP, use hardcoded defaults
*/
- status = acpi_get_devices ( PCI_ROOT_HID_STRING,
- acpi_scan_from_root_pci_callback,
- NULL,
- NULL );
- if (ACPI_FAILURE(status)) {
- err("acpi_pciehprm:get_device PCI ROOT HID fail=0x%x\n", status);
- return -1;
- }
-
- return 0;
-}
-
-int pciehprm_init(enum php_ctlr_type ctlr_type)
-{
- int rc;
-
- if (ctlr_type != PCI)
- return -ENODEV;
-
- dbg("pciehprm ACPI init <enter>\n");
- acpi_bridges_head = NULL;
-
- /* construct PCI bus:device tree of acpi_handles */
- rc = pciehprm_acpi_scan_pci();
- if (rc)
- return rc;
-
- if ((oshp_run_status != NC_RUN_SUCCESS) && (osc_run_status != NC_RUN_SUCCESS)) {
- err("Fails to gain control of native hot-plug\n");
- rc = -ENODEV;
- }
-
- dbg("pciehprm ACPI init %s\n", (rc)?"fail":"success");
- return rc;
-}
-
-static void free_a_slot(struct acpi_php_slot *aps)
-{
- dbg(" free a php func of slot(0x%02x) on PCI b:d:f=0x%02x:%02x:%02x\n", aps->sun, aps->bus, aps->dev, aps->fun);
-
- free_pci_resource (aps->io_head);
- free_pci_resource (aps->bus_head);
- free_pci_resource (aps->mem_head);
- free_pci_resource (aps->p_mem_head);
-
- kfree(aps);
-}
-
-static void free_a_bridge( struct acpi_bridge *ab)
-{
- struct acpi_php_slot *aps, *next;
-
- switch (ab->type) {
- case BRIDGE_TYPE_HOST:
- dbg("Free ACPI PCI HOST Bridge(%x) [%s] on s:b:d:f(%x:%x:%x:%x)\n",
- ab->bus, acpi_path_name(ab->handle), ab->seg, ab->pbus, ab->pdevice, ab->pfunction);
- break;
- case BRIDGE_TYPE_P2P:
- dbg("Free ACPI PCI P2P Bridge(%x-%x) [%s] on s:b:d:f(%x:%x:%x:%x)\n",
- ab->pbus, ab->bus, acpi_path_name(ab->handle), ab->seg, ab->pbus, ab->pdevice, ab->pfunction);
- break;
- };
-
- /* free slots first */
- for (aps = ab->slots; aps; aps = next) {
- next = aps->next;
- free_a_slot(aps);
- }
-
- free_pci_resource (ab->io_head);
- free_pci_resource (ab->tio_head);
- free_pci_resource (ab->bus_head);
- free_pci_resource (ab->tbus_head);
- free_pci_resource (ab->mem_head);
- free_pci_resource (ab->tmem_head);
- free_pci_resource (ab->p_mem_head);
- free_pci_resource (ab->tp_mem_head);
-
- kfree(ab);
-}
-
-static void pciehprm_free_bridges ( struct acpi_bridge *ab)
-{
- if (!ab)
- return;
-
- if (ab->child)
- pciehprm_free_bridges (ab->child);
-
- if (ab->next)
- pciehprm_free_bridges (ab->next);
-
- free_a_bridge(ab);
-}
-
-void pciehprm_cleanup(void)
-{
- pciehprm_free_bridges (acpi_bridges_head);
-}
-
-static int get_number_of_slots (
- struct acpi_bridge *ab,
- int selfonly
- )
-{
- struct acpi_php_slot *aps;
- int prev_slot = -1;
- int slot_num = 0;
-
- for ( aps = ab->slots; aps; aps = aps->next)
- if (aps->dev != prev_slot) {
- prev_slot = aps->dev;
- slot_num++;
- }
-
- if (ab->child)
- slot_num += get_number_of_slots (ab->child, 0);
-
- if (selfonly)
- return slot_num;
-
- if (ab->next)
- slot_num += get_number_of_slots (ab->next, 0);
-
- return slot_num;
-}
-
-static int print_acpi_resources (struct acpi_bridge *ab)
-{
- struct acpi_php_slot *aps;
- int i;
-
- switch (ab->type) {
- case BRIDGE_TYPE_HOST:
- dbg("PCI HOST Bridge (%x) [%s]\n", ab->bus, acpi_path_name(ab->handle));
- break;
- case BRIDGE_TYPE_P2P:
- dbg("PCI P2P Bridge (%x-%x) [%s]\n", ab->pbus, ab->bus, acpi_path_name(ab->handle));
- break;
- };
-
- print_pci_resources (ab);
-
- for ( i = -1, aps = ab->slots; aps; aps = aps->next) {
- if (aps->dev == i)
- continue;
- dbg(" Slot sun(%x) s:b:d:f(%02x:%02x:%02x:%02x)\n", aps->sun, aps->seg, aps->bus, aps->dev, aps->fun);
- print_slot_resources(aps);
- i = aps->dev;
- }
-
- if (ab->child)
- print_acpi_resources (ab->child);
-
- if (ab->next)
- print_acpi_resources (ab->next);
-
- return 0;
-}
-
-int pciehprm_print_pirt(void)
-{
- dbg("PCIEHPRM ACPI Slots\n");
- if (acpi_bridges_head)
- print_acpi_resources (acpi_bridges_head);
-
- return 0;
-}
-
-static struct acpi_php_slot * get_acpi_slot (
- struct acpi_bridge *ab,
- u32 sun
- )
-{
- struct acpi_php_slot *aps = NULL;
-
- for ( aps = ab->slots; aps; aps = aps->next)
- if (aps->sun == sun)
- return aps;
-
- if (!aps && ab->child) {
- aps = (struct acpi_php_slot *)get_acpi_slot (ab->child, sun);
- if (aps)
- return aps;
- }
-
- if (!aps && ab->next) {
- aps = (struct acpi_php_slot *)get_acpi_slot (ab->next, sun);
- if (aps)
- return aps;
- }
-
- return aps;
-
-}
-
-#if 0
-void * pciehprm_get_slot(struct slot *slot)
-{
- struct acpi_bridge *ab = acpi_bridges_head;
- struct acpi_php_slot *aps = get_acpi_slot (ab, slot->number);
-
- aps->slot = slot;
-
- dbg("Got acpi slot sun(%x): s:b:d:f(%x:%x:%x:%x)\n", aps->sun, aps->seg, aps->bus, aps->dev, aps->fun);
-
- return (void *)aps;
-}
-#endif
-
-static void pciehprm_dump_func_res( struct pci_func *fun)
-{
- struct pci_func *func = fun;
-
- if (func->bus_head) {
- dbg(": BUS Resources:\n");
- print_pci_resource (func->bus_head);
- }
- if (func->io_head) {
- dbg(": IO Resources:\n");
- print_pci_resource (func->io_head);
- }
- if (func->mem_head) {
- dbg(": MEM Resources:\n");
- print_pci_resource (func->mem_head);
- }
- if (func->p_mem_head) {
- dbg(": PMEM Resources:\n");
- print_pci_resource (func->p_mem_head);
- }
-}
-
-static void pciehprm_dump_ctrl_res( struct controller *ctlr)
-{
- struct controller *ctrl = ctlr;
-
- if (ctrl->bus_head) {
- dbg(": BUS Resources:\n");
- print_pci_resource (ctrl->bus_head);
- }
- if (ctrl->io_head) {
- dbg(": IO Resources:\n");
- print_pci_resource (ctrl->io_head);
- }
- if (ctrl->mem_head) {
- dbg(": MEM Resources:\n");
- print_pci_resource (ctrl->mem_head);
- }
- if (ctrl->p_mem_head) {
- dbg(": PMEM Resources:\n");
- print_pci_resource (ctrl->p_mem_head);
- }
-}
-
-static int pciehprm_get_used_resources (
- struct controller *ctrl,
- struct pci_func *func
- )
-{
- return pciehp_save_used_resources (ctrl, func, !DISABLE_CARD);
-}
-
-static int configure_existing_function(
- struct controller *ctrl,
- struct pci_func *func
- )
-{
- int rc;
-
- /* see how much resources the func has used. */
- rc = pciehprm_get_used_resources (ctrl, func);
-
- if (!rc) {
- /* subtract the resources used by the func from ctrl resources */
- rc = pciehprm_delete_resources (&ctrl->bus_head, func->bus_head);
- rc |= pciehprm_delete_resources (&ctrl->io_head, func->io_head);
- rc |= pciehprm_delete_resources (&ctrl->mem_head, func->mem_head);
- rc |= pciehprm_delete_resources (&ctrl->p_mem_head, func->p_mem_head);
- if (rc)
- warn("aCEF: cannot del used resources\n");
- } else
- err("aCEF: cannot get used resources\n");
-
- return rc;
-}
-
-static int bind_pci_resources_to_slots ( struct controller *ctrl)
-{
- struct pci_func *func, new_func;
- int busn = ctrl->slot_bus;
- int devn, funn;
- u32 vid;
-
- for (devn = 0; devn < 32; devn++) {
- for (funn = 0; funn < 8; funn++) {
- /*
- if (devn == ctrl->device && funn == ctrl->function)
- continue;
- */
- /* find out if this entry is for an occupied slot */
- vid = 0xFFFFFFFF;
- pci_bus_read_config_dword(ctrl->pci_dev->subordinate, PCI_DEVFN(devn, funn), PCI_VENDOR_ID, &vid);
-
- if (vid != 0xFFFFFFFF) {
- dbg("%s: vid = %x\n", __FUNCTION__, vid);
- func = pciehp_slot_find(busn, devn, funn);
- if (!func) {
- memset(&new_func, 0, sizeof(struct pci_func));
- new_func.bus = busn;
- new_func.device = devn;
- new_func.function = funn;
- new_func.is_a_board = 1;
- configure_existing_function(ctrl, &new_func);
- pciehprm_dump_func_res(&new_func);
- } else {
- configure_existing_function(ctrl, func);
- pciehprm_dump_func_res(func);
- }
- dbg("aCCF:existing PCI 0x%x Func ResourceDump\n", ctrl->bus);
- }
- }
- }
-
- return 0;
-}
-
-static int bind_pci_resources(
- struct controller *ctrl,
- struct acpi_bridge *ab
- )
-{
- int status = 0;
-
- if (ab->bus_head) {
- dbg("bapr: BUS Resources add on PCI 0x%x\n", ab->bus);
- status = pciehprm_add_resources (&ctrl->bus_head, ab->bus_head);
- if (pciehprm_delete_resources (&ab->bus_head, ctrl->bus_head))
- warn("bapr: cannot sub BUS Resource on PCI 0x%x\n", ab->bus);
- if (status) {
- err("bapr: BUS Resource add on PCI 0x%x: fail=0x%x\n", ab->bus, status);
- return status;
- }
- } else
- info("bapr: No BUS Resource on PCI 0x%x.\n", ab->bus);
-
- if (ab->io_head) {
- dbg("bapr: IO Resources add on PCI 0x%x\n", ab->bus);
- status = pciehprm_add_resources (&ctrl->io_head, ab->io_head);
- if (pciehprm_delete_resources (&ab->io_head, ctrl->io_head))
- warn("bapr: cannot sub IO Resource on PCI 0x%x\n", ab->bus);
- if (status) {
- err("bapr: IO Resource add on PCI 0x%x: fail=0x%x\n", ab->bus, status);
- return status;
- }
- } else
- info("bapr: No IO Resource on PCI 0x%x.\n", ab->bus);
-
- if (ab->mem_head) {
- dbg("bapr: MEM Resources add on PCI 0x%x\n", ab->bus);
- status = pciehprm_add_resources (&ctrl->mem_head, ab->mem_head);
- if (pciehprm_delete_resources (&ab->mem_head, ctrl->mem_head))
- warn("bapr: cannot sub MEM Resource on PCI 0x%x\n", ab->bus);
- if (status) {
- err("bapr: MEM Resource add on PCI 0x%x: fail=0x%x\n", ab->bus, status);
- return status;
- }
- } else
- info("bapr: No MEM Resource on PCI 0x%x.\n", ab->bus);
-
- if (ab->p_mem_head) {
- dbg("bapr: PMEM Resources add on PCI 0x%x\n", ab->bus);
- status = pciehprm_add_resources (&ctrl->p_mem_head, ab->p_mem_head);
- if (pciehprm_delete_resources (&ab->p_mem_head, ctrl->p_mem_head))
- warn("bapr: cannot sub PMEM Resource on PCI 0x%x\n", ab->bus);
- if (status) {
- err("bapr: PMEM Resource add on PCI 0x%x: fail=0x%x\n", ab->bus, status);
- return status;
- }
- } else
- info("bapr: No PMEM Resource on PCI 0x%x.\n", ab->bus);
-
- return status;
-}
-
-static int no_pci_resources( struct acpi_bridge *ab)
-{
- return !(ab->p_mem_head || ab->mem_head || ab->io_head || ab->bus_head);
-}
-
-static int find_pci_bridge_resources (
- struct controller *ctrl,
- struct acpi_bridge *ab
- )
-{
- int rc = 0;
- struct pci_func func;
-
- memset(&func, 0, sizeof(struct pci_func));
-
- func.bus = ab->pbus;
- func.device = ab->pdevice;
- func.function = ab->pfunction;
- func.is_a_board = 1;
-
- /* Get used resources for this PCI bridge */
- rc = pciehp_save_used_resources (ctrl, &func, !DISABLE_CARD);
-
- ab->io_head = func.io_head;
- ab->mem_head = func.mem_head;
- ab->p_mem_head = func.p_mem_head;
- ab->bus_head = func.bus_head;
- if (ab->bus_head)
- pciehprm_delete_resource(&ab->bus_head, ctrl->pci_dev->subordinate->number, 1);
-
- return rc;
-}
-
-static int get_pci_resources_from_bridge(
- struct controller *ctrl,
- struct acpi_bridge *ab
- )
-{
- int rc = 0;
-
- dbg("grfb: Get Resources for PCI 0x%x from actual PCI bridge 0x%x.\n", ctrl->bus, ab->bus);
-
- rc = find_pci_bridge_resources (ctrl, ab);
-
- pciehp_resource_sort_and_combine(&ab->bus_head);
- pciehp_resource_sort_and_combine(&ab->io_head);
- pciehp_resource_sort_and_combine(&ab->mem_head);
- pciehp_resource_sort_and_combine(&ab->p_mem_head);
-
- pciehprm_add_resources (&ab->tbus_head, ab->bus_head);
- pciehprm_add_resources (&ab->tio_head, ab->io_head);
- pciehprm_add_resources (&ab->tmem_head, ab->mem_head);
- pciehprm_add_resources (&ab->tp_mem_head, ab->p_mem_head);
-
- return rc;
-}
-
-static int get_pci_resources(
- struct controller *ctrl,
- struct acpi_bridge *ab
- )
-{
- int rc = 0;
-
- if (no_pci_resources(ab)) {
- dbg("spbr:PCI 0x%x has no resources. Get parent resources.\n", ab->bus);
- rc = get_pci_resources_from_bridge(ctrl, ab);
- }
-
- return rc;
-}
-
-/*
- * Get resources for this ctrl.
- * 1. get total resources from ACPI _CRS or bridge (this ctrl)
- * 2. find used resources of existing adapters
- * 3. subtract used resources from total resources
- */
-int pciehprm_find_available_resources( struct controller *ctrl)
-{
- int rc = 0;
- struct acpi_bridge *ab;
-
- ab = find_acpi_bridge_by_bus(acpi_bridges_head, ctrl->seg, ctrl->pci_dev->subordinate->number);
- if (!ab) {
- err("pfar:cannot locate acpi bridge of PCI 0x%x.\n", ctrl->pci_dev->subordinate->number);
- return -1;
- }
- if (no_pci_resources(ab)) {
- rc = get_pci_resources(ctrl, ab);
- if (rc) {
- err("pfar:cannot get pci resources of PCI 0x%x.\n", ctrl->pci_dev->subordinate->number);
- return -1;
- }
- }
-
- rc = bind_pci_resources(ctrl, ab);
- dbg("pfar:pre-Bind PCI 0x%x Ctrl Resource Dump\n", ctrl->pci_dev->subordinate->number);
- pciehprm_dump_ctrl_res(ctrl);
-
- bind_pci_resources_to_slots (ctrl);
-
- dbg("pfar:post-Bind PCI 0x%x Ctrl Resource Dump\n", ctrl->pci_dev->subordinate->number);
- pciehprm_dump_ctrl_res(ctrl);
-
- return rc;
-}
-
-int pciehprm_set_hpp(
- struct controller *ctrl,
- struct pci_func *func,
- u8 card_type
- )
-{
- struct acpi_bridge *ab;
- struct pci_bus lpci_bus, *pci_bus;
- int rc = 0;
- unsigned int devfn;
- u8 cls= 0x08; /* default cache line size */
- u8 lt = 0x40; /* default latency timer */
- u8 ep = 0;
- u8 es = 0;
-
- memcpy(&lpci_bus, ctrl->pci_bus, sizeof(lpci_bus));
- pci_bus = &lpci_bus;
- pci_bus->number = func->bus;
- devfn = PCI_DEVFN(func->device, func->function);
-
- ab = find_acpi_bridge_by_bus(acpi_bridges_head, ctrl->seg, ctrl->bus);
-
- if (ab) {
- if (ab->_hpp) {
- lt = (u8)ab->_hpp->latency_timer;
- cls = (u8)ab->_hpp->cache_line_size;
- ep = (u8)ab->_hpp->enable_perr;
- es = (u8)ab->_hpp->enable_serr;
- } else
- dbg("_hpp: no _hpp for B/D/F=%#x/%#x/%#x. use default value\n", func->bus, func->device, func->function);
- } else
- dbg("_hpp: no acpi bridge for B/D/F = %#x/%#x/%#x. use default value\n", func->bus, func->device, func->function);
-
-
- if (card_type == PCI_HEADER_TYPE_BRIDGE) {
- /* set subordinate Latency Timer */
- rc |= pci_bus_write_config_byte(pci_bus, devfn, PCI_SEC_LATENCY_TIMER, lt);
+ while (pdev && (ACPI_FAILURE(status))) {
+ acpi_handle handle = DEVICE_ACPI_HANDLE(&(pdev->dev));
+ if (!handle)
+ break;
+ status = acpi_run_hpp(handle, hpp);
+ if (!(pdev->bus->parent))
+ break;
+ /* Check if a parent object supports _HPP */
+ pdev = pdev->bus->parent->self;
}
-
- /* set base Latency Timer */
- rc |= pci_bus_write_config_byte(pci_bus, devfn, PCI_LATENCY_TIMER, lt);
- dbg(" set latency timer =0x%02x: %x\n", lt, rc);
-
- rc |= pci_bus_write_config_byte(pci_bus, devfn, PCI_CACHE_LINE_SIZE, cls);
- dbg(" set cache_line_size=0x%02x: %x\n", cls, rc);
-
- return rc;
}
-void pciehprm_enable_card(
- struct controller *ctrl,
- struct pci_func *func,
- u8 card_type)
-{
- u16 command, cmd, bcommand, bcmd;
- struct pci_bus lpci_bus, *pci_bus;
- struct acpi_bridge *ab;
- unsigned int devfn;
- int rc;
-
- memcpy(&lpci_bus, ctrl->pci_bus, sizeof(lpci_bus));
- pci_bus = &lpci_bus;
- pci_bus->number = func->bus;
- devfn = PCI_DEVFN(func->device, func->function);
-
- rc = pci_bus_read_config_word(pci_bus, devfn, PCI_COMMAND, &cmd);
-
- if (card_type == PCI_HEADER_TYPE_BRIDGE) {
- rc = pci_bus_read_config_word(pci_bus, devfn, PCI_BRIDGE_CONTROL, &bcmd);
- }
-
- command = cmd | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE
- | PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
- bcommand = bcmd | PCI_BRIDGE_CTL_NO_ISA;
-
- ab = find_acpi_bridge_by_bus(acpi_bridges_head, ctrl->seg, ctrl->bus);
- if (ab) {
- if (ab->_hpp) {
- if (ab->_hpp->enable_perr) {
- command |= PCI_COMMAND_PARITY;
- bcommand |= PCI_BRIDGE_CTL_PARITY;
- } else {
- command &= ~PCI_COMMAND_PARITY;
- bcommand &= ~PCI_BRIDGE_CTL_PARITY;
- }
- if (ab->_hpp->enable_serr) {
- command |= PCI_COMMAND_SERR;
- bcommand |= PCI_BRIDGE_CTL_SERR;
- } else {
- command &= ~PCI_COMMAND_SERR;
- bcommand &= ~PCI_BRIDGE_CTL_SERR;
- }
- } else
- dbg("no _hpp for B/D/F = %#x/%#x/%#x.\n", func->bus, func->device, func->function);
- } else
- dbg("no acpi bridge for B/D/F = %#x/%#x/%#x.\n", func->bus, func->device, func->function);
-
- if (command != cmd) {
- rc = pci_bus_write_config_word(pci_bus, devfn, PCI_COMMAND, command);
- }
- if ((card_type == PCI_HEADER_TYPE_BRIDGE) && (bcommand != bcmd)) {
- rc = pci_bus_write_config_word(pci_bus, devfn, PCI_BRIDGE_CONTROL, bcommand);
- }
-}
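The replacement hunk above (the while loop walking pdev->bus->parent->self) stops programming _HPP values inline and instead fills a hotplug_params structure via acpi_run_hpp(). That structure is defined elsewhere in the series, but judging from the _hpp fields the deleted code consumed, it presumably carries just the four firmware-provided settings, along these lines:

/* Presumed layout, inferred from the deleted code; not taken from the patch. */
struct hotplug_params {
	u8	cache_line_size;
	u8	latency_timer;
	u8	enable_serr;
	u8	enable_perr;
};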
diff --git a/drivers/pci/hotplug/pciehprm_nonacpi.c b/drivers/pci/hotplug/pciehprm_nonacpi.c
index 76c727c74cc..29180dfe849 100644
--- a/drivers/pci/hotplug/pciehprm_nonacpi.c
+++ b/drivers/pci/hotplug/pciehprm_nonacpi.c
@@ -27,479 +27,21 @@
*
*/
-#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/pci.h>
-#include <linux/init.h>
#include <linux/slab.h>
-
-#include <asm/uaccess.h>
-#ifdef CONFIG_IA64
-#include <asm/iosapic.h>
-#endif
-
#include "pciehp.h"
-#include "pciehprm.h"
-#include "pciehprm_nonacpi.h"
-
-void pciehprm_cleanup(void)
+void pciehp_get_hp_params_from_firmware(struct pci_dev *dev,
+ struct hotplug_params *hpp)
{
return;
}
-int pciehprm_print_pirt(void)
-{
- return 0;
-}
-
-int pciehprm_get_physical_slot_number(struct controller *ctrl, u32 *sun, u8 busnum, u8 devnum)
-{
-
- *sun = (u8) (ctrl->first_slot);
- return 0;
-}
-
-
-static void print_pci_resource ( struct pci_resource *aprh)
-{
- struct pci_resource *res;
-
- for (res = aprh; res; res = res->next)
- dbg(" base= 0x%x length= 0x%x\n", res->base, res->length);
-}
-
-
-static void phprm_dump_func_res( struct pci_func *fun)
-{
- struct pci_func *func = fun;
-
- if (func->bus_head) {
- dbg(": BUS Resources:\n");
- print_pci_resource (func->bus_head);
- }
- if (func->io_head) {
- dbg(": IO Resources:\n");
- print_pci_resource (func->io_head);
- }
- if (func->mem_head) {
- dbg(": MEM Resources:\n");
- print_pci_resource (func->mem_head);
- }
- if (func->p_mem_head) {
- dbg(": PMEM Resources:\n");
- print_pci_resource (func->p_mem_head);
- }
-}
-
-static int phprm_get_used_resources (
- struct controller *ctrl,
- struct pci_func *func
- )
-{
- return pciehp_save_used_resources (ctrl, func, !DISABLE_CARD);
-}
-
-static int phprm_delete_resource(
- struct pci_resource **aprh,
- ulong base,
- ulong size)
-{
- struct pci_resource *res;
- struct pci_resource *prevnode;
- struct pci_resource *split_node;
- ulong tbase;
-
- pciehp_resource_sort_and_combine(aprh);
-
- for (res = *aprh; res; res = res->next) {
- if (res->base > base)
- continue;
-
- if ((res->base + res->length) < (base + size))
- continue;
-
- if (res->base < base) {
- tbase = base;
-
- if ((res->length - (tbase - res->base)) < size)
- continue;
-
- split_node = (struct pci_resource *) kmalloc(sizeof(struct pci_resource), GFP_KERNEL);
- if (!split_node)
- return -ENOMEM;
-
- split_node->base = res->base;
- split_node->length = tbase - res->base;
- res->base = tbase;
- res->length -= split_node->length;
-
- split_node->next = res->next;
- res->next = split_node;
- }
-
- if (res->length >= size) {
- split_node = (struct pci_resource*) kmalloc(sizeof(struct pci_resource), GFP_KERNEL);
- if (!split_node)
- return -ENOMEM;
-
- split_node->base = res->base + size;
- split_node->length = res->length - size;
- res->length = size;
-
- split_node->next = res->next;
- res->next = split_node;
- }
-
- if (*aprh == res) {
- *aprh = res->next;
- } else {
- prevnode = *aprh;
- while (prevnode->next != res)
- prevnode = prevnode->next;
-
- prevnode->next = res->next;
- }
- res->next = NULL;
- kfree(res);
- break;
- }
-
- return 0;
-}
-
-
-static int phprm_delete_resources(
- struct pci_resource **aprh,
- struct pci_resource *this
- )
-{
- struct pci_resource *res;
-
- for (res = this; res; res = res->next)
- phprm_delete_resource(aprh, res->base, res->length);
-
- return 0;
-}
-
-
-static int configure_existing_function(
- struct controller *ctrl,
- struct pci_func *func
- )
-{
- int rc;
-
- /* see how much resources the func has used. */
- rc = phprm_get_used_resources (ctrl, func);
-
- if (!rc) {
- /* subtract the resources used by the func from ctrl resources */
- rc = phprm_delete_resources (&ctrl->bus_head, func->bus_head);
- rc |= phprm_delete_resources (&ctrl->io_head, func->io_head);
- rc |= phprm_delete_resources (&ctrl->mem_head, func->mem_head);
- rc |= phprm_delete_resources (&ctrl->p_mem_head, func->p_mem_head);
- if (rc)
- warn("aCEF: cannot del used resources\n");
- } else
- err("aCEF: cannot get used resources\n");
-
- return rc;
-}
-
-static int pciehprm_delete_resource(
- struct pci_resource **aprh,
- ulong base,
- ulong size)
-{
- struct pci_resource *res;
- struct pci_resource *prevnode;
- struct pci_resource *split_node;
- ulong tbase;
-
- pciehp_resource_sort_and_combine(aprh);
-
- for (res = *aprh; res; res = res->next) {
- if (res->base > base)
- continue;
-
- if ((res->base + res->length) < (base + size))
- continue;
-
- if (res->base < base) {
- tbase = base;
-
- if ((res->length - (tbase - res->base)) < size)
- continue;
-
- split_node = (struct pci_resource *) kmalloc(sizeof(struct pci_resource), GFP_KERNEL);
- if (!split_node)
- return -ENOMEM;
-
- split_node->base = res->base;
- split_node->length = tbase - res->base;
- res->base = tbase;
- res->length -= split_node->length;
-
- split_node->next = res->next;
- res->next = split_node;
- }
-
- if (res->length >= size) {
- split_node = (struct pci_resource*) kmalloc(sizeof(struct pci_resource), GFP_KERNEL);
- if (!split_node)
- return -ENOMEM;
-
- split_node->base = res->base + size;
- split_node->length = res->length - size;
- res->length = size;
-
- split_node->next = res->next;
- res->next = split_node;
- }
-
- if (*aprh == res) {
- *aprh = res->next;
- } else {
- prevnode = *aprh;
- while (prevnode->next != res)
- prevnode = prevnode->next;
-
- prevnode->next = res->next;
- }
- res->next = NULL;
- kfree(res);
- break;
- }
-
- return 0;
-}
-
-static int bind_pci_resources_to_slots ( struct controller *ctrl)
+int pciehp_get_hp_hw_control_from_firmware(struct pci_dev *dev)
{
- struct pci_func *func, new_func;
- int busn = ctrl->slot_bus;
- int devn, funn;
- u32 vid;
-
- for (devn = 0; devn < 32; devn++) {
- for (funn = 0; funn < 8; funn++) {
- /*
- if (devn == ctrl->device && funn == ctrl->function)
- continue;
- */
- /* find out if this entry is for an occupied slot */
- vid = 0xFFFFFFFF;
-
- pci_bus_read_config_dword(ctrl->pci_dev->subordinate, PCI_DEVFN(devn, funn), PCI_VENDOR_ID, &vid);
-
- if (vid != 0xFFFFFFFF) {
- dbg("%s: vid = %x bus %x dev %x fun %x\n", __FUNCTION__,
- vid, busn, devn, funn);
- func = pciehp_slot_find(busn, devn, funn);
- dbg("%s: func = %p\n", __FUNCTION__,func);
- if (!func) {
- memset(&new_func, 0, sizeof(struct pci_func));
- new_func.bus = busn;
- new_func.device = devn;
- new_func.function = funn;
- new_func.is_a_board = 1;
- configure_existing_function(ctrl, &new_func);
- phprm_dump_func_res(&new_func);
- } else {
- configure_existing_function(ctrl, func);
- phprm_dump_func_res(func);
- }
- dbg("aCCF:existing PCI 0x%x Func ResourceDump\n", ctrl->bus);
- }
- }
- }
-
return 0;
}
-
-static void phprm_dump_ctrl_res( struct controller *ctlr)
-{
- struct controller *ctrl = ctlr;
-
- if (ctrl->bus_head) {
- dbg(": BUS Resources:\n");
- print_pci_resource (ctrl->bus_head);
- }
- if (ctrl->io_head) {
- dbg(": IO Resources:\n");
- print_pci_resource (ctrl->io_head);
- }
- if (ctrl->mem_head) {
- dbg(": MEM Resources:\n");
- print_pci_resource (ctrl->mem_head);
- }
- if (ctrl->p_mem_head) {
- dbg(": PMEM Resources:\n");
- print_pci_resource (ctrl->p_mem_head);
- }
-}
-
-/*
- * phprm_find_available_resources
- *
- * Finds available memory, IO, and IRQ resources for programming
- * devices which may be added to the system
- * this function is for hot plug ADD!
- *
- * returns 0 if success
- */
-int pciehprm_find_available_resources(struct controller *ctrl)
-{
- struct pci_func func;
- u32 rc;
-
- memset(&func, 0, sizeof(struct pci_func));
-
- func.bus = ctrl->bus;
- func.device = ctrl->device;
- func.function = ctrl->function;
- func.is_a_board = 1;
-
- /* Get resources for this PCI bridge */
- rc = pciehp_save_used_resources (ctrl, &func, !DISABLE_CARD);
- dbg("%s: pciehp_save_used_resources rc = %d\n", __FUNCTION__, rc);
-
- if (func.mem_head)
- func.mem_head->next = ctrl->mem_head;
- ctrl->mem_head = func.mem_head;
-
- if (func.p_mem_head)
- func.p_mem_head->next = ctrl->p_mem_head;
- ctrl->p_mem_head = func.p_mem_head;
-
- if (func.io_head)
- func.io_head->next = ctrl->io_head;
- ctrl->io_head = func.io_head;
-
- if(func.bus_head)
- func.bus_head->next = ctrl->bus_head;
- ctrl->bus_head = func.bus_head;
-
- if (ctrl->bus_head)
- pciehprm_delete_resource(&ctrl->bus_head, ctrl->pci_dev->subordinate->number, 1);
-
- dbg("%s:pre-Bind PCI 0x%x Ctrl Resource Dump\n", __FUNCTION__, ctrl->bus);
- phprm_dump_ctrl_res(ctrl);
-
- dbg("%s: before bind_pci_resources_to slots\n", __FUNCTION__);
-
- bind_pci_resources_to_slots (ctrl);
-
- dbg("%s:post-Bind PCI 0x%x Ctrl Resource Dump\n", __FUNCTION__, ctrl->bus);
- phprm_dump_ctrl_res(ctrl);
-
- return (rc);
-}
-
-int pciehprm_set_hpp(
- struct controller *ctrl,
- struct pci_func *func,
- u8 card_type)
-{
- u32 rc;
- u8 temp_byte;
- struct pci_bus lpci_bus, *pci_bus;
- unsigned int devfn;
- memcpy(&lpci_bus, ctrl->pci_bus, sizeof(lpci_bus));
- pci_bus = &lpci_bus;
- pci_bus->number = func->bus;
- devfn = PCI_DEVFN(func->device, func->function);
-
- temp_byte = 0x40; /* hard coded value for LT */
- if (card_type == PCI_HEADER_TYPE_BRIDGE) {
- /* set subordinate Latency Timer */
- rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_SEC_LATENCY_TIMER, temp_byte);
-
- if (rc) {
- dbg("%s: set secondary LT error. b:d:f(%02x:%02x:%02x)\n", __FUNCTION__,
- func->bus, func->device, func->function);
- return rc;
- }
- }
-
- /* set base Latency Timer */
- rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_LATENCY_TIMER, temp_byte);
-
- if (rc) {
- dbg("%s: set LT error. b:d:f(%02x:%02x:%02x)\n", __FUNCTION__, func->bus, func->device, func->function);
- return rc;
- }
-
- /* set Cache Line size */
- temp_byte = 0x08; /* hard coded value for CLS */
-
- rc = pci_bus_write_config_byte(pci_bus, devfn, PCI_CACHE_LINE_SIZE, temp_byte);
-
- if (rc) {
- dbg("%s: set CLS error. b:d:f(%02x:%02x:%02x)\n", __FUNCTION__, func->bus, func->device, func->function);
- }
-
- /* set enable_perr */
- /* set enable_serr */
-
- return rc;
-}
-
-void pciehprm_enable_card(
- struct controller *ctrl,
- struct pci_func *func,
- u8 card_type)
-{
- u16 command, bcommand;
- struct pci_bus lpci_bus, *pci_bus;
- unsigned int devfn;
- int rc;
-
- memcpy(&lpci_bus, ctrl->pci_bus, sizeof(lpci_bus));
- pci_bus = &lpci_bus;
- pci_bus->number = func->bus;
- devfn = PCI_DEVFN(func->device, func->function);
-
- rc = pci_bus_read_config_word(pci_bus, devfn, PCI_COMMAND, &command);
-
- command |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR
- | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE
- | PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
-
- rc = pci_bus_write_config_word(pci_bus, devfn, PCI_COMMAND, command);
-
- if (card_type == PCI_HEADER_TYPE_BRIDGE) {
-
- rc = pci_bus_read_config_word(pci_bus, devfn, PCI_BRIDGE_CONTROL, &bcommand);
-
- bcommand |= PCI_BRIDGE_CTL_PARITY | PCI_BRIDGE_CTL_SERR
- | PCI_BRIDGE_CTL_NO_ISA;
-
- rc = pci_bus_write_config_word(pci_bus, devfn, PCI_BRIDGE_CONTROL, bcommand);
- }
-}
-
-static int legacy_pciehprm_init_pci(void)
-{
- return 0;
-}
-
-int pciehprm_init(enum php_ctlr_type ctrl_type)
-{
- int retval;
-
- switch (ctrl_type) {
- case PCI:
- retval = legacy_pciehprm_init_pci();
- break;
- default:
- retval = -ENODEV;
- break;
- }
-
- return retval;
-}
diff --git a/drivers/pci/hotplug/pciehprm_nonacpi.h b/drivers/pci/hotplug/pciehprm_nonacpi.h
deleted file mode 100644
index b10603b0e95..00000000000
--- a/drivers/pci/hotplug/pciehprm_nonacpi.h
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * PCIEHPRM NONACPI: PHP Resource Manager for Non-ACPI/Legacy platform
- *
- * Copyright (C) 1995,2001 Compaq Computer Corporation
- * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
- * Copyright (C) 2001 IBM Corp.
- * Copyright (C) 2003-2004 Intel Corporation
- *
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- *
- * Send feedback to <greg@kroah.com>, <kristen.c.accardi@intel.com>
- *
- */
-
-#ifndef _PCIEHPRM_NONACPI_H_
-#define _PCIEHPRM_NONACPI_H_
-
-struct irq_info {
- u8 bus, devfn; /* bus, device and function */
- struct {
- u8 link; /* IRQ line ID, chipset dependent, 0=not routed */
- u16 bitmap; /* Available IRQs */
- } __attribute__ ((packed)) irq[4];
- u8 slot; /* slot number, 0=onboard */
- u8 rfu;
-} __attribute__ ((packed));
-
-struct irq_routing_table {
- u32 signature; /* PIRQ_SIGNATURE should be here */
- u16 version; /* PIRQ_VERSION */
- u16 size; /* Table size in bytes */
- u8 rtr_bus, rtr_devfn; /* Where the interrupt router lies */
- u16 exclusive_irqs; /* IRQs devoted exclusively to PCI usage */
- u16 rtr_vendor, rtr_device; /* Vendor and device ID of interrupt router */
- u32 miniport_data; /* Crap */
- u8 rfu[11];
- u8 checksum; /* Modulo 256 checksum must give zero */
- struct irq_info slots[0];
-} __attribute__ ((packed));
-
-#endif /* _PCIEHPRM_NONACPI_H_ */
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index fcb66b9a0e2..cc03609f45d 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -134,43 +134,6 @@ static void rpadlpar_claim_one_bus(struct pci_bus *b)
rpadlpar_claim_one_bus(child_bus);
}
-static int pci_add_secondary_bus(struct device_node *dn,
- struct pci_dev *bridge_dev)
-{
- struct pci_dn *pdn = dn->data;
- struct pci_controller *hose = pdn->phb;
- struct pci_bus *child;
- u8 sec_busno;
-
- /* Get busno of downstream bus */
- pci_read_config_byte(bridge_dev, PCI_SECONDARY_BUS, &sec_busno);
-
- /* Allocate and add to children of bridge_dev->bus */
- child = pci_add_new_bus(bridge_dev->bus, bridge_dev, sec_busno);
- if (!child) {
- printk(KERN_ERR "%s: could not add secondary bus\n", __FUNCTION__);
- return -ENOMEM;
- }
-
- sprintf(child->name, "PCI Bus #%02x", child->number);
-
- /* Fixup subordinate bridge bases and resources */
- pcibios_fixup_bus(child);
-
- /* Claim new bus resources */
- rpadlpar_claim_one_bus(bridge_dev->bus);
-
- if (hose->last_busno < child->number)
- hose->last_busno = child->number;
-
- pdn->bussubno = child->number;
-
- /* ioremap() for child bus, which may or may not succeed */
- remap_bus_range(child);
-
- return 0;
-}
-
static struct pci_dev *dlpar_find_new_dev(struct pci_bus *parent,
struct device_node *dev_dn)
{
@@ -188,29 +151,41 @@ static struct pci_dev *dlpar_find_new_dev(struct pci_bus *parent,
static struct pci_dev *dlpar_pci_add_bus(struct device_node *dn)
{
struct pci_dn *pdn = dn->data;
- struct pci_controller *hose = pdn->phb;
+ struct pci_controller *phb = pdn->phb;
struct pci_dev *dev = NULL;
- /* Scan phb bus for EADS device, adding new one to bus->devices */
- if (!pci_scan_single_device(hose->bus, pdn->devfn)) {
- printk(KERN_ERR "%s: found no device on bus\n", __FUNCTION__);
+ rpaphp_eeh_init_nodes(dn);
+ /* Add EADS device to PHB bus, adding new entry to bus->devices */
+ dev = of_create_pci_dev(dn, phb->bus, pdn->devfn);
+ if (!dev) {
+ printk(KERN_ERR "%s: failed to create pci dev for %s\n",
+ __FUNCTION__, dn->full_name);
return NULL;
}
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE ||
+ dev->hdr_type == PCI_HEADER_TYPE_CARDBUS)
+ of_scan_pci_bridge(dn, dev);
+
+ rpaphp_init_new_devs(dev->subordinate);
+
+ /* Claim new bus resources */
+ rpadlpar_claim_one_bus(dev->bus);
+
+ /* ioremap() for child bus, which may or may not succeed */
+ (void) remap_bus_range(dev->bus);
+
/* Add new devices to global lists. Register in proc, sysfs. */
- pci_bus_add_devices(hose->bus);
+ pci_bus_add_devices(phb->bus);
/* Confirm new bridge dev was created */
- dev = dlpar_find_new_dev(hose->bus, dn);
+ dev = dlpar_find_new_dev(phb->bus, dn);
if (dev) {
if (dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
printk(KERN_ERR "%s: unexpected header type %d\n",
__FUNCTION__, dev->hdr_type);
return NULL;
}
-
- if (pci_add_secondary_bus(dn, dev))
- return NULL;
}
return dev;
@@ -219,7 +194,6 @@ static struct pci_dev *dlpar_pci_add_bus(struct device_node *dn)
static int dlpar_add_pci_slot(char *drc_name, struct device_node *dn)
{
struct pci_dev *dev;
- int rc;
if (rpaphp_find_pci_bus(dn))
return -EINVAL;
@@ -232,15 +206,6 @@ static int dlpar_add_pci_slot(char *drc_name, struct device_node *dn)
return -EIO;
}
- if (dn->child) {
- rc = rpaphp_config_pci_adapter(dev->subordinate);
- if (rc < 0) {
- printk(KERN_ERR "%s: unable to enable slot %s\n",
- __FUNCTION__, drc_name);
- return -EIO;
- }
- }
-
/* Add hotplug slot */
if (rpaphp_add_slot(dn)) {
printk(KERN_ERR "%s: unable to add hotplug slot %s\n",
@@ -306,7 +271,7 @@ static int dlpar_add_phb(char *drc_name, struct device_node *dn)
{
struct pci_controller *phb;
- if (PCI_DN(dn)->phb) {
+ if (PCI_DN(dn) && PCI_DN(dn)->phb) {
/* PHB already exists */
return -EINVAL;
}
@@ -435,6 +400,8 @@ int dlpar_remove_pci_slot(char *drc_name, struct device_node *dn)
__FUNCTION__, drc_name);
return -EIO;
}
+ } else {
+ rpaphp_unconfig_pci_adapter(bus);
}
if (unmap_bus_range(bus)) {
diff --git a/drivers/pci/hotplug/rpaphp.h b/drivers/pci/hotplug/rpaphp.h
index 71ea5f9bb28..57ea71a7bda 100644
--- a/drivers/pci/hotplug/rpaphp.h
+++ b/drivers/pci/hotplug/rpaphp.h
@@ -93,6 +93,8 @@ extern int rpaphp_claim_resource(struct pci_dev *dev, int resource);
extern int rpaphp_enable_pci_slot(struct slot *slot);
extern int register_pci_slot(struct slot *slot);
extern int rpaphp_get_pci_adapter_status(struct slot *slot, int is_init, u8 * value);
+extern void rpaphp_init_new_devs(struct pci_bus *bus);
+extern void rpaphp_eeh_init_nodes(struct device_node *dn);
extern int rpaphp_config_pci_adapter(struct pci_bus *bus);
extern int rpaphp_unconfig_pci_adapter(struct pci_bus *bus);
diff --git a/drivers/pci/hotplug/rpaphp_pci.c b/drivers/pci/hotplug/rpaphp_pci.c
index f7c12d7dfcf..a7859a84d1a 100644
--- a/drivers/pci/hotplug/rpaphp_pci.c
+++ b/drivers/pci/hotplug/rpaphp_pci.c
@@ -154,8 +154,7 @@ exit:
}
/* Must be called before pci_bus_add_devices */
-static void
-rpaphp_fixup_new_pci_devices(struct pci_bus *bus, int fix_bus)
+void rpaphp_fixup_new_pci_devices(struct pci_bus *bus, int fix_bus)
{
struct pci_dev *dev;
@@ -184,6 +183,20 @@ rpaphp_fixup_new_pci_devices(struct pci_bus *bus, int fix_bus)
}
}
+static void rpaphp_eeh_add_bus_device(struct pci_bus *bus)
+{
+ struct pci_dev *dev;
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ eeh_add_device_late(dev);
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+ struct pci_bus *subbus = dev->subordinate;
+ if (subbus)
+ rpaphp_eeh_add_bus_device (subbus);
+ }
+ }
+}
+
static int rpaphp_pci_config_bridge(struct pci_dev *dev)
{
u8 sec_busno;
@@ -217,6 +230,13 @@ static int rpaphp_pci_config_bridge(struct pci_dev *dev)
return 0;
}
+void rpaphp_init_new_devs(struct pci_bus *bus)
+{
+ rpaphp_fixup_new_pci_devices(bus, 0);
+ rpaphp_eeh_add_bus_device(bus);
+}
+EXPORT_SYMBOL_GPL(rpaphp_init_new_devs);
+
/*****************************************************************************
rpaphp_pci_config_slot() will configure all devices under the
given slot->dn and return the first pci_dev.
@@ -233,36 +253,51 @@ rpaphp_pci_config_slot(struct pci_bus *bus)
if (!dn || !dn->child)
return NULL;
- slotno = PCI_SLOT(PCI_DN(dn->child)->devfn);
+ if (systemcfg->platform == PLATFORM_PSERIES_LPAR) {
+ of_scan_bus(dn, bus);
+ if (list_empty(&bus->devices)) {
+ err("%s: No new device found\n", __FUNCTION__);
+ return NULL;
+ }
- /* pci_scan_slot should find all children */
- num = pci_scan_slot(bus, PCI_DEVFN(slotno, 0));
- if (num) {
- rpaphp_fixup_new_pci_devices(bus, 1);
+ rpaphp_init_new_devs(bus);
pci_bus_add_devices(bus);
- }
- if (list_empty(&bus->devices)) {
- err("%s: No new device found\n", __FUNCTION__);
- return NULL;
- }
- list_for_each_entry(dev, &bus->devices, bus_list) {
- if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
- rpaphp_pci_config_bridge(dev);
+ dev = list_entry(&bus->devices, struct pci_dev, bus_list);
+ } else {
+ slotno = PCI_SLOT(PCI_DN(dn->child)->devfn);
+
+ /* pci_scan_slot should find all children */
+ num = pci_scan_slot(bus, PCI_DEVFN(slotno, 0));
+ if (num) {
+ rpaphp_fixup_new_pci_devices(bus, 1);
+ pci_bus_add_devices(bus);
+ }
+ if (list_empty(&bus->devices)) {
+ err("%s: No new device found\n", __FUNCTION__);
+ return NULL;
+ }
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
+ rpaphp_pci_config_bridge(dev);
+
+ rpaphp_eeh_add_bus_device(bus);
+ }
}
return dev;
}
-static void enable_eeh(struct device_node *dn)
+void rpaphp_eeh_init_nodes(struct device_node *dn)
{
struct device_node *sib;
for (sib = dn->child; sib; sib = sib->sibling)
- enable_eeh(sib);
+ rpaphp_eeh_init_nodes(sib);
eeh_add_device_early(dn);
return;
}
+EXPORT_SYMBOL_GPL(rpaphp_eeh_init_nodes);
static void print_slot_pci_funcs(struct pci_bus *bus)
{
@@ -289,7 +324,7 @@ int rpaphp_config_pci_adapter(struct pci_bus *bus)
if (!dn)
goto exit;
- enable_eeh(dn);
+ rpaphp_eeh_init_nodes(dn);
dev = rpaphp_pci_config_slot(bus);
if (!dev) {
err("%s: can't find any devices.\n", __FUNCTION__);
@@ -331,6 +366,7 @@ int rpaphp_unconfig_pci_adapter(struct pci_bus *bus)
}
return 0;
}
+EXPORT_SYMBOL_GPL(rpaphp_unconfig_pci_adapter);
static int setup_pci_hotplug_slot_info(struct slot *slot)
{
@@ -444,8 +480,8 @@ int rpaphp_enable_pci_slot(struct slot *slot)
retval = rpaphp_config_pci_adapter(slot->bus);
if (!retval) {
slot->state = CONFIGURED;
- dbg("%s: PCI devices in slot[%s] has been configured\n",
- __FUNCTION__, slot->name);
+ info("%s: devices in slot[%s] configured\n",
+ __FUNCTION__, slot->name);
} else {
slot->state = NOT_CONFIGURED;
dbg("%s: no pci_dev struct for adapter in slot[%s]\n",
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c
index b8e95acea3b..38009bc0fd5 100644
--- a/drivers/pci/hotplug/shpchp_pci.c
+++ b/drivers/pci/hotplug/shpchp_pci.c
@@ -34,7 +34,7 @@
#include "../pci.h"
#include "shpchp.h"
-void program_fw_provided_values(struct pci_dev *dev)
+static void program_fw_provided_values(struct pci_dev *dev)
{
u16 pci_cmd, pci_bctl;
struct pci_dev *cdev;
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index a2033552423..202b7507a35 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -23,6 +23,8 @@
#include "pci.h"
#include "msi.h"
+#define MSI_TARGET_CPU first_cpu(cpu_online_map)
+
static DEFINE_SPINLOCK(msi_lock);
static struct msi_desc* msi_desc[NR_IRQS] = { [0 ... NR_IRQS-1] = NULL };
static kmem_cache_t* msi_cachep;
@@ -92,6 +94,7 @@ static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask)
struct msi_desc *entry;
struct msg_address address;
unsigned int irq = vector;
+ unsigned int dest_cpu = first_cpu(cpu_mask);
entry = (struct msi_desc *)msi_desc[vector];
if (!entry || !entry->dev)
@@ -108,9 +111,9 @@ static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask)
pci_read_config_dword(entry->dev, msi_lower_address_reg(pos),
&address.lo_address.value);
address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK;
- address.lo_address.value |= (cpu_mask_to_apicid(cpu_mask) <<
- MSI_TARGET_CPU_SHIFT);
- entry->msi_attrib.current_cpu = cpu_mask_to_apicid(cpu_mask);
+ address.lo_address.value |= (cpu_physical_id(dest_cpu) <<
+ MSI_TARGET_CPU_SHIFT);
+ entry->msi_attrib.current_cpu = cpu_physical_id(dest_cpu);
pci_write_config_dword(entry->dev, msi_lower_address_reg(pos),
address.lo_address.value);
set_native_irq_info(irq, cpu_mask);
@@ -123,9 +126,9 @@ static void set_msi_affinity(unsigned int vector, cpumask_t cpu_mask)
address.lo_address.value = readl(entry->mask_base + offset);
address.lo_address.value &= MSI_ADDRESS_DEST_ID_MASK;
- address.lo_address.value |= (cpu_mask_to_apicid(cpu_mask) <<
- MSI_TARGET_CPU_SHIFT);
- entry->msi_attrib.current_cpu = cpu_mask_to_apicid(cpu_mask);
+ address.lo_address.value |= (cpu_physical_id(dest_cpu) <<
+ MSI_TARGET_CPU_SHIFT);
+ entry->msi_attrib.current_cpu = cpu_physical_id(dest_cpu);
writel(address.lo_address.value, entry->mask_base + offset);
set_native_irq_info(irq, cpu_mask);
break;
@@ -259,14 +262,15 @@ static void msi_data_init(struct msg_data *msi_data,
static void msi_address_init(struct msg_address *msi_address)
{
unsigned int dest_id;
+ unsigned long dest_phys_id = cpu_physical_id(MSI_TARGET_CPU);
memset(msi_address, 0, sizeof(struct msg_address));
msi_address->hi_address = (u32)0;
dest_id = (MSI_ADDRESS_HEADER << MSI_ADDRESS_HEADER_SHIFT);
- msi_address->lo_address.u.dest_mode = MSI_DEST_MODE;
+ msi_address->lo_address.u.dest_mode = MSI_PHYSICAL_MODE;
msi_address->lo_address.u.redirection_hint = MSI_REDIRECTION_HINT_MODE;
msi_address->lo_address.u.dest_id = dest_id;
- msi_address->lo_address.value |= (MSI_TARGET_CPU << MSI_TARGET_CPU_SHIFT);
+ msi_address->lo_address.value |= (dest_phys_id << MSI_TARGET_CPU_SHIFT);
}
static int msi_free_vector(struct pci_dev* dev, int vector, int reassign);
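The msi.c hunk above drops cpu_mask_to_apicid() in favour of the physical APIC ID of the first CPU in the requested mask. A minimal sketch of the low-address-dword rewrite it performs, reusing the existing MSI_ADDRESS_DEST_ID_MASK and MSI_TARGET_CPU_SHIFT constants (the helper name is illustrative, not part of the patch):

/* Illustrative only: retarget an MSI low address dword at 'cpu' using
 * physical destination mode, as the hunk above does inline. */
static u32 msi_retarget_lo_address(u32 lo_address, unsigned int cpu)
{
	lo_address &= MSI_ADDRESS_DEST_ID_MASK;	/* clear the old destination ID */
	lo_address |= cpu_physical_id(cpu) << MSI_TARGET_CPU_SHIFT;
	return lo_address;
}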
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index e9e37abe1f7..a9b00cc2d88 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -91,9 +91,7 @@ acpi_query_osc (
static acpi_status
acpi_run_osc (
acpi_handle handle,
- u32 level,
- void *context,
- void **retval )
+ void *context)
{
acpi_status status;
struct acpi_object_list input;
@@ -184,7 +182,7 @@ EXPORT_SYMBOL(pci_osc_support_set);
*
* Attempt to take control from Firmware on requested control bits.
**/
-acpi_status pci_osc_control_set(u32 flags)
+acpi_status pci_osc_control_set(acpi_handle handle, u32 flags)
{
acpi_status status;
u32 ctrlset;
@@ -198,10 +196,7 @@ acpi_status pci_osc_control_set(u32 flags)
return AE_SUPPORT;
}
ctrlset_buf[OSC_CONTROL_TYPE] |= ctrlset;
- status = acpi_get_devices ( PCI_ROOT_HID_STRING,
- acpi_run_osc,
- ctrlset_buf,
- NULL );
+ status = acpi_run_osc(handle, ctrlset_buf);
if (ACPI_FAILURE (status)) {
ctrlset_buf[OSC_CONTROL_TYPE] &= ~ctrlset;
}
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 94e68c54d27..a9046d4b8af 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -37,7 +37,7 @@ struct pci_dynid {
* Adds a new dynamic pci device ID to this driver,
* and causes the driver to probe for all devices again.
*/
-static inline ssize_t
+static ssize_t
store_new_id(struct device_driver *driver, const char *buf, size_t count)
{
struct pci_dynid *dynid;
@@ -364,15 +364,16 @@ static struct kobj_type pci_driver_kobj_type = {
};
/**
- * pci_register_driver - register a new pci driver
+ * __pci_register_driver - register a new pci driver
* @drv: the driver structure to register
+ * @owner: owner module of drv
*
* Adds the driver structure to the list of registered drivers.
* Returns a negative value on error, otherwise 0.
* If no error occurred, the driver remains registered even if
* no device was claimed during registration.
*/
-int pci_register_driver(struct pci_driver *drv)
+int __pci_register_driver(struct pci_driver *drv, struct module *owner)
{
int error;
@@ -389,7 +390,7 @@ int pci_register_driver(struct pci_driver *drv)
printk(KERN_WARNING "Warning: PCI driver %s has a struct "
"device_driver shutdown method, please update!\n",
drv->name);
- drv->driver.owner = drv->owner;
+ drv->driver.owner = owner;
drv->driver.kobj.ktype = &pci_driver_kobj_type;
spin_lock_init(&drv->dynids.lock);
@@ -526,7 +527,7 @@ postcore_initcall(pci_driver_init);
EXPORT_SYMBOL(pci_match_id);
EXPORT_SYMBOL(pci_match_device);
-EXPORT_SYMBOL(pci_register_driver);
+EXPORT_SYMBOL(__pci_register_driver);
EXPORT_SYMBOL(pci_unregister_driver);
EXPORT_SYMBOL(pci_dev_driver);
EXPORT_SYMBOL(pci_bus_type);
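With __pci_register_driver() now taking the owner module explicitly, drivers stop setting .owner by hand (hence the .owner removals in the lpfc, goku_udc, net2280 and USB HCD hunks later in this patch). The include/linux/pci.h side is not shown in this section, but presumably it keeps the old entry point as a thin wrapper along these lines:

/* Sketch of the expected compatibility wrapper; the exact definition lives
 * in include/linux/pci.h, outside this section. */
static inline int pci_register_driver(struct pci_driver *drv)
{
	return __pci_register_driver(drv, THIS_MODULE);
}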
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index e74d7584304..8e287a828d5 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -63,11 +63,38 @@ pci_max_busnr(void)
return max;
}
+static int __pci_find_next_cap(struct pci_bus *bus, unsigned int devfn, u8 pos, int cap)
+{
+ u8 id;
+ int ttl = 48;
+
+ while (ttl--) {
+ pci_bus_read_config_byte(bus, devfn, pos, &pos);
+ if (pos < 0x40)
+ break;
+ pos &= ~3;
+ pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID,
+ &id);
+ if (id == 0xff)
+ break;
+ if (id == cap)
+ return pos;
+ pos += PCI_CAP_LIST_NEXT;
+ }
+ return 0;
+}
+
+int pci_find_next_capability(struct pci_dev *dev, u8 pos, int cap)
+{
+ return __pci_find_next_cap(dev->bus, dev->devfn,
+ pos + PCI_CAP_LIST_NEXT, cap);
+}
+EXPORT_SYMBOL_GPL(pci_find_next_capability);
+
static int __pci_bus_find_cap(struct pci_bus *bus, unsigned int devfn, u8 hdr_type, int cap)
{
u16 status;
- u8 pos, id;
- int ttl = 48;
+ u8 pos;
pci_bus_read_config_word(bus, devfn, PCI_STATUS, &status);
if (!(status & PCI_STATUS_CAP_LIST))
@@ -76,24 +103,15 @@ static int __pci_bus_find_cap(struct pci_bus *bus, unsigned int devfn, u8 hdr_ty
switch (hdr_type) {
case PCI_HEADER_TYPE_NORMAL:
case PCI_HEADER_TYPE_BRIDGE:
- pci_bus_read_config_byte(bus, devfn, PCI_CAPABILITY_LIST, &pos);
+ pos = PCI_CAPABILITY_LIST;
break;
case PCI_HEADER_TYPE_CARDBUS:
- pci_bus_read_config_byte(bus, devfn, PCI_CB_CAPABILITY_LIST, &pos);
+ pos = PCI_CB_CAPABILITY_LIST;
break;
default:
return 0;
}
- while (ttl-- && pos >= 0x40) {
- pos &= ~3;
- pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_ID, &id);
- if (id == 0xff)
- break;
- if (id == cap)
- return pos;
- pci_bus_read_config_byte(bus, devfn, pos + PCI_CAP_LIST_NEXT, &pos);
- }
- return 0;
+ return __pci_find_next_cap(bus, devfn, pos, cap);
}
/**
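The pci.c hunk above factors the capability-list walk into __pci_find_next_cap() and exports pci_find_next_capability(), which resumes the walk from a known offset. A hedged usage sketch (the helper below is illustrative, not taken from the patch) that visits every instance of a given capability on a device:

/* Illustrative only: print the config-space offset of every instance of
 * capability 'cap' present on 'dev'. */
static void pci_dump_caps(struct pci_dev *dev, int cap)
{
	int pos = pci_find_capability(dev, cap);

	while (pos) {
		printk(KERN_DEBUG "%s: capability %#x at offset %#x\n",
		       pci_name(dev), cap, pos);
		pos = pci_find_next_capability(dev, pos, cap);
	}
}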
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 5627ce1d2b3..3a4f49f4eff 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -462,11 +462,11 @@ static void __devinit quirk_vt82c686_acpi(struct pci_dev *dev)
pci_read_config_word(dev, 0x70, &hm);
hm &= PCI_BASE_ADDRESS_IO_MASK;
- quirk_io_region(dev, hm, 128, PCI_BRIDGE_RESOURCES + 1, "vt82c868 HW-mon");
+ quirk_io_region(dev, hm, 128, PCI_BRIDGE_RESOURCES + 1, "vt82c686 HW-mon");
pci_read_config_dword(dev, 0x90, &smb);
smb &= PCI_BASE_ADDRESS_IO_MASK;
- quirk_io_region(dev, smb, 16, PCI_BRIDGE_RESOURCES + 2, "vt82c868 SMB");
+ quirk_io_region(dev, smb, 16, PCI_BRIDGE_RESOURCES + 2, "vt82c686 SMB");
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4, quirk_vt82c686_acpi );
@@ -1243,6 +1243,21 @@ static void __devinit quirk_netmos(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NETMOS, PCI_ANY_ID, quirk_netmos);
+
+static void __devinit fixup_rev1_53c810(struct pci_dev* dev)
+{
+ /* rev 1 ncr53c810 chips don't set the class at all which means
+ * they don't get their resources remapped. Fix that here.
+ */
+
+ if (dev->class == PCI_CLASS_NOT_DEFINED) {
+ printk(KERN_INFO "NCR 53c810 rev 1 detected, setting PCI class.\n");
+ dev->class = PCI_CLASS_STORAGE_SCSI;
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_NCR, PCI_DEVICE_ID_NCR_53C810, fixup_rev1_53c810);
+
+
static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, struct pci_fixup *end)
{
while (f < end) {
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index ccf20039e90..309eb557f9a 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -156,7 +156,7 @@ config TCIC
config PCMCIA_M8XX
tristate "MPC8xx PCMCIA support"
- depends on PCMCIA && PPC
+ depends on PCMCIA && PPC && 8xx
select PCCARD_NONSTATIC
help
Say Y here to include support for PowerPC 8xx series PCMCIA
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index fe37541abbf..bcecf5133b7 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -25,7 +25,7 @@ obj-$(CONFIG_PD6729) += pd6729.o
obj-$(CONFIG_I82365) += i82365.o
obj-$(CONFIG_I82092) += i82092.o
obj-$(CONFIG_TCIC) += tcic.o
-obj-$(CONFIG_PCMCIA_M8XX) += m8xx_pcmcia.o
+obj-$(CONFIG_PCMCIA_M8XX) += m8xx_pcmcia.o
obj-$(CONFIG_HD64465_PCMCIA) += hd64465_ss.o
obj-$(CONFIG_PCMCIA_SA1100) += sa11xx_core.o sa1100_cs.o
obj-$(CONFIG_PCMCIA_SA1111) += sa11xx_core.o sa1111_cs.o
@@ -47,10 +47,10 @@ au1x00_ss-$(CONFIG_MIPS_PB1200) += au1000_db1x00.o
au1x00_ss-$(CONFIG_MIPS_PB1500) += au1000_pb1x00.o
au1x00_ss-$(CONFIG_MIPS_DB1000) += au1000_db1x00.o
au1x00_ss-$(CONFIG_MIPS_DB1100) += au1000_db1x00.o
-au1x00_ss-$(CONFIG_MIPS_DB1200) += au1000_db1x00.o
+au1x00_ss-$(CONFIG_MIPS_DB1200) += au1000_db1x00.o
au1x00_ss-$(CONFIG_MIPS_DB1500) += au1000_db1x00.o
au1x00_ss-$(CONFIG_MIPS_DB1550) += au1000_db1x00.o
-au1x00_ss-$(CONFIG_MIPS_XXS1500) += au1000_xxs1500.o
+au1x00_ss-$(CONFIG_MIPS_XXS1500) += au1000_xxs1500.o
sa1111_cs-y += sa1111_generic.o
sa1111_cs-$(CONFIG_ASSABET_NEPONSET) += sa1100_neponset.o
diff --git a/drivers/pcmcia/au1000_db1x00.c b/drivers/pcmcia/au1000_db1x00.c
index 24cfee1a412..abc13f28ba3 100644
--- a/drivers/pcmcia/au1000_db1x00.c
+++ b/drivers/pcmcia/au1000_db1x00.c
@@ -30,6 +30,7 @@
*
*/
+#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
diff --git a/drivers/pcmcia/au1000_generic.h b/drivers/pcmcia/au1000_generic.h
index b0e7908392a..f2c970b5f4f 100644
--- a/drivers/pcmcia/au1000_generic.h
+++ b/drivers/pcmcia/au1000_generic.h
@@ -22,6 +22,8 @@
#define __ASM_AU1000_PCMCIA_H
/* include the world */
+#include <linux/config.h>
+
#include <pcmcia/cs_types.h>
#include <pcmcia/cs.h>
#include <pcmcia/ss.h>
diff --git a/drivers/pcmcia/au1000_pb1x00.c b/drivers/pcmcia/au1000_pb1x00.c
index 86c0808d6a0..fd5522ede86 100644
--- a/drivers/pcmcia/au1000_pb1x00.c
+++ b/drivers/pcmcia/au1000_pb1x00.c
@@ -21,6 +21,7 @@
* with this program; if not, write to the Free Software Foundation, Inc.,
* 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*/
+#include <linux/config.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/delay.h>
diff --git a/drivers/pcmcia/au1000_xxs1500.c b/drivers/pcmcia/au1000_xxs1500.c
index 01a895bc9a4..01874b0bb03 100644
--- a/drivers/pcmcia/au1000_xxs1500.c
+++ b/drivers/pcmcia/au1000_xxs1500.c
@@ -27,7 +27,6 @@
*/
#include <linux/module.h>
#include <linux/init.h>
-#include <linux/config.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/kernel.h>
diff --git a/drivers/pcmcia/i82365.c b/drivers/pcmcia/i82365.c
index 7ce455d01cc..4ddd76239b3 100644
--- a/drivers/pcmcia/i82365.c
+++ b/drivers/pcmcia/i82365.c
@@ -1366,6 +1366,7 @@ static int __init init_i82365(void)
if (sockets == 0) {
printk("not found.\n");
platform_device_unregister(&i82365_device);
+ release_region(i365_base, 2);
driver_unregister(&i82365_driver);
return -ENODEV;
}
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
index f8bed87cf2f..6d9f71cfcb3 100644
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -39,7 +39,6 @@
#include <asm/io.h>
#include <asm/bitops.h>
-#include <asm/segment.h>
#include <asm/system.h>
#include <linux/kernel.h>
@@ -50,6 +49,7 @@
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
+#include <linux/platform_device.h>
#include <asm/mpc8xx.h>
#include <asm/8xx_immap.h>
@@ -546,29 +546,11 @@ static void m8xx_shutdown(void)
free_irq(pcmcia_schlvl, NULL);
}
-/* copied from tcic.c */
-
-static int m8xx_drv_suspend(struct device *dev, pm_message_t state, u32 level)
-{
- int ret = 0;
- if (level == SUSPEND_SAVE_STATE)
- ret = pcmcia_socket_dev_suspend(dev, state);
- return ret;
-}
-
-static int m8xx_drv_resume(struct device *dev, u32 level)
-{
- int ret = 0;
- if (level == RESUME_RESTORE_STATE)
- ret = pcmcia_socket_dev_resume(dev);
- return ret;
-}
-
static struct device_driver m8xx_driver = {
.name = "m8xx-pcmcia",
.bus = &platform_bus_type,
- .suspend = m8xx_drv_suspend,
- .resume = m8xx_drv_resume,
+ .suspend = pcmcia_socket_dev_suspend,
+ .resume = pcmcia_socket_dev_resume,
};
static struct platform_device m8xx_device = {
diff --git a/drivers/sbus/char/cpwatchdog.c b/drivers/sbus/char/cpwatchdog.c
index 071ae24be89..fd2cc7782f7 100644
--- a/drivers/sbus/char/cpwatchdog.c
+++ b/drivers/sbus/char/cpwatchdog.c
@@ -407,7 +407,7 @@ static long wd_compat_ioctl(struct file *file, unsigned int cmd,
case WIOCGSTAT:
lock_kernel();
rval = wd_ioctl(file->f_dentry->d_inode, file, cmd, arg);
- lock_kernel();
+ unlock_kernel();
break;
/* everything else is handled by the generic compat layer */
default:
diff --git a/drivers/sbus/char/display7seg.c b/drivers/sbus/char/display7seg.c
index 39f54213a6d..c3a51d1fae5 100644
--- a/drivers/sbus/char/display7seg.c
+++ b/drivers/sbus/char/display7seg.c
@@ -119,7 +119,7 @@ static long d7s_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
__u8 regs = readb(d7s_regs);
__u8 ireg = 0;
- int error = 0
+ int error = 0;
if (D7S_MINOR != iminor(file->f_dentry->d_inode))
return -ENODEV;
@@ -161,7 +161,7 @@ static long d7s_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
writeb(regs, d7s_regs);
break;
};
- lock_kernel();
+ unlock_kernel();
return error;
}
diff --git a/drivers/sbus/char/rtc.c b/drivers/sbus/char/rtc.c
index 9b988baf0b5..5774bdd0e26 100644
--- a/drivers/sbus/char/rtc.c
+++ b/drivers/sbus/char/rtc.c
@@ -210,6 +210,27 @@ static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
}
}
+static long rtc_compat_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ int rval = -ENOIOCTLCMD;
+
+ switch (cmd) {
+ /*
+ * These two are specific to this driver; the generic rtc ioctls
+ * are handled elsewhere.
+ */
+ case RTCGET:
+ case RTCSET:
+ lock_kernel();
+ rval = rtc_ioctl(file->f_dentry->d_inode, file, cmd, arg);
+ unlock_kernel();
+ break;
+ }
+
+ return rval;
+}
+
static int rtc_open(struct inode *inode, struct file *file)
{
int ret;
@@ -237,6 +258,7 @@ static struct file_operations rtc_fops = {
.owner = THIS_MODULE,
.llseek = no_llseek,
.ioctl = rtc_ioctl,
+ .compat_ioctl = rtc_compat_ioctl,
.open = rtc_open,
.release = rtc_release,
};
diff --git a/drivers/scsi/ide-scsi.c b/drivers/scsi/ide-scsi.c
index c888af4a456..3553da0e1cd 100644
--- a/drivers/scsi/ide-scsi.c
+++ b/drivers/scsi/ide-scsi.c
@@ -395,6 +395,7 @@ static int idescsi_end_request (ide_drive_t *drive, int uptodate, int nrsecs)
int log = test_bit(IDESCSI_LOG_CMD, &scsi->log);
struct Scsi_Host *host;
u8 *scsi_buf;
+ int errors = rq->errors;
unsigned long flags;
if (!(rq->flags & (REQ_SPECIAL|REQ_SENSE))) {
@@ -421,11 +422,11 @@ static int idescsi_end_request (ide_drive_t *drive, int uptodate, int nrsecs)
printk (KERN_WARNING "ide-scsi: %s: timed out for %lu\n",
drive->name, pc->scsi_cmd->serial_number);
pc->scsi_cmd->result = DID_TIME_OUT << 16;
- } else if (rq->errors >= ERROR_MAX) {
+ } else if (errors >= ERROR_MAX) {
pc->scsi_cmd->result = DID_ERROR << 16;
if (log)
printk ("ide-scsi: %s: I/O error for %lu\n", drive->name, pc->scsi_cmd->serial_number);
- } else if (rq->errors) {
+ } else if (errors) {
if (log)
printk ("ide-scsi: %s: check condition for %lu\n", drive->name, pc->scsi_cmd->serial_number);
if (!idescsi_check_condition(drive, rq))
diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
index c90723860a0..07498118359 100644
--- a/drivers/scsi/lpfc/lpfc_init.c
+++ b/drivers/scsi/lpfc/lpfc_init.c
@@ -1704,7 +1704,6 @@ MODULE_DEVICE_TABLE(pci, lpfc_id_table);
static struct pci_driver lpfc_driver = {
.name = LPFC_DRIVER_NAME,
- .owner = THIS_MODULE,
.id_table = lpfc_id_table,
.probe = lpfc_pci_probe_one,
.remove = __devexit_p(lpfc_pci_remove_one),
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index 654469778ab..b0f3cd63e3b 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -1970,7 +1970,6 @@ MODULE_DEVICE_TABLE (pci, pci_ids);
static struct pci_driver goku_pci_driver = {
.name = (char *) driver_name,
.id_table = pci_ids,
- .owner = THIS_MODULE,
.probe = goku_probe,
.remove = goku_remove,
diff --git a/drivers/usb/gadget/net2280.c b/drivers/usb/gadget/net2280.c
index 0dc6bb00bf7..c32e1f7476d 100644
--- a/drivers/usb/gadget/net2280.c
+++ b/drivers/usb/gadget/net2280.c
@@ -2948,7 +2948,6 @@ MODULE_DEVICE_TABLE (pci, pci_ids);
static struct pci_driver net2280_pci_driver = {
.name = (char *) driver_name,
.id_table = pci_ids,
- .owner = THIS_MODULE,
.probe = net2280_probe,
.remove = net2280_remove,
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index 14500885396..dfd9bd0b182 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -383,7 +383,6 @@ MODULE_DEVICE_TABLE (pci, pci_ids);
static struct pci_driver ehci_pci_driver = {
.name = (char *) hcd_name,
.id_table = pci_ids,
- .owner = THIS_MODULE,
.probe = usb_hcd_pci_probe,
.remove = usb_hcd_pci_remove,
diff --git a/drivers/usb/host/ohci-pci.c b/drivers/usb/host/ohci-pci.c
index 7ce1d9ef028..a59e536441e 100644
--- a/drivers/usb/host/ohci-pci.c
+++ b/drivers/usb/host/ohci-pci.c
@@ -218,7 +218,6 @@ MODULE_DEVICE_TABLE (pci, pci_ids);
static struct pci_driver ohci_pci_driver = {
.name = (char *) hcd_name,
.id_table = pci_ids,
- .owner = THIS_MODULE,
.probe = usb_hcd_pci_probe,
.remove = usb_hcd_pci_remove,
diff --git a/drivers/usb/host/uhci-hcd.c b/drivers/usb/host/uhci-hcd.c
index 15e0a511069..d33ce3982a5 100644
--- a/drivers/usb/host/uhci-hcd.c
+++ b/drivers/usb/host/uhci-hcd.c
@@ -831,7 +831,6 @@ MODULE_DEVICE_TABLE(pci, uhci_pci_ids);
static struct pci_driver uhci_pci_driver = {
.name = (char *)hcd_name,
.id_table = uhci_pci_ids,
- .owner = THIS_MODULE,
.probe = usb_hcd_pci_probe,
.remove = usb_hcd_pci_remove,
diff --git a/drivers/video/backlight/corgi_bl.c b/drivers/video/backlight/corgi_bl.c
index 4867498f68e..bd9a6996aee 100644
--- a/drivers/video/backlight/corgi_bl.c
+++ b/drivers/video/backlight/corgi_bl.c
@@ -48,6 +48,12 @@ static void corgibl_send_intensity(int intensity)
corgibl_mach_set_intensity(intensity);
spin_unlock_irqrestore(&bl_lock, flags);
+
+ corgi_kick_batt = symbol_get(sharpsl_battery_kick);
+ if (corgi_kick_batt) {
+ corgi_kick_batt();
+ symbol_put(sharpsl_battery_kick);
+ }
}
static void corgibl_blank(int blank)
diff --git a/fs/Kconfig b/fs/Kconfig
index 7d6ae369ce4..d5255e627b5 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -1601,9 +1601,10 @@ config CIFS
PC operating systems. The CIFS protocol is fully supported by
file servers such as Windows 2000 (including Windows 2003, NT 4
and Windows XP) as well by Samba (which provides excellent CIFS
- server support for Linux and many other operating systems). Currently
- you must use the smbfs client filesystem to access older SMB servers
- such as Windows 9x and OS/2.
+ server support for Linux and many other operating systems). Limited
+ support for Windows ME and similar servers is provided as well.
+ You must use the smbfs client filesystem to access older SMB servers
+ such as OS/2 and DOS.
The intent of the cifs module is to provide an advanced
network file system client for mounting to CIFS compliant servers,
@@ -1614,7 +1615,7 @@ config CIFS
cifs if running only a (Samba) server. It is possible to enable both
smbfs and cifs (e.g. if you are using CIFS for accessing Windows 2003
and Samba 3 servers, and smbfs for accessing old servers). If you need
- to mount to Samba or Windows 2003 servers from this machine, say Y.
+ to mount to Samba or Windows from this machine, say Y.
config CIFS_STATS
bool "CIFS statistics"
@@ -1623,8 +1624,22 @@ config CIFS_STATS
Enabling this option will cause statistics for each server share
mounted by the cifs client to be displayed in /proc/fs/cifs/Stats
+config CIFS_STATS2
+ bool "CIFS extended statistics"
+ depends on CIFS_STATS
+ help
+ Enabling this option will allow more detailed statistics on SMB
+ request timing to be displayed in /proc/fs/cifs/DebugData and also
+ allow optional logging of slow responses to dmesg (depending on the
+ value of /proc/fs/cifs/cifsFYI, see fs/cifs/README for more details).
+ These additional statistics may have a minor effect on performance
+ and memory utilization.
+
+ Unless you are a developer or are doing network performance analysis
+ or tuning, say N.
+
config CIFS_XATTR
- bool "CIFS extended attributes (EXPERIMENTAL)"
+ bool "CIFS extended attributes"
depends on CIFS
help
Extended attributes are name:value pairs associated with inodes by
@@ -1636,11 +1651,11 @@ config CIFS_XATTR
prefaced by the user namespace prefix. The system namespace
(used by some filesystems to store ACLs) is not supported at
this time.
-
+
If unsure, say N.
config CIFS_POSIX
- bool "CIFS POSIX Extensions (EXPERIMENTAL)"
+ bool "CIFS POSIX Extensions"
depends on CIFS_XATTR
help
Enabling this option will cause the cifs client to attempt to
@@ -1653,10 +1668,28 @@ config CIFS_POSIX
config CIFS_EXPERIMENTAL
bool "CIFS Experimental Features (EXPERIMENTAL)"
- depends on CIFS
+ depends on CIFS && EXPERIMENTAL
+ help
+ Enables cifs features under testing. These features are
+ experimental and currently include support for writepages
+ (multipage writebehind performance improvements) and directory
+ change notification, i.e. fcntl(F_DNOTIFY), as well as some security
+ improvements. Some also depend on setting at runtime the
+ pseudo-file /proc/fs/cifs/Experimental (which is disabled by
+ default). See the file fs/cifs/README for more details.
+
+ If unsure, say N.
+
+config CIFS_UPCALL
+ bool "CIFS Kerberos/SPNEGO advanced session setup (EXPERIMENTAL)"
+ depends on CIFS_EXPERIMENTAL
+ select CONNECTOR
help
- Enables cifs features under testing. These features
- are highly experimental. If unsure, say N.
+ Enables an upcall mechanism for CIFS which will be used to contact
+ userspace helper utilities to provide SPNEGO-packaged Kerberos
+ tickets which are needed to mount to certain secure servers
+ (for which more secure Kerberos authentication is required). If
+ unsure, say N.
config NCP_FS
tristate "NCP file system support (to mount NetWare volumes)"
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index 5bab24f5905..eab3750cf30 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -2,7 +2,7 @@ Version 1.39
------------
Defer close of a file handle slightly if pending writes depend on that file handle
(this reduces the EBADF bad file handle errors that can be logged under heavy
-stress on writes).
+stress on writes). Modify cifs Kconfig options to expose CONFIG_CIFS_STATS2.
Version 1.38
------------
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index 99a096d3f84..4e12053f080 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -74,10 +74,11 @@ cifs_strtoUCS(wchar_t * to, const char *from, int len,
cERROR(1,
("cifs_strtoUCS: char2uni returned %d",
charlen));
- to[i] = cpu_to_le16(0x003f); /* a question mark */
+ /* A question mark */
+ to[i] = (wchar_t)cpu_to_le16(0x003f);
charlen = 1;
} else
- to[i] = cpu_to_le16(to[i]);
+ to[i] = (wchar_t)cpu_to_le16(to[i]);
}
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 877095a1192..682b0235ad9 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -405,6 +405,7 @@ static struct quotactl_ops cifs_quotactl_ops = {
};
#endif
+#ifdef CONFIG_CIFS_EXPERIMENTAL
static void cifs_umount_begin(struct super_block * sblock)
{
struct cifs_sb_info *cifs_sb;
@@ -422,16 +423,18 @@ static void cifs_umount_begin(struct super_block * sblock)
tcon->tidStatus = CifsExiting;
up(&tcon->tconSem);
+ /* cancel_brl_requests(tcon); */
+ /* cancel_notify_requests(tcon); */
if(tcon->ses && tcon->ses->server)
{
- cERROR(1,("wake up tasks now - umount begin not complete"));
+ cFYI(1,("wake up tasks now - umount begin not complete"));
wake_up_all(&tcon->ses->server->request_q);
}
/* BB FIXME - finish add checks for tidStatus BB */
return;
}
-
+#endif
static int cifs_remount(struct super_block *sb, int *flags, char *data)
{
@@ -450,7 +453,9 @@ struct super_operations cifs_super_ops = {
unless later we add lazy close of inodes or unless the kernel forgets to call
us with the same number of releases (closes) as opens */
.show_options = cifs_show_options,
-/* .umount_begin = cifs_umount_begin, */ /* BB finish in the future */
+#ifdef CONFIG_CIFS_EXPERIMENTAL
+ .umount_begin = cifs_umount_begin,
+#endif
.remount_fs = cifs_remount,
};
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index d301149b1bb..1b73f4f4c5c 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -242,11 +242,11 @@ extern int CIFSSMBWrite2(const int xid, struct cifsTconInfo *tcon,
const int netfid, const unsigned int count,
const __u64 offset, unsigned int *nbytes,
struct kvec *iov, const int nvec, const int long_op);
+#endif /* CONFIG_CIFS_EXPERIMENTAL */
extern int CIFSGetSrvInodeNumber(const int xid, struct cifsTconInfo *tcon,
const unsigned char *searchName, __u64 * inode_number,
const struct nls_table *nls_codepage,
int remap_special_chars);
-#endif /* CONFIG_CIFS_EXPERIMENTAL */
extern int cifs_convertUCSpath(char *target, const __le16 *source, int maxlen,
const struct nls_table * codepage);
extern int cifsConvertToUCS(__le16 * target, const char *source, int maxlen,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 9312bfc5668..a53c596e108 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -2959,7 +2959,6 @@ CIFSFindClose(const int xid, struct cifsTconInfo *tcon, const __u16 searchHandle
return rc;
}
-#ifdef CONFIG_CIFS_EXPERIMENTAL
int
CIFSGetSrvInodeNumber(const int xid, struct cifsTconInfo *tcon,
const unsigned char *searchName,
@@ -3053,7 +3052,6 @@ GetInodeNumOut:
goto GetInodeNumberRetry;
return rc;
}
-#endif /* CIFS_EXPERIMENTAL */
int
CIFSGetDFSRefer(const int xid, struct cifsSesInfo *ses,
diff --git a/fs/cifs/cn_cifs.h b/fs/cifs/cn_cifs.h
new file mode 100644
index 00000000000..ea59ccac2eb
--- /dev/null
+++ b/fs/cifs/cn_cifs.h
@@ -0,0 +1,37 @@
+/*
+ * fs/cifs/cn_cifs.h
+ *
+ * Copyright (c) International Business Machines Corp., 2002
+ * Author(s): Steve French (sfrench@us.ibm.com)
+ *
+ * This library is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU Lesser General Public License as published
+ * by the Free Software Foundation; either version 2.1 of the License, or
+ * (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
+ * the GNU Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public License
+ * along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef _CN_CIFS_H
+#define _CN_CIFS_H
+#ifdef CONFIG_CIFS_UPCALL
+#include <linux/types.h>
+#include <linux/connector.h>
+
+struct cifs_upcall {
+ char signature[4]; /* CIFS */
+ enum command {
+ CIFS_GET_IP = 0x00000001, /* get ip address for hostname */
+ CIFS_GET_SECBLOB = 0x00000002, /* get SPNEGO wrapped blob */
+ } command;
+ /* union cifs upcall data follows */
+};
+#endif /* CIFS_UPCALL */
+#endif /* _CN_CIFS_H */
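As an aside on how this header is meant to be consumed: a kernel-side sender would fill the fixed header and append the command-specific data behind it. The sketch below is illustrative only; the connector transport and the payload layout are not defined by this patch, and cifs_fill_upcall_hdr() is a made-up name.

#include <linux/string.h>
#include "cn_cifs.h"

/* Illustrative only: populate the fixed part of a CIFS upcall message. */
static void cifs_fill_upcall_hdr(struct cifs_upcall *up)
{
	memcpy(up->signature, "CIFS", 4);	/* magic checked by the helper */
	up->command = CIFS_GET_IP;		/* request a hostname lookup */
	/* the union of command-specific data would follow this header */
}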
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 450ab75d654..2cb620716bc 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -42,6 +42,7 @@
#include "ntlmssp.h"
#include "nterr.h"
#include "rfc1002pdu.h"
+#include "cn_cifs.h"
#define CIFS_PORT 445
#define RFC1001_PORT 139
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 912d401600f..923d071163b 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -283,7 +283,6 @@ int cifs_get_inode_info(struct inode **pinode,
there Windows server or network appliances for which
IndexNumber field is not guaranteed unique? */
-#ifdef CONFIG_CIFS_EXPERIMENTAL
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM){
int rc1 = 0;
__u64 inode_num;
@@ -299,7 +298,6 @@ int cifs_get_inode_info(struct inode **pinode,
} else /* do we need cast or hash to ino? */
(*pinode)->i_ino = inode_num;
} /* else ino incremented to unique num in new_inode*/
-#endif /* CIFS_EXPERIMENTAL */
insert_inode_hash(*pinode);
}
inode = *pinode;
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 1abe7343f92..4abbe860430 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -827,6 +827,7 @@ static int jfs_link(struct dentry *old_dentry,
/* update object inode */
ip->i_nlink++; /* for new link */
ip->i_ctime = CURRENT_TIME;
+ dir->i_ctime = dir->i_mtime = CURRENT_TIME;
mark_inode_dirty(dir);
atomic_inc(&ip->i_count);
@@ -1024,6 +1025,8 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
insert_inode_hash(ip);
mark_inode_dirty(ip);
+ dip->i_ctime = dip->i_mtime = CURRENT_TIME;
+ mark_inode_dirty(dip);
/*
* commit update of parent directory and link object
*/
diff --git a/include/asm-alpha/ide.h b/include/asm-alpha/ide.h
index 68934a25931..6126afe2738 100644
--- a/include/asm-alpha/ide.h
+++ b/include/asm-alpha/ide.h
@@ -15,10 +15,6 @@
#include <linux/config.h>
-#ifndef MAX_HWIFS
-#define MAX_HWIFS CONFIG_IDE_MAX_HWIFS
-#endif
-
#define IDE_ARCH_OBSOLETE_DEFAULTS
static inline int ide_default_irq(unsigned long base)
diff --git a/include/asm-arm/arch-ixp4xx/hardware.h b/include/asm-arm/arch-ixp4xx/hardware.h
index 55d85eea8c1..cfb413c845f 100644
--- a/include/asm-arm/arch-ixp4xx/hardware.h
+++ b/include/asm-arm/arch-ixp4xx/hardware.h
@@ -44,5 +44,6 @@ extern unsigned int processor_id;
#include "ixdp425.h"
#include "coyote.h"
#include "prpmc1100.h"
+#include "nslu2.h"
#endif /* _ASM_ARCH_HARDWARE_H */
diff --git a/include/asm-arm/arch-ixp4xx/irqs.h b/include/asm-arm/arch-ixp4xx/irqs.h
index ca808281c7f..2cf4930372b 100644
--- a/include/asm-arm/arch-ixp4xx/irqs.h
+++ b/include/asm-arm/arch-ixp4xx/irqs.h
@@ -93,4 +93,11 @@
#define IRQ_COYOTE_PCI_SLOT1 IRQ_IXP4XX_GPIO11
#define IRQ_COYOTE_IDE IRQ_IXP4XX_GPIO5
+/*
+ * NSLU2 board IRQs
+ */
+#define IRQ_NSLU2_PCI_INTA IRQ_IXP4XX_GPIO11
+#define IRQ_NSLU2_PCI_INTB IRQ_IXP4XX_GPIO10
+#define IRQ_NSLU2_PCI_INTC IRQ_IXP4XX_GPIO9
+
#endif
diff --git a/include/asm-arm/arch-ixp4xx/nslu2.h b/include/asm-arm/arch-ixp4xx/nslu2.h
new file mode 100644
index 00000000000..b8b347a559c
--- /dev/null
+++ b/include/asm-arm/arch-ixp4xx/nslu2.h
@@ -0,0 +1,96 @@
+/*
+ * include/asm-arm/arch-ixp4xx/nslu2.h
+ *
+ * NSLU2 platform specific definitions
+ *
+ * Author: Mark Rakes <mrakes AT mac.com>
+ * Maintainers: http://www.nslu2-linux.org
+ *
+ * based on ixdp425.h:
+ * Copyright 2004 (c) MontaVista Software, Inc.
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ */
+
+#ifndef __ASM_ARCH_HARDWARE_H__
+#error "Do not include this directly, instead #include <asm/hardware.h>"
+#endif
+
+#define NSLU2_FLASH_BASE IXP4XX_EXP_BUS_CS0_BASE_PHYS
+#define NSLU2_FLASH_SIZE IXP4XX_EXP_BUS_CSX_REGION_SIZE
+
+#define NSLU2_SDA_PIN 7
+#define NSLU2_SCL_PIN 6
+
+/*
+ * NSLU2 PCI IRQs
+ */
+#define NSLU2_PCI_MAX_DEV 3
+#define NSLU2_PCI_IRQ_LINES 3
+
+
+/* PCI controller GPIO to IRQ pin mappings */
+#define NSLU2_PCI_INTA_PIN 11
+#define NSLU2_PCI_INTB_PIN 10
+#define NSLU2_PCI_INTC_PIN 9
+#define NSLU2_PCI_INTD_PIN 8
+
+
+/* NSLU2 Timer */
+#define NSLU2_FREQ 66000000
+#define NSLU2_CLOCK_TICK_RATE (((NSLU2_FREQ / HZ & ~IXP4XX_OST_RELOAD_MASK) + 1) * HZ)
+#define NSLU2_CLOCK_TICKS_PER_USEC ((NSLU2_CLOCK_TICK_RATE + USEC_PER_SEC/2) / USEC_PER_SEC)
+
+/* GPIO */
+
+#define NSLU2_GPIO0 0
+#define NSLU2_GPIO1 1
+#define NSLU2_GPIO2 2
+#define NSLU2_GPIO3 3
+#define NSLU2_GPIO4 4
+#define NSLU2_GPIO5 5
+#define NSLU2_GPIO6 6
+#define NSLU2_GPIO7 7
+#define NSLU2_GPIO8 8
+#define NSLU2_GPIO9 9
+#define NSLU2_GPIO10 10
+#define NSLU2_GPIO11 11
+#define NSLU2_GPIO12 12
+#define NSLU2_GPIO13 13
+#define NSLU2_GPIO14 14
+#define NSLU2_GPIO15 15
+
+/* Buttons */
+
+#define NSLU2_PB_GPIO NSLU2_GPIO5
+#define NSLU2_PO_GPIO NSLU2_GPIO8 /* power off */
+#define NSLU2_RB_GPIO NSLU2_GPIO12
+
+#define NSLU2_PB_IRQ IRQ_IXP4XX_GPIO5
+#define NSLU2_RB_IRQ IRQ_IXP4XX_GPIO12
+
+#define NSLU2_PB_BM (1L << NSLU2_PB_GPIO)
+#define NSLU2_PO_BM (1L << NSLU2_PO_GPIO)
+#define NSLU2_RB_BM (1L << NSLU2_RB_GPIO)
+
+/* Buzzer */
+
+#define NSLU2_GPIO_BUZZ 4
+#define NSLU2_BZ_BM (1L << NSLU2_GPIO_BUZZ)
+/* LEDs */
+
+#define NSLU2_LED_RED NSLU2_GPIO0
+#define NSLU2_LED_GRN NSLU2_GPIO1
+
+#define NSLU2_LED_RED_BM (1L << NSLU2_LED_RED)
+#define NSLU2_LED_GRN_BM (1L << NSLU2_LED_GRN)
+
+#define NSLU2_LED_DISK1 NSLU2_GPIO2
+#define NSLU2_LED_DISK2 NSLU2_GPIO3
+
+#define NSLU2_LED_DISK1_BM (1L << NSLU2_GPIO2)
+#define NSLU2_LED_DISK2_BM (1L << NSLU2_GPIO3)
+
+
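For reference, the timer macros above work out as follows, assuming HZ is 100 and IXP4XX_OST_RELOAD_MASK is 0x3 (both values come from the generic ixp4xx support code, not from this header):

/* Worked example, assuming HZ == 100 and IXP4XX_OST_RELOAD_MASK == 0x3:
 *   NSLU2_FREQ / HZ             = 66000000 / 100 = 660000
 *   (660000 & ~0x3) + 1         = 660001
 *   NSLU2_CLOCK_TICK_RATE       = 660001 * 100   = 66000100
 *   NSLU2_CLOCK_TICKS_PER_USEC  = (66000100 + 500000) / 1000000 = 66
 */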
diff --git a/include/asm-arm/arch-omap/board-h4.h b/include/asm-arm/arch-omap/board-h4.h
index d64ee9211ee..33ea29a4165 100644
--- a/include/asm-arm/arch-omap/board-h4.h
+++ b/include/asm-arm/arch-omap/board-h4.h
@@ -34,5 +34,7 @@
#define OMAP24XX_ETHR_START 0x08000300
#define OMAP24XX_ETHR_GPIO_IRQ 92
+#define H4_CS0_BASE 0x04000000
+
#endif /* __ASM_ARCH_OMAP_H4_H */
diff --git a/include/asm-arm/arch-omap/board-innovator.h b/include/asm-arm/arch-omap/board-innovator.h
index 79574e0ed13..b3cf33441f6 100644
--- a/include/asm-arm/arch-omap/board-innovator.h
+++ b/include/asm-arm/arch-omap/board-innovator.h
@@ -26,7 +26,7 @@
#ifndef __ASM_ARCH_OMAP_INNOVATOR_H
#define __ASM_ARCH_OMAP_INNOVATOR_H
-#if defined (CONFIG_ARCH_OMAP1510)
+#if defined (CONFIG_ARCH_OMAP15XX)
#ifndef OMAP_SDRAM_DEVICE
#define OMAP_SDRAM_DEVICE D256M_1X16_4B
@@ -44,7 +44,7 @@ void fpga_write(unsigned char val, int reg);
unsigned char fpga_read(int reg);
#endif
-#endif /* CONFIG_ARCH_OMAP1510 */
+#endif /* CONFIG_ARCH_OMAP15XX */
#if defined (CONFIG_ARCH_OMAP16XX)
diff --git a/include/asm-arm/arch-omap/clock.h b/include/asm-arm/arch-omap/clock.h
new file mode 100644
index 00000000000..740c297eb11
--- /dev/null
+++ b/include/asm-arm/arch-omap/clock.h
@@ -0,0 +1,91 @@
+/*
+ * linux/include/asm-arm/arch-omap/clock.h
+ *
+ * Copyright (C) 2004 - 2005 Nokia corporation
+ * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
+ * Based on clocks.h by Tony Lindgren, Gordon McNutt and RidgeRun, Inc
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_OMAP_CLOCK_H
+#define __ARCH_ARM_OMAP_CLOCK_H
+
+struct module;
+
+struct clk {
+ struct list_head node;
+ struct module *owner;
+ const char *name;
+ struct clk *parent;
+ unsigned long rate;
+ __u32 flags;
+ void __iomem *enable_reg;
+ __u8 enable_bit;
+ __u8 rate_offset;
+ __u8 src_offset;
+ __s8 usecount;
+ void (*recalc)(struct clk *);
+ int (*set_rate)(struct clk *, unsigned long);
+ long (*round_rate)(struct clk *, unsigned long);
+ void (*init)(struct clk *);
+ int (*enable)(struct clk *);
+ void (*disable)(struct clk *);
+};
+
+struct clk_functions {
+ int (*clk_enable)(struct clk *clk);
+ void (*clk_disable)(struct clk *clk);
+ int (*clk_use)(struct clk *clk);
+ void (*clk_unuse)(struct clk *clk);
+ long (*clk_round_rate)(struct clk *clk, unsigned long rate);
+ int (*clk_set_rate)(struct clk *clk, unsigned long rate);
+ int (*clk_set_parent)(struct clk *clk, struct clk *parent);
+ struct clk * (*clk_get_parent)(struct clk *clk);
+ void (*clk_allow_idle)(struct clk *clk);
+ void (*clk_deny_idle)(struct clk *clk);
+};
+
+extern unsigned int mpurate;
+extern struct list_head clocks;
+extern spinlock_t clockfw_lock;
+
+extern int clk_init(struct clk_functions * custom_clocks);
+extern int clk_register(struct clk *clk);
+extern void clk_unregister(struct clk *clk);
+extern void propagate_rate(struct clk *clk);
+extern void followparent_recalc(struct clk * clk);
+extern void clk_allow_idle(struct clk *clk);
+extern void clk_deny_idle(struct clk *clk);
+
+/* Clock flags */
+#define RATE_CKCTL (1 << 0) /* Main fixed ratio clocks */
+#define RATE_FIXED (1 << 1) /* Fixed clock rate */
+#define RATE_PROPAGATES (1 << 2) /* Program children too */
+#define VIRTUAL_CLOCK (1 << 3) /* Composite clock from table */
+#define ALWAYS_ENABLED (1 << 4) /* Clock cannot be disabled */
+#define ENABLE_REG_32BIT (1 << 5) /* Use 32-bit access */
+#define VIRTUAL_IO_ADDRESS (1 << 6) /* Clock in virtual address */
+#define CLOCK_IDLE_CONTROL (1 << 7)
+#define CLOCK_NO_IDLE_PARENT (1 << 8)
+#define DELAYED_APP (1 << 9) /* Delay application of clock */
+#define CONFIG_PARTICIPANT (1 << 10) /* Fundamental clock */
+#define CM_MPU_SEL1 (1 << 11) /* Domain divider/source */
+#define CM_DSP_SEL1 (1 << 12)
+#define CM_GFX_SEL1 (1 << 13)
+#define CM_MODEM_SEL1 (1 << 14)
+#define CM_CORE_SEL1 (1 << 15) /* Sets divider for many */
+#define CM_CORE_SEL2 (1 << 16) /* sets parent for GPT */
+#define CM_WKUP_SEL1 (1 << 17)
+#define CM_PLL_SEL1 (1 << 18)
+#define CM_PLL_SEL2 (1 << 19)
+#define CM_SYSCLKOUT_SEL1 (1 << 20)
+#define CLOCK_IN_OMAP730 (1 << 21)
+#define CLOCK_IN_OMAP1510 (1 << 22)
+#define CLOCK_IN_OMAP16XX (1 << 23)
+#define CLOCK_IN_OMAP242X (1 << 24)
+#define CLOCK_IN_OMAP243X (1 << 25)
+
+#endif
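To show how the structure is meant to be filled in, here is a minimal sketch of a clock table entry and its registration. The entry is hypothetical: example_ck, its parent armper_ck, the ARM_IDLECT2 register and omap1_clk_functions are assumed to exist in the mach-omap1 clock code and are used purely for illustration.

/* Hypothetical clock entry built on the new struct clk (not from this patch). */
static struct clk example_ck = {
	.name		= "example_ck",
	.parent		= &armper_ck,			/* assumed parent clock */
	.flags		= CLOCK_IN_OMAP16XX | RATE_PROPAGATES,
	.enable_reg	= (void __iomem *)ARM_IDLECT2,	/* placeholder enable register */
	.enable_bit	= 7,
	.recalc		= &followparent_recalc,		/* rate tracks the parent */
};

static int __init example_clk_init(void)
{
	clk_init(&omap1_clk_functions);		/* install the mach-specific hooks */
	return clk_register(&example_ck);	/* add it to the global clocks list */
}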
diff --git a/include/asm-arm/arch-omap/common.h b/include/asm-arm/arch-omap/common.h
index 2a676b4f13b..08d58abd821 100644
--- a/include/asm-arm/arch-omap/common.h
+++ b/include/asm-arm/arch-omap/common.h
@@ -31,6 +31,6 @@ struct sys_timer;
extern void omap_map_common_io(void);
extern struct sys_timer omap_timer;
-extern void omap_serial_init(int ports[]);
+extern void omap_serial_init(void);
#endif /* __ARCH_ARM_MACH_OMAP_COMMON_H */
diff --git a/include/asm-arm/arch-omap/cpu.h b/include/asm-arm/arch-omap/cpu.h
index 1119e2b53e7..ec7eb675d92 100644
--- a/include/asm-arm/arch-omap/cpu.h
+++ b/include/asm-arm/arch-omap/cpu.h
@@ -28,12 +28,7 @@
extern unsigned int system_rev;
-#define OMAP_DIE_ID_0 0xfffe1800
-#define OMAP_DIE_ID_1 0xfffe1804
-#define OMAP_PRODUCTION_ID_0 0xfffe2000
-#define OMAP_PRODUCTION_ID_1 0xfffe2004
-#define OMAP32_ID_0 0xfffed400
-#define OMAP32_ID_1 0xfffed404
+#define omap2_cpu_rev() ((system_rev >> 8) & 0x0f)
/*
* Test if multicore OMAP support is needed
@@ -50,7 +45,7 @@ extern unsigned int system_rev;
# define OMAP_NAME omap730
# endif
#endif
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
# ifdef OMAP_NAME
# undef MULTI_OMAP1
# define MULTI_OMAP1
@@ -79,9 +74,11 @@ extern unsigned int system_rev;
* Macros to group OMAP into cpu classes.
* These can be used in most places.
* cpu_is_omap7xx(): True for OMAP730
- * cpu_is_omap15xx(): True for OMAP1510 and OMAP5910
+ * cpu_is_omap15xx(): True for OMAP1510, OMAP5910 and OMAP310
* cpu_is_omap16xx(): True for OMAP1610, OMAP5912 and OMAP1710
- * cpu_is_omap24xx(): True for OMAP2420
+ * cpu_is_omap24xx(): True for OMAP2420, OMAP2422, OMAP2423, OMAP2430
+ * cpu_is_omap242x(): True for OMAP2420, OMAP2422, OMAP2423
+ * cpu_is_omap243x(): True for OMAP2430
*/
#define GET_OMAP_CLASS (system_rev & 0xff)
@@ -91,22 +88,35 @@ static inline int is_omap ##class (void) \
return (GET_OMAP_CLASS == (id)) ? 1 : 0; \
}
+#define GET_OMAP_SUBCLASS ((system_rev >> 20) & 0x0fff)
+
+#define IS_OMAP_SUBCLASS(subclass, id) \
+static inline int is_omap ##subclass (void) \
+{ \
+ return (GET_OMAP_SUBCLASS == (id)) ? 1 : 0; \
+}
+
IS_OMAP_CLASS(7xx, 0x07)
IS_OMAP_CLASS(15xx, 0x15)
IS_OMAP_CLASS(16xx, 0x16)
IS_OMAP_CLASS(24xx, 0x24)
+IS_OMAP_SUBCLASS(242x, 0x242)
+IS_OMAP_SUBCLASS(243x, 0x243)
+
#define cpu_is_omap7xx() 0
#define cpu_is_omap15xx() 0
#define cpu_is_omap16xx() 0
#define cpu_is_omap24xx() 0
+#define cpu_is_omap242x() 0
+#define cpu_is_omap243x() 0
#if defined(MULTI_OMAP1)
# if defined(CONFIG_ARCH_OMAP730)
# undef cpu_is_omap7xx
# define cpu_is_omap7xx() is_omap7xx()
# endif
-# if defined(CONFIG_ARCH_OMAP1510)
+# if defined(CONFIG_ARCH_OMAP15XX)
# undef cpu_is_omap15xx
# define cpu_is_omap15xx() is_omap15xx()
# endif
@@ -119,7 +129,7 @@ IS_OMAP_CLASS(24xx, 0x24)
# undef cpu_is_omap7xx
# define cpu_is_omap7xx() 1
# endif
-# if defined(CONFIG_ARCH_OMAP1510)
+# if defined(CONFIG_ARCH_OMAP15XX)
# undef cpu_is_omap15xx
# define cpu_is_omap15xx() 1
# endif
@@ -129,13 +139,18 @@ IS_OMAP_CLASS(24xx, 0x24)
# endif
# if defined(CONFIG_ARCH_OMAP24XX)
# undef cpu_is_omap24xx
+# undef cpu_is_omap242x
+# undef cpu_is_omap243x
# define cpu_is_omap24xx() 1
+# define cpu_is_omap242x() is_omap242x()
+# define cpu_is_omap243x() is_omap243x()
# endif
#endif
/*
* Macros to detect individual cpu types.
* These are only rarely needed.
+ * cpu_is_omap310(): True for OMAP310
* cpu_is_omap730(): True for OMAP730
* cpu_is_omap1510(): True for OMAP1510
* cpu_is_omap1610(): True for OMAP1610
@@ -144,6 +159,9 @@ IS_OMAP_CLASS(24xx, 0x24)
* cpu_is_omap1621(): True for OMAP1621
* cpu_is_omap1710(): True for OMAP1710
* cpu_is_omap2420(): True for OMAP2420
+ * cpu_is_omap2422(): True for OMAP2422
+ * cpu_is_omap2423(): True for OMAP2423
+ * cpu_is_omap2430(): True for OMAP2430
*/
#define GET_OMAP_TYPE ((system_rev >> 16) & 0xffff)
@@ -153,6 +171,7 @@ static inline int is_omap ##type (void) \
return (GET_OMAP_TYPE == (id)) ? 1 : 0; \
}
+IS_OMAP_TYPE(310, 0x0310)
IS_OMAP_TYPE(730, 0x0730)
IS_OMAP_TYPE(1510, 0x1510)
IS_OMAP_TYPE(1610, 0x1610)
@@ -161,7 +180,11 @@ IS_OMAP_TYPE(5912, 0x1611)
IS_OMAP_TYPE(1621, 0x1621)
IS_OMAP_TYPE(1710, 0x1710)
IS_OMAP_TYPE(2420, 0x2420)
+IS_OMAP_TYPE(2422, 0x2422)
+IS_OMAP_TYPE(2423, 0x2423)
+IS_OMAP_TYPE(2430, 0x2430)
+#define cpu_is_omap310() 0
#define cpu_is_omap730() 0
#define cpu_is_omap1510() 0
#define cpu_is_omap1610() 0
@@ -170,31 +193,33 @@ IS_OMAP_TYPE(2420, 0x2420)
#define cpu_is_omap1621() 0
#define cpu_is_omap1710() 0
#define cpu_is_omap2420() 0
+#define cpu_is_omap2422() 0
+#define cpu_is_omap2423() 0
+#define cpu_is_omap2430() 0
#if defined(MULTI_OMAP1)
# if defined(CONFIG_ARCH_OMAP730)
# undef cpu_is_omap730
# define cpu_is_omap730() is_omap730()
# endif
-# if defined(CONFIG_ARCH_OMAP1510)
-# undef cpu_is_omap1510
-# define cpu_is_omap1510() is_omap1510()
-# endif
#else
# if defined(CONFIG_ARCH_OMAP730)
# undef cpu_is_omap730
# define cpu_is_omap730() 1
# endif
-# if defined(CONFIG_ARCH_OMAP1510)
-# undef cpu_is_omap1510
-# define cpu_is_omap1510() 1
-# endif
#endif
/*
* Whether we have MULTI_OMAP1 or not, we still need to distinguish
- * between 1611B/5912 and 1710.
+ * between 310 vs. 1510 and 1611B/5912 vs. 1710.
*/
+#if defined(CONFIG_ARCH_OMAP15XX)
+# undef cpu_is_omap310
+# undef cpu_is_omap1510
+# define cpu_is_omap310() is_omap310()
+# define cpu_is_omap1510() is_omap1510()
+#endif
+
#if defined(CONFIG_ARCH_OMAP16XX)
# undef cpu_is_omap1610
# undef cpu_is_omap1611
@@ -208,9 +233,20 @@ IS_OMAP_TYPE(2420, 0x2420)
# define cpu_is_omap1710() is_omap1710()
#endif
-#if defined(CONFIG_ARCH_OMAP2420)
-# undef cpu_is_omap2420
-# define cpu_is_omap2420() 1
+#if defined(CONFIG_ARCH_OMAP24XX)
+# undef cpu_is_omap2420
+# undef cpu_is_omap2422
+# undef cpu_is_omap2423
+# undef cpu_is_omap2430
+# define cpu_is_omap2420() is_omap2420()
+# define cpu_is_omap2422() is_omap2422()
+# define cpu_is_omap2423() is_omap2423()
+# define cpu_is_omap2430() is_omap2430()
#endif
+/* Macros to detect if we have OMAP1 or OMAP2 */
+#define cpu_class_is_omap1() (cpu_is_omap730() || cpu_is_omap15xx() || \
+ cpu_is_omap16xx())
+#define cpu_class_is_omap2() cpu_is_omap24xx()
+
#endif
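The bit layout used by the class/subclass/type macros is easiest to see with a worked example (the system_rev value below is made up for illustration):

/* Assume system_rev == 0x24200024:
 *   GET_OMAP_CLASS    =  0x24200024        & 0xff   = 0x24   -> cpu_is_omap24xx()
 *   GET_OMAP_SUBCLASS = (0x24200024 >> 20) & 0x0fff = 0x242  -> cpu_is_omap242x()
 *   GET_OMAP_TYPE     = (0x24200024 >> 16) & 0xffff = 0x2420 -> cpu_is_omap2420()
 *   omap2_cpu_rev()   = (0x24200024 >> 8)  & 0x0f   = 0x0
 */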
diff --git a/include/asm-arm/arch-omap/dma.h b/include/asm-arm/arch-omap/dma.h
index 04ebef5c6e9..ccbcb580a5c 100644
--- a/include/asm-arm/arch-omap/dma.h
+++ b/include/asm-arm/arch-omap/dma.h
@@ -22,9 +22,109 @@
#define __ASM_ARCH_DMA_H
#define MAX_DMA_ADDRESS 0xffffffff
+#define MAX_DMA_CHANNELS 0
+
+/* Hardware registers for omap1 */
+#define OMAP_DMA_BASE (0xfffed800)
+#define OMAP_DMA_GCR (OMAP_DMA_BASE + 0x400)
+#define OMAP_DMA_GSCR (OMAP_DMA_BASE + 0x404)
+#define OMAP_DMA_GRST (OMAP_DMA_BASE + 0x408)
+#define OMAP_DMA_HW_ID (OMAP_DMA_BASE + 0x442)
+#define OMAP_DMA_PCH2_ID (OMAP_DMA_BASE + 0x444)
+#define OMAP_DMA_PCH0_ID (OMAP_DMA_BASE + 0x446)
+#define OMAP_DMA_PCH1_ID (OMAP_DMA_BASE + 0x448)
+#define OMAP_DMA_PCHG_ID (OMAP_DMA_BASE + 0x44a)
+#define OMAP_DMA_PCHD_ID (OMAP_DMA_BASE + 0x44c)
+#define OMAP_DMA_CAPS_0_U (OMAP_DMA_BASE + 0x44e)
+#define OMAP_DMA_CAPS_0_L (OMAP_DMA_BASE + 0x450)
+#define OMAP_DMA_CAPS_1_U (OMAP_DMA_BASE + 0x452)
+#define OMAP_DMA_CAPS_1_L (OMAP_DMA_BASE + 0x454)
+#define OMAP_DMA_CAPS_2 (OMAP_DMA_BASE + 0x456)
+#define OMAP_DMA_CAPS_3 (OMAP_DMA_BASE + 0x458)
+#define OMAP_DMA_CAPS_4 (OMAP_DMA_BASE + 0x45a)
+#define OMAP_DMA_PCH2_SR (OMAP_DMA_BASE + 0x460)
+#define OMAP_DMA_PCH0_SR (OMAP_DMA_BASE + 0x480)
+#define OMAP_DMA_PCH1_SR (OMAP_DMA_BASE + 0x482)
+#define OMAP_DMA_PCHD_SR (OMAP_DMA_BASE + 0x4c0)
+
+/* Hardware registers for omap2 */
+#define OMAP24XX_DMA_BASE (L4_24XX_BASE + 0x56000)
+#define OMAP_DMA4_REVISION (OMAP24XX_DMA_BASE + 0x00)
+#define OMAP_DMA4_GCR_REG (OMAP24XX_DMA_BASE + 0x78)
+#define OMAP_DMA4_IRQSTATUS_L0 (OMAP24XX_DMA_BASE + 0x08)
+#define OMAP_DMA4_IRQSTATUS_L1 (OMAP24XX_DMA_BASE + 0x0c)
+#define OMAP_DMA4_IRQSTATUS_L2 (OMAP24XX_DMA_BASE + 0x10)
+#define OMAP_DMA4_IRQSTATUS_L3 (OMAP24XX_DMA_BASE + 0x14)
+#define OMAP_DMA4_IRQENABLE_L0 (OMAP24XX_DMA_BASE + 0x18)
+#define OMAP_DMA4_IRQENABLE_L1 (OMAP24XX_DMA_BASE + 0x1c)
+#define OMAP_DMA4_IRQENABLE_L2 (OMAP24XX_DMA_BASE + 0x20)
+#define OMAP_DMA4_IRQENABLE_L3 (OMAP24XX_DMA_BASE + 0x24)
+#define OMAP_DMA4_SYSSTATUS (OMAP24XX_DMA_BASE + 0x28)
+#define OMAP_DMA4_CAPS_0 (OMAP24XX_DMA_BASE + 0x64)
+#define OMAP_DMA4_CAPS_2 (OMAP24XX_DMA_BASE + 0x6c)
+#define OMAP_DMA4_CAPS_3 (OMAP24XX_DMA_BASE + 0x70)
+#define OMAP_DMA4_CAPS_4 (OMAP24XX_DMA_BASE + 0x74)
+
+#ifdef CONFIG_ARCH_OMAP1
#define OMAP_LOGICAL_DMA_CH_COUNT 17
+/* Common channel specific registers for omap1 */
+#define OMAP_DMA_CSDP_REG(n) __REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x00)
+#define OMAP_DMA_CCR_REG(n) __REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x02)
+#define OMAP_DMA_CICR_REG(n) __REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x04)
+#define OMAP_DMA_CSR_REG(n) __REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x06)
+#define OMAP_DMA_CEN_REG(n) __REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x10)
+#define OMAP_DMA_CFN_REG(n) __REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x12)
+#define OMAP_DMA_CSFI_REG(n) __REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x14)
+#define OMAP_DMA_CSEI_REG(n) __REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x16)
+#define OMAP_DMA_CSAC_REG(n) __REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x18)
+#define OMAP_DMA_CDAC_REG(n) __REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x1a)
+#define OMAP_DMA_CDEI_REG(n) __REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x1c)
+#define OMAP_DMA_CDFI_REG(n) __REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x1e)
+#define OMAP_DMA_CLNK_CTRL_REG(n) __REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x28)
+
+#else
+
+#define OMAP_LOGICAL_DMA_CH_COUNT 32 /* REVISIT: Is this 32 + 2? */
+
+/* Common channel specific registers for omap2 */
+#define OMAP_DMA_CCR_REG(n) __REG32(OMAP24XX_DMA_BASE + 0x60 * (n) + 0x80)
+#define OMAP_DMA_CLNK_CTRL_REG(n) __REG32(OMAP24XX_DMA_BASE + 0x60 * (n) + 0x84)
+#define OMAP_DMA_CICR_REG(n) __REG32(OMAP24XX_DMA_BASE + 0x60 * (n) + 0x88)
+#define OMAP_DMA_CSR_REG(n) __REG32(OMAP24XX_DMA_BASE + 0x60 * (n) + 0x8c)
+#define OMAP_DMA_CSDP_REG(n) __REG32(OMAP24XX_DMA_BASE + 0x60 * (n) + 0x90)
+#define OMAP_DMA_CEN_REG(n) __REG32(OMAP24XX_DMA_BASE + 0x60 * (n) + 0x94)
+#define OMAP_DMA_CFN_REG(n) __REG32(OMAP24XX_DMA_BASE + 0x60 * (n) + 0x98)
+#define OMAP_DMA_CSEI_REG(n) __REG32(OMAP24XX_DMA_BASE + 0x60 * (n) + 0xa4)
+#define OMAP_DMA_CSFI_REG(n) __REG32(OMAP24XX_DMA_BASE + 0x60 * (n) + 0xa8)
+#define OMAP_DMA_CDEI_REG(n) __REG32(OMAP24XX_DMA_BASE + 0x60 * (n) + 0xac)
+#define OMAP_DMA_CDFI_REG(n) __REG32(OMAP24XX_DMA_BASE + 0x60 * (n) + 0xb0)
+#define OMAP_DMA_CSAC_REG(n) __REG32(OMAP24XX_DMA_BASE + 0x60 * (n) + 0xb4)
+#define OMAP_DMA_CDAC_REG(n) __REG32(OMAP24XX_DMA_BASE + 0x60 * (n) + 0xb8)
+
+#endif
+
+/* Channel specific registers only on omap1 */
+#define OMAP1_DMA_CSSA_L_REG(n) __REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x08)
+#define OMAP1_DMA_CSSA_U_REG(n) __REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x0a)
+#define OMAP1_DMA_CDSA_L_REG(n) __REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x0c)
+#define OMAP1_DMA_CDSA_U_REG(n) __REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x0e)
+#define OMAP1_DMA_COLOR_L_REG(n) __REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x20)
+#define OMAP1_DMA_CCR2_REG(n) __REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x24)
+#define OMAP1_DMA_COLOR_U_REG(n) __REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x22)
+#define OMAP1_DMA_LCH_CTRL_REG(n) __REG16(OMAP_DMA_BASE + 0x40 * (n) + 0x2a)
+
+/* Channel specific registers only on omap2 */
+#define OMAP2_DMA_CSSA_REG(n) __REG32(OMAP24XX_DMA_BASE + 0x60 * (n) + 0x9c)
+#define OMAP2_DMA_CDSA_REG(n) __REG32(OMAP24XX_DMA_BASE + 0x60 * (n) + 0xa0)
+#define OMAP2_DMA_CCEN_REG(n) __REG32(OMAP24XX_DMA_BASE + 0x60 * (n) + 0xbc)
+#define OMAP2_DMA_CCFN_REG(n) __REG32(OMAP24XX_DMA_BASE + 0x60 * (n) + 0xc0)
+#define OMAP2_DMA_COLOR_REG(n) __REG32(OMAP24XX_DMA_BASE + 0x60 * (n) + 0xc4)
+
+/*----------------------------------------------------------------------------*/
+
+/* DMA channels for omap1 */
#define OMAP_DMA_NO_DEVICE 0
#define OMAP_DMA_MCSI1_TX 1
#define OMAP_DMA_MCSI1_RX 2
@@ -85,29 +185,72 @@
#define OMAP_DMA_MMC2_RX 55
#define OMAP_DMA_CRYPTO_DES_OUT 56
+/* DMA channels for 24xx */
+#define OMAP24XX_DMA_NO_DEVICE 0
+#define OMAP24XX_DMA_XTI_DMA 1 /* S_DMA_0 */
+#define OMAP24XX_DMA_EXT_NDMA_REQ0 2 /* S_DMA_1 */
+#define OMAP24XX_DMA_EXT_NDMA_REQ1 3 /* S_DMA_2 */
+#define OMAP24XX_DMA_GPMC 4 /* S_DMA_3 */
+#define OMAP24XX_DMA_GFX 5 /* S_DMA_4 */
+#define OMAP24XX_DMA_DSS 6 /* S_DMA_5 */
+#define OMAP24XX_DMA_VLYNQ_TX 7 /* S_DMA_6 */
+#define OMAP24XX_DMA_CWT 8 /* S_DMA_7 */
+#define OMAP24XX_DMA_AES_TX 9 /* S_DMA_8 */
+#define OMAP24XX_DMA_AES_RX 10 /* S_DMA_9 */
+#define OMAP24XX_DMA_DES_TX 11 /* S_DMA_10 */
+#define OMAP24XX_DMA_DES_RX 12 /* S_DMA_11 */
+#define OMAP24XX_DMA_SHA1MD5_RX 13 /* S_DMA_12 */
-#define OMAP_DMA_BASE (0xfffed800)
-#define OMAP_DMA_GCR (OMAP_DMA_BASE + 0x400)
-#define OMAP_DMA_GSCR (OMAP_DMA_BASE + 0x404)
-#define OMAP_DMA_GRST (OMAP_DMA_BASE + 0x408)
-#define OMAP_DMA_HW_ID (OMAP_DMA_BASE + 0x442)
-#define OMAP_DMA_PCH2_ID (OMAP_DMA_BASE + 0x444)
-#define OMAP_DMA_PCH0_ID (OMAP_DMA_BASE + 0x446)
-#define OMAP_DMA_PCH1_ID (OMAP_DMA_BASE + 0x448)
-#define OMAP_DMA_PCHG_ID (OMAP_DMA_BASE + 0x44a)
-#define OMAP_DMA_PCHD_ID (OMAP_DMA_BASE + 0x44c)
-#define OMAP_DMA_CAPS_0_U (OMAP_DMA_BASE + 0x44e)
-#define OMAP_DMA_CAPS_0_L (OMAP_DMA_BASE + 0x450)
-#define OMAP_DMA_CAPS_1_U (OMAP_DMA_BASE + 0x452)
-#define OMAP_DMA_CAPS_1_L (OMAP_DMA_BASE + 0x454)
-#define OMAP_DMA_CAPS_2 (OMAP_DMA_BASE + 0x456)
-#define OMAP_DMA_CAPS_3 (OMAP_DMA_BASE + 0x458)
-#define OMAP_DMA_CAPS_4 (OMAP_DMA_BASE + 0x45a)
-#define OMAP_DMA_PCH2_SR (OMAP_DMA_BASE + 0x460)
-#define OMAP_DMA_PCH0_SR (OMAP_DMA_BASE + 0x480)
-#define OMAP_DMA_PCH1_SR (OMAP_DMA_BASE + 0x482)
-#define OMAP_DMA_PCHD_SR (OMAP_DMA_BASE + 0x4c0)
+#define OMAP24XX_DMA_EAC_AC_RD 17 /* S_DMA_16 */
+#define OMAP24XX_DMA_EAC_AC_WR 18 /* S_DMA_17 */
+#define OMAP24XX_DMA_EAC_MD_UL_RD 19 /* S_DMA_18 */
+#define OMAP24XX_DMA_EAC_MD_UL_WR 20 /* S_DMA_19 */
+#define OMAP24XX_DMA_EAC_MD_DL_RD 21 /* S_DMA_20 */
+#define OMAP24XX_DMA_EAC_MD_DL_WR 22 /* S_DMA_21 */
+#define OMAP24XX_DMA_EAC_BT_UL_RD 23 /* S_DMA_22 */
+#define OMAP24XX_DMA_EAC_BT_UL_WR 24 /* S_DMA_23 */
+#define OMAP24XX_DMA_EAC_BT_DL_RD 25 /* S_DMA_24 */
+#define OMAP24XX_DMA_EAC_BT_DL_WR 26 /* S_DMA_25 */
+#define OMAP24XX_DMA_I2C1_TX 27 /* S_DMA_26 */
+#define OMAP24XX_DMA_I2C1_RX 28 /* S_DMA_27 */
+#define OMAP24XX_DMA_I2C2_TX 29 /* S_DMA_28 */
+#define OMAP24XX_DMA_I2C2_RX 30 /* S_DMA_29 */
+#define OMAP24XX_DMA_MCBSP1_TX 31 /* SDMA_30 */
+#define OMAP24XX_DMA_MCBSP1_RX 32 /* SDMA_31 */
+#define OMAP24XX_DMA_MCBSP2_TX 33 /* SDMA_32 */
+#define OMAP24XX_DMA_MCBSP2_RX 34 /* SDMA_33 */
+#define OMAP24XX_DMA_SPI1_TX0 35 /* SDMA_34 */
+#define OMAP24XX_DMA_SPI1_RX0 36 /* SDMA_35 */
+#define OMAP24XX_DMA_SPI1_TX1 37 /* SDMA_36 */
+#define OMAP24XX_DMA_SPI1_RX1 38 /* SDMA_37 */
+#define OMAP24XX_DMA_SPI1_TX2 39 /* SDMA_38 */
+#define OMAP24XX_DMA_SPI1_RX2 40 /* SDMA_39 */
+#define OMAP24XX_DMA_SPI1_TX3 41 /* SDMA_40 */
+#define OMAP24XX_DMA_SPI1_RX3 42 /* SDMA_41 */
+#define OMAP24XX_DMA_SPI2_TX0 43 /* SDMA_42 */
+#define OMAP24XX_DMA_SPI2_RX0 44 /* SDMA_43 */
+#define OMAP24XX_DMA_SPI2_TX1 45 /* SDMA_44 */
+#define OMAP24XX_DMA_SPI2_RX1 46 /* SDMA_45 */
+#define OMAP24XX_DMA_UART1_TX 49 /* SDMA_48 */
+#define OMAP24XX_DMA_UART1_RX 50 /* SDMA_49 */
+#define OMAP24XX_DMA_UART2_TX 51 /* SDMA_50 */
+#define OMAP24XX_DMA_UART2_RX 52 /* SDMA_51 */
+#define OMAP24XX_DMA_UART3_TX 53 /* SDMA_52 */
+#define OMAP24XX_DMA_UART3_RX 54 /* SDMA_53 */
+#define OMAP24XX_DMA_USB_W2FC_TX0 55 /* SDMA_54 */
+#define OMAP24XX_DMA_USB_W2FC_RX0 56 /* SDMA_55 */
+#define OMAP24XX_DMA_USB_W2FC_TX1 57 /* SDMA_56 */
+#define OMAP24XX_DMA_USB_W2FC_RX1 58 /* SDMA_57 */
+#define OMAP24XX_DMA_USB_W2FC_TX2 59 /* SDMA_58 */
+#define OMAP24XX_DMA_USB_W2FC_RX2 60 /* SDMA_59 */
+#define OMAP24XX_DMA_MMC1_TX 61 /* SDMA_60 */
+#define OMAP24XX_DMA_MMC1_RX 62 /* SDMA_61 */
+#define OMAP24XX_DMA_MS 63 /* SDMA_62 */
+
+/*----------------------------------------------------------------------------*/
+
+/* Hardware registers for LCD DMA */
#define OMAP1510_DMA_LCD_BASE (0xfffedb00)
#define OMAP1510_DMA_LCD_CTRL (OMAP1510_DMA_LCD_BASE + 0x00)
#define OMAP1510_DMA_LCD_TOP_F1_L (OMAP1510_DMA_LCD_BASE + 0x02)
@@ -116,7 +259,7 @@
#define OMAP1510_DMA_LCD_BOT_F1_U (OMAP1510_DMA_LCD_BASE + 0x08)
#define OMAP1610_DMA_LCD_BASE (0xfffee300)
-#define OMAP1610_DMA_LCD_CSDP (OMAP1610_DMA_LCD_BASE + 0xc0)
+#define OMAP1610_DMA_LCD_CSDP (OMAP1610_DMA_LCD_BASE + 0xc0)
#define OMAP1610_DMA_LCD_CCR (OMAP1610_DMA_LCD_BASE + 0xc2)
#define OMAP1610_DMA_LCD_CTRL (OMAP1610_DMA_LCD_BASE + 0xc4)
#define OMAP1610_DMA_LCD_TOP_B1_L (OMAP1610_DMA_LCD_BASE + 0xc8)
@@ -134,37 +277,18 @@
#define OMAP1610_DMA_LCD_LCH_CTRL (OMAP1610_DMA_LCD_BASE + 0xea)
#define OMAP1610_DMA_LCD_SRC_FI_B1_U (OMAP1610_DMA_LCD_BASE + 0xf4)
-
-/* Every LCh has its own set of the registers below */
-#define OMAP_DMA_CSDP(n) (OMAP_DMA_BASE + 0x40 * (n) + 0x00)
-#define OMAP_DMA_CCR(n) (OMAP_DMA_BASE + 0x40 * (n) + 0x02)
-#define OMAP_DMA_CICR(n) (OMAP_DMA_BASE + 0x40 * (n) + 0x04)
-#define OMAP_DMA_CSR(n) (OMAP_DMA_BASE + 0x40 * (n) + 0x06)
-#define OMAP_DMA_CSSA_L(n) (OMAP_DMA_BASE + 0x40 * (n) + 0x08)
-#define OMAP_DMA_CSSA_U(n) (OMAP_DMA_BASE + 0x40 * (n) + 0x0a)
-#define OMAP_DMA_CDSA_L(n) (OMAP_DMA_BASE + 0x40 * (n) + 0x0c)
-#define OMAP_DMA_CDSA_U(n) (OMAP_DMA_BASE + 0x40 * (n) + 0x0e)
-#define OMAP_DMA_CEN(n) (OMAP_DMA_BASE + 0x40 * (n) + 0x10)
-#define OMAP_DMA_CFN(n) (OMAP_DMA_BASE + 0x40 * (n) + 0x12)
-#define OMAP_DMA_CSFI(n) (OMAP_DMA_BASE + 0x40 * (n) + 0x14)
-#define OMAP_DMA_CSEI(n) (OMAP_DMA_BASE + 0x40 * (n) + 0x16)
-#define OMAP_DMA_CSAC(n) (OMAP_DMA_BASE + 0x40 * (n) + 0x18)
-#define OMAP_DMA_CDAC(n) (OMAP_DMA_BASE + 0x40 * (n) + 0x1a)
-#define OMAP_DMA_CDEI(n) (OMAP_DMA_BASE + 0x40 * (n) + 0x1c)
-#define OMAP_DMA_CDFI(n) (OMAP_DMA_BASE + 0x40 * (n) + 0x1e)
-#define OMAP_DMA_COLOR_L(n) (OMAP_DMA_BASE + 0x40 * (n) + 0x20)
-#define OMAP_DMA_COLOR_U(n) (OMAP_DMA_BASE + 0x40 * (n) + 0x22)
-#define OMAP_DMA_CCR2(n) (OMAP_DMA_BASE + 0x40 * (n) + 0x24)
-#define OMAP_DMA_CLNK_CTRL(n) (OMAP_DMA_BASE + 0x40 * (n) + 0x28)
-#define OMAP_DMA_LCH_CTRL(n) (OMAP_DMA_BASE + 0x40 * (n) + 0x2a)
-
-#define OMAP_DMA_TOUT_IRQ (1 << 0)
+#define OMAP_DMA_TOUT_IRQ (1 << 0) /* Only on omap1 */
#define OMAP_DMA_DROP_IRQ (1 << 1)
#define OMAP_DMA_HALF_IRQ (1 << 2)
#define OMAP_DMA_FRAME_IRQ (1 << 3)
#define OMAP_DMA_LAST_IRQ (1 << 4)
#define OMAP_DMA_BLOCK_IRQ (1 << 5)
-#define OMAP_DMA_SYNC_IRQ (1 << 6)
+#define OMAP1_DMA_SYNC_IRQ (1 << 6)
+#define OMAP2_DMA_PKT_IRQ (1 << 7)
+#define OMAP2_DMA_TRANS_ERR_IRQ (1 << 8)
+#define OMAP2_DMA_SECURE_ERR_IRQ (1 << 9)
+#define OMAP2_DMA_SUPERVISOR_ERR_IRQ (1 << 10)
+#define OMAP2_DMA_MISALIGNED_ERR_IRQ (1 << 11)
#define OMAP_DMA_DATA_TYPE_S8 0x00
#define OMAP_DMA_DATA_TYPE_S16 0x01
@@ -194,6 +318,7 @@ enum {
OMAP_LCD_DMA_B2_BOTTOM
};
+/* REVISIT: Check if BURST_4 is really 1 (or 2) */
enum omap_dma_burst_mode {
OMAP_DMA_DATA_BURST_DIS = 0,
OMAP_DMA_DATA_BURST_4,
@@ -206,6 +331,31 @@ enum omap_dma_color_mode {
OMAP_DMA_TRANSPARENT_COPY
};
+struct omap_dma_channel_params {
+ int data_type; /* data type 8,16,32 */
+ int elem_count; /* number of elements in a frame */
+ int frame_count; /* number of frames in a block */
+
+ int src_port; /* Only on OMAP1 REVISIT: Is this needed? */
+ int src_amode; /* constant, post increment, indexed, double indexed */
+ int src_start; /* source address : physical */
+ int src_ei; /* source element index */
+ int src_fi; /* source frame index */
+
+ int dst_port; /* Only on OMAP1 REVISIT: Is this needed? */
+ int dst_amode; /* constant, post increment, indexed, double indexed */
+ int dst_start; /* destination address : physical */
+ int dst_ei; /* destination element index */
+ int dst_fi; /* destination frame index */
+
+ int trigger; /* trigger attached if the channel is synchronized */
+ int sync_mode; /* sync on element, frame, block or packet */
+ int src_or_dst_synch; /* source synch(1) or destination synch(0) */
+
+ int ie; /* interrupt enabled */
+};
+
+
extern void omap_set_dma_priority(int dst_port, int priority);
extern int omap_request_dma(int dev_id, const char *dev_name,
void (* callback)(int lch, u16 ch_status, void *data),
@@ -217,24 +367,30 @@ extern void omap_start_dma(int lch);
extern void omap_stop_dma(int lch);
extern void omap_set_dma_transfer_params(int lch, int data_type,
int elem_count, int frame_count,
- int sync_mode);
+ int sync_mode,
+ int dma_trigger, int src_or_dst_synch);
extern void omap_set_dma_color_mode(int lch, enum omap_dma_color_mode mode,
u32 color);
extern void omap_set_dma_src_params(int lch, int src_port, int src_amode,
- unsigned long src_start);
+ unsigned long src_start,
+ int src_ei, int src_fi);
extern void omap_set_dma_src_index(int lch, int eidx, int fidx);
extern void omap_set_dma_src_data_pack(int lch, int enable);
extern void omap_set_dma_src_burst_mode(int lch,
enum omap_dma_burst_mode burst_mode);
extern void omap_set_dma_dest_params(int lch, int dest_port, int dest_amode,
- unsigned long dest_start);
+ unsigned long dest_start,
+ int dst_ei, int dst_fi);
extern void omap_set_dma_dest_index(int lch, int eidx, int fidx);
extern void omap_set_dma_dest_data_pack(int lch, int enable);
extern void omap_set_dma_dest_burst_mode(int lch,
enum omap_dma_burst_mode burst_mode);
+extern void omap_set_dma_params(int lch,
+ struct omap_dma_channel_params * params);
+
extern void omap_dma_link_lch (int lch_head, int lch_queue);
extern void omap_dma_unlink_lch (int lch_head, int lch_queue);
@@ -244,9 +400,6 @@ extern int omap_get_dma_src_addr_counter(int lch);
extern void omap_clear_dma(int lch);
extern int omap_dma_running(void);
-/* Returns 1 if the DMA module is in OMAP1510-compatible mode, 0 otherwise */
-extern int omap_dma_in_1510_mode(void);
-
/* LCD DMA functions */
extern int omap_request_lcd_dma(void (* callback)(u16 status, void *data),
void *data);
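To illustrate the widened prototypes, here is a hedged sketch of a driver setting up a simple destination-synchronized transfer. The request line, ports, addressing modes and sizes are placeholders; the OMAP_DMA_SYNC_*, OMAP_DMA_AMODE_* and OMAP_DMA_PORT_* constants are assumed to come from the unchanged parts of this header.

/* Sketch only: one frame of 16-bit elements, destination-synchronized. */
static void example_dma_cb(int lch, u16 ch_status, void *data)
{
	/* completion or error status for channel lch would be handled here */
}

static int example_start_tx(unsigned long src, unsigned long dst, int elems)
{
	int lch, ret;

	ret = omap_request_dma(OMAP_DMA_MCSI1_TX, "example",
			       example_dma_cb, NULL, &lch);
	if (ret)
		return ret;

	omap_set_dma_transfer_params(lch, OMAP_DMA_DATA_TYPE_S16,
				     elems, 1, OMAP_DMA_SYNC_FRAME,
				     OMAP_DMA_MCSI1_TX, 0);
	omap_set_dma_src_params(lch, OMAP_DMA_PORT_EMIFF,
				OMAP_DMA_AMODE_POST_INC, src, 0, 0);
	omap_set_dma_dest_params(lch, OMAP_DMA_PORT_TIPB,
				 OMAP_DMA_AMODE_CONSTANT, dst, 0, 0);
	omap_start_dma(lch);
	return lch;
}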
diff --git a/include/asm-arm/arch-omap/entry-macro.S b/include/asm-arm/arch-omap/entry-macro.S
index 0d29b9c56a9..f8814a84910 100644
--- a/include/asm-arm/arch-omap/entry-macro.S
+++ b/include/asm-arm/arch-omap/entry-macro.S
@@ -10,6 +10,20 @@
#if defined(CONFIG_ARCH_OMAP1)
+#if defined(CONFIG_ARCH_OMAP730) && \
+ (defined(CONFIG_ARCH_OMAP15XX) || defined(CONFIG_ARCH_OMAP16XX))
+#error "FIXME: OMAP730 doesn't support multiple-OMAP"
+#elif defined(CONFIG_ARCH_OMAP730)
+#define INT_IH2_IRQ INT_730_IH2_IRQ
+#elif defined(CONFIG_ARCH_OMAP15XX)
+#define INT_IH2_IRQ INT_1510_IH2_IRQ
+#elif defined(CONFIG_ARCH_OMAP16XX)
+#define INT_IH2_IRQ INT_1610_IH2_IRQ
+#else
+#warning "IH2 IRQ defaulted"
+#define INT_IH2_IRQ INT_1510_IH2_IRQ
+#endif
+
.macro disable_fiq
.endm
diff --git a/include/asm-arm/arch-omap/fpga.h b/include/asm-arm/arch-omap/fpga.h
index 676807dc50e..6a883e0bdbb 100644
--- a/include/asm-arm/arch-omap/fpga.h
+++ b/include/asm-arm/arch-omap/fpga.h
@@ -19,7 +19,7 @@
#ifndef __ASM_ARCH_OMAP_FPGA_H
#define __ASM_ARCH_OMAP_FPGA_H
-#if defined(CONFIG_MACH_OMAP_INNOVATOR) && defined(CONFIG_ARCH_OMAP1510)
+#if defined(CONFIG_MACH_OMAP_INNOVATOR) && defined(CONFIG_ARCH_OMAP15XX)
extern void omap1510_fpga_init_irq(void);
#else
#define omap1510_fpga_init_irq() (0)
@@ -77,6 +77,8 @@ struct h2p2_dbg_fpga {
#define H2P2_DBG_FPGA_LOAD_METER_SIZE 11
#define H2P2_DBG_FPGA_LOAD_METER_MASK ((1 << H2P2_DBG_FPGA_LOAD_METER_SIZE) - 1)
+#define H2P2_DBG_FPGA_P2_LED_TIMER (1 << 0)
+#define H2P2_DBG_FPGA_P2_LED_IDLE (1 << 1)
/*
* ---------------------------------------------------------------------------
diff --git a/include/asm-arm/arch-omap/gpio.h b/include/asm-arm/arch-omap/gpio.h
index 74cb2b93b70..1b3885741ac 100644
--- a/include/asm-arm/arch-omap/gpio.h
+++ b/include/asm-arm/arch-omap/gpio.h
@@ -67,7 +67,7 @@
#define OMAP_GPIO_IRQ(nr) (OMAP_GPIO_IS_MPUIO(nr) ? \
IH_MPUIO_BASE + ((nr) & 0x0f) : \
- IH_GPIO_BASE + ((nr) & 0x3f))
+ IH_GPIO_BASE + (nr))
extern int omap_gpio_init(void); /* Call from board init only */
extern int omap_request_gpio(int gpio);
diff --git a/include/asm-arm/arch-omap/hardware.h b/include/asm-arm/arch-omap/hardware.h
index 60201e1dd6a..5406b875c42 100644
--- a/include/asm-arm/arch-omap/hardware.h
+++ b/include/asm-arm/arch-omap/hardware.h
@@ -267,8 +267,6 @@
#define OMAP_LPG2_LCR (OMAP_LPG2_BASE + 0x00)
#define OMAP_LPG2_PMR (OMAP_LPG2_BASE + 0x04)
-#ifndef __ASSEMBLER__
-
/*
* ---------------------------------------------------------------------------
* Processor specific defines
@@ -277,13 +275,11 @@
#include "omap730.h"
#include "omap1510.h"
-
-#ifdef CONFIG_ARCH_OMAP24XX
#include "omap24xx.h"
-#endif
-
#include "omap16xx.h"
+#ifndef __ASSEMBLER__
+
/*
* ---------------------------------------------------------------------------
* Board specific defines
diff --git a/include/asm-arm/arch-omap/io.h b/include/asm-arm/arch-omap/io.h
index 3d5bcd54508..f5bcc9a1aed 100644
--- a/include/asm-arm/arch-omap/io.h
+++ b/include/asm-arm/arch-omap/io.h
@@ -52,23 +52,33 @@
* ----------------------------------------------------------------------------
*/
+#define PCIO_BASE 0
+
#if defined(CONFIG_ARCH_OMAP1)
+
#define IO_PHYS 0xFFFB0000
-#define IO_OFFSET -0x01000000 /* Virtual IO = 0xfefb0000 */
+#define IO_OFFSET 0x01000000 /* Virtual IO = 0xfefb0000 */
#define IO_SIZE 0x40000
+#define IO_VIRT (IO_PHYS - IO_OFFSET)
+#define IO_ADDRESS(pa) ((pa) - IO_OFFSET)
+#define io_p2v(pa) ((pa) - IO_OFFSET)
+#define io_v2p(va) ((va) + IO_OFFSET)
#elif defined(CONFIG_ARCH_OMAP2)
-#define IO_PHYS 0x48000000 /* L4 peripherals; other stuff has to be mapped *
- * manually. */
-#define IO_OFFSET 0x90000000 /* Virtual IO = 0xd8000000 */
-#define IO_SIZE 0x08000000
-#endif
-#define IO_VIRT (IO_PHYS + IO_OFFSET)
-#define IO_ADDRESS(x) ((x) + IO_OFFSET)
-#define PCIO_BASE 0
-#define io_p2v(x) ((x) + IO_OFFSET)
-#define io_v2p(x) ((x) - IO_OFFSET)
+/* We map both L3 and L4 on OMAP2 */
+#define L3_24XX_PHYS L3_24XX_BASE /* 0x68000000 */
+#define L3_24XX_VIRT 0xf8000000
+#define L3_24XX_SIZE SZ_1M /* 44kB of 128MB used, want 1MB sect */
+#define L4_24XX_PHYS L4_24XX_BASE /* 0x48000000 */
+#define L4_24XX_VIRT 0xd8000000
+#define L4_24XX_SIZE SZ_1M /* 1MB of 128MB used, want 1MB sect */
+#define IO_OFFSET 0x90000000
+#define IO_ADDRESS(pa) ((pa) + IO_OFFSET) /* Works for L3 and L4 */
+#define io_p2v(pa) ((pa) + IO_OFFSET) /* Works for L3 and L4 */
+#define io_v2p(va) ((va) - IO_OFFSET) /* Works for L3 and L4 */
+
+#endif
#ifndef __ASSEMBLER__
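The new OMAP2 constants can be sanity-checked by hand; adding IO_OFFSET to each physical base lands exactly on the corresponding virtual window:

/* Worked check of the OMAP2 static mappings:
 *   IO_ADDRESS(L4_24XX_PHYS) = 0x48000000 + 0x90000000 = 0xd8000000 = L4_24XX_VIRT
 *   IO_ADDRESS(L3_24XX_PHYS) = 0x68000000 + 0x90000000 = 0xf8000000 = L3_24XX_VIRT
 *   io_v2p(L4_24XX_VIRT)     = 0xd8000000 - 0x90000000 = 0x48000000 (inverse mapping)
 */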
diff --git a/include/asm-arm/arch-omap/irqs.h b/include/asm-arm/arch-omap/irqs.h
index 74e108ccac1..9779686bdce 100644
--- a/include/asm-arm/arch-omap/irqs.h
+++ b/include/asm-arm/arch-omap/irqs.h
@@ -22,8 +22,8 @@
* are different.
*/
-#ifndef __ASM_ARCH_OMAP1510_IRQS_H
-#define __ASM_ARCH_OMAP1510_IRQS_H
+#ifndef __ASM_ARCH_OMAP15XX_IRQS_H
+#define __ASM_ARCH_OMAP15XX_IRQS_H
/*
* IRQ numbers for interrupt handler 1
@@ -31,7 +31,6 @@
* NOTE: See also the OMAP-1510 and 1610 specific IRQ numbers below
*
*/
-#define INT_IH2_IRQ 0
#define INT_CAMERA 1
#define INT_FIQ 3
#define INT_RTDX 6
@@ -60,6 +59,7 @@
/*
* OMAP-1510 specific IRQ numbers for interrupt handler 1
*/
+#define INT_1510_IH2_IRQ 0
#define INT_1510_RES2 2
#define INT_1510_SPI_TX 4
#define INT_1510_SPI_RX 5
@@ -71,6 +71,7 @@
/*
* OMAP-1610 specific IRQ numbers for interrupt handler 1
*/
+#define INT_1610_IH2_IRQ 0
#define INT_1610_IH2_FIQ 2
#define INT_1610_McBSP2_TX 4
#define INT_1610_McBSP2_RX 5
@@ -231,6 +232,12 @@
#define INT_730_DMA_CH15 (62 + IH2_BASE)
#define INT_730_NAND (63 + IH2_BASE)
+#define INT_24XX_SYS_NIRQ 7
+#define INT_24XX_SDMA_IRQ0 12
+#define INT_24XX_SDMA_IRQ1 13
+#define INT_24XX_SDMA_IRQ2 14
+#define INT_24XX_SDMA_IRQ3 15
+#define INT_24XX_DSS_IRQ 25
#define INT_24XX_GPIO_BANK1 29
#define INT_24XX_GPIO_BANK2 30
#define INT_24XX_GPIO_BANK3 31
diff --git a/include/asm-arm/arch-omap/memory.h b/include/asm-arm/arch-omap/memory.h
index bf545b6e0a2..df50dd53e1d 100644
--- a/include/asm-arm/arch-omap/memory.h
+++ b/include/asm-arm/arch-omap/memory.h
@@ -61,7 +61,7 @@
* Note that the is_lbus_device() test is not very efficient on 1510
* because of the strncmp().
*/
-#ifdef CONFIG_ARCH_OMAP1510
+#ifdef CONFIG_ARCH_OMAP15XX
/*
* OMAP-1510 Local Bus address offset
@@ -84,7 +84,7 @@
virt_to_lbus(addr) : \
__virt_to_bus(addr);})
-#endif /* CONFIG_ARCH_OMAP1510 */
+#endif /* CONFIG_ARCH_OMAP15XX */
#endif
diff --git a/include/asm-arm/arch-omap/menelaus.h b/include/asm-arm/arch-omap/menelaus.h
new file mode 100644
index 00000000000..46be8b8d634
--- /dev/null
+++ b/include/asm-arm/arch-omap/menelaus.h
@@ -0,0 +1,22 @@
+/*
+ * linux/include/asm-arm/arch-omap/menelaus.h
+ *
+ * Functions to access Menelaus power management chip
+ */
+
+#ifndef __ASM_ARCH_MENELAUS_H
+#define __ASM_ARCH_MENELAUS_H
+
+extern void menelaus_mmc_register(void (*callback)(u8 card_mask),
+ unsigned long data);
+extern void menelaus_mmc_remove(void);
+extern void menelaus_mmc_opendrain(int enable);
+
+#if defined(CONFIG_ARCH_OMAP24XX) && defined(CONFIG_MENELAUS)
+#define omap_has_menelaus() 1
+#else
+#define omap_has_menelaus() 0
+#endif
+
+#endif
+
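A minimal sketch of a consumer of these hooks (the callback body and function names are hypothetical):

/* Hypothetical MMC glue using the Menelaus card-detect callback. */
static void example_mmc_cd(u8 card_mask)
{
	/* card_mask indicates which slot(s) changed; trigger a rescan here */
}

static int __init example_mmc_init(void)
{
	if (!omap_has_menelaus())
		return -ENODEV;
	menelaus_mmc_register(example_mmc_cd, 0);	/* second argument is caller data */
	return 0;
}

static void __exit example_mmc_exit(void)
{
	menelaus_mmc_remove();
}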
diff --git a/include/asm-arm/arch-omap/mux.h b/include/asm-arm/arch-omap/mux.h
index 1b1ad410534..13415a9aab0 100644
--- a/include/asm-arm/arch-omap/mux.h
+++ b/include/asm-arm/arch-omap/mux.h
@@ -4,7 +4,7 @@
* Table of the Omap register configurations for the FUNC_MUX and
* PULL_DWN combinations.
*
- * Copyright (C) 2003 Nokia Corporation
+ * Copyright (C) 2003 - 2005 Nokia Corporation
*
* Written by Tony Lindgren <tony.lindgren@nokia.com>
*
@@ -58,6 +58,16 @@
.pu_pd_reg = PU_PD_SEL_##reg, \
.pu_pd_val = status,
+#define MUX_REG_730(reg, mode_offset, mode) .mux_reg_name = "OMAP730_IO_CONF_"#reg, \
+ .mux_reg = OMAP730_IO_CONF_##reg, \
+ .mask_offset = mode_offset, \
+ .mask = mode,
+
+#define PULL_REG_730(reg, bit, status) .pull_name = "OMAP730_IO_CONF_"#reg, \
+ .pull_reg = OMAP730_IO_CONF_##reg, \
+ .pull_bit = bit, \
+ .pull_val = status,
+
#else
#define MUX_REG(reg, mode_offset, mode) .mux_reg = FUNC_MUX_CTRL_##reg, \
@@ -71,6 +81,15 @@
#define PU_PD_REG(reg, status) .pu_pd_reg = PU_PD_SEL_##reg, \
.pu_pd_val = status,
+#define MUX_REG_730(reg, mode_offset, mode) \
+ .mux_reg = OMAP730_IO_CONF_##reg, \
+ .mask_offset = mode_offset, \
+ .mask = mode,
+
+#define PULL_REG_730(reg, bit, status) .pull_reg = OMAP730_IO_CONF_##reg, \
+ .pull_bit = bit, \
+ .pull_val = status,
+
#endif /* CONFIG_OMAP_MUX_DEBUG */
#define MUX_CFG(desc, mux_reg, mode_offset, mode, \
@@ -84,13 +103,44 @@
PU_PD_REG(pu_pd_reg, pu_pd_status) \
},
+
+/*
+ * OMAP730 has a slightly different config for the pin mux.
+ * - config regs are the OMAP730_IO_CONF_x regs (see omap730.h) and
+ * not the FUNC_MUX_CTRL_x regs from hardware.h
+ * - for pull-up/down, only has one enable bit which is in the same register
+ * as mux config
+ */
+#define MUX_CFG_730(desc, mux_reg, mode_offset, mode, \
+ pull_reg, pull_bit, pull_status, \
+ pu_pd_reg, pu_pd_status, debug_status)\
+{ \
+ .name = desc, \
+ .debug = debug_status, \
+ MUX_REG_730(mux_reg, mode_offset, mode) \
+ PULL_REG_730(mux_reg, pull_bit, pull_status) \
+ PU_PD_REG(pu_pd_reg, pu_pd_status) \
+},
+
+#define MUX_CFG_24XX(desc, reg_offset, mode, \
+ pull_en, pull_mode, dbg) \
+{ \
+ .name = desc, \
+ .debug = dbg, \
+ .mux_reg = reg_offset, \
+ .mask = mode, \
+ .pull_val = pull_en, \
+ .pu_pd_val = pull_mode, \
+},
+
+
#define PULL_DISABLED 0
#define PULL_ENABLED 1
#define PULL_DOWN 0
#define PULL_UP 1
-typedef struct {
+struct pin_config {
char *name;
unsigned char busy;
unsigned char debug;
@@ -108,13 +158,23 @@ typedef struct {
const char *pu_pd_name;
const unsigned int pu_pd_reg;
const unsigned char pu_pd_val;
-} reg_cfg_set;
+};
-/*
- * Lookup table for FUNC_MUX and PULL_DWN register combinations for each
- * device. See also reg_cfg_table below for the register values.
- */
-typedef enum {
+enum omap730_index {
+ /* OMAP 730 keyboard */
+ E2_730_KBR0,
+ J7_730_KBR1,
+ E1_730_KBR2,
+ F3_730_KBR3,
+ D2_730_KBR4,
+ C2_730_KBC0,
+ D3_730_KBC1,
+ E4_730_KBC2,
+ F4_730_KBC3,
+ E3_730_KBC4,
+};
+
+enum omap1xxx_index {
/* UART1 (BT_UART_GATING)*/
UART1_TX = 0,
UART1_RTS,
@@ -331,245 +391,34 @@ typedef enum {
V10_1610_CF_IREQ,
W10_1610_CF_RESET,
W11_1610_CF_CD1,
-} reg_cfg_t;
+};
-#if defined(__MUX_C__) && defined(CONFIG_OMAP_MUX)
+enum omap24xx_index {
+ /* 24xx I2C */
+ M19_24XX_I2C1_SCL,
+ L15_24XX_I2C1_SDA,
+ J15_24XX_I2C2_SCL,
+ H19_24XX_I2C2_SDA,
-/*
- * Table of various FUNC_MUX and PULL_DWN combinations for each device.
- * See also reg_cfg_t above for the lookup table.
- */
-static const reg_cfg_set __initdata_or_module
-reg_cfg_table[] = {
-/*
- * description mux mode mux pull pull pull pu_pd pu dbg
- * reg offset mode reg bit ena reg
- */
-MUX_CFG("UART1_TX", 9, 21, 1, 2, 3, 0, NA, 0, 0)
-MUX_CFG("UART1_RTS", 9, 12, 1, 2, 0, 0, NA, 0, 0)
-
-/* UART2 (COM_UART_GATING), conflicts with USB2 */
-MUX_CFG("UART2_TX", C, 27, 1, 3, 3, 0, NA, 0, 0)
-MUX_CFG("UART2_RX", C, 18, 0, 3, 1, 1, NA, 0, 0)
-MUX_CFG("UART2_CTS", C, 21, 0, 3, 1, 1, NA, 0, 0)
-MUX_CFG("UART2_RTS", C, 24, 1, 3, 2, 0, NA, 0, 0)
-
-/* UART3 (GIGA_UART_GATING) */
-MUX_CFG("UART3_TX", 6, 0, 1, 0, 30, 0, NA, 0, 0)
-MUX_CFG("UART3_RX", 6, 3, 0, 0, 31, 1, NA, 0, 0)
-MUX_CFG("UART3_CTS", 5, 12, 2, 0, 24, 0, NA, 0, 0)
-MUX_CFG("UART3_RTS", 5, 15, 2, 0, 25, 0, NA, 0, 0)
-MUX_CFG("UART3_CLKREQ", 9, 27, 0, 2, 5, 0, NA, 0, 0)
-MUX_CFG("UART3_BCLK", A, 0, 0, 2, 6, 0, NA, 0, 0)
-MUX_CFG("Y15_1610_UART3_RTS", A, 0, 1, 2, 6, 0, NA, 0, 0)
-
-/* PWT & PWL, conflicts with UART3 */
-MUX_CFG("PWT", 6, 0, 2, 0, 30, 0, NA, 0, 0)
-MUX_CFG("PWL", 6, 3, 1, 0, 31, 1, NA, 0, 0)
-
-/* USB internal master generic */
-MUX_CFG("R18_USB_VBUS", 7, 9, 2, 1, 11, 0, NA, 0, 1)
-MUX_CFG("R18_1510_USB_GPIO0", 7, 9, 0, 1, 11, 1, NA, 0, 1)
-/* works around erratum: W4_USB_PUEN and W4_USB_PUDIS are switched! */
-MUX_CFG("W4_USB_PUEN", D, 3, 3, 3, 5, 1, NA, 0, 1)
-MUX_CFG("W4_USB_CLKO", D, 3, 1, 3, 5, 0, NA, 0, 1)
-MUX_CFG("W4_USB_HIGHZ", D, 3, 4, 3, 5, 0, 3, 0, 1)
-MUX_CFG("W4_GPIO58", D, 3, 7, 3, 5, 0, 3, 0, 1)
-
-/* USB1 master */
-MUX_CFG("USB1_SUSP", 8, 27, 2, 1, 27, 0, NA, 0, 1)
-MUX_CFG("USB1_SE0", 9, 0, 2, 1, 28, 0, NA, 0, 1)
-MUX_CFG("W13_1610_USB1_SE0", 9, 0, 4, 1, 28, 0, NA, 0, 1)
-MUX_CFG("USB1_TXEN", 9, 3, 2, 1, 29, 0, NA, 0, 1)
-MUX_CFG("USB1_TXD", 9, 24, 1, 2, 4, 0, NA, 0, 1)
-MUX_CFG("USB1_VP", A, 3, 1, 2, 7, 0, NA, 0, 1)
-MUX_CFG("USB1_VM", A, 6, 1, 2, 8, 0, NA, 0, 1)
-MUX_CFG("USB1_RCV", A, 9, 1, 2, 9, 0, NA, 0, 1)
-MUX_CFG("USB1_SPEED", A, 12, 2, 2, 10, 0, NA, 0, 1)
-MUX_CFG("R13_1610_USB1_SPEED", A, 12, 5, 2, 10, 0, NA, 0, 1)
-MUX_CFG("R13_1710_USB1_SEO", A, 12, 5, 2, 10, 0, NA, 0, 1)
-
-/* USB2 master */
-MUX_CFG("USB2_SUSP", B, 3, 1, 2, 17, 0, NA, 0, 1)
-MUX_CFG("USB2_VP", B, 6, 1, 2, 18, 0, NA, 0, 1)
-MUX_CFG("USB2_TXEN", B, 9, 1, 2, 19, 0, NA, 0, 1)
-MUX_CFG("USB2_VM", C, 18, 1, 3, 0, 0, NA, 0, 1)
-MUX_CFG("USB2_RCV", C, 21, 1, 3, 1, 0, NA, 0, 1)
-MUX_CFG("USB2_SE0", C, 24, 2, 3, 2, 0, NA, 0, 1)
-MUX_CFG("USB2_TXD", C, 27, 2, 3, 3, 0, NA, 0, 1)
-
-/* OMAP-1510 GPIO */
-MUX_CFG("R18_1510_GPIO0", 7, 9, 0, 1, 11, 1, 0, 0, 1)
-MUX_CFG("R19_1510_GPIO1", 7, 6, 0, 1, 10, 1, 0, 0, 1)
-MUX_CFG("M14_1510_GPIO2", 7, 3, 0, 1, 9, 1, 0, 0, 1)
-
-/* OMAP1610 GPIO */
-MUX_CFG("P18_1610_GPIO3", 7, 0, 0, 1, 8, 0, NA, 0, 1)
-MUX_CFG("Y15_1610_GPIO17", A, 0, 7, 2, 6, 0, NA, 0, 1)
-
-/* OMAP-1710 GPIO */
-MUX_CFG("R18_1710_GPIO0", 7, 9, 0, 1, 11, 1, 1, 1, 1)
-MUX_CFG("V2_1710_GPIO10", F, 27, 1, 4, 3, 1, 4, 1, 1)
-MUX_CFG("N21_1710_GPIO14", 6, 9, 0, 1, 1, 1, 1, 1, 1)
-MUX_CFG("W15_1710_GPIO40", 9, 27, 7, 2, 5, 1, 2, 1, 1)
-
-/* MPUIO */
-MUX_CFG("MPUIO2", 7, 18, 0, 1, 14, 1, NA, 0, 1)
-MUX_CFG("N15_1610_MPUIO2", 7, 18, 0, 1, 14, 1, 1, 0, 1)
-MUX_CFG("MPUIO4", 7, 15, 0, 1, 13, 1, NA, 0, 1)
-MUX_CFG("MPUIO5", 7, 12, 0, 1, 12, 1, NA, 0, 1)
-
-MUX_CFG("T20_1610_MPUIO5", 7, 12, 0, 1, 12, 0, 3, 0, 1)
-MUX_CFG("W11_1610_MPUIO6", 10, 15, 2, 3, 8, 0, 3, 0, 1)
-MUX_CFG("V10_1610_MPUIO7", A, 24, 2, 2, 14, 0, 2, 0, 1)
-MUX_CFG("W11_1610_MPUIO9", 10, 15, 1, 3, 8, 0, 3, 0, 1)
-MUX_CFG("V10_1610_MPUIO10", A, 24, 1, 2, 14, 0, 2, 0, 1)
-MUX_CFG("W10_1610_MPUIO11", A, 18, 2, 2, 11, 0, 2, 0, 1)
-MUX_CFG("E20_1610_MPUIO13", 3, 21, 1, 0, 7, 0, 0, 0, 1)
-MUX_CFG("U20_1610_MPUIO14", 9, 6, 6, 0, 30, 0, 0, 0, 1)
-MUX_CFG("E19_1610_MPUIO15", 3, 18, 1, 0, 6, 0, 0, 0, 1)
-
-/* MCBSP2 */
-MUX_CFG("MCBSP2_CLKR", C, 6, 0, 2, 27, 1, NA, 0, 1)
-MUX_CFG("MCBSP2_CLKX", C, 9, 0, 2, 29, 1, NA, 0, 1)
-MUX_CFG("MCBSP2_DR", C, 0, 0, 2, 26, 1, NA, 0, 1)
-MUX_CFG("MCBSP2_DX", C, 15, 0, 2, 31, 1, NA, 0, 1)
-MUX_CFG("MCBSP2_FSR", C, 12, 0, 2, 30, 1, NA, 0, 1)
-MUX_CFG("MCBSP2_FSX", C, 3, 0, 2, 27, 1, NA, 0, 1)
-
-/* MCBSP3 NOTE: Mode must 1 for clock */
-MUX_CFG("MCBSP3_CLKX", 9, 3, 1, 1, 29, 0, NA, 0, 1)
-
-/* Misc ballouts */
-MUX_CFG("BALLOUT_V8_ARMIO3", B, 18, 0, 2, 25, 1, NA, 0, 1)
-MUX_CFG("N20_HDQ", 6, 18, 1, 1, 4, 0, 1, 4, 0)
-
-/* OMAP-1610 MMC2 */
-MUX_CFG("W8_1610_MMC2_DAT0", B, 21, 6, 2, 23, 1, 2, 1, 1)
-MUX_CFG("V8_1610_MMC2_DAT1", B, 27, 6, 2, 25, 1, 2, 1, 1)
-MUX_CFG("W15_1610_MMC2_DAT2", 9, 12, 6, 2, 5, 1, 2, 1, 1)
-MUX_CFG("R10_1610_MMC2_DAT3", B, 18, 6, 2, 22, 1, 2, 1, 1)
-MUX_CFG("Y10_1610_MMC2_CLK", B, 3, 6, 2, 17, 0, 2, 0, 1)
-MUX_CFG("Y8_1610_MMC2_CMD", B, 24, 6, 2, 24, 1, 2, 1, 1)
-MUX_CFG("V9_1610_MMC2_CMDDIR", B, 12, 6, 2, 20, 0, 2, 1, 1)
-MUX_CFG("V5_1610_MMC2_DATDIR0", B, 15, 6, 2, 21, 0, 2, 1, 1)
-MUX_CFG("W19_1610_MMC2_DATDIR1", 8, 15, 6, 1, 23, 0, 1, 1, 1)
-MUX_CFG("R18_1610_MMC2_CLKIN", 7, 9, 6, 1, 11, 0, 1, 11, 1)
-
-/* OMAP-1610 External Trace Interface */
-MUX_CFG("M19_1610_ETM_PSTAT0", 5, 27, 1, 0, 29, 0, 0, 0, 1)
-MUX_CFG("L15_1610_ETM_PSTAT1", 5, 24, 1, 0, 28, 0, 0, 0, 1)
-MUX_CFG("L18_1610_ETM_PSTAT2", 5, 21, 1, 0, 27, 0, 0, 0, 1)
-MUX_CFG("L19_1610_ETM_D0", 5, 18, 1, 0, 26, 0, 0, 0, 1)
-MUX_CFG("J19_1610_ETM_D6", 5, 0, 1, 0, 20, 0, 0, 0, 1)
-MUX_CFG("J18_1610_ETM_D7", 5, 27, 1, 0, 19, 0, 0, 0, 1)
-
-/* OMAP16XX GPIO */
-MUX_CFG("P20_1610_GPIO4", 6, 27, 0, 1, 7, 0, 1, 1, 1)
-MUX_CFG("V9_1610_GPIO7", B, 12, 1, 2, 20, 0, 2, 1, 1)
-MUX_CFG("W8_1610_GPIO9", B, 21, 0, 2, 23, 0, 2, 1, 1)
-MUX_CFG("N20_1610_GPIO11", 6, 18, 0, 1, 4, 0, 1, 1, 1)
-MUX_CFG("N19_1610_GPIO13", 6, 12, 0, 1, 2, 0, 1, 1, 1)
-MUX_CFG("P10_1610_GPIO22", C, 0, 7, 2, 26, 0, 2, 1, 1)
-MUX_CFG("V5_1610_GPIO24", B, 15, 7, 2, 21, 0, 2, 1, 1)
-MUX_CFG("AA20_1610_GPIO_41", 9, 9, 7, 1, 31, 0, 1, 1, 1)
-MUX_CFG("W19_1610_GPIO48", 8, 15, 7, 1, 23, 1, 1, 0, 1)
-MUX_CFG("M7_1610_GPIO62", 10, 0, 0, 4, 24, 0, 4, 0, 1)
-MUX_CFG("V14_16XX_GPIO37", 9, 18, 7, 2, 2, 0, 2, 2, 0)
-MUX_CFG("R9_16XX_GPIO18", C, 18, 7, 3, 0, 0, 3, 0, 0)
-MUX_CFG("L14_16XX_GPIO49", 6, 3, 7, 0, 31, 0, 0, 31, 0)
-
-/* OMAP-1610 uWire */
-MUX_CFG("V19_1610_UWIRE_SCLK", 8, 6, 0, 1, 20, 0, 1, 1, 1)
-MUX_CFG("U18_1610_UWIRE_SDI", 8, 0, 0, 1, 18, 0, 1, 1, 1)
-MUX_CFG("W21_1610_UWIRE_SDO", 8, 3, 0, 1, 19, 0, 1, 1, 1)
-MUX_CFG("N14_1610_UWIRE_CS0", 8, 9, 1, 1, 21, 0, 1, 1, 1)
-MUX_CFG("P15_1610_UWIRE_CS3", 8, 12, 1, 1, 22, 0, 1, 1, 1)
-MUX_CFG("N15_1610_UWIRE_CS1", 7, 18, 2, 1, 14, 0, NA, 0, 1)
-
-/* OMAP-1610 Flash */
-MUX_CFG("L3_1610_FLASH_CS2B_OE",10, 6, 1, NA, 0, 0, NA, 0, 1)
-MUX_CFG("M8_1610_FLASH_CS2B_WE",10, 3, 1, NA, 0, 0, NA, 0, 1)
-
-/* First MMC interface, same on 1510, 1610 and 1710 */
-MUX_CFG("MMC_CMD", A, 27, 0, 2, 15, 1, 2, 1, 1)
-MUX_CFG("MMC_DAT1", A, 24, 0, 2, 14, 1, 2, 1, 1)
-MUX_CFG("MMC_DAT2", A, 18, 0, 2, 12, 1, 2, 1, 1)
-MUX_CFG("MMC_DAT0", B, 0, 0, 2, 16, 1, 2, 1, 1)
-MUX_CFG("MMC_CLK", A, 21, 0, NA, 0, 0, NA, 0, 1)
-MUX_CFG("MMC_DAT3", 10, 15, 0, 3, 8, 1, 3, 1, 1)
-MUX_CFG("M15_1710_MMC_CLKI", 6, 21, 2, 0, 0, 0, NA, 0, 1)
-MUX_CFG("P19_1710_MMC_CMDDIR", 6, 24, 6, 0, 0, 0, NA, 0, 1)
-MUX_CFG("P20_1710_MMC_DATDIR0", 6, 27, 5, 0, 0, 0, NA, 0, 1)
-
-/* OMAP-1610 USB0 alternate configuration */
-MUX_CFG("W9_USB0_TXEN", B, 9, 5, 2, 19, 0, 2, 0, 1)
-MUX_CFG("AA9_USB0_VP", B, 6, 5, 2, 18, 0, 2, 0, 1)
-MUX_CFG("Y5_USB0_RCV", C, 21, 5, 3, 1, 0, 1, 0, 1)
-MUX_CFG("R9_USB0_VM", C, 18, 5, 3, 0, 0, 3, 0, 1)
-MUX_CFG("V6_USB0_TXD", C, 27, 5, 3, 3, 0, 3, 0, 1)
-MUX_CFG("W5_USB0_SE0", C, 24, 5, 3, 2, 0, 3, 0, 1)
-MUX_CFG("V9_USB0_SPEED", B, 12, 5, 2, 20, 0, 2, 0, 1)
-MUX_CFG("Y10_USB0_SUSP", B, 3, 5, 2, 17, 0, 2, 0, 1)
-
-/* USB2 interface */
-MUX_CFG("W9_USB2_TXEN", B, 9, 1, NA, 0, 0, NA, 0, 1)
-MUX_CFG("AA9_USB2_VP", B, 6, 1, NA, 0, 0, NA, 0, 1)
-MUX_CFG("Y5_USB2_RCV", C, 21, 1, NA, 0, 0, NA, 0, 1)
-MUX_CFG("R9_USB2_VM", C, 18, 1, NA, 0, 0, NA, 0, 1)
-MUX_CFG("V6_USB2_TXD", C, 27, 2, NA, 0, 0, NA, 0, 1)
-MUX_CFG("W5_USB2_SE0", C, 24, 2, NA, 0, 0, NA, 0, 1)
-
-/* 16XX UART */
-MUX_CFG("R13_1610_UART1_TX", A, 12, 6, 2, 10, 0, 2, 10, 1)
-MUX_CFG("V14_16XX_UART1_RX", 9, 18, 0, 2, 2, 0, 2, 2, 1)
-MUX_CFG("R14_1610_UART1_CTS", 9, 15, 0, 2, 1, 0, 2, 1, 1)
-MUX_CFG("AA15_1610_UART1_RTS", 9, 12, 1, 2, 0, 0, 2, 0, 1)
-MUX_CFG("R9_16XX_UART2_RX", C, 18, 0, 3, 0, 0, 3, 0, 1)
-MUX_CFG("L14_16XX_UART3_RX", 6, 3, 0, 0, 31, 0, 0, 31, 1)
-
-/* I2C interface */
-MUX_CFG("I2C_SCL", 7, 24, 0, NA, 0, 0, NA, 0, 0)
-MUX_CFG("I2C_SDA", 7, 27, 0, NA, 0, 0, NA, 0, 0)
-
-/* Keypad */
-MUX_CFG("F18_1610_KBC0", 3, 15, 0, 0, 5, 1, 0, 0, 0)
-MUX_CFG("D20_1610_KBC1", 3, 12, 0, 0, 4, 1, 0, 0, 0)
-MUX_CFG("D19_1610_KBC2", 3, 9, 0, 0, 3, 1, 0, 0, 0)
-MUX_CFG("E18_1610_KBC3", 3, 6, 0, 0, 2, 1, 0, 0, 0)
-MUX_CFG("C21_1610_KBC4", 3, 3, 0, 0, 1, 1, 0, 0, 0)
-MUX_CFG("G18_1610_KBR0", 4, 0, 0, 0, 10, 1, 0, 1, 0)
-MUX_CFG("F19_1610_KBR1", 3, 27, 0, 0, 9, 1, 0, 1, 0)
-MUX_CFG("H14_1610_KBR2", 3, 24, 0, 0, 8, 1, 0, 1, 0)
-MUX_CFG("E20_1610_KBR3", 3, 21, 0, 0, 7, 1, 0, 1, 0)
-MUX_CFG("E19_1610_KBR4", 3, 18, 0, 0, 6, 1, 0, 1, 0)
-MUX_CFG("N19_1610_KBR5", 6, 12, 1, 1, 2, 1, 1, 1, 0)
-
-/* Power management */
-MUX_CFG("T20_1610_LOW_PWR", 7, 12, 1, NA, 0, 0, NA, 0, 0)
-
-/* MCLK Settings */
-MUX_CFG("V5_1710_MCLK_ON", B, 15, 0, NA, 0, 0, NA, 0, 0)
-MUX_CFG("V5_1710_MCLK_OFF", B, 15, 6, NA, 0, 0, NA, 0, 0)
-MUX_CFG("R10_1610_MCLK_ON", B, 18, 0, NA, 22, 0, NA, 1, 0)
-MUX_CFG("R10_1610_MCLK_OFF", B, 18, 6, 2, 22, 1, 2, 1, 1)
-
-/* CompactFlash controller, conflicts with MMC1 */
-MUX_CFG("P11_1610_CF_CD2", A, 27, 3, 2, 15, 1, 2, 1, 1)
-MUX_CFG("R11_1610_CF_IOIS16", B, 0, 3, 2, 16, 1, 2, 1, 1)
-MUX_CFG("V10_1610_CF_IREQ", A, 24, 3, 2, 14, 0, 2, 0, 1)
-MUX_CFG("W10_1610_CF_RESET", A, 18, 3, 2, 12, 1, 2, 1, 1)
-MUX_CFG("W11_1610_CF_CD1", 10, 15, 3, 3, 8, 1, 3, 1, 1)
-};
+ /* 24xx Menelaus interrupt */
+ W19_24XX_SYS_NIRQ,
-#endif /* __MUX_C__ */
+ /* 24xx GPIO */
+ Y20_24XX_GPIO60,
+ M15_24XX_GPIO92,
+};
#ifdef CONFIG_OMAP_MUX
/* setup pin muxing in Linux */
-extern int omap_cfg_reg(reg_cfg_t reg_cfg);
+extern int omap1_mux_init(void);
+extern int omap2_mux_init(void);
+extern int omap_mux_register(struct pin_config * pins, unsigned long size);
+extern int omap_cfg_reg(unsigned long reg_cfg);
#else
/* boot loader does it all (no warnings from CONFIG_OMAP_MUX_WARNINGS) */
-static inline int omap_cfg_reg(reg_cfg_t reg_cfg) { return 0; }
+static inline int omap1_mux_init(void) { return 0; }
+static inline int omap2_mux_init(void) { return 0; }
+static inline int omap_cfg_reg(unsigned long reg_cfg) { return 0; }
#endif
#endif
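As a usage sketch (board and pin choices are illustrative; the pin_config tables themselves are expected to live in the mach-omap1 and mach-omap2 mux.c files and be installed via omap_mux_register()):

/* Illustrative 24xx board init fragment using the reworked mux interface. */
static void __init example_board_mux_init(void)
{
	omap2_mux_init();			/* registers the 24xx pin table */
	omap_cfg_reg(M19_24XX_I2C1_SCL);	/* route the I2C1 pins */
	omap_cfg_reg(L15_24XX_I2C1_SDA);
	omap_cfg_reg(W19_24XX_SYS_NIRQ);	/* Menelaus interrupt line */
}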
diff --git a/include/asm-arm/arch-omap/omap1510.h b/include/asm-arm/arch-omap/omap1510.h
index f086a393390..c575d354850 100644
--- a/include/asm-arm/arch-omap/omap1510.h
+++ b/include/asm-arm/arch-omap/omap1510.h
@@ -25,8 +25,8 @@
* 675 Mass Ave, Cambridge, MA 02139, USA.
*/
-#ifndef __ASM_ARCH_OMAP1510_H
-#define __ASM_ARCH_OMAP1510_H
+#ifndef __ASM_ARCH_OMAP15XX_H
+#define __ASM_ARCH_OMAP15XX_H
/*
* ----------------------------------------------------------------------------
@@ -44,5 +44,5 @@
#define OMAP1510_DSPREG_SIZE SZ_128K
#define OMAP1510_DSPREG_START 0xE1000000
-#endif /* __ASM_ARCH_OMAP1510_H */
+#endif /* __ASM_ARCH_OMAP15XX_H */
diff --git a/include/asm-arm/arch-omap/omap24xx.h b/include/asm-arm/arch-omap/omap24xx.h
index a9105466a41..6e59805fa65 100644
--- a/include/asm-arm/arch-omap/omap24xx.h
+++ b/include/asm-arm/arch-omap/omap24xx.h
@@ -1,15 +1,24 @@
#ifndef __ASM_ARCH_OMAP24XX_H
#define __ASM_ARCH_OMAP24XX_H
-#define OMAP24XX_L4_IO_BASE 0x48000000
+/*
+ * Please place only base defines here and put the rest in device
+ * specific headers. Note also that some of these defines are needed
+ * for omap1 to compile without adding ifdefs.
+ */
+
+#define L4_24XX_BASE 0x48000000
+#define L3_24XX_BASE 0x68000000
/* interrupt controller */
-#define OMAP24XX_IC_BASE (OMAP24XX_L4_IO_BASE + 0xfe000)
+#define OMAP24XX_IC_BASE (L4_24XX_BASE + 0xfe000)
#define VA_IC_BASE IO_ADDRESS(OMAP24XX_IC_BASE)
-
#define OMAP24XX_IVA_INTC_BASE 0x40000000
-
#define IRQ_SIR_IRQ 0x0040
+#define OMAP24XX_32KSYNCT_BASE (L4_24XX_BASE + 0x4000)
+#define OMAP24XX_PRCM_BASE (L4_24XX_BASE + 0x8000)
+#define OMAP24XX_SDRC_BASE (L3_24XX_BASE + 0x9000)
+
#endif /* __ASM_ARCH_OMAP24XX_H */
diff --git a/include/asm-arm/arch-omap/omapfb.h b/include/asm-arm/arch-omap/omapfb.h
new file mode 100644
index 00000000000..4ba2622cc14
--- /dev/null
+++ b/include/asm-arm/arch-omap/omapfb.h
@@ -0,0 +1,281 @@
+/*
+ * File: include/asm-arm/arch-omap/omapfb.h
+ *
+ * Framebuffer driver for TI OMAP boards
+ *
+ * Copyright (C) 2004 Nokia Corporation
+ * Author: Imre Deak <imre.deak@nokia.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License along
+ * with this program; if not, write to the Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __OMAPFB_H
+#define __OMAPFB_H
+
+/* IOCTL commands. */
+
+#define OMAP_IOW(num, dtype) _IOW('O', num, dtype)
+#define OMAP_IOR(num, dtype) _IOR('O', num, dtype)
+#define OMAP_IOWR(num, dtype) _IOWR('O', num, dtype)
+#define OMAP_IO(num) _IO('O', num)
+
+#define OMAPFB_MIRROR OMAP_IOW(31, int)
+#define OMAPFB_SYNC_GFX OMAP_IO(37)
+#define OMAPFB_VSYNC OMAP_IO(38)
+#define OMAPFB_SET_UPDATE_MODE OMAP_IOW(40, enum omapfb_update_mode)
+#define OMAPFB_GET_CAPS OMAP_IOR(42, unsigned long)
+#define OMAPFB_GET_UPDATE_MODE OMAP_IOW(43, enum omapfb_update_mode)
+#define OMAPFB_LCD_TEST OMAP_IOW(45, int)
+#define OMAPFB_CTRL_TEST OMAP_IOW(46, int)
+#define OMAPFB_UPDATE_WINDOW OMAP_IOW(47, struct omapfb_update_window)
+#define OMAPFB_SETUP_PLANE OMAP_IOW(48, struct omapfb_setup_plane)
+#define OMAPFB_ENABLE_PLANE OMAP_IOW(49, struct omapfb_enable_plane)
+#define OMAPFB_SET_COLOR_KEY OMAP_IOW(50, struct omapfb_color_key)
+
+#define OMAPFB_CAPS_GENERIC_MASK 0x00000fff
+#define OMAPFB_CAPS_LCDC_MASK 0x00fff000
+#define OMAPFB_CAPS_PANEL_MASK 0xff000000
+
+#define OMAPFB_CAPS_MANUAL_UPDATE 0x00001000
+#define OMAPFB_CAPS_SET_BACKLIGHT 0x01000000
+
+/* Values from DSP must map to lower 16-bits */
+#define OMAPFB_FORMAT_MASK 0x00ff
+#define OMAPFB_FORMAT_FLAG_DOUBLE 0x0100
+
+enum omapfb_color_format {
+ OMAPFB_COLOR_RGB565 = 0,
+ OMAPFB_COLOR_YUV422,
+ OMAPFB_COLOR_YUV420,
+ OMAPFB_COLOR_CLUT_8BPP,
+ OMAPFB_COLOR_CLUT_4BPP,
+ OMAPFB_COLOR_CLUT_2BPP,
+ OMAPFB_COLOR_CLUT_1BPP,
+};
+
+struct omapfb_update_window {
+ u32 x, y;
+ u32 width, height;
+ u32 format;
+};
+
+enum omapfb_plane {
+ OMAPFB_PLANE_GFX = 0,
+ OMAPFB_PLANE_VID1,
+ OMAPFB_PLANE_VID2,
+};
+
+enum omapfb_channel_out {
+ OMAPFB_CHANNEL_OUT_LCD = 0,
+ OMAPFB_CHANNEL_OUT_DIGIT,
+};
+
+struct omapfb_setup_plane {
+ u8 plane;
+ u8 channel_out;
+ u32 offset;
+ u32 pos_x, pos_y;
+ u32 width, height;
+ u32 color_mode;
+};
+
+struct omapfb_enable_plane {
+ u8 plane;
+ u8 enable;
+};
+
+enum omapfb_color_key_type {
+ OMAPFB_COLOR_KEY_DISABLED = 0,
+ OMAPFB_COLOR_KEY_GFX_DST,
+ OMAPFB_COLOR_KEY_VID_SRC,
+};
+
+struct omapfb_color_key {
+ u8 channel_out;
+ u32 background;
+ u32 trans_key;
+ u8 key_type;
+};
+
+enum omapfb_update_mode {
+ OMAPFB_UPDATE_DISABLED = 0,
+ OMAPFB_AUTO_UPDATE,
+ OMAPFB_MANUAL_UPDATE
+};
+
+#ifdef __KERNEL__
+
+#include <linux/completion.h>
+#include <linux/interrupt.h>
+#include <linux/fb.h>
+
+#define OMAP_LCDC_INV_VSYNC 0x0001
+#define OMAP_LCDC_INV_HSYNC 0x0002
+#define OMAP_LCDC_INV_PIX_CLOCK 0x0004
+#define OMAP_LCDC_INV_OUTPUT_EN 0x0008
+#define OMAP_LCDC_HSVS_RISING_EDGE 0x0010
+#define OMAP_LCDC_HSVS_OPPOSITE 0x0020
+
+#define OMAP_LCDC_SIGNAL_MASK 0x003f
+
+#define OMAP_LCDC_PANEL_TFT 0x0100
+
+#ifdef CONFIG_ARCH_OMAP1
+#define OMAPFB_PLANE_NUM 1
+#else
+#define OMAPFB_PLANE_NUM 3
+#endif
+
+struct omapfb_device;
+
+struct lcd_panel {
+ const char *name;
+ int config; /* TFT/STN, signal inversion */
+ int bpp; /* Pixel format in fb mem */
+ int data_lines; /* Lines on LCD HW interface */
+
+ int x_res, y_res;
+ int pixel_clock; /* In kHz */
+ int hsw; /* Horizontal synchronization
+ pulse width */
+ int hfp; /* Horizontal front porch */
+ int hbp; /* Horizontal back porch */
+ int vsw; /* Vertical synchronization
+ pulse width */
+ int vfp; /* Vertical front porch */
+ int vbp; /* Vertical back porch */
+ int acb; /* ac-bias pin frequency */
+ int pcd; /* pixel clock divider.
+ Obsolete, use pixel_clock instead */
+
+ int (*init) (struct omapfb_device *fbdev);
+ void (*cleanup) (void);
+ int (*enable) (void);
+ void (*disable) (void);
+ unsigned long (*get_caps) (void);
+ int (*set_bklight_level)(unsigned int level);
+ unsigned int (*get_bklight_level)(void);
+ unsigned int (*get_bklight_max) (void);
+ int (*run_test) (int test_num);
+};
+
+struct omapfb_device;
+
+struct extif_timings {
+ int cs_on_time;
+ int cs_off_time;
+ int we_on_time;
+ int we_off_time;
+ int re_on_time;
+ int re_off_time;
+ int we_cycle_time;
+ int re_cycle_time;
+ int cs_pulse_width;
+ int access_time;
+};
+
+struct lcd_ctrl_extif {
+ int (*init) (void);
+ void (*cleanup) (void);
+ void (*set_timings) (const struct extif_timings *timings);
+ void (*write_command) (u32 cmd);
+ u32 (*read_data) (void);
+ void (*write_data) (u32 data);
+ void (*transfer_area) (int width, int height,
+ void (callback)(void * data), void *data);
+};
+
+struct lcd_ctrl {
+ const char *name;
+ void *data;
+
+ int (*init) (struct omapfb_device *fbdev,
+ int ext_mode, int req_vram_size);
+ void (*cleanup) (void);
+ void (*get_vram_layout)(unsigned long *size,
+ void **virt_base,
+ dma_addr_t *phys_base);
+ unsigned long (*get_caps) (void);
+ int (*set_update_mode)(enum omapfb_update_mode mode);
+ enum omapfb_update_mode (*get_update_mode)(void);
+ int (*setup_plane) (int plane, int channel_out,
+ unsigned long offset,
+ int screen_width,
+ int pos_x, int pos_y, int width,
+ int height, int color_mode);
+ int (*enable_plane) (int plane, int enable);
+ int (*update_window) (struct omapfb_update_window *win,
+ void (*callback)(void *),
+ void *callback_data);
+ void (*sync) (void);
+ void (*suspend) (void);
+ void (*resume) (void);
+ int (*run_test) (int test_num);
+ int (*setcolreg) (u_int regno, u16 red, u16 green,
+ u16 blue, u16 transp,
+ int update_hw_mem);
+ int (*set_color_key) (struct omapfb_color_key *ck);
+
+};
+
+enum omapfb_state {
+ OMAPFB_DISABLED = 0,
+ OMAPFB_SUSPENDED = 99,
+ OMAPFB_ACTIVE = 100
+};
+
+struct omapfb_device {
+ int state;
+ int ext_lcdc; /* Using external
+ LCD controller */
+ struct semaphore rqueue_sema;
+
+ void *vram_virt_base;
+ dma_addr_t vram_phys_base;
+ unsigned long vram_size;
+
+ int color_mode;
+ int palette_size;
+ int mirror;
+ u32 pseudo_palette[17];
+
+ struct lcd_panel *panel; /* LCD panel */
+ struct lcd_ctrl *ctrl; /* LCD controller */
+ struct lcd_ctrl *int_ctrl; /* internal LCD ctrl */
+ struct lcd_ctrl_extif *ext_if; /* LCD ctrl external
+ interface */
+ struct fb_info *fb_info;
+
+ struct device *dev;
+};
+
+extern struct lcd_panel h3_panel;
+extern struct lcd_panel h2_panel;
+extern struct lcd_panel p2_panel;
+extern struct lcd_panel osk_panel;
+extern struct lcd_panel innovator1610_panel;
+extern struct lcd_panel innovator1510_panel;
+
+#ifdef CONFIG_ARCH_OMAP1
+extern struct lcd_ctrl omap1_lcd_ctrl;
+#else
+extern struct lcd_ctrl omap2_disp_ctrl;
+#endif
+
+extern void omapfb_write_first_pixel(struct omapfb_device *fbdev, u16 pixval);
+
+#endif /* __KERNEL__ */
+
+#endif /* __OMAPFB_H */
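
The lcd_panel operations declared above are what board-specific panel files (h2, h3, osk and friends) fill in. As a rough illustration only, with hypothetical names, timings and include path rather than anything taken from this patch, such a panel definition could look like:

        /* Hypothetical example panel -- not part of this patch. */
        #include <asm/arch/omapfb.h>            /* assumed include path */

        static int example_panel_init(struct omapfb_device *fbdev)
        {
                return 0;                       /* nothing to power up here */
        }

        static void example_panel_cleanup(void) { }
        static int example_panel_enable(void) { return 0; }
        static void example_panel_disable(void) { }
        static unsigned long example_panel_get_caps(void) { return 0; }

        struct lcd_panel example_panel = {
                .name           = "example",
                .config         = OMAP_LCDC_PANEL_TFT,
                .bpp            = 16,
                .data_lines     = 16,
                .x_res          = 240,
                .y_res          = 320,
                .pixel_clock    = 4687,         /* kHz, made-up figure */
                .hsw            = 3,
                .hfp            = 2,
                .hbp            = 2,
                .vsw            = 1,
                .vfp            = 1,
                .vbp            = 1,
                .init           = example_panel_init,
                .cleanup        = example_panel_cleanup,
                .enable         = example_panel_enable,
                .disable        = example_panel_disable,
                .get_caps       = example_panel_get_caps,
        };

Members that are left out (acb, pcd, the backlight hooks) simply stay zero/NULL, as in the existing board panels.
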
diff --git a/include/asm-arm/arch-omap/pm.h b/include/asm-arm/arch-omap/pm.h
index fbd742d0c49..7c790425e36 100644
--- a/include/asm-arm/arch-omap/pm.h
+++ b/include/asm-arm/arch-omap/pm.h
@@ -98,7 +98,14 @@
#define OMAP1610_IDLECT3 0xfffece24
#define OMAP1610_IDLE_LOOP_REQUEST 0x0400
-#if !defined(CONFIG_ARCH_OMAP1510) && \
+#define OMAP730_IDLECT1_SLEEP_VAL 0x16c7
+#define OMAP730_IDLECT2_SLEEP_VAL 0x09c7
+#define OMAP730_IDLECT3_VAL 0x3f
+#define OMAP730_IDLECT3 0xfffece24
+#define OMAP730_IDLE_LOOP_REQUEST 0x0C00
+
+#if !defined(CONFIG_ARCH_OMAP730) && \
+ !defined(CONFIG_ARCH_OMAP15XX) && \
!defined(CONFIG_ARCH_OMAP16XX) && \
!defined(CONFIG_ARCH_OMAP24XX)
#error "Power management for this processor not implemented yet"
@@ -107,8 +114,10 @@
#ifndef __ASSEMBLER__
extern void omap_pm_idle(void);
extern void omap_pm_suspend(void);
+extern void omap730_cpu_suspend(unsigned short, unsigned short);
extern void omap1510_cpu_suspend(unsigned short, unsigned short);
extern void omap1610_cpu_suspend(unsigned short, unsigned short);
+extern void omap730_idle_loop_suspend(void);
extern void omap1510_idle_loop_suspend(void);
extern void omap1610_idle_loop_suspend(void);
@@ -118,6 +127,8 @@ extern void omap_serial_wake_trigger(int enable);
#define omap_serial_wake_trigger(x) {}
#endif /* CONFIG_OMAP_SERIAL_WAKE */
+extern unsigned int omap730_cpu_suspend_sz;
+extern unsigned int omap730_idle_loop_suspend_sz;
extern unsigned int omap1510_cpu_suspend_sz;
extern unsigned int omap1510_idle_loop_suspend_sz;
extern unsigned int omap1610_cpu_suspend_sz;
@@ -131,6 +142,10 @@ extern unsigned int omap1610_idle_loop_suspend_sz;
#define ULPD_RESTORE(x) omap_writew((ulpd_sleep_save[ULPD_SLEEP_SAVE_##x]), (x))
#define ULPD_SHOW(x) ulpd_sleep_save[ULPD_SLEEP_SAVE_##x]
+#define MPUI730_SAVE(x) mpui730_sleep_save[MPUI730_SLEEP_SAVE_##x] = omap_readl(x)
+#define MPUI730_RESTORE(x) omap_writel((mpui730_sleep_save[MPUI730_SLEEP_SAVE_##x]), (x))
+#define MPUI730_SHOW(x) mpui730_sleep_save[MPUI730_SLEEP_SAVE_##x]
+
#define MPUI1510_SAVE(x) mpui1510_sleep_save[MPUI1510_SLEEP_SAVE_##x] = omap_readl(x)
#define MPUI1510_RESTORE(x) omap_writel((mpui1510_sleep_save[MPUI1510_SLEEP_SAVE_##x]), (x))
#define MPUI1510_SHOW(x) mpui1510_sleep_save[MPUI1510_SLEEP_SAVE_##x]
@@ -188,13 +203,34 @@ enum mpui1510_save_state {
MPUI1510_SLEEP_SAVE_EMIFS_CONFIG,
MPUI1510_SLEEP_SAVE_OMAP_IH1_MIR,
MPUI1510_SLEEP_SAVE_OMAP_IH2_MIR,
-#if defined(CONFIG_ARCH_OMAP1510)
+#if defined(CONFIG_ARCH_OMAP15XX)
MPUI1510_SLEEP_SAVE_SIZE
#else
MPUI1510_SLEEP_SAVE_SIZE = 0
#endif
};
+enum mpui730_save_state {
+ MPUI730_SLEEP_SAVE_START = 0,
+ /*
+ * MPUI registers 32 bits
+ */
+ MPUI730_SLEEP_SAVE_MPUI_CTRL,
+ MPUI730_SLEEP_SAVE_MPUI_DSP_BOOT_CONFIG,
+ MPUI730_SLEEP_SAVE_MPUI_DSP_API_CONFIG,
+ MPUI730_SLEEP_SAVE_MPUI_DSP_STATUS,
+ MPUI730_SLEEP_SAVE_EMIFF_SDRAM_CONFIG,
+ MPUI730_SLEEP_SAVE_EMIFS_CONFIG,
+ MPUI730_SLEEP_SAVE_OMAP_IH1_MIR,
+ MPUI730_SLEEP_SAVE_OMAP_IH2_0_MIR,
+ MPUI730_SLEEP_SAVE_OMAP_IH2_1_MIR,
+#if defined(CONFIG_ARCH_OMAP730)
+ MPUI730_SLEEP_SAVE_SIZE
+#else
+ MPUI730_SLEEP_SAVE_SIZE = 0
+#endif
+};
+
enum mpui1610_save_state {
MPUI1610_SLEEP_SAVE_START = 0,
/*
diff --git a/include/asm-arm/arch-omap/prcm.h b/include/asm-arm/arch-omap/prcm.h
new file mode 100644
index 00000000000..7b48a5cbb15
--- /dev/null
+++ b/include/asm-arm/arch-omap/prcm.h
@@ -0,0 +1,429 @@
+/*
+ * prcm.h - Access definitions for use in OMAP24XX clock and power management
+ *
+ * Copyright (C) 2005 Texas Instruments, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ASM_ARM_ARCH_DPM_PRCM_H
+#define __ASM_ARM_ARCH_DPM_PRCM_H
+
+/* SET_PERFORMANCE_LEVEL PARAMETERS */
+#define PRCM_HALF_SPEED 1
+#define PRCM_FULL_SPEED 2
+
+#ifndef __ASSEMBLER__
+
+#define PRCM_REG32(offset) __REG32(OMAP24XX_PRCM_BASE + (offset))
+
+#define PRCM_REVISION PRCM_REG32(0x000)
+#define PRCM_SYSCONFIG PRCM_REG32(0x010)
+#define PRCM_IRQSTATUS_MPU PRCM_REG32(0x018)
+#define PRCM_IRQENABLE_MPU PRCM_REG32(0x01C)
+#define PRCM_VOLTCTRL PRCM_REG32(0x050)
+#define PRCM_VOLTST PRCM_REG32(0x054)
+#define PRCM_CLKSRC_CTRL PRCM_REG32(0x060)
+#define PRCM_CLKOUT_CTRL PRCM_REG32(0x070)
+#define PRCM_CLKEMUL_CTRL PRCM_REG32(0x078)
+#define PRCM_CLKCFG_CTRL PRCM_REG32(0x080)
+#define PRCM_CLKCFG_STATUS PRCM_REG32(0x084)
+#define PRCM_VOLTSETUP PRCM_REG32(0x090)
+#define PRCM_CLKSSETUP PRCM_REG32(0x094)
+#define PRCM_POLCTRL PRCM_REG32(0x098)
+
+/* GENERAL PURPOSE */
+#define GENERAL_PURPOSE1 PRCM_REG32(0x0B0)
+#define GENERAL_PURPOSE2 PRCM_REG32(0x0B4)
+#define GENERAL_PURPOSE3 PRCM_REG32(0x0B8)
+#define GENERAL_PURPOSE4 PRCM_REG32(0x0BC)
+#define GENERAL_PURPOSE5 PRCM_REG32(0x0C0)
+#define GENERAL_PURPOSE6 PRCM_REG32(0x0C4)
+#define GENERAL_PURPOSE7 PRCM_REG32(0x0C8)
+#define GENERAL_PURPOSE8 PRCM_REG32(0x0CC)
+#define GENERAL_PURPOSE9 PRCM_REG32(0x0D0)
+#define GENERAL_PURPOSE10 PRCM_REG32(0x0D4)
+#define GENERAL_PURPOSE11 PRCM_REG32(0x0D8)
+#define GENERAL_PURPOSE12 PRCM_REG32(0x0DC)
+#define GENERAL_PURPOSE13 PRCM_REG32(0x0E0)
+#define GENERAL_PURPOSE14 PRCM_REG32(0x0E4)
+#define GENERAL_PURPOSE15 PRCM_REG32(0x0E8)
+#define GENERAL_PURPOSE16 PRCM_REG32(0x0EC)
+#define GENERAL_PURPOSE17 PRCM_REG32(0x0F0)
+#define GENERAL_PURPOSE18 PRCM_REG32(0x0F4)
+#define GENERAL_PURPOSE19 PRCM_REG32(0x0F8)
+#define GENERAL_PURPOSE20 PRCM_REG32(0x0FC)
+
+/* MPU */
+#define CM_CLKSEL_MPU PRCM_REG32(0x140)
+#define CM_CLKSTCTRL_MPU PRCM_REG32(0x148)
+#define RM_RSTST_MPU PRCM_REG32(0x158)
+#define PM_WKDEP_MPU PRCM_REG32(0x1C8)
+#define PM_EVGENCTRL_MPU PRCM_REG32(0x1D4)
+#define PM_EVEGENONTIM_MPU PRCM_REG32(0x1D8)
+#define PM_EVEGENOFFTIM_MPU PRCM_REG32(0x1DC)
+#define PM_PWSTCTRL_MPU PRCM_REG32(0x1E0)
+#define PM_PWSTST_MPU PRCM_REG32(0x1E4)
+
+/* CORE */
+#define CM_FCLKEN1_CORE PRCM_REG32(0x200)
+#define CM_FCLKEN2_CORE PRCM_REG32(0x204)
+#define CM_FCLKEN3_CORE PRCM_REG32(0x208)
+#define CM_ICLKEN1_CORE PRCM_REG32(0x210)
+#define CM_ICLKEN2_CORE PRCM_REG32(0x214)
+#define CM_ICLKEN3_CORE PRCM_REG32(0x218)
+#define CM_ICLKEN4_CORE PRCM_REG32(0x21C)
+#define CM_IDLEST1_CORE PRCM_REG32(0x220)
+#define CM_IDLEST2_CORE PRCM_REG32(0x224)
+#define CM_IDLEST3_CORE PRCM_REG32(0x228)
+#define CM_IDLEST4_CORE PRCM_REG32(0x22C)
+#define CM_AUTOIDLE1_CORE PRCM_REG32(0x230)
+#define CM_AUTOIDLE2_CORE PRCM_REG32(0x234)
+#define CM_AUTOIDLE3_CORE PRCM_REG32(0x238)
+#define CM_AUTOIDLE4_CORE PRCM_REG32(0x23C)
+#define CM_CLKSEL1_CORE PRCM_REG32(0x240)
+#define CM_CLKSEL2_CORE PRCM_REG32(0x244)
+#define CM_CLKSTCTRL_CORE PRCM_REG32(0x248)
+#define PM_WKEN1_CORE PRCM_REG32(0x2A0)
+#define PM_WKEN2_CORE PRCM_REG32(0x2A4)
+#define PM_WKST1_CORE PRCM_REG32(0x2B0)
+#define PM_WKST2_CORE PRCM_REG32(0x2B4)
+#define PM_WKDEP_CORE PRCM_REG32(0x2C8)
+#define PM_PWSTCTRL_CORE PRCM_REG32(0x2E0)
+#define PM_PWSTST_CORE PRCM_REG32(0x2E4)
+
+/* GFX */
+#define CM_FCLKEN_GFX PRCM_REG32(0x300)
+#define CM_ICLKEN_GFX PRCM_REG32(0x310)
+#define CM_IDLEST_GFX PRCM_REG32(0x320)
+#define CM_CLKSEL_GFX PRCM_REG32(0x340)
+#define CM_CLKSTCTRL_GFX PRCM_REG32(0x348)
+#define RM_RSTCTRL_GFX PRCM_REG32(0x350)
+#define RM_RSTST_GFX PRCM_REG32(0x358)
+#define PM_WKDEP_GFX PRCM_REG32(0x3C8)
+#define PM_PWSTCTRL_GFX PRCM_REG32(0x3E0)
+#define PM_PWSTST_GFX PRCM_REG32(0x3E4)
+
+/* WAKE-UP */
+#define CM_FCLKEN_WKUP PRCM_REG32(0x400)
+#define CM_ICLKEN_WKUP PRCM_REG32(0x410)
+#define CM_IDLEST_WKUP PRCM_REG32(0x420)
+#define CM_AUTOIDLE_WKUP PRCM_REG32(0x430)
+#define CM_CLKSEL_WKUP PRCM_REG32(0x440)
+#define RM_RSTCTRL_WKUP PRCM_REG32(0x450)
+#define RM_RSTTIME_WKUP PRCM_REG32(0x454)
+#define RM_RSTST_WKUP PRCM_REG32(0x458)
+#define PM_WKEN_WKUP PRCM_REG32(0x4A0)
+#define PM_WKST_WKUP PRCM_REG32(0x4B0)
+
+/* CLOCKS */
+#define CM_CLKEN_PLL PRCM_REG32(0x500)
+#define CM_IDLEST_CKGEN PRCM_REG32(0x520)
+#define CM_AUTOIDLE_PLL PRCM_REG32(0x530)
+#define CM_CLKSEL1_PLL PRCM_REG32(0x540)
+#define CM_CLKSEL2_PLL PRCM_REG32(0x544)
+
+/* DSP */
+#define CM_FCLKEN_DSP PRCM_REG32(0x800)
+#define CM_ICLKEN_DSP PRCM_REG32(0x810)
+#define CM_IDLEST_DSP PRCM_REG32(0x820)
+#define CM_AUTOIDLE_DSP PRCM_REG32(0x830)
+#define CM_CLKSEL_DSP PRCM_REG32(0x840)
+#define CM_CLKSTCTRL_DSP PRCM_REG32(0x848)
+#define RM_RSTCTRL_DSP PRCM_REG32(0x850)
+#define RM_RSTST_DSP PRCM_REG32(0x858)
+#define PM_WKEN_DSP PRCM_REG32(0x8A0)
+#define PM_WKDEP_DSP PRCM_REG32(0x8C8)
+#define PM_PWSTCTRL_DSP PRCM_REG32(0x8E0)
+#define PM_PWSTST_DSP PRCM_REG32(0x8E4)
+#define PRCM_IRQSTATUS_DSP PRCM_REG32(0x8F0)
+#define PRCM_IRQENABLE_DSP PRCM_REG32(0x8F4)
+
+/* IVA */
+#define PRCM_IRQSTATUS_IVA PRCM_REG32(0x8F8)
+#define PRCM_IRQENABLE_IVA PRCM_REG32(0x8FC)
+
+/* Modem on 2430 */
+#define CM_FCLKEN_MDM PRCM_REG32(0xC00)
+#define CM_ICLKEN_MDM PRCM_REG32(0xC10)
+#define CM_IDLEST_MDM PRCM_REG32(0xC20)
+#define CM_CLKSEL_MDM PRCM_REG32(0xC40)
+
+/* FIXME: Move to header for 2430 */
+#define DISP_BASE (OMAP24XX_L4_IO_BASE+0x50000)
+#define DISP_REG32(offset) __REG32(DISP_BASE + (offset))
+
+#define OMAP24XX_GPMC_BASE (L3_24XX_BASE + 0xa000)
+#define GPMC_BASE (OMAP24XX_GPMC_BASE)
+#define GPMC_REG32(offset) __REG32(GPMC_BASE + (offset))
+
+#define GPT1_BASE (OMAP24XX_GPT1)
+#define GPT1_REG32(offset) __REG32(GPT1_BASE + (offset))
+
+/* Misc sysconfig */
+#define DISPC_SYSCONFIG DISP_REG32(0x410)
+#define SPI_BASE (OMAP24XX_L4_IO_BASE+0x98000)
+#define MCSPI1_SYSCONFIG __REG32(SPI_BASE + 0x10)
+#define MCSPI2_SYSCONFIG __REG32(SPI_BASE+0x2000 + 0x10)
+
+//#define DSP_MMU_SYSCONFIG 0x5A000010
+#define CAMERA_MMU_SYSCONFIG __REG32(DISP_BASE+0x2C10)
+//#define IVA_MMU_SYSCONFIG 0x5D000010
+//#define DSP_DMA_SYSCONFIG 0x00FCC02C
+#define CAMERA_DMA_SYSCONFIG __REG32(DISP_BASE+0x282C)
+#define SYSTEM_DMA_SYSCONFIG __REG32(DISP_BASE+0x602C)
+#define GPMC_SYSCONFIG GPMC_REG32(0x010)
+#define MAILBOXES_SYSCONFIG __REG32(OMAP24XX_L4_IO_BASE+0x94010)
+#define UART1_SYSCONFIG __REG32(OMAP24XX_L4_IO_BASE+0x6A054)
+#define UART2_SYSCONFIG __REG32(OMAP24XX_L4_IO_BASE+0x6C054)
+#define UART3_SYSCONFIG __REG32(OMAP24XX_L4_IO_BASE+0x6E054)
+//#define IVA_SYSCONFIG 0x5C060010
+#define SDRC_SYSCONFIG __REG32(OMAP24XX_SDRC_BASE+0x10)
+#define SMS_SYSCONFIG __REG32(OMAP24XX_SMS_BASE+0x10)
+#define SSI_SYSCONFIG __REG32(DISP_BASE+0x8010)
+//#define VLYNQ_SYSCONFIG 0x67FFFE10
+
+/* rkw - good candidates for PM_ to start what nm was trying */
+#define OMAP24XX_GPT2 (OMAP24XX_L4_IO_BASE+0x2A000)
+#define OMAP24XX_GPT3 (OMAP24XX_L4_IO_BASE+0x78000)
+#define OMAP24XX_GPT4 (OMAP24XX_L4_IO_BASE+0x7A000)
+#define OMAP24XX_GPT5 (OMAP24XX_L4_IO_BASE+0x7C000)
+#define OMAP24XX_GPT6 (OMAP24XX_L4_IO_BASE+0x7E000)
+#define OMAP24XX_GPT7 (OMAP24XX_L4_IO_BASE+0x80000)
+#define OMAP24XX_GPT8 (OMAP24XX_L4_IO_BASE+0x82000)
+#define OMAP24XX_GPT9 (OMAP24XX_L4_IO_BASE+0x84000)
+#define OMAP24XX_GPT10 (OMAP24XX_L4_IO_BASE+0x86000)
+#define OMAP24XX_GPT11 (OMAP24XX_L4_IO_BASE+0x88000)
+#define OMAP24XX_GPT12 (OMAP24XX_L4_IO_BASE+0x8A000)
+
+#define GPTIMER1_SYSCONFIG GPT1_REG32(0x010)
+#define GPTIMER2_SYSCONFIG __REG32(OMAP24XX_GPT2 + 0x10)
+#define GPTIMER3_SYSCONFIG __REG32(OMAP24XX_GPT3 + 0x10)
+#define GPTIMER4_SYSCONFIG __REG32(OMAP24XX_GPT4 + 0x10)
+#define GPTIMER5_SYSCONFIG __REG32(OMAP24XX_GPT5 + 0x10)
+#define GPTIMER6_SYSCONFIG __REG32(OMAP24XX_GPT6 + 0x10)
+#define GPTIMER7_SYSCONFIG __REG32(OMAP24XX_GPT7 + 0x10)
+#define GPTIMER8_SYSCONFIG __REG32(OMAP24XX_GPT8 + 0x10)
+#define GPTIMER9_SYSCONFIG __REG32(OMAP24XX_GPT9 + 0x10)
+#define GPTIMER10_SYSCONFIG __REG32(OMAP24XX_GPT10 + 0x10)
+#define GPTIMER11_SYSCONFIG __REG32(OMAP24XX_GPT11 + 0x10)
+#define GPTIMER12_SYSCONFIG __REG32(OMAP24XX_GPT12 + 0x10)
+
+#define GPIOX_BASE(X) (OMAP24XX_GPIO_BASE+(0x2000*((X)-1)))
+
+#define GPIO1_SYSCONFIG __REG32((GPIOX_BASE(1)+0x10))
+#define GPIO2_SYSCONFIG __REG32((GPIOX_BASE(2)+0x10))
+#define GPIO3_SYSCONFIG __REG32((GPIOX_BASE(3)+0x10))
+#define GPIO4_SYSCONFIG __REG32((GPIOX_BASE(4)+0x10))
+
+/* GP TIMER 1 */
+#define GPTIMER1_TISTAT GPT1_REG32(0x014)
+#define GPTIMER1_TISR GPT1_REG32(0x018)
+#define GPTIMER1_TIER GPT1_REG32(0x01C)
+#define GPTIMER1_TWER GPT1_REG32(0x020)
+#define GPTIMER1_TCLR GPT1_REG32(0x024)
+#define GPTIMER1_TCRR GPT1_REG32(0x028)
+#define GPTIMER1_TLDR GPT1_REG32(0x02C)
+#define GPTIMER1_TTGR GPT1_REG32(0x030)
+#define GPTIMER1_TWPS GPT1_REG32(0x034)
+#define GPTIMER1_TMAR GPT1_REG32(0x038)
+#define GPTIMER1_TCAR1 GPT1_REG32(0x03C)
+#define GPTIMER1_TSICR GPT1_REG32(0x040)
+#define GPTIMER1_TCAR2 GPT1_REG32(0x044)
+
+/* rkw -- base fix up please... */
+#define GPTIMER3_TISR __REG32(OMAP24XX_L4_IO_BASE+0x78018)
+
+/* SDRC */
+#define SDRC_DLLA_CTRL __REG32(OMAP24XX_SDRC_BASE+0x060)
+#define SDRC_DLLA_STATUS __REG32(OMAP24XX_SDRC_BASE+0x064)
+#define SDRC_DLLB_CTRL __REG32(OMAP24XX_SDRC_BASE+0x068)
+#define SDRC_DLLB_STATUS __REG32(OMAP24XX_SDRC_BASE+0x06C)
+#define SDRC_POWER __REG32(OMAP24XX_SDRC_BASE+0x070)
+#define SDRC_MR_0 __REG32(OMAP24XX_SDRC_BASE+0x084)
+
+/* GPIO 1 */
+#define GPIO1_BASE GPIOX_BASE(1)
+#define GPIO1_REG32(offset) __REG32(GPIO1_BASE + (offset))
+#define GPIO1_IRQENABLE1 GPIO1_REG32(0x01C)
+#define GPIO1_IRQSTATUS1 GPIO1_REG32(0x018)
+#define GPIO1_IRQENABLE2 GPIO1_REG32(0x02C)
+#define GPIO1_IRQSTATUS2 GPIO1_REG32(0x028)
+#define GPIO1_WAKEUPENABLE GPIO1_REG32(0x020)
+#define GPIO1_RISINGDETECT GPIO1_REG32(0x048)
+#define GPIO1_DATAIN GPIO1_REG32(0x038)
+#define GPIO1_OE GPIO1_REG32(0x034)
+#define GPIO1_DATAOUT GPIO1_REG32(0x03C)
+
+/* GPIO2 */
+#define GPIO2_BASE GPIOX_BASE(2)
+#define GPIO2_REG32(offset) __REG32(GPIO2_BASE + (offset))
+#define GPIO2_IRQENABLE1 GPIO2_REG32(0x01C)
+#define GPIO2_IRQSTATUS1 GPIO2_REG32(0x018)
+#define GPIO2_IRQENABLE2 GPIO2_REG32(0x02C)
+#define GPIO2_IRQSTATUS2 GPIO2_REG32(0x028)
+#define GPIO2_WAKEUPENABLE GPIO2_REG32(0x020)
+#define GPIO2_RISINGDETECT GPIO2_REG32(0x048)
+#define GPIO2_DATAIN GPIO2_REG32(0x038)
+#define GPIO2_OE GPIO2_REG32(0x034)
+#define GPIO2_DATAOUT GPIO2_REG32(0x03C)
+
+/* GPIO 3 */
+#define GPIO3_BASE GPIOX_BASE(3)
+#define GPIO3_REG32(offset) __REG32(GPIO3_BASE + (offset))
+#define GPIO3_IRQENABLE1 GPIO3_REG32(0x01C)
+#define GPIO3_IRQSTATUS1 GPIO3_REG32(0x018)
+#define GPIO3_IRQENABLE2 GPIO3_REG32(0x02C)
+#define GPIO3_IRQSTATUS2 GPIO3_REG32(0x028)
+#define GPIO3_WAKEUPENABLE GPIO3_REG32(0x020)
+#define GPIO3_RISINGDETECT GPIO3_REG32(0x048)
+#define GPIO3_FALLINGDETECT GPIO3_REG32(0x04C)
+#define GPIO3_DATAIN GPIO3_REG32(0x038)
+#define GPIO3_OE GPIO3_REG32(0x034)
+#define GPIO3_DATAOUT GPIO3_REG32(0x03C)
+#define GPIO3_DEBOUNCENABLE GPIO3_REG32(0x050)
+#define GPIO3_DEBOUNCINGTIME GPIO3_REG32(0x054)
+
+/* GPIO 4 */
+#define GPIO4_BASE GPIOX_BASE(4)
+#define GPIO4_REG32(offset) __REG32(GPIO4_BASE + (offset))
+#define GPIO4_IRQENABLE1 GPIO4_REG32(0x01C)
+#define GPIO4_IRQSTATUS1 GPIO4_REG32(0x018)
+#define GPIO4_IRQENABLE2 GPIO4_REG32(0x02C)
+#define GPIO4_IRQSTATUS2 GPIO4_REG32(0x028)
+#define GPIO4_WAKEUPENABLE GPIO4_REG32(0x020)
+#define GPIO4_RISINGDETECT GPIO4_REG32(0x048)
+#define GPIO4_FALLINGDETECT GPIO4_REG32(0x04C)
+#define GPIO4_DATAIN GPIO4_REG32(0x038)
+#define GPIO4_OE GPIO4_REG32(0x034)
+#define GPIO4_DATAOUT GPIO4_REG32(0x03C)
+#define GPIO4_DEBOUNCENABLE GPIO4_REG32(0x050)
+#define GPIO4_DEBOUNCINGTIME GPIO4_REG32(0x054)
+
+
+/* IO CONFIG */
+#define CONTROL_BASE (OMAP24XX_CTRL_BASE)
+#define CONTROL_REG32(offset) __REG32(CONTROL_BASE + (offset))
+
+#define CONTROL_PADCONF_SPI1_NCS2 CONTROL_REG32(0x104)
+#define CONTROL_PADCONF_SYS_XTALOUT CONTROL_REG32(0x134)
+#define CONTROL_PADCONF_UART1_RX CONTROL_REG32(0x0C8)
+#define CONTROL_PADCONF_MCBSP1_DX CONTROL_REG32(0x10C)
+#define CONTROL_PADCONF_GPMC_NCS4 CONTROL_REG32(0x090)
+#define CONTROL_PADCONF_DSS_D5 CONTROL_REG32(0x0B8)
+#define CONTROL_PADCONF_DSS_D9 CONTROL_REG32(0x0BC)
+#define CONTROL_PADCONF_DSS_D13 CONTROL_REG32(0x0C0)
+#define CONTROL_PADCONF_DSS_VSYNC CONTROL_REG32(0x0CC)
+
+/* CONTROL */
+#define CONTROL_DEVCONF CONTROL_REG32(0x274)
+
+/* INTERRUPT CONTROLLER */
+#define INTC_BASE (OMAP24XX_L4_IO_BASE+0xfe000)
+#define INTC_REG32(offset) __REG32(INTC_BASE + (offset))
+
+#define INTC1_U_BASE INTC_REG32(0x000)
+#define INTC_MIR0 INTC_REG32(0x084)
+#define INTC_MIR_SET0 INTC_REG32(0x08C)
+#define INTC_MIR_CLEAR0 INTC_REG32(0x088)
+#define INTC_ISR_CLEAR0 INTC_REG32(0x094)
+#define INTC_MIR1 INTC_REG32(0x0A4)
+#define INTC_MIR_SET1 INTC_REG32(0x0AC)
+#define INTC_MIR_CLEAR1 INTC_REG32(0x0A8)
+#define INTC_ISR_CLEAR1 INTC_REG32(0x0B4)
+#define INTC_MIR2 INTC_REG32(0x0C4)
+#define INTC_MIR_SET2 INTC_REG32(0x0CC)
+#define INTC_MIR_CLEAR2 INTC_REG32(0x0C8)
+#define INTC_ISR_CLEAR2 INTC_REG32(0x0D4)
+#define INTC_SIR_IRQ INTC_REG32(0x040)
+#define INTC_CONTROL INTC_REG32(0x048)
+#define INTC_ILR11 INTC_REG32(0x12C)
+#define INTC_ILR32 INTC_REG32(0x180)
+#define INTC_ILR37 INTC_REG32(0x194)
+#define INTC_SYSCONFIG INTC_REG32(0x010)
+
+/* RAM FIREWALL */
+#define RAMFW_BASE (0x68005000)
+#define RAMFW_REG32(offset) __REG32(RAMFW_BASE + (offset))
+
+#define RAMFW_REQINFOPERM0 RAMFW_REG32(0x048)
+#define RAMFW_READPERM0 RAMFW_REG32(0x050)
+#define RAMFW_WRITEPERM0 RAMFW_REG32(0x058)
+
+/* GPMC CS1 FPGA ON USER INTERFACE MODULE */
+//#define DEBUG_BOARD_LED_REGISTER 0x04000014
+
+/* GPMC CS0 */
+#define GPMC_CONFIG1_0 GPMC_REG32(0x060)
+#define GPMC_CONFIG2_0 GPMC_REG32(0x064)
+#define GPMC_CONFIG3_0 GPMC_REG32(0x068)
+#define GPMC_CONFIG4_0 GPMC_REG32(0x06C)
+#define GPMC_CONFIG5_0 GPMC_REG32(0x070)
+#define GPMC_CONFIG6_0 GPMC_REG32(0x074)
+#define GPMC_CONFIG7_0 GPMC_REG32(0x078)
+
+/* GPMC CS1 */
+#define GPMC_CONFIG1_1 GPMC_REG32(0x090)
+#define GPMC_CONFIG2_1 GPMC_REG32(0x094)
+#define GPMC_CONFIG3_1 GPMC_REG32(0x098)
+#define GPMC_CONFIG4_1 GPMC_REG32(0x09C)
+#define GPMC_CONFIG5_1 GPMC_REG32(0x0a0)
+#define GPMC_CONFIG6_1 GPMC_REG32(0x0a4)
+#define GPMC_CONFIG7_1 GPMC_REG32(0x0a8)
+
+/* DSS */
+#define DSS_CONTROL DISP_REG32(0x040)
+#define DISPC_CONTROL DISP_REG32(0x440)
+#define DISPC_SYSSTATUS DISP_REG32(0x414)
+#define DISPC_IRQSTATUS DISP_REG32(0x418)
+#define DISPC_IRQENABLE DISP_REG32(0x41C)
+#define DISPC_CONFIG DISP_REG32(0x444)
+#define DISPC_DEFAULT_COLOR0 DISP_REG32(0x44C)
+#define DISPC_DEFAULT_COLOR1 DISP_REG32(0x450)
+#define DISPC_TRANS_COLOR0 DISP_REG32(0x454)
+#define DISPC_TRANS_COLOR1 DISP_REG32(0x458)
+#define DISPC_LINE_NUMBER DISP_REG32(0x460)
+#define DISPC_TIMING_H DISP_REG32(0x464)
+#define DISPC_TIMING_V DISP_REG32(0x468)
+#define DISPC_POL_FREQ DISP_REG32(0x46C)
+#define DISPC_DIVISOR DISP_REG32(0x470)
+#define DISPC_SIZE_DIG DISP_REG32(0x478)
+#define DISPC_SIZE_LCD DISP_REG32(0x47C)
+#define DISPC_GFX_BA0 DISP_REG32(0x480)
+#define DISPC_GFX_BA1 DISP_REG32(0x484)
+#define DISPC_GFX_POSITION DISP_REG32(0x488)
+#define DISPC_GFX_SIZE DISP_REG32(0x48C)
+#define DISPC_GFX_ATTRIBUTES DISP_REG32(0x4A0)
+#define DISPC_GFX_FIFO_THRESHOLD DISP_REG32(0x4A4)
+#define DISPC_GFX_ROW_INC DISP_REG32(0x4AC)
+#define DISPC_GFX_PIXEL_INC DISP_REG32(0x4B0)
+#define DISPC_GFX_WINDOW_SKIP DISP_REG32(0x4B4)
+#define DISPC_GFX_TABLE_BA DISP_REG32(0x4B8)
+#define DISPC_DATA_CYCLE1 DISP_REG32(0x5D4)
+#define DISPC_DATA_CYCLE2 DISP_REG32(0x5D8)
+#define DISPC_DATA_CYCLE3 DISP_REG32(0x5DC)
+
+/* Wake up define for board */
+#define GPIO97 (1 << 1)
+#define GPIO88 (1 << 24)
+
+#endif /* __ASSEMBLER__ */
+
+#endif
+
+
+
+
+
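
Since every definition above expands to a __REG32() accessor, the PRCM registers read and write like plain 32-bit lvalues. A minimal sketch, assuming the usual include path and that PRCM_REVISION keeps the major/minor nibble layout of the 24xx PRCM (both are assumptions, not taken from this patch):

        /* Illustrative only -- not part of this patch. */
        #include <linux/kernel.h>
        #include <linux/types.h>
        #include <asm/arch/prcm.h>              /* assumed include path */

        static void example_show_prcm_revision(void)
        {
                u32 rev = PRCM_REVISION;        /* plain 32-bit read */

                printk(KERN_INFO "PRCM rev %u.%u\n",
                       (rev >> 4) & 0xf, rev & 0xf);    /* assumed nibble layout */

                PRCM_IRQENABLE_MPU = 0;         /* plain 32-bit write: mask PRCM IRQs */
        }
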
diff --git a/include/asm-arm/arch-omap/sram.h b/include/asm-arm/arch-omap/sram.h
new file mode 100644
index 00000000000..e72ccbf0fe0
--- /dev/null
+++ b/include/asm-arm/arch-omap/sram.h
@@ -0,0 +1,38 @@
+/*
+ * linux/include/asm-arm/arch-omap/sram.h
+ *
+ * Interface for functions that need to be run in internal SRAM
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __ARCH_ARM_OMAP_SRAM_H
+#define __ARCH_ARM_OMAP_SRAM_H
+
+extern void * omap_sram_push(void * start, unsigned long size);
+extern void omap_sram_reprogram_clock(u32 dpllctl, u32 ckctl);
+
+extern void omap2_sram_ddr_init(u32 *slow_dll_ctrl, u32 fast_dll_ctrl,
+ u32 base_cs, u32 force_unlock);
+extern void omap2_sram_reprogram_sdrc(u32 perf_level, u32 dll_val,
+ u32 mem_type);
+extern u32 omap2_set_prcm(u32 dpll_ctrl_val, u32 sdrc_rfr_val, int bypass);
+
+
+/* Do not use these */
+extern void sram_reprogram_clock(u32 ckctl, u32 dpllctl);
+extern unsigned long sram_reprogram_clock_sz;
+
+extern void sram_ddr_init(u32 *slow_dll_ctrl, u32 fast_dll_ctrl,
+ u32 base_cs, u32 force_unlock);
+extern unsigned long sram_ddr_init_sz;
+
+extern u32 sram_set_prcm(u32 dpll_ctrl_val, u32 sdrc_rfr_val, int bypass);
+extern unsigned long sram_set_prcm_sz;
+
+extern void sram_reprogram_sdrc(u32 perf_level, u32 dll_val, u32 mem_type);
+extern unsigned long sram_reprogram_sdrc_sz;
+
+#endif
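
The "do not use these" symbols are the raw pieces that omap_sram_push() relocates: a function and its matching _sz length are copied into SRAM once at init time, and the returned pointer is what gets called afterwards. A hedged sketch of that pattern, with hypothetical names and an assumed include path:

        /* Illustrative only -- not part of this patch. */
        #include <linux/init.h>
        #include <linux/errno.h>
        #include <linux/types.h>
        #include <asm/arch/sram.h>              /* assumed include path */

        static void (*example_reprogram_clock)(u32 ckctl, u32 dpllctl);

        static int __init example_sram_init(void)
        {
                /* copy the routine into SRAM; call the copy from then on */
                example_reprogram_clock =
                        omap_sram_push(sram_reprogram_clock, sram_reprogram_clock_sz);
                return example_reprogram_clock ? 0 : -ENOMEM;
        }
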
diff --git a/include/asm-arm/arch-omap/system.h b/include/asm-arm/arch-omap/system.h
index ff37bc27e60..b43cdd2a387 100644
--- a/include/asm-arm/arch-omap/system.h
+++ b/include/asm-arm/arch-omap/system.h
@@ -6,18 +6,21 @@
#define __ASM_ARCH_SYSTEM_H
#include <linux/config.h>
#include <asm/mach-types.h>
+#include <asm/hardware/clock.h>
#include <asm/arch/hardware.h>
-#include <asm/mach-types.h>
+#include <asm/arch/prcm.h>
+
+#ifndef CONFIG_MACH_VOICEBLUE
+#define voiceblue_reset() do {} while (0)
+#endif
static inline void arch_idle(void)
{
cpu_do_idle();
}
-static inline void arch_reset(char mode)
+static inline void omap1_arch_reset(char mode)
{
-
-#ifdef CONFIG_ARCH_OMAP16XX
/*
* Workaround for 5912/1611b bug mentioned in sprz209d.pdf p. 28
* "Global Software Reset Affects Traffic Controller Frequency".
@@ -27,13 +30,31 @@ static inline void arch_reset(char mode)
DPLL_CTL);
omap_writew(0x8, ARM_RSTCT1);
}
-#endif
-#ifdef CONFIG_MACH_VOICEBLUE
+
if (machine_is_voiceblue())
voiceblue_reset();
else
-#endif
omap_writew(1, ARM_RSTCT1);
}
+static inline void omap2_arch_reset(char mode)
+{
+ u32 rate;
+ struct clk *vclk, *sclk;
+
+ vclk = clk_get(NULL, "virt_prcm_set");
+ sclk = clk_get(NULL, "sys_ck");
+ rate = clk_get_rate(sclk);
+ clk_set_rate(vclk, rate); /* go to bypass for OMAP limitation */
+ RM_RSTCTRL_WKUP |= 2;
+}
+
+static inline void arch_reset(char mode)
+{
+ if (!cpu_is_omap24xx())
+ omap1_arch_reset(mode);
+ else
+ omap2_arch_reset(mode);
+}
+
#endif
diff --git a/include/asm-arm/arch-omap/timex.h b/include/asm-arm/arch-omap/timex.h
index b61ddb491e8..21f2e367185 100644
--- a/include/asm-arm/arch-omap/timex.h
+++ b/include/asm-arm/arch-omap/timex.h
@@ -28,6 +28,14 @@
#if !defined(__ASM_ARCH_OMAP_TIMEX_H)
#define __ASM_ARCH_OMAP_TIMEX_H
+/*
+ * OMAP 32KHz timer updates time one jiffie at a time from a secondary timer,
+ * and that's why the CLOCK_TICK_RATE is not 32768.
+ */
+#ifdef CONFIG_OMAP_32K_TIMER
+#define CLOCK_TICK_RATE (CONFIG_OMAP_32K_TIMER_HZ)
+#else
#define CLOCK_TICK_RATE (HZ * 100000UL)
+#endif
#endif /* __ASM_ARCH_OMAP_TIMEX_H */
diff --git a/include/asm-arm/arch-omap/uncompress.h b/include/asm-arm/arch-omap/uncompress.h
index 3545c86859c..c718264affb 100644
--- a/include/asm-arm/arch-omap/uncompress.h
+++ b/include/asm-arm/arch-omap/uncompress.h
@@ -36,10 +36,14 @@ putstr(const char *s)
volatile u8 * uart = 0;
int shift = 2;
+#ifdef CONFIG_MACH_OMAP_PALMTE
+ return;
+#endif
+
#ifdef CONFIG_ARCH_OMAP
#ifdef CONFIG_OMAP_LL_DEBUG_UART3
uart = (volatile u8 *)(OMAP_UART3_BASE);
-#elif CONFIG_OMAP_LL_DEBUG_UART2
+#elif defined(CONFIG_OMAP_LL_DEBUG_UART2)
uart = (volatile u8 *)(OMAP_UART2_BASE);
#else
uart = (volatile u8 *)(OMAP_UART1_BASE);
diff --git a/include/asm-arm/arch-pxa/sharpsl.h b/include/asm-arm/arch-pxa/sharpsl.h
index 311f2bb5386..0b43495d24b 100644
--- a/include/asm-arm/arch-pxa/sharpsl.h
+++ b/include/asm-arm/arch-pxa/sharpsl.h
@@ -21,12 +21,18 @@ struct corgits_machinfo {
void (*wait_hsync)(void);
};
+
/*
* SharpSL Backlight
*/
-
struct corgibl_machinfo {
int max_intensity;
void (*set_bl_intensity)(int intensity);
};
+extern void corgibl_limit_intensity(int limit);
+
+/*
+ * SharpSL Battery/PM Driver
+ */
+extern void sharpsl_battery_kick(void);
diff --git a/include/asm-arm/arch-pxa/ssp.h b/include/asm-arm/arch-pxa/ssp.h
index 6ec67b018c0..949878c0d90 100644
--- a/include/asm-arm/arch-pxa/ssp.h
+++ b/include/asm-arm/arch-pxa/ssp.h
@@ -18,6 +18,11 @@
#ifndef SSP_H
#define SSP_H
+/*
+ * SSP initialisation flags
+ */
+#define SSP_NO_IRQ 0x1 /* don't register an irq handler in SSP driver */
+
struct ssp_state {
u32 cr0;
u32 cr1;
@@ -31,6 +36,7 @@ struct ssp_dev {
u32 flags;
u32 psp_flags;
u32 speed;
+ int irq;
};
int ssp_write_word(struct ssp_dev *dev, u32 data);
@@ -40,7 +46,7 @@ void ssp_enable(struct ssp_dev *dev);
void ssp_disable(struct ssp_dev *dev);
void ssp_save_state(struct ssp_dev *dev, struct ssp_state *ssp);
void ssp_restore_state(struct ssp_dev *dev, struct ssp_state *ssp);
-int ssp_init(struct ssp_dev *dev, u32 port);
+int ssp_init(struct ssp_dev *dev, u32 port, u32 init_flags);
int ssp_config(struct ssp_dev *dev, u32 mode, u32 flags, u32 psp_flags, u32 speed);
void ssp_exit(struct ssp_dev *dev);
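
The extra init_flags argument and the SSP_NO_IRQ bit let a caller such as the SharpSL PM code claim an SSP port without the driver registering its own interrupt handler. A rough usage sketch follows; the function name is hypothetical and the ssp_config() step is omitted:

        /* Illustrative only -- not part of this patch. */
        #include <asm/arch/ssp.h>               /* assumed include path */

        static struct ssp_dev example_ssp;

        static int example_claim_port(void)
        {
                int ret;

                /* port 1, and skip the driver's IRQ handler registration */
                ret = ssp_init(&example_ssp, 1, SSP_NO_IRQ);
                if (ret)
                        return ret;

                ssp_enable(&example_ssp);
                /* ... word transfers via ssp_write_word() here ... */
                ssp_disable(&example_ssp);
                ssp_exit(&example_ssp);
                return 0;
        }
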
diff --git a/include/asm-i386/ide.h b/include/asm-i386/ide.h
index 79dfab87135..454440193ea 100644
--- a/include/asm-i386/ide.h
+++ b/include/asm-i386/ide.h
@@ -41,6 +41,12 @@ static __inline__ int ide_default_irq(unsigned long base)
static __inline__ unsigned long ide_default_io_base(int index)
{
+ /*
+ * If PCI is present then it is not safe to poke around
+ * the other legacy IDE ports. Only 0x1f0 and 0x170 are
+ * defined compatibility mode ports for PCI. A user can
+ * override this using ide= but we must default safe.
+ */
if (pci_find_device(PCI_ANY_ID, PCI_ANY_ID, NULL) == NULL) {
switch(index) {
case 2: return 0x1e8;
diff --git a/include/asm-i386/msi.h b/include/asm-i386/msi.h
index b85393094c8..f041d4495fa 100644
--- a/include/asm-i386/msi.h
+++ b/include/asm-i386/msi.h
@@ -10,13 +10,6 @@
#include <mach_apic.h>
#define LAST_DEVICE_VECTOR 232
-#define MSI_DEST_MODE MSI_LOGICAL_MODE
-#define MSI_TARGET_CPU_SHIFT 12
-
-#ifdef CONFIG_SMP
-#define MSI_TARGET_CPU logical_smp_processor_id()
-#else
-#define MSI_TARGET_CPU cpu_to_logical_apicid(first_cpu(cpu_online_map))
-#endif
+#define MSI_TARGET_CPU_SHIFT 12
#endif /* ASM_MSI_H */
diff --git a/include/asm-i386/smp.h b/include/asm-i386/smp.h
index 13250199976..61d3ab9db70 100644
--- a/include/asm-i386/smp.h
+++ b/include/asm-i386/smp.h
@@ -45,6 +45,8 @@ extern void unlock_ipi_call_lock(void);
#define MAX_APICID 256
extern u8 x86_cpu_to_apicid[];
+#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu]
+
#ifdef CONFIG_HOTPLUG_CPU
extern void cpu_exit_clear(void);
extern void cpu_uninit(void);
@@ -92,6 +94,10 @@ extern int __cpu_disable(void);
extern void __cpu_die(unsigned int cpu);
#endif /* !__ASSEMBLY__ */
+#else /* CONFIG_SMP */
+
+#define cpu_physical_id(cpu) boot_cpu_physical_apicid
+
#define NO_PROC_ID 0xFF /* No processor magic marker */
#endif
diff --git a/include/asm-ia64/kdebug.h b/include/asm-ia64/kdebug.h
index 4d376e1663f..8b01a083dde 100644
--- a/include/asm-ia64/kdebug.h
+++ b/include/asm-ia64/kdebug.h
@@ -22,6 +22,9 @@
* 2005-Apr Rusty Lynch <rusty.lynch@intel.com> and Anil S Keshavamurthy
* <anil.s.keshavamurthy@intel.com> adopted from
* include/asm-x86_64/kdebug.h
+ *
+ * 2005-Oct Keith Owens <kaos@sgi.com>. Expand notify_die to cover more
+ * events.
*/
#include <linux/notifier.h>
@@ -35,13 +38,36 @@ struct die_args {
int signr;
};
-int register_die_notifier(struct notifier_block *nb);
+extern int register_die_notifier(struct notifier_block *);
+extern int unregister_die_notifier(struct notifier_block *);
extern struct notifier_block *ia64die_chain;
enum die_val {
DIE_BREAK = 1,
- DIE_SS,
+ DIE_FAULT,
+ DIE_OOPS,
DIE_PAGE_FAULT,
+ DIE_MACHINE_HALT,
+ DIE_MACHINE_RESTART,
+ DIE_MCA_MONARCH_ENTER,
+ DIE_MCA_MONARCH_PROCESS,
+ DIE_MCA_MONARCH_LEAVE,
+ DIE_MCA_SLAVE_ENTER,
+ DIE_MCA_SLAVE_PROCESS,
+ DIE_MCA_SLAVE_LEAVE,
+ DIE_MCA_RENDZVOUS_ENTER,
+ DIE_MCA_RENDZVOUS_PROCESS,
+ DIE_MCA_RENDZVOUS_LEAVE,
+ DIE_INIT_MONARCH_ENTER,
+ DIE_INIT_MONARCH_PROCESS,
+ DIE_INIT_MONARCH_LEAVE,
+ DIE_INIT_SLAVE_ENTER,
+ DIE_INIT_SLAVE_PROCESS,
+ DIE_INIT_SLAVE_LEAVE,
+ DIE_KDEBUG_ENTER,
+ DIE_KDEBUG_LEAVE,
+ DIE_KDUMP_ENTER,
+ DIE_KDUMP_LEAVE,
};
static inline int notify_die(enum die_val val, char *str, struct pt_regs *regs,
diff --git a/include/asm-ia64/mmu_context.h b/include/asm-ia64/mmu_context.h
index 8d6e72f7b08..b5c65081a3a 100644
--- a/include/asm-ia64/mmu_context.h
+++ b/include/asm-ia64/mmu_context.h
@@ -7,12 +7,13 @@
*/
/*
- * Routines to manage the allocation of task context numbers. Task context numbers are
- * used to reduce or eliminate the need to perform TLB flushes due to context switches.
- * Context numbers are implemented using ia-64 region ids. Since the IA-64 TLB does not
- * consider the region number when performing a TLB lookup, we need to assign a unique
- * region id to each region in a process. We use the least significant three bits in a
- * region id for this purpose.
+ * Routines to manage the allocation of task context numbers. Task context
+ * numbers are used to reduce or eliminate the need to perform TLB flushes
+ * due to context switches. Context numbers are implemented using ia-64
+ * region ids. Since the IA-64 TLB does not consider the region number when
+ * performing a TLB lookup, we need to assign a unique region id to each
+ * region in a process. We use the least significant three bits in a region
+ * id for this purpose.
*/
#define IA64_REGION_ID_KERNEL 0 /* the kernel's region id (tlb.c depends on this being 0) */
@@ -32,13 +33,17 @@
struct ia64_ctx {
spinlock_t lock;
unsigned int next; /* next context number to use */
- unsigned int limit; /* next >= limit => must call wrap_mmu_context() */
- unsigned int max_ctx; /* max. context value supported by all CPUs */
+ unsigned int limit; /* available free range */
+ unsigned int max_ctx; /* max. context value supported by all CPUs */
+ /* call wrap_mmu_context when next >= max */
+ unsigned long *bitmap; /* bitmap size is max_ctx+1 */
+ unsigned long *flushmap;/* pending rid to be flushed */
};
extern struct ia64_ctx ia64_ctx;
DECLARE_PER_CPU(u8, ia64_need_tlb_flush);
+extern void mmu_context_init (void);
extern void wrap_mmu_context (struct mm_struct *mm);
static inline void
@@ -47,10 +52,10 @@ enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
}
/*
- * When the context counter wraps around all TLBs need to be flushed because an old
- * context number might have been reused. This is signalled by the ia64_need_tlb_flush
- * per-CPU variable, which is checked in the routine below. Called by activate_mm().
- * <efocht@ess.nec.de>
+ * When the context counter wraps around all TLBs need to be flushed because
+ * an old context number might have been reused. This is signalled by the
+ * ia64_need_tlb_flush per-CPU variable, which is checked in the routine
+ * below. Called by activate_mm(). <efocht@ess.nec.de>
*/
static inline void
delayed_tlb_flush (void)
@@ -60,11 +65,9 @@ delayed_tlb_flush (void)
if (unlikely(__ia64_per_cpu_var(ia64_need_tlb_flush))) {
spin_lock_irqsave(&ia64_ctx.lock, flags);
- {
- if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
- local_flush_tlb_all();
- __ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
- }
+ if (__ia64_per_cpu_var(ia64_need_tlb_flush)) {
+ local_flush_tlb_all();
+ __ia64_per_cpu_var(ia64_need_tlb_flush) = 0;
}
spin_unlock_irqrestore(&ia64_ctx.lock, flags);
}
@@ -76,20 +79,27 @@ get_mmu_context (struct mm_struct *mm)
unsigned long flags;
nv_mm_context_t context = mm->context;
- if (unlikely(!context)) {
- spin_lock_irqsave(&ia64_ctx.lock, flags);
- {
- /* re-check, now that we've got the lock: */
- context = mm->context;
- if (context == 0) {
- cpus_clear(mm->cpu_vm_mask);
- if (ia64_ctx.next >= ia64_ctx.limit)
- wrap_mmu_context(mm);
- mm->context = context = ia64_ctx.next++;
- }
+ if (likely(context))
+ goto out;
+
+ spin_lock_irqsave(&ia64_ctx.lock, flags);
+ /* re-check, now that we've got the lock: */
+ context = mm->context;
+ if (context == 0) {
+ cpus_clear(mm->cpu_vm_mask);
+ if (ia64_ctx.next >= ia64_ctx.limit) {
+ ia64_ctx.next = find_next_zero_bit(ia64_ctx.bitmap,
+ ia64_ctx.max_ctx, ia64_ctx.next);
+ ia64_ctx.limit = find_next_bit(ia64_ctx.bitmap,
+ ia64_ctx.max_ctx, ia64_ctx.next);
+ if (ia64_ctx.next >= ia64_ctx.max_ctx)
+ wrap_mmu_context(mm);
}
- spin_unlock_irqrestore(&ia64_ctx.lock, flags);
+ mm->context = context = ia64_ctx.next++;
+ __set_bit(context, ia64_ctx.bitmap);
}
+ spin_unlock_irqrestore(&ia64_ctx.lock, flags);
+out:
/*
* Ensure we're not starting to use "context" before any old
* uses of it are gone from our TLB.
@@ -100,8 +110,8 @@ get_mmu_context (struct mm_struct *mm)
}
/*
- * Initialize context number to some sane value. MM is guaranteed to be a brand-new
- * address-space, so no TLB flushing is needed, ever.
+ * Initialize context number to some sane value. MM is guaranteed to be a
+ * brand-new address-space, so no TLB flushing is needed, ever.
*/
static inline int
init_new_context (struct task_struct *p, struct mm_struct *mm)
@@ -162,7 +172,10 @@ activate_context (struct mm_struct *mm)
if (!cpu_isset(smp_processor_id(), mm->cpu_vm_mask))
cpu_set(smp_processor_id(), mm->cpu_vm_mask);
reload_context(context);
- /* in the unlikely event of a TLB-flush by another thread, redo the load: */
+ /*
+ * in the unlikely event of a TLB-flush by another thread,
+ * redo the load.
+ */
} while (unlikely(context != mm->context));
}
@@ -175,8 +188,8 @@ static inline void
activate_mm (struct mm_struct *prev, struct mm_struct *next)
{
/*
- * We may get interrupts here, but that's OK because interrupt handlers cannot
- * touch user-space.
+ * We may get interrupts here, but that's OK because interrupt
+ * handlers cannot touch user-space.
*/
ia64_set_kr(IA64_KR_PT_BASE, __pa(next->pgd));
activate_context(next);
diff --git a/include/asm-ia64/msi.h b/include/asm-ia64/msi.h
index 60f2137f927..97890f7762b 100644
--- a/include/asm-ia64/msi.h
+++ b/include/asm-ia64/msi.h
@@ -12,9 +12,6 @@
static inline void set_intr_gate (int nr, void *func) {}
#define IO_APIC_VECTOR(irq) (irq)
#define ack_APIC_irq ia64_eoi
-#define cpu_mask_to_apicid(mask) cpu_physical_id(first_cpu(mask))
-#define MSI_DEST_MODE MSI_PHYSICAL_MODE
-#define MSI_TARGET_CPU ((ia64_getreg(_IA64_REG_CR_LID) >> 16) & 0xffff)
#define MSI_TARGET_CPU_SHIFT 4
#endif /* ASM_MSI_H */
diff --git a/include/asm-ia64/tlbflush.h b/include/asm-ia64/tlbflush.h
index b65c6270272..a35b323bae4 100644
--- a/include/asm-ia64/tlbflush.h
+++ b/include/asm-ia64/tlbflush.h
@@ -51,6 +51,7 @@ flush_tlb_mm (struct mm_struct *mm)
if (!mm)
return;
+ set_bit(mm->context, ia64_ctx.flushmap);
mm->context = 0;
if (atomic_read(&mm->mm_users) == 0)
diff --git a/include/asm-ppc64/abs_addr.h b/include/asm-powerpc/abs_addr.h
index dc3fc3fefef..18415108fc5 100644
--- a/include/asm-ppc64/abs_addr.h
+++ b/include/asm-powerpc/abs_addr.h
@@ -1,5 +1,5 @@
-#ifndef _ABS_ADDR_H
-#define _ABS_ADDR_H
+#ifndef _ASM_POWERPC_ABS_ADDR_H
+#define _ASM_POWERPC_ABS_ADDR_H
#include <linux/config.h>
@@ -70,4 +70,4 @@ static inline unsigned long phys_to_abs(unsigned long pa)
#define iseries_hv_addr(virtaddr) \
(0x8000000000000000 | virt_to_abs(virtaddr))
-#endif /* _ABS_ADDR_H */
+#endif /* _ASM_POWERPC_ABS_ADDR_H */
diff --git a/include/asm-powerpc/asm-compat.h b/include/asm-powerpc/asm-compat.h
new file mode 100644
index 00000000000..8b133efc9f7
--- /dev/null
+++ b/include/asm-powerpc/asm-compat.h
@@ -0,0 +1,55 @@
+#ifndef _ASM_POWERPC_ASM_COMPAT_H
+#define _ASM_POWERPC_ASM_COMPAT_H
+
+#include <linux/config.h>
+#include <asm/types.h>
+
+#ifdef __ASSEMBLY__
+# define stringify_in_c(...) __VA_ARGS__
+# define ASM_CONST(x) x
+#else
+/* This version of stringify will deal with commas... */
+# define __stringify_in_c(...) #__VA_ARGS__
+# define stringify_in_c(...) __stringify_in_c(__VA_ARGS__) " "
+# define __ASM_CONST(x) x##UL
+# define ASM_CONST(x) __ASM_CONST(x)
+#endif
+
+#ifdef __powerpc64__
+
+/* operations for longs and pointers */
+#define PPC_LL stringify_in_c(ld)
+#define PPC_STL stringify_in_c(std)
+#define PPC_LCMPI stringify_in_c(cmpdi)
+#define PPC_LONG stringify_in_c(.llong)
+#define PPC_TLNEI stringify_in_c(tdnei)
+#define PPC_LLARX stringify_in_c(ldarx)
+#define PPC_STLCX stringify_in_c(stdcx.)
+#define PPC_CNTLZL stringify_in_c(cntlzd)
+
+#else /* 32-bit */
+
+/* operations for longs and pointers */
+#define PPC_LL stringify_in_c(lwz)
+#define PPC_STL stringify_in_c(stw)
+#define PPC_LCMPI stringify_in_c(cmpwi)
+#define PPC_LONG stringify_in_c(.long)
+#define PPC_TLNEI stringify_in_c(twnei)
+#define PPC_LLARX stringify_in_c(lwarx)
+#define PPC_STLCX stringify_in_c(stwcx.)
+#define PPC_CNTLZL stringify_in_c(cntlzw)
+
+#endif
+
+#ifdef CONFIG_IBM405_ERR77
+/* Erratum #77 on the 405 means we need a sync or dcbt before every
+ * stwcx. The old ATOMIC_SYNC_FIX covered some but not all of this.
+ */
+#define PPC405_ERR77(ra,rb) stringify_in_c(dcbt ra, rb;)
+#define PPC405_ERR77_SYNC stringify_in_c(sync;)
+#else
+#define PPC405_ERR77(ra,rb)
+#define PPC405_ERR77_SYNC
+#endif
+
+#endif /* _ASM_POWERPC_ASM_COMPAT_H */
diff --git a/include/asm-powerpc/atomic.h b/include/asm-powerpc/atomic.h
index ed4b345ed75..9c0b372a46e 100644
--- a/include/asm-powerpc/atomic.h
+++ b/include/asm-powerpc/atomic.h
@@ -9,21 +9,13 @@ typedef struct { volatile int counter; } atomic_t;
#ifdef __KERNEL__
#include <asm/synch.h>
+#include <asm/asm-compat.h>
#define ATOMIC_INIT(i) { (i) }
#define atomic_read(v) ((v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))
-/* Erratum #77 on the 405 means we need a sync or dcbt before every stwcx.
- * The old ATOMIC_SYNC_FIX covered some but not all of this.
- */
-#ifdef CONFIG_IBM405_ERR77
-#define PPC405_ERR77(ra,rb) "dcbt " #ra "," #rb ";"
-#else
-#define PPC405_ERR77(ra,rb)
-#endif
-
static __inline__ void atomic_add(int a, atomic_t *v)
{
int t;
@@ -205,5 +197,183 @@ static __inline__ int atomic_dec_if_positive(atomic_t *v)
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_inc() smp_mb()
+#ifdef __powerpc64__
+
+typedef struct { volatile long counter; } atomic64_t;
+
+#define ATOMIC64_INIT(i) { (i) }
+
+#define atomic64_read(v) ((v)->counter)
+#define atomic64_set(v,i) (((v)->counter) = (i))
+
+static __inline__ void atomic64_add(long a, atomic64_t *v)
+{
+ long t;
+
+ __asm__ __volatile__(
+"1: ldarx %0,0,%3 # atomic64_add\n\
+ add %0,%2,%0\n\
+ stdcx. %0,0,%3 \n\
+ bne- 1b"
+ : "=&r" (t), "=m" (v->counter)
+ : "r" (a), "r" (&v->counter), "m" (v->counter)
+ : "cc");
+}
+
+static __inline__ long atomic64_add_return(long a, atomic64_t *v)
+{
+ long t;
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1: ldarx %0,0,%2 # atomic64_add_return\n\
+ add %0,%1,%0\n\
+ stdcx. %0,0,%2 \n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (t)
+ : "r" (a), "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+#define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
+
+static __inline__ void atomic64_sub(long a, atomic64_t *v)
+{
+ long t;
+
+ __asm__ __volatile__(
+"1: ldarx %0,0,%3 # atomic64_sub\n\
+ subf %0,%2,%0\n\
+ stdcx. %0,0,%3 \n\
+ bne- 1b"
+ : "=&r" (t), "=m" (v->counter)
+ : "r" (a), "r" (&v->counter), "m" (v->counter)
+ : "cc");
+}
+
+static __inline__ long atomic64_sub_return(long a, atomic64_t *v)
+{
+ long t;
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1: ldarx %0,0,%2 # atomic64_sub_return\n\
+ subf %0,%1,%0\n\
+ stdcx. %0,0,%2 \n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (t)
+ : "r" (a), "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+static __inline__ void atomic64_inc(atomic64_t *v)
+{
+ long t;
+
+ __asm__ __volatile__(
+"1: ldarx %0,0,%2 # atomic64_inc\n\
+ addic %0,%0,1\n\
+ stdcx. %0,0,%2 \n\
+ bne- 1b"
+ : "=&r" (t), "=m" (v->counter)
+ : "r" (&v->counter), "m" (v->counter)
+ : "cc");
+}
+
+static __inline__ long atomic64_inc_return(atomic64_t *v)
+{
+ long t;
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1: ldarx %0,0,%1 # atomic64_inc_return\n\
+ addic %0,%0,1\n\
+ stdcx. %0,0,%1 \n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (t)
+ : "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+/*
+ * atomic64_inc_and_test - increment and test
+ * @v: pointer of type atomic64_t
+ *
+ * Atomically increments @v by 1
+ * and returns true if the result is zero, or false for all
+ * other cases.
+ */
+#define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
+
+static __inline__ void atomic64_dec(atomic64_t *v)
+{
+ long t;
+
+ __asm__ __volatile__(
+"1: ldarx %0,0,%2 # atomic64_dec\n\
+ addic %0,%0,-1\n\
+ stdcx. %0,0,%2\n\
+ bne- 1b"
+ : "=&r" (t), "=m" (v->counter)
+ : "r" (&v->counter), "m" (v->counter)
+ : "cc");
+}
+
+static __inline__ long atomic64_dec_return(atomic64_t *v)
+{
+ long t;
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1: ldarx %0,0,%1 # atomic64_dec_return\n\
+ addic %0,%0,-1\n\
+ stdcx. %0,0,%1\n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ : "=&r" (t)
+ : "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+#define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
+#define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
+
+/*
+ * Atomically test *v and decrement if it is greater than 0.
+ * The function returns the old value of *v minus 1.
+ */
+static __inline__ long atomic64_dec_if_positive(atomic64_t *v)
+{
+ long t;
+
+ __asm__ __volatile__(
+ EIEIO_ON_SMP
+"1: ldarx %0,0,%1 # atomic64_dec_if_positive\n\
+ addic. %0,%0,-1\n\
+ blt- 2f\n\
+ stdcx. %0,0,%1\n\
+ bne- 1b"
+ ISYNC_ON_SMP
+ "\n\
+2:" : "=&r" (t)
+ : "r" (&v->counter)
+ : "cc", "memory");
+
+ return t;
+}
+
+#endif /* __powerpc64__ */
+
#endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_ATOMIC_H_ */
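
The new 64-bit atomics mirror the existing atomic_t calling conventions and, as the #ifdef above makes clear, are only available on __powerpc64__. A small illustrative sketch with a hypothetical counter, not taken from the patch:

        /* Illustrative only -- not part of this patch. */
        #include <asm/atomic.h>

        #ifdef __powerpc64__
        static atomic64_t example_rx_bytes = ATOMIC64_INIT(0);

        static void example_on_packet(long len)
        {
                atomic64_add(len, &example_rx_bytes);
        }

        static long example_read_and_reset(void)
        {
                long total = atomic64_read(&example_rx_bytes);

                atomic64_sub(total, &example_rx_bytes); /* concurrent adds survive */
                return total;
        }
        #endif
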
diff --git a/include/asm-powerpc/bitops.h b/include/asm-powerpc/bitops.h
index dc25c53704d..5727229b044 100644
--- a/include/asm-powerpc/bitops.h
+++ b/include/asm-powerpc/bitops.h
@@ -40,6 +40,7 @@
#include <linux/compiler.h>
#include <asm/atomic.h>
+#include <asm/asm-compat.h>
#include <asm/synch.h>
/*
@@ -52,16 +53,6 @@
#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG)
#define BITOP_LE_SWIZZLE ((BITS_PER_LONG-1) & ~0x7)
-#ifdef CONFIG_PPC64
-#define LARXL "ldarx"
-#define STCXL "stdcx."
-#define CNTLZL "cntlzd"
-#else
-#define LARXL "lwarx"
-#define STCXL "stwcx."
-#define CNTLZL "cntlzw"
-#endif
-
static __inline__ void set_bit(int nr, volatile unsigned long *addr)
{
unsigned long old;
@@ -69,10 +60,10 @@ static __inline__ void set_bit(int nr, volatile unsigned long *addr)
unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
__asm__ __volatile__(
-"1:" LARXL " %0,0,%3 # set_bit\n"
+"1:" PPC_LLARX "%0,0,%3 # set_bit\n"
"or %0,%0,%2\n"
PPC405_ERR77(0,%3)
- STCXL " %0,0,%3\n"
+ PPC_STLCX "%0,0,%3\n"
"bne- 1b"
: "=&r"(old), "=m"(*p)
: "r"(mask), "r"(p), "m"(*p)
@@ -86,10 +77,10 @@ static __inline__ void clear_bit(int nr, volatile unsigned long *addr)
unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
__asm__ __volatile__(
-"1:" LARXL " %0,0,%3 # set_bit\n"
+"1:" PPC_LLARX "%0,0,%3 # clear_bit\n"
"andc %0,%0,%2\n"
PPC405_ERR77(0,%3)
- STCXL " %0,0,%3\n"
+ PPC_STLCX "%0,0,%3\n"
"bne- 1b"
: "=&r"(old), "=m"(*p)
: "r"(mask), "r"(p), "m"(*p)
@@ -103,10 +94,10 @@ static __inline__ void change_bit(int nr, volatile unsigned long *addr)
unsigned long *p = ((unsigned long *)addr) + BITOP_WORD(nr);
__asm__ __volatile__(
-"1:" LARXL " %0,0,%3 # set_bit\n"
+"1:" PPC_LLARX "%0,0,%3 # change_bit\n"
"xor %0,%0,%2\n"
PPC405_ERR77(0,%3)
- STCXL " %0,0,%3\n"
+ PPC_STLCX "%0,0,%3\n"
"bne- 1b"
: "=&r"(old), "=m"(*p)
: "r"(mask), "r"(p), "m"(*p)
@@ -122,10 +113,10 @@ static __inline__ int test_and_set_bit(unsigned long nr,
__asm__ __volatile__(
EIEIO_ON_SMP
-"1:" LARXL " %0,0,%3 # test_and_set_bit\n"
+"1:" PPC_LLARX "%0,0,%3 # test_and_set_bit\n"
"or %1,%0,%2 \n"
PPC405_ERR77(0,%3)
- STCXL " %1,0,%3 \n"
+ PPC_STLCX "%1,0,%3 \n"
"bne- 1b"
ISYNC_ON_SMP
: "=&r" (old), "=&r" (t)
@@ -144,10 +135,10 @@ static __inline__ int test_and_clear_bit(unsigned long nr,
__asm__ __volatile__(
EIEIO_ON_SMP
-"1:" LARXL " %0,0,%3 # test_and_clear_bit\n"
+"1:" PPC_LLARX "%0,0,%3 # test_and_clear_bit\n"
"andc %1,%0,%2 \n"
PPC405_ERR77(0,%3)
- STCXL " %1,0,%3 \n"
+ PPC_STLCX "%1,0,%3 \n"
"bne- 1b"
ISYNC_ON_SMP
: "=&r" (old), "=&r" (t)
@@ -166,10 +157,10 @@ static __inline__ int test_and_change_bit(unsigned long nr,
__asm__ __volatile__(
EIEIO_ON_SMP
-"1:" LARXL " %0,0,%3 # test_and_change_bit\n"
+"1:" PPC_LLARX "%0,0,%3 # test_and_change_bit\n"
"xor %1,%0,%2 \n"
PPC405_ERR77(0,%3)
- STCXL " %1,0,%3 \n"
+ PPC_STLCX "%1,0,%3 \n"
"bne- 1b"
ISYNC_ON_SMP
: "=&r" (old), "=&r" (t)
@@ -184,9 +175,9 @@ static __inline__ void set_bits(unsigned long mask, unsigned long *addr)
unsigned long old;
__asm__ __volatile__(
-"1:" LARXL " %0,0,%3 # set_bit\n"
+"1:" PPC_LLARX "%0,0,%3 # set_bits\n"
"or %0,%0,%2\n"
- STCXL " %0,0,%3\n"
+ PPC_STLCX "%0,0,%3\n"
"bne- 1b"
: "=&r" (old), "=m" (*addr)
: "r" (mask), "r" (addr), "m" (*addr)
@@ -268,7 +259,7 @@ static __inline__ int __ilog2(unsigned long x)
{
int lz;
- asm (CNTLZL " %0,%1" : "=r" (lz) : "r" (x));
+ asm (PPC_CNTLZL "%0,%1" : "=r" (lz) : "r" (x));
return BITS_PER_LONG - 1 - lz;
}
diff --git a/include/asm-powerpc/bug.h b/include/asm-powerpc/bug.h
index d625ee55f95..b001ecb3cd9 100644
--- a/include/asm-powerpc/bug.h
+++ b/include/asm-powerpc/bug.h
@@ -1,6 +1,7 @@
#ifndef _ASM_POWERPC_BUG_H
#define _ASM_POWERPC_BUG_H
+#include <asm/asm-compat.h>
/*
* Define an illegal instr to trap on the bug.
* We don't use 0 because that marks the end of a function
@@ -11,14 +12,6 @@
#ifndef __ASSEMBLY__
-#ifdef __powerpc64__
-#define BUG_TABLE_ENTRY ".llong"
-#define BUG_TRAP_OP "tdnei"
-#else
-#define BUG_TABLE_ENTRY ".long"
-#define BUG_TRAP_OP "twnei"
-#endif /* __powerpc64__ */
-
struct bug_entry {
unsigned long bug_addr;
long line;
@@ -40,16 +33,16 @@ struct bug_entry *find_bug(unsigned long bugaddr);
__asm__ __volatile__( \
"1: twi 31,0,0\n" \
".section __bug_table,\"a\"\n" \
- "\t"BUG_TABLE_ENTRY" 1b,%0,%1,%2\n" \
+ "\t"PPC_LONG" 1b,%0,%1,%2\n" \
".previous" \
: : "i" (__LINE__), "i" (__FILE__), "i" (__FUNCTION__)); \
} while (0)
#define BUG_ON(x) do { \
__asm__ __volatile__( \
- "1: "BUG_TRAP_OP" %0,0\n" \
+ "1: "PPC_TLNEI" %0,0\n" \
".section __bug_table,\"a\"\n" \
- "\t"BUG_TABLE_ENTRY" 1b,%1,%2,%3\n" \
+ "\t"PPC_LONG" 1b,%1,%2,%3\n" \
".previous" \
: : "r" ((long)(x)), "i" (__LINE__), \
"i" (__FILE__), "i" (__FUNCTION__)); \
@@ -57,9 +50,9 @@ struct bug_entry *find_bug(unsigned long bugaddr);
#define WARN_ON(x) do { \
__asm__ __volatile__( \
- "1: "BUG_TRAP_OP" %0,0\n" \
+ "1: "PPC_TLNEI" %0,0\n" \
".section __bug_table,\"a\"\n" \
- "\t"BUG_TABLE_ENTRY" 1b,%1,%2,%3\n" \
+ "\t"PPC_LONG" 1b,%1,%2,%3\n" \
".previous" \
: : "r" ((long)(x)), \
"i" (__LINE__ + BUG_WARNING_TRAP), \
diff --git a/include/asm-powerpc/cache.h b/include/asm-powerpc/cache.h
new file mode 100644
index 00000000000..26ce502e76e
--- /dev/null
+++ b/include/asm-powerpc/cache.h
@@ -0,0 +1,40 @@
+#ifndef _ASM_POWERPC_CACHE_H
+#define _ASM_POWERPC_CACHE_H
+
+#ifdef __KERNEL__
+
+#include <linux/config.h>
+
+/* bytes per L1 cache line */
+#if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
+#define L1_CACHE_SHIFT 4
+#define MAX_COPY_PREFETCH 1
+#elif defined(CONFIG_PPC32)
+#define L1_CACHE_SHIFT 5
+#define MAX_COPY_PREFETCH 4
+#else /* CONFIG_PPC64 */
+#define L1_CACHE_SHIFT 7
+#endif
+
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+#define SMP_CACHE_BYTES L1_CACHE_BYTES
+#define L1_CACHE_SHIFT_MAX 7 /* largest L1 which this arch supports */
+
+#if defined(__powerpc64__) && !defined(__ASSEMBLY__)
+struct ppc64_caches {
+ u32 dsize; /* L1 d-cache size */
+ u32 dline_size; /* L1 d-cache line size */
+ u32 log_dline_size;
+ u32 dlines_per_page;
+ u32 isize; /* L1 i-cache size */
+ u32 iline_size; /* L1 i-cache line size */
+ u32 log_iline_size;
+ u32 ilines_per_page;
+};
+
+extern struct ppc64_caches ppc64_caches;
+#endif /* __powerpc64__ && ! __ASSEMBLY__ */
+
+#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_CACHE_H */
diff --git a/include/asm-ppc64/cacheflush.h b/include/asm-powerpc/cacheflush.h
index ffbc08be8e5..8a740c88d93 100644
--- a/include/asm-ppc64/cacheflush.h
+++ b/include/asm-powerpc/cacheflush.h
@@ -1,13 +1,20 @@
-#ifndef _PPC64_CACHEFLUSH_H
-#define _PPC64_CACHEFLUSH_H
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#ifndef _ASM_POWERPC_CACHEFLUSH_H
+#define _ASM_POWERPC_CACHEFLUSH_H
+
+#ifdef __KERNEL__
#include <linux/mm.h>
#include <asm/cputable.h>
/*
- * No cache flushing is required when address mappings are
- * changed, because the caches on PowerPCs are physically
- * addressed.
+ * No cache flushing is required when address mappings are changed,
+ * because the caches on PowerPCs are physically addressed.
*/
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
@@ -22,27 +29,40 @@ extern void flush_dcache_page(struct page *page);
#define flush_dcache_mmap_unlock(mapping) do { } while (0)
extern void __flush_icache_range(unsigned long, unsigned long);
+static inline void flush_icache_range(unsigned long start, unsigned long stop)
+{
+ if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
+ __flush_icache_range(start, stop);
+}
+
extern void flush_icache_user_range(struct vm_area_struct *vma,
struct page *page, unsigned long addr,
int len);
+extern void __flush_dcache_icache(void *page_va);
+extern void flush_dcache_icache_page(struct page *page);
+#if defined(CONFIG_PPC32) && !defined(CONFIG_BOOKE)
+extern void __flush_dcache_icache_phys(unsigned long physaddr);
+#endif /* CONFIG_PPC32 && !CONFIG_BOOKE */
extern void flush_dcache_range(unsigned long start, unsigned long stop);
-extern void flush_dcache_phys_range(unsigned long start, unsigned long stop);
+#ifdef CONFIG_PPC32
+extern void clean_dcache_range(unsigned long start, unsigned long stop);
+extern void invalidate_dcache_range(unsigned long start, unsigned long stop);
+#endif /* CONFIG_PPC32 */
+#ifdef CONFIG_PPC64
extern void flush_inval_dcache_range(unsigned long start, unsigned long stop);
+extern void flush_dcache_phys_range(unsigned long start, unsigned long stop);
+#endif
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-do { memcpy(dst, src, len); \
- flush_icache_user_range(vma, page, vaddr, len); \
-} while (0)
+ do { \
+ memcpy(dst, src, len); \
+ flush_icache_user_range(vma, page, vaddr, len); \
+ } while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
memcpy(dst, src, len)
-extern void __flush_dcache_icache(void *page_va);
-static inline void flush_icache_range(unsigned long start, unsigned long stop)
-{
- if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
- __flush_icache_range(start, stop);
-}
+#endif /* __KERNEL__ */
-#endif /* _PPC64_CACHEFLUSH_H */
+#endif /* _ASM_POWERPC_CACHEFLUSH_H */
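The flush_icache_range() inline added above turns into a no-op when the CPU advertises a coherent instruction cache. A minimal userspace sketch of that feature-gated pattern; cpu_has_feature(), CPU_FTR_COHERENT_ICACHE and the flush body are stand-ins here, not the kernel's implementations:

#include <stdio.h>

#define CPU_FTR_COHERENT_ICACHE (1UL << 0)      /* stand-in bit, not the kernel value */

static unsigned long cur_cpu_features = CPU_FTR_COHERENT_ICACHE;

static int cpu_has_feature(unsigned long feature)
{
        return (cur_cpu_features & feature) != 0;
}

static void __flush_icache_range(unsigned long start, unsigned long stop)
{
        printf("flushing icache %#lx-%#lx\n", start, stop);
}

static void flush_icache_range(unsigned long start, unsigned long stop)
{
        /* coherent i-cache: skip the expensive flush entirely */
        if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                __flush_icache_range(start, stop);
}

int main(void)
{
        flush_icache_range(0x1000, 0x2000);     /* silent: feature bit is set */
        cur_cpu_features = 0;
        flush_icache_range(0x1000, 0x2000);     /* now the flush runs */
        return 0;
}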
diff --git a/include/asm-ppc64/compat.h b/include/asm-powerpc/compat.h
index 6ec62cd2d1d..4db4360c4d4 100644
--- a/include/asm-ppc64/compat.h
+++ b/include/asm-powerpc/compat.h
@@ -1,5 +1,5 @@
-#ifndef _ASM_PPC64_COMPAT_H
-#define _ASM_PPC64_COMPAT_H
+#ifndef _ASM_POWERPC_COMPAT_H
+#define _ASM_POWERPC_COMPAT_H
/*
* Architecture specific compatibility types
*/
@@ -49,7 +49,7 @@ struct compat_stat {
compat_dev_t st_dev;
compat_ino_t st_ino;
compat_mode_t st_mode;
- compat_nlink_t st_nlink;
+ compat_nlink_t st_nlink;
__compat_uid32_t st_uid;
__compat_gid32_t st_gid;
compat_dev_t st_rdev;
@@ -202,4 +202,4 @@ struct compat_shmid64_ds {
compat_ulong_t __unused6;
};
-#endif /* _ASM_PPC64_COMPAT_H */
+#endif /* _ASM_POWERPC_COMPAT_H */
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index 79a0556a0ab..04e2726002c 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -2,7 +2,7 @@
#define __ASM_POWERPC_CPUTABLE_H
#include <linux/config.h>
-#include <asm/ppc_asm.h> /* for ASM_CONST */
+#include <asm/asm-compat.h>
#define PPC_FEATURE_32 0x80000000
#define PPC_FEATURE_64 0x40000000
@@ -16,6 +16,10 @@
#define PPC_FEATURE_HAS_EFP_SINGLE 0x00400000
#define PPC_FEATURE_HAS_EFP_DOUBLE 0x00200000
#define PPC_FEATURE_NO_TB 0x00100000
+#define PPC_FEATURE_POWER4 0x00080000
+#define PPC_FEATURE_POWER5 0x00040000
+#define PPC_FEATURE_POWER5_PLUS 0x00020000
+#define PPC_FEATURE_CELL 0x00010000
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
diff --git a/include/asm-powerpc/current.h b/include/asm-powerpc/current.h
new file mode 100644
index 00000000000..82cd4a9ca99
--- /dev/null
+++ b/include/asm-powerpc/current.h
@@ -0,0 +1,27 @@
+#ifndef _ASM_POWERPC_CURRENT_H
+#define _ASM_POWERPC_CURRENT_H
+
+/*
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+struct task_struct;
+
+#ifdef __powerpc64__
+#include <asm/paca.h>
+
+#define current (get_paca()->__current)
+
+#else
+
+/*
+ * We keep `current' in r2 for speed.
+ */
+register struct task_struct *current asm ("r2");
+
+#endif
+
+#endif /* _ASM_POWERPC_CURRENT_H */
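On 64-bit, the header above reaches `current` through the per-processor PACA rather than a dedicated register. A userspace sketch of that indirection, with made-up paca_struct/get_paca() stand-ins rather than the kernel's:

#include <stdio.h>

struct task_struct { int pid; };

struct paca_struct { struct task_struct *__current; };

static struct paca_struct this_cpu_paca;        /* one per CPU in the real kernel */

static struct paca_struct *get_paca(void)
{
        return &this_cpu_paca;
}

#define current (get_paca()->__current)

int main(void)
{
        struct task_struct init_task = { .pid = 1 };

        this_cpu_paca.__current = &init_task;
        printf("current pid = %d\n", current->pid);
        return 0;
}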
diff --git a/include/asm-powerpc/eeh_event.h b/include/asm-powerpc/eeh_event.h
new file mode 100644
index 00000000000..d168a30b386
--- /dev/null
+++ b/include/asm-powerpc/eeh_event.h
@@ -0,0 +1,52 @@
+/*
+ * eeh_event.h
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ *
+ * Copyright (c) 2005 Linas Vepstas <linas@linas.org>
+ */
+
+#ifndef ASM_PPC64_EEH_EVENT_H
+#define ASM_PPC64_EEH_EVENT_H
+
+/** EEH event -- structure holding pci controller data that describes
+ * a change in the isolation status of a PCI slot. A pointer
+ * to this struct is passed as the data pointer in a notify callback.
+ */
+struct eeh_event {
+ struct list_head list;
+ struct device_node *dn; /* struct device node */
+ struct pci_dev *dev; /* affected device */
+ int state;
+ int time_unavail; /* milliseconds until device might be available */
+};
+
+/**
+ * eeh_send_failure_event - generate a PCI error event
+ * @dev pci device
+ *
+ * This routine builds a PCI error event which will be delivered
+ * to all listeners on the eeh_notifier_chain.
+ *
+ * This routine can be called within an interrupt context;
+ * the actual event will be delivered in a normal context
+ * (from a workqueue).
+ */
+int eeh_send_failure_event (struct device_node *dn,
+ struct pci_dev *dev,
+ int reset_state,
+ int time_unavail);
+
+#endif /* ASM_PPC64_EEH_EVENT_H */
diff --git a/include/asm-powerpc/firmware.h b/include/asm-powerpc/firmware.h
index 806c142ae9e..12fabbcb04f 100644
--- a/include/asm-powerpc/firmware.h
+++ b/include/asm-powerpc/firmware.h
@@ -43,6 +43,7 @@
#define FW_FEATURE_ISERIES (1UL<<21)
enum {
+#ifdef CONFIG_PPC64
FW_FEATURE_PSERIES_POSSIBLE = FW_FEATURE_PFT | FW_FEATURE_TCE |
FW_FEATURE_SPRG0 | FW_FEATURE_DABR | FW_FEATURE_COPY |
FW_FEATURE_ASR | FW_FEATURE_DEBUG | FW_FEATURE_TERM |
@@ -70,6 +71,11 @@ enum {
FW_FEATURE_ISERIES_ALWAYS &
#endif
FW_FEATURE_POSSIBLE,
+
+#else /* CONFIG_PPC64 */
+ FW_FEATURE_POSSIBLE = 0,
+ FW_FEATURE_ALWAYS = 0,
+#endif
};
/* This is used to identify firmware features which are available
diff --git a/include/asm-powerpc/futex.h b/include/asm-powerpc/futex.h
index 37c94e52ab6..f0319d50b12 100644
--- a/include/asm-powerpc/futex.h
+++ b/include/asm-powerpc/futex.h
@@ -7,13 +7,14 @@
#include <asm/errno.h>
#include <asm/synch.h>
#include <asm/uaccess.h>
-#include <asm/ppc_asm.h>
+#include <asm/asm-compat.h>
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
__asm__ __volatile ( \
SYNC_ON_SMP \
"1: lwarx %0,0,%2\n" \
insn \
+ PPC405_ERR77(0, %2) \
"2: stwcx. %1,0,%2\n" \
"bne- 1b\n" \
"li %1,0\n" \
@@ -23,7 +24,7 @@
".previous\n" \
".section __ex_table,\"a\"\n" \
".align 3\n" \
- DATAL " 1b,4b,2b,4b\n" \
+ PPC_LONG "1b,4b,2b,4b\n" \
".previous" \
: "=&r" (oldval), "=&r" (ret) \
: "b" (uaddr), "i" (-EFAULT), "1" (oparg) \
diff --git a/include/asm-ppc64/hvcall.h b/include/asm-powerpc/hvcall.h
index ab7c3cf2488..d36da61dbc5 100644
--- a/include/asm-ppc64/hvcall.h
+++ b/include/asm-powerpc/hvcall.h
@@ -1,5 +1,5 @@
-#ifndef _PPC64_HVCALL_H
-#define _PPC64_HVCALL_H
+#ifndef _ASM_POWERPC_HVCALL_H
+#define _ASM_POWERPC_HVCALL_H
#define HVSC .long 0x44000022
@@ -138,7 +138,7 @@ long plpar_hcall(unsigned long opcode,
*/
long plpar_hcall_norets(unsigned long opcode, ...);
-/*
+/*
* Special hcall interface for ibmveth support.
* Takes 8 input parms. Returns a rc and stores the
* R4 return value in *out1.
@@ -153,11 +153,11 @@ long plpar_hcall_8arg_2ret(unsigned long opcode,
unsigned long arg7,
unsigned long arg8,
unsigned long *out1);
-
+
/* plpar_hcall_4out()
*
- * same as plpar_hcall except with 4 output arguments.
- *
+ * same as plpar_hcall except with 4 output arguments.
+ *
*/
long plpar_hcall_4out(unsigned long opcode,
unsigned long arg1,
@@ -170,4 +170,4 @@ long plpar_hcall_4out(unsigned long opcode,
unsigned long *out4);
#endif /* __ASSEMBLY__ */
-#endif /* _PPC64_HVCALL_H */
+#endif /* _ASM_POWERPC_HVCALL_H */
diff --git a/include/asm-powerpc/hw_irq.h b/include/asm-powerpc/hw_irq.h
index c37b31b9633..26b89d859c5 100644
--- a/include/asm-powerpc/hw_irq.h
+++ b/include/asm-powerpc/hw_irq.h
@@ -12,7 +12,6 @@
#include <asm/processor.h>
extern void timer_interrupt(struct pt_regs *);
-extern void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq);
#ifdef CONFIG_PPC_ISERIES
diff --git a/include/asm-powerpc/irq.h b/include/asm-powerpc/irq.h
index b3935ea28ff..c9fbcede0ef 100644
--- a/include/asm-powerpc/irq.h
+++ b/include/asm-powerpc/irq.h
@@ -429,7 +429,6 @@ extern u64 ppc64_interrupt_controller;
#define NR_MASK_WORDS ((NR_IRQS + 31) / 32)
/* pedantic: these are long because they are used with set_bit --RR */
extern unsigned long ppc_cached_irq_mask[NR_MASK_WORDS];
-extern unsigned long ppc_lost_interrupts[NR_MASK_WORDS];
extern atomic_t ppc_n_lost_interrupts;
#define virt_irq_create_mapping(x) (x)
@@ -488,8 +487,8 @@ extern struct thread_info *softirq_ctx[NR_CPUS];
extern void irq_ctx_init(void);
extern void call_do_softirq(struct thread_info *tp);
-extern int call_handle_IRQ_event(int irq, struct pt_regs *regs,
- struct irqaction *action, struct thread_info *tp);
+extern int call___do_IRQ(int irq, struct pt_regs *regs,
+ struct thread_info *tp);
#define __ARCH_HAS_DO_SOFTIRQ
diff --git a/include/asm-ppc64/lppaca.h b/include/asm-powerpc/lppaca.h
index 9e2a6c0649a..c1bedab1515 100644
--- a/include/asm-ppc64/lppaca.h
+++ b/include/asm-powerpc/lppaca.h
@@ -16,8 +16,8 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#ifndef _ASM_LPPACA_H
-#define _ASM_LPPACA_H
+#ifndef _ASM_POWERPC_LPPACA_H
+#define _ASM_POWERPC_LPPACA_H
//=============================================================================
//
@@ -28,8 +28,7 @@
//----------------------------------------------------------------------------
#include <asm/types.h>
-struct lppaca
-{
+struct lppaca {
//=============================================================================
// CACHE_LINE_1 0x0000 - 0x007F Contains read-only data
// NOTE: The xDynXyz fields are fields that will be dynamically changed by
@@ -129,4 +128,4 @@ struct lppaca
u8 pmc_save_area[256]; // PMC interrupt Area x00-xFF
};
-#endif /* _ASM_LPPACA_H */
+#endif /* _ASM_POWERPC_LPPACA_H */
diff --git a/include/asm-ppc64/paca.h b/include/asm-powerpc/paca.h
index bccacd6aa93..92c765c35bd 100644
--- a/include/asm-ppc64/paca.h
+++ b/include/asm-powerpc/paca.h
@@ -1,11 +1,8 @@
-#ifndef _PPC64_PACA_H
-#define _PPC64_PACA_H
-
/*
- * include/asm-ppc64/paca.h
+ * include/asm-powerpc/paca.h
*
- * This control block defines the PACA which defines the processor
- * specific data for each logical processor on the system.
+ * This control block defines the PACA which defines the processor
+ * specific data for each logical processor on the system.
* There are some pointers defined that are utilized by PLIC.
*
* C 2001 PPC 64 Team, IBM Corp
@@ -14,7 +11,9 @@
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
- */
+ */
+#ifndef _ASM_POWERPC_PACA_H
+#define _ASM_POWERPC_PACA_H
#include <linux/config.h>
#include <asm/types.h>
@@ -118,4 +117,4 @@ struct paca_struct {
extern struct paca_struct paca[];
-#endif /* _PPC64_PACA_H */
+#endif /* _ASM_POWERPC_PACA_H */
diff --git a/include/asm-powerpc/ppc-pci.h b/include/asm-powerpc/ppc-pci.h
index 13aacff755f..9896fade98a 100644
--- a/include/asm-powerpc/ppc-pci.h
+++ b/include/asm-powerpc/ppc-pci.h
@@ -26,6 +26,10 @@ extern unsigned long find_and_init_phbs(void);
extern struct pci_dev *ppc64_isabridge_dev; /* may be NULL if no ISA bus */
+/** Bus Unit ID macros; get low and hi 32-bits of the 64-bit BUID */
+#define BUID_HI(buid) ((buid) >> 32)
+#define BUID_LO(buid) ((buid) & 0xffffffff)
+
/* PCI device_node operations */
struct device_node;
typedef void *(*traverse_func)(struct device_node *me, void *data);
@@ -36,10 +40,6 @@ void pci_devs_phb_init(void);
void pci_devs_phb_init_dynamic(struct pci_controller *phb);
void __devinit scan_phb(struct pci_controller *hose);
-/* PCI address cache management routines */
-void pci_addr_cache_insert_device(struct pci_dev *dev);
-void pci_addr_cache_remove_device(struct pci_dev *dev);
-
/* From rtas_pci.h */
void init_pci_config_tokens (void);
unsigned long get_phb_buid (struct device_node *);
@@ -52,4 +52,48 @@ extern unsigned long pci_probe_only;
extern unsigned long pci_assign_all_buses;
extern int pci_read_irq_line(struct pci_dev *pci_dev);
+/* ---- EEH internal-use-only related routines ---- */
+#ifdef CONFIG_EEH
+/**
+ * rtas_set_slot_reset -- unfreeze a frozen slot
+ *
+ * Clear the EEH-frozen condition on a slot. This routine
+ * does this by asserting the PCI #RST line for 1/8th of
+ * a second; this routine will sleep while the adapter is
+ * being reset.
+ */
+void rtas_set_slot_reset (struct pci_dn *);
+
+/**
+ * eeh_restore_bars - Restore device configuration info.
+ *
+ * A reset of a PCI device will clear out its config space.
+ * This routine will restore the config space for this
+ * device, and its children, to values previously obtained
+ * from the firmware.
+ */
+void eeh_restore_bars(struct pci_dn *);
+
+/**
+ * rtas_configure_bridge -- firmware initialization of pci bridge
+ *
+ * Ask the firmware to configure all PCI bridge devices
+ * located behind the indicated node. Required after a
+ * pci device reset. Does essentially the same thing as
+ * eeh_restore_bars, but for bridges, and lets firmware
+ * do the work.
+ */
+void rtas_configure_bridge(struct pci_dn *);
+
+int rtas_write_config(struct pci_dn *, int where, int size, u32 val);
+
+/**
+ * mark and clear slots: find "partition endpoint" PE and set or
+ * clear the flags for each subnode of the PE.
+ */
+void eeh_mark_slot (struct device_node *dn, int mode_flag);
+void eeh_clear_slot (struct device_node *dn, int mode_flag);
+
+#endif
+
#endif /* _ASM_POWERPC_PPC_PCI_H */
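The BUID_HI/BUID_LO macros added above simply split a 64-bit Bus Unit ID into its two halves. A quick standalone check, with the macros copied locally and an arbitrary example value:

#include <stdio.h>
#include <stdint.h>

#define BUID_HI(buid) ((buid) >> 32)
#define BUID_LO(buid) ((buid) & 0xffffffff)

int main(void)
{
        uint64_t buid = 0x123456789abcdef0ULL;  /* arbitrary example value */

        printf("hi=%#llx lo=%#llx\n",
               (unsigned long long)BUID_HI(buid),
               (unsigned long long)BUID_LO(buid));
        return 0;
}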
diff --git a/include/asm-powerpc/ppc_asm.h b/include/asm-powerpc/ppc_asm.h
index c534ca41224..c27baa0563f 100644
--- a/include/asm-powerpc/ppc_asm.h
+++ b/include/asm-powerpc/ppc_asm.h
@@ -6,8 +6,13 @@
#include <linux/stringify.h>
#include <linux/config.h>
+#include <asm/asm-compat.h>
-#ifdef __ASSEMBLY__
+#ifndef __ASSEMBLY__
+#error __FILE__ should only be used in assembler files
+#else
+
+#define SZL (BITS_PER_LONG/8)
/*
* Macros for storing registers into and loading registers from
@@ -184,12 +189,6 @@ n:
oris reg,reg,(label)@h; \
ori reg,reg,(label)@l;
-/* operations for longs and pointers */
-#define LDL ld
-#define STL std
-#define CMPI cmpdi
-#define SZL 8
-
/* offsets for stack frame layout */
#define LRSAVE 16
@@ -203,12 +202,6 @@ n:
#define OFF(name) name@l
-/* operations for longs and pointers */
-#define LDL lwz
-#define STL stw
-#define CMPI cmpwi
-#define SZL 4
-
/* offsets for stack frame layout */
#define LRSAVE 4
@@ -266,15 +259,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
#endif
-#ifdef CONFIG_IBM405_ERR77
-#define PPC405_ERR77(ra,rb) dcbt ra, rb;
-#define PPC405_ERR77_SYNC sync;
-#else
-#define PPC405_ERR77(ra,rb)
-#define PPC405_ERR77_SYNC
-#endif
-
-
#ifdef CONFIG_IBM440EP_ERR42
#define PPC440EP_ERR42 isync
#else
@@ -502,17 +486,6 @@ END_FTR_SECTION_IFCLR(CPU_FTR_601)
#define N_SLINE 68
#define N_SO 100
-#define ASM_CONST(x) x
-#else
- #define __ASM_CONST(x) x##UL
- #define ASM_CONST(x) __ASM_CONST(x)
-
-#ifdef CONFIG_PPC64
-#define DATAL ".llong"
-#else
-#define DATAL ".long"
-#endif
-
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PPC_ASM_H */
diff --git a/include/asm-powerpc/processor.h b/include/asm-powerpc/processor.h
index 1dc4bf7b52b..f6f186b56b0 100644
--- a/include/asm-powerpc/processor.h
+++ b/include/asm-powerpc/processor.h
@@ -17,65 +17,71 @@
#include <linux/compiler.h>
#include <asm/ptrace.h>
#include <asm/types.h>
-#ifdef CONFIG_PPC64
-#include <asm/systemcfg.h>
-#endif
-#ifdef CONFIG_PPC32
-/* 32-bit platform types */
-/* We only need to define a new _MACH_xxx for machines which are part of
- * a configuration which supports more than one type of different machine.
- * This is currently limited to CONFIG_PPC_MULTIPLATFORM and CHRP/PReP/PMac.
- * -- Tom
+/* We do _not_ want to define new machine types at all, those must die
+ * in favor of using the device-tree
+ * -- BenH.
*/
-#define _MACH_prep 0x00000001
-#define _MACH_Pmac 0x00000002 /* pmac or pmac clone (non-chrp) */
-#define _MACH_chrp 0x00000004 /* chrp machine */
-/* see residual.h for these */
+/* Platforms codes (to be obsoleted) */
+#define PLATFORM_PSERIES 0x0100
+#define PLATFORM_PSERIES_LPAR 0x0101
+#define PLATFORM_ISERIES_LPAR 0x0201
+#define PLATFORM_LPAR 0x0001
+#define PLATFORM_POWERMAC 0x0400
+#define PLATFORM_MAPLE 0x0500
+#define PLATFORM_PREP 0x0600
+#define PLATFORM_CHRP 0x0700
+#define PLATFORM_CELL 0x1000
+
+/* Compat platform codes for 32 bits */
+#define _MACH_prep PLATFORM_PREP
+#define _MACH_Pmac PLATFORM_POWERMAC
+#define _MACH_chrp PLATFORM_CHRP
+
+/* PREP sub-platform types see residual.h for these */
#define _PREP_Motorola 0x01 /* motorola prep */
#define _PREP_Firm 0x02 /* firmworks prep */
#define _PREP_IBM 0x00 /* ibm prep */
#define _PREP_Bull 0x03 /* bull prep */
-/* these are arbitrary */
+/* CHRP sub-platform types. These are arbitrary */
#define _CHRP_Motorola 0x04 /* motorola chrp, the cobra */
#define _CHRP_IBM 0x05 /* IBM chrp, the longtrail and longtrail 2 */
#define _CHRP_Pegasos 0x06 /* Genesi/bplan's Pegasos and Pegasos2 */
-#ifdef CONFIG_PPC_MULTIPLATFORM
+#define platform_is_pseries() (_machine == PLATFORM_PSERIES || \
+ _machine == PLATFORM_PSERIES_LPAR)
+#define platform_is_lpar() (!!(_machine & PLATFORM_LPAR))
+
+#if defined(CONFIG_PPC_MULTIPLATFORM)
extern int _machine;
+#ifdef CONFIG_PPC32
+
/* what kind of prep workstation we are */
extern int _prep_type;
extern int _chrp_type;
/*
* This is used to identify the board type from a given PReP board
- * vendor. Board revision is also made available.
+ * vendor. Board revision is also made available. This will be moved
+ * elsewhere soon
*/
extern unsigned char ucSystemType;
extern unsigned char ucBoardRev;
extern unsigned char ucBoardRevMaj, ucBoardRevMin;
+
+#endif /* CONFIG_PPC32 */
+
+#elif defined(CONFIG_PPC_ISERIES)
+/*
+ * iSeries is soon to become MULTIPLATFORM hopefully ...
+ */
+#define _machine PLATFORM_ISERIES_LPAR
#else
#define _machine 0
#endif /* CONFIG_PPC_MULTIPLATFORM */
-#endif /* CONFIG_PPC32 */
-
-#ifdef CONFIG_PPC64
-/* Platforms supported by PPC64 */
-#define PLATFORM_PSERIES 0x0100
-#define PLATFORM_PSERIES_LPAR 0x0101
-#define PLATFORM_ISERIES_LPAR 0x0201
-#define PLATFORM_LPAR 0x0001
-#define PLATFORM_POWERMAC 0x0400
-#define PLATFORM_MAPLE 0x0500
-#define PLATFORM_CELL 0x1000
-
-/* Compatibility with drivers coming from PPC32 world */
-#define _machine (systemcfg->platform)
-#define _MACH_Pmac PLATFORM_POWERMAC
-#endif
/*
* Default implementation of macro that returns current
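The platform_is_lpar() test introduced above relies on the LPAR property being encoded in the low bit of the platform code, so both the pSeries and iSeries LPAR values match it. A standalone sketch using the codes from the header; the kernel macro reads the global _machine, which is replaced by a function argument here:

#include <stdio.h>

#define PLATFORM_PSERIES       0x0100
#define PLATFORM_PSERIES_LPAR  0x0101
#define PLATFORM_ISERIES_LPAR  0x0201
#define PLATFORM_LPAR          0x0001

static int platform_is_lpar(int machine)
{
        return !!(machine & PLATFORM_LPAR);
}

int main(void)
{
        printf("pSeries:      %d\n", platform_is_lpar(PLATFORM_PSERIES));       /* 0 */
        printf("pSeries LPAR: %d\n", platform_is_lpar(PLATFORM_PSERIES_LPAR));  /* 1 */
        printf("iSeries LPAR: %d\n", platform_is_lpar(PLATFORM_ISERIES_LPAR));  /* 1 */
        return 0;
}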
diff --git a/include/asm-powerpc/reg.h b/include/asm-powerpc/reg.h
index 489cf4c99c2..eb392d038ed 100644
--- a/include/asm-powerpc/reg.h
+++ b/include/asm-powerpc/reg.h
@@ -16,7 +16,11 @@
/* Pickup Book E specific registers. */
#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
#include <asm/reg_booke.h>
-#endif
+#endif /* CONFIG_BOOKE || CONFIG_40x */
+
+#ifdef CONFIG_8xx
+#include <asm/reg_8xx.h>
+#endif /* CONFIG_8xx */
#define MSR_SF_LG 63 /* Enable 64 bit mode */
#define MSR_ISF_LG 61 /* Interrupt 64b mode valid on 630 */
@@ -359,6 +363,7 @@
#define SPRN_RPA 0x3D6 /* Required Physical Address Register */
#define SPRN_SDA 0x3BF /* Sampled Data Address Register */
#define SPRN_SDR1 0x019 /* MMU Hash Base Register */
+#define SPRN_ASR 0x118 /* Address Space Register */
#define SPRN_SIA 0x3BB /* Sampled Instruction Address Register */
#define SPRN_SPRG0 0x110 /* Special Purpose Register General 0 */
#define SPRN_SPRG1 0x111 /* Special Purpose Register General 1 */
diff --git a/include/asm-ppc/cache.h b/include/asm-powerpc/reg_8xx.h
index 7a157d0f4b5..e8ea346b21d 100644
--- a/include/asm-ppc/cache.h
+++ b/include/asm-powerpc/reg_8xx.h
@@ -1,49 +1,9 @@
/*
- * include/asm-ppc/cache.h
+ * Contains register definitions common to PowerPC 8xx CPUs. Notice
*/
-#ifdef __KERNEL__
-#ifndef __ARCH_PPC_CACHE_H
-#define __ARCH_PPC_CACHE_H
+#ifndef _ASM_POWERPC_REG_8xx_H
+#define _ASM_POWERPC_REG_8xx_H
-#include <linux/config.h>
-
-/* bytes per L1 cache line */
-#if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
-#define L1_CACHE_SHIFT 4
-#define MAX_COPY_PREFETCH 1
-#elif defined(CONFIG_PPC64BRIDGE)
-#define L1_CACHE_SHIFT 7
-#define MAX_COPY_PREFETCH 1
-#else
-#define L1_CACHE_SHIFT 5
-#define MAX_COPY_PREFETCH 4
-#endif
-
-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-
-#define SMP_CACHE_BYTES L1_CACHE_BYTES
-#define L1_CACHE_SHIFT_MAX 7 /* largest L1 which this arch supports */
-
-#define L1_CACHE_ALIGN(x) (((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
-#define L1_CACHE_PAGES 8
-
-#ifndef __ASSEMBLY__
-extern void clean_dcache_range(unsigned long start, unsigned long stop);
-extern void flush_dcache_range(unsigned long start, unsigned long stop);
-extern void invalidate_dcache_range(unsigned long start, unsigned long stop);
-extern void flush_dcache_all(void);
-#endif /* __ASSEMBLY__ */
-
-/* prep registers for L2 */
-#define CACHECRBA 0x80000823 /* Cache configuration register address */
-#define L2CACHE_MASK 0x03 /* Mask for 2 L2 Cache bits */
-#define L2CACHE_512KB 0x00 /* 512KB */
-#define L2CACHE_256KB 0x01 /* 256KB */
-#define L2CACHE_1MB 0x02 /* 1MB */
-#define L2CACHE_NONE 0x03 /* NONE */
-#define L2CACHE_PARITY 0x08 /* Mask for L2 Cache Parity Protected bit */
-
-#ifdef CONFIG_8xx
/* Cache control on the MPC8xx is provided through some additional
* special purpose registers.
*/
@@ -78,7 +38,5 @@ extern void flush_dcache_all(void);
#define DC_DFWT 0x40000000 /* Data cache is forced write through */
#define DC_LES 0x20000000 /* Caches are little endian mode */
-#endif /* CONFIG_8xx */
-#endif
-#endif /* __KERNEL__ */
+#endif /* _ASM_POWERPC_REG_8xx_H */
diff --git a/include/asm-ppc/signal.h b/include/asm-powerpc/signal.h
index caf6ede3710..694c8d2dab8 100644
--- a/include/asm-ppc/signal.h
+++ b/include/asm-powerpc/signal.h
@@ -1,18 +1,11 @@
-#ifndef _ASMPPC_SIGNAL_H
-#define _ASMPPC_SIGNAL_H
+#ifndef _ASM_POWERPC_SIGNAL_H
+#define _ASM_POWERPC_SIGNAL_H
-#ifdef __KERNEL__
#include <linux/types.h>
-#endif /* __KERNEL__ */
-
-/* Avoid too many header ordering problems. */
-struct siginfo;
-
-/* Most things should be clean enough to redefine this at will, if care
- is taken to make libc match. */
+#include <linux/config.h>
#define _NSIG 64
-#define _NSIG_BPW 32
+#define _NSIG_BPW BITS_PER_LONG
#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
typedef unsigned long old_sigset_t; /* at least 32 bits */
@@ -77,19 +70,19 @@ typedef struct {
* SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
* Unix names RESETHAND and NODEFER respectively.
*/
-#define SA_NOCLDSTOP 0x00000001
-#define SA_NOCLDWAIT 0x00000002
-#define SA_SIGINFO 0x00000004
-#define SA_ONSTACK 0x08000000
-#define SA_RESTART 0x10000000
-#define SA_NODEFER 0x40000000
-#define SA_RESETHAND 0x80000000
+#define SA_NOCLDSTOP 0x00000001U
+#define SA_NOCLDWAIT 0x00000002U
+#define SA_SIGINFO 0x00000004U
+#define SA_ONSTACK 0x08000000U
+#define SA_RESTART 0x10000000U
+#define SA_NODEFER 0x40000000U
+#define SA_RESETHAND 0x80000000U
#define SA_NOMASK SA_NODEFER
#define SA_ONESHOT SA_RESETHAND
-#define SA_INTERRUPT 0x20000000 /* dummy -- ignored */
+#define SA_INTERRUPT 0x20000000u /* dummy -- ignored */
-#define SA_RESTORER 0x04000000
+#define SA_RESTORER 0x04000000U
/*
* sigaltstack controls
@@ -127,10 +120,13 @@ typedef struct sigaltstack {
} stack_t;
#ifdef __KERNEL__
-#include <asm/sigcontext.h>
+struct pt_regs;
+extern int do_signal(sigset_t *oldset, struct pt_regs *regs);
+extern int do_signal32(sigset_t *oldset, struct pt_regs *regs);
#define ptrace_signal_deliver(regs, cookie) do { } while (0)
#endif /* __KERNEL__ */
+#ifndef __powerpc64__
/*
* These are parameters to dbg_sigreturn syscall. They enable or
* disable certain debugging things that can be done from signal
@@ -149,5 +145,6 @@ struct sig_dbg_op {
/* Enable or disable branch tracing. The value sets the state. */
#define SIG_DBG_BRANCH_TRACING 2
+#endif /* ! __powerpc64__ */
-#endif
+#endif /* _ASM_POWERPC_SIGNAL_H */
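The merged signal.h ties _NSIG_BPW to BITS_PER_LONG, so the 64-signal set needs two words on 32-bit and one on 64-bit. A small sketch of that arithmetic, using CHAR_BIT * sizeof(unsigned long) as a stand-in for BITS_PER_LONG:

#include <stdio.h>
#include <limits.h>

#define _NSIG       64
#define _NSIG_BPW   (CHAR_BIT * sizeof(unsigned long))  /* stand-in for BITS_PER_LONG */
#define _NSIG_WORDS (_NSIG / _NSIG_BPW)

int main(void)
{
        printf("bits per word: %zu, sigset words: %zu\n",
               (size_t)_NSIG_BPW, (size_t)_NSIG_WORDS);
        return 0;
}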
diff --git a/include/asm-powerpc/sparsemem.h b/include/asm-powerpc/sparsemem.h
index 1c95ab99deb..58d2aab416f 100644
--- a/include/asm-powerpc/sparsemem.h
+++ b/include/asm-powerpc/sparsemem.h
@@ -11,6 +11,10 @@
#define MAX_PHYSADDR_BITS 38
#define MAX_PHYSMEM_BITS 36
+#ifdef CONFIG_MEMORY_HOTPLUG
+extern void create_section_mapping(unsigned long start, unsigned long end);
+#endif /* CONFIG_MEMORY_HOTPLUG */
+
#endif /* CONFIG_SPARSEMEM */
#endif /* _ASM_POWERPC_SPARSEMEM_H */
diff --git a/include/asm-powerpc/system.h b/include/asm-powerpc/system.h
index 3536a5cd7a2..5341b75c75c 100644
--- a/include/asm-powerpc/system.h
+++ b/include/asm-powerpc/system.h
@@ -8,7 +8,6 @@
#include <linux/kernel.h>
#include <asm/hw_irq.h>
-#include <asm/ppc_asm.h>
#include <asm/atomic.h>
/*
@@ -180,6 +179,7 @@ extern struct task_struct *_switch(struct thread_struct *prev,
extern unsigned int rtas_data;
extern int mem_init_done; /* set on boot once kmalloc can be called */
extern unsigned long memory_limit;
+extern unsigned long klimit;
extern int powersave_nap; /* set if nap mode can be used in idle loop */
diff --git a/include/asm-ppc64/systemcfg.h b/include/asm-powerpc/systemcfg.h
index 9b86b53129a..36b5cbe466f 100644
--- a/include/asm-ppc64/systemcfg.h
+++ b/include/asm-powerpc/systemcfg.h
@@ -1,7 +1,7 @@
#ifndef _SYSTEMCFG_H
#define _SYSTEMCFG_H
-/*
+/*
* Copyright (C) 2002 Peter Bergner <bergner@vnet.ibm.com>, IBM
*
* This program is free software; you can redistribute it and/or
@@ -12,7 +12,7 @@
/* Change Activity:
* 2002/09/30 : bergner : Created
- * End Change Activity
+ * End Change Activity
*/
/*
@@ -56,7 +56,7 @@ struct systemcfg {
};
#ifdef __KERNEL__
-extern struct systemcfg *systemcfg;
+extern struct systemcfg *_systemcfg; /* to be renamed */
#endif
#endif /* __ASSEMBLY__ */
diff --git a/include/asm-ppc64/tce.h b/include/asm-powerpc/tce.h
index d40b6b42ab3..d099d5200f9 100644
--- a/include/asm-ppc64/tce.h
+++ b/include/asm-powerpc/tce.h
@@ -18,8 +18,8 @@
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
-#ifndef _ASM_TCE_H
-#define _ASM_TCE_H
+#ifndef _ASM_POWERPC_TCE_H
+#define _ASM_POWERPC_TCE_H
/*
* Tces come in two formats, one for the virtual bus and a different
@@ -61,4 +61,4 @@ union tce_entry {
};
-#endif
+#endif /* _ASM_POWERPC_TCE_H */
diff --git a/include/asm-powerpc/uaccess.h b/include/asm-powerpc/uaccess.h
index 33af730f0d1..3872e924cdd 100644
--- a/include/asm-powerpc/uaccess.h
+++ b/include/asm-powerpc/uaccess.h
@@ -120,14 +120,6 @@ struct exception_table_entry {
extern long __put_user_bad(void);
-#ifdef __powerpc64__
-#define __EX_TABLE_ALIGN "3"
-#define __EX_TABLE_TYPE "llong"
-#else
-#define __EX_TABLE_ALIGN "2"
-#define __EX_TABLE_TYPE "long"
-#endif
-
/*
* We don't tell gcc that we are accessing memory, but this is OK
* because we do not write to any memory gcc knows about, so there
@@ -142,11 +134,12 @@ extern long __put_user_bad(void);
" b 2b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
- " .align " __EX_TABLE_ALIGN "\n" \
- " ."__EX_TABLE_TYPE" 1b,3b\n" \
+ " .balign %5\n" \
+ PPC_LONG "1b,3b\n" \
".previous" \
: "=r" (err) \
- : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
+ : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err),\
+ "i"(sizeof(unsigned long)))
#ifdef __powerpc64__
#define __put_user_asm2(x, ptr, retval) \
@@ -162,12 +155,13 @@ extern long __put_user_bad(void);
" b 3b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
- " .align " __EX_TABLE_ALIGN "\n" \
- " ." __EX_TABLE_TYPE " 1b,4b\n" \
- " ." __EX_TABLE_TYPE " 2b,4b\n" \
+ " .balign %5\n" \
+ PPC_LONG "1b,4b\n" \
+ PPC_LONG "2b,4b\n" \
".previous" \
: "=r" (err) \
- : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err))
+ : "r" (x), "b" (addr), "i" (-EFAULT), "0" (err),\
+ "i"(sizeof(unsigned long)))
#endif /* __powerpc64__ */
#define __put_user_size(x, ptr, size, retval) \
@@ -213,11 +207,12 @@ extern long __get_user_bad(void);
" b 2b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
- " .align "__EX_TABLE_ALIGN "\n" \
- " ." __EX_TABLE_TYPE " 1b,3b\n" \
+ " .balign %5\n" \
+ PPC_LONG "1b,3b\n" \
".previous" \
: "=r" (err), "=r" (x) \
- : "b" (addr), "i" (-EFAULT), "0" (err))
+ : "b" (addr), "i" (-EFAULT), "0" (err), \
+ "i"(sizeof(unsigned long)))
#ifdef __powerpc64__
#define __get_user_asm2(x, addr, err) \
@@ -235,12 +230,13 @@ extern long __get_user_bad(void);
" b 3b\n" \
".previous\n" \
".section __ex_table,\"a\"\n" \
- " .align " __EX_TABLE_ALIGN "\n" \
- " ." __EX_TABLE_TYPE " 1b,4b\n" \
- " ." __EX_TABLE_TYPE " 2b,4b\n" \
+ " .balign %5\n" \
+ PPC_LONG "1b,4b\n" \
+ PPC_LONG "2b,4b\n" \
".previous" \
: "=r" (err), "=&r" (x) \
- : "b" (addr), "i" (-EFAULT), "0" (err))
+ : "b" (addr), "i" (-EFAULT), "0" (err), \
+ "i"(sizeof(unsigned long)))
#endif /* __powerpc64__ */
#define __get_user_size(x, ptr, size, retval) \
diff --git a/include/asm-powerpc/xmon.h b/include/asm-powerpc/xmon.h
index ace2072d4a8..43f7129984c 100644
--- a/include/asm-powerpc/xmon.h
+++ b/include/asm-powerpc/xmon.h
@@ -7,7 +7,6 @@ struct pt_regs;
extern int xmon(struct pt_regs *excp);
extern void xmon_printf(const char *fmt, ...);
extern void xmon_init(int);
-extern void xmon_map_scc(void);
#endif
#endif
diff --git a/include/asm-ppc/cacheflush.h b/include/asm-ppc/cacheflush.h
deleted file mode 100644
index 6a243efb331..00000000000
--- a/include/asm-ppc/cacheflush.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * include/asm-ppc/cacheflush.h
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#ifdef __KERNEL__
-#ifndef _PPC_CACHEFLUSH_H
-#define _PPC_CACHEFLUSH_H
-
-#include <linux/mm.h>
-
-/*
- * No cache flushing is required when address mappings are
- * changed, because the caches on PowerPCs are physically
- * addressed. -- paulus
- * Also, when SMP we use the coherency (M) bit of the
- * BATs and PTEs. -- Cort
- */
-#define flush_cache_all() do { } while (0)
-#define flush_cache_mm(mm) do { } while (0)
-#define flush_cache_range(vma, a, b) do { } while (0)
-#define flush_cache_page(vma, p, pfn) do { } while (0)
-#define flush_icache_page(vma, page) do { } while (0)
-#define flush_cache_vmap(start, end) do { } while (0)
-#define flush_cache_vunmap(start, end) do { } while (0)
-
-extern void flush_dcache_page(struct page *page);
-#define flush_dcache_mmap_lock(mapping) do { } while (0)
-#define flush_dcache_mmap_unlock(mapping) do { } while (0)
-
-extern void flush_icache_range(unsigned long, unsigned long);
-extern void flush_icache_user_range(struct vm_area_struct *vma,
- struct page *page, unsigned long addr, int len);
-
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
-do { memcpy(dst, src, len); \
- flush_icache_user_range(vma, page, vaddr, len); \
-} while (0)
-#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
- memcpy(dst, src, len)
-
-extern void __flush_dcache_icache(void *page_va);
-extern void __flush_dcache_icache_phys(unsigned long physaddr);
-extern void flush_dcache_icache_page(struct page *page);
-#endif /* _PPC_CACHEFLUSH_H */
-#endif /* __KERNEL__ */
diff --git a/include/asm-ppc/current.h b/include/asm-ppc/current.h
deleted file mode 100644
index 8d41501ba10..00000000000
--- a/include/asm-ppc/current.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#ifdef __KERNEL__
-#ifndef _PPC_CURRENT_H
-#define _PPC_CURRENT_H
-
-/*
- * We keep `current' in r2 for speed.
- */
-register struct task_struct *current asm ("r2");
-
-#endif /* !(_PPC_CURRENT_H) */
-#endif /* __KERNEL__ */
diff --git a/include/asm-ppc64/cache.h b/include/asm-ppc64/cache.h
deleted file mode 100644
index 92140a7efbd..00000000000
--- a/include/asm-ppc64/cache.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-#ifndef __ARCH_PPC64_CACHE_H
-#define __ARCH_PPC64_CACHE_H
-
-#include <asm/types.h>
-
-/* bytes per L1 cache line */
-#define L1_CACHE_SHIFT 7
-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
-
-#define SMP_CACHE_BYTES L1_CACHE_BYTES
-#define L1_CACHE_SHIFT_MAX 7 /* largest L1 which this arch supports */
-
-#ifndef __ASSEMBLY__
-
-struct ppc64_caches {
- u32 dsize; /* L1 d-cache size */
- u32 dline_size; /* L1 d-cache line size */
- u32 log_dline_size;
- u32 dlines_per_page;
- u32 isize; /* L1 i-cache size */
- u32 iline_size; /* L1 i-cache line size */
- u32 log_iline_size;
- u32 ilines_per_page;
-};
-
-extern struct ppc64_caches ppc64_caches;
-
-#endif
-
-#endif
diff --git a/include/asm-ppc64/current.h b/include/asm-ppc64/current.h
deleted file mode 100644
index 52ddc60c8b6..00000000000
--- a/include/asm-ppc64/current.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#ifndef _PPC64_CURRENT_H
-#define _PPC64_CURRENT_H
-
-#include <asm/paca.h>
-
-/*
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * as published by the Free Software Foundation; either version
- * 2 of the License, or (at your option) any later version.
- */
-
-#define get_current() (get_paca()->__current)
-#define current get_current()
-
-#endif /* !(_PPC64_CURRENT_H) */
diff --git a/include/asm-ppc64/eeh.h b/include/asm-ppc64/eeh.h
index 40c8eb57493..89f26ab3190 100644
--- a/include/asm-ppc64/eeh.h
+++ b/include/asm-ppc64/eeh.h
@@ -1,4 +1,4 @@
-/*
+/*
* eeh.h
* Copyright (C) 2001 Dave Engebretsen & Todd Inglett IBM Corporation.
*
@@ -6,12 +6,12 @@
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
- *
+ *
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
- *
+ *
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
@@ -27,8 +27,6 @@
struct pci_dev;
struct device_node;
-struct device_node;
-struct notifier_block;
#ifdef CONFIG_EEH
@@ -37,6 +35,10 @@ struct notifier_block;
#define EEH_MODE_NOCHECK (1<<1)
#define EEH_MODE_ISOLATED (1<<2)
+/* Max number of EEH freezes allowed before we consider the device
+ * to be permanently disabled. */
+#define EEH_MAX_ALLOWED_FREEZES 5
+
void __init eeh_init(void);
unsigned long eeh_check_failure(const volatile void __iomem *token,
unsigned long val);
@@ -59,36 +61,14 @@ void eeh_add_device_late(struct pci_dev *);
* eeh_remove_device - undo EEH setup for the indicated pci device
* @dev: pci device to be removed
*
- * This routine should be when a device is removed from a running
- * system (e.g. by hotplug or dlpar).
+ * This routine should be called when a device is removed from
+ * a running system (e.g. by hotplug or dlpar). It unregisters
+ * the PCI device from the EEH subsystem. I/O errors affecting
+ * this device will no longer be detected after this call; thus,
+ * I/O errors affecting this slot may leave this device unusable.
*/
void eeh_remove_device(struct pci_dev *);
-#define EEH_DISABLE 0
-#define EEH_ENABLE 1
-#define EEH_RELEASE_LOADSTORE 2
-#define EEH_RELEASE_DMA 3
-
-/**
- * Notifier event flags.
- */
-#define EEH_NOTIFY_FREEZE 1
-
-/** EEH event -- structure holding pci slot data that describes
- * a change in the isolation status of a PCI slot. A pointer
- * to this struct is passed as the data pointer in a notify callback.
- */
-struct eeh_event {
- struct list_head list;
- struct pci_dev *dev;
- struct device_node *dn;
- int reset_state;
-};
-
-/** Register to find out about EEH events. */
-int eeh_register_notifier(struct notifier_block *nb);
-int eeh_unregister_notifier(struct notifier_block *nb);
-
/**
* EEH_POSSIBLE_ERROR() -- test for possible MMIO failure.
*
@@ -129,7 +109,7 @@ static inline void eeh_remove_device(struct pci_dev *dev) { }
#define EEH_IO_ERROR_VALUE(size) (-1UL)
#endif /* CONFIG_EEH */
-/*
+/*
* MMIO read/write operations with EEH support.
*/
static inline u8 eeh_readb(const volatile void __iomem *addr)
diff --git a/include/asm-ppc64/mmu.h b/include/asm-ppc64/mmu.h
index 4c18a5cb69f..1a7e0afa2dc 100644
--- a/include/asm-ppc64/mmu.h
+++ b/include/asm-ppc64/mmu.h
@@ -14,7 +14,7 @@
#define _PPC64_MMU_H_
#include <linux/config.h>
-#include <asm/ppc_asm.h> /* for ASM_CONST */
+#include <asm/asm-compat.h>
#include <asm/page.h>
/*
@@ -224,9 +224,12 @@ extern int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
unsigned long pstart, unsigned long mode,
int psize);
+extern void htab_initialize(void);
+extern void htab_initialize_secondary(void);
extern void hpte_init_native(void);
extern void hpte_init_lpar(void);
extern void hpte_init_iSeries(void);
+extern void mm_init_ppc64(void);
extern long pSeries_lpar_hpte_insert(unsigned long hpte_group,
unsigned long va, unsigned long prpn,
@@ -245,6 +248,7 @@ extern long iSeries_hpte_insert(unsigned long hpte_group,
extern void stabs_alloc(void);
extern void slb_initialize(void);
+extern void stab_initialize(unsigned long stab);
#endif /* __ASSEMBLY__ */
diff --git a/include/asm-ppc64/mmzone.h b/include/asm-ppc64/mmzone.h
index 80a708e7093..15e777ce0f4 100644
--- a/include/asm-ppc64/mmzone.h
+++ b/include/asm-ppc64/mmzone.h
@@ -33,6 +33,9 @@ extern int numa_cpu_lookup_table[];
extern char *numa_memory_lookup_table;
extern cpumask_t numa_cpumask_lookup_table[];
extern int nr_cpus_in_node[];
+#ifdef CONFIG_MEMORY_HOTPLUG
+extern unsigned long max_pfn;
+#endif
/* 16MB regions */
#define MEMORY_INCREMENT_SHIFT 24
@@ -45,6 +48,11 @@ static inline int pa_to_nid(unsigned long pa)
{
int nid;
+#ifdef CONFIG_MEMORY_HOTPLUG
+ /* kludge hot added sections default to node 0 */
+ if (pa >= (max_pfn << PAGE_SHIFT))
+ return 0;
+#endif
nid = numa_memory_lookup_table[pa >> MEMORY_INCREMENT_SHIFT];
#ifdef DEBUG_NUMA
diff --git a/include/asm-ppc64/page.h b/include/asm-ppc64/page.h
index 82ce187e5be..e32f1187aa2 100644
--- a/include/asm-ppc64/page.h
+++ b/include/asm-ppc64/page.h
@@ -11,7 +11,7 @@
*/
#include <linux/config.h>
-#include <asm/ppc_asm.h> /* for ASM_CONST */
+#include <asm/asm-compat.h>
/*
* We support either 4k or 64k software page size. When using 64k pages
diff --git a/include/asm-ppc64/pci-bridge.h b/include/asm-ppc64/pci-bridge.h
index 60cf8c838af..efbdaece0cf 100644
--- a/include/asm-ppc64/pci-bridge.h
+++ b/include/asm-ppc64/pci-bridge.h
@@ -63,7 +63,6 @@ struct pci_dn {
int devfn; /* for pci devices */
int eeh_mode; /* See eeh.h for possible EEH_MODEs */
int eeh_config_addr;
- int eeh_capable; /* from firmware */
int eeh_check_count; /* # times driver ignored error */
int eeh_freeze_count; /* # times this device froze up. */
int eeh_is_bridge; /* device is pci-to-pci bridge */
diff --git a/include/asm-ppc64/pgalloc.h b/include/asm-ppc64/pgalloc.h
index 98da0e4262b..dcf3622d194 100644
--- a/include/asm-ppc64/pgalloc.h
+++ b/include/asm-ppc64/pgalloc.h
@@ -10,8 +10,8 @@ extern kmem_cache_t *pgtable_cache[];
#ifdef CONFIG_PPC_64K_PAGES
#define PTE_CACHE_NUM 0
-#define PMD_CACHE_NUM 0
-#define PGD_CACHE_NUM 1
+#define PMD_CACHE_NUM 1
+#define PGD_CACHE_NUM 2
#else
#define PTE_CACHE_NUM 0
#define PMD_CACHE_NUM 1
diff --git a/include/asm-ppc64/prom.h b/include/asm-ppc64/prom.h
index 76bb0266d67..ddfe186589f 100644
--- a/include/asm-ppc64/prom.h
+++ b/include/asm-ppc64/prom.h
@@ -204,6 +204,8 @@ extern void of_detach_node(const struct device_node *);
extern unsigned long prom_init(unsigned long, unsigned long, unsigned long,
unsigned long, unsigned long);
extern void finish_device_tree(void);
+extern void unflatten_device_tree(void);
+extern void early_init_devtree(void *);
extern int device_is_compatible(struct device_node *device, const char *);
extern int machine_is_compatible(const char *compat);
extern unsigned char *get_property(struct device_node *node, const char *name,
diff --git a/include/asm-ppc64/signal.h b/include/asm-ppc64/signal.h
deleted file mode 100644
index 432df7dd355..00000000000
--- a/include/asm-ppc64/signal.h
+++ /dev/null
@@ -1,132 +0,0 @@
-#ifndef _ASMPPC64_SIGNAL_H
-#define _ASMPPC64_SIGNAL_H
-
-#include <linux/types.h>
-#include <linux/compiler.h>
-#include <asm/siginfo.h>
-
-/* Avoid too many header ordering problems. */
-struct siginfo;
-
-#define _NSIG 64
-#define _NSIG_BPW 64
-#define _NSIG_WORDS (_NSIG / _NSIG_BPW)
-
-typedef unsigned long old_sigset_t; /* at least 32 bits */
-
-typedef struct {
- unsigned long sig[_NSIG_WORDS];
-} sigset_t;
-
-#define SIGHUP 1
-#define SIGINT 2
-#define SIGQUIT 3
-#define SIGILL 4
-#define SIGTRAP 5
-#define SIGABRT 6
-#define SIGIOT 6
-#define SIGBUS 7
-#define SIGFPE 8
-#define SIGKILL 9
-#define SIGUSR1 10
-#define SIGSEGV 11
-#define SIGUSR2 12
-#define SIGPIPE 13
-#define SIGALRM 14
-#define SIGTERM 15
-#define SIGSTKFLT 16
-#define SIGCHLD 17
-#define SIGCONT 18
-#define SIGSTOP 19
-#define SIGTSTP 20
-#define SIGTTIN 21
-#define SIGTTOU 22
-#define SIGURG 23
-#define SIGXCPU 24
-#define SIGXFSZ 25
-#define SIGVTALRM 26
-#define SIGPROF 27
-#define SIGWINCH 28
-#define SIGIO 29
-#define SIGPOLL SIGIO
-/*
-#define SIGLOST 29
-*/
-#define SIGPWR 30
-#define SIGSYS 31
-#define SIGUNUSED 31
-
-/* These should not be considered constants from userland. */
-#define SIGRTMIN 32
-#define SIGRTMAX _NSIG
-
-/*
- * SA_FLAGS values:
- *
- * SA_ONSTACK is not currently supported, but will allow sigaltstack(2).
- * SA_INTERRUPT is a no-op, but left due to historical reasons. Use the
- * SA_RESTART flag to get restarting signals (which were the default long ago)
- * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
- * SA_RESETHAND clears the handler when the signal is delivered.
- * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
- * SA_NODEFER prevents the current signal from being masked in the handler.
- *
- * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
- * Unix names RESETHAND and NODEFER respectively.
- */
-#define SA_NOCLDSTOP 0x00000001u
-#define SA_NOCLDWAIT 0x00000002u
-#define SA_SIGINFO 0x00000004u
-#define SA_ONSTACK 0x08000000u
-#define SA_RESTART 0x10000000u
-#define SA_NODEFER 0x40000000u
-#define SA_RESETHAND 0x80000000u
-
-#define SA_NOMASK SA_NODEFER
-#define SA_ONESHOT SA_RESETHAND
-#define SA_INTERRUPT 0x20000000u /* dummy -- ignored */
-
-#define SA_RESTORER 0x04000000u
-
-/*
- * sigaltstack controls
- */
-#define SS_ONSTACK 1
-#define SS_DISABLE 2
-
-#define MINSIGSTKSZ 2048
-#define SIGSTKSZ 8192
-
-#include <asm-generic/signal.h>
-
-struct old_sigaction {
- __sighandler_t sa_handler;
- old_sigset_t sa_mask;
- unsigned long sa_flags;
- __sigrestore_t sa_restorer;
-};
-
-struct sigaction {
- __sighandler_t sa_handler;
- unsigned long sa_flags;
- __sigrestore_t sa_restorer;
- sigset_t sa_mask; /* mask last for extensibility */
-};
-
-struct k_sigaction {
- struct sigaction sa;
-};
-
-typedef struct sigaltstack {
- void __user *ss_sp;
- int ss_flags;
- size_t ss_size;
-} stack_t;
-
-struct pt_regs;
-struct timespec;
-extern int do_signal(sigset_t *oldset, struct pt_regs *regs);
-extern int do_signal32(sigset_t *oldset, struct pt_regs *regs);
-#define ptrace_signal_deliver(regs, cookie) do { } while (0)
-
-#endif /* _ASMPPC64_SIGNAL_H */
diff --git a/include/asm-ppc64/system.h b/include/asm-ppc64/system.h
index 0cdd66c9f4b..bf9a6aba19c 100644
--- a/include/asm-ppc64/system.h
+++ b/include/asm-ppc64/system.h
@@ -149,6 +149,8 @@ struct thread_struct;
extern struct task_struct * _switch(struct thread_struct *prev,
struct thread_struct *next);
+extern unsigned long klimit;
+
extern int powersave_nap; /* set if nap mode can be used in idle loop */
/*
diff --git a/include/asm-sh/ide.h b/include/asm-sh/ide.h
index f42cf3977a5..711dad4cb48 100644
--- a/include/asm-sh/ide.h
+++ b/include/asm-sh/ide.h
@@ -16,10 +16,6 @@
#include <linux/config.h>
-#ifndef MAX_HWIFS
-#define MAX_HWIFS CONFIG_IDE_MAX_HWIFS
-#endif
-
#define ide_default_io_ctl(base) (0)
#include <asm-generic/ide_iops.h>
diff --git a/include/asm-sh64/ide.h b/include/asm-sh64/ide.h
index 6fd514daa1b..852f50afe39 100644
--- a/include/asm-sh64/ide.h
+++ b/include/asm-sh64/ide.h
@@ -17,10 +17,6 @@
#include <linux/config.h>
-#ifndef MAX_HWIFS
-#define MAX_HWIFS CONFIG_IDE_MAX_HWIFS
-#endif
-
/* Without this, the initialisation of PCI IDE cards end up calling
* ide_init_hwif_ports, which won't work. */
#ifdef CONFIG_BLK_DEV_IDEPCI
diff --git a/include/asm-x86_64/msi.h b/include/asm-x86_64/msi.h
index 85c427e472b..356e0e82f50 100644
--- a/include/asm-x86_64/msi.h
+++ b/include/asm-x86_64/msi.h
@@ -11,8 +11,6 @@
#include <asm/smp.h>
#define LAST_DEVICE_VECTOR 232
-#define MSI_DEST_MODE MSI_LOGICAL_MODE
-#define MSI_TARGET_CPU_SHIFT 12
-#define MSI_TARGET_CPU logical_smp_processor_id()
+#define MSI_TARGET_CPU_SHIFT 12
#endif /* ASM_MSI_H */
diff --git a/include/asm-x86_64/smp.h b/include/asm-x86_64/smp.h
index c57ce407134..b9fb2173ef9 100644
--- a/include/asm-x86_64/smp.h
+++ b/include/asm-x86_64/smp.h
@@ -135,5 +135,11 @@ static __inline int logical_smp_processor_id(void)
}
#endif
+#ifdef CONFIG_SMP
+#define cpu_physical_id(cpu) x86_cpu_to_apicid[cpu]
+#else
+#define cpu_physical_id(cpu) boot_cpu_id
+#endif
+
#endif
diff --git a/include/linux/connector.h b/include/linux/connector.h
index c5769c6585f..ad1a22c1c42 100644
--- a/include/linux/connector.h
+++ b/include/linux/connector.h
@@ -32,6 +32,8 @@
*/
#define CN_IDX_PROC 0x1
#define CN_VAL_PROC 0x1
+#define CN_IDX_CIFS 0x2
+#define CN_VAL_CIFS 0x1
#define CN_NETLINK_USERS 1
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
new file mode 100644
index 00000000000..84f12a41dc0
--- /dev/null
+++ b/include/linux/genetlink.h
@@ -0,0 +1,51 @@
+#ifndef __LINUX_GENERIC_NETLINK_H
+#define __LINUX_GENERIC_NETLINK_H
+
+#include <linux/netlink.h>
+
+#define GENL_NAMSIZ 16 /* length of family name */
+
+#define GENL_MIN_ID NLMSG_MIN_TYPE
+#define GENL_MAX_ID 1023
+
+struct genlmsghdr {
+ __u8 cmd;
+ __u8 version;
+ __u16 reserved;
+};
+
+#define GENL_HDRLEN NLMSG_ALIGN(sizeof(struct genlmsghdr))
+
+/*
+ * List of reserved static generic netlink identifiers:
+ */
+#define GENL_ID_GENERATE 0
+#define GENL_ID_CTRL NLMSG_MIN_TYPE
+
+/**************************************************************************
+ * Controller
+ **************************************************************************/
+
+enum {
+ CTRL_CMD_UNSPEC,
+ CTRL_CMD_NEWFAMILY,
+ CTRL_CMD_DELFAMILY,
+ CTRL_CMD_GETFAMILY,
+ CTRL_CMD_NEWOPS,
+ CTRL_CMD_DELOPS,
+ CTRL_CMD_GETOPS,
+ __CTRL_CMD_MAX,
+};
+
+#define CTRL_CMD_MAX (__CTRL_CMD_MAX - 1)
+
+enum {
+ CTRL_ATTR_UNSPEC,
+ CTRL_ATTR_FAMILY_ID,
+ CTRL_ATTR_FAMILY_NAME,
+ __CTRL_ATTR_MAX,
+};
+
+#define CTRL_ATTR_MAX (__CTRL_ATTR_MAX - 1)
+
+#endif /* __LINUX_GENERIC_NETLINK_H */
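GENL_HDRLEN above is just the genlmsghdr size rounded up to netlink's 4-byte alignment. A standalone illustration with the struct and the usual NLMSG_ALIGN rule copied locally so it builds without kernel headers:

#include <stdio.h>
#include <stdint.h>

struct genlmsghdr {
        uint8_t  cmd;
        uint8_t  version;
        uint16_t reserved;
};

#define NLMSG_ALIGNTO 4
#define NLMSG_ALIGN(len) (((len) + NLMSG_ALIGNTO - 1) & ~(NLMSG_ALIGNTO - 1))
#define GENL_HDRLEN NLMSG_ALIGN(sizeof(struct genlmsghdr))

int main(void)
{
        printf("genlmsghdr: %zu bytes, GENL_HDRLEN: %zu\n",
               sizeof(struct genlmsghdr), (size_t)GENL_HDRLEN);
        return 0;
}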
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 3461abc1e85..ac8b25fa650 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -230,6 +230,7 @@ typedef struct hw_regs_s {
int dma; /* our dma entry */
ide_ack_intr_t *ack_intr; /* acknowledge interrupt */
hwif_chipset_t chipset;
+ struct device *dev;
} hw_regs_t;
/*
@@ -266,6 +267,10 @@ static inline void ide_std_init_ports(hw_regs_t *hw,
#include <asm/ide.h>
+#ifndef MAX_HWIFS
+#define MAX_HWIFS CONFIG_IDE_MAX_HWIFS
+#endif
+
/* needed on alpha, x86/x86_64, ia64, mips, ppc32 and sh */
#ifndef IDE_ARCH_OBSOLETE_DEFAULTS
# define ide_default_io_base(index) (0)
@@ -1324,7 +1329,8 @@ void ide_init_disk(struct gendisk *, ide_drive_t *);
extern int ideprobe_init(void);
extern void ide_scan_pcibus(int scan_direction) __init;
-extern int ide_pci_register_driver(struct pci_driver *driver);
+extern int __ide_pci_register_driver(struct pci_driver *driver, struct module *owner);
+#define ide_pci_register_driver(d) __ide_pci_register_driver(d, THIS_MODULE)
extern void ide_pci_unregister_driver(struct pci_driver *driver);
void ide_pci_setup_ports(struct pci_dev *, struct ide_pci_device_s *, int, ata_index_t *);
extern void ide_setup_pci_noise (struct pci_dev *dev, struct ide_pci_device_s *d);
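The ide_pci_register_driver change above keeps the old call-site shape by turning it into a macro that forwards the caller's THIS_MODULE to a new __-prefixed function. A neutral userspace sketch of that wrapper pattern, with every name made up:

#include <stdio.h>

struct module { const char *name; };

static struct module __this_module = { "example" };
#define THIS_MODULE (&__this_module)

static int __register_driver(const char *name, struct module *owner)
{
        printf("driver %s registered by module %s\n", name, owner->name);
        return 0;
}

/* old entry point kept as a macro so existing callers pick up the owner */
#define register_driver(d) __register_driver((d), THIS_MODULE)

int main(void)
{
        return register_driver("mydrv");
}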
diff --git a/include/linux/if_ether.h b/include/linux/if_ether.h
index d21c305c6c6..fe26d431de8 100644
--- a/include/linux/if_ether.h
+++ b/include/linux/if_ether.h
@@ -21,6 +21,8 @@
#ifndef _LINUX_IF_ETHER_H
#define _LINUX_IF_ETHER_H
+#include <linux/types.h>
+
/*
* IEEE 802.3 Ethernet magic constants. The frame sizes omit the preamble
* and FCS/CRC (frame check sequence).
@@ -100,7 +102,7 @@
struct ethhdr {
unsigned char h_dest[ETH_ALEN]; /* destination eth addr */
unsigned char h_source[ETH_ALEN]; /* source ether addr */
- unsigned short h_proto; /* packet type ID field */
+ __be16 h_proto; /* packet type ID field */
} __attribute__((packed));
#ifdef __KERNEL__
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index c6efce4a04a..936f8b76114 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -927,6 +927,13 @@ extern int netdev_max_backlog;
extern int weight_p;
extern int netdev_set_master(struct net_device *dev, struct net_device *master);
extern int skb_checksum_help(struct sk_buff *skb, int inward);
+#ifdef CONFIG_BUG
+extern void netdev_rx_csum_fault(struct net_device *dev);
+#else
+static inline void netdev_rx_csum_fault(struct net_device *dev)
+{
+}
+#endif
/* rx skb timestamps */
extern void net_enable_timestamp(void);
extern void net_disable_timestamp(void);
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
new file mode 100644
index 00000000000..6d39b518486
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_common.h
@@ -0,0 +1,159 @@
+#ifndef _NF_CONNTRACK_COMMON_H
+#define _NF_CONNTRACK_COMMON_H
+/* Connection state tracking for netfilter. This is separated from,
+ but required by, the NAT layer; it can also be used by an iptables
+ extension. */
+enum ip_conntrack_info
+{
+ /* Part of an established connection (either direction). */
+ IP_CT_ESTABLISHED,
+
+ /* Like NEW, but related to an existing connection, or ICMP error
+ (in either direction). */
+ IP_CT_RELATED,
+
+ /* Started a new connection to track (only
+ IP_CT_DIR_ORIGINAL); may be a retransmission. */
+ IP_CT_NEW,
+
+ /* >= this indicates reply direction */
+ IP_CT_IS_REPLY,
+
+ /* Number of distinct IP_CT types (no NEW in reply dirn). */
+ IP_CT_NUMBER = IP_CT_IS_REPLY * 2 - 1
+};
+
+/* Bitset representing status of connection. */
+enum ip_conntrack_status {
+ /* It's an expected connection: bit 0 set. This bit never changed */
+ IPS_EXPECTED_BIT = 0,
+ IPS_EXPECTED = (1 << IPS_EXPECTED_BIT),
+
+ /* We've seen packets both ways: bit 1 set. Can be set, not unset. */
+ IPS_SEEN_REPLY_BIT = 1,
+ IPS_SEEN_REPLY = (1 << IPS_SEEN_REPLY_BIT),
+
+ /* Conntrack should never be early-expired. */
+ IPS_ASSURED_BIT = 2,
+ IPS_ASSURED = (1 << IPS_ASSURED_BIT),
+
+ /* Connection is confirmed: originating packet has left box */
+ IPS_CONFIRMED_BIT = 3,
+ IPS_CONFIRMED = (1 << IPS_CONFIRMED_BIT),
+
+ /* Connection needs src nat in orig dir. This bit never changed. */
+ IPS_SRC_NAT_BIT = 4,
+ IPS_SRC_NAT = (1 << IPS_SRC_NAT_BIT),
+
+ /* Connection needs dst nat in orig dir. This bit never changed. */
+ IPS_DST_NAT_BIT = 5,
+ IPS_DST_NAT = (1 << IPS_DST_NAT_BIT),
+
+ /* Both together. */
+ IPS_NAT_MASK = (IPS_DST_NAT | IPS_SRC_NAT),
+
+ /* Connection needs TCP sequence adjusted. */
+ IPS_SEQ_ADJUST_BIT = 6,
+ IPS_SEQ_ADJUST = (1 << IPS_SEQ_ADJUST_BIT),
+
+ /* NAT initialization bits. */
+ IPS_SRC_NAT_DONE_BIT = 7,
+ IPS_SRC_NAT_DONE = (1 << IPS_SRC_NAT_DONE_BIT),
+
+ IPS_DST_NAT_DONE_BIT = 8,
+ IPS_DST_NAT_DONE = (1 << IPS_DST_NAT_DONE_BIT),
+
+ /* Both together */
+ IPS_NAT_DONE_MASK = (IPS_DST_NAT_DONE | IPS_SRC_NAT_DONE),
+
+ /* Connection is dying (removed from lists), can not be unset. */
+ IPS_DYING_BIT = 9,
+ IPS_DYING = (1 << IPS_DYING_BIT),
+};
+
+/* Connection tracking event bits */
+enum ip_conntrack_events
+{
+ /* New conntrack */
+ IPCT_NEW_BIT = 0,
+ IPCT_NEW = (1 << IPCT_NEW_BIT),
+
+ /* Expected connection */
+ IPCT_RELATED_BIT = 1,
+ IPCT_RELATED = (1 << IPCT_RELATED_BIT),
+
+ /* Destroyed conntrack */
+ IPCT_DESTROY_BIT = 2,
+ IPCT_DESTROY = (1 << IPCT_DESTROY_BIT),
+
+ /* Timer has been refreshed */
+ IPCT_REFRESH_BIT = 3,
+ IPCT_REFRESH = (1 << IPCT_REFRESH_BIT),
+
+ /* Status has changed */
+ IPCT_STATUS_BIT = 4,
+ IPCT_STATUS = (1 << IPCT_STATUS_BIT),
+
+ /* Update of protocol info */
+ IPCT_PROTOINFO_BIT = 5,
+ IPCT_PROTOINFO = (1 << IPCT_PROTOINFO_BIT),
+
+ /* Volatile protocol info */
+ IPCT_PROTOINFO_VOLATILE_BIT = 6,
+ IPCT_PROTOINFO_VOLATILE = (1 << IPCT_PROTOINFO_VOLATILE_BIT),
+
+ /* New helper for conntrack */
+ IPCT_HELPER_BIT = 7,
+ IPCT_HELPER = (1 << IPCT_HELPER_BIT),
+
+ /* Update of helper info */
+ IPCT_HELPINFO_BIT = 8,
+ IPCT_HELPINFO = (1 << IPCT_HELPINFO_BIT),
+
+ /* Volatile helper info */
+ IPCT_HELPINFO_VOLATILE_BIT = 9,
+ IPCT_HELPINFO_VOLATILE = (1 << IPCT_HELPINFO_VOLATILE_BIT),
+
+ /* NAT info */
+ IPCT_NATINFO_BIT = 10,
+ IPCT_NATINFO = (1 << IPCT_NATINFO_BIT),
+
+ /* Counter highest bit has been set */
+ IPCT_COUNTER_FILLING_BIT = 11,
+ IPCT_COUNTER_FILLING = (1 << IPCT_COUNTER_FILLING_BIT),
+};
+
+enum ip_conntrack_expect_events {
+ IPEXP_NEW_BIT = 0,
+ IPEXP_NEW = (1 << IPEXP_NEW_BIT),
+};
+
+#ifdef __KERNEL__
+struct ip_conntrack_counter
+{
+ u_int32_t packets;
+ u_int32_t bytes;
+};
+
+struct ip_conntrack_stat
+{
+ unsigned int searched;
+ unsigned int found;
+ unsigned int new;
+ unsigned int invalid;
+ unsigned int ignore;
+ unsigned int delete;
+ unsigned int delete_list;
+ unsigned int insert;
+ unsigned int insert_failed;
+ unsigned int drop;
+ unsigned int early_drop;
+ unsigned int error;
+ unsigned int expect_new;
+ unsigned int expect_create;
+ unsigned int expect_delete;
+};
+
+#endif /* __KERNEL__ */
+
+#endif /* _NF_CONNTRACK_COMMON_H */
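The status enum above pairs each flag with its bit number, so code can use atomic bit operations on the number and plain masks on the value. A small userspace sketch of the pattern, reproducing only two of the bits:

#include <stdio.h>

enum {
        IPS_EXPECTED_BIT = 0,
        IPS_EXPECTED = (1 << IPS_EXPECTED_BIT),
        IPS_SEEN_REPLY_BIT = 1,
        IPS_SEEN_REPLY = (1 << IPS_SEEN_REPLY_BIT),
};

int main(void)
{
        unsigned long status = 0;

        status |= 1UL << IPS_SEEN_REPLY_BIT;    /* what set_bit() would do */
        printf("seen reply: %d, expected: %d\n",
               !!(status & IPS_SEEN_REPLY), !!(status & IPS_EXPECTED));
        return 0;
}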
diff --git a/include/linux/netfilter/nf_conntrack_ftp.h b/include/linux/netfilter/nf_conntrack_ftp.h
new file mode 100644
index 00000000000..ad4a41c9ce9
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_ftp.h
@@ -0,0 +1,44 @@
+#ifndef _NF_CONNTRACK_FTP_H
+#define _NF_CONNTRACK_FTP_H
+/* FTP tracking. */
+
+/* This enum is exposed to userspace */
+enum ip_ct_ftp_type
+{
+ /* PORT command from client */
+ IP_CT_FTP_PORT,
+ /* PASV response from server */
+ IP_CT_FTP_PASV,
+ /* EPRT command from client */
+ IP_CT_FTP_EPRT,
+ /* EPSV response from server */
+ IP_CT_FTP_EPSV,
+};
+
+#ifdef __KERNEL__
+
+#define FTP_PORT 21
+
+#define NUM_SEQ_TO_REMEMBER 2
+/* This structure exists only once per master */
+struct ip_ct_ftp_master {
+ /* Valid seq positions for cmd matching after newline */
+ u_int32_t seq_aft_nl[IP_CT_DIR_MAX][NUM_SEQ_TO_REMEMBER];
+ /* 0 means seq_match_aft_nl not set */
+ int seq_aft_nl_num[IP_CT_DIR_MAX];
+};
+
+struct ip_conntrack_expect;
+
+/* For NAT to hook in when we find a packet which describes what other
+ * connection we should expect. */
+extern unsigned int (*ip_nat_ftp_hook)(struct sk_buff **pskb,
+ enum ip_conntrack_info ctinfo,
+ enum ip_ct_ftp_type type,
+ unsigned int matchoff,
+ unsigned int matchlen,
+ struct ip_conntrack_expect *exp,
+ u32 *seq);
+#endif /* __KERNEL__ */
+
+#endif /* _NF_CONNTRACK_FTP_H */
diff --git a/include/linux/netfilter/nf_conntrack_sctp.h b/include/linux/netfilter/nf_conntrack_sctp.h
new file mode 100644
index 00000000000..b8994d9fd1a
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_sctp.h
@@ -0,0 +1,27 @@
+#ifndef _NF_CONNTRACK_SCTP_H
+#define _NF_CONNTRACK_SCTP_H
+/* SCTP tracking. */
+
+#include <linux/netfilter/nf_conntrack_tuple_common.h>
+
+enum sctp_conntrack {
+ SCTP_CONNTRACK_NONE,
+ SCTP_CONNTRACK_CLOSED,
+ SCTP_CONNTRACK_COOKIE_WAIT,
+ SCTP_CONNTRACK_COOKIE_ECHOED,
+ SCTP_CONNTRACK_ESTABLISHED,
+ SCTP_CONNTRACK_SHUTDOWN_SENT,
+ SCTP_CONNTRACK_SHUTDOWN_RECD,
+ SCTP_CONNTRACK_SHUTDOWN_ACK_SENT,
+ SCTP_CONNTRACK_MAX
+};
+
+struct ip_ct_sctp
+{
+ enum sctp_conntrack state;
+
+ u_int32_t vtag[IP_CT_DIR_MAX];
+ u_int32_t ttag[IP_CT_DIR_MAX];
+};
+
+#endif /* _NF_CONNTRACK_SCTP_H */
diff --git a/include/linux/netfilter/nf_conntrack_tcp.h b/include/linux/netfilter/nf_conntrack_tcp.h
new file mode 100644
index 00000000000..b2feeffde38
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_tcp.h
@@ -0,0 +1,56 @@
+#ifndef _NF_CONNTRACK_TCP_H
+#define _NF_CONNTRACK_TCP_H
+/* TCP tracking. */
+
+/* This is exposed to userspace (ctnetlink) */
+enum tcp_conntrack {
+ TCP_CONNTRACK_NONE,
+ TCP_CONNTRACK_SYN_SENT,
+ TCP_CONNTRACK_SYN_RECV,
+ TCP_CONNTRACK_ESTABLISHED,
+ TCP_CONNTRACK_FIN_WAIT,
+ TCP_CONNTRACK_CLOSE_WAIT,
+ TCP_CONNTRACK_LAST_ACK,
+ TCP_CONNTRACK_TIME_WAIT,
+ TCP_CONNTRACK_CLOSE,
+ TCP_CONNTRACK_LISTEN,
+ TCP_CONNTRACK_MAX,
+ TCP_CONNTRACK_IGNORE
+};
+
+/* Window scaling is advertised by the sender */
+#define IP_CT_TCP_FLAG_WINDOW_SCALE 0x01
+
+/* SACK is permitted by the sender */
+#define IP_CT_TCP_FLAG_SACK_PERM 0x02
+
+/* This sender sent FIN first */
+#define IP_CT_TCP_FLAG_CLOSE_INIT 0x03
+
+#ifdef __KERNEL__
+
+struct ip_ct_tcp_state {
+ u_int32_t td_end; /* max of seq + len */
+ u_int32_t td_maxend; /* max of ack + max(win, 1) */
+ u_int32_t td_maxwin; /* max(win) */
+ u_int8_t td_scale; /* window scale factor */
+ u_int8_t loose; /* used when connection picked up from the middle */
+ u_int8_t flags; /* per direction options */
+};
+
+struct ip_ct_tcp
+{
+ struct ip_ct_tcp_state seen[2]; /* connection parameters per direction */
+ u_int8_t state; /* state of the connection (enum tcp_conntrack) */
+ /* For detecting stale connections */
+ u_int8_t last_dir; /* Direction of the last packet (enum ip_conntrack_dir) */
+ u_int8_t retrans; /* Number of retransmitted packets */
+ u_int8_t last_index; /* Index of the last packet */
+ u_int32_t last_seq; /* Last sequence number seen in dir */
+ u_int32_t last_ack; /* Last sequence number seen in opposite dir */
+ u_int32_t last_end; /* Last seq + len */
+};
+
+#endif /* __KERNEL__ */
+
+#endif /* _NF_CONNTRACK_TCP_H */
diff --git a/include/linux/netfilter/nf_conntrack_tuple_common.h b/include/linux/netfilter/nf_conntrack_tuple_common.h
new file mode 100644
index 00000000000..8e145f0d61c
--- /dev/null
+++ b/include/linux/netfilter/nf_conntrack_tuple_common.h
@@ -0,0 +1,13 @@
+#ifndef _NF_CONNTRACK_TUPLE_COMMON_H
+#define _NF_CONNTRACK_TUPLE_COMMON_H
+
+enum ip_conntrack_dir
+{
+ IP_CT_DIR_ORIGINAL,
+ IP_CT_DIR_REPLY,
+ IP_CT_DIR_MAX
+};
+
+#define CTINFO2DIR(ctinfo) ((ctinfo) >= IP_CT_IS_REPLY ? IP_CT_DIR_REPLY : IP_CT_DIR_ORIGINAL)
+
+#endif /* _NF_CONNTRACK_TUPLE_COMMON_H */
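[Editor's note, not part of the patch] CTINFO2DIR is how per-packet conntrack info collapses onto the two-direction index used throughout the tracker. A tiny sketch:

#include <linux/netfilter/nf_conntrack_common.h>
#include <linux/netfilter/nf_conntrack_tuple_common.h>

/* Anything >= IP_CT_IS_REPLY maps to the reply direction. */
static inline const char *example_dir_name(enum ip_conntrack_info ctinfo)
{
        return CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL ? "original" : "reply";
}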
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index f08e870100f..72975fa8795 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -146,7 +146,7 @@ extern void nfnl_unlock(void);
extern int nfnetlink_subsys_register(struct nfnetlink_subsystem *n);
extern int nfnetlink_subsys_unregister(struct nfnetlink_subsystem *n);
-extern int nfattr_parse(struct nfattr *tb[], int maxattr,
+extern void nfattr_parse(struct nfattr *tb[], int maxattr,
struct nfattr *nfa, int len);
#define nfattr_parse_nested(tb, max, nfa) \
diff --git a/include/linux/netfilter_ipv4/ip_conntrack.h b/include/linux/netfilter_ipv4/ip_conntrack.h
index d078bb91d9e..b3432ab59a1 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack.h
@@ -1,132 +1,7 @@
#ifndef _IP_CONNTRACK_H
#define _IP_CONNTRACK_H
-/* Connection state tracking for netfilter. This is separated from,
- but required by, the NAT layer; it can also be used by an iptables
- extension. */
-enum ip_conntrack_info
-{
- /* Part of an established connection (either direction). */
- IP_CT_ESTABLISHED,
-
- /* Like NEW, but related to an existing connection, or ICMP error
- (in either direction). */
- IP_CT_RELATED,
-
- /* Started a new connection to track (only
- IP_CT_DIR_ORIGINAL); may be a retransmission. */
- IP_CT_NEW,
-
- /* >= this indicates reply direction */
- IP_CT_IS_REPLY,
-
- /* Number of distinct IP_CT types (no NEW in reply dirn). */
- IP_CT_NUMBER = IP_CT_IS_REPLY * 2 - 1
-};
-
-/* Bitset representing status of connection. */
-enum ip_conntrack_status {
- /* It's an expected connection: bit 0 set. This bit never changed */
- IPS_EXPECTED_BIT = 0,
- IPS_EXPECTED = (1 << IPS_EXPECTED_BIT),
-
- /* We've seen packets both ways: bit 1 set. Can be set, not unset. */
- IPS_SEEN_REPLY_BIT = 1,
- IPS_SEEN_REPLY = (1 << IPS_SEEN_REPLY_BIT),
-
- /* Conntrack should never be early-expired. */
- IPS_ASSURED_BIT = 2,
- IPS_ASSURED = (1 << IPS_ASSURED_BIT),
-
- /* Connection is confirmed: originating packet has left box */
- IPS_CONFIRMED_BIT = 3,
- IPS_CONFIRMED = (1 << IPS_CONFIRMED_BIT),
-
- /* Connection needs src nat in orig dir. This bit never changed. */
- IPS_SRC_NAT_BIT = 4,
- IPS_SRC_NAT = (1 << IPS_SRC_NAT_BIT),
-
- /* Connection needs dst nat in orig dir. This bit never changed. */
- IPS_DST_NAT_BIT = 5,
- IPS_DST_NAT = (1 << IPS_DST_NAT_BIT),
-
- /* Both together. */
- IPS_NAT_MASK = (IPS_DST_NAT | IPS_SRC_NAT),
-
- /* Connection needs TCP sequence adjusted. */
- IPS_SEQ_ADJUST_BIT = 6,
- IPS_SEQ_ADJUST = (1 << IPS_SEQ_ADJUST_BIT),
-
- /* NAT initialization bits. */
- IPS_SRC_NAT_DONE_BIT = 7,
- IPS_SRC_NAT_DONE = (1 << IPS_SRC_NAT_DONE_BIT),
-
- IPS_DST_NAT_DONE_BIT = 8,
- IPS_DST_NAT_DONE = (1 << IPS_DST_NAT_DONE_BIT),
-
- /* Both together */
- IPS_NAT_DONE_MASK = (IPS_DST_NAT_DONE | IPS_SRC_NAT_DONE),
-
- /* Connection is dying (removed from lists), can not be unset. */
- IPS_DYING_BIT = 9,
- IPS_DYING = (1 << IPS_DYING_BIT),
-};
-
-/* Connection tracking event bits */
-enum ip_conntrack_events
-{
- /* New conntrack */
- IPCT_NEW_BIT = 0,
- IPCT_NEW = (1 << IPCT_NEW_BIT),
-
- /* Expected connection */
- IPCT_RELATED_BIT = 1,
- IPCT_RELATED = (1 << IPCT_RELATED_BIT),
-
- /* Destroyed conntrack */
- IPCT_DESTROY_BIT = 2,
- IPCT_DESTROY = (1 << IPCT_DESTROY_BIT),
-
- /* Timer has been refreshed */
- IPCT_REFRESH_BIT = 3,
- IPCT_REFRESH = (1 << IPCT_REFRESH_BIT),
-
- /* Status has changed */
- IPCT_STATUS_BIT = 4,
- IPCT_STATUS = (1 << IPCT_STATUS_BIT),
-
- /* Update of protocol info */
- IPCT_PROTOINFO_BIT = 5,
- IPCT_PROTOINFO = (1 << IPCT_PROTOINFO_BIT),
-
- /* Volatile protocol info */
- IPCT_PROTOINFO_VOLATILE_BIT = 6,
- IPCT_PROTOINFO_VOLATILE = (1 << IPCT_PROTOINFO_VOLATILE_BIT),
-
- /* New helper for conntrack */
- IPCT_HELPER_BIT = 7,
- IPCT_HELPER = (1 << IPCT_HELPER_BIT),
-
- /* Update of helper info */
- IPCT_HELPINFO_BIT = 8,
- IPCT_HELPINFO = (1 << IPCT_HELPINFO_BIT),
-
- /* Volatile helper info */
- IPCT_HELPINFO_VOLATILE_BIT = 9,
- IPCT_HELPINFO_VOLATILE = (1 << IPCT_HELPINFO_VOLATILE_BIT),
- /* NAT info */
- IPCT_NATINFO_BIT = 10,
- IPCT_NATINFO = (1 << IPCT_NATINFO_BIT),
-
- /* Counter highest bit has been set */
- IPCT_COUNTER_FILLING_BIT = 11,
- IPCT_COUNTER_FILLING = (1 << IPCT_COUNTER_FILLING_BIT),
-};
-
-enum ip_conntrack_expect_events {
- IPEXP_NEW_BIT = 0,
- IPEXP_NEW = (1 << IPEXP_NEW_BIT),
-};
+#include <linux/netfilter/nf_conntrack_common.h>
#ifdef __KERNEL__
#include <linux/config.h>
@@ -194,12 +69,6 @@ do { \
#define IP_NF_ASSERT(x)
#endif
-struct ip_conntrack_counter
-{
- u_int32_t packets;
- u_int32_t bytes;
-};
-
struct ip_conntrack_helper;
struct ip_conntrack
@@ -426,25 +295,6 @@ static inline int is_dying(struct ip_conntrack *ct)
extern unsigned int ip_conntrack_htable_size;
-struct ip_conntrack_stat
-{
- unsigned int searched;
- unsigned int found;
- unsigned int new;
- unsigned int invalid;
- unsigned int ignore;
- unsigned int delete;
- unsigned int delete_list;
- unsigned int insert;
- unsigned int insert_failed;
- unsigned int drop;
- unsigned int early_drop;
- unsigned int error;
- unsigned int expect_new;
- unsigned int expect_create;
- unsigned int expect_delete;
-};
-
#define CONNTRACK_STAT_INC(count) (__get_cpu_var(ip_conntrack_stat).count++)
#ifdef CONFIG_IP_NF_CONNTRACK_EVENTS
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_ftp.h b/include/linux/netfilter_ipv4/ip_conntrack_ftp.h
index 5f06429b904..63811934de4 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack_ftp.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack_ftp.h
@@ -1,43 +1,6 @@
#ifndef _IP_CONNTRACK_FTP_H
#define _IP_CONNTRACK_FTP_H
-/* FTP tracking. */
-#ifdef __KERNEL__
+#include <linux/netfilter/nf_conntrack_ftp.h>
-#define FTP_PORT 21
-
-#endif /* __KERNEL__ */
-
-enum ip_ct_ftp_type
-{
- /* PORT command from client */
- IP_CT_FTP_PORT,
- /* PASV response from server */
- IP_CT_FTP_PASV,
- /* EPRT command from client */
- IP_CT_FTP_EPRT,
- /* EPSV response from server */
- IP_CT_FTP_EPSV,
-};
-
-#define NUM_SEQ_TO_REMEMBER 2
-/* This structure exists only once per master */
-struct ip_ct_ftp_master {
- /* Valid seq positions for cmd matching after newline */
- u_int32_t seq_aft_nl[IP_CT_DIR_MAX][NUM_SEQ_TO_REMEMBER];
- /* 0 means seq_match_aft_nl not set */
- int seq_aft_nl_num[IP_CT_DIR_MAX];
-};
-
-struct ip_conntrack_expect;
-
-/* For NAT to hook in when we find a packet which describes what other
- * connection we should expect. */
-extern unsigned int (*ip_nat_ftp_hook)(struct sk_buff **pskb,
- enum ip_conntrack_info ctinfo,
- enum ip_ct_ftp_type type,
- unsigned int matchoff,
- unsigned int matchlen,
- struct ip_conntrack_expect *exp,
- u32 *seq);
#endif /* _IP_CONNTRACK_FTP_H */
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_icmp.h b/include/linux/netfilter_ipv4/ip_conntrack_icmp.h
index f1664abbe39..eed5ee3e474 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack_icmp.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack_icmp.h
@@ -1,11 +1,6 @@
#ifndef _IP_CONNTRACK_ICMP_H
#define _IP_CONNTRACK_ICMP_H
-/* ICMP tracking. */
-#include <asm/atomic.h>
-struct ip_ct_icmp
-{
- /* Optimization: when number in == number out, forget immediately. */
- atomic_t count;
-};
+#include <net/netfilter/ipv4/nf_conntrack_icmp.h>
+
#endif /* _IP_CONNTRACK_ICMP_H */
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_sctp.h b/include/linux/netfilter_ipv4/ip_conntrack_sctp.h
index 7a8d869321f..4099a041a32 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack_sctp.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack_sctp.h
@@ -1,25 +1,6 @@
#ifndef _IP_CONNTRACK_SCTP_H
#define _IP_CONNTRACK_SCTP_H
-/* SCTP tracking. */
-enum sctp_conntrack {
- SCTP_CONNTRACK_NONE,
- SCTP_CONNTRACK_CLOSED,
- SCTP_CONNTRACK_COOKIE_WAIT,
- SCTP_CONNTRACK_COOKIE_ECHOED,
- SCTP_CONNTRACK_ESTABLISHED,
- SCTP_CONNTRACK_SHUTDOWN_SENT,
- SCTP_CONNTRACK_SHUTDOWN_RECD,
- SCTP_CONNTRACK_SHUTDOWN_ACK_SENT,
- SCTP_CONNTRACK_MAX
-};
-
-struct ip_ct_sctp
-{
- enum sctp_conntrack state;
-
- u_int32_t vtag[IP_CT_DIR_MAX];
- u_int32_t ttag[IP_CT_DIR_MAX];
-};
+#include <linux/netfilter/nf_conntrack_sctp.h>
#endif /* _IP_CONNTRACK_SCTP_H */
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_tcp.h b/include/linux/netfilter_ipv4/ip_conntrack_tcp.h
index 16da044d97a..876b8fb17e6 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack_tcp.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack_tcp.h
@@ -1,51 +1,6 @@
#ifndef _IP_CONNTRACK_TCP_H
#define _IP_CONNTRACK_TCP_H
-/* TCP tracking. */
-enum tcp_conntrack {
- TCP_CONNTRACK_NONE,
- TCP_CONNTRACK_SYN_SENT,
- TCP_CONNTRACK_SYN_RECV,
- TCP_CONNTRACK_ESTABLISHED,
- TCP_CONNTRACK_FIN_WAIT,
- TCP_CONNTRACK_CLOSE_WAIT,
- TCP_CONNTRACK_LAST_ACK,
- TCP_CONNTRACK_TIME_WAIT,
- TCP_CONNTRACK_CLOSE,
- TCP_CONNTRACK_LISTEN,
- TCP_CONNTRACK_MAX,
- TCP_CONNTRACK_IGNORE
-};
-
-/* Window scaling is advertised by the sender */
-#define IP_CT_TCP_FLAG_WINDOW_SCALE 0x01
-
-/* SACK is permitted by the sender */
-#define IP_CT_TCP_FLAG_SACK_PERM 0x02
-
-/* This sender sent FIN first */
-#define IP_CT_TCP_FLAG_CLOSE_INIT 0x03
-
-struct ip_ct_tcp_state {
- u_int32_t td_end; /* max of seq + len */
- u_int32_t td_maxend; /* max of ack + max(win, 1) */
- u_int32_t td_maxwin; /* max(win) */
- u_int8_t td_scale; /* window scale factor */
- u_int8_t loose; /* used when connection picked up from the middle */
- u_int8_t flags; /* per direction options */
-};
-
-struct ip_ct_tcp
-{
- struct ip_ct_tcp_state seen[2]; /* connection parameters per direction */
- u_int8_t state; /* state of the connection (enum tcp_conntrack) */
- /* For detecting stale connections */
- u_int8_t last_dir; /* Direction of the last packet (enum ip_conntrack_dir) */
- u_int8_t retrans; /* Number of retransmitted packets */
- u_int8_t last_index; /* Index of the last packet */
- u_int32_t last_seq; /* Last sequence number seen in dir */
- u_int32_t last_ack; /* Last sequence number seen in opposite dir */
- u_int32_t last_end; /* Last seq + len */
-};
+#include <linux/netfilter/nf_conntrack_tcp.h>
#endif /* _IP_CONNTRACK_TCP_H */
diff --git a/include/linux/netfilter_ipv4/ip_conntrack_tuple.h b/include/linux/netfilter_ipv4/ip_conntrack_tuple.h
index 3232db11a4e..2fdabdb4c0e 100644
--- a/include/linux/netfilter_ipv4/ip_conntrack_tuple.h
+++ b/include/linux/netfilter_ipv4/ip_conntrack_tuple.h
@@ -2,6 +2,7 @@
#define _IP_CONNTRACK_TUPLE_H
#include <linux/types.h>
+#include <linux/netfilter/nf_conntrack_tuple_common.h>
/* A `tuple' is a structure containing the information to uniquely
identify a connection. ie. if two packets have the same tuple, they
@@ -88,13 +89,6 @@ struct ip_conntrack_tuple
(tuple)->dst.u.all = 0; \
} while (0)
-enum ip_conntrack_dir
-{
- IP_CT_DIR_ORIGINAL,
- IP_CT_DIR_REPLY,
- IP_CT_DIR_MAX
-};
-
#ifdef __KERNEL__
#define DUMP_TUPLE(tp) \
@@ -103,8 +97,6 @@ DEBUGP("tuple %p: %u %u.%u.%u.%u:%hu -> %u.%u.%u.%u:%hu\n", \
NIPQUAD((tp)->src.ip), ntohs((tp)->src.u.all), \
NIPQUAD((tp)->dst.ip), ntohs((tp)->dst.u.all))
-#define CTINFO2DIR(ctinfo) ((ctinfo) >= IP_CT_IS_REPLY ? IP_CT_DIR_REPLY : IP_CT_DIR_ORIGINAL)
-
/* If we're the first tuple, it's the original dir. */
#define DIRECTION(h) ((enum ip_conntrack_dir)(h)->tuple.dst.dir)
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index edcc2c6eb5c..53b2983f627 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -59,6 +59,7 @@
enum nf_ip6_hook_priorities {
NF_IP6_PRI_FIRST = INT_MIN,
+ NF_IP6_PRI_CONNTRACK_DEFRAG = -400,
NF_IP6_PRI_SELINUX_FIRST = -225,
NF_IP6_PRI_CONNTRACK = -200,
NF_IP6_PRI_BRIDGE_SABOTAGE_FORWARD = -175,
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index ba25ca874c2..6a2ccf78a35 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -71,7 +71,8 @@ struct nlmsghdr
#define NLMSG_ALIGNTO 4
#define NLMSG_ALIGN(len) ( ((len)+NLMSG_ALIGNTO-1) & ~(NLMSG_ALIGNTO-1) )
-#define NLMSG_LENGTH(len) ((len)+NLMSG_ALIGN(sizeof(struct nlmsghdr)))
+#define NLMSG_HDRLEN ((int) NLMSG_ALIGN(sizeof(struct nlmsghdr)))
+#define NLMSG_LENGTH(len) ((len)+NLMSG_ALIGN(NLMSG_HDRLEN))
#define NLMSG_SPACE(len) NLMSG_ALIGN(NLMSG_LENGTH(len))
#define NLMSG_DATA(nlh) ((void*)(((char*)nlh) + NLMSG_LENGTH(0)))
#define NLMSG_NEXT(nlh,len) ((len) -= NLMSG_ALIGN((nlh)->nlmsg_len), \
@@ -86,6 +87,8 @@ struct nlmsghdr
#define NLMSG_DONE 0x3 /* End of a dump */
#define NLMSG_OVERRUN 0x4 /* Data lost */
+#define NLMSG_MIN_TYPE 0x10 /* < 0x10: reserved control messages */
+
struct nlmsgerr
{
int error;
@@ -108,6 +111,25 @@ enum {
NETLINK_CONNECTED,
};
+/*
+ * <------- NLA_HDRLEN ------> <-- NLA_ALIGN(payload)-->
+ * +---------------------+- - -+- - - - - - - - - -+- - -+
+ * | Header | Pad | Payload | Pad |
+ * | (struct nlattr) | ing | | ing |
+ * +---------------------+- - -+- - - - - - - - - -+- - -+
+ * <-------------- nlattr->nla_len -------------->
+ */
+
+struct nlattr
+{
+ __u16 nla_len;
+ __u16 nla_type;
+};
+
+#define NLA_ALIGNTO 4
+#define NLA_ALIGN(len) (((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
+#define NLA_HDRLEN ((int) NLA_ALIGN(sizeof(struct nlattr)))
+
#ifdef __KERNEL__
#include <linux/capability.h>
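[Editor's note, not part of the patch] The nlattr layout diagram above translates directly into sizing arithmetic: a 5-byte payload, for example, occupies NLA_HDRLEN + NLA_ALIGN(5) = 4 + 8 = 12 bytes on the wire. A small sketch; example_nla_total_size is a made-up helper name:

#include <linux/netlink.h>

/* Total space an attribute with the given payload length occupies,
 * matching the Header / Pad / Payload / Pad picture above. */
static inline int example_nla_total_size(int payload)
{
        return NLA_HDRLEN + NLA_ALIGN(payload);
}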
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 857126a36ec..4877e35ae20 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -47,14 +47,15 @@
OSC_PCI_EXPRESS_CAP_STRUCTURE_CONTROL)
#ifdef CONFIG_ACPI
-extern acpi_status pci_osc_control_set(u32 flags);
+extern acpi_status pci_osc_control_set(acpi_handle handle, u32 flags);
extern acpi_status pci_osc_support_set(u32 flags);
#else
#if !defined(acpi_status)
typedef u32 acpi_status;
#define AE_ERROR (acpi_status) (0x0001)
#endif
-static inline acpi_status pci_osc_control_set(u32 flags) {return AE_ERROR;}
+static inline acpi_status pci_osc_control_set(acpi_handle handle, u32 flags)
+{return AE_ERROR;}
static inline acpi_status pci_osc_support_set(u32 flags) {return AE_ERROR;}
#endif
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 3596ac94ecf..de690ca73d5 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -236,7 +236,6 @@ struct module;
struct pci_driver {
struct list_head node;
char *name;
- struct module *owner;
const struct pci_device_id *id_table; /* must be non-NULL for probe to be called */
int (*probe) (struct pci_dev *dev, const struct pci_device_id *id); /* New device inserted */
void (*remove) (struct pci_dev *dev); /* Device removed (NULL if not a hot-plug capable driver) */
@@ -338,6 +337,7 @@ struct pci_dev *pci_find_device (unsigned int vendor, unsigned int device, const
struct pci_dev *pci_find_device_reverse (unsigned int vendor, unsigned int device, const struct pci_dev *from);
struct pci_dev *pci_find_slot (unsigned int bus, unsigned int devfn);
int pci_find_capability (struct pci_dev *dev, int cap);
+int pci_find_next_capability (struct pci_dev *dev, u8 pos, int cap);
int pci_find_ext_capability (struct pci_dev *dev, int cap);
struct pci_bus * pci_find_next_bus(const struct pci_bus *from);
@@ -432,8 +432,13 @@ int pci_bus_alloc_resource(struct pci_bus *bus, struct resource *res,
void *alignf_data);
void pci_enable_bridges(struct pci_bus *bus);
-/* New-style probing supporting hot-pluggable devices */
-int pci_register_driver(struct pci_driver *);
+/* Proper probing supporting hot-pluggable devices */
+int __pci_register_driver(struct pci_driver *, struct module *);
+static inline int pci_register_driver(struct pci_driver *driver)
+{
+ return __pci_register_driver(driver, THIS_MODULE);
+}
+
void pci_unregister_driver(struct pci_driver *);
void pci_remove_behind_bridge(struct pci_dev *);
struct pci_driver *pci_dev_driver(const struct pci_dev *);
@@ -547,9 +552,11 @@ static inline int pci_enable_device(struct pci_dev *dev) { return -EIO; }
static inline void pci_disable_device(struct pci_dev *dev) { }
static inline int pci_set_dma_mask(struct pci_dev *dev, u64 mask) { return -EIO; }
static inline int pci_assign_resource(struct pci_dev *dev, int i) { return -EBUSY;}
+static inline int __pci_register_driver(struct pci_driver *drv, struct module *owner) { return 0;}
static inline int pci_register_driver(struct pci_driver *drv) { return 0;}
static inline void pci_unregister_driver(struct pci_driver *drv) { }
static inline int pci_find_capability (struct pci_dev *dev, int cap) {return 0; }
+static inline int pci_find_next_capability (struct pci_dev *dev, u8 pos, int cap) { return 0; }
static inline int pci_find_ext_capability (struct pci_dev *dev, int cap) {return 0; }
static inline const struct pci_device_id *pci_match_device(const struct pci_device_id *ids, const struct pci_dev *dev) { return NULL; }
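[Editor's note, not part of the patch] With pci_register_driver() now a wrapper that passes THIS_MODULE to __pci_register_driver(), the owning module is recorded implicitly and drivers no longer fill in an .owner field. A hedged sketch of a caller; all example_* names are made up, and the device ID is just the CS5536 IDE ID added elsewhere in this patch:

#include <linux/module.h>
#include <linux/pci.h>

static struct pci_device_id example_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CS5536_IDE) },
        { 0, }
};

static int example_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
        return pci_enable_device(dev);
}

static void example_remove(struct pci_dev *dev)
{
        pci_disable_device(dev);
}

static struct pci_driver example_driver = {
        .name     = "example",          /* no .owner: THIS_MODULE is implied */
        .id_table = example_ids,
        .probe    = example_probe,
        .remove   = example_remove,
};

static int __init example_init(void)
{
        return pci_register_driver(&example_driver);
}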
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 9a96f058839..4e06eb0f445 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -387,6 +387,7 @@
#define PCI_DEVICE_ID_NS_SC1100_SMI 0x0511
#define PCI_DEVICE_ID_NS_SC1100_XBUS 0x0515
#define PCI_DEVICE_ID_NS_87410 0xd001
+#define PCI_DEVICE_ID_NS_CS5535_IDE 0x002d
#define PCI_VENDOR_ID_TSENG 0x100c
#define PCI_DEVICE_ID_TSENG_W32P_2 0x3202
@@ -487,6 +488,8 @@
#define PCI_DEVICE_ID_AMD_8151_0 0x7454
#define PCI_DEVICE_ID_AMD_8131_APIC 0x7450
+#define PCI_DEVICE_ID_AMD_CS5536_IDE 0x209A
+
#define PCI_VENDOR_ID_TRIDENT 0x1023
#define PCI_DEVICE_ID_TRIDENT_4DWAVE_DX 0x2000
#define PCI_DEVICE_ID_TRIDENT_4DWAVE_NX 0x2001
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index fdfb8fe8c38..0a8ea8b3581 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -274,6 +274,9 @@ struct sk_buff {
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
__u8 ipvs_property:1;
#endif
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+ struct sk_buff *nfct_reasm;
+#endif
#ifdef CONFIG_BRIDGE_NETFILTER
struct nf_bridge_info *nf_bridge;
#endif
@@ -1233,8 +1236,7 @@ extern unsigned int datagram_poll(struct file *file, struct socket *sock,
extern int skb_copy_datagram_iovec(const struct sk_buff *from,
int offset, struct iovec *to,
int size);
-extern int skb_copy_and_csum_datagram_iovec(const
- struct sk_buff *skb,
+extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
int hlen,
struct iovec *iov);
extern void skb_free_datagram(struct sock *sk, struct sk_buff *skb);
@@ -1302,6 +1304,30 @@ static inline void skb_set_timestamp(struct sk_buff *skb, const struct timeval *
extern void __net_timestamp(struct sk_buff *skb);
+extern unsigned int __skb_checksum_complete(struct sk_buff *skb);
+
+/**
+ * skb_checksum_complete - Calculate checksum of an entire packet
+ * @skb: packet to process
+ *
+ * This function calculates the checksum over the entire packet plus
+ * the value of skb->csum. The latter can be used to supply the
+ * checksum of a pseudo header as used by TCP/UDP. It returns the
+ * checksum.
+ *
+ * For protocols that contain complete checksums such as ICMP/TCP/UDP,
+ * this function can be used to verify the checksum on received
+ * packets. In that case the function should return zero if the
+ * checksum is correct. In particular, this function will return zero
+ * if skb->ip_summed is CHECKSUM_UNNECESSARY which indicates that the
+ * hardware has already verified the correctness of the checksum.
+ */
+static inline unsigned int skb_checksum_complete(struct sk_buff *skb)
+{
+ return skb->ip_summed != CHECKSUM_UNNECESSARY &&
+ __skb_checksum_complete(skb);
+}
+
#ifdef CONFIG_NETFILTER
static inline void nf_conntrack_put(struct nf_conntrack *nfct)
{
@@ -1313,10 +1339,26 @@ static inline void nf_conntrack_get(struct nf_conntrack *nfct)
if (nfct)
atomic_inc(&nfct->use);
}
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+static inline void nf_conntrack_get_reasm(struct sk_buff *skb)
+{
+ if (skb)
+ atomic_inc(&skb->users);
+}
+static inline void nf_conntrack_put_reasm(struct sk_buff *skb)
+{
+ if (skb)
+ kfree_skb(skb);
+}
+#endif
static inline void nf_reset(struct sk_buff *skb)
{
nf_conntrack_put(skb->nfct);
skb->nfct = NULL;
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+ nf_conntrack_put_reasm(skb->nfct_reasm);
+ skb->nfct_reasm = NULL;
+#endif
}
#ifdef CONFIG_BRIDGE_NETFILTER
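[Editor's note, not part of the patch] skb_checksum_complete() is meant to be the single call a receive path makes once skb->csum carries the pseudo-header checksum. A minimal sketch, with a hypothetical wrapper name:

#include <linux/errno.h>
#include <linux/skbuff.h>

/* Returns 0 when the checksum verifies, including the case where the
 * hardware already checked it (CHECKSUM_UNNECESSARY). skb->csum is
 * assumed to hold the pseudo-header checksum for TCP/UDP. */
static inline int example_verify_csum(struct sk_buff *skb)
{
        return skb_checksum_complete(skb) ? -EINVAL : 0;
}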
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index fc131d6602b..ab2791b3189 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -205,6 +205,7 @@ enum
NET_ECONET=16,
NET_SCTP=17,
NET_LLC=18,
+ NET_NETFILTER=19,
};
/* /proc/sys/kernel/random */
@@ -270,6 +271,42 @@ enum
NET_UNIX_MAX_DGRAM_QLEN=3,
};
+/* /proc/sys/net/netfilter */
+enum
+{
+ NET_NF_CONNTRACK_MAX=1,
+ NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT=2,
+ NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV=3,
+ NET_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED=4,
+ NET_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT=5,
+ NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT=6,
+ NET_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK=7,
+ NET_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT=8,
+ NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE=9,
+ NET_NF_CONNTRACK_UDP_TIMEOUT=10,
+ NET_NF_CONNTRACK_UDP_TIMEOUT_STREAM=11,
+ NET_NF_CONNTRACK_ICMP_TIMEOUT=12,
+ NET_NF_CONNTRACK_GENERIC_TIMEOUT=13,
+ NET_NF_CONNTRACK_BUCKETS=14,
+ NET_NF_CONNTRACK_LOG_INVALID=15,
+ NET_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS=16,
+ NET_NF_CONNTRACK_TCP_LOOSE=17,
+ NET_NF_CONNTRACK_TCP_BE_LIBERAL=18,
+ NET_NF_CONNTRACK_TCP_MAX_RETRANS=19,
+ NET_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED=20,
+ NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT=21,
+ NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED=22,
+ NET_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED=23,
+ NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT=24,
+ NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD=25,
+ NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT=26,
+ NET_NF_CONNTRACK_COUNT=27,
+ NET_NF_CONNTRACK_ICMPV6_TIMEOUT=28,
+ NET_NF_CONNTRACK_FRAG6_TIMEOUT=29,
+ NET_NF_CONNTRACK_FRAG6_LOW_THRESH=30,
+ NET_NF_CONNTRACK_FRAG6_HIGH_THRESH=31,
+};
+
/* /proc/sys/net/ipv4 */
enum
{
@@ -353,6 +390,7 @@ enum
NET_TCP_BIC_BETA=108,
NET_IPV4_ICMP_ERRORS_USE_INBOUND_IFADDR=109,
NET_TCP_CONG_CONTROL=110,
+ NET_TCP_ABC=111,
};
enum {
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index ac4ca44c75c..0e1da6602e0 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -307,6 +307,21 @@ struct tcp_sock {
struct tcp_sack_block duplicate_sack[1]; /* D-SACK block */
struct tcp_sack_block selective_acks[4]; /* The SACKS themselves*/
+ struct tcp_sack_block recv_sack_cache[4];
+
+ /* from STCP, retrans queue hinting */
+ struct sk_buff* lost_skb_hint;
+
+ struct sk_buff *scoreboard_skb_hint;
+ struct sk_buff *retransmit_skb_hint;
+ struct sk_buff *forward_skb_hint;
+ struct sk_buff *fastpath_skb_hint;
+
+ int fastpath_cnt_hint;
+ int lost_cnt_hint;
+ int retransmit_cnt_hint;
+ int forward_cnt_hint;
+
__u16 advmss; /* Advertised MSS */
__u16 prior_ssthresh; /* ssthresh saved at recovery start */
__u32 lost_out; /* Lost packets */
@@ -326,6 +341,7 @@ struct tcp_sock {
__u32 snd_up; /* Urgent pointer */
__u32 total_retrans; /* Total retransmits for entire connection */
+ __u32 bytes_acked; /* Appropriate Byte Counting - RFC3465 */
unsigned int keepalive_time; /* time before keep alive takes place */
unsigned int keepalive_intvl; /* time interval between keep alive probes */
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
new file mode 100644
index 00000000000..52d8b1a73d5
--- /dev/null
+++ b/include/net/genetlink.h
@@ -0,0 +1,154 @@
+#ifndef __NET_GENERIC_NETLINK_H
+#define __NET_GENERIC_NETLINK_H
+
+#include <linux/genetlink.h>
+#include <net/netlink.h>
+
+/**
+ * struct genl_family - generic netlink family
+ * @id: protocol family identifier
+ * @hdrsize: length of user specific header in bytes
+ * @name: name of family
+ * @version: protocol version
+ * @maxattr: maximum number of attributes supported
+ * @attrbuf: buffer to store parsed attributes
+ * @ops_list: list of all assigned operations
+ * @family_list: family list
+ */
+struct genl_family
+{
+ unsigned int id;
+ unsigned int hdrsize;
+ char name[GENL_NAMSIZ];
+ unsigned int version;
+ unsigned int maxattr;
+ struct module * owner;
+ struct nlattr ** attrbuf; /* private */
+ struct list_head ops_list; /* private */
+ struct list_head family_list; /* private */
+};
+
+#define GENL_ADMIN_PERM 0x01
+
+/**
+ * struct genl_info - receiving information
+ * @snd_seq: sending sequence number
+ * @snd_pid: netlink pid of sender
+ * @nlhdr: netlink message header
+ * @genlhdr: generic netlink message header
+ * @userhdr: user specific header
+ * @attrs: netlink attributes
+ */
+struct genl_info
+{
+ u32 snd_seq;
+ u32 snd_pid;
+ struct nlmsghdr * nlhdr;
+ struct genlmsghdr * genlhdr;
+ void * userhdr;
+ struct nlattr ** attrs;
+};
+
+/**
+ * struct genl_ops - generic netlink operations
+ * @cmd: command identifier
+ * @flags: flags
+ * @policy: attribute validation policy
+ * @doit: standard command callback
+ * @dumpit: callback for dumpers
+ * @ops_list: operations list
+ */
+struct genl_ops
+{
+ unsigned int cmd;
+ unsigned int flags;
+ struct nla_policy *policy;
+ int (*doit)(struct sk_buff *skb,
+ struct genl_info *info);
+ int (*dumpit)(struct sk_buff *skb,
+ struct netlink_callback *cb);
+ struct list_head ops_list;
+};
+
+extern int genl_register_family(struct genl_family *family);
+extern int genl_unregister_family(struct genl_family *family);
+extern int genl_register_ops(struct genl_family *, struct genl_ops *ops);
+extern int genl_unregister_ops(struct genl_family *, struct genl_ops *ops);
+
+extern struct sock *genl_sock;
+
+/**
+ * genlmsg_put - Add generic netlink header to netlink message
+ * @skb: socket buffer holding the message
+ * @pid: netlink pid the message is addressed to
+ * @seq: sequence number (usually the one of the sender)
+ * @type: netlink message type
+ * @hdrlen: length of the user specific header
+ * @flags: netlink message flags
+ * @cmd: generic netlink command
+ * @version: version
+ *
+ * Returns pointer to user specific header
+ */
+static inline void *genlmsg_put(struct sk_buff *skb, u32 pid, u32 seq,
+ int type, int hdrlen, int flags,
+ u8 cmd, u8 version)
+{
+ struct nlmsghdr *nlh;
+ struct genlmsghdr *hdr;
+
+ nlh = nlmsg_put(skb, pid, seq, type, GENL_HDRLEN + hdrlen, flags);
+ if (nlh == NULL)
+ return NULL;
+
+ hdr = nlmsg_data(nlh);
+ hdr->cmd = cmd;
+ hdr->version = version;
+ hdr->reserved = 0;
+
+ return (char *) hdr + GENL_HDRLEN;
+}
+
+/**
+ * genlmsg_end - Finalize a generic netlink message
+ * @skb: socket buffer the message is stored in
+ * @hdr: user specific header
+ */
+static inline int genlmsg_end(struct sk_buff *skb, void *hdr)
+{
+ return nlmsg_end(skb, hdr - GENL_HDRLEN - NLMSG_HDRLEN);
+}
+
+/**
+ * genlmsg_cancel - Cancel construction of a generic netlink message
+ * @skb: socket buffer the message is stored in
+ * @hdr: generic netlink message header
+ */
+static inline int genlmsg_cancel(struct sk_buff *skb, void *hdr)
+{
+ return nlmsg_cancel(skb, hdr - GENL_HDRLEN - NLMSG_HDRLEN);
+}
+
+/**
+ * genlmsg_multicast - multicast a netlink message
+ * @skb: netlink message as socket buffer
+ * @pid: own netlink pid to avoid sending to yourself
+ * @group: multicast group id
+ */
+static inline int genlmsg_multicast(struct sk_buff *skb, u32 pid,
+ unsigned int group)
+{
+ return nlmsg_multicast(genl_sock, skb, pid, group);
+}
+
+/**
+ * genlmsg_unicast - unicast a netlink message
+ * @skb: netlink message as socket buffer
+ * @pid: netlink pid of the destination socket
+ */
+static inline int genlmsg_unicast(struct sk_buff *skb, u32 pid)
+{
+ return nlmsg_unicast(genl_sock, skb, pid);
+}
+
+#endif /* __NET_GENERIC_NETLINK_H */
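[Editor's note, not part of the patch] Putting the helpers above together, a doit() handler builds a reply by framing it with genlmsg_put()/genlmsg_end() and then sending it. A hedged sketch; the command and family id values, and the pre-allocated reply skb, are hypothetical:

#include <linux/errno.h>
#include <net/genetlink.h>

#define EXAMPLE_FAMILY_ID       0x10    /* hypothetical: normally the id assigned at registration */
#define EXAMPLE_CMD_ECHO        1       /* hypothetical command number */

static int example_fill_reply(struct sk_buff *reply, struct genl_info *info)
{
        void *hdr;

        /* Message type is the family id; version 1 is arbitrary here. */
        hdr = genlmsg_put(reply, info->snd_pid, info->snd_seq,
                          EXAMPLE_FAMILY_ID, 0, 0, EXAMPLE_CMD_ECHO, 1);
        if (hdr == NULL)
                return -EMSGSIZE;

        /* ...append attributes between put and end... */

        genlmsg_end(reply, hdr);
        return genlmsg_unicast(reply, info->snd_pid);
}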
diff --git a/include/net/netfilter/ipv4/nf_conntrack_icmp.h b/include/net/netfilter/ipv4/nf_conntrack_icmp.h
new file mode 100644
index 00000000000..3dd22cff23e
--- /dev/null
+++ b/include/net/netfilter/ipv4/nf_conntrack_icmp.h
@@ -0,0 +1,11 @@
+#ifndef _NF_CONNTRACK_ICMP_H
+#define _NF_CONNTRACK_ICMP_H
+/* ICMP tracking. */
+#include <asm/atomic.h>
+
+struct ip_ct_icmp
+{
+ /* Optimization: when number in == number out, forget immediately. */
+ atomic_t count;
+};
+#endif /* _NF_CONNTRACK_ICMP_H */
diff --git a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
new file mode 100644
index 00000000000..25b081a730e
--- /dev/null
+++ b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
@@ -0,0 +1,43 @@
+/*
+ * IPv4 support for nf_conntrack.
+ *
+ * 23 Mar 2004: Yasuyuki Kozakai @ USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ * - move L3 protocol dependent part from include/linux/netfilter_ipv4/
+ * ip_conntrack.h
+ */
+
+#ifndef _NF_CONNTRACK_IPV4_H
+#define _NF_CONNTRACK_IPV4_H
+
+#ifdef CONFIG_IP_NF_NAT_NEEDED
+#include <linux/netfilter_ipv4/ip_nat.h>
+
+/* per conntrack: nat application helper private data */
+union ip_conntrack_nat_help {
+ /* insert nat helper private data here */
+};
+
+struct nf_conntrack_ipv4_nat {
+ struct ip_nat_info info;
+ union ip_conntrack_nat_help help;
+#if defined(CONFIG_IP_NF_TARGET_MASQUERADE) || \
+ defined(CONFIG_IP_NF_TARGET_MASQUERADE_MODULE)
+ int masq_index;
+#endif
+};
+#endif /* CONFIG_IP_NF_NAT_NEEDED */
+
+struct nf_conntrack_ipv4 {
+#ifdef CONFIG_IP_NF_NAT_NEEDED
+ struct nf_conntrack_ipv4_nat *nat;
+#endif
+};
+
+/* Returns new sk_buff, or NULL */
+struct sk_buff *
+nf_ct_ipv4_ct_gather_frags(struct sk_buff *skb);
+
+/* call to create an explicit dependency on nf_conntrack_l3proto_ipv4. */
+extern void need_ip_conntrack(void);
+
+#endif /*_NF_CONNTRACK_IPV4_H*/
diff --git a/include/net/netfilter/ipv6/nf_conntrack_icmpv6.h b/include/net/netfilter/ipv6/nf_conntrack_icmpv6.h
new file mode 100644
index 00000000000..86591afda29
--- /dev/null
+++ b/include/net/netfilter/ipv6/nf_conntrack_icmpv6.h
@@ -0,0 +1,27 @@
+/*
+ * ICMPv6 tracking.
+ *
+ * 21 Apr 2004: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ * - separated from nf_conntrack_icmp.h
+ *
+ * Derived from include/linux/netfilter_ipv4/ip_conntrack_icmp.h
+ */
+
+#ifndef _NF_CONNTRACK_ICMPV6_H
+#define _NF_CONNTRACK_ICMPV6_H
+#include <asm/atomic.h>
+
+#ifndef ICMPV6_NI_QUERY
+#define ICMPV6_NI_QUERY 139
+#endif
+#ifndef ICMPV6_NI_REPLY
+#define ICMPV6_NI_REPLY 140
+#endif
+
+struct nf_ct_icmpv6
+{
+ /* Optimization: when number in == number out, forget immediately. */
+ atomic_t count;
+};
+
+#endif /* _NF_CONNTRACK_ICMPV6_H */
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
new file mode 100644
index 00000000000..cc482561079
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack.h
@@ -0,0 +1,354 @@
+/*
+ * Connection state tracking for netfilter. This is separated from,
+ * but required by, the (future) NAT layer; it can also be used by an iptables
+ * extension.
+ *
+ * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ * - generalize L3 protocol dependent part.
+ *
+ * Derived from include/linux/netfilter_ipv4/ip_conntrack.h
+ */
+
+#ifndef _NF_CONNTRACK_H
+#define _NF_CONNTRACK_H
+
+#include <linux/netfilter/nf_conntrack_common.h>
+
+#ifdef __KERNEL__
+#include <linux/config.h>
+#include <linux/bitops.h>
+#include <linux/compiler.h>
+#include <asm/atomic.h>
+
+#include <linux/netfilter/nf_conntrack_tcp.h>
+#include <linux/netfilter/nf_conntrack_sctp.h>
+#include <net/netfilter/ipv4/nf_conntrack_icmp.h>
+#include <net/netfilter/ipv6/nf_conntrack_icmpv6.h>
+
+#include <net/netfilter/nf_conntrack_tuple.h>
+
+/* per conntrack: protocol private data */
+union nf_conntrack_proto {
+ /* insert conntrack proto private data here */
+ struct ip_ct_sctp sctp;
+ struct ip_ct_tcp tcp;
+ struct ip_ct_icmp icmp;
+ struct nf_ct_icmpv6 icmpv6;
+};
+
+union nf_conntrack_expect_proto {
+ /* insert expect proto private data here */
+};
+
+/* Add protocol helper include file here */
+#include <linux/netfilter/nf_conntrack_ftp.h>
+
+/* per conntrack: application helper private data */
+union nf_conntrack_help {
+ /* insert conntrack helper private data (master) here */
+ struct ip_ct_ftp_master ct_ftp_info;
+};
+
+#include <linux/types.h>
+#include <linux/skbuff.h>
+
+#ifdef CONFIG_NETFILTER_DEBUG
+#define NF_CT_ASSERT(x) \
+do { \
+ if (!(x)) \
+ /* Wooah! I'm tripping my conntrack in a frenzy of \
+ netplay... */ \
+ printk("NF_CT_ASSERT: %s:%i(%s)\n", \
+ __FILE__, __LINE__, __FUNCTION__); \
+} while(0)
+#else
+#define NF_CT_ASSERT(x)
+#endif
+
+struct nf_conntrack_helper;
+
+#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
+struct nf_conn
+{
+ /* Usage count in here is 1 for hash table/destruct timer, 1 per skb,
+ plus 1 for any connection(s) we are `master' for */
+ struct nf_conntrack ct_general;
+
+ /* XXX should I move this to the tail ? - Y.K */
+ /* These are my tuples; original and reply */
+ struct nf_conntrack_tuple_hash tuplehash[IP_CT_DIR_MAX];
+
+ /* Have we seen traffic both ways yet? (bitset) */
+ unsigned long status;
+
+ /* Timer function; drops refcnt when it goes off. */
+ struct timer_list timeout;
+
+#ifdef CONFIG_NF_CT_ACCT
+ /* Accounting Information (same cache line as other written members) */
+ struct ip_conntrack_counter counters[IP_CT_DIR_MAX];
+#endif
+ /* If we were expected by an expectation, this will be it */
+ struct nf_conn *master;
+
+ /* Current number of expected connections */
+ unsigned int expecting;
+
+ /* Helper, if any */
+ struct nf_conntrack_helper *helper;
+
+ /* features - nat, helper, ... used by allocating system */
+ u_int32_t features;
+
+ /* Storage reserved for other modules: */
+
+ union nf_conntrack_proto proto;
+
+#if defined(CONFIG_NF_CONNTRACK_MARK)
+ u_int32_t mark;
+#endif
+
+ /* These members are dynamically allocated. */
+
+ union nf_conntrack_help *help;
+
+ /* Layer 3 dependent members. (ex: NAT) */
+ union {
+ struct nf_conntrack_ipv4 *ipv4;
+ } l3proto;
+ void *data[0];
+};
+
+struct nf_conntrack_expect
+{
+ /* Internal linked list (global expectation list) */
+ struct list_head list;
+
+ /* We expect this tuple, with the following mask */
+ struct nf_conntrack_tuple tuple, mask;
+
+ /* Function to call after setup and insertion */
+ void (*expectfn)(struct nf_conn *new,
+ struct nf_conntrack_expect *this);
+
+ /* The conntrack of the master connection */
+ struct nf_conn *master;
+
+ /* Timer function; deletes the expectation. */
+ struct timer_list timeout;
+
+ /* Usage count. */
+ atomic_t use;
+
+ /* Flags */
+ unsigned int flags;
+
+#ifdef CONFIG_NF_NAT_NEEDED
+ /* This is the original per-proto part, used to map the
+ * expected connection the way the recipient expects. */
+ union nf_conntrack_manip_proto saved_proto;
+ /* Direction relative to the master connection. */
+ enum ip_conntrack_dir dir;
+#endif
+};
+
+#define NF_CT_EXPECT_PERMANENT 0x1
+
+static inline struct nf_conn *
+nf_ct_tuplehash_to_ctrack(const struct nf_conntrack_tuple_hash *hash)
+{
+ return container_of(hash, struct nf_conn,
+ tuplehash[hash->tuple.dst.dir]);
+}
+
+/* get master conntrack via master expectation */
+#define master_ct(conntr) (conntr->master)
+
+/* Alter reply tuple (maybe alter helper). */
+extern void
+nf_conntrack_alter_reply(struct nf_conn *conntrack,
+ const struct nf_conntrack_tuple *newreply);
+
+/* Is this tuple taken? (ignoring any belonging to the given
+ conntrack). */
+extern int
+nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
+ const struct nf_conn *ignored_conntrack);
+
+/* Return conntrack_info and tuple hash for given skb. */
+static inline struct nf_conn *
+nf_ct_get(const struct sk_buff *skb, enum ip_conntrack_info *ctinfo)
+{
+ *ctinfo = skb->nfctinfo;
+ return (struct nf_conn *)skb->nfct;
+}
+
+/* decrement reference count on a conntrack */
+static inline void nf_ct_put(struct nf_conn *ct)
+{
+ NF_CT_ASSERT(ct);
+ nf_conntrack_put(&ct->ct_general);
+}
+
+/* call to create an explicit dependency on nf_conntrack. */
+extern void need_nf_conntrack(void);
+
+extern int nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
+ const struct nf_conntrack_tuple *orig);
+
+extern void __nf_ct_refresh_acct(struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ const struct sk_buff *skb,
+ unsigned long extra_jiffies,
+ int do_acct);
+
+/* Refresh conntrack for this many jiffies and do accounting */
+static inline void nf_ct_refresh_acct(struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ const struct sk_buff *skb,
+ unsigned long extra_jiffies)
+{
+ __nf_ct_refresh_acct(ct, ctinfo, skb, extra_jiffies, 1);
+}
+
+/* Refresh conntrack for this many jiffies */
+static inline void nf_ct_refresh(struct nf_conn *ct,
+ const struct sk_buff *skb,
+ unsigned long extra_jiffies)
+{
+ __nf_ct_refresh_acct(ct, 0, skb, extra_jiffies, 0);
+}
+
+/* These are for NAT. Icky. */
+/* Update TCP window tracking data when NAT mangles the packet */
+extern void nf_conntrack_tcp_update(struct sk_buff *skb,
+ unsigned int dataoff,
+ struct nf_conn *conntrack,
+ int dir);
+
+/* Call me when a conntrack is destroyed. */
+extern void (*nf_conntrack_destroyed)(struct nf_conn *conntrack);
+
+/* Fake conntrack entry for untracked connections */
+extern struct nf_conn nf_conntrack_untracked;
+
+extern int nf_ct_no_defrag;
+
+/* Iterate over all conntracks: if iter returns true, it's deleted. */
+extern void
+nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data), void *data);
+extern void nf_conntrack_free(struct nf_conn *ct);
+extern struct nf_conn *
+nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
+ const struct nf_conntrack_tuple *repl);
+
+/* It's confirmed if it is, or has been in the hash table. */
+static inline int nf_ct_is_confirmed(struct nf_conn *ct)
+{
+ return test_bit(IPS_CONFIRMED_BIT, &ct->status);
+}
+
+static inline int nf_ct_is_dying(struct nf_conn *ct)
+{
+ return test_bit(IPS_DYING_BIT, &ct->status);
+}
+
+extern unsigned int nf_conntrack_htable_size;
+
+#define NF_CT_STAT_INC(count) (__get_cpu_var(nf_conntrack_stat).count++)
+
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+#include <linux/notifier.h>
+#include <linux/interrupt.h>
+
+struct nf_conntrack_ecache {
+ struct nf_conn *ct;
+ unsigned int events;
+};
+DECLARE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache);
+
+#define CONNTRACK_ECACHE(x) (__get_cpu_var(nf_conntrack_ecache).x)
+
+extern struct notifier_block *nf_conntrack_chain;
+extern struct notifier_block *nf_conntrack_expect_chain;
+
+static inline int nf_conntrack_register_notifier(struct notifier_block *nb)
+{
+ return notifier_chain_register(&nf_conntrack_chain, nb);
+}
+
+static inline int nf_conntrack_unregister_notifier(struct notifier_block *nb)
+{
+ return notifier_chain_unregister(&nf_conntrack_chain, nb);
+}
+
+static inline int
+nf_conntrack_expect_register_notifier(struct notifier_block *nb)
+{
+ return notifier_chain_register(&nf_conntrack_expect_chain, nb);
+}
+
+static inline int
+nf_conntrack_expect_unregister_notifier(struct notifier_block *nb)
+{
+ return notifier_chain_unregister(&nf_conntrack_expect_chain, nb);
+}
+
+extern void nf_ct_deliver_cached_events(const struct nf_conn *ct);
+extern void __nf_ct_event_cache_init(struct nf_conn *ct);
+
+static inline void
+nf_conntrack_event_cache(enum ip_conntrack_events event,
+ const struct sk_buff *skb)
+{
+ struct nf_conn *ct = (struct nf_conn *)skb->nfct;
+ struct nf_conntrack_ecache *ecache;
+
+ local_bh_disable();
+ ecache = &__get_cpu_var(nf_conntrack_ecache);
+ if (ct != ecache->ct)
+ __nf_ct_event_cache_init(ct);
+ ecache->events |= event;
+ local_bh_enable();
+}
+
+static inline void nf_conntrack_event(enum ip_conntrack_events event,
+ struct nf_conn *ct)
+{
+ if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct))
+ notifier_call_chain(&nf_conntrack_chain, event, ct);
+}
+
+static inline void
+nf_conntrack_expect_event(enum ip_conntrack_expect_events event,
+ struct nf_conntrack_expect *exp)
+{
+ notifier_call_chain(&nf_conntrack_expect_chain, event, exp);
+}
+#else /* CONFIG_NF_CONNTRACK_EVENTS */
+static inline void nf_conntrack_event_cache(enum ip_conntrack_events event,
+ const struct sk_buff *skb) {}
+static inline void nf_conntrack_event(enum ip_conntrack_events event,
+ struct nf_conn *ct) {}
+static inline void nf_ct_deliver_cached_events(const struct nf_conn *ct) {}
+static inline void
+nf_conntrack_expect_event(enum ip_conntrack_expect_events event,
+ struct nf_conntrack_expect *exp) {}
+#endif /* CONFIG_NF_CONNTRACK_EVENTS */
+
+/* no helper, no nat */
+#define NF_CT_F_BASIC 0
+/* for helper */
+#define NF_CT_F_HELP 1
+/* for nat. */
+#define NF_CT_F_NAT 2
+#define NF_CT_F_NUM 4
+
+extern int
+nf_conntrack_register_cache(u_int32_t features, const char *name, size_t size,
+ int (*init_conntrack)(struct nf_conn *, u_int32_t));
+extern void
+nf_conntrack_unregister_cache(u_int32_t features);
+
+#endif /* __KERNEL__ */
+#endif /* _NF_CONNTRACK_H */
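[Editor's note, not part of the patch] As a hedged illustration of how the refresh and event-cache helpers above are meant to be used from a protocol tracker's packet() callback; the function name and the 30 second timeout are made up:

#include <linux/jiffies.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_conntrack.h>

static int example_packet(struct nf_conn *conntrack,
                          const struct sk_buff *skb,
                          unsigned int dataoff,
                          enum ip_conntrack_info ctinfo,
                          int pf, unsigned int hooknum)
{
        /* Extend the timeout (with accounting) and note that the
         * per-proto state changed; events are delivered at confirm time. */
        nf_ct_refresh_acct(conntrack, ctinfo, skb, 30 * HZ);
        nf_conntrack_event_cache(IPCT_PROTOINFO, skb);
        return NF_ACCEPT;
}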
diff --git a/include/net/netfilter/nf_conntrack_compat.h b/include/net/netfilter/nf_conntrack_compat.h
new file mode 100644
index 00000000000..3cac19fb364
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_compat.h
@@ -0,0 +1,108 @@
+#ifndef _NF_CONNTRACK_COMPAT_H
+#define _NF_CONNTRACK_COMPAT_H
+
+#ifdef __KERNEL__
+
+#if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE)
+
+#include <linux/netfilter_ipv4/ip_conntrack.h>
+
+#ifdef CONFIG_IP_NF_CONNTRACK_MARK
+static inline u_int32_t *nf_ct_get_mark(const struct sk_buff *skb,
+ u_int32_t *ctinfo)
+{
+ struct ip_conntrack *ct = ip_conntrack_get(skb, ctinfo);
+
+ if (ct)
+ return &ct->mark;
+ else
+ return NULL;
+}
+#endif /* CONFIG_IP_NF_CONNTRACK_MARK */
+
+#ifdef CONFIG_IP_NF_CT_ACCT
+static inline struct ip_conntrack_counter *
+nf_ct_get_counters(const struct sk_buff *skb)
+{
+ enum ip_conntrack_info ctinfo;
+ struct ip_conntrack *ct = ip_conntrack_get(skb, &ctinfo);
+
+ if (ct)
+ return ct->counters;
+ else
+ return NULL;
+}
+#endif /* CONFIG_IP_NF_CT_ACCT */
+
+static inline int nf_ct_is_untracked(const struct sk_buff *skb)
+{
+ return (skb->nfct == &ip_conntrack_untracked.ct_general);
+}
+
+static inline void nf_ct_untrack(struct sk_buff *skb)
+{
+ skb->nfct = &ip_conntrack_untracked.ct_general;
+}
+
+static inline int nf_ct_get_ctinfo(const struct sk_buff *skb,
+ enum ip_conntrack_info *ctinfo)
+{
+ struct ip_conntrack *ct = ip_conntrack_get(skb, ctinfo);
+ return (ct != NULL);
+}
+
+#else /* CONFIG_IP_NF_CONNTRACK */
+
+#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
+#include <net/netfilter/nf_conntrack.h>
+
+#ifdef CONFIG_NF_CONNTRACK_MARK
+
+static inline u_int32_t *nf_ct_get_mark(const struct sk_buff *skb,
+ u_int32_t *ctinfo)
+{
+ struct nf_conn *ct = nf_ct_get(skb, ctinfo);
+
+ if (ct)
+ return &ct->mark;
+ else
+ return NULL;
+}
+#endif /* CONFIG_NF_CONNTRACK_MARK */
+
+#ifdef CONFIG_NF_CT_ACCT
+static inline struct ip_conntrack_counter *
+nf_ct_get_counters(const struct sk_buff *skb)
+{
+ enum ip_conntrack_info ctinfo;
+ struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
+
+ if (ct)
+ return ct->counters;
+ else
+ return NULL;
+}
+#endif /* CONFIG_NF_CT_ACCT */
+
+static inline int nf_ct_is_untracked(const struct sk_buff *skb)
+{
+ return (skb->nfct == &nf_conntrack_untracked.ct_general);
+}
+
+static inline void nf_ct_untrack(struct sk_buff *skb)
+{
+ skb->nfct = &nf_conntrack_untracked.ct_general;
+}
+
+static inline int nf_ct_get_ctinfo(const struct sk_buff *skb,
+ enum ip_conntrack_info *ctinfo)
+{
+ struct nf_conn *ct = nf_ct_get(skb, ctinfo);
+ return (ct != NULL);
+}
+
+#endif /* CONFIG_IP_NF_CONNTRACK */
+
+#endif /* __KERNEL__ */
+
+#endif /* _NF_CONNTRACK_COMPAT_H */
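[Editor's note, not part of the patch] The point of the compat layer is that callers include only this header and get working definitions whether CONFIG_IP_NF_CONNTRACK or CONFIG_NF_CONNTRACK is built. A hedged sketch with a hypothetical caller:

#include <linux/skbuff.h>
#include <net/netfilter/nf_conntrack_compat.h>

/* Returns non-zero when the packet has a (non-untracked) conntrack
 * entry attached, regardless of which conntrack backend is in use. */
static inline int example_is_tracked(const struct sk_buff *skb)
{
        enum ip_conntrack_info ctinfo;

        if (nf_ct_is_untracked(skb))
                return 0;
        return nf_ct_get_ctinfo(skb, &ctinfo);
}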
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
new file mode 100644
index 00000000000..da254525a4c
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -0,0 +1,76 @@
+/*
+ * This header is used to share core functionality between the
+ * standalone connection tracking module, and the compatibility layer's use
+ * of connection tracking.
+ *
+ * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ * - generalize L3 protocol dependent part.
+ *
+ * Derived from include/linux/netfilter_ipv4/ip_conntrack_core.h
+ */
+
+#ifndef _NF_CONNTRACK_CORE_H
+#define _NF_CONNTRACK_CORE_H
+
+#include <linux/netfilter.h>
+
+/* This header is used to share core functionality between the
+ standalone connection tracking module, and the compatibility layer's use
+ of connection tracking. */
+extern unsigned int nf_conntrack_in(int pf,
+ unsigned int hooknum,
+ struct sk_buff **pskb);
+
+extern int nf_conntrack_init(void);
+extern void nf_conntrack_cleanup(void);
+
+struct nf_conntrack_l3proto;
+extern struct nf_conntrack_l3proto *nf_ct_find_l3proto(u_int16_t pf);
+/* Like above, but you already have conntrack read lock. */
+extern struct nf_conntrack_l3proto *__nf_ct_find_l3proto(u_int16_t l3proto);
+
+struct nf_conntrack_protocol;
+
+extern int
+nf_ct_get_tuple(const struct sk_buff *skb,
+ unsigned int nhoff,
+ unsigned int dataoff,
+ u_int16_t l3num,
+ u_int8_t protonum,
+ struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_l3proto *l3proto,
+ const struct nf_conntrack_protocol *protocol);
+
+extern int
+nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
+ const struct nf_conntrack_tuple *orig,
+ const struct nf_conntrack_l3proto *l3proto,
+ const struct nf_conntrack_protocol *protocol);
+
+/* Find a connection corresponding to a tuple. */
+extern struct nf_conntrack_tuple_hash *
+nf_conntrack_find_get(const struct nf_conntrack_tuple *tuple,
+ const struct nf_conn *ignored_conntrack);
+
+extern int __nf_conntrack_confirm(struct sk_buff **pskb);
+
+/* Confirm a connection: returns NF_DROP if packet must be dropped. */
+static inline int nf_conntrack_confirm(struct sk_buff **pskb)
+{
+ struct nf_conn *ct = (struct nf_conn *)(*pskb)->nfct;
+ int ret = NF_ACCEPT;
+
+ if (ct) {
+ if (!nf_ct_is_confirmed(ct))
+ ret = __nf_conntrack_confirm(pskb);
+ nf_ct_deliver_cached_events(ct);
+ }
+ return ret;
+}
+
+extern void __nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb);
+
+extern struct list_head *nf_conntrack_hash;
+extern struct list_head nf_conntrack_expect_list;
+extern rwlock_t nf_conntrack_lock;
+#endif /* _NF_CONNTRACK_CORE_H */
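[Editor's note, not part of the patch] nf_conntrack_confirm() is designed to run in the very last hook a packet traverses, so the entry only enters the hash table once the packet is sure to leave the box. A hedged sketch of such a hook function (registration omitted, names made up):

#include <linux/netdevice.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_conntrack_core.h>

static unsigned int example_confirm_hook(unsigned int hooknum,
                                         struct sk_buff **pskb,
                                         const struct net_device *in,
                                         const struct net_device *out,
                                         int (*okfn)(struct sk_buff *))
{
        /* Moves a still-unconfirmed entry into the hash table and
         * delivers cached events; returns NF_DROP on failure. */
        return nf_conntrack_confirm(pskb);
}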
diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h
new file mode 100644
index 00000000000..5a66b2a3a62
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_helper.h
@@ -0,0 +1,51 @@
+/*
+ * connection tracking helpers.
+ *
+ * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ * - generalize L3 protocol dependent part.
+ *
+ * Derived from include/linux/netfilter_ipv4/ip_conntrack_helper.h
+ */
+
+#ifndef _NF_CONNTRACK_HELPER_H
+#define _NF_CONNTRACK_HELPER_H
+#include <net/netfilter/nf_conntrack.h>
+
+struct module;
+
+struct nf_conntrack_helper
+{
+ struct list_head list; /* Internal use. */
+
+ const char *name; /* name of the module */
+ struct module *me; /* pointer to self */
+ unsigned int max_expected; /* Maximum number of concurrent
+ * expected connections */
+ unsigned int timeout; /* timeout for expecteds */
+
+ /* Mask of things we will help (compared against server response) */
+ struct nf_conntrack_tuple tuple;
+ struct nf_conntrack_tuple mask;
+
+ /* Function to call when data passes; return verdict, or -1 to
+ invalidate. */
+ int (*help)(struct sk_buff **pskb,
+ unsigned int protoff,
+ struct nf_conn *ct,
+ enum ip_conntrack_info conntrackinfo);
+};
+
+extern int nf_conntrack_helper_register(struct nf_conntrack_helper *);
+extern void nf_conntrack_helper_unregister(struct nf_conntrack_helper *);
+
+/* Allocate space for an expectation: this is mandatory before calling
+ nf_conntrack_expect_related. You will have to call put afterwards. */
+extern struct nf_conntrack_expect *
+nf_conntrack_expect_alloc(struct nf_conn *master);
+extern void nf_conntrack_expect_put(struct nf_conntrack_expect *exp);
+
+/* Add an expected connection: can have more than one per connection */
+extern int nf_conntrack_expect_related(struct nf_conntrack_expect *exp);
+extern void nf_conntrack_unexpect_related(struct nf_conntrack_expect *exp);
+
+#endif /*_NF_CONNTRACK_HELPER_H*/
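[Editor's note, not part of the patch] A hedged sketch of what registering a helper looks like; the names, timeout, and empty help body are hypothetical, and a real helper would also fill in the tuple/mask it wants to match:

#include <linux/module.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_conntrack_helper.h>

static int example_help(struct sk_buff **pskb, unsigned int protoff,
                        struct nf_conn *ct, enum ip_conntrack_info ctinfo)
{
        /* Parse the payload, set up expectations, return a verdict. */
        return NF_ACCEPT;
}

static struct nf_conntrack_helper example_helper = {
        .name           = "example",
        .me             = THIS_MODULE,
        .max_expected   = 1,
        .timeout        = 5 * 60,       /* seconds */
        .help           = example_help,
        /* .tuple / .mask left zeroed here for brevity */
};

static int __init example_init(void)
{
        return nf_conntrack_helper_register(&example_helper);
}

static void __exit example_exit(void)
{
        nf_conntrack_helper_unregister(&example_helper);
}

module_init(example_init);
module_exit(example_exit);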
diff --git a/include/net/netfilter/nf_conntrack_l3proto.h b/include/net/netfilter/nf_conntrack_l3proto.h
new file mode 100644
index 00000000000..01663e5b33d
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_l3proto.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C)2003,2004 USAGI/WIDE Project
+ *
+ * Header for use in defining a given L3 protocol for connection tracking.
+ *
+ * Author:
+ * Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ *
+ * Derived from include/netfilter_ipv4/ip_conntrack_protocol.h
+ */
+
+#ifndef _NF_CONNTRACK_L3PROTO_H
+#define _NF_CONNTRACK_L3PROTO_H
+#include <linux/seq_file.h>
+#include <net/netfilter/nf_conntrack.h>
+
+struct nf_conntrack_l3proto
+{
+ /* Next pointer. */
+ struct list_head list;
+
+ /* L3 Protocol Family number. ex) PF_INET */
+ u_int16_t l3proto;
+
+ /* Protocol name */
+ const char *name;
+
+ /*
+ * Try to fill in the third arg: nhoff is offset of l3 proto
+ * hdr. Return true if possible.
+ */
+ int (*pkt_to_tuple)(const struct sk_buff *skb, unsigned int nhoff,
+ struct nf_conntrack_tuple *tuple);
+
+ /*
+ * Invert the per-proto part of the tuple: ie. turn xmit into reply.
+ * Some packets can't be inverted: return 0 in that case.
+ */
+ int (*invert_tuple)(struct nf_conntrack_tuple *inverse,
+ const struct nf_conntrack_tuple *orig);
+
+ /* Print out the per-protocol part of the tuple. */
+ int (*print_tuple)(struct seq_file *s,
+ const struct nf_conntrack_tuple *);
+
+ /* Print out the private part of the conntrack. */
+ int (*print_conntrack)(struct seq_file *s, const struct nf_conn *);
+
+ /* Returns verdict for packet, or -1 for invalid. */
+ int (*packet)(struct nf_conn *conntrack,
+ const struct sk_buff *skb,
+ enum ip_conntrack_info ctinfo);
+
+ /*
+ * Called when a new connection for this protocol is found;
+ * returns TRUE if it's OK. If so, packet() is called next.
+ */
+ int (*new)(struct nf_conn *conntrack, const struct sk_buff *skb);
+
+ /* Called when a conntrack entry is destroyed */
+ void (*destroy)(struct nf_conn *conntrack);
+
+ /*
+ * Called before tracking.
+ * *dataoff: offset of protocol header (TCP, UDP,...) in *pskb
+ * *protonum: protocol number
+ */
+ int (*prepare)(struct sk_buff **pskb, unsigned int hooknum,
+ unsigned int *dataoff, u_int8_t *protonum);
+
+ u_int32_t (*get_features)(const struct nf_conntrack_tuple *tuple);
+
+ /* Module (if any) which this is connected to. */
+ struct module *me;
+};
+
+extern struct nf_conntrack_l3proto *nf_ct_l3protos[AF_MAX];
+
+/* Protocol registration. */
+extern int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto);
+extern void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto);
+
+static inline struct nf_conntrack_l3proto *
+nf_ct_find_l3proto(u_int16_t l3proto)
+{
+ return nf_ct_l3protos[l3proto];
+}
+
+/* Existing built-in protocols */
+extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4;
+extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6;
+extern struct nf_conntrack_l3proto nf_conntrack_generic_l3proto;
+#endif /*_NF_CONNTRACK_L3PROTO_H*/
diff --git a/include/net/netfilter/nf_conntrack_protocol.h b/include/net/netfilter/nf_conntrack_protocol.h
new file mode 100644
index 00000000000..b3afda35397
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_protocol.h
@@ -0,0 +1,105 @@
+/*
+ * Header for use in defining a given protocol for connection tracking.
+ *
+ * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ * - generalized L3 protocol dependent part.
+ *
+ * Derived from include/linux/netfilter_ipv4/ip_conntrack_protocol.h
+ */
+
+#ifndef _NF_CONNTRACK_PROTOCOL_H
+#define _NF_CONNTRACK_PROTOCOL_H
+#include <net/netfilter/nf_conntrack.h>
+
+struct seq_file;
+
+struct nf_conntrack_protocol
+{
+ /* Next pointer. */
+ struct list_head list;
+
+ /* L3 Protocol number. */
+ u_int16_t l3proto;
+
+ /* Protocol number. */
+ u_int8_t proto;
+
+ /* Protocol name */
+ const char *name;
+
+ /* Try to fill in the third arg: dataoff is offset past network protocol
+ hdr. Return true if possible. */
+ int (*pkt_to_tuple)(const struct sk_buff *skb,
+ unsigned int dataoff,
+ struct nf_conntrack_tuple *tuple);
+
+ /* Invert the per-proto part of the tuple: ie. turn xmit into reply.
+ * Some packets can't be inverted: return 0 in that case.
+ */
+ int (*invert_tuple)(struct nf_conntrack_tuple *inverse,
+ const struct nf_conntrack_tuple *orig);
+
+ /* Print out the per-protocol part of the tuple. Return like seq_* */
+ int (*print_tuple)(struct seq_file *s,
+ const struct nf_conntrack_tuple *);
+
+ /* Print out the private part of the conntrack. */
+ int (*print_conntrack)(struct seq_file *s, const struct nf_conn *);
+
+ /* Returns verdict for packet, or -1 for invalid. */
+ int (*packet)(struct nf_conn *conntrack,
+ const struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ int pf,
+ unsigned int hooknum);
+
+ /* Called when a new connection for this protocol is found;
+ * returns TRUE if it's OK. If so, packet() is called next. */
+ int (*new)(struct nf_conn *conntrack, const struct sk_buff *skb,
+ unsigned int dataoff);
+
+ /* Called when a conntrack entry is destroyed */
+ void (*destroy)(struct nf_conn *conntrack);
+
+ int (*error)(struct sk_buff *skb, unsigned int dataoff,
+ enum ip_conntrack_info *ctinfo,
+ int pf, unsigned int hooknum);
+
+ /* Module (if any) which this is connected to. */
+ struct module *me;
+};
+
+/* Existing built-in protocols */
+extern struct nf_conntrack_protocol nf_conntrack_protocol_tcp6;
+extern struct nf_conntrack_protocol nf_conntrack_protocol_udp4;
+extern struct nf_conntrack_protocol nf_conntrack_protocol_udp6;
+extern struct nf_conntrack_protocol nf_conntrack_generic_protocol;
+
+#define MAX_NF_CT_PROTO 256
+extern struct nf_conntrack_protocol **nf_ct_protos[PF_MAX];
+
+extern struct nf_conntrack_protocol *
+nf_ct_find_proto(u_int16_t l3proto, u_int8_t protocol);
+
+/* Protocol registration. */
+extern int nf_conntrack_protocol_register(struct nf_conntrack_protocol *proto);
+extern void nf_conntrack_protocol_unregister(struct nf_conntrack_protocol *proto);
+
+/* Log invalid packets */
+extern unsigned int nf_ct_log_invalid;
+
+#ifdef CONFIG_SYSCTL
+#ifdef DEBUG_INVALID_PACKETS
+#define LOG_INVALID(proto) \
+ (nf_ct_log_invalid == (proto) || nf_ct_log_invalid == IPPROTO_RAW)
+#else
+#define LOG_INVALID(proto) \
+ ((nf_ct_log_invalid == (proto) || nf_ct_log_invalid == IPPROTO_RAW) \
+ && net_ratelimit())
+#endif
+#else
+#define LOG_INVALID(proto) 0
+#endif /* CONFIG_SYSCTL */
+
+#endif /*_NF_CONNTRACK_PROTOCOL_H*/
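A minimal dispatch sketch for the lookup helper declared above; print_one_tuple() is a hypothetical caller, and the code assumes (as the extern declarations suggest) that unknown protocols fall back to the generic tracker, so the returned handler is never NULL:

#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_protocol.h>

static int print_one_tuple(struct seq_file *s,
                           const struct nf_conntrack_tuple *tuple)
{
        struct nf_conntrack_protocol *proto;

        /* Select the per-protocol handler by L3 family and L4 protocol
         * number, then delegate the protocol-specific formatting to it. */
        proto = nf_ct_find_proto(tuple->src.l3num, tuple->dst.protonum);
        return proto->print_tuple(s, tuple);
}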
diff --git a/include/net/netfilter/nf_conntrack_tuple.h b/include/net/netfilter/nf_conntrack_tuple.h
new file mode 100644
index 00000000000..14ce790e5c6
--- /dev/null
+++ b/include/net/netfilter/nf_conntrack_tuple.h
@@ -0,0 +1,190 @@
+/*
+ * Definitions and Declarations for tuple.
+ *
+ * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ * - generalize L3 protocol dependent part.
+ *
+ * Derived from include/linux/netfilter_ipv4/ip_conntrack_tuple.h
+ */
+
+#ifndef _NF_CONNTRACK_TUPLE_H
+#define _NF_CONNTRACK_TUPLE_H
+
+#include <linux/netfilter/nf_conntrack_tuple_common.h>
+
+/* A `tuple' is a structure containing the information to uniquely
+ identify a connection. ie. if two packets have the same tuple, they
+ are in the same connection; if not, they are not.
+
+ We divide the structure along "manipulatable" and
+ "non-manipulatable" lines, for the benefit of the NAT code.
+*/
+
+#define NF_CT_TUPLE_L3SIZE 4
+
+/* The l3 protocol-specific manipulable parts of the tuple: always in
+ network order! */
+union nf_conntrack_man_l3proto {
+ u_int32_t all[NF_CT_TUPLE_L3SIZE];
+ u_int32_t ip;
+ u_int32_t ip6[4];
+};
+
+/* The protocol-specific manipulable parts of the tuple: always in
+ network order! */
+union nf_conntrack_man_proto
+{
+ /* Add other protocols here. */
+ u_int16_t all;
+
+ struct {
+ u_int16_t port;
+ } tcp;
+ struct {
+ u_int16_t port;
+ } udp;
+ struct {
+ u_int16_t id;
+ } icmp;
+ struct {
+ u_int16_t port;
+ } sctp;
+};
+
+/* The manipulable part of the tuple. */
+struct nf_conntrack_man
+{
+ union nf_conntrack_man_l3proto u3;
+ union nf_conntrack_man_proto u;
+ /* Layer 3 protocol */
+ u_int16_t l3num;
+};
+
+/* This contains the information to distinguish a connection. */
+struct nf_conntrack_tuple
+{
+ struct nf_conntrack_man src;
+
+ /* These are the parts of the tuple which are fixed. */
+ struct {
+ union {
+ u_int32_t all[NF_CT_TUPLE_L3SIZE];
+ u_int32_t ip;
+ u_int32_t ip6[4];
+ } u3;
+ union {
+ /* Add other protocols here. */
+ u_int16_t all;
+
+ struct {
+ u_int16_t port;
+ } tcp;
+ struct {
+ u_int16_t port;
+ } udp;
+ struct {
+ u_int8_t type, code;
+ } icmp;
+ struct {
+ u_int16_t port;
+ } sctp;
+ } u;
+
+ /* The protocol. */
+ u_int8_t protonum;
+
+ /* The direction (for tuplehash) */
+ u_int8_t dir;
+ } dst;
+};
+
+/* This is optimized compared to a memset of the whole structure. Everything we
+ * really care about is the source/destination unions */
+#define NF_CT_TUPLE_U_BLANK(tuple) \
+ do { \
+ (tuple)->src.u.all = 0; \
+ (tuple)->dst.u.all = 0; \
+ memset(&(tuple)->src.u3, 0, sizeof((tuple)->src.u3)); \
+ memset(&(tuple)->dst.u3, 0, sizeof((tuple)->dst.u3)); \
+ } while (0)
+
+#ifdef __KERNEL__
+
+#define NF_CT_DUMP_TUPLE(tp) \
+DEBUGP("tuple %p: %u %u %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x %hu -> %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x %hu\n", \
+ (tp), (tp)->src.l3num, (tp)->dst.protonum, \
+ NIP6(*(struct in6_addr *)(tp)->src.u3.all), ntohs((tp)->src.u.all), \
+ NIP6(*(struct in6_addr *)(tp)->dst.u3.all), ntohs((tp)->dst.u.all))
+
+/* If we're the first tuple, it's the original dir. */
+#define NF_CT_DIRECTION(h) \
+ ((enum ip_conntrack_dir)(h)->tuple.dst.dir)
+
+/* Connections have two entries in the hash table: one for each way */
+struct nf_conntrack_tuple_hash
+{
+ struct list_head list;
+
+ struct nf_conntrack_tuple tuple;
+};
+
+#endif /* __KERNEL__ */
+
+static inline int nf_ct_tuple_src_equal(const struct nf_conntrack_tuple *t1,
+ const struct nf_conntrack_tuple *t2)
+{
+ return (t1->src.u3.all[0] == t2->src.u3.all[0] &&
+ t1->src.u3.all[1] == t2->src.u3.all[1] &&
+ t1->src.u3.all[2] == t2->src.u3.all[2] &&
+ t1->src.u3.all[3] == t2->src.u3.all[3] &&
+ t1->src.u.all == t2->src.u.all &&
+ t1->src.l3num == t2->src.l3num &&
+ t1->dst.protonum == t2->dst.protonum);
+}
+
+static inline int nf_ct_tuple_dst_equal(const struct nf_conntrack_tuple *t1,
+ const struct nf_conntrack_tuple *t2)
+{
+ return (t1->dst.u3.all[0] == t2->dst.u3.all[0] &&
+ t1->dst.u3.all[1] == t2->dst.u3.all[1] &&
+ t1->dst.u3.all[2] == t2->dst.u3.all[2] &&
+ t1->dst.u3.all[3] == t2->dst.u3.all[3] &&
+ t1->dst.u.all == t2->dst.u.all &&
+ t1->src.l3num == t2->src.l3num &&
+ t1->dst.protonum == t2->dst.protonum);
+}
+
+static inline int nf_ct_tuple_equal(const struct nf_conntrack_tuple *t1,
+ const struct nf_conntrack_tuple *t2)
+{
+ return nf_ct_tuple_src_equal(t1, t2) && nf_ct_tuple_dst_equal(t1, t2);
+}
+
+static inline int nf_ct_tuple_mask_cmp(const struct nf_conntrack_tuple *t,
+ const struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_tuple *mask)
+{
+ int count = 0;
+
+ for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){
+ if ((t->src.u3.all[count] ^ tuple->src.u3.all[count]) &
+ mask->src.u3.all[count])
+ return 0;
+ }
+
+ for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){
+ if ((t->dst.u3.all[count] ^ tuple->dst.u3.all[count]) &
+ mask->dst.u3.all[count])
+ return 0;
+ }
+
+ if ((t->src.u.all ^ tuple->src.u.all) & mask->src.u.all ||
+ (t->dst.u.all ^ tuple->dst.u.all) & mask->dst.u.all ||
+ (t->src.l3num ^ tuple->src.l3num) & mask->src.l3num ||
+ (t->dst.protonum ^ tuple->dst.protonum) & mask->dst.protonum)
+ return 0;
+
+ return 1;
+}
+
+#endif /* _NF_CONNTRACK_TUPLE_H */
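A small, self-contained sketch of how the tuple layout and the equality helpers above fit together; the function name, addresses and ports are invented for illustration:

#include <linux/string.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <asm/byteorder.h>
#include <net/netfilter/nf_conntrack_tuple.h>

static int tuple_equal_example(void)
{
        struct nf_conntrack_tuple a, b;

        memset(&a, 0, sizeof(a));
        a.src.l3num      = AF_INET;
        a.src.u3.ip      = htonl(0xc0a80001);   /* 192.168.0.1 */
        a.src.u.tcp.port = htons(1024);
        a.dst.u3.ip      = htonl(0xc0a80002);   /* 192.168.0.2 */
        a.dst.u.tcp.port = htons(80);
        a.dst.protonum   = IPPROTO_TCP;

        b = a;
        return nf_ct_tuple_equal(&a, &b);       /* both halves match: 1 */
}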
diff --git a/include/net/netlink.h b/include/net/netlink.h
new file mode 100644
index 00000000000..640c26a90cf
--- /dev/null
+++ b/include/net/netlink.h
@@ -0,0 +1,883 @@
+#ifndef __NET_NETLINK_H
+#define __NET_NETLINK_H
+
+#include <linux/types.h>
+#include <linux/netlink.h>
+
+/* ========================================================================
+ * Netlink Messages and Attributes Interface (As Seen On TV)
+ * ------------------------------------------------------------------------
+ * Messages Interface
+ * ------------------------------------------------------------------------
+ *
+ * Message Format:
+ * <--- nlmsg_total_size(payload) --->
+ * <-- nlmsg_msg_size(payload) ->
+ * +----------+- - -+-------------+- - -+-------- - -
+ * | nlmsghdr | Pad | Payload | Pad | nlmsghdr
+ * +----------+- - -+-------------+- - -+-------- - -
+ * nlmsg_data(nlh)---^ ^
+ * nlmsg_next(nlh)-----------------------+
+ *
+ * Payload Format:
+ * <---------------------- nlmsg_len(nlh) --------------------->
+ * <------ hdrlen ------> <- nlmsg_attrlen(nlh, hdrlen) ->
+ * +----------------------+- - -+--------------------------------+
+ * | Family Header | Pad | Attributes |
+ * +----------------------+- - -+--------------------------------+
+ * nlmsg_attrdata(nlh, hdrlen)---^
+ *
+ * Data Structures:
+ * struct nlmsghdr netlink message header
+ *
+ * Message Construction:
+ * nlmsg_new() create a new netlink message
+ * nlmsg_put() add a netlink message to an skb
+ * nlmsg_put_answer() callback based nlmsg_put()
+ * nlmsg_end() finalize netlink message
+ * nlmsg_cancel() cancel message construction
+ * nlmsg_free() free a netlink message
+ *
+ * Message Sending:
+ * nlmsg_multicast() multicast message to several groups
+ * nlmsg_unicast() unicast a message to a single socket
+ *
+ * Message Length Calculations:
+ * nlmsg_msg_size(payload) length of message w/o padding
+ * nlmsg_total_size(payload) length of message w/ padding
+ * nlmsg_padlen(payload) length of padding at tail
+ *
+ * Message Payload Access:
+ * nlmsg_data(nlh) head of message payload
+ * nlmsg_len(nlh) length of message payload
+ * nlmsg_attrdata(nlh, hdrlen) head of attributes data
+ * nlmsg_attrlen(nlh, hdrlen) length of attributes data
+ *
+ * Message Parsing:
+ * nlmsg_ok(nlh, remaining) does nlh fit into remaining bytes?
+ * nlmsg_next(nlh, remaining) get next netlink message
+ * nlmsg_parse() parse attributes of a message
+ * nlmsg_find_attr() find an attribute in a message
+ * nlmsg_for_each_msg() loop over all messages
+ * nlmsg_validate() validate netlink message incl. attrs
+ * nlmsg_for_each_attr() loop over all attributes
+ *
+ * ------------------------------------------------------------------------
+ * Attributes Interface
+ * ------------------------------------------------------------------------
+ *
+ * Attribute Format:
+ * <------- nla_total_size(payload) ------->
+ * <---- nla_attr_size(payload) ----->
+ * +----------+- - -+- - - - - - - - - +- - -+-------- - -
+ * | Header | Pad | Payload | Pad | Header
+ * +----------+- - -+- - - - - - - - - +- - -+-------- - -
+ * <- nla_len(nla) -> ^
+ * nla_data(nla)----^ |
+ * nla_next(nla)-----------------------------'
+ *
+ * Data Structures:
+ * struct nlattr netlink attribute header
+ *
+ * Attribute Construction:
+ * nla_reserve(skb, type, len) reserve skb tailroom for an attribute
+ * nla_put(skb, type, len, data) add attribute to skb
+ *
+ * Attribute Construction for Basic Types:
+ * nla_put_u8(skb, type, value) add u8 attribute to skb
+ * nla_put_u16(skb, type, value) add u16 attribute to skb
+ * nla_put_u32(skb, type, value) add u32 attribute to skb
+ * nla_put_u64(skb, type, value) add u64 attribute to skb
+ * nla_put_string(skb, type, str) add string attribute to skb
+ * nla_put_flag(skb, type) add flag attribute to skb
+ * nla_put_msecs(skb, type, jiffies) add msecs attribute to skb
+ *
+ * Exceptions Based Attribute Construction:
+ * NLA_PUT(skb, type, len, data) add attribute to skb
+ * NLA_PUT_U8(skb, type, value) add u8 attribute to skb
+ * NLA_PUT_U16(skb, type, value) add u16 attribute to skb
+ * NLA_PUT_U32(skb, type, value) add u32 attribute to skb
+ * NLA_PUT_U64(skb, type, value) add u64 attribute to skb
+ * NLA_PUT_STRING(skb, type, str) add string attribute to skb
+ * NLA_PUT_FLAG(skb, type) add flag attribute to skb
+ * NLA_PUT_MSECS(skb, type, jiffies) add msecs attribute to skb
+ *
+ * The meaning of these functions is equal to their lower case
+ * variants but they jump to the label nla_put_failure in case
+ * of a failure.
+ *
+ * Nested Attributes Construction:
+ * nla_nest_start(skb, type) start a nested attribute
+ * nla_nest_end(skb, nla) finalize a nested attribute
+ * nla_nest_cancel(skb, nla) cancel nested attribute construction
+ *
+ * Attribute Length Calculations:
+ * nla_attr_size(payload) length of attribute w/o padding
+ * nla_total_size(payload) length of attribute w/ padding
+ * nla_padlen(payload) length of padding
+ *
+ * Attribute Payload Access:
+ * nla_data(nla) head of attribute payload
+ * nla_len(nla) length of attribute payload
+ *
+ * Attribute Payload Access for Basic Types:
+ * nla_get_u8(nla) get payload for a u8 attribute
+ * nla_get_u16(nla) get payload for a u16 attribute
+ * nla_get_u32(nla) get payload for a u32 attribute
+ * nla_get_u64(nla) get payload for a u64 attribute
+ * nla_get_flag(nla) return 1 if flag is true
+ * nla_get_msecs(nla) get payload for a msecs attribute
+ *
+ * Attribute Misc:
+ * nla_memcpy(dest, nla, count) copy attribute into memory
+ * nla_memcmp(nla, data, size) compare attribute with memory area
+ * nla_strlcpy(dst, nla, size) copy attribute to a sized string
+ * nla_strcmp(nla, str) compare attribute with string
+ *
+ * Attribute Parsing:
+ * nla_ok(nla, remaining) does nla fit into remaining bytes?
+ * nla_next(nla, remaining) get next netlink attribute
+ * nla_validate() validate a stream of attributes
+ * nla_find() find attribute in stream of attributes
+ * nla_parse() parse and validate stream of attrs
+ * nla_parse_nested() parse nested attributes
+ * nla_for_each_attr() loop over all attributes
+ *=========================================================================
+ */
+
+ /**
+ * Standard attribute types to specify validation policy
+ */
+enum {
+ NLA_UNSPEC,
+ NLA_U8,
+ NLA_U16,
+ NLA_U32,
+ NLA_U64,
+ NLA_STRING,
+ NLA_FLAG,
+ NLA_MSECS,
+ NLA_NESTED,
+ __NLA_TYPE_MAX,
+};
+
+#define NLA_TYPE_MAX (__NLA_TYPE_MAX - 1)
+
+/**
+ * struct nla_policy - attribute validation policy
+ * @type: Type of attribute or NLA_UNSPEC
+ * @minlen: Minimal length of payload required to be available
+ *
+ * Policies are defined as arrays of this struct, the array must be
+ * accessible by attribute type up to the highest identifier to be expected.
+ *
+ * Example:
+ * static struct nla_policy my_policy[ATTR_MAX+1] __read_mostly = {
+ * [ATTR_FOO] = { .type = NLA_U16 },
+ * [ATTR_BAR] = { .type = NLA_STRING },
+ * [ATTR_BAZ] = { .minlen = sizeof(struct mystruct) },
+ * };
+ */
+struct nla_policy {
+ u16 type;
+ u16 minlen;
+};
+
+extern void netlink_run_queue(struct sock *sk, unsigned int *qlen,
+ int (*cb)(struct sk_buff *,
+ struct nlmsghdr *, int *));
+extern void netlink_queue_skip(struct nlmsghdr *nlh,
+ struct sk_buff *skb);
+
+extern int nla_validate(struct nlattr *head, int len, int maxtype,
+ struct nla_policy *policy);
+extern int nla_parse(struct nlattr *tb[], int maxtype,
+ struct nlattr *head, int len,
+ struct nla_policy *policy);
+extern struct nlattr * nla_find(struct nlattr *head, int len, int attrtype);
+extern size_t nla_strlcpy(char *dst, const struct nlattr *nla,
+ size_t dstsize);
+extern int nla_memcpy(void *dest, struct nlattr *src, int count);
+extern int nla_memcmp(const struct nlattr *nla, const void *data,
+ size_t size);
+extern int nla_strcmp(const struct nlattr *nla, const char *str);
+extern struct nlattr * __nla_reserve(struct sk_buff *skb, int attrtype,
+ int attrlen);
+extern struct nlattr * nla_reserve(struct sk_buff *skb, int attrtype,
+ int attrlen);
+extern void __nla_put(struct sk_buff *skb, int attrtype,
+ int attrlen, const void *data);
+extern int nla_put(struct sk_buff *skb, int attrtype,
+ int attrlen, const void *data);
+
+/**************************************************************************
+ * Netlink Messages
+ **************************************************************************/
+
+/**
+ * nlmsg_msg_size - length of netlink message not including padding
+ * @payload: length of message payload
+ */
+static inline int nlmsg_msg_size(int payload)
+{
+ return NLMSG_HDRLEN + payload;
+}
+
+/**
+ * nlmsg_total_size - length of netlink message including padding
+ * @payload: length of message payload
+ */
+static inline int nlmsg_total_size(int payload)
+{
+ return NLMSG_ALIGN(nlmsg_msg_size(payload));
+}
+
+/**
+ * nlmsg_padlen - length of padding at the message's tail
+ * @payload: length of message payload
+ */
+static inline int nlmsg_padlen(int payload)
+{
+ return nlmsg_total_size(payload) - nlmsg_msg_size(payload);
+}
+
+/**
+ * nlmsg_data - head of message payload
+ * @nlh: netlink message header
+ */
+static inline void *nlmsg_data(const struct nlmsghdr *nlh)
+{
+ return (unsigned char *) nlh + NLMSG_HDRLEN;
+}
+
+/**
+ * nlmsg_len - length of message payload
+ * @nlh: netlink message header
+ */
+static inline int nlmsg_len(const struct nlmsghdr *nlh)
+{
+ return nlh->nlmsg_len - NLMSG_HDRLEN;
+}
+
+/**
+ * nlmsg_attrdata - head of attributes data
+ * @nlh: netlink message header
+ * @hdrlen: length of family specific header
+ */
+static inline struct nlattr *nlmsg_attrdata(const struct nlmsghdr *nlh,
+ int hdrlen)
+{
+ unsigned char *data = nlmsg_data(nlh);
+ return (struct nlattr *) (data + NLMSG_ALIGN(hdrlen));
+}
+
+/**
+ * nlmsg_attrlen - length of attributes data
+ * @nlh: netlink message header
+ * @hdrlen: length of family specific header
+ */
+static inline int nlmsg_attrlen(const struct nlmsghdr *nlh, int hdrlen)
+{
+ return nlmsg_len(nlh) - NLMSG_ALIGN(hdrlen);
+}
+
+/**
+ * nlmsg_ok - check if the netlink message fits into the remaining bytes
+ * @nlh: netlink message header
+ * @remaining: number of bytes remaining in message stream
+ */
+static inline int nlmsg_ok(const struct nlmsghdr *nlh, int remaining)
+{
+ return (remaining >= sizeof(struct nlmsghdr) &&
+ nlh->nlmsg_len >= sizeof(struct nlmsghdr) &&
+ nlh->nlmsg_len <= remaining);
+}
+
+/**
+ * nlmsg_next - next netlink message in message stream
+ * @nlh: netlink message header
+ * @remaining: number of bytes remaining in message stream
+ *
+ * Returns the next netlink message in the message stream and
+ * decrements remaining by the size of the current message.
+ */
+static inline struct nlmsghdr *nlmsg_next(struct nlmsghdr *nlh, int *remaining)
+{
+ int totlen = NLMSG_ALIGN(nlh->nlmsg_len);
+
+ *remaining -= totlen;
+
+ return (struct nlmsghdr *) ((unsigned char *) nlh + totlen);
+}
+
+/**
+ * nlmsg_parse - parse attributes of a netlink message
+ * @nlh: netlink message header
+ * @hdrlen: length of family specific header
+ * @tb: destination array with maxtype+1 elements
+ * @maxtype: maximum attribute type to be expected
+ * @policy: validation policy
+ *
+ * See nla_parse()
+ */
+static inline int nlmsg_parse(struct nlmsghdr *nlh, int hdrlen,
+ struct nlattr *tb[], int maxtype,
+ struct nla_policy *policy)
+{
+ if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
+ return -EINVAL;
+
+ return nla_parse(tb, maxtype, nlmsg_attrdata(nlh, hdrlen),
+ nlmsg_attrlen(nlh, hdrlen), policy);
+}
+
+/**
+ * nlmsg_find_attr - find a specific attribute in a netlink message
+ * @nlh: netlink message header
+ * @hdrlen: length of family specific header
+ * @attrtype: type of attribute to look for
+ *
+ * Returns the first attribute which matches the specified type.
+ */
+static inline struct nlattr *nlmsg_find_attr(struct nlmsghdr *nlh,
+ int hdrlen, int attrtype)
+{
+ return nla_find(nlmsg_attrdata(nlh, hdrlen),
+ nlmsg_attrlen(nlh, hdrlen), attrtype);
+}
+
+/**
+ * nlmsg_validate - validate a netlink message including attributes
+ * @nlh: netlink message header
+ * @hdrlen: length of family specific header
+ * @maxtype: maximum attribute type to be expected
+ * @policy: validation policy
+ */
+static inline int nlmsg_validate(struct nlmsghdr *nlh, int hdrlen, int maxtype,
+ struct nla_policy *policy)
+{
+ if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
+ return -EINVAL;
+
+ return nla_validate(nlmsg_attrdata(nlh, hdrlen),
+ nlmsg_attrlen(nlh, hdrlen), maxtype, policy);
+}
+
+/**
+ * nlmsg_for_each_attr - iterate over a stream of attributes
+ * @pos: loop counter, set to current attribute
+ * @nlh: netlink message header
+ * @hdrlen: length of family specific header
+ * @rem: initialized to len, holds bytes currently remaining in stream
+ */
+#define nlmsg_for_each_attr(pos, nlh, hdrlen, rem) \
+ nla_for_each_attr(pos, nlmsg_attrdata(nlh, hdrlen), \
+ nlmsg_attrlen(nlh, hdrlen), rem)
+
+#if 0
+/* FIXME: Enable once all users have been converted */
+
+/**
+ * __nlmsg_put - Add a new netlink message to an skb
+ * @skb: socket buffer to store message in
+ * @pid: netlink process id
+ * @seq: sequence number of message
+ * @type: message type
+ * @payload: length of message payload
+ * @flags: message flags
+ *
+ * The caller is responsible for ensuring that the skb provides enough
+ * tailroom for both the netlink header and payload.
+ */
+static inline struct nlmsghdr *__nlmsg_put(struct sk_buff *skb, u32 pid,
+ u32 seq, int type, int payload,
+ int flags)
+{
+ struct nlmsghdr *nlh;
+
+ nlh = (struct nlmsghdr *) skb_put(skb, nlmsg_total_size(payload));
+ nlh->nlmsg_type = type;
+ nlh->nlmsg_len = nlmsg_msg_size(payload);
+ nlh->nlmsg_flags = flags;
+ nlh->nlmsg_pid = pid;
+ nlh->nlmsg_seq = seq;
+
+ memset((unsigned char *) nlmsg_data(nlh) + payload, 0,
+ nlmsg_padlen(payload));
+
+ return nlh;
+}
+#endif
+
+/**
+ * nlmsg_put - Add a new netlink message to an skb
+ * @skb: socket buffer to store message in
+ * @pid: netlink process id
+ * @seq: sequence number of message
+ * @type: message type
+ * @payload: length of message payload
+ * @flags: message flags
+ *
+ * Returns NULL if the tailroom of the skb is insufficient to store
+ * the message header and payload.
+ */
+static inline struct nlmsghdr *nlmsg_put(struct sk_buff *skb, u32 pid, u32 seq,
+ int type, int payload, int flags)
+{
+ if (unlikely(skb_tailroom(skb) < nlmsg_total_size(payload)))
+ return NULL;
+
+ return __nlmsg_put(skb, pid, seq, type, payload, flags);
+}
+
+/**
+ * nlmsg_put_answer - Add a new callback based netlink message to an skb
+ * @skb: socket buffer to store message in
+ * @cb: netlink callback
+ * @type: message type
+ * @payload: length of message payload
+ * @flags: message flags
+ *
+ * Returns NULL if the tailroom of the skb is insufficient to store
+ * the message header and payload.
+ */
+static inline struct nlmsghdr *nlmsg_put_answer(struct sk_buff *skb,
+ struct netlink_callback *cb,
+ int type, int payload,
+ int flags)
+{
+ return nlmsg_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq,
+ type, payload, flags);
+}
+
+/**
+ * nlmsg_new - Allocate a new netlink message
+ * @size: maximum size of message
+ *
+ * Use NLMSG_GOODSIZE if size isn't known and you need a good default size.
+ */
+static inline struct sk_buff *nlmsg_new(int size)
+{
+ return alloc_skb(size, GFP_KERNEL);
+}
+
+/**
+ * nlmsg_end - Finalize a netlink message
+ * @skb: socket buffer the message is stored in
+ * @nlh: netlink message header
+ *
+ * Corrects the netlink message header to include the appended
+ * attributes. Only necessary if attributes have been added to
+ * the message.
+ *
+ * Returns the total data length of the skb.
+ */
+static inline int nlmsg_end(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+ nlh->nlmsg_len = skb->tail - (unsigned char *) nlh;
+
+ return skb->len;
+}
+
+/**
+ * nlmsg_cancel - Cancel construction of a netlink message
+ * @skb: socket buffer the message is stored in
+ * @nlh: netlink message header
+ *
+ * Removes the complete netlink message, including all
+ * attributes, from the socket buffer. Returns -1.
+ */
+static inline int nlmsg_cancel(struct sk_buff *skb, struct nlmsghdr *nlh)
+{
+ skb_trim(skb, (unsigned char *) nlh - skb->data);
+
+ return -1;
+}
+
+/**
+ * nlmsg_free - free a netlink message
+ * @skb: socket buffer of netlink message
+ */
+static inline void nlmsg_free(struct sk_buff *skb)
+{
+ kfree_skb(skb);
+}
+
+/**
+ * nlmsg_multicast - multicast a netlink message
+ * @sk: netlink socket to spread messages to
+ * @skb: netlink message as socket buffer
+ * @pid: own netlink pid to avoid sending to yourself
+ * @group: multicast group id
+ */
+static inline int nlmsg_multicast(struct sock *sk, struct sk_buff *skb,
+ u32 pid, unsigned int group)
+{
+ int err;
+
+ NETLINK_CB(skb).dst_group = group;
+
+ err = netlink_broadcast(sk, skb, pid, group, GFP_KERNEL);
+ if (err > 0)
+ err = 0;
+
+ return err;
+}
+
+/**
+ * nlmsg_unicast - unicast a netlink message
+ * @sk: netlink socket to spread message to
+ * @skb: netlink message as socket buffer
+ * @pid: netlink pid of the destination socket
+ */
+static inline int nlmsg_unicast(struct sock *sk, struct sk_buff *skb, u32 pid)
+{
+ int err;
+
+ err = netlink_unicast(sk, skb, pid, MSG_DONTWAIT);
+ if (err > 0)
+ err = 0;
+
+ return err;
+}
+
+/**
+ * nlmsg_for_each_msg - iterate over a stream of messages
+ * @pos: loop counter, set to current message
+ * @head: head of message stream
+ * @len: length of message stream
+ * @rem: initialized to len, holds bytes currently remaining in stream
+ */
+#define nlmsg_for_each_msg(pos, head, len, rem) \
+ for (pos = head, rem = len; \
+ nlmsg_ok(pos, rem); \
+ pos = nlmsg_next(pos, &(rem)))
+
+/**************************************************************************
+ * Netlink Attributes
+ **************************************************************************/
+
+/**
+ * nla_attr_size - length of attribute not including padding
+ * @payload: length of payload
+ */
+static inline int nla_attr_size(int payload)
+{
+ return NLA_HDRLEN + payload;
+}
+
+/**
+ * nla_total_size - total length of attribute including padding
+ * @payload: length of payload
+ */
+static inline int nla_total_size(int payload)
+{
+ return NLA_ALIGN(nla_attr_size(payload));
+}
+
+/**
+ * nla_padlen - length of padding at the tail of attribute
+ * @payload: length of payload
+ */
+static inline int nla_padlen(int payload)
+{
+ return nla_total_size(payload) - nla_attr_size(payload);
+}
+
+/**
+ * nla_data - head of payload
+ * @nla: netlink attribute
+ */
+static inline void *nla_data(const struct nlattr *nla)
+{
+ return (char *) nla + NLA_HDRLEN;
+}
+
+/**
+ * nla_len - length of payload
+ * @nla: netlink attribute
+ */
+static inline int nla_len(const struct nlattr *nla)
+{
+ return nla->nla_len - NLA_HDRLEN;
+}
+
+/**
+ * nla_ok - check if the netlink attribute fits into the remaining bytes
+ * @nla: netlink attribute
+ * @remaining: number of bytes remaining in attribute stream
+ */
+static inline int nla_ok(const struct nlattr *nla, int remaining)
+{
+ return remaining >= sizeof(*nla) &&
+ nla->nla_len >= sizeof(*nla) &&
+ nla->nla_len <= remaining;
+}
+
+/**
+ * nla_next - next netlink attribute in attribute stream
+ * @nla: netlink attribute
+ * @remaining: number of bytes remaining in attribute stream
+ *
+ * Returns the next netlink attribute in the attribute stream and
+ * decrements remaining by the size of the current attribute.
+ */
+static inline struct nlattr *nla_next(const struct nlattr *nla, int *remaining)
+{
+ int totlen = NLA_ALIGN(nla->nla_len);
+
+ *remaining -= totlen;
+ return (struct nlattr *) ((char *) nla + totlen);
+}
+
+/**
+ * nla_parse_nested - parse nested attributes
+ * @tb: destination array with maxtype+1 elements
+ * @maxtype: maximum attribute type to be expected
+ * @nla: attribute containing the nested attributes
+ * @policy: validation policy
+ *
+ * See nla_parse()
+ */
+static inline int nla_parse_nested(struct nlattr *tb[], int maxtype,
+ struct nlattr *nla,
+ struct nla_policy *policy)
+{
+ return nla_parse(tb, maxtype, nla_data(nla), nla_len(nla), policy);
+}
+/**
+ * nla_put_u8 - Add a u8 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_u8(struct sk_buff *skb, int attrtype, u8 value)
+{
+ return nla_put(skb, attrtype, sizeof(u8), &value);
+}
+
+/**
+ * nla_put_u16 - Add a u16 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_u16(struct sk_buff *skb, int attrtype, u16 value)
+{
+ return nla_put(skb, attrtype, sizeof(u16), &value);
+}
+
+/**
+ * nla_put_u32 - Add a u32 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_u32(struct sk_buff *skb, int attrtype, u32 value)
+{
+ return nla_put(skb, attrtype, sizeof(u32), &value);
+}
+
+/**
+ * nla_put_u64 - Add a u64 netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @value: numeric value
+ */
+static inline int nla_put_u64(struct sk_buff *skb, int attrtype, u64 value)
+{
+ return nla_put(skb, attrtype, sizeof(u64), &value);
+}
+
+/**
+ * nla_put_string - Add a string netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @str: NUL terminated string
+ */
+static inline int nla_put_string(struct sk_buff *skb, int attrtype,
+ const char *str)
+{
+ return nla_put(skb, attrtype, strlen(str) + 1, str);
+}
+
+/**
+ * nla_put_flag - Add a flag netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ */
+static inline int nla_put_flag(struct sk_buff *skb, int attrtype)
+{
+ return nla_put(skb, attrtype, 0, NULL);
+}
+
+/**
+ * nla_put_msecs - Add a msecs netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @jiffies: duration in jiffies (stored in the attribute as msecs)
+ */
+static inline int nla_put_msecs(struct sk_buff *skb, int attrtype,
+ unsigned long jiffies)
+{
+ u64 tmp = jiffies_to_msecs(jiffies);
+ return nla_put(skb, attrtype, sizeof(u64), &tmp);
+}
+
+#define NLA_PUT(skb, attrtype, attrlen, data) \
+ do { \
+ if (nla_put(skb, attrtype, attrlen, data) < 0) \
+ goto nla_put_failure; \
+ } while(0)
+
+#define NLA_PUT_TYPE(skb, type, attrtype, value) \
+ do { \
+ type __tmp = value; \
+ NLA_PUT(skb, attrtype, sizeof(type), &__tmp); \
+ } while(0)
+
+#define NLA_PUT_U8(skb, attrtype, value) \
+ NLA_PUT_TYPE(skb, u8, attrtype, value)
+
+#define NLA_PUT_U16(skb, attrtype, value) \
+ NLA_PUT_TYPE(skb, u16, attrtype, value)
+
+#define NLA_PUT_U32(skb, attrtype, value) \
+ NLA_PUT_TYPE(skb, u32, attrtype, value)
+
+#define NLA_PUT_U64(skb, attrtype, value) \
+ NLA_PUT_TYPE(skb, u64, attrtype, value)
+
+#define NLA_PUT_STRING(skb, attrtype, value) \
+ NLA_PUT(skb, attrtype, strlen(value) + 1, value)
+
+#define NLA_PUT_FLAG(skb, attrtype) \
+ NLA_PUT(skb, attrtype, 0, NULL)
+
+#define NLA_PUT_MSECS(skb, attrtype, jiffies) \
+ NLA_PUT_U64(skb, attrtype, jiffies_to_msecs(jiffies))
+
+/**
+ * nla_get_u32 - return payload of u32 attribute
+ * @nla: u32 netlink attribute
+ */
+static inline u32 nla_get_u32(struct nlattr *nla)
+{
+ return *(u32 *) nla_data(nla);
+}
+
+/**
+ * nla_get_u16 - return payload of u16 attribute
+ * @nla: u16 netlink attribute
+ */
+static inline u16 nla_get_u16(struct nlattr *nla)
+{
+ return *(u16 *) nla_data(nla);
+}
+
+/**
+ * nla_get_u8 - return payload of u8 attribute
+ * @nla: u8 netlink attribute
+ */
+static inline u8 nla_get_u8(struct nlattr *nla)
+{
+ return *(u8 *) nla_data(nla);
+}
+
+/**
+ * nla_get_u64 - return payload of u64 attribute
+ * @nla: u64 netlink attribute
+ */
+static inline u64 nla_get_u64(struct nlattr *nla)
+{
+ u64 tmp;
+
+ nla_memcpy(&tmp, nla, sizeof(tmp));
+
+ return tmp;
+}
+
+/**
+ * nla_get_flag - return payload of flag attribute
+ * @nla: flag netlink attribute
+ */
+static inline int nla_get_flag(struct nlattr *nla)
+{
+ return !!nla;
+}
+
+/**
+ * nla_get_msecs - return payload of msecs attribute
+ * @nla: msecs netlink attribute
+ *
+ * Returns the msecs payload converted to jiffies.
+ */
+static inline unsigned long nla_get_msecs(struct nlattr *nla)
+{
+ u64 msecs = nla_get_u64(nla);
+
+ return msecs_to_jiffies((unsigned long) msecs);
+}
+
+/**
+ * nla_nest_start - Start a new level of nested attributes
+ * @skb: socket buffer to add attributes to
+ * @attrtype: attribute type of container
+ *
+ * Returns the container attribute
+ */
+static inline struct nlattr *nla_nest_start(struct sk_buff *skb, int attrtype)
+{
+ struct nlattr *start = (struct nlattr *) skb->tail;
+
+ if (nla_put(skb, attrtype, 0, NULL) < 0)
+ return NULL;
+
+ return start;
+}
+
+/**
+ * nla_nest_end - Finalize nesting of attributes
+ * @skb: socket buffer the attributes are stored in
+ * @start: container attribute
+ *
+ * Corrects the container attribute header to include all
+ * appended attributes.
+ *
+ * Returns the total data length of the skb.
+ */
+static inline int nla_nest_end(struct sk_buff *skb, struct nlattr *start)
+{
+ start->nla_len = skb->tail - (unsigned char *) start;
+ return skb->len;
+}
+
+/**
+ * nla_nest_cancel - Cancel nesting of attributes
+ * @skb: socket buffer the message is stored in
+ * @start: container attribute
+ *
+ * Removes the container attribute, including all nested
+ * attributes. Returns -1.
+ */
+static inline int nla_nest_cancel(struct sk_buff *skb, struct nlattr *start)
+{
+ if (start)
+ skb_trim(skb, (unsigned char *) start - skb->data);
+
+ return -1;
+}
+
+/**
+ * nla_for_each_attr - iterate over a stream of attributes
+ * @pos: loop counter, set to current attribute
+ * @head: head of attribute stream
+ * @len: length of attribute stream
+ * @rem: initialized to len, holds bytes currently remaining in stream
+ */
+#define nla_for_each_attr(pos, head, len, rem) \
+ for (pos = head, rem = len; \
+ nla_ok(pos, rem); \
+ pos = nla_next(pos, &(rem)))
+
+#endif
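The construction and parsing halves of the interface documented at the top of this header are easiest to see end to end. The sketch below is illustrative only (the EXAMPLE_* names, example_build() and example_parse() are invented), but it uses nothing beyond what this header declares:

#include <linux/errno.h>
#include <net/netlink.h>

/* Hypothetical attribute space used only for this example. */
enum { EXAMPLE_ATTR_UNSPEC, EXAMPLE_ATTR_FOO, __EXAMPLE_ATTR_MAX };
#define EXAMPLE_ATTR_MAX (__EXAMPLE_ATTR_MAX - 1)

static struct nla_policy example_policy[EXAMPLE_ATTR_MAX + 1] = {
        [EXAMPLE_ATTR_FOO] = { .type = NLA_U32 },
};

/* Sender: allocate, add the header, append one u32 attribute, finalize. */
static struct sk_buff *example_build(u32 pid, u32 seq, int type, u32 foo)
{
        struct sk_buff *skb = nlmsg_new(NLMSG_GOODSIZE);
        struct nlmsghdr *nlh;

        if (skb == NULL)
                return NULL;

        nlh = nlmsg_put(skb, pid, seq, type, 0, 0);
        if (nlh == NULL)
                goto nla_put_failure;

        NLA_PUT_U32(skb, EXAMPLE_ATTR_FOO, foo);
        nlmsg_end(skb, nlh);
        return skb;

nla_put_failure:
        nlmsg_free(skb);
        return NULL;
}

/* Receiver: validate against the policy and read the attribute back. */
static int example_parse(struct nlmsghdr *nlh)
{
        struct nlattr *tb[EXAMPLE_ATTR_MAX + 1];
        int err;

        err = nlmsg_parse(nlh, 0, tb, EXAMPLE_ATTR_MAX, example_policy);
        if (err < 0)
                return err;

        if (tb[EXAMPLE_ATTR_FOO] == NULL)
                return -EINVAL;

        return nla_get_u32(tb[EXAMPLE_ATTR_FOO]);
}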
diff --git a/include/net/sock.h b/include/net/sock.h
index ff13c4cc287..982b4ecd187 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1247,6 +1247,12 @@ static inline struct page *sk_stream_alloc_page(struct sock *sk)
(skb != (struct sk_buff *)&(sk)->sk_write_queue); \
skb = skb->next)
+/* from STCP for fast SACK processing */
+#define sk_stream_for_retrans_queue_from(skb, sk) \
+ for (; (skb != (sk)->sk_send_head) && \
+ (skb != (struct sk_buff *)&(sk)->sk_write_queue); \
+ skb = skb->next)
+
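A short usage sketch for the new iterator (the caller below is hypothetical; only the macro itself comes from the patch): it resumes a write-queue walk at an arbitrary cached skb and stops at sk_send_head, which is what the SACK fast path needs.

static void walk_from_hint(struct sock *sk, struct sk_buff *hint)
{
        struct sk_buff *skb = hint;     /* assumed to sit in sk's write queue */

        sk_stream_for_retrans_queue_from(skb, sk) {
                /* visits every queued skb from hint up to, but not
                 * including, sk->sk_send_head */
        }
}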
/*
* Default write policy as shown to user space via poll/select/SIGIO
*/
diff --git a/include/net/tcp.h b/include/net/tcp.h
index c24339c4e31..0f984801197 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -27,6 +27,7 @@
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
+#include <linux/skbuff.h>
#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
@@ -88,10 +89,10 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
*/
#define TCP_SYN_RETRIES 5 /* number of times to retry active opening a
- * connection: ~180sec is RFC minumum */
+ * connection: ~180sec is RFC minimum */
#define TCP_SYNACK_RETRIES 5 /* number of times to retry passive opening a
- * connection: ~180sec is RFC minumum */
+ * connection: ~180sec is RFC minimum */
#define TCP_ORPHAN_RETRIES 7 /* number of times to retry on an orphaned
@@ -179,7 +180,7 @@ extern void tcp_time_wait(struct sock *sk, int state, int timeo);
/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF 1 /* Nagle's algo is disabled */
#define TCP_NAGLE_CORK 2 /* Socket is corked */
-#define TCP_NAGLE_PUSH 4 /* Cork is overriden for already queued data */
+#define TCP_NAGLE_PUSH 4 /* Cork is overridden for already queued data */
extern struct inet_timewait_death_row tcp_death_row;
@@ -217,6 +218,7 @@ extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
+extern int sysctl_tcp_abc;
extern atomic_t tcp_memory_allocated;
extern atomic_t tcp_sockets_allocated;
@@ -550,13 +552,13 @@ extern u32 __tcp_select_window(struct sock *sk);
/* TCP timestamps are only 32-bits, this causes a slight
* complication on 64-bit systems since we store a snapshot
- * of jiffies in the buffer control blocks below. We decidely
+ * of jiffies in the buffer control blocks below. We decidedly
* only use of the low 32-bits of jiffies and hide the ugly
* casts with the following macro.
*/
#define tcp_time_stamp ((__u32)(jiffies))
-/* This is what the send packet queueing engine uses to pass
+/* This is what the send packet queuing engine uses to pass
* TCP per-packet control information to the transmission
* code. We also store the host-order sequence numbers in
* here too. This is 36 bytes on 32-bit architectures,
@@ -596,7 +598,7 @@ struct tcp_skb_cb {
#define TCPCB_EVER_RETRANS 0x80 /* Ever retransmitted frame */
#define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)
-#define TCPCB_URG 0x20 /* Urgent pointer advenced here */
+#define TCPCB_URG 0x20 /* Urgent pointer advanced here */
#define TCPCB_AT_TAIL (TCPCB_URG)
@@ -764,6 +766,33 @@ static inline __u32 tcp_current_ssthresh(const struct sock *sk)
(tp->snd_cwnd >> 2)));
}
+/*
+ * Linear increase during slow start
+ */
+static inline void tcp_slow_start(struct tcp_sock *tp)
+{
+ if (sysctl_tcp_abc) {
+ /* RFC3465: Slow Start
+ * TCP sender SHOULD increase cwnd by the number of
+ * previously unacknowledged bytes ACKed by each incoming
+ * acknowledgment, provided the increase is not more than L
+ */
+ if (tp->bytes_acked < tp->mss_cache)
+ return;
+
+ /* We MAY increase by 2 if discovered delayed ack */
+ if (sysctl_tcp_abc > 1 && tp->bytes_acked > 2*tp->mss_cache) {
+ if (tp->snd_cwnd < tp->snd_cwnd_clamp)
+ tp->snd_cwnd++;
+ }
+ }
+ tp->bytes_acked = 0;
+
+ if (tp->snd_cwnd < tp->snd_cwnd_clamp)
+ tp->snd_cwnd++;
+}
+
+
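A worked illustration of the ABC branch above, with invented numbers:

/* With mss_cache = 1460 and sysctl_tcp_abc = 2, an ACK that newly
 * acknowledges 3000 bytes (> 2 * 1460) takes both increments and grows
 * snd_cwnd by 2; an ACK covering only 1000 bytes (< 1460) returns early,
 * leaving snd_cwnd unchanged and bytes_acked accumulating for later ACKs.
 * With sysctl_tcp_abc = 0 the function degenerates to the classic
 * one-segment-per-ACK slow start, and every increase is still capped by
 * snd_cwnd_clamp.
 */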
static inline void tcp_sync_left_out(struct tcp_sock *tp)
{
if (tp->rx_opt.sack_ok &&
@@ -793,6 +822,7 @@ static inline void tcp_enter_cwr(struct sock *sk)
struct tcp_sock *tp = tcp_sk(sk);
tp->prior_ssthresh = 0;
+ tp->bytes_acked = 0;
if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
__tcp_enter_cwr(sk);
tcp_set_ca_state(sk, TCP_CA_CWR);
@@ -809,6 +839,27 @@ static __inline__ __u32 tcp_max_burst(const struct tcp_sock *tp)
return 3;
}
+/* RFC2861 Check whether we are limited by application or congestion window
+ * This is the inverse of cwnd check in tcp_tso_should_defer
+ */
+static inline int tcp_is_cwnd_limited(const struct sock *sk, u32 in_flight)
+{
+ const struct tcp_sock *tp = tcp_sk(sk);
+ u32 left;
+
+ if (in_flight >= tp->snd_cwnd)
+ return 1;
+
+ if (!(sk->sk_route_caps & NETIF_F_TSO))
+ return 0;
+
+ left = tp->snd_cwnd - in_flight;
+ if (sysctl_tcp_tso_win_divisor)
+ return left * sysctl_tcp_tso_win_divisor < tp->snd_cwnd;
+ else
+ return left <= tcp_max_burst(tp);
+}
+
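And a worked illustration of the cwnd-limited test above, again with invented numbers:

/* With snd_cwnd = 10 and in_flight = 6 on a TSO-capable route, left = 4;
 * a tso_win_divisor of 4 yields 4 * 4 = 16, which is not below snd_cwnd,
 * so the sender is not considered cwnd limited.  With the divisor set to
 * 0, the fallback compares left = 4 against tcp_max_burst() = 3 and
 * reaches the same verdict.
 */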
static __inline__ void tcp_minshall_update(struct tcp_sock *tp, int mss,
const struct sk_buff *skb)
{
@@ -852,7 +903,7 @@ static __inline__ u16 tcp_v4_check(struct tcphdr *th, int len,
static __inline__ int __tcp_checksum_complete(struct sk_buff *skb)
{
- return (unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum));
+ return __skb_checksum_complete(skb);
}
static __inline__ int tcp_checksum_complete(struct sk_buff *skb)
@@ -1156,6 +1207,15 @@ static inline void tcp_mib_init(void)
TCP_ADD_STATS_USER(TCP_MIB_MAXCONN, -1);
}
+/* from STCP */
+static inline void clear_all_retrans_hints(struct tcp_sock *tp){
+ tp->lost_skb_hint = NULL;
+ tp->scoreboard_skb_hint = NULL;
+ tp->retransmit_skb_hint = NULL;
+ tp->forward_skb_hint = NULL;
+ tp->fastpath_skb_hint = NULL;
+}
+
/* /proc */
enum tcp_seq_states {
TCP_SEQ_STATE_LISTENING,
diff --git a/include/rdma/ib_user_verbs.h b/include/rdma/ib_user_verbs.h
index 072f3a2edac..5ff1490c08d 100644
--- a/include/rdma/ib_user_verbs.h
+++ b/include/rdma/ib_user_verbs.h
@@ -43,7 +43,7 @@
* Increment this value if any changes that break userspace ABI
* compatibility are made.
*/
-#define IB_USER_VERBS_ABI_VERSION 3
+#define IB_USER_VERBS_ABI_VERSION 4
enum {
IB_USER_VERBS_CMD_GET_CONTEXT,
@@ -333,6 +333,11 @@ struct ib_uverbs_create_qp {
struct ib_uverbs_create_qp_resp {
__u32 qp_handle;
__u32 qpn;
+ __u32 max_send_wr;
+ __u32 max_recv_wr;
+ __u32 max_send_sge;
+ __u32 max_recv_sge;
+ __u32 max_inline_data;
};
/*
@@ -552,9 +557,7 @@ struct ib_uverbs_modify_srq {
__u32 srq_handle;
__u32 attr_mask;
__u32 max_wr;
- __u32 max_sge;
__u32 srq_limit;
- __u32 reserved;
__u64 driver_data[0];
};
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index f72d46d54e0..a7f4c355a91 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -881,7 +881,7 @@ struct ib_device {
struct ib_ucontext *context,
struct ib_udata *udata);
int (*destroy_cq)(struct ib_cq *cq);
- int (*resize_cq)(struct ib_cq *cq, int *cqe);
+ int (*resize_cq)(struct ib_cq *cq, int cqe);
int (*poll_cq)(struct ib_cq *cq, int num_entries,
struct ib_wc *wc);
int (*peek_cq)(struct ib_cq *cq, int wc_cnt);
diff --git a/kernel/ptrace.c b/kernel/ptrace.c
index 5b8dd98a230..b88d4186cd7 100644
--- a/kernel/ptrace.c
+++ b/kernel/ptrace.c
@@ -155,7 +155,7 @@ int ptrace_attach(struct task_struct *task)
retval = -EPERM;
if (task->pid <= 1)
goto bad;
- if (task == current)
+ if (task->tgid == current->tgid)
goto bad;
/* the same process cannot be attached many times */
if (task->ptrace & PT_PTRACED)
diff --git a/kernel/sched.c b/kernel/sched.c
index ac3f5cc3bb5..b6506671b2b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -815,7 +815,8 @@ static void activate_task(task_t *p, runqueue_t *rq, int local)
}
#endif
- p->prio = recalc_task_prio(p, now);
+ if (!rt_task(p))
+ p->prio = recalc_task_prio(p, now);
/*
* This checks to make sure it's not an uninterruptible task
diff --git a/kernel/signal.c b/kernel/signal.c
index 1bf3c39d610..80789a59b4d 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1499,7 +1499,7 @@ void do_notify_parent(struct task_struct *tsk, int sig)
psig = tsk->parent->sighand;
spin_lock_irqsave(&psig->siglock, flags);
- if (sig == SIGCHLD &&
+ if (!tsk->ptrace && sig == SIGCHLD &&
(psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
(psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT))) {
/*
diff --git a/kernel/sys.c b/kernel/sys.c
index c43b3e22bbd..bce933ebb29 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -1497,6 +1497,8 @@ EXPORT_SYMBOL(in_egroup_p);
DECLARE_RWSEM(uts_sem);
+EXPORT_SYMBOL(uts_sem);
+
asmlinkage long sys_newuname(struct new_utsname __user * name)
{
int errno = 0;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ff81b5c6551..987225bdd66 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1330,7 +1330,7 @@ void show_free_areas(void)
} else
printk("\n");
- for_each_cpu(cpu) {
+ for_each_online_cpu(cpu) {
struct per_cpu_pageset *pageset;
pageset = zone_pcp(zone, cpu);
diff --git a/net/core/datagram.c b/net/core/datagram.c
index d219435d086..1bcfef51ac5 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -350,6 +350,20 @@ fault:
return -EFAULT;
}
+unsigned int __skb_checksum_complete(struct sk_buff *skb)
+{
+ unsigned int sum;
+
+ sum = (u16)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum));
+ if (likely(!sum)) {
+ if (unlikely(skb->ip_summed == CHECKSUM_HW))
+ netdev_rx_csum_fault(skb->dev);
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
+ }
+ return sum;
+}
+EXPORT_SYMBOL(__skb_checksum_complete);
+
/**
* skb_copy_and_csum_datagram_iovec - Copy and checkum skb to user iovec.
* @skb: skbuff
@@ -363,7 +377,7 @@ fault:
* -EFAULT - fault during copy. Beware, in this case iovec
* can be modified!
*/
-int skb_copy_and_csum_datagram_iovec(const struct sk_buff *skb,
+int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
int hlen, struct iovec *iov)
{
unsigned int csum;
@@ -376,8 +390,7 @@ int skb_copy_and_csum_datagram_iovec(const struct sk_buff *skb,
iov++;
if (iov->iov_len < chunk) {
- if ((unsigned short)csum_fold(skb_checksum(skb, 0, chunk + hlen,
- skb->csum)))
+ if (__skb_checksum_complete(skb))
goto csum_error;
if (skb_copy_datagram_iovec(skb, hlen, iov, chunk))
goto fault;
@@ -388,6 +401,8 @@ int skb_copy_and_csum_datagram_iovec(const struct sk_buff *skb,
goto fault;
if ((unsigned short)csum_fold(csum))
goto csum_error;
+ if (unlikely(skb->ip_summed == CHECKSUM_HW))
+ netdev_rx_csum_fault(skb->dev);
iov->iov_len -= chunk;
iov->iov_base += chunk;
}
diff --git a/net/core/dev.c b/net/core/dev.c
index 8d154159527..0b48e294aaf 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1108,6 +1108,18 @@ out:
return ret;
}
+/* Take action when hardware reception checksum errors are detected. */
+#ifdef CONFIG_BUG
+void netdev_rx_csum_fault(struct net_device *dev)
+{
+ if (net_ratelimit()) {
+ printk(KERN_ERR "%s: hw csum failure.\n", dev->name);
+ dump_stack();
+ }
+}
+EXPORT_SYMBOL(netdev_rx_csum_fault);
+#endif
+
#ifdef CONFIG_HIGHMEM
/* Actually, we should eliminate this check as soon as we know, that:
* 1. IOMMU is present and allows to map all the memory.
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 802fe11efad..49424a42a2c 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -101,16 +101,20 @@ void netpoll_queue(struct sk_buff *skb)
static int checksum_udp(struct sk_buff *skb, struct udphdr *uh,
unsigned short ulen, u32 saddr, u32 daddr)
{
- if (uh->check == 0)
+ unsigned int psum;
+
+ if (uh->check == 0 || skb->ip_summed == CHECKSUM_UNNECESSARY)
return 0;
- if (skb->ip_summed == CHECKSUM_HW)
- return csum_tcpudp_magic(
- saddr, daddr, ulen, IPPROTO_UDP, skb->csum);
+ psum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);
+
+ if (skb->ip_summed == CHECKSUM_HW &&
+ !(u16)csum_fold(csum_add(psum, skb->csum)))
+ return 0;
- skb->csum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);
+ skb->csum = psum;
- return csum_fold(skb_checksum(skb, 0, skb->len, skb->csum));
+ return __skb_checksum_complete(skb);
}
/*
@@ -489,7 +493,7 @@ int __netpoll_rx(struct sk_buff *skb)
if (ulen != len)
goto out;
- if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr) < 0)
+ if (checksum_udp(skb, uh, ulen, iph->saddr, iph->daddr))
goto out;
if (np->local_ip && np->local_ip != ntohl(iph->daddr))
goto out;
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 9bed7569ce3..8700379685e 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -49,6 +49,7 @@
#include <net/udp.h>
#include <net/sock.h>
#include <net/pkt_sched.h>
+#include <net/netlink.h>
DECLARE_MUTEX(rtnl_sem);
@@ -462,11 +463,6 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned change)
netlink_broadcast(rtnl, skb, 0, RTNLGRP_LINK, GFP_KERNEL);
}
-static int rtnetlink_done(struct netlink_callback *cb)
-{
- return 0;
-}
-
/* Protected by RTNL sempahore. */
static struct rtattr **rta_buf;
static int rtattr_max;
@@ -524,8 +520,6 @@ rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp)
}
if (kind == 2 && nlh->nlmsg_flags&NLM_F_DUMP) {
- u32 rlen;
-
if (link->dumpit == NULL)
link = &(rtnetlink_links[PF_UNSPEC][type]);
@@ -533,14 +527,11 @@ rtnetlink_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp)
goto err_inval;
if ((*errp = netlink_dump_start(rtnl, skb, nlh,
- link->dumpit,
- rtnetlink_done)) != 0) {
+ link->dumpit, NULL)) != 0) {
return -1;
}
- rlen = NLMSG_ALIGN(nlh->nlmsg_len);
- if (rlen > skb->len)
- rlen = skb->len;
- skb_pull(skb, rlen);
+
+ netlink_queue_skip(nlh, skb);
return -1;
}
@@ -579,75 +570,13 @@ err_inval:
return -1;
}
-/*
- * Process one packet of messages.
- * Malformed skbs with wrong lengths of messages are discarded silently.
- */
-
-static inline int rtnetlink_rcv_skb(struct sk_buff *skb)
-{
- int err;
- struct nlmsghdr * nlh;
-
- while (skb->len >= NLMSG_SPACE(0)) {
- u32 rlen;
-
- nlh = (struct nlmsghdr *)skb->data;
- if (nlh->nlmsg_len < sizeof(*nlh) || skb->len < nlh->nlmsg_len)
- return 0;
- rlen = NLMSG_ALIGN(nlh->nlmsg_len);
- if (rlen > skb->len)
- rlen = skb->len;
- if (rtnetlink_rcv_msg(skb, nlh, &err)) {
- /* Not error, but we must interrupt processing here:
- * Note, that in this case we do not pull message
- * from skb, it will be processed later.
- */
- if (err == 0)
- return -1;
- netlink_ack(skb, nlh, err);
- } else if (nlh->nlmsg_flags&NLM_F_ACK)
- netlink_ack(skb, nlh, 0);
- skb_pull(skb, rlen);
- }
-
- return 0;
-}
-
-/*
- * rtnetlink input queue processing routine:
- * - process as much as there was in the queue upon entry.
- * - feed skbs to rtnetlink_rcv_skb, until it refuse a message,
- * that will occur, when a dump started.
- */
-
static void rtnetlink_rcv(struct sock *sk, int len)
{
- unsigned int qlen = skb_queue_len(&sk->sk_receive_queue);
+ unsigned int qlen = 0;
do {
- struct sk_buff *skb;
-
rtnl_lock();
-
- if (qlen > skb_queue_len(&sk->sk_receive_queue))
- qlen = skb_queue_len(&sk->sk_receive_queue);
-
- for (; qlen; qlen--) {
- skb = skb_dequeue(&sk->sk_receive_queue);
- if (rtnetlink_rcv_skb(skb)) {
- if (skb->len)
- skb_queue_head(&sk->sk_receive_queue,
- skb);
- else {
- kfree_skb(skb);
- qlen--;
- }
- break;
- }
- kfree_skb(skb);
- }
-
+ netlink_run_queue(sk, &qlen, &rtnetlink_rcv_msg);
up(&rtnl_sem);
netdev_run_todo();
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 95501e40100..b7d13a4fff4 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -336,6 +336,9 @@ void __kfree_skb(struct sk_buff *skb)
}
#ifdef CONFIG_NETFILTER
nf_conntrack_put(skb->nfct);
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+ nf_conntrack_put_reasm(skb->nfct_reasm);
+#endif
#ifdef CONFIG_BRIDGE_NETFILTER
nf_bridge_put(skb->nf_bridge);
#endif
@@ -414,9 +417,17 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
C(nfct);
nf_conntrack_get(skb->nfct);
C(nfctinfo);
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+ C(nfct_reasm);
+ nf_conntrack_get_reasm(skb->nfct_reasm);
+#endif
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
C(ipvs_property);
#endif
#ifdef CONFIG_BRIDGE_NETFILTER
C(nf_bridge);
nf_bridge_get(skb->nf_bridge);
@@ -474,6 +485,10 @@ static void copy_skb_header(struct sk_buff *new, const struct sk_buff *old)
new->nfct = old->nfct;
nf_conntrack_get(old->nfct);
new->nfctinfo = old->nfctinfo;
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+ new->nfct_reasm = old->nfct_reasm;
+ nf_conntrack_get_reasm(old->nfct_reasm);
+#endif
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
new->ipvs_property = old->ipvs_property;
#endif
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 175e093ec56..e3eceecd049 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -934,11 +934,11 @@ int icmp_rcv(struct sk_buff *skb)
case CHECKSUM_HW:
if (!(u16)csum_fold(skb->csum))
break;
- LIMIT_NETDEBUG(KERN_DEBUG "icmp v4 hw csum failure\n");
+ /* fall through */
case CHECKSUM_NONE:
- if ((u16)csum_fold(skb_checksum(skb, 0, skb->len, 0)))
+ skb->csum = 0;
+ if (__skb_checksum_complete(skb))
goto error;
- default:;
}
if (!pskb_pull(skb, sizeof(struct icmphdr)))
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index c6247fc8406..c04607b4921 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -872,11 +872,18 @@ int igmp_rcv(struct sk_buff *skb)
return 0;
}
- if (!pskb_may_pull(skb, sizeof(struct igmphdr)) ||
- (u16)csum_fold(skb_checksum(skb, 0, len, 0))) {
- in_dev_put(in_dev);
- kfree_skb(skb);
- return 0;
+ if (!pskb_may_pull(skb, sizeof(struct igmphdr)))
+ goto drop;
+
+ switch (skb->ip_summed) {
+ case CHECKSUM_HW:
+ if (!(u16)csum_fold(skb->csum))
+ break;
+ /* fall through */
+ case CHECKSUM_NONE:
+ skb->csum = 0;
+ if (__skb_checksum_complete(skb))
+ goto drop;
}
ih = skb->h.igmph;
@@ -906,6 +913,8 @@ int igmp_rcv(struct sk_buff *skb)
default:
NETDEBUG(KERN_DEBUG "New IGMP type=%d, why we do not know about it?\n", ih->type);
}
+
+drop:
in_dev_put(in_dev);
kfree_skb(skb);
return 0;
diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c
index 71f3c7350c6..39061ed53cf 100644
--- a/net/ipv4/inet_diag.c
+++ b/net/ipv4/inet_diag.c
@@ -724,12 +724,6 @@ done:
return skb->len;
}
-static int inet_diag_dump_done(struct netlink_callback *cb)
-{
- return 0;
-}
-
-
static __inline__ int
inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
{
@@ -760,8 +754,7 @@ inet_diag_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
goto err_inval;
}
return netlink_dump_start(idiagnl, skb, nlh,
- inet_diag_dump,
- inet_diag_dump_done);
+ inet_diag_dump, NULL);
} else {
return inet_diag_get_exact(skb, nlh);
}
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 896ce3f8f53..4e9c74b54b1 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -577,15 +577,16 @@ static int ipgre_rcv(struct sk_buff *skb)
goto drop_nolock;
if (flags&GRE_CSUM) {
- if (skb->ip_summed == CHECKSUM_HW) {
+ switch (skb->ip_summed) {
+ case CHECKSUM_HW:
csum = (u16)csum_fold(skb->csum);
- if (csum)
- skb->ip_summed = CHECKSUM_NONE;
- }
- if (skb->ip_summed == CHECKSUM_NONE) {
- skb->csum = skb_checksum(skb, 0, skb->len, 0);
+ if (!csum)
+ break;
+ /* fall through */
+ case CHECKSUM_NONE:
+ skb->csum = 0;
+ csum = __skb_checksum_complete(skb);
skb->ip_summed = CHECKSUM_HW;
- csum = (u16)csum_fold(skb->csum);
}
offset += 4;
}
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 7d917e4ce1d..9d3c8b5f327 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -5,6 +5,20 @@
menu "IP: Netfilter Configuration"
depends on INET && NETFILTER
+config NF_CONNTRACK_IPV4
+ tristate "IPv4 support for new connection tracking (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && NF_CONNTRACK
+ ---help---
+ Connection tracking keeps a record of what packets have passed
+ through your machine, in order to figure out how they are related
+ into connections.
+
+ This is IPv4 support for Layer 3 independent connection tracking.
+ Layer 3 independent connection tracking is an experimental scheme
+ which generalizes ip_conntrack to support other layer 3 protocols.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
# connection tracking, helpers and protocols
config IP_NF_CONNTRACK
tristate "Connection tracking (required for masq/NAT)"
@@ -209,8 +223,8 @@ config IP_NF_MATCH_PKTTYPE
tristate "Packet type match support"
depends on IP_NF_IPTABLES
help
- Packet type matching allows you to match a packet by
- its "class", eg. BROADCAST, MULTICAST, ...
+ Packet type matching allows you to match a packet by
+ its "class", eg. BROADCAST, MULTICAST, ...
Typical usage:
iptables -A INPUT -m pkttype --pkt-type broadcast -j LOG
@@ -317,7 +331,8 @@ config IP_NF_MATCH_TCPMSS
config IP_NF_MATCH_HELPER
tristate "Helper match support"
- depends on IP_NF_CONNTRACK && IP_NF_IPTABLES
+ depends on IP_NF_IPTABLES
+ depends on IP_NF_CONNTRACK || NF_CONNTRACK_IPV4
help
Helper matching allows you to match packets in dynamic connections
tracked by a conntrack-helper, ie. ip_conntrack_ftp
@@ -326,7 +341,8 @@ config IP_NF_MATCH_HELPER
config IP_NF_MATCH_STATE
tristate "Connection state match support"
- depends on IP_NF_CONNTRACK && IP_NF_IPTABLES
+ depends on IP_NF_IPTABLES
+ depends on IP_NF_CONNTRACK || NF_CONNTRACK_IPV4
help
Connection state matching allows you to match packets based on their
relationship to a tracked connection (ie. previous packets). This
@@ -336,7 +352,8 @@ config IP_NF_MATCH_STATE
config IP_NF_MATCH_CONNTRACK
tristate "Connection tracking match support"
- depends on IP_NF_CONNTRACK && IP_NF_IPTABLES
+ depends on IP_NF_IPTABLES
+ depends on IP_NF_CONNTRACK || NF_CONNTRACK_IPV4
help
This is a general conntrack match module, a superset of the state match.
@@ -422,7 +439,8 @@ config IP_NF_MATCH_COMMENT
config IP_NF_MATCH_CONNMARK
tristate 'Connection mark match support'
- depends on IP_NF_CONNTRACK_MARK && IP_NF_IPTABLES
+ depends on IP_NF_IPTABLES
+ depends on IP_NF_CONNTRACK_MARK || (NF_CONNTRACK_MARK && NF_CONNTRACK_IPV4)
help
This option adds a `connmark' match, which allows you to match the
connection mark value previously set for the session by `CONNMARK'.
@@ -433,7 +451,8 @@ config IP_NF_MATCH_CONNMARK
config IP_NF_MATCH_CONNBYTES
tristate 'Connection byte/packet counter match support'
- depends on IP_NF_CT_ACCT && IP_NF_IPTABLES
+ depends on IP_NF_IPTABLES
+ depends on IP_NF_CT_ACCT || (NF_CT_ACCT && NF_CONNTRACK_IPV4)
help
This option adds a `connbytes' match, which allows you to match the
number of bytes and/or packets for each direction within a connection.
@@ -747,7 +766,8 @@ config IP_NF_TARGET_TTL
config IP_NF_TARGET_CONNMARK
tristate 'CONNMARK target support'
- depends on IP_NF_CONNTRACK_MARK && IP_NF_MANGLE
+ depends on IP_NF_MANGLE
+ depends on IP_NF_CONNTRACK_MARK || (NF_CONNTRACK_MARK && NF_CONNTRACK_IPV4)
help
This option adds a `CONNMARK' target, which allows one to manipulate
the connection mark value. Similar to the MARK target, but
@@ -759,7 +779,8 @@ config IP_NF_TARGET_CONNMARK
config IP_NF_TARGET_CLUSTERIP
tristate "CLUSTERIP target support (EXPERIMENTAL)"
- depends on IP_NF_CONNTRACK_MARK && IP_NF_IPTABLES && EXPERIMENTAL
+ depends on IP_NF_IPTABLES && EXPERIMENTAL
+ depends on IP_NF_CONNTRACK_MARK || (NF_CONNTRACK_MARK && NF_CONNTRACK_IPV4)
help
The CLUSTERIP target allows you to build load-balancing clusters of
network servers without having a dedicated load-balancing
@@ -782,7 +803,7 @@ config IP_NF_RAW
config IP_NF_TARGET_NOTRACK
tristate 'NOTRACK target support'
depends on IP_NF_RAW
- depends on IP_NF_CONNTRACK
+ depends on IP_NF_CONNTRACK || NF_CONNTRACK_IPV4
help
The NOTRACK target allows a select rule to specify
which packets *not* to enter the conntrack/NAT
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index dab4b58dd31..058c48e258f 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -103,3 +103,9 @@ obj-$(CONFIG_IP_NF_ARP_MANGLE) += arpt_mangle.o
obj-$(CONFIG_IP_NF_ARPFILTER) += arptable_filter.o
obj-$(CONFIG_IP_NF_QUEUE) += ip_queue.o
+
+# objects for l3 independent conntrack
+nf_conntrack_ipv4-objs := nf_conntrack_l3proto_ipv4.o nf_conntrack_proto_icmp.o
+
+# l3 independent conntrack
+obj-$(CONFIG_NF_CONNTRACK_IPV4) += nf_conntrack_ipv4.o
diff --git a/net/ipv4/netfilter/ip_conntrack_netlink.c b/net/ipv4/netfilter/ip_conntrack_netlink.c
index 82a65043a8e..d2a4fec2286 100644
--- a/net/ipv4/netfilter/ip_conntrack_netlink.c
+++ b/net/ipv4/netfilter/ip_conntrack_netlink.c
@@ -28,11 +28,8 @@
#include <linux/netlink.h>
#include <linux/spinlock.h>
#include <linux/notifier.h>
-#include <linux/rtnetlink.h>
#include <linux/netfilter.h>
-#include <linux/netfilter_ipv4.h>
-#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv4/ip_conntrack.h>
#include <linux/netfilter_ipv4/ip_conntrack_core.h>
#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
@@ -58,14 +55,17 @@ ctnetlink_dump_tuples_proto(struct sk_buff *skb,
const struct ip_conntrack_tuple *tuple)
{
struct ip_conntrack_protocol *proto;
+ int ret = 0;
NFA_PUT(skb, CTA_PROTO_NUM, sizeof(u_int8_t), &tuple->dst.protonum);
proto = ip_conntrack_proto_find_get(tuple->dst.protonum);
- if (proto && proto->tuple_to_nfattr)
- return proto->tuple_to_nfattr(skb, tuple);
+ if (likely(proto && proto->tuple_to_nfattr)) {
+ ret = proto->tuple_to_nfattr(skb, tuple);
+ ip_conntrack_proto_put(proto);
+ }
- return 0;
+ return ret;
nfattr_failure:
return -1;
@@ -175,7 +175,7 @@ ctnetlink_dump_counters(struct sk_buff *skb, const struct ip_conntrack *ct,
{
enum ctattr_type type = dir ? CTA_COUNTERS_REPLY: CTA_COUNTERS_ORIG;
struct nfattr *nest_count = NFA_NEST(skb, type);
- u_int64_t tmp;
+ u_int32_t tmp;
tmp = htonl(ct->counters[dir].packets);
NFA_PUT(skb, CTA_COUNTERS32_PACKETS, sizeof(u_int32_t), &tmp);
@@ -479,9 +479,7 @@ ctnetlink_parse_tuple_ip(struct nfattr *attr, struct ip_conntrack_tuple *tuple)
DEBUGP("entered %s\n", __FUNCTION__);
-
- if (nfattr_parse_nested(tb, CTA_IP_MAX, attr) < 0)
- goto nfattr_failure;
+ nfattr_parse_nested(tb, CTA_IP_MAX, attr);
if (nfattr_bad_size(tb, CTA_IP_MAX, cta_min_ip))
return -EINVAL;
@@ -497,9 +495,6 @@ ctnetlink_parse_tuple_ip(struct nfattr *attr, struct ip_conntrack_tuple *tuple)
DEBUGP("leaving\n");
return 0;
-
-nfattr_failure:
- return -1;
}
static const int cta_min_proto[CTA_PROTO_MAX] = {
@@ -521,8 +516,7 @@ ctnetlink_parse_tuple_proto(struct nfattr *attr,
DEBUGP("entered %s\n", __FUNCTION__);
- if (nfattr_parse_nested(tb, CTA_PROTO_MAX, attr) < 0)
- goto nfattr_failure;
+ nfattr_parse_nested(tb, CTA_PROTO_MAX, attr);
if (nfattr_bad_size(tb, CTA_PROTO_MAX, cta_min_proto))
return -EINVAL;
@@ -539,9 +533,6 @@ ctnetlink_parse_tuple_proto(struct nfattr *attr,
}
return ret;
-
-nfattr_failure:
- return -1;
}
static inline int
@@ -555,8 +546,7 @@ ctnetlink_parse_tuple(struct nfattr *cda[], struct ip_conntrack_tuple *tuple,
memset(tuple, 0, sizeof(*tuple));
- if (nfattr_parse_nested(tb, CTA_TUPLE_MAX, cda[type-1]) < 0)
- goto nfattr_failure;
+ nfattr_parse_nested(tb, CTA_TUPLE_MAX, cda[type-1]);
if (!tb[CTA_TUPLE_IP-1])
return -EINVAL;
@@ -583,9 +573,6 @@ ctnetlink_parse_tuple(struct nfattr *cda[], struct ip_conntrack_tuple *tuple,
DEBUGP("leaving\n");
return 0;
-
-nfattr_failure:
- return -1;
}
#ifdef CONFIG_IP_NF_NAT_NEEDED
@@ -603,11 +590,10 @@ static int ctnetlink_parse_nat_proto(struct nfattr *attr,
DEBUGP("entered %s\n", __FUNCTION__);
- if (nfattr_parse_nested(tb, CTA_PROTONAT_MAX, attr) < 0)
- goto nfattr_failure;
+ nfattr_parse_nested(tb, CTA_PROTONAT_MAX, attr);
if (nfattr_bad_size(tb, CTA_PROTONAT_MAX, cta_min_protonat))
- goto nfattr_failure;
+ return -EINVAL;
npt = ip_nat_proto_find_get(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum);
if (!npt)
@@ -626,9 +612,6 @@ static int ctnetlink_parse_nat_proto(struct nfattr *attr,
DEBUGP("leaving\n");
return 0;
-
-nfattr_failure:
- return -1;
}
static inline int
@@ -642,8 +625,7 @@ ctnetlink_parse_nat(struct nfattr *cda[],
memset(range, 0, sizeof(*range));
- if (nfattr_parse_nested(tb, CTA_NAT_MAX, cda[CTA_NAT-1]) < 0)
- goto nfattr_failure;
+ nfattr_parse_nested(tb, CTA_NAT_MAX, cda[CTA_NAT-1]);
if (tb[CTA_NAT_MINIP-1])
range->min_ip = *(u_int32_t *)NFA_DATA(tb[CTA_NAT_MINIP-1]);
@@ -665,9 +647,6 @@ ctnetlink_parse_nat(struct nfattr *cda[],
DEBUGP("leaving\n");
return 0;
-
-nfattr_failure:
- return -1;
}
#endif
@@ -678,8 +657,7 @@ ctnetlink_parse_help(struct nfattr *attr, char **helper_name)
DEBUGP("entered %s\n", __FUNCTION__);
- if (nfattr_parse_nested(tb, CTA_HELP_MAX, attr) < 0)
- goto nfattr_failure;
+ nfattr_parse_nested(tb, CTA_HELP_MAX, attr);
if (!tb[CTA_HELP_NAME-1])
return -EINVAL;
@@ -687,9 +665,6 @@ ctnetlink_parse_help(struct nfattr *attr, char **helper_name)
*helper_name = NFA_DATA(tb[CTA_HELP_NAME-1]);
return 0;
-
-nfattr_failure:
- return -1;
}
static int
@@ -804,7 +779,7 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
ct = tuplehash_to_ctrack(h);
err = -ENOMEM;
- skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_ATOMIC);
+ skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
if (!skb2) {
ip_conntrack_put(ct);
return -ENOMEM;
@@ -827,7 +802,7 @@ ctnetlink_get_conntrack(struct sock *ctnl, struct sk_buff *skb,
free:
kfree_skb(skb2);
out:
- return -1;
+ return err;
}
static inline int
@@ -957,8 +932,7 @@ ctnetlink_change_protoinfo(struct ip_conntrack *ct, struct nfattr *cda[])
u_int16_t npt = ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum;
int err = 0;
- if (nfattr_parse_nested(tb, CTA_PROTOINFO_MAX, attr) < 0)
- goto nfattr_failure;
+ nfattr_parse_nested(tb, CTA_PROTOINFO_MAX, attr);
proto = ip_conntrack_proto_find_get(npt);
if (!proto)
@@ -969,9 +943,6 @@ ctnetlink_change_protoinfo(struct ip_conntrack *ct, struct nfattr *cda[])
ip_conntrack_proto_put(proto);
return err;
-
-nfattr_failure:
- return -ENOMEM;
}
static int
@@ -1005,6 +976,11 @@ ctnetlink_change_conntrack(struct ip_conntrack *ct, struct nfattr *cda[])
return err;
}
+#if defined(CONFIG_IP_NF_CONNTRACK_MARK)
+ if (cda[CTA_MARK-1])
+ ct->mark = ntohl(*(u_int32_t *)NFA_DATA(cda[CTA_MARK-1]));
+#endif
+
DEBUGP("all done\n");
return 0;
}
@@ -1048,6 +1024,11 @@ ctnetlink_create_conntrack(struct nfattr *cda[],
if (ct->helper)
ip_conntrack_helper_put(ct->helper);
+#if defined(CONFIG_IP_NF_CONNTRACK_MARK)
+ if (cda[CTA_MARK-1])
+ ct->mark = ntohl(*(u_int32_t *)NFA_DATA(cda[CTA_MARK-1]));
+#endif
+
DEBUGP("conntrack with id %u inserted\n", ct->id);
return 0;
@@ -1312,6 +1293,14 @@ ctnetlink_get_expect(struct sock *ctnl, struct sk_buff *skb,
if (!exp)
return -ENOENT;
+ if (cda[CTA_EXPECT_ID-1]) {
+ u_int32_t id = *(u_int32_t *)NFA_DATA(cda[CTA_EXPECT_ID-1]);
+ if (exp->id != ntohl(id)) {
+ ip_conntrack_expect_put(exp);
+ return -ENOENT;
+ }
+ }
+
err = -ENOMEM;
skb2 = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
if (!skb2)
@@ -1387,7 +1376,7 @@ ctnetlink_del_expect(struct sock *ctnl, struct sk_buff *skb,
ip_conntrack_expect_put(exp);
}
}
- write_unlock(&ip_conntrack_lock);
+ write_unlock_bh(&ip_conntrack_lock);
} else {
/* This basically means we have to flush everything */
write_lock_bh(&ip_conntrack_lock);
@@ -1554,6 +1543,8 @@ static struct nfnetlink_subsystem ctnl_exp_subsys = {
.cb = ctnl_exp_cb,
};
+MODULE_ALIAS_NFNL_SUBSYS(NFNL_SUBSYS_CTNETLINK);
+
static int __init ctnetlink_init(void)
{
int ret;
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_icmp.c b/net/ipv4/netfilter/ip_conntrack_proto_icmp.c
index 98f0015dd25..e4d6b268e8c 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_icmp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_icmp.c
@@ -13,6 +13,7 @@
#include <linux/in.h>
#include <linux/icmp.h>
#include <linux/seq_file.h>
+#include <linux/skbuff.h>
#include <net/ip.h>
#include <net/checksum.h>
#include <linux/netfilter.h>
@@ -151,13 +152,13 @@ icmp_error_message(struct sk_buff *skb,
/* Not enough header? */
inside = skb_header_pointer(skb, skb->nh.iph->ihl*4, sizeof(_in), &_in);
if (inside == NULL)
- return NF_ACCEPT;
+ return -NF_ACCEPT;
/* Ignore ICMP's containing fragments (shouldn't happen) */
if (inside->ip.frag_off & htons(IP_OFFSET)) {
DEBUGP("icmp_error_track: fragment of proto %u\n",
inside->ip.protocol);
- return NF_ACCEPT;
+ return -NF_ACCEPT;
}
innerproto = ip_conntrack_proto_find_get(inside->ip.protocol);
@@ -166,7 +167,7 @@ icmp_error_message(struct sk_buff *skb,
if (!ip_ct_get_tuple(&inside->ip, skb, dataoff, &origtuple, innerproto)) {
DEBUGP("icmp_error: ! get_tuple p=%u", inside->ip.protocol);
ip_conntrack_proto_put(innerproto);
- return NF_ACCEPT;
+ return -NF_ACCEPT;
}
/* Ordinarily, we'd expect the inverted tupleproto, but it's
@@ -174,7 +175,7 @@ icmp_error_message(struct sk_buff *skb,
if (!ip_ct_invert_tuple(&innertuple, &origtuple, innerproto)) {
DEBUGP("icmp_error_track: Can't invert tuple\n");
ip_conntrack_proto_put(innerproto);
- return NF_ACCEPT;
+ return -NF_ACCEPT;
}
ip_conntrack_proto_put(innerproto);
@@ -190,7 +191,7 @@ icmp_error_message(struct sk_buff *skb,
if (!h) {
DEBUGP("icmp_error_track: no match\n");
- return NF_ACCEPT;
+ return -NF_ACCEPT;
}
/* Reverse direction from that found */
if (DIRECTION(h) != IP_CT_DIR_REPLY)
@@ -230,19 +231,15 @@ icmp_error(struct sk_buff *skb, enum ip_conntrack_info *ctinfo,
case CHECKSUM_HW:
if (!(u16)csum_fold(skb->csum))
break;
- if (LOG_INVALID(IPPROTO_ICMP))
- nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
- "ip_ct_icmp: bad HW ICMP checksum ");
- return -NF_ACCEPT;
+ /* fall through */
case CHECKSUM_NONE:
- if ((u16)csum_fold(skb_checksum(skb, 0, skb->len, 0))) {
+ skb->csum = 0;
+ if (__skb_checksum_complete(skb)) {
if (LOG_INVALID(IPPROTO_ICMP))
nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
"ip_ct_icmp: bad ICMP checksum ");
return -NF_ACCEPT;
}
- default:
- break;
}
checksum_skipped:
@@ -296,7 +293,8 @@ static int icmp_nfattr_to_tuple(struct nfattr *tb[],
struct ip_conntrack_tuple *tuple)
{
if (!tb[CTA_PROTO_ICMP_TYPE-1]
- || !tb[CTA_PROTO_ICMP_CODE-1])
+ || !tb[CTA_PROTO_ICMP_CODE-1]
+ || !tb[CTA_PROTO_ICMP_ID-1])
return -1;
tuple->dst.u.icmp.type =
@@ -304,7 +302,7 @@ static int icmp_nfattr_to_tuple(struct nfattr *tb[],
tuple->dst.u.icmp.code =
*(u_int8_t *)NFA_DATA(tb[CTA_PROTO_ICMP_CODE-1]);
tuple->src.u.icmp.id =
- *(u_int8_t *)NFA_DATA(tb[CTA_PROTO_ICMP_ID-1]);
+ *(u_int16_t *)NFA_DATA(tb[CTA_PROTO_ICMP_ID-1]);
return 0;
}
diff --git a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
index d6701cafbcc..468c6003b4c 100644
--- a/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
+++ b/net/ipv4/netfilter/ip_conntrack_proto_tcp.c
@@ -362,8 +362,12 @@ static int nfattr_to_tcp(struct nfattr *cda[], struct ip_conntrack *ct)
struct nfattr *attr = cda[CTA_PROTOINFO_TCP-1];
struct nfattr *tb[CTA_PROTOINFO_TCP_MAX];
- if (nfattr_parse_nested(tb, CTA_PROTOINFO_TCP_MAX, attr) < 0)
- goto nfattr_failure;
+ /* an update might not contain anything about the private
+ * protocol info; in that case skip the parsing */
+ if (!attr)
+ return 0;
+
+ nfattr_parse_nested(tb, CTA_PROTOINFO_TCP_MAX, attr);
if (!tb[CTA_PROTOINFO_TCP_STATE-1])
return -EINVAL;
@@ -374,9 +378,6 @@ static int nfattr_to_tcp(struct nfattr *cda[], struct ip_conntrack *ct)
write_unlock_bh(&tcp_lock);
return 0;
-
-nfattr_failure:
- return -1;
}
#endif
diff --git a/net/ipv4/netfilter/ip_nat_helper_pptp.c b/net/ipv4/netfilter/ip_nat_helper_pptp.c
index ee6ab74ad3a..e546203f566 100644
--- a/net/ipv4/netfilter/ip_nat_helper_pptp.c
+++ b/net/ipv4/netfilter/ip_nat_helper_pptp.c
@@ -73,6 +73,7 @@ static void pptp_nat_expected(struct ip_conntrack *ct,
struct ip_conntrack_tuple t;
struct ip_ct_pptp_master *ct_pptp_info;
struct ip_nat_pptp *nat_pptp_info;
+ struct ip_nat_range range;
ct_pptp_info = &master->help.ct_pptp_info;
nat_pptp_info = &master->nat.help.nat_pptp_info;
@@ -110,7 +111,30 @@ static void pptp_nat_expected(struct ip_conntrack *ct,
DEBUGP("not found!\n");
}
- ip_nat_follow_master(ct, exp);
+ /* This must be a fresh one. */
+ BUG_ON(ct->status & IPS_NAT_DONE_MASK);
+
+ /* Change src to where master sends to */
+ range.flags = IP_NAT_RANGE_MAP_IPS;
+ range.min_ip = range.max_ip
+ = ct->master->tuplehash[!exp->dir].tuple.dst.ip;
+ if (exp->dir == IP_CT_DIR_ORIGINAL) {
+ range.flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
+ range.min = range.max = exp->saved_proto;
+ }
+ /* hook doesn't matter, but it has to do source manip */
+ ip_nat_setup_info(ct, &range, NF_IP_POST_ROUTING);
+
+ /* For DST manip, map port here to where it's expected. */
+ range.flags = IP_NAT_RANGE_MAP_IPS;
+ range.min_ip = range.max_ip
+ = ct->master->tuplehash[!exp->dir].tuple.src.ip;
+ if (exp->dir == IP_CT_DIR_REPLY) {
+ range.flags |= IP_NAT_RANGE_PROTO_SPECIFIED;
+ range.min = range.max = exp->saved_proto;
+ }
+ /* hook doesn't matter, but it has to do destination manip */
+ ip_nat_setup_info(ct, &range, NF_IP_PRE_ROUTING);
}
/* outbound packets == from PNS to PAC */
@@ -213,7 +237,7 @@ pptp_exp_gre(struct ip_conntrack_expect *expect_orig,
/* alter expectation for PNS->PAC direction */
invert_tuplepr(&inv_t, &expect_orig->tuple);
- expect_orig->saved_proto.gre.key = htons(nat_pptp_info->pac_call_id);
+ expect_orig->saved_proto.gre.key = htons(ct_pptp_info->pns_call_id);
expect_orig->tuple.src.u.gre.key = htons(nat_pptp_info->pns_call_id);
expect_orig->tuple.dst.u.gre.key = htons(ct_pptp_info->pac_call_id);
expect_orig->dir = IP_CT_DIR_ORIGINAL;
diff --git a/net/ipv4/netfilter/ipt_CLUSTERIP.c b/net/ipv4/netfilter/ipt_CLUSTERIP.c
index 9bcb398fbc1..45c52d8f4d9 100644
--- a/net/ipv4/netfilter/ipt_CLUSTERIP.c
+++ b/net/ipv4/netfilter/ipt_CLUSTERIP.c
@@ -29,7 +29,7 @@
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv4/ipt_CLUSTERIP.h>
-#include <linux/netfilter_ipv4/ip_conntrack.h>
+#include <net/netfilter/nf_conntrack_compat.h>
#define CLUSTERIP_VERSION "0.8"
@@ -316,14 +316,14 @@ target(struct sk_buff **pskb,
{
const struct ipt_clusterip_tgt_info *cipinfo = targinfo;
enum ip_conntrack_info ctinfo;
- struct ip_conntrack *ct = ip_conntrack_get((*pskb), &ctinfo);
- u_int32_t hash;
+ u_int32_t *mark, hash;
/* don't need to clusterip_config_get() here, since refcount
* is only decremented by destroy() - and ip_tables guarantees
* that the ->target() function isn't called after ->destroy() */
- if (!ct) {
+ mark = nf_ct_get_mark((*pskb), &ctinfo);
+ if (mark == NULL) {
printk(KERN_ERR "CLUSTERIP: no conntrack!\n");
/* FIXME: need to drop invalid ones, since replies
* to outgoing connections of other nodes will be
@@ -346,7 +346,7 @@ target(struct sk_buff **pskb,
switch (ctinfo) {
case IP_CT_NEW:
- ct->mark = hash;
+ *mark = hash;
break;
case IP_CT_RELATED:
case IP_CT_RELATED+IP_CT_IS_REPLY:
@@ -363,7 +363,7 @@ target(struct sk_buff **pskb,
#ifdef DEBUG_CLUSTERP
DUMP_TUPLE(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
#endif
- DEBUGP("hash=%u ct_hash=%u ", hash, ct->mark);
+ DEBUGP("hash=%u ct_hash=%u ", hash, *mark);
if (!clusterip_responsible(cipinfo->config, hash)) {
DEBUGP("not responsible\n");
return NF_DROP;
diff --git a/net/ipv4/netfilter/ipt_CONNMARK.c b/net/ipv4/netfilter/ipt_CONNMARK.c
index 05d66ab5942..8acac5a40a9 100644
--- a/net/ipv4/netfilter/ipt_CONNMARK.c
+++ b/net/ipv4/netfilter/ipt_CONNMARK.c
@@ -29,7 +29,7 @@ MODULE_LICENSE("GPL");
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv4/ipt_CONNMARK.h>
-#include <linux/netfilter_ipv4/ip_conntrack.h>
+#include <net/netfilter/nf_conntrack_compat.h>
static unsigned int
target(struct sk_buff **pskb,
@@ -43,24 +43,24 @@ target(struct sk_buff **pskb,
u_int32_t diff;
u_int32_t nfmark;
u_int32_t newmark;
+ u_int32_t ctinfo;
+ u_int32_t *ctmark = nf_ct_get_mark(*pskb, &ctinfo);
- enum ip_conntrack_info ctinfo;
- struct ip_conntrack *ct = ip_conntrack_get((*pskb), &ctinfo);
- if (ct) {
+ if (ctmark) {
switch(markinfo->mode) {
case IPT_CONNMARK_SET:
- newmark = (ct->mark & ~markinfo->mask) | markinfo->mark;
- if (newmark != ct->mark)
- ct->mark = newmark;
+ newmark = (*ctmark & ~markinfo->mask) | markinfo->mark;
+ if (newmark != *ctmark)
+ *ctmark = newmark;
break;
case IPT_CONNMARK_SAVE:
- newmark = (ct->mark & ~markinfo->mask) | ((*pskb)->nfmark & markinfo->mask);
- if (ct->mark != newmark)
- ct->mark = newmark;
+ newmark = (*ctmark & ~markinfo->mask) | ((*pskb)->nfmark & markinfo->mask);
+ if (*ctmark != newmark)
+ *ctmark = newmark;
break;
case IPT_CONNMARK_RESTORE:
nfmark = (*pskb)->nfmark;
- diff = (ct->mark ^ nfmark) & markinfo->mask;
+ diff = (*ctmark ^ nfmark) & markinfo->mask;
if (diff != 0)
(*pskb)->nfmark = nfmark ^ diff;
break;
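The nf_conntrack_compat accessors that the CLUSTERIP, CONNMARK, connmark, connbytes and state conversions rely on are not shown in this diff. As a rough sketch of what nf_ct_get_mark() is assumed to boil down to for the classic ip_conntrack case (the nf_conntrack case is analogous), based only on the call sites visible above:

	#include <linux/types.h>
	#include <linux/skbuff.h>
	#include <linux/netfilter_ipv4/ip_conntrack.h>

	/* Hedged sketch: return a pointer to the conntrack mark of the
	 * connection this skb belongs to, or NULL if untracked/invalid. */
	static inline u_int32_t *nf_ct_get_mark(const struct sk_buff *skb,
						u_int32_t *ctinfo)
	{
		struct ip_conntrack *ct;

		ct = ip_conntrack_get((struct sk_buff *)skb,
				      (enum ip_conntrack_info *)ctinfo);
		return ct ? &ct->mark : NULL;
	}

nf_ct_get_counters() and nf_ct_is_untracked(), used in the connbytes and state conversions further below, are assumed to follow the same pattern of hiding which conntrack implementation is compiled in.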
diff --git a/net/ipv4/netfilter/ipt_NOTRACK.c b/net/ipv4/netfilter/ipt_NOTRACK.c
index a4bb9b3bc29..e3c69d072c6 100644
--- a/net/ipv4/netfilter/ipt_NOTRACK.c
+++ b/net/ipv4/netfilter/ipt_NOTRACK.c
@@ -5,7 +5,7 @@
#include <linux/skbuff.h>
#include <linux/netfilter_ipv4/ip_tables.h>
-#include <linux/netfilter_ipv4/ip_conntrack.h>
+#include <net/netfilter/nf_conntrack_compat.h>
static unsigned int
target(struct sk_buff **pskb,
@@ -23,7 +23,7 @@ target(struct sk_buff **pskb,
If there is a real ct entry corresponding to this packet,
it'll hang around till timing out. We don't deal with it
for performance reasons. JK */
- (*pskb)->nfct = &ip_conntrack_untracked.ct_general;
+ nf_ct_untrack(*pskb);
(*pskb)->nfctinfo = IP_CT_NEW;
nf_conntrack_get((*pskb)->nfct);
diff --git a/net/ipv4/netfilter/ipt_connbytes.c b/net/ipv4/netfilter/ipt_connbytes.c
index df4a42c6da2..d68a048b717 100644
--- a/net/ipv4/netfilter/ipt_connbytes.c
+++ b/net/ipv4/netfilter/ipt_connbytes.c
@@ -10,7 +10,7 @@
*/
#include <linux/module.h>
#include <linux/skbuff.h>
-#include <linux/netfilter_ipv4/ip_conntrack.h>
+#include <net/netfilter/nf_conntrack_compat.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv4/ipt_connbytes.h>
@@ -46,60 +46,59 @@ match(const struct sk_buff *skb,
int *hotdrop)
{
const struct ipt_connbytes_info *sinfo = matchinfo;
- enum ip_conntrack_info ctinfo;
- struct ip_conntrack *ct;
u_int64_t what = 0; /* initialize to make gcc happy */
+ const struct ip_conntrack_counter *counters;
- if (!(ct = ip_conntrack_get((struct sk_buff *)skb, &ctinfo)))
+ if (!(counters = nf_ct_get_counters(skb)))
return 0; /* no match */
switch (sinfo->what) {
case IPT_CONNBYTES_PKTS:
switch (sinfo->direction) {
case IPT_CONNBYTES_DIR_ORIGINAL:
- what = ct->counters[IP_CT_DIR_ORIGINAL].packets;
+ what = counters[IP_CT_DIR_ORIGINAL].packets;
break;
case IPT_CONNBYTES_DIR_REPLY:
- what = ct->counters[IP_CT_DIR_REPLY].packets;
+ what = counters[IP_CT_DIR_REPLY].packets;
break;
case IPT_CONNBYTES_DIR_BOTH:
- what = ct->counters[IP_CT_DIR_ORIGINAL].packets;
- what += ct->counters[IP_CT_DIR_REPLY].packets;
+ what = counters[IP_CT_DIR_ORIGINAL].packets;
+ what += counters[IP_CT_DIR_REPLY].packets;
break;
}
break;
case IPT_CONNBYTES_BYTES:
switch (sinfo->direction) {
case IPT_CONNBYTES_DIR_ORIGINAL:
- what = ct->counters[IP_CT_DIR_ORIGINAL].bytes;
+ what = counters[IP_CT_DIR_ORIGINAL].bytes;
break;
case IPT_CONNBYTES_DIR_REPLY:
- what = ct->counters[IP_CT_DIR_REPLY].bytes;
+ what = counters[IP_CT_DIR_REPLY].bytes;
break;
case IPT_CONNBYTES_DIR_BOTH:
- what = ct->counters[IP_CT_DIR_ORIGINAL].bytes;
- what += ct->counters[IP_CT_DIR_REPLY].bytes;
+ what = counters[IP_CT_DIR_ORIGINAL].bytes;
+ what += counters[IP_CT_DIR_REPLY].bytes;
break;
}
break;
case IPT_CONNBYTES_AVGPKT:
switch (sinfo->direction) {
case IPT_CONNBYTES_DIR_ORIGINAL:
- what = div64_64(ct->counters[IP_CT_DIR_ORIGINAL].bytes,
- ct->counters[IP_CT_DIR_ORIGINAL].packets);
+ what = div64_64(counters[IP_CT_DIR_ORIGINAL].bytes,
+ counters[IP_CT_DIR_ORIGINAL].packets);
break;
case IPT_CONNBYTES_DIR_REPLY:
- what = div64_64(ct->counters[IP_CT_DIR_REPLY].bytes,
- ct->counters[IP_CT_DIR_REPLY].packets);
+ what = div64_64(counters[IP_CT_DIR_REPLY].bytes,
+ counters[IP_CT_DIR_REPLY].packets);
break;
case IPT_CONNBYTES_DIR_BOTH:
{
u_int64_t bytes;
u_int64_t pkts;
- bytes = ct->counters[IP_CT_DIR_ORIGINAL].bytes +
- ct->counters[IP_CT_DIR_REPLY].bytes;
- pkts = ct->counters[IP_CT_DIR_ORIGINAL].packets+
- ct->counters[IP_CT_DIR_REPLY].packets;
+ bytes = counters[IP_CT_DIR_ORIGINAL].bytes +
+ counters[IP_CT_DIR_REPLY].bytes;
+ pkts = counters[IP_CT_DIR_ORIGINAL].packets+
+ counters[IP_CT_DIR_REPLY].packets;
/* FIXME_THEORETICAL: what to do if sum
* overflows ? */
diff --git a/net/ipv4/netfilter/ipt_connmark.c b/net/ipv4/netfilter/ipt_connmark.c
index bf8de47ce00..5306ef293b9 100644
--- a/net/ipv4/netfilter/ipt_connmark.c
+++ b/net/ipv4/netfilter/ipt_connmark.c
@@ -28,7 +28,7 @@ MODULE_LICENSE("GPL");
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv4/ipt_connmark.h>
-#include <linux/netfilter_ipv4/ip_conntrack.h>
+#include <net/netfilter/nf_conntrack_compat.h>
static int
match(const struct sk_buff *skb,
@@ -39,12 +39,12 @@ match(const struct sk_buff *skb,
int *hotdrop)
{
const struct ipt_connmark_info *info = matchinfo;
- enum ip_conntrack_info ctinfo;
- struct ip_conntrack *ct = ip_conntrack_get((struct sk_buff *)skb, &ctinfo);
- if (!ct)
+ u_int32_t ctinfo;
+ const u_int32_t *ctmark = nf_ct_get_mark(skb, &ctinfo);
+ if (!ctmark)
return 0;
- return ((ct->mark & info->mask) == info->mark) ^ info->invert;
+ return (((*ctmark) & info->mask) == info->mark) ^ info->invert;
}
static int
diff --git a/net/ipv4/netfilter/ipt_conntrack.c b/net/ipv4/netfilter/ipt_conntrack.c
index c1d22801b7c..c8d18705469 100644
--- a/net/ipv4/netfilter/ipt_conntrack.c
+++ b/net/ipv4/netfilter/ipt_conntrack.c
@@ -10,7 +10,14 @@
#include <linux/module.h>
#include <linux/skbuff.h>
+
+#if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE)
#include <linux/netfilter_ipv4/ip_conntrack.h>
+#include <linux/netfilter_ipv4/ip_conntrack_tuple.h>
+#else
+#include <net/netfilter/nf_conntrack.h>
+#endif
+
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv4/ipt_conntrack.h>
@@ -18,6 +25,8 @@ MODULE_LICENSE("GPL");
MODULE_AUTHOR("Marc Boucher <marc@mbsi.ca>");
MODULE_DESCRIPTION("iptables connection tracking match module");
+#if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE)
+
static int
match(const struct sk_buff *skb,
const struct net_device *in,
@@ -102,6 +111,93 @@ match(const struct sk_buff *skb,
return 1;
}
+#else /* CONFIG_IP_NF_CONNTRACK */
+static int
+match(const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const void *matchinfo,
+ int offset,
+ int *hotdrop)
+{
+ const struct ipt_conntrack_info *sinfo = matchinfo;
+ struct nf_conn *ct;
+ enum ip_conntrack_info ctinfo;
+ unsigned int statebit;
+
+ ct = nf_ct_get((struct sk_buff *)skb, &ctinfo);
+
+#define FWINV(bool,invflg) ((bool) ^ !!(sinfo->invflags & invflg))
+
+ if (ct == &nf_conntrack_untracked)
+ statebit = IPT_CONNTRACK_STATE_UNTRACKED;
+ else if (ct)
+ statebit = IPT_CONNTRACK_STATE_BIT(ctinfo);
+ else
+ statebit = IPT_CONNTRACK_STATE_INVALID;
+
+ if(sinfo->flags & IPT_CONNTRACK_STATE) {
+ if (ct) {
+ if(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip !=
+ ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip)
+ statebit |= IPT_CONNTRACK_STATE_SNAT;
+
+ if(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip !=
+ ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip)
+ statebit |= IPT_CONNTRACK_STATE_DNAT;
+ }
+
+ if (FWINV((statebit & sinfo->statemask) == 0, IPT_CONNTRACK_STATE))
+ return 0;
+ }
+
+ if(sinfo->flags & IPT_CONNTRACK_PROTO) {
+ if (!ct || FWINV(ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum != sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.protonum, IPT_CONNTRACK_PROTO))
+ return 0;
+ }
+
+ if(sinfo->flags & IPT_CONNTRACK_ORIGSRC) {
+ if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.u3.ip&sinfo->sipmsk[IP_CT_DIR_ORIGINAL].s_addr) != sinfo->tuple[IP_CT_DIR_ORIGINAL].src.ip, IPT_CONNTRACK_ORIGSRC))
+ return 0;
+ }
+
+ if(sinfo->flags & IPT_CONNTRACK_ORIGDST) {
+ if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.u3.ip&sinfo->dipmsk[IP_CT_DIR_ORIGINAL].s_addr) != sinfo->tuple[IP_CT_DIR_ORIGINAL].dst.ip, IPT_CONNTRACK_ORIGDST))
+ return 0;
+ }
+
+ if(sinfo->flags & IPT_CONNTRACK_REPLSRC) {
+ if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.u3.ip&sinfo->sipmsk[IP_CT_DIR_REPLY].s_addr) != sinfo->tuple[IP_CT_DIR_REPLY].src.ip, IPT_CONNTRACK_REPLSRC))
+ return 0;
+ }
+
+ if(sinfo->flags & IPT_CONNTRACK_REPLDST) {
+ if (!ct || FWINV((ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.u3.ip&sinfo->dipmsk[IP_CT_DIR_REPLY].s_addr) != sinfo->tuple[IP_CT_DIR_REPLY].dst.ip, IPT_CONNTRACK_REPLDST))
+ return 0;
+ }
+
+ if(sinfo->flags & IPT_CONNTRACK_STATUS) {
+ if (!ct || FWINV((ct->status & sinfo->statusmask) == 0, IPT_CONNTRACK_STATUS))
+ return 0;
+ }
+
+ if(sinfo->flags & IPT_CONNTRACK_EXPIRES) {
+ unsigned long expires;
+
+ if(!ct)
+ return 0;
+
+ expires = timer_pending(&ct->timeout) ? (ct->timeout.expires - jiffies)/HZ : 0;
+
+ if (FWINV(!(expires >= sinfo->expires_min && expires <= sinfo->expires_max), IPT_CONNTRACK_EXPIRES))
+ return 0;
+ }
+
+ return 1;
+}
+
+#endif /* CONFIG_IP_NF_CONNTRACK */
+
static int check(const char *tablename,
const struct ipt_ip *ip,
void *matchinfo,
diff --git a/net/ipv4/netfilter/ipt_helper.c b/net/ipv4/netfilter/ipt_helper.c
index 3e7dd014de4..bf14e1c7798 100644
--- a/net/ipv4/netfilter/ipt_helper.c
+++ b/net/ipv4/netfilter/ipt_helper.c
@@ -13,9 +13,15 @@
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter.h>
+#if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE)
#include <linux/netfilter_ipv4/ip_conntrack.h>
#include <linux/netfilter_ipv4/ip_conntrack_core.h>
#include <linux/netfilter_ipv4/ip_conntrack_helper.h>
+#else
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#endif
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv4/ipt_helper.h>
@@ -29,6 +35,7 @@ MODULE_DESCRIPTION("iptables helper match module");
#define DEBUGP(format, args...)
#endif
+#if defined(CONFIG_IP_NF_CONNTRACK) || defined(CONFIG_IP_NF_CONNTRACK_MODULE)
static int
match(const struct sk_buff *skb,
const struct net_device *in,
@@ -73,6 +80,53 @@ out_unlock:
return ret;
}
+#else /* CONFIG_IP_NF_CONNTRACK */
+
+static int
+match(const struct sk_buff *skb,
+ const struct net_device *in,
+ const struct net_device *out,
+ const void *matchinfo,
+ int offset,
+ int *hotdrop)
+{
+ const struct ipt_helper_info *info = matchinfo;
+ struct nf_conn *ct;
+ enum ip_conntrack_info ctinfo;
+ int ret = info->invert;
+
+ ct = nf_ct_get((struct sk_buff *)skb, &ctinfo);
+ if (!ct) {
+ DEBUGP("ipt_helper: Eek! invalid conntrack?\n");
+ return ret;
+ }
+
+ if (!ct->master) {
+ DEBUGP("ipt_helper: conntrack %p has no master\n", ct);
+ return ret;
+ }
+
+ read_lock_bh(&nf_conntrack_lock);
+ if (!ct->master->helper) {
+ DEBUGP("ipt_helper: master ct %p has no helper\n",
+ ct->master);
+ goto out_unlock;
+ }
+
+ DEBUGP("master's name = %s , info->name = %s\n",
+ ct->master->helper->name, info->name);
+
+ if (info->name[0] == '\0')
+ ret ^= 1;
+ else
+ ret ^= !strncmp(ct->master->helper->name, info->name,
+ strlen(ct->master->helper->name));
+out_unlock:
+ read_unlock_bh(&nf_conntrack_lock);
+ return ret;
+}
+#endif
+
static int check(const char *tablename,
const struct ipt_ip *ip,
void *matchinfo,
diff --git a/net/ipv4/netfilter/ipt_state.c b/net/ipv4/netfilter/ipt_state.c
index b1511b97ea5..4d7f16b70ce 100644
--- a/net/ipv4/netfilter/ipt_state.c
+++ b/net/ipv4/netfilter/ipt_state.c
@@ -10,7 +10,7 @@
#include <linux/module.h>
#include <linux/skbuff.h>
-#include <linux/netfilter_ipv4/ip_conntrack.h>
+#include <net/netfilter/nf_conntrack_compat.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv4/ipt_state.h>
@@ -30,9 +30,9 @@ match(const struct sk_buff *skb,
enum ip_conntrack_info ctinfo;
unsigned int statebit;
- if (skb->nfct == &ip_conntrack_untracked.ct_general)
+ if (nf_ct_is_untracked(skb))
statebit = IPT_STATE_UNTRACKED;
- else if (!ip_conntrack_get(skb, &ctinfo))
+ else if (!nf_ct_get_ctinfo(skb, &ctinfo))
statebit = IPT_STATE_INVALID;
else
statebit = IPT_STATE_BIT(ctinfo);
diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
new file mode 100644
index 00000000000..8202c1c0afa
--- /dev/null
+++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c
@@ -0,0 +1,571 @@
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ * - move L3 protocol dependent part to this file.
+ * 23 Mar 2004: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ * - add get_features() to support various size of conntrack
+ * structures.
+ *
+ * Derived from net/ipv4/netfilter/ip_conntrack_standalone.c
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/ip.h>
+#include <linux/netfilter.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/icmp.h>
+#include <linux/sysctl.h>
+#include <net/ip.h>
+
+#include <linux/netfilter_ipv4.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_protocol.h>
+#include <net/netfilter/nf_conntrack_l3proto.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(format, args...)
+#endif
+
+DECLARE_PER_CPU(struct nf_conntrack_stat, nf_conntrack_stat);
+
+static int ipv4_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
+ struct nf_conntrack_tuple *tuple)
+{
+ u_int32_t _addrs[2], *ap;
+ ap = skb_header_pointer(skb, nhoff + offsetof(struct iphdr, saddr),
+ sizeof(u_int32_t) * 2, _addrs);
+ if (ap == NULL)
+ return 0;
+
+ tuple->src.u3.ip = ap[0];
+ tuple->dst.u3.ip = ap[1];
+
+ return 1;
+}
+
+static int ipv4_invert_tuple(struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_tuple *orig)
+{
+ tuple->src.u3.ip = orig->dst.u3.ip;
+ tuple->dst.u3.ip = orig->src.u3.ip;
+
+ return 1;
+}
+
+static int ipv4_print_tuple(struct seq_file *s,
+ const struct nf_conntrack_tuple *tuple)
+{
+ return seq_printf(s, "src=%u.%u.%u.%u dst=%u.%u.%u.%u ",
+ NIPQUAD(tuple->src.u3.ip),
+ NIPQUAD(tuple->dst.u3.ip));
+}
+
+static int ipv4_print_conntrack(struct seq_file *s,
+ const struct nf_conn *conntrack)
+{
+ return 0;
+}
+
+/* Returns new sk_buff, or NULL */
+static struct sk_buff *
+nf_ct_ipv4_gather_frags(struct sk_buff *skb, u_int32_t user)
+{
+ skb_orphan(skb);
+
+ local_bh_disable();
+ skb = ip_defrag(skb, user);
+ local_bh_enable();
+
+ if (skb)
+ ip_send_check(skb->nh.iph);
+
+ return skb;
+}
+
+static int
+ipv4_prepare(struct sk_buff **pskb, unsigned int hooknum, unsigned int *dataoff,
+ u_int8_t *protonum)
+{
+ /* Never happens */
+ if ((*pskb)->nh.iph->frag_off & htons(IP_OFFSET)) {
+ if (net_ratelimit()) {
+ printk(KERN_ERR "ipv4_prepare: Frag of proto %u (hook=%u)\n",
+ (*pskb)->nh.iph->protocol, hooknum);
+ }
+ return -NF_DROP;
+ }
+
+ *dataoff = (*pskb)->nh.raw - (*pskb)->data + (*pskb)->nh.iph->ihl*4;
+ *protonum = (*pskb)->nh.iph->protocol;
+
+ return NF_ACCEPT;
+}
+
+int nat_module_is_loaded = 0;
+static u_int32_t ipv4_get_features(const struct nf_conntrack_tuple *tuple)
+{
+ if (nat_module_is_loaded)
+ return NF_CT_F_NAT;
+
+ return NF_CT_F_BASIC;
+}
+
+static unsigned int ipv4_confirm(unsigned int hooknum,
+ struct sk_buff **pskb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ /* We've seen it coming out the other side: confirm it */
+ return nf_conntrack_confirm(pskb);
+}
+
+static unsigned int ipv4_conntrack_help(unsigned int hooknum,
+ struct sk_buff **pskb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ struct nf_conn *ct;
+ enum ip_conntrack_info ctinfo;
+
+ /* This is where we call the helper: as the packet goes out. */
+ ct = nf_ct_get(*pskb, &ctinfo);
+ if (ct && ct->helper) {
+ unsigned int ret;
+ ret = ct->helper->help(pskb,
+ (*pskb)->nh.raw - (*pskb)->data
+ + (*pskb)->nh.iph->ihl*4,
+ ct, ctinfo);
+ if (ret != NF_ACCEPT)
+ return ret;
+ }
+ return NF_ACCEPT;
+}
+
+static unsigned int ipv4_conntrack_defrag(unsigned int hooknum,
+ struct sk_buff **pskb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+#if !defined(CONFIG_IP_NF_NAT) && !defined(CONFIG_IP_NF_NAT_MODULE)
+ /* Previously seen (loopback)? Ignore. Do this before
+ fragment check. */
+ if ((*pskb)->nfct)
+ return NF_ACCEPT;
+#endif
+
+ /* Gather fragments. */
+ if ((*pskb)->nh.iph->frag_off & htons(IP_MF|IP_OFFSET)) {
+ *pskb = nf_ct_ipv4_gather_frags(*pskb,
+ hooknum == NF_IP_PRE_ROUTING ?
+ IP_DEFRAG_CONNTRACK_IN :
+ IP_DEFRAG_CONNTRACK_OUT);
+ if (!*pskb)
+ return NF_STOLEN;
+ }
+ return NF_ACCEPT;
+}
+
+static unsigned int ipv4_refrag(unsigned int hooknum,
+ struct sk_buff **pskb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ struct rtable *rt = (struct rtable *)(*pskb)->dst;
+
+ /* We've seen it coming out the other side: confirm */
+ if (ipv4_confirm(hooknum, pskb, in, out, okfn) != NF_ACCEPT)
+ return NF_DROP;
+
+ /* Local packets are never produced too large for their
+ interface. We defragment them at LOCAL_OUT, however,
+ so we have to refragment them here. */
+ if ((*pskb)->len > dst_mtu(&rt->u.dst) &&
+ !skb_shinfo(*pskb)->tso_size) {
+ /* No hook can be after us, so this should be OK. */
+ ip_fragment(*pskb, okfn);
+ return NF_STOLEN;
+ }
+ return NF_ACCEPT;
+}
+
+static unsigned int ipv4_conntrack_in(unsigned int hooknum,
+ struct sk_buff **pskb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ return nf_conntrack_in(PF_INET, hooknum, pskb);
+}
+
+static unsigned int ipv4_conntrack_local(unsigned int hooknum,
+ struct sk_buff **pskb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ /* root is playing with raw sockets. */
+ if ((*pskb)->len < sizeof(struct iphdr)
+ || (*pskb)->nh.iph->ihl * 4 < sizeof(struct iphdr)) {
+ if (net_ratelimit())
+ printk("ipt_hook: happy cracking.\n");
+ return NF_ACCEPT;
+ }
+ return nf_conntrack_in(PF_INET, hooknum, pskb);
+}
+
+/* Connection tracking may drop packets, but never alters them, so
+ make it the first hook. */
+static struct nf_hook_ops ipv4_conntrack_defrag_ops = {
+ .hook = ipv4_conntrack_defrag,
+ .owner = THIS_MODULE,
+ .pf = PF_INET,
+ .hooknum = NF_IP_PRE_ROUTING,
+ .priority = NF_IP_PRI_CONNTRACK_DEFRAG,
+};
+
+static struct nf_hook_ops ipv4_conntrack_in_ops = {
+ .hook = ipv4_conntrack_in,
+ .owner = THIS_MODULE,
+ .pf = PF_INET,
+ .hooknum = NF_IP_PRE_ROUTING,
+ .priority = NF_IP_PRI_CONNTRACK,
+};
+
+static struct nf_hook_ops ipv4_conntrack_defrag_local_out_ops = {
+ .hook = ipv4_conntrack_defrag,
+ .owner = THIS_MODULE,
+ .pf = PF_INET,
+ .hooknum = NF_IP_LOCAL_OUT,
+ .priority = NF_IP_PRI_CONNTRACK_DEFRAG,
+};
+
+static struct nf_hook_ops ipv4_conntrack_local_out_ops = {
+ .hook = ipv4_conntrack_local,
+ .owner = THIS_MODULE,
+ .pf = PF_INET,
+ .hooknum = NF_IP_LOCAL_OUT,
+ .priority = NF_IP_PRI_CONNTRACK,
+};
+
+/* helpers */
+static struct nf_hook_ops ipv4_conntrack_helper_out_ops = {
+ .hook = ipv4_conntrack_help,
+ .owner = THIS_MODULE,
+ .pf = PF_INET,
+ .hooknum = NF_IP_POST_ROUTING,
+ .priority = NF_IP_PRI_CONNTRACK_HELPER,
+};
+
+static struct nf_hook_ops ipv4_conntrack_helper_in_ops = {
+ .hook = ipv4_conntrack_help,
+ .owner = THIS_MODULE,
+ .pf = PF_INET,
+ .hooknum = NF_IP_LOCAL_IN,
+ .priority = NF_IP_PRI_CONNTRACK_HELPER,
+};
+
+
+/* Refragmenter; last chance. */
+static struct nf_hook_ops ipv4_conntrack_out_ops = {
+ .hook = ipv4_refrag,
+ .owner = THIS_MODULE,
+ .pf = PF_INET,
+ .hooknum = NF_IP_POST_ROUTING,
+ .priority = NF_IP_PRI_CONNTRACK_CONFIRM,
+};
+
+static struct nf_hook_ops ipv4_conntrack_local_in_ops = {
+ .hook = ipv4_confirm,
+ .owner = THIS_MODULE,
+ .pf = PF_INET,
+ .hooknum = NF_IP_LOCAL_IN,
+ .priority = NF_IP_PRI_CONNTRACK_CONFIRM,
+};
+
+#ifdef CONFIG_SYSCTL
+/* From nf_conntrack_proto_icmp.c */
+extern unsigned long nf_ct_icmp_timeout;
+static struct ctl_table_header *nf_ct_ipv4_sysctl_header;
+
+static ctl_table nf_ct_sysctl_table[] = {
+ {
+ .ctl_name = NET_NF_CONNTRACK_ICMP_TIMEOUT,
+ .procname = "nf_conntrack_icmp_timeout",
+ .data = &nf_ct_icmp_timeout,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_jiffies,
+ },
+ { .ctl_name = 0 }
+};
+
+static ctl_table nf_ct_netfilter_table[] = {
+ {
+ .ctl_name = NET_NETFILTER,
+ .procname = "netfilter",
+ .mode = 0555,
+ .child = nf_ct_sysctl_table,
+ },
+ { .ctl_name = 0 }
+};
+
+static ctl_table nf_ct_net_table[] = {
+ {
+ .ctl_name = CTL_NET,
+ .procname = "net",
+ .mode = 0555,
+ .child = nf_ct_netfilter_table,
+ },
+ { .ctl_name = 0 }
+};
+#endif
+
+/* Fast function for those who don't want to parse /proc (and I don't
+ blame them). */
+/* Reversing the socket's dst/src point of view gives us the reply
+ mapping. */
+static int
+getorigdst(struct sock *sk, int optval, void __user *user, int *len)
+{
+ struct inet_sock *inet = inet_sk(sk);
+ struct nf_conntrack_tuple_hash *h;
+ struct nf_conntrack_tuple tuple;
+
+ NF_CT_TUPLE_U_BLANK(&tuple);
+ tuple.src.u3.ip = inet->rcv_saddr;
+ tuple.src.u.tcp.port = inet->sport;
+ tuple.dst.u3.ip = inet->daddr;
+ tuple.dst.u.tcp.port = inet->dport;
+ tuple.src.l3num = PF_INET;
+ tuple.dst.protonum = IPPROTO_TCP;
+
+ /* We only do TCP at the moment: is there a better way? */
+ if (strcmp(sk->sk_prot->name, "TCP")) {
+ DEBUGP("SO_ORIGINAL_DST: Not a TCP socket\n");
+ return -ENOPROTOOPT;
+ }
+
+ if ((unsigned int) *len < sizeof(struct sockaddr_in)) {
+ DEBUGP("SO_ORIGINAL_DST: len %u not %u\n",
+ *len, sizeof(struct sockaddr_in));
+ return -EINVAL;
+ }
+
+ h = nf_conntrack_find_get(&tuple, NULL);
+ if (h) {
+ struct sockaddr_in sin;
+ struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
+
+ sin.sin_family = AF_INET;
+ sin.sin_port = ct->tuplehash[IP_CT_DIR_ORIGINAL]
+ .tuple.dst.u.tcp.port;
+ sin.sin_addr.s_addr = ct->tuplehash[IP_CT_DIR_ORIGINAL]
+ .tuple.dst.u3.ip;
+
+ DEBUGP("SO_ORIGINAL_DST: %u.%u.%u.%u %u\n",
+ NIPQUAD(sin.sin_addr.s_addr), ntohs(sin.sin_port));
+ nf_ct_put(ct);
+ if (copy_to_user(user, &sin, sizeof(sin)) != 0)
+ return -EFAULT;
+ else
+ return 0;
+ }
+ DEBUGP("SO_ORIGINAL_DST: Can't find %u.%u.%u.%u/%u-%u.%u.%u.%u/%u.\n",
+ NIPQUAD(tuple.src.u3.ip), ntohs(tuple.src.u.tcp.port),
+ NIPQUAD(tuple.dst.u3.ip), ntohs(tuple.dst.u.tcp.port));
+ return -ENOENT;
+}
+
+static struct nf_sockopt_ops so_getorigdst = {
+ .pf = PF_INET,
+ .get_optmin = SO_ORIGINAL_DST,
+ .get_optmax = SO_ORIGINAL_DST+1,
+ .get = &getorigdst,
+};
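For context, the SO_ORIGINAL_DST socket option registered above is consumed from user space roughly as in the following minimal sketch (not part of this patch; it assumes an accepted, REDIRECTed/DNATed TCP socket and the SO_ORIGINAL_DST definition from <linux/netfilter_ipv4.h>, which getorigdst() above answers out of the conntrack table):

	#include <stdio.h>
	#include <arpa/inet.h>
	#include <netinet/in.h>
	#include <sys/socket.h>
	#include <linux/netfilter_ipv4.h>	/* SO_ORIGINAL_DST */

	/* Print the pre-NAT destination of a redirected TCP connection. */
	static int print_original_dst(int fd)
	{
		struct sockaddr_in orig;
		socklen_t len = sizeof(orig);

		if (getsockopt(fd, SOL_IP, SO_ORIGINAL_DST, &orig, &len) < 0)
			return -1;
		printf("original destination: %s:%u\n",
		       inet_ntoa(orig.sin_addr), ntohs(orig.sin_port));
		return 0;
	}

A transparent proxy typically calls such a helper right after accept() to learn where the client was actually trying to go.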
+
+struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4 = {
+ .l3proto = PF_INET,
+ .name = "ipv4",
+ .pkt_to_tuple = ipv4_pkt_to_tuple,
+ .invert_tuple = ipv4_invert_tuple,
+ .print_tuple = ipv4_print_tuple,
+ .print_conntrack = ipv4_print_conntrack,
+ .prepare = ipv4_prepare,
+ .get_features = ipv4_get_features,
+ .me = THIS_MODULE,
+};
+
+extern struct nf_conntrack_protocol nf_conntrack_protocol_tcp4;
+extern struct nf_conntrack_protocol nf_conntrack_protocol_udp4;
+extern struct nf_conntrack_protocol nf_conntrack_protocol_icmp;
+static int init_or_cleanup(int init)
+{
+ int ret = 0;
+
+ if (!init) goto cleanup;
+
+ ret = nf_register_sockopt(&so_getorigdst);
+ if (ret < 0) {
+ printk(KERN_ERR "Unable to register netfilter socket option\n");
+ goto cleanup_nothing;
+ }
+
+ ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_tcp4);
+ if (ret < 0) {
+ printk("nf_conntrack_ipv4: can't register tcp.\n");
+ goto cleanup_sockopt;
+ }
+
+ ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_udp4);
+ if (ret < 0) {
+ printk("nf_conntrack_ipv4: can't register udp.\n");
+ goto cleanup_tcp;
+ }
+
+ ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_icmp);
+ if (ret < 0) {
+ printk("nf_conntrack_ipv4: can't register icmp.\n");
+ goto cleanup_udp;
+ }
+
+ ret = nf_conntrack_l3proto_register(&nf_conntrack_l3proto_ipv4);
+ if (ret < 0) {
+ printk("nf_conntrack_ipv4: can't register ipv4\n");
+ goto cleanup_icmp;
+ }
+
+ ret = nf_register_hook(&ipv4_conntrack_defrag_ops);
+ if (ret < 0) {
+ printk("nf_conntrack_ipv4: can't register pre-routing defrag hook.\n");
+ goto cleanup_ipv4;
+ }
+ ret = nf_register_hook(&ipv4_conntrack_defrag_local_out_ops);
+ if (ret < 0) {
+ printk("nf_conntrack_ipv4: can't register local_out defrag hook.\n");
+ goto cleanup_defragops;
+ }
+
+ ret = nf_register_hook(&ipv4_conntrack_in_ops);
+ if (ret < 0) {
+ printk("nf_conntrack_ipv4: can't register pre-routing hook.\n");
+ goto cleanup_defraglocalops;
+ }
+
+ ret = nf_register_hook(&ipv4_conntrack_local_out_ops);
+ if (ret < 0) {
+ printk("nf_conntrack_ipv4: can't register local out hook.\n");
+ goto cleanup_inops;
+ }
+
+ ret = nf_register_hook(&ipv4_conntrack_helper_in_ops);
+ if (ret < 0) {
+ printk("nf_conntrack_ipv4: can't register local helper hook.\n");
+ goto cleanup_inandlocalops;
+ }
+
+ ret = nf_register_hook(&ipv4_conntrack_helper_out_ops);
+ if (ret < 0) {
+ printk("nf_conntrack_ipv4: can't register postrouting helper hook.\n");
+ goto cleanup_helperinops;
+ }
+
+ ret = nf_register_hook(&ipv4_conntrack_out_ops);
+ if (ret < 0) {
+ printk("nf_conntrack_ipv4: can't register post-routing hook.\n");
+ goto cleanup_helperoutops;
+ }
+
+ ret = nf_register_hook(&ipv4_conntrack_local_in_ops);
+ if (ret < 0) {
+ printk("nf_conntrack_ipv4: can't register local in hook.\n");
+ goto cleanup_inoutandlocalops;
+ }
+
+#ifdef CONFIG_SYSCTL
+ nf_ct_ipv4_sysctl_header = register_sysctl_table(nf_ct_net_table, 0);
+ if (nf_ct_ipv4_sysctl_header == NULL) {
+ printk("nf_conntrack: can't register to sysctl.\n");
+ ret = -ENOMEM;
+ goto cleanup_localinops;
+ }
+#endif
+
+ /* For use by REJECT target */
+ ip_ct_attach = __nf_conntrack_attach;
+
+ return ret;
+
+ cleanup:
+ synchronize_net();
+ ip_ct_attach = NULL;
+#ifdef CONFIG_SYSCTL
+ unregister_sysctl_table(nf_ct_ipv4_sysctl_header);
+ cleanup_localinops:
+#endif
+ nf_unregister_hook(&ipv4_conntrack_local_in_ops);
+ cleanup_inoutandlocalops:
+ nf_unregister_hook(&ipv4_conntrack_out_ops);
+ cleanup_helperoutops:
+ nf_unregister_hook(&ipv4_conntrack_helper_out_ops);
+ cleanup_helperinops:
+ nf_unregister_hook(&ipv4_conntrack_helper_in_ops);
+ cleanup_inandlocalops:
+ nf_unregister_hook(&ipv4_conntrack_local_out_ops);
+ cleanup_inops:
+ nf_unregister_hook(&ipv4_conntrack_in_ops);
+ cleanup_defraglocalops:
+ nf_unregister_hook(&ipv4_conntrack_defrag_local_out_ops);
+ cleanup_defragops:
+ nf_unregister_hook(&ipv4_conntrack_defrag_ops);
+ cleanup_ipv4:
+ nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv4);
+ cleanup_icmp:
+ nf_conntrack_protocol_unregister(&nf_conntrack_protocol_icmp);
+ cleanup_udp:
+ nf_conntrack_protocol_unregister(&nf_conntrack_protocol_udp4);
+ cleanup_tcp:
+ nf_conntrack_protocol_unregister(&nf_conntrack_protocol_tcp4);
+ cleanup_sockopt:
+ nf_unregister_sockopt(&so_getorigdst);
+ cleanup_nothing:
+ return ret;
+}
+
+MODULE_LICENSE("GPL");
+
+static int __init init(void)
+{
+ need_nf_conntrack();
+ return init_or_cleanup(1);
+}
+
+static void __exit fini(void)
+{
+ init_or_cleanup(0);
+}
+
+module_init(init);
+module_exit(fini);
+
+void need_ip_conntrack(void)
+{
+}
+
+EXPORT_SYMBOL(need_ip_conntrack);
+EXPORT_SYMBOL(nf_ct_ipv4_gather_frags);
diff --git a/net/ipv4/netfilter/nf_conntrack_proto_icmp.c b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
new file mode 100644
index 00000000000..7ddb5c08f7b
--- /dev/null
+++ b/net/ipv4/netfilter/nf_conntrack_proto_icmp.c
@@ -0,0 +1,301 @@
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ * - enable working with Layer 3 protocol independent connection tracking.
+ *
+ * Derived from net/ipv4/netfilter/ip_conntrack_proto_icmp.c
+ */
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/netfilter.h>
+#include <linux/in.h>
+#include <linux/icmp.h>
+#include <linux/seq_file.h>
+#include <net/ip.h>
+#include <net/checksum.h>
+#include <linux/netfilter_ipv4.h>
+#include <net/netfilter/nf_conntrack_tuple.h>
+#include <net/netfilter/nf_conntrack_protocol.h>
+#include <net/netfilter/nf_conntrack_core.h>
+
+unsigned long nf_ct_icmp_timeout = 30*HZ;
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(format, args...)
+#endif
+
+static int icmp_pkt_to_tuple(const struct sk_buff *skb,
+ unsigned int dataoff,
+ struct nf_conntrack_tuple *tuple)
+{
+ struct icmphdr _hdr, *hp;
+
+ hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
+ if (hp == NULL)
+ return 0;
+
+ tuple->dst.u.icmp.type = hp->type;
+ tuple->src.u.icmp.id = hp->un.echo.id;
+ tuple->dst.u.icmp.code = hp->code;
+
+ return 1;
+}
+
+static int icmp_invert_tuple(struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_tuple *orig)
+{
+ /* Add 1 so that unset entries (0) mean no inverse is defined. */
+ static u_int8_t invmap[]
+ = { [ICMP_ECHO] = ICMP_ECHOREPLY + 1,
+ [ICMP_ECHOREPLY] = ICMP_ECHO + 1,
+ [ICMP_TIMESTAMP] = ICMP_TIMESTAMPREPLY + 1,
+ [ICMP_TIMESTAMPREPLY] = ICMP_TIMESTAMP + 1,
+ [ICMP_INFO_REQUEST] = ICMP_INFO_REPLY + 1,
+ [ICMP_INFO_REPLY] = ICMP_INFO_REQUEST + 1,
+ [ICMP_ADDRESS] = ICMP_ADDRESSREPLY + 1,
+ [ICMP_ADDRESSREPLY] = ICMP_ADDRESS + 1};
+
+ if (orig->dst.u.icmp.type >= sizeof(invmap)
+ || !invmap[orig->dst.u.icmp.type])
+ return 0;
+
+ tuple->src.u.icmp.id = orig->src.u.icmp.id;
+ tuple->dst.u.icmp.type = invmap[orig->dst.u.icmp.type] - 1;
+ tuple->dst.u.icmp.code = orig->dst.u.icmp.code;
+ return 1;
+}
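The invmap table above stores type + 1 so that array slots left at zero read back as "no inverse defined". A standalone sketch of the same idiom (illustrative only, not from this patch; only echo request/reply shown):

	/* A zero slot means the type has no defined inverse. */
	static const unsigned char inv_demo[] = {
		[0] = 8 + 1,	/* echo reply  -> echo request */
		[8] = 0 + 1,	/* echo request -> echo reply  */
	};

	static int invert_type(unsigned int type, unsigned int *out)
	{
		if (type >= sizeof(inv_demo) || !inv_demo[type])
			return 0;		/* unknown type: no mapping */
		*out = inv_demo[type] - 1;	/* undo the +1 offset */
		return 1;
	}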
+
+/* Print out the per-protocol part of the tuple. */
+static int icmp_print_tuple(struct seq_file *s,
+ const struct nf_conntrack_tuple *tuple)
+{
+ return seq_printf(s, "type=%u code=%u id=%u ",
+ tuple->dst.u.icmp.type,
+ tuple->dst.u.icmp.code,
+ ntohs(tuple->src.u.icmp.id));
+}
+
+/* Print out the private part of the conntrack. */
+static int icmp_print_conntrack(struct seq_file *s,
+ const struct nf_conn *conntrack)
+{
+ return 0;
+}
+
+/* Returns verdict for packet, or -1 for invalid. */
+static int icmp_packet(struct nf_conn *ct,
+ const struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ int pf,
+ unsigned int hooknum)
+{
+ /* Try to delete connection immediately after all replies:
+ won't actually vanish as we still have skb, and del_timer
+ means this will only run once even if count hits zero twice
+ (theoretically possible with SMP) */
+ if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) {
+ if (atomic_dec_and_test(&ct->proto.icmp.count)
+ && del_timer(&ct->timeout))
+ ct->timeout.function((unsigned long)ct);
+ } else {
+ atomic_inc(&ct->proto.icmp.count);
+ nf_conntrack_event_cache(IPCT_PROTOINFO_VOLATILE, skb);
+ nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_icmp_timeout);
+ }
+
+ return NF_ACCEPT;
+}
+
+/* Called when a new connection for this protocol found. */
+static int icmp_new(struct nf_conn *conntrack,
+ const struct sk_buff *skb, unsigned int dataoff)
+{
+ static u_int8_t valid_new[]
+ = { [ICMP_ECHO] = 1,
+ [ICMP_TIMESTAMP] = 1,
+ [ICMP_INFO_REQUEST] = 1,
+ [ICMP_ADDRESS] = 1 };
+
+ if (conntrack->tuplehash[0].tuple.dst.u.icmp.type >= sizeof(valid_new)
+ || !valid_new[conntrack->tuplehash[0].tuple.dst.u.icmp.type]) {
+ /* Can't create a new ICMP `conn' with this. */
+ DEBUGP("icmp: can't create new conn with type %u\n",
+ conntrack->tuplehash[0].tuple.dst.u.icmp.type);
+ NF_CT_DUMP_TUPLE(&conntrack->tuplehash[0].tuple);
+ return 0;
+ }
+ atomic_set(&conntrack->proto.icmp.count, 0);
+ return 1;
+}
+
+extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4;
+/* Returns conntrack if it dealt with ICMP, and filled in skb fields */
+static int
+icmp_error_message(struct sk_buff *skb,
+ enum ip_conntrack_info *ctinfo,
+ unsigned int hooknum)
+{
+ struct nf_conntrack_tuple innertuple, origtuple;
+ struct {
+ struct icmphdr icmp;
+ struct iphdr ip;
+ } _in, *inside;
+ struct nf_conntrack_protocol *innerproto;
+ struct nf_conntrack_tuple_hash *h;
+ int dataoff;
+
+ NF_CT_ASSERT(skb->nfct == NULL);
+
+ /* Not enough header? */
+ inside = skb_header_pointer(skb, skb->nh.iph->ihl*4, sizeof(_in), &_in);
+ if (inside == NULL)
+ return -NF_ACCEPT;
+
+ /* Ignore ICMP's containing fragments (shouldn't happen) */
+ if (inside->ip.frag_off & htons(IP_OFFSET)) {
+ DEBUGP("icmp_error_message: fragment of proto %u\n",
+ inside->ip.protocol);
+ return -NF_ACCEPT;
+ }
+
+ innerproto = nf_ct_find_proto(PF_INET, inside->ip.protocol);
+ dataoff = skb->nh.iph->ihl*4 + sizeof(inside->icmp);
+ /* Are they talking about one of our connections? */
+ if (!nf_ct_get_tuple(skb, dataoff, dataoff + inside->ip.ihl*4, PF_INET,
+ inside->ip.protocol, &origtuple,
+ &nf_conntrack_l3proto_ipv4, innerproto)) {
+ DEBUGP("icmp_error_message: ! get_tuple p=%u",
+ inside->ip.protocol);
+ return -NF_ACCEPT;
+ }
+
+ /* Ordinarily, we'd expect the inverted tupleproto, but it's
+ been preserved inside the ICMP. */
+ if (!nf_ct_invert_tuple(&innertuple, &origtuple,
+ &nf_conntrack_l3proto_ipv4, innerproto)) {
+ DEBUGP("icmp_error_message: no match\n");
+ return -NF_ACCEPT;
+ }
+
+ *ctinfo = IP_CT_RELATED;
+
+ h = nf_conntrack_find_get(&innertuple, NULL);
+ if (!h) {
+ /* Locally generated ICMPs will match inverted if they
+ haven't been SNAT'ed yet */
+ /* FIXME: NAT code has to handle half-done double NAT --RR */
+ if (hooknum == NF_IP_LOCAL_OUT)
+ h = nf_conntrack_find_get(&origtuple, NULL);
+
+ if (!h) {
+ DEBUGP("icmp_error_message: no match\n");
+ return -NF_ACCEPT;
+ }
+
+ /* Reverse direction from that found */
+ if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY)
+ *ctinfo += IP_CT_IS_REPLY;
+ } else {
+ if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY)
+ *ctinfo += IP_CT_IS_REPLY;
+ }
+
+ /* Update skb to refer to this connection */
+ skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general;
+ skb->nfctinfo = *ctinfo;
+ return -NF_ACCEPT;
+}
+
+/* Small and modified version of icmp_rcv */
+static int
+icmp_error(struct sk_buff *skb, unsigned int dataoff,
+ enum ip_conntrack_info *ctinfo, int pf, unsigned int hooknum)
+{
+ struct icmphdr _ih, *icmph;
+
+ /* Not enough header? */
+ icmph = skb_header_pointer(skb, skb->nh.iph->ihl*4, sizeof(_ih), &_ih);
+ if (icmph == NULL) {
+ if (LOG_INVALID(IPPROTO_ICMP))
+ nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
+ "nf_ct_icmp: short packet ");
+ return -NF_ACCEPT;
+ }
+
+ /* See ip_conntrack_proto_tcp.c */
+ if (hooknum != NF_IP_PRE_ROUTING)
+ goto checksum_skipped;
+
+ switch (skb->ip_summed) {
+ case CHECKSUM_HW:
+ if (!(u16)csum_fold(skb->csum))
+ break;
+ if (LOG_INVALID(IPPROTO_ICMP))
+ nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
+ "nf_ct_icmp: bad HW ICMP checksum ");
+ return -NF_ACCEPT;
+ case CHECKSUM_NONE:
+ if ((u16)csum_fold(skb_checksum(skb, 0, skb->len, 0))) {
+ if (LOG_INVALID(IPPROTO_ICMP))
+ nf_log_packet(PF_INET, 0, skb, NULL, NULL,
+ NULL,
+ "nf_ct_icmp: bad ICMP checksum ");
+ return -NF_ACCEPT;
+ }
+ default:
+ break;
+ }
+
+checksum_skipped:
+ /*
+ * 18 is the highest 'known' ICMP type. Anything else is a mystery
+ *
+ * RFC 1122: 3.2.2 Unknown ICMP message types MUST be silently
+ * discarded.
+ */
+ if (icmph->type > NR_ICMP_TYPES) {
+ if (LOG_INVALID(IPPROTO_ICMP))
+ nf_log_packet(PF_INET, 0, skb, NULL, NULL, NULL,
+ "nf_ct_icmp: invalid ICMP type ");
+ return -NF_ACCEPT;
+ }
+
+ /* Need to track icmp error message? */
+ if (icmph->type != ICMP_DEST_UNREACH
+ && icmph->type != ICMP_SOURCE_QUENCH
+ && icmph->type != ICMP_TIME_EXCEEDED
+ && icmph->type != ICMP_PARAMETERPROB
+ && icmph->type != ICMP_REDIRECT)
+ return NF_ACCEPT;
+
+ return icmp_error_message(skb, ctinfo, hooknum);
+}
+
+struct nf_conntrack_protocol nf_conntrack_protocol_icmp =
+{
+ .list = { NULL, NULL },
+ .l3proto = PF_INET,
+ .proto = IPPROTO_ICMP,
+ .name = "icmp",
+ .pkt_to_tuple = icmp_pkt_to_tuple,
+ .invert_tuple = icmp_invert_tuple,
+ .print_tuple = icmp_print_tuple,
+ .print_conntrack = icmp_print_conntrack,
+ .packet = icmp_packet,
+ .new = icmp_new,
+ .error = icmp_error,
+ .destroy = NULL,
+ .me = NULL
+};
+
+EXPORT_SYMBOL(nf_conntrack_protocol_icmp);
diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
index 65268562351..01444a02b48 100644
--- a/net/ipv4/sysctl_net_ipv4.c
+++ b/net/ipv4/sysctl_net_ipv4.c
@@ -645,6 +645,14 @@ ctl_table ipv4_table[] = {
.proc_handler = &proc_tcp_congestion_control,
.strategy = &sysctl_tcp_congestion_control,
},
+ {
+ .ctl_name = NET_TCP_ABC,
+ .procname = "tcp_abc",
+ .data = &sysctl_tcp_abc,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
{ .ctl_name = 0 }
};
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 72b7c22e1ea..9ac7a4f46bd 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1640,7 +1640,7 @@ int tcp_disconnect(struct sock *sk, int flags)
} else if (tcp_need_reset(old_state) ||
(tp->snd_nxt != tp->write_seq &&
(1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
- /* The last check adjusts for discrepance of Linux wrt. RFC
+ /* The last check adjusts for discrepancy of Linux wrt. RFC
* states
*/
tcp_send_active_reset(sk, gfp_any());
@@ -1669,6 +1669,7 @@ int tcp_disconnect(struct sock *sk, int flags)
tp->packets_out = 0;
tp->snd_ssthresh = 0x7fffffff;
tp->snd_cwnd_cnt = 0;
+ tp->bytes_acked = 0;
tcp_set_ca_state(sk, TCP_CA_Open);
tcp_clear_retrans(tp);
inet_csk_delack_init(sk);
diff --git a/net/ipv4/tcp_bic.c b/net/ipv4/tcp_bic.c
index ae35e060904..1d0cd86621b 100644
--- a/net/ipv4/tcp_bic.c
+++ b/net/ipv4/tcp_bic.c
@@ -217,17 +217,15 @@ static void bictcp_cong_avoid(struct sock *sk, u32 ack,
bictcp_low_utilization(sk, data_acked);
- if (in_flight < tp->snd_cwnd)
+ if (!tcp_is_cwnd_limited(sk, in_flight))
return;
- if (tp->snd_cwnd <= tp->snd_ssthresh) {
- /* In "safe" area, increase. */
- if (tp->snd_cwnd < tp->snd_cwnd_clamp)
- tp->snd_cwnd++;
- } else {
+ if (tp->snd_cwnd <= tp->snd_ssthresh)
+ tcp_slow_start(tp);
+ else {
bictcp_update(ca, tp->snd_cwnd);
- /* In dangerous area, increase slowly.
+ /* In dangerous area, increase slowly.
* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd
*/
if (tp->snd_cwnd_cnt >= ca->cnt) {
diff --git a/net/ipv4/tcp_cong.c b/net/ipv4/tcp_cong.c
index bbf2d6624e8..c7cc62c8dc1 100644
--- a/net/ipv4/tcp_cong.c
+++ b/net/ipv4/tcp_cong.c
@@ -186,24 +186,32 @@ void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 rtt, u32 in_flight,
{
struct tcp_sock *tp = tcp_sk(sk);
- if (in_flight < tp->snd_cwnd)
+ if (!tcp_is_cwnd_limited(sk, in_flight))
return;
- if (tp->snd_cwnd <= tp->snd_ssthresh) {
- /* In "safe" area, increase. */
- if (tp->snd_cwnd < tp->snd_cwnd_clamp)
- tp->snd_cwnd++;
- } else {
- /* In dangerous area, increase slowly.
- * In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd
- */
- if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
- if (tp->snd_cwnd < tp->snd_cwnd_clamp)
- tp->snd_cwnd++;
- tp->snd_cwnd_cnt = 0;
- } else
- tp->snd_cwnd_cnt++;
- }
+ /* In "safe" area, increase. */
+ if (tp->snd_cwnd <= tp->snd_ssthresh)
+ tcp_slow_start(tp);
+
+ /* In dangerous area, increase slowly. */
+ else if (sysctl_tcp_abc) {
+ /* RFC 3465: Appropriate Byte Counting:
+ * increase once for each full cwnd acked
+ */
+ if (tp->bytes_acked >= tp->snd_cwnd*tp->mss_cache) {
+ tp->bytes_acked -= tp->snd_cwnd*tp->mss_cache;
+ if (tp->snd_cwnd < tp->snd_cwnd_clamp)
+ tp->snd_cwnd++;
+ }
+ } else {
+ /* In theory this is tp->snd_cwnd += 1 / tp->snd_cwnd */
+ if (tp->snd_cwnd_cnt >= tp->snd_cwnd) {
+ if (tp->snd_cwnd < tp->snd_cwnd_clamp)
+ tp->snd_cwnd++;
+ tp->snd_cwnd_cnt = 0;
+ } else
+ tp->snd_cwnd_cnt++;
+ }
}
EXPORT_SYMBOL_GPL(tcp_reno_cong_avoid);
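As a standalone model of the RFC 3465 branch added above (a hypothetical helper, not part of the patch; cwnd is counted in segments and bytes_acked is the running total of newly acknowledged bytes, mirroring tp->bytes_acked):

	/* Appropriate Byte Counting: open the window by one segment only
	 * after a full cwnd's worth of bytes has been acknowledged. */
	static unsigned int abc_cong_avoid(unsigned int cwnd,
					   unsigned int cwnd_clamp,
					   unsigned int mss,
					   unsigned long *bytes_acked)
	{
		if (*bytes_acked >= (unsigned long)cwnd * mss) {
			*bytes_acked -= (unsigned long)cwnd * mss;
			if (cwnd < cwnd_clamp)
				cwnd++;
		}
		return cwnd;
	}

Compared with the packet-counting branch below it, this makes the window growth rate depend on acknowledged bytes rather than on the number of ACKs, which is why tp->bytes_acked also has to be reset in tcp_disconnect() earlier in this patch.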
diff --git a/net/ipv4/tcp_highspeed.c b/net/ipv4/tcp_highspeed.c
index 6acc04bde08..82b3c189bd7 100644
--- a/net/ipv4/tcp_highspeed.c
+++ b/net/ipv4/tcp_highspeed.c
@@ -111,18 +111,17 @@ static void hstcp_init(struct sock *sk)
}
static void hstcp_cong_avoid(struct sock *sk, u32 adk, u32 rtt,
- u32 in_flight, int good)
+ u32 in_flight, u32 pkts_acked)
{
struct tcp_sock *tp = tcp_sk(sk);
struct hstcp *ca = inet_csk_ca(sk);
- if (in_flight < tp->snd_cwnd)
+ if (!tcp_is_cwnd_limited(sk, in_flight))
return;
- if (tp->snd_cwnd <= tp->snd_ssthresh) {
- if (tp->snd_cwnd < tp->snd_cwnd_clamp)
- tp->snd_cwnd++;
- } else {
+ if (tp->snd_cwnd <= tp->snd_ssthresh)
+ tcp_slow_start(tp);
+ else {
/* Update AIMD parameters */
if (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd) {
while (tp->snd_cwnd > hstcp_aimd_vals[ca->ai].cwnd &&
diff --git a/net/ipv4/tcp_htcp.c b/net/ipv4/tcp_htcp.c
index e47b37984e9..3284cfb993e 100644
--- a/net/ipv4/tcp_htcp.c
+++ b/net/ipv4/tcp_htcp.c
@@ -207,14 +207,13 @@ static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
struct tcp_sock *tp = tcp_sk(sk);
struct htcp *ca = inet_csk_ca(sk);
- if (in_flight < tp->snd_cwnd)
+ if (!tcp_is_cwnd_limited(sk, in_flight))
return;
- if (tp->snd_cwnd <= tp->snd_ssthresh) {
- /* In "safe" area, increase. */
- if (tp->snd_cwnd < tp->snd_cwnd_clamp)
- tp->snd_cwnd++;
- } else {
+ if (tp->snd_cwnd <= tp->snd_ssthresh)
+ tcp_slow_start(tp);
+ else {
+
measure_rtt(sk);
/* keep track of number of round-trip times since last backoff event */
@@ -224,7 +223,7 @@ static void htcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
htcp_alpha_update(ca);
}
- /* In dangerous area, increase slowly.
+ /* In dangerous area, increase slowly.
* In theory this is tp->snd_cwnd += alpha / tp->snd_cwnd
*/
if ((tp->snd_cwnd_cnt++ * ca->alpha)>>7 >= tp->snd_cwnd) {
diff --git a/net/ipv4/tcp_hybla.c b/net/ipv4/tcp_hybla.c
index 77add63623d..40dbb387751 100644
--- a/net/ipv4/tcp_hybla.c
+++ b/net/ipv4/tcp_hybla.c
@@ -100,12 +100,12 @@ static void hybla_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
ca->minrtt = tp->srtt;
}
+ if (!tcp_is_cwnd_limited(sk, in_flight))
+ return;
+
if (!ca->hybla_en)
return tcp_reno_cong_avoid(sk, ack, rtt, in_flight, flag);
- if (in_flight < tp->snd_cwnd)
- return;
-
if (ca->rho == 0)
hybla_recalc_param(sk);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 3e98b57578d..40a26b7157b 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -42,7 +42,7 @@
* Andi Kleen : Moved open_request checking here
* and process RSTs for open_requests.
* Andi Kleen : Better prune_queue, and other fixes.
- * Andrey Savochkin: Fix RTT measurements in the presnce of
+ * Andrey Savochkin: Fix RTT measurements in the presence of
* timestamps.
* Andrey Savochkin: Check sequence numbers correctly when
* removing SACKs due to in sequence incoming
@@ -89,6 +89,7 @@ int sysctl_tcp_frto;
int sysctl_tcp_nometrics_save;
int sysctl_tcp_moderate_rcvbuf = 1;
+int sysctl_tcp_abc = 1;
#define FLAG_DATA 0x01 /* Incoming frame contained data. */
#define FLAG_WIN_UPDATE 0x02 /* Incoming ACK was a window update. */
@@ -223,7 +224,7 @@ static void tcp_fixup_sndbuf(struct sock *sk)
* of receiver window. Check #2.
*
* The scheme does not work when sender sends good segments opening
- * window and then starts to feed us spagetti. But it should work
+ * window and then starts to feed us spaghetti. But it should work
* in common situations. Otherwise, we have to rely on queue collapsing.
*/
@@ -233,7 +234,7 @@ static int __tcp_grow_window(const struct sock *sk, struct tcp_sock *tp,
{
/* Optimize this! */
int truesize = tcp_win_from_space(skb->truesize)/2;
- int window = tcp_full_space(sk)/2;
+ int window = tcp_win_from_space(sysctl_tcp_rmem[2])/2;
while (tp->rcv_ssthresh <= window) {
if (truesize <= skb->len)
@@ -277,7 +278,7 @@ static void tcp_fixup_rcvbuf(struct sock *sk)
int rcvmem = tp->advmss + MAX_TCP_HEADER + 16 + sizeof(struct sk_buff);
/* Try to select rcvbuf so that 4 mss-sized segments
- * will fit to window and correspoding skbs will fit to our rcvbuf.
+ * will fit to window and corresponding skbs will fit to our rcvbuf.
* (was 3; 4 is minimum to allow fast retransmit to work.)
*/
while (tcp_win_from_space(rcvmem) < tp->advmss)
@@ -286,7 +287,7 @@ static void tcp_fixup_rcvbuf(struct sock *sk)
sk->sk_rcvbuf = min(4 * rcvmem, sysctl_tcp_rmem[2]);
}
-/* 4. Try to fixup all. It is made iimediately after connection enters
+/* 4. Try to fixup all. It is made immediately after connection enters
* established state.
*/
static void tcp_init_buffer_space(struct sock *sk)
@@ -326,37 +327,18 @@ static void tcp_init_buffer_space(struct sock *sk)
static void tcp_clamp_window(struct sock *sk, struct tcp_sock *tp)
{
struct inet_connection_sock *icsk = inet_csk(sk);
- struct sk_buff *skb;
- unsigned int app_win = tp->rcv_nxt - tp->copied_seq;
- int ofo_win = 0;
icsk->icsk_ack.quick = 0;
- skb_queue_walk(&tp->out_of_order_queue, skb) {
- ofo_win += skb->len;
- }
-
- /* If overcommit is due to out of order segments,
- * do not clamp window. Try to expand rcvbuf instead.
- */
- if (ofo_win) {
- if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
- !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
- !tcp_memory_pressure &&
- atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0])
- sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
- sysctl_tcp_rmem[2]);
+ if (sk->sk_rcvbuf < sysctl_tcp_rmem[2] &&
+ !(sk->sk_userlocks & SOCK_RCVBUF_LOCK) &&
+ !tcp_memory_pressure &&
+ atomic_read(&tcp_memory_allocated) < sysctl_tcp_mem[0]) {
+ sk->sk_rcvbuf = min(atomic_read(&sk->sk_rmem_alloc),
+ sysctl_tcp_rmem[2]);
}
- if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf) {
- app_win += ofo_win;
- if (atomic_read(&sk->sk_rmem_alloc) >= 2 * sk->sk_rcvbuf)
- app_win >>= 1;
- if (app_win > icsk->icsk_ack.rcv_mss)
- app_win -= icsk->icsk_ack.rcv_mss;
- app_win = max(app_win, 2U*tp->advmss);
-
+ if (atomic_read(&sk->sk_rmem_alloc) > sk->sk_rcvbuf)
tp->rcv_ssthresh = min(tp->window_clamp, 2U*tp->advmss);
- }
}
/* Receiver "autotuning" code.
@@ -385,8 +367,8 @@ static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
* are stalled on filesystem I/O.
*
* Also, since we are only going for a minimum in the
- * non-timestamp case, we do not smoothe things out
- * else with timestamps disabled convergance takes too
+ * non-timestamp case, we do not smooth things out
+ * else with timestamps disabled convergence takes too
* long.
*/
if (!win_dep) {
@@ -395,7 +377,7 @@ static void tcp_rcv_rtt_update(struct tcp_sock *tp, u32 sample, int win_dep)
} else if (m < new_sample)
new_sample = m << 3;
} else {
- /* No previous mesaure. */
+ /* No previous measure. */
new_sample = m << 3;
}
@@ -524,7 +506,7 @@ static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_
if (icsk->icsk_ack.ato > icsk->icsk_rto)
icsk->icsk_ack.ato = icsk->icsk_rto;
} else if (m > icsk->icsk_rto) {
- /* Too long gap. Apparently sender falled to
+ /* Too long gap. Apparently sender failed to
* restart window, so that we send ACKs quickly.
*/
tcp_incr_quickack(sk);
@@ -548,10 +530,9 @@ static void tcp_event_data_recv(struct sock *sk, struct tcp_sock *tp, struct sk_
* To save cycles in the RFC 1323 implementation it was better to break
* it up into three procedures. -- erics
*/
-static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt, u32 *usrtt)
+static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt)
{
struct tcp_sock *tp = tcp_sk(sk);
- const struct inet_connection_sock *icsk = inet_csk(sk);
long m = mrtt; /* RTT */
/* The following amusing code comes from Jacobson's
@@ -565,7 +546,7 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt, u32 *usrtt)
*
* Funny. This algorithm seems to be very broken.
* These formulae increase RTO, when it should be decreased, increase
- * too slowly, when it should be incresed fastly, decrease too fastly
+ * too slowly, when it should be increased fastly, decrease too fastly
* etc. I guess in BSD RTO takes ONE value, so that it is absolutely
* does not matter how to _calculate_ it. Seems, it was trap
* that VJ failed to avoid. 8)
@@ -610,9 +591,6 @@ static void tcp_rtt_estimator(struct sock *sk, const __u32 mrtt, u32 *usrtt)
tp->mdev_max = tp->rttvar = max(tp->mdev, TCP_RTO_MIN);
tp->rtt_seq = tp->snd_nxt;
}
-
- if (icsk->icsk_ca_ops->rtt_sample)
- icsk->icsk_ca_ops->rtt_sample(sk, *usrtt);
}
/* Calculate rto without backoff. This is the second half of Van Jacobson's
@@ -629,14 +607,14 @@ static inline void tcp_set_rto(struct sock *sk)
* at least by solaris and freebsd. "Erratic ACKs" has _nothing_
* to do with delayed acks, because at cwnd>2 true delack timeout
* is invisible. Actually, Linux-2.4 also generates erratic
- * ACKs in some curcumstances.
+ * ACKs in some circumstances.
*/
inet_csk(sk)->icsk_rto = (tp->srtt >> 3) + tp->rttvar;
/* 2. Fixups made earlier cannot be right.
* If we do not estimate RTO correctly without them,
* all the algo is pure shit and should be replaced
- * with correct one. It is exaclty, which we pretend to do.
+ * with correct one. It is exactly, which we pretend to do.
*/
}
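
The tcp_rtt_estimator()/tcp_set_rto() hunks above keep Jacobson's estimator in fixed point: srtt is stored left-shifted by 3 and mdev by 2, so both EWMAs reduce to shifts and adds. A standalone sketch of that arithmetic follows; the kernel's mdev_max/rttvar windowing and minimum-RTO clamp are deliberately left out, which is a simplifying assumption.

/* Simplified Jacobson/Karels RTT estimator in the kernel's fixed-point style:
 * srtt holds 8*SRTT and mdev holds 4*MDEV, so the gains 1/8 and 1/4 are shifts. */
#include <stdio.h>

struct rtt_est {
    long srtt;   /* smoothed RTT, scaled <<3 */
    long mdev;   /* mean deviation, scaled <<2 */
};

static void rtt_sample(struct rtt_est *e, long m)   /* m: measured RTT, e.g. ms */
{
    if (e->srtt == 0) {              /* first sample: RTO starts near 3*RTT */
        e->srtt = m << 3;
        e->mdev = m << 1;
        return;
    }
    m -= e->srtt >> 3;               /* err = m - SRTT            */
    e->srtt += m;                    /* SRTT += err/8             */
    if (m < 0)
        m = -m;
    m -= e->mdev >> 2;
    e->mdev += m;                    /* MDEV += (|err| - MDEV)/4  */
}

static long rto(const struct rtt_est *e)
{
    return (e->srtt >> 3) + e->mdev; /* RTO = SRTT + 4*MDEV       */
}

int main(void)
{
    struct rtt_est e = { 0, 0 };
    long samples[] = { 100, 110, 90, 300, 95, 100 };

    for (unsigned i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
        rtt_sample(&e, samples[i]);
        printf("sample=%ld  srtt=%ld  rto=%ld\n", samples[i], e.srtt >> 3, rto(&e));
    }
    return 0;
}
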
@@ -794,7 +772,7 @@ static void tcp_init_metrics(struct sock *sk)
* to make it more realistic.
*
* A bit of theory. RTT is time passed after "normal" sized packet
- * is sent until it is ACKed. In normal curcumstances sending small
+ * is sent until it is ACKed. In normal circumstances sending small
* packets force peer to delay ACKs and calculation is correct too.
* The algorithm is adaptive and, provided we follow specs, it
* NEVER underestimate RTT. BUT! If peer tries to make some clever
@@ -919,18 +897,32 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
int prior_fackets;
u32 lost_retrans = 0;
int flag = 0;
+ int dup_sack = 0;
int i;
if (!tp->sacked_out)
tp->fackets_out = 0;
prior_fackets = tp->fackets_out;
- for (i=0; i<num_sacks; i++, sp++) {
- struct sk_buff *skb;
- __u32 start_seq = ntohl(sp->start_seq);
- __u32 end_seq = ntohl(sp->end_seq);
- int fack_count = 0;
- int dup_sack = 0;
+ /* SACK fastpath:
+ * if the only SACK change is the increase of the end_seq of
+ * the first block then only apply that SACK block
+ * and use retrans queue hinting; otherwise take the slowpath */
+ flag = 1;
+ for (i = 0; i< num_sacks; i++) {
+ __u32 start_seq = ntohl(sp[i].start_seq);
+ __u32 end_seq = ntohl(sp[i].end_seq);
+
+ if (i == 0){
+ if (tp->recv_sack_cache[i].start_seq != start_seq)
+ flag = 0;
+ } else {
+ if ((tp->recv_sack_cache[i].start_seq != start_seq) ||
+ (tp->recv_sack_cache[i].end_seq != end_seq))
+ flag = 0;
+ }
+ tp->recv_sack_cache[i].start_seq = start_seq;
+ tp->recv_sack_cache[i].end_seq = end_seq;
/* Check for D-SACK. */
if (i == 0) {
@@ -962,15 +954,58 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
if (before(ack, prior_snd_una - tp->max_window))
return 0;
}
+ }
+
+ if (flag)
+ num_sacks = 1;
+ else {
+ int j;
+ tp->fastpath_skb_hint = NULL;
+
+ /* order SACK blocks to allow in order walk of the retrans queue */
+ for (i = num_sacks-1; i > 0; i--) {
+ for (j = 0; j < i; j++){
+ if (after(ntohl(sp[j].start_seq),
+ ntohl(sp[j+1].start_seq))){
+ sp[j].start_seq = htonl(tp->recv_sack_cache[j+1].start_seq);
+ sp[j].end_seq = htonl(tp->recv_sack_cache[j+1].end_seq);
+ sp[j+1].start_seq = htonl(tp->recv_sack_cache[j].start_seq);
+ sp[j+1].end_seq = htonl(tp->recv_sack_cache[j].end_seq);
+ }
+
+ }
+ }
+ }
+
+ /* clear flag as used for different purpose in following code */
+ flag = 0;
+
+ for (i=0; i<num_sacks; i++, sp++) {
+ struct sk_buff *skb;
+ __u32 start_seq = ntohl(sp->start_seq);
+ __u32 end_seq = ntohl(sp->end_seq);
+ int fack_count;
+
+ /* Use SACK fastpath hint if valid */
+ if (tp->fastpath_skb_hint) {
+ skb = tp->fastpath_skb_hint;
+ fack_count = tp->fastpath_cnt_hint;
+ } else {
+ skb = sk->sk_write_queue.next;
+ fack_count = 0;
+ }
/* Event "B" in the comment above. */
if (after(end_seq, tp->high_seq))
flag |= FLAG_DATA_LOST;
- sk_stream_for_retrans_queue(skb, sk) {
+ sk_stream_for_retrans_queue_from(skb, sk) {
int in_sack, pcount;
u8 sacked;
+ tp->fastpath_skb_hint = skb;
+ tp->fastpath_cnt_hint = fack_count;
+
/* The retransmission queue is always in order, so
* we can short-circuit the walk early.
*/
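
The slowpath above sorts the SACK blocks by start_seq so the retransmission queue can be walked in one forward pass, using the wraparound-safe after() comparison. A self-contained sketch of that ordering; the plain host-order pairs and the swap-by-value are simplifications of what the hunk does with the cached network-order fields.

/* Order SACK blocks by start_seq, treating sequence numbers the way the
 * kernel's after() macro does: a signed 32-bit difference handles wraparound. */
#include <stdio.h>
#include <stdint.h>

struct sack_block { uint32_t start_seq, end_seq; };

static int after(uint32_t seq1, uint32_t seq2)
{
    return (int32_t)(seq1 - seq2) > 0;
}

static void sort_sack_blocks(struct sack_block *sp, int num)
{
    /* Same O(n^2) pass as the patch; num is at most 4, so it is cheap. */
    for (int i = num - 1; i > 0; i--)
        for (int j = 0; j < i; j++)
            if (after(sp[j].start_seq, sp[j + 1].start_seq)) {
                struct sack_block tmp = sp[j];
                sp[j] = sp[j + 1];
                sp[j + 1] = tmp;
            }
}

int main(void)
{
    /* The block just below the 2^32 wrap sorts first, ahead of the
     * post-wrap blocks, because after() compares signed differences. */
    struct sack_block blocks[3] = {
        { 4000, 5000 }, { 0xfffffff0u, 16 }, { 1000, 2000 },
    };

    sort_sack_blocks(blocks, 3);
    for (int i = 0; i < 3; i++)
        printf("block %d: %u-%u\n", i,
               (unsigned)blocks[i].start_seq, (unsigned)blocks[i].end_seq);
    return 0;
}
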
@@ -1045,6 +1080,9 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
TCP_SKB_CB(skb)->sacked &= ~(TCPCB_LOST|TCPCB_SACKED_RETRANS);
tp->lost_out -= tcp_skb_pcount(skb);
tp->retrans_out -= tcp_skb_pcount(skb);
+
+ /* clear lost hint */
+ tp->retransmit_skb_hint = NULL;
}
} else {
/* New sack for not retransmitted frame,
@@ -1057,6 +1095,9 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
if (sacked & TCPCB_LOST) {
TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
tp->lost_out -= tcp_skb_pcount(skb);
+
+ /* clear lost hint */
+ tp->retransmit_skb_hint = NULL;
}
}
@@ -1080,6 +1121,7 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
(TCP_SKB_CB(skb)->sacked&TCPCB_SACKED_RETRANS)) {
TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
tp->retrans_out -= tcp_skb_pcount(skb);
+ tp->retransmit_skb_hint = NULL;
}
}
}
@@ -1107,6 +1149,9 @@ tcp_sacktag_write_queue(struct sock *sk, struct sk_buff *ack_skb, u32 prior_snd_
TCP_SKB_CB(skb)->sacked &= ~TCPCB_SACKED_RETRANS;
tp->retrans_out -= tcp_skb_pcount(skb);
+ /* clear lost hint */
+ tp->retransmit_skb_hint = NULL;
+
if (!(TCP_SKB_CB(skb)->sacked&(TCPCB_LOST|TCPCB_SACKED_ACKED))) {
tp->lost_out += tcp_skb_pcount(skb);
TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
@@ -1214,6 +1259,8 @@ static void tcp_enter_frto_loss(struct sock *sk)
tcp_set_ca_state(sk, TCP_CA_Loss);
tp->high_seq = tp->frto_highmark;
TCP_ECN_queue_cwr(tp);
+
+ clear_all_retrans_hints(tp);
}
void tcp_clear_retrans(struct tcp_sock *tp)
@@ -1251,6 +1298,7 @@ void tcp_enter_loss(struct sock *sk, int how)
tp->snd_cwnd_cnt = 0;
tp->snd_cwnd_stamp = tcp_time_stamp;
+ tp->bytes_acked = 0;
tcp_clear_retrans(tp);
/* Push undo marker, if it was plain RTO and nothing
@@ -1279,6 +1327,8 @@ void tcp_enter_loss(struct sock *sk, int how)
tcp_set_ca_state(sk, TCP_CA_Loss);
tp->high_seq = tp->snd_nxt;
TCP_ECN_queue_cwr(tp);
+
+ clear_all_retrans_hints(tp);
}
static int tcp_check_sack_reneging(struct sock *sk)
@@ -1503,17 +1553,37 @@ static void tcp_mark_head_lost(struct sock *sk, struct tcp_sock *tp,
int packets, u32 high_seq)
{
struct sk_buff *skb;
- int cnt = packets;
+ int cnt;
- BUG_TRAP(cnt <= tp->packets_out);
+ BUG_TRAP(packets <= tp->packets_out);
+ if (tp->lost_skb_hint) {
+ skb = tp->lost_skb_hint;
+ cnt = tp->lost_cnt_hint;
+ } else {
+ skb = sk->sk_write_queue.next;
+ cnt = 0;
+ }
- sk_stream_for_retrans_queue(skb, sk) {
- cnt -= tcp_skb_pcount(skb);
- if (cnt < 0 || after(TCP_SKB_CB(skb)->end_seq, high_seq))
+ sk_stream_for_retrans_queue_from(skb, sk) {
+ /* TODO: do this better */
+ /* this is not the most efficient way to do this... */
+ tp->lost_skb_hint = skb;
+ tp->lost_cnt_hint = cnt;
+ cnt += tcp_skb_pcount(skb);
+ if (cnt > packets || after(TCP_SKB_CB(skb)->end_seq, high_seq))
break;
if (!(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) {
TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
tp->lost_out += tcp_skb_pcount(skb);
+
+ /* clear xmit_retransmit_queue hints
+ * if this is beyond hint */
+ if(tp->retransmit_skb_hint != NULL &&
+ before(TCP_SKB_CB(skb)->seq,
+ TCP_SKB_CB(tp->retransmit_skb_hint)->seq)) {
+
+ tp->retransmit_skb_hint = NULL;
+ }
}
}
tcp_sync_left_out(tp);
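
tcp_mark_head_lost() above resumes its scan from lost_skb_hint/lost_cnt_hint instead of rescanning the write queue from the head, and the hints are discarded whenever the queue is restructured. A toy illustration of that cursor-caching pattern over a linked list; the struct names and the invalidation helper are invented for the example and are not the kernel API.

/* Resume an expensive list walk from a cached cursor, and drop the cursor
 * whenever the list changes: the same idea as the retransmit queue hints. */
#include <stdio.h>

struct node { int seq; struct node *next; };

struct walker {
    struct node *hint;   /* where the previous walk stopped */
    int cnt_hint;        /* how many nodes were counted up to 'hint' */
};

/* Count nodes with seq < limit, starting from the cached hint if present. */
static int count_below(struct node *head, struct walker *w, int limit)
{
    struct node *n = w->hint ? w->hint : head;
    int cnt = w->hint ? w->cnt_hint : 0;

    for (; n; n = n->next) {
        w->hint = n;             /* remember progress for the next call */
        w->cnt_hint = cnt;
        if (n->seq >= limit)
            break;
        cnt++;
    }
    return cnt;
}

static void list_changed(struct walker *w)
{
    /* Any splice or removal invalidates the cursor, as
     * clear_all_retrans_hints() does for the real hints. */
    w->hint = NULL;
    w->cnt_hint = 0;
}

int main(void)
{
    struct node c = { 30, NULL }, b = { 20, &c }, a = { 10, &b };
    struct walker w = { NULL, 0 };

    printf("below 25: %d\n", count_below(&a, &w, 25));  /* walks a, b, c */
    printf("below 35: %d\n", count_below(&a, &w, 35));  /* resumes at c  */
    list_changed(&w);                                   /* queue edited  */
    printf("below 35: %d\n", count_below(&a, &w, 35));  /* full rescan   */
    return 0;
}
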
@@ -1540,13 +1610,28 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
if (tcp_head_timedout(sk, tp)) {
struct sk_buff *skb;
- sk_stream_for_retrans_queue(skb, sk) {
- if (tcp_skb_timedout(sk, skb) &&
- !(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) {
+ skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint
+ : sk->sk_write_queue.next;
+
+ sk_stream_for_retrans_queue_from(skb, sk) {
+ if (!tcp_skb_timedout(sk, skb))
+ break;
+
+ if (!(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) {
TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
tp->lost_out += tcp_skb_pcount(skb);
+
+ /* clear xmit_retrans hint */
+ if (tp->retransmit_skb_hint &&
+ before(TCP_SKB_CB(skb)->seq,
+ TCP_SKB_CB(tp->retransmit_skb_hint)->seq))
+
+ tp->retransmit_skb_hint = NULL;
}
}
+
+ tp->scoreboard_skb_hint = skb;
+
tcp_sync_left_out(tp);
}
}
@@ -1626,6 +1711,10 @@ static void tcp_undo_cwr(struct sock *sk, const int undo)
}
tcp_moderate_cwnd(tp);
tp->snd_cwnd_stamp = tcp_time_stamp;
+
+ /* There is something screwy going on with the retrans hints after
+ an undo */
+ clear_all_retrans_hints(tp);
}
static inline int tcp_may_undo(struct tcp_sock *tp)
@@ -1709,6 +1798,9 @@ static int tcp_try_undo_loss(struct sock *sk, struct tcp_sock *tp)
sk_stream_for_retrans_queue(skb, sk) {
TCP_SKB_CB(skb)->sacked &= ~TCPCB_LOST;
}
+
+ clear_all_retrans_hints(tp);
+
DBGUNDO(sk, tp, "partial loss");
tp->lost_out = 0;
tp->left_out = tp->sacked_out;
@@ -1908,6 +2000,7 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
TCP_ECN_queue_cwr(tp);
}
+ tp->bytes_acked = 0;
tp->snd_cwnd_cnt = 0;
tcp_set_ca_state(sk, TCP_CA_Recovery);
}
@@ -1919,9 +2012,9 @@ tcp_fastretrans_alert(struct sock *sk, u32 prior_snd_una,
}
/* Read draft-ietf-tcplw-high-performance before mucking
- * with this code. (Superceeds RFC1323)
+ * with this code. (Supersedes RFC1323)
*/
-static void tcp_ack_saw_tstamp(struct sock *sk, u32 *usrtt, int flag)
+static void tcp_ack_saw_tstamp(struct sock *sk, int flag)
{
/* RTTM Rule: A TSecr value received in a segment is used to
* update the averaged RTT measurement only if the segment
@@ -1932,7 +2025,7 @@ static void tcp_ack_saw_tstamp(struct sock *sk, u32 *usrtt, int flag)
* 1998/04/10 Andrey V. Savochkin <saw@msu.ru>
*
* Changed: reset backoff as soon as we see the first valid sample.
- * If we do not, we get strongly overstimated rto. With timestamps
+ * If we do not, we get strongly overestimated rto. With timestamps
* samples are accepted even from very old segments: f.e., when rtt=1
* increases to 8, we retransmit 5 times and after 8 seconds delayed
* answer arrives rto becomes 120 seconds! If at least one of segments
@@ -1940,13 +2033,13 @@ static void tcp_ack_saw_tstamp(struct sock *sk, u32 *usrtt, int flag)
*/
struct tcp_sock *tp = tcp_sk(sk);
const __u32 seq_rtt = tcp_time_stamp - tp->rx_opt.rcv_tsecr;
- tcp_rtt_estimator(sk, seq_rtt, usrtt);
+ tcp_rtt_estimator(sk, seq_rtt);
tcp_set_rto(sk);
inet_csk(sk)->icsk_backoff = 0;
tcp_bound_rto(sk);
}
-static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, u32 *usrtt, int flag)
+static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, int flag)
{
/* We don't have a timestamp. Can only use
* packets that are not retransmitted to determine
@@ -1960,21 +2053,21 @@ static void tcp_ack_no_tstamp(struct sock *sk, u32 seq_rtt, u32 *usrtt, int flag
if (flag & FLAG_RETRANS_DATA_ACKED)
return;
- tcp_rtt_estimator(sk, seq_rtt, usrtt);
+ tcp_rtt_estimator(sk, seq_rtt);
tcp_set_rto(sk);
inet_csk(sk)->icsk_backoff = 0;
tcp_bound_rto(sk);
}
static inline void tcp_ack_update_rtt(struct sock *sk, const int flag,
- const s32 seq_rtt, u32 *usrtt)
+ const s32 seq_rtt)
{
const struct tcp_sock *tp = tcp_sk(sk);
/* Note that peer MAY send zero echo. In this case it is ignored. (rfc1323) */
if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr)
- tcp_ack_saw_tstamp(sk, usrtt, flag);
+ tcp_ack_saw_tstamp(sk, flag);
else if (seq_rtt >= 0)
- tcp_ack_no_tstamp(sk, seq_rtt, usrtt, flag);
+ tcp_ack_no_tstamp(sk, seq_rtt, flag);
}
static inline void tcp_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
@@ -2054,20 +2147,27 @@ static int tcp_tso_acked(struct sock *sk, struct sk_buff *skb,
return acked;
}
+static inline u32 tcp_usrtt(const struct sk_buff *skb)
+{
+ struct timeval tv, now;
+
+ do_gettimeofday(&now);
+ skb_get_timestamp(skb, &tv);
+ return (now.tv_sec - tv.tv_sec) * 1000000 + (now.tv_usec - tv.tv_usec);
+}
/* Remove acknowledged frames from the retransmission queue. */
-static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p, s32 *seq_usrtt)
+static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p)
{
struct tcp_sock *tp = tcp_sk(sk);
+ const struct inet_connection_sock *icsk = inet_csk(sk);
struct sk_buff *skb;
__u32 now = tcp_time_stamp;
int acked = 0;
__s32 seq_rtt = -1;
- struct timeval usnow;
u32 pkts_acked = 0;
-
- if (seq_usrtt)
- do_gettimeofday(&usnow);
+ void (*rtt_sample)(struct sock *sk, u32 usrtt)
+ = icsk->icsk_ca_ops->rtt_sample;
while ((skb = skb_peek(&sk->sk_write_queue)) &&
skb != sk->sk_send_head) {
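
tcp_usrtt() above derives a microsecond RTT sample from the skb's transmit timestamp and the current time of day. The same subtraction in plain userspace terms, with gettimeofday() and usleep() standing in for skb_get_timestamp() and the network round trip (an assumption made so the sketch actually runs):

/* Microsecond RTT as the difference of two struct timeval, like tcp_usrtt(). */
#include <stdio.h>
#include <sys/time.h>
#include <unistd.h>

static unsigned int tv_usrtt(const struct timeval *sent, const struct timeval *now)
{
    return (now->tv_sec - sent->tv_sec) * 1000000
         + (now->tv_usec - sent->tv_usec);
}

int main(void)
{
    struct timeval sent, now;

    gettimeofday(&sent, NULL);   /* pretend the segment left here */
    usleep(1500);                /* ...and its ACK arrived 1.5 ms later */
    gettimeofday(&now, NULL);

    printf("usrtt = %u us\n", tv_usrtt(&sent, &now));
    return 0;
}
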
@@ -2107,16 +2207,11 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p, s32 *seq_usrtt
tp->retrans_out -= tcp_skb_pcount(skb);
acked |= FLAG_RETRANS_DATA_ACKED;
seq_rtt = -1;
- } else if (seq_rtt < 0)
+ } else if (seq_rtt < 0) {
seq_rtt = now - scb->when;
- if (seq_usrtt) {
- struct timeval tv;
-
- skb_get_timestamp(skb, &tv);
- *seq_usrtt = (usnow.tv_sec - tv.tv_sec) * 1000000
- + (usnow.tv_usec - tv.tv_usec);
+ if (rtt_sample)
+ (*rtt_sample)(sk, tcp_usrtt(skb));
}
-
if (sacked & TCPCB_SACKED_ACKED)
tp->sacked_out -= tcp_skb_pcount(skb);
if (sacked & TCPCB_LOST)
@@ -2126,17 +2221,20 @@ static int tcp_clean_rtx_queue(struct sock *sk, __s32 *seq_rtt_p, s32 *seq_usrtt
!before(scb->end_seq, tp->snd_up))
tp->urg_mode = 0;
}
- } else if (seq_rtt < 0)
+ } else if (seq_rtt < 0) {
seq_rtt = now - scb->when;
+ if (rtt_sample)
+ (*rtt_sample)(sk, tcp_usrtt(skb));
+ }
tcp_dec_pcount_approx(&tp->fackets_out, skb);
tcp_packets_out_dec(tp, skb);
__skb_unlink(skb, &sk->sk_write_queue);
sk_stream_free_skb(sk, skb);
+ clear_all_retrans_hints(tp);
}
if (acked&FLAG_ACKED) {
- const struct inet_connection_sock *icsk = inet_csk(sk);
- tcp_ack_update_rtt(sk, acked, seq_rtt, seq_usrtt);
+ tcp_ack_update_rtt(sk, acked, seq_rtt);
tcp_ack_packets_out(sk, tp);
if (icsk->icsk_ca_ops->pkts_acked)
@@ -2284,7 +2382,7 @@ static void tcp_process_frto(struct sock *sk, u32 prior_snd_una)
}
/* F-RTO affects on two new ACKs following RTO.
- * At latest on third ACK the TCP behavor is back to normal.
+ * At latest on third ACK the TCP behavior is back to normal.
*/
tp->frto_counter = (tp->frto_counter + 1) % 3;
}
@@ -2299,7 +2397,6 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
u32 ack = TCP_SKB_CB(skb)->ack_seq;
u32 prior_in_flight;
s32 seq_rtt;
- s32 seq_usrtt = 0;
int prior_packets;
/* If the ack is newer than sent or older than previous acks
@@ -2311,6 +2408,9 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
if (before(ack, prior_snd_una))
goto old_ack;
+ if (sysctl_tcp_abc && icsk->icsk_ca_state < TCP_CA_CWR)
+ tp->bytes_acked += ack - prior_snd_una;
+
if (!(flag&FLAG_SLOWPATH) && after(ack, prior_snd_una)) {
/* Window is constant, pure forward advance.
* No more checks are required.
@@ -2352,14 +2452,13 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag)
prior_in_flight = tcp_packets_in_flight(tp);
/* See if we can take anything off of the retransmit queue. */
- flag |= tcp_clean_rtx_queue(sk, &seq_rtt,
- icsk->icsk_ca_ops->rtt_sample ? &seq_usrtt : NULL);
+ flag |= tcp_clean_rtx_queue(sk, &seq_rtt);
if (tp->frto_counter)
tcp_process_frto(sk, prior_snd_una);
if (tcp_ack_is_dubious(sk, flag)) {
- /* Advanve CWND, if state allows this. */
+ /* Advance CWND, if state allows this. */
if ((flag & FLAG_DATA_ACKED) && tcp_may_raise_cwnd(sk, flag))
tcp_cong_avoid(sk, ack, seq_rtt, prior_in_flight, 0);
tcp_fastretrans_alert(sk, prior_snd_una, prior_packets, flag);
@@ -3148,7 +3247,7 @@ tcp_collapse(struct sock *sk, struct sk_buff_head *list,
{
struct sk_buff *skb;
- /* First, check that queue is collapsable and find
+ /* First, check that queue is collapsible and find
* the point where collapsing can be useful. */
for (skb = head; skb != tail; ) {
/* No new bits? It is possible on ofo queue. */
@@ -3456,7 +3555,7 @@ static __inline__ void tcp_ack_snd_check(struct sock *sk)
/*
* This routine is only called when we have urgent data
- * signalled. Its the 'slow' part of tcp_urg. It could be
+ * signaled. Its the 'slow' part of tcp_urg. It could be
* moved inline now as tcp_urg is only called from one
* place. We handle URGent data wrong. We have to - as
* BSD still doesn't use the correction from RFC961.
@@ -3501,7 +3600,7 @@ static void tcp_check_urg(struct sock * sk, struct tcphdr * th)
* urgent. To do this requires some care. We cannot just ignore
* tp->copied_seq since we would read the last urgent byte again
* as data, nor can we alter copied_seq until this data arrives
- * or we break the sematics of SIOCATMARK (and thus sockatmark())
+ * or we break the semantics of SIOCATMARK (and thus sockatmark())
*
* NOTE. Double Dutch. Rendering to plain English: author of comment
* above did something sort of send("A", MSG_OOB); send("B", MSG_OOB);
@@ -3646,7 +3745,7 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
tp->rx_opt.saw_tstamp = 0;
/* pred_flags is 0xS?10 << 16 + snd_wnd
- * if header_predition is to be made
+ * if header_prediction is to be made
* 'S' will always be tp->tcp_header_len >> 2
* '?' will be 0 for the fast path, otherwise pred_flags is 0 to
* turn it off (when there are holes in the receive
@@ -4242,7 +4341,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
*/
if (tp->rx_opt.saw_tstamp && tp->rx_opt.rcv_tsecr &&
!tp->srtt)
- tcp_ack_saw_tstamp(sk, NULL, 0);
+ tcp_ack_saw_tstamp(sk, 0);
if (tp->rx_opt.tstamp_ok)
tp->advmss -= TCPOLEN_TSTAMP_ALIGNED;
@@ -4372,6 +4471,7 @@ discard:
EXPORT_SYMBOL(sysctl_tcp_ecn);
EXPORT_SYMBOL(sysctl_tcp_reordering);
+EXPORT_SYMBOL(sysctl_tcp_abc);
EXPORT_SYMBOL(tcp_parse_options);
EXPORT_SYMBOL(tcp_rcv_established);
EXPORT_SYMBOL(tcp_rcv_state_process);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 634dabb558f..4d5021e1929 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -39,7 +39,7 @@
* request_sock handling and moved
* most of it into the af independent code.
* Added tail drop and some other bugfixes.
- * Added new listen sematics.
+ * Added new listen semantics.
* Mike McLagan : Routing by source
* Juan Jose Ciarlante: ip_dynaddr bits
* Andi Kleen: various fixes.
@@ -1110,24 +1110,18 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
static int tcp_v4_checksum_init(struct sk_buff *skb)
{
if (skb->ip_summed == CHECKSUM_HW) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
if (!tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
- skb->nh.iph->daddr, skb->csum))
+ skb->nh.iph->daddr, skb->csum)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
return 0;
-
- LIMIT_NETDEBUG(KERN_DEBUG "hw tcp v4 csum failed\n");
- skb->ip_summed = CHECKSUM_NONE;
+ }
}
+
+ skb->csum = csum_tcpudp_nofold(skb->nh.iph->saddr, skb->nh.iph->daddr,
+ skb->len, IPPROTO_TCP, 0);
+
if (skb->len <= 76) {
- if (tcp_v4_check(skb->h.th, skb->len, skb->nh.iph->saddr,
- skb->nh.iph->daddr,
- skb_checksum(skb, 0, skb->len, 0)))
- return -1;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else {
- skb->csum = ~tcp_v4_check(skb->h.th, skb->len,
- skb->nh.iph->saddr,
- skb->nh.iph->daddr, 0);
+ return __skb_checksum_complete(skb);
}
return 0;
}
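
The tcp_v4_checksum_init() rewrite above either verifies the hardware-computed sum against the pseudo-header or seeds skb->csum for software completion. Underneath it is the ordinary 16-bit one's-complement Internet checksum; here is a standalone fill-then-verify sketch, with the TCP pseudo-header left out as a simplifying assumption.

/* 16-bit one's-complement Internet checksum: fill it in, then verify to zero. */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

static uint16_t csum(const uint8_t *data, size_t len)
{
    uint32_t sum = 0;

    while (len > 1) {                 /* add up 16-bit words */
        sum += (data[0] << 8) | data[1];
        data += 2;
        len -= 2;
    }
    if (len)                          /* odd trailing byte */
        sum += data[0] << 8;
    while (sum >> 16)                 /* fold the carries back in */
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;
}

int main(void)
{
    /* A 12-byte dummy "segment" with its checksum field at offset 10. */
    uint8_t pkt[12] = { 0x45, 0x00, 0x00, 0x0c, 0xde, 0xad,
                        0xbe, 0xef, 0x12, 0x34, 0x00, 0x00 };
    uint16_t c = csum(pkt, sizeof(pkt));

    pkt[10] = c >> 8;                 /* store the checksum in network order */
    pkt[11] = c & 0xff;

    /* A receiver recomputes over the whole packet; 0 means "checksum ok",
     * which is what the !tcp_v4_check(...) test above relies on. */
    printf("verify: %u\n", (unsigned)csum(pkt, sizeof(pkt)));
    return 0;
}
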
@@ -1216,10 +1210,10 @@ int tcp_v4_rcv(struct sk_buff *skb)
/* An explanation is required here, I think.
* Packet length and doff are validated by header prediction,
- * provided case of th->doff==0 is elimineted.
+ * provided case of th->doff==0 is eliminated.
* So, we defer the checks. */
if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
- tcp_v4_checksum_init(skb) < 0))
+ tcp_v4_checksum_init(skb)))
goto bad_packet;
th = skb->h.th;
diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
index b1a63b2c6b4..1b66a2ac432 100644
--- a/net/ipv4/tcp_minisocks.c
+++ b/net/ipv4/tcp_minisocks.c
@@ -158,7 +158,7 @@ kill_with_rst:
/* I am shamed, but failed to make it more elegant.
* Yes, it is direct reference to IP, which is impossible
* to generalize to IPv6. Taking into account that IPv6
- * do not undertsnad recycling in any case, it not
+ * do not understand recycling in any case, it not
* a big problem in practice. --ANK */
if (tw->tw_family == AF_INET &&
tcp_death_row.sysctl_tw_recycle && tcptw->tw_ts_recent_stamp &&
@@ -194,7 +194,7 @@ kill_with_rst:
/* In window segment, it may be only reset or bare ack. */
if (th->rst) {
- /* This is TIME_WAIT assasination, in two flavors.
+ /* This is TIME_WAIT assassination, in two flavors.
* Oh well... nobody has a sufficient solution to this
* protocol bug yet.
*/
@@ -380,6 +380,7 @@ struct sock *tcp_create_openreq_child(struct sock *sk, struct request_sock *req,
*/
newtp->snd_cwnd = 2;
newtp->snd_cwnd_cnt = 0;
+ newtp->bytes_acked = 0;
newtp->frto_counter = 0;
newtp->frto_highmark = 0;
@@ -550,7 +551,7 @@ struct sock *tcp_check_req(struct sock *sk,struct sk_buff *skb,
/* RFC793 page 36: "If the connection is in any non-synchronized state ...
* and the incoming segment acknowledges something not yet
- * sent (the segment carries an unaccaptable ACK) ...
+ * sent (the segment carries an unacceptable ACK) ...
* a reset is sent."
*
* Invalid ACK: reset will be sent by listening socket
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index b907456a79f..029c70dfb58 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -436,6 +436,8 @@ int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, unsigned int mss
u16 flags;
BUG_ON(len > skb->len);
+
+ clear_all_retrans_hints(tp);
nsize = skb_headlen(skb) - len;
if (nsize < 0)
nsize = 0;
@@ -599,7 +601,7 @@ int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len)
for TCP options, but includes only bare TCP header.
tp->rx_opt.mss_clamp is mss negotiated at connection setup.
- It is minumum of user_mss and mss received with SYN.
+ It is minimum of user_mss and mss received with SYN.
It also does not include TCP options.
tp->pmtu_cookie is last pmtu, seen by this function.
@@ -1171,7 +1173,7 @@ u32 __tcp_select_window(struct sock *sk)
{
struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
- /* MSS for the peer's data. Previous verions used mss_clamp
+ /* MSS for the peer's data. Previous versions used mss_clamp
* here. I don't know if the value based on our guesses
* of peer's MSS is better for the performance. It's more correct
* but may be worse for the performance because of rcv_mss
@@ -1260,7 +1262,10 @@ static void tcp_retrans_try_collapse(struct sock *sk, struct sk_buff *skb, int m
BUG_ON(tcp_skb_pcount(skb) != 1 ||
tcp_skb_pcount(next_skb) != 1);
- /* Ok. We will be able to collapse the packet. */
+ /* changing transmit queue under us so clear hints */
+ clear_all_retrans_hints(tp);
+
+ /* Ok. We will be able to collapse the packet. */
__skb_unlink(next_skb, &sk->sk_write_queue);
memcpy(skb_put(skb, next_skb_size), next_skb->data, next_skb_size);
@@ -1330,6 +1335,8 @@ void tcp_simple_retransmit(struct sock *sk)
}
}
+ clear_all_retrans_hints(tp);
+
if (!lost)
return;
@@ -1361,7 +1368,7 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
int err;
/* Do not sent more than we queued. 1/4 is reserved for possible
- * copying overhead: frgagmentation, tunneling, mangling etc.
+ * copying overhead: fragmentation, tunneling, mangling etc.
*/
if (atomic_read(&sk->sk_wmem_alloc) >
min(sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), sk->sk_sndbuf))
@@ -1468,13 +1475,25 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
const struct inet_connection_sock *icsk = inet_csk(sk);
struct tcp_sock *tp = tcp_sk(sk);
struct sk_buff *skb;
- int packet_cnt = tp->lost_out;
+ int packet_cnt;
+
+ if (tp->retransmit_skb_hint) {
+ skb = tp->retransmit_skb_hint;
+ packet_cnt = tp->retransmit_cnt_hint;
+ }else{
+ skb = sk->sk_write_queue.next;
+ packet_cnt = 0;
+ }
/* First pass: retransmit lost packets. */
- if (packet_cnt) {
- sk_stream_for_retrans_queue(skb, sk) {
+ if (tp->lost_out) {
+ sk_stream_for_retrans_queue_from(skb, sk) {
__u8 sacked = TCP_SKB_CB(skb)->sacked;
+ /* we could do better than to assign each time */
+ tp->retransmit_skb_hint = skb;
+ tp->retransmit_cnt_hint = packet_cnt;
+
/* Assume this retransmit will generate
* only one packet for congestion window
* calculation purposes. This works because
@@ -1485,10 +1504,12 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
if (tcp_packets_in_flight(tp) >= tp->snd_cwnd)
return;
- if (sacked&TCPCB_LOST) {
+ if (sacked & TCPCB_LOST) {
if (!(sacked&(TCPCB_SACKED_ACKED|TCPCB_SACKED_RETRANS))) {
- if (tcp_retransmit_skb(sk, skb))
+ if (tcp_retransmit_skb(sk, skb)) {
+ tp->retransmit_skb_hint = NULL;
return;
+ }
if (icsk->icsk_ca_state != TCP_CA_Loss)
NET_INC_STATS_BH(LINUX_MIB_TCPFASTRETRANS);
else
@@ -1501,8 +1522,8 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
TCP_RTO_MAX);
}
- packet_cnt -= tcp_skb_pcount(skb);
- if (packet_cnt <= 0)
+ packet_cnt += tcp_skb_pcount(skb);
+ if (packet_cnt >= tp->lost_out)
break;
}
}
@@ -1528,9 +1549,18 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
if (tcp_may_send_now(sk, tp))
return;
- packet_cnt = 0;
+ if (tp->forward_skb_hint) {
+ skb = tp->forward_skb_hint;
+ packet_cnt = tp->forward_cnt_hint;
+ } else{
+ skb = sk->sk_write_queue.next;
+ packet_cnt = 0;
+ }
+
+ sk_stream_for_retrans_queue_from(skb, sk) {
+ tp->forward_cnt_hint = packet_cnt;
+ tp->forward_skb_hint = skb;
- sk_stream_for_retrans_queue(skb, sk) {
/* Similar to the retransmit loop above we
* can pretend that the retransmitted SKB
* we send out here will be composed of one
@@ -1547,8 +1577,10 @@ void tcp_xmit_retransmit_queue(struct sock *sk)
continue;
/* Ok, retransmit it. */
- if (tcp_retransmit_skb(sk, skb))
+ if (tcp_retransmit_skb(sk, skb)) {
+ tp->forward_skb_hint = NULL;
break;
+ }
if (skb == skb_peek(&sk->sk_write_queue))
inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
@@ -2058,3 +2090,4 @@ EXPORT_SYMBOL(tcp_connect);
EXPORT_SYMBOL(tcp_make_synack);
EXPORT_SYMBOL(tcp_simple_retransmit);
EXPORT_SYMBOL(tcp_sync_mss);
+EXPORT_SYMBOL(sysctl_tcp_tso_win_divisor);
diff --git a/net/ipv4/tcp_scalable.c b/net/ipv4/tcp_scalable.c
index 327770bf552..26d7486ee50 100644
--- a/net/ipv4/tcp_scalable.c
+++ b/net/ipv4/tcp_scalable.c
@@ -20,20 +20,20 @@ static void tcp_scalable_cong_avoid(struct sock *sk, u32 ack, u32 rtt,
u32 in_flight, int flag)
{
struct tcp_sock *tp = tcp_sk(sk);
- if (in_flight < tp->snd_cwnd)
+
+ if (!tcp_is_cwnd_limited(sk, in_flight))
return;
- if (tp->snd_cwnd <= tp->snd_ssthresh) {
- tp->snd_cwnd++;
- } else {
+ if (tp->snd_cwnd <= tp->snd_ssthresh)
+ tcp_slow_start(tp);
+ else {
tp->snd_cwnd_cnt++;
if (tp->snd_cwnd_cnt > min(tp->snd_cwnd, TCP_SCALABLE_AI_CNT)){
- tp->snd_cwnd++;
+ if (tp->snd_cwnd < tp->snd_cwnd_clamp)
+ tp->snd_cwnd++;
tp->snd_cwnd_cnt = 0;
}
}
- tp->snd_cwnd = min_t(u32, tp->snd_cwnd, tp->snd_cwnd_clamp);
- tp->snd_cwnd_stamp = tcp_time_stamp;
}
static u32 tcp_scalable_ssthresh(struct sock *sk)
diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
index 415ee47ac1c..e1880959614 100644
--- a/net/ipv4/tcp_timer.c
+++ b/net/ipv4/tcp_timer.c
@@ -58,7 +58,7 @@ static void tcp_write_err(struct sock *sk)
* to prevent DoS attacks. It is called when a retransmission timeout
* or zero probe timeout occurs on orphaned socket.
*
- * Criterium is still not confirmed experimentally and may change.
+ * Criteria is still not confirmed experimentally and may change.
* We kill the socket, if:
* 1. If number of orphaned sockets exceeds an administratively configured
* limit.
@@ -132,7 +132,7 @@ static int tcp_write_timeout(struct sock *sk)
hole detection. :-(
It is place to make it. It is not made. I do not want
- to make it. It is disguisting. It does not work in any
+ to make it. It is disgusting. It does not work in any
case. Let me to cite the same draft, which requires for
us to implement this:
diff --git a/net/ipv4/tcp_vegas.c b/net/ipv4/tcp_vegas.c
index 93c5f92070f..b7d296a8ac6 100644
--- a/net/ipv4/tcp_vegas.c
+++ b/net/ipv4/tcp_vegas.c
@@ -236,8 +236,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
/* We don't have enough RTT samples to do the Vegas
* calculation, so we'll behave like Reno.
*/
- if (tp->snd_cwnd > tp->snd_ssthresh)
- tp->snd_cwnd++;
+ tcp_reno_cong_avoid(sk, ack, seq_rtt, in_flight, flag);
} else {
u32 rtt, target_cwnd, diff;
@@ -275,7 +274,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
*/
diff = (old_wnd << V_PARAM_SHIFT) - target_cwnd;
- if (tp->snd_cwnd < tp->snd_ssthresh) {
+ if (tp->snd_cwnd <= tp->snd_ssthresh) {
/* Slow start. */
if (diff > gamma) {
/* Going too fast. Time to slow down
@@ -295,6 +294,7 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
V_PARAM_SHIFT)+1);
}
+ tcp_slow_start(tp);
} else {
/* Congestion avoidance. */
u32 next_snd_cwnd;
@@ -327,37 +327,17 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack,
else if (next_snd_cwnd < tp->snd_cwnd)
tp->snd_cwnd--;
}
- }
- /* Wipe the slate clean for the next RTT. */
- vegas->cntRTT = 0;
- vegas->minRTT = 0x7fffffff;
+ if (tp->snd_cwnd < 2)
+ tp->snd_cwnd = 2;
+ else if (tp->snd_cwnd > tp->snd_cwnd_clamp)
+ tp->snd_cwnd = tp->snd_cwnd_clamp;
+ }
}
- /* The following code is executed for every ack we receive,
- * except for conditions checked in should_advance_cwnd()
- * before the call to tcp_cong_avoid(). Mainly this means that
- * we only execute this code if the ack actually acked some
- * data.
- */
-
- /* If we are in slow start, increase our cwnd in response to this ACK.
- * (If we are not in slow start then we are in congestion avoidance,
- * and adjust our congestion window only once per RTT. See the code
- * above.)
- */
- if (tp->snd_cwnd <= tp->snd_ssthresh)
- tp->snd_cwnd++;
-
- /* to keep cwnd from growing without bound */
- tp->snd_cwnd = min_t(u32, tp->snd_cwnd, tp->snd_cwnd_clamp);
-
- /* Make sure that we are never so timid as to reduce our cwnd below
- * 2 MSS.
- *
- * Going below 2 MSS would risk huge delayed ACKs from our receiver.
- */
- tp->snd_cwnd = max(tp->snd_cwnd, 2U);
+ /* Wipe the slate clean for the next RTT. */
+ vegas->cntRTT = 0;
+ vegas->minRTT = 0x7fffffff;
}
/* Extract info for Tcp socket info provided via netlink. */
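
tcp_vegas_cong_avoid() above compares expected and actual throughput once per RTT in fixed point, via diff = (old_wnd << V_PARAM_SHIFT) - target_cwnd. A floating-point restatement of that decision; the alpha/beta values are the textbook Vegas parameters, and slow start and the clamping shown in the hunk are omitted, both of which are assumptions of this sketch.

/* Floating-point restatement of the Vegas once-per-RTT window decision:
 * estimate how many of our packets are queued in the path and steer cwnd
 * so that the estimate stays between alpha and beta packets. */
#include <stdio.h>

#define ALPHA 1.0   /* textbook Vegas: grow if fewer than 1 packet queued */
#define BETA  3.0   /* shrink if more than 3 packets queued */

static double vegas_update(double cwnd, double base_rtt, double rtt)
{
    double queued = cwnd * (rtt - base_rtt) / rtt;

    if (queued > BETA)
        cwnd -= 1.0;          /* queues building up: back off */
    else if (queued < ALPHA)
        cwnd += 1.0;          /* path looks idle: probe for more */
    /* otherwise hold cwnd steady */

    return cwnd < 2.0 ? 2.0 : cwnd;
}

int main(void)
{
    double cwnd = 10.0, base_rtt = 100.0;              /* milliseconds */
    double rtts[] = { 100.0, 105.0, 160.0, 150.0, 101.0 };

    for (unsigned i = 0; i < sizeof(rtts) / sizeof(rtts[0]); i++) {
        cwnd = vegas_update(cwnd, base_rtt, rtts[i]);
        printf("rtt=%.0fms -> cwnd=%.0f\n", rtts[i], cwnd);
    }
    return 0;
}
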
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index e0bd1013cb0..2422a5f7195 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -761,7 +761,7 @@ int udp_ioctl(struct sock *sk, int cmd, unsigned long arg)
static __inline__ int __udp_checksum_complete(struct sk_buff *skb)
{
- return (unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum));
+ return __skb_checksum_complete(skb);
}
static __inline__ int udp_checksum_complete(struct sk_buff *skb)
@@ -1100,11 +1100,8 @@ static int udp_checksum_init(struct sk_buff *skb, struct udphdr *uh,
if (uh->check == 0) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
} else if (skb->ip_summed == CHECKSUM_HW) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
if (!udp_check(uh, ulen, saddr, daddr, skb->csum))
- return 0;
- LIMIT_NETDEBUG(KERN_DEBUG "udp v4 hw csum failure.\n");
- skb->ip_summed = CHECKSUM_NONE;
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
}
if (skb->ip_summed != CHECKSUM_UNNECESSARY)
skb->csum = csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index b7a5f51238b..ddcf7754eec 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -1022,6 +1022,7 @@ int ipv6_dev_get_saddr(struct net_device *daddr_dev,
continue;
}
+#ifdef CONFIG_IPV6_PRIVACY
/* Rule 7: Prefer public address
* Note: prefer temprary address if use_tempaddr >= 2
*/
@@ -1042,7 +1043,7 @@ int ipv6_dev_get_saddr(struct net_device *daddr_dev,
if (hiscore.attrs & IPV6_SADDR_SCORE_PRIVACY)
continue;
}
-
+#endif
/* Rule 8: Use longest matching prefix */
if (hiscore.rule < 8)
hiscore.matchlen = ipv6_addr_diff(&ifa_result->addr, daddr);
diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
index 23e540365a1..1bdf0fb8bf8 100644
--- a/net/ipv6/icmp.c
+++ b/net/ipv6/icmp.c
@@ -585,17 +585,16 @@ static int icmpv6_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
daddr = &skb->nh.ipv6h->daddr;
/* Perform checksum. */
- if (skb->ip_summed == CHECKSUM_HW) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- if (csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6,
- skb->csum)) {
- LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 hw checksum failed\n");
- skb->ip_summed = CHECKSUM_NONE;
- }
- }
- if (skb->ip_summed == CHECKSUM_NONE) {
- if (csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6,
- skb_checksum(skb, 0, skb->len, 0))) {
+ switch (skb->ip_summed) {
+ case CHECKSUM_HW:
+ if (!csum_ipv6_magic(saddr, daddr, skb->len, IPPROTO_ICMPV6,
+ skb->csum))
+ break;
+ /* fall through */
+ case CHECKSUM_NONE:
+ skb->csum = ~csum_ipv6_magic(saddr, daddr, skb->len,
+ IPPROTO_ICMPV6, 0);
+ if (__skb_checksum_complete(skb)) {
LIMIT_NETDEBUG(KERN_DEBUG "ICMPv6 checksum failed [%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x > %04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x]\n",
NIP6(*saddr), NIP6(*daddr));
goto discard_it;
diff --git a/net/ipv6/ip6_input.c b/net/ipv6/ip6_input.c
index 6e348042693..a6026d2787d 100644
--- a/net/ipv6/ip6_input.c
+++ b/net/ipv6/ip6_input.c
@@ -176,6 +176,11 @@ resubmit:
if (ipprot->flags & INET6_PROTO_FINAL) {
struct ipv6hdr *hdr;
+ /* Free reference early: we don't need it any more,
+ and it may hold ip_conntrack module loaded
+ indefinitely. */
+ nf_reset(skb);
+
skb_postpull_rcsum(skb, skb->nh.raw,
skb->h.raw - skb->nh.raw);
hdr = skb->nh.ipv6h;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index dbd9767b32e..c1fa693511a 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -441,9 +441,15 @@ static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
#ifdef CONFIG_NETFILTER
to->nfmark = from->nfmark;
/* Connection association is same as pre-frag packet */
+ nf_conntrack_put(to->nfct);
to->nfct = from->nfct;
nf_conntrack_get(to->nfct);
to->nfctinfo = from->nfctinfo;
+#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+ nf_conntrack_put_reasm(to->nfct_reasm);
+ to->nfct_reasm = from->nfct_reasm;
+ nf_conntrack_get_reasm(to->nfct_reasm);
+#endif
#ifdef CONFIG_BRIDGE_NETFILTER
nf_bridge_put(to->nf_bridge);
to->nf_bridge = from->nf_bridge;
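
The ip6_copy_metadata() fix above drops the fragment's old conntrack reference before overwriting it with the parent skb's, then takes a new reference, so nothing is leaked or over-freed. A generic sketch of that put-assign-get pattern with a toy refcounted object; the struct and helpers are illustrative and not the netfilter API.

/* Put the old reference, copy the pointer, get the new one - in that order. */
#include <stdio.h>
#include <stdlib.h>

struct obj { int refcnt; const char *name; };

static struct obj *obj_get(struct obj *o)
{
    if (o)
        o->refcnt++;
    return o;
}

static void obj_put(struct obj *o)
{
    if (o && --o->refcnt == 0) {
        printf("freeing %s\n", o->name);
        free(o);
    }
}

/* Copy 'from's association into '*to', as ip6_copy_metadata() now does. */
static void copy_assoc(struct obj **to, struct obj *from)
{
    obj_put(*to);          /* release whatever '*to' pointed at before */
    *to = obj_get(from);   /* share 'from's object and count the reference */
}

int main(void)
{
    struct obj *a = malloc(sizeof(*a)), *b = malloc(sizeof(*b));
    struct obj *slot = NULL;

    *a = (struct obj){ .refcnt = 1, .name = "conn A" };
    *b = (struct obj){ .refcnt = 1, .name = "conn B" };

    copy_assoc(&slot, a);  /* slot shares A                        */
    copy_assoc(&slot, b);  /* A's extra ref dropped, slot shares B */
    obj_put(a);            /* last ref on A: freed                 */
    obj_put(slot);         /* our copy of B                        */
    obj_put(b);            /* last ref on B: freed                 */
    return 0;
}
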
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index e6b0e3954c0..e315d0f80af 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -525,6 +525,7 @@ ip6ip6_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
if ((t = ip6ip6_tnl_lookup(&ipv6h->saddr, &ipv6h->daddr)) != NULL) {
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb)) {
+ read_unlock(&ip6ip6_lock);
kfree_skb(skb);
return 0;
}
diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig
index bb7ccfe33f2..971ba60bf6e 100644
--- a/net/ipv6/netfilter/Kconfig
+++ b/net/ipv6/netfilter/Kconfig
@@ -278,5 +278,19 @@ config IP6_NF_RAW
If you want to compile it as a module, say M here and read
<file:Documentation/modules.txt>. If unsure, say `N'.
+config NF_CONNTRACK_IPV6
+ tristate "IPv6 support for new connection tracking (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && NF_CONNTRACK
+ ---help---
+ Connection tracking keeps a record of what packets have passed
+ through your machine, in order to figure out how they are related
+ into connections.
+
+ This is IPv6 support for Layer 3 independent connection tracking.
+ Layer 3 independent connection tracking is an experimental scheme
+ which generalizes ip_conntrack to support other layer 3 protocols.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
endmenu
diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile
index 2b2c370e8b1..9ab5b2ca1f5 100644
--- a/net/ipv6/netfilter/Makefile
+++ b/net/ipv6/netfilter/Makefile
@@ -27,3 +27,9 @@ obj-$(CONFIG_IP6_NF_TARGET_LOG) += ip6t_LOG.o
obj-$(CONFIG_IP6_NF_RAW) += ip6table_raw.o
obj-$(CONFIG_IP6_NF_MATCH_HL) += ip6t_hl.o
obj-$(CONFIG_IP6_NF_TARGET_REJECT) += ip6t_REJECT.o
+
+# objects for l3 independent conntrack
+nf_conntrack_ipv6-objs := nf_conntrack_l3proto_ipv6.o nf_conntrack_proto_icmpv6.o nf_conntrack_reasm.o
+
+# l3 independent conntrack
+obj-$(CONFIG_NF_CONNTRACK_IPV6) += nf_conntrack_ipv6.o
diff --git a/net/ipv6/netfilter/ip6t_MARK.c b/net/ipv6/netfilter/ip6t_MARK.c
index 0c7584f9217..eab8fb864ee 100644
--- a/net/ipv6/netfilter/ip6t_MARK.c
+++ b/net/ipv6/netfilter/ip6t_MARK.c
@@ -56,9 +56,9 @@ checkentry(const char *tablename,
return 1;
}
-static struct ip6t_target ip6t_mark_reg = {
- .name = "MARK",
- .target = target,
+static struct ip6t_target ip6t_mark_reg = {
+ .name = "MARK",
+ .target = target,
.checkentry = checkentry,
.me = THIS_MODULE
};
diff --git a/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
new file mode 100644
index 00000000000..e2c90b3a807
--- /dev/null
+++ b/net/ipv6/netfilter/nf_conntrack_l3proto_ipv6.c
@@ -0,0 +1,556 @@
+/*
+ * Copyright (C)2004 USAGI/WIDE Project
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Author:
+ * Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ *
+ * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ * - support Layer 3 protocol independent connection tracking.
+ * Based on the original ip_conntrack code which had the following
+ * copyright information:
+ * (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * 23 Mar 2004: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ * - add get_features() to support various size of conntrack
+ * structures.
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/ipv6.h>
+#include <linux/in6.h>
+#include <linux/netfilter.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/icmp.h>
+#include <linux/sysctl.h>
+#include <net/ipv6.h>
+
+#include <linux/netfilter_ipv6.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_protocol.h>
+#include <net/netfilter/nf_conntrack_l3proto.h>
+#include <net/netfilter/nf_conntrack_core.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(format, args...)
+#endif
+
+DECLARE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat);
+
+static int ipv6_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
+ struct nf_conntrack_tuple *tuple)
+{
+ u_int32_t _addrs[8], *ap;
+
+ ap = skb_header_pointer(skb, nhoff + offsetof(struct ipv6hdr, saddr),
+ sizeof(_addrs), _addrs);
+ if (ap == NULL)
+ return 0;
+
+ memcpy(tuple->src.u3.ip6, ap, sizeof(tuple->src.u3.ip6));
+ memcpy(tuple->dst.u3.ip6, ap + 4, sizeof(tuple->dst.u3.ip6));
+
+ return 1;
+}
+
+static int ipv6_invert_tuple(struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_tuple *orig)
+{
+ memcpy(tuple->src.u3.ip6, orig->dst.u3.ip6, sizeof(tuple->src.u3.ip6));
+ memcpy(tuple->dst.u3.ip6, orig->src.u3.ip6, sizeof(tuple->dst.u3.ip6));
+
+ return 1;
+}
+
+static int ipv6_print_tuple(struct seq_file *s,
+ const struct nf_conntrack_tuple *tuple)
+{
+ return seq_printf(s, "src=%x:%x:%x:%x:%x:%x:%x:%x dst=%x:%x:%x:%x:%x:%x:%x:%x ",
+ NIP6(*((struct in6_addr *)tuple->src.u3.ip6)),
+ NIP6(*((struct in6_addr *)tuple->dst.u3.ip6)));
+}
+
+static int ipv6_print_conntrack(struct seq_file *s,
+ const struct nf_conn *conntrack)
+{
+ return 0;
+}
+
+/*
+ * Based on ipv6_skip_exthdr() in net/ipv6/exthdr.c
+ *
+ * This function parses (probably truncated) exthdr set "hdr"
+ * of length "len". "nexthdrp" initially points to some place,
+ * where type of the first header can be found.
+ *
+ * It skips all well-known exthdrs, and returns pointer to the start
+ * of unparsable area i.e. the first header with unknown type.
+ * if success, *nexthdr is updated by type/protocol of this header.
+ *
+ * NOTES: - it may return pointer pointing beyond end of packet,
+ * if the last recognized header is truncated in the middle.
+ * - if packet is truncated, so that all parsed headers are skipped,
+ * it returns -1.
+ * - if packet is fragmented, return pointer of the fragment header.
+ * - ESP is unparsable for now and considered like
+ * normal payload protocol.
+ * - Note also special handling of AUTH header. Thanks to IPsec wizards.
+ */
+
+int nf_ct_ipv6_skip_exthdr(struct sk_buff *skb, int start, u8 *nexthdrp,
+ int len)
+{
+ u8 nexthdr = *nexthdrp;
+
+ while (ipv6_ext_hdr(nexthdr)) {
+ struct ipv6_opt_hdr hdr;
+ int hdrlen;
+
+ if (len < (int)sizeof(struct ipv6_opt_hdr))
+ return -1;
+ if (nexthdr == NEXTHDR_NONE)
+ break;
+ if (nexthdr == NEXTHDR_FRAGMENT)
+ break;
+ if (skb_copy_bits(skb, start, &hdr, sizeof(hdr)))
+ BUG();
+ if (nexthdr == NEXTHDR_AUTH)
+ hdrlen = (hdr.hdrlen+2)<<2;
+ else
+ hdrlen = ipv6_optlen(&hdr);
+
+ nexthdr = hdr.nexthdr;
+ len -= hdrlen;
+ start += hdrlen;
+ }
+
+ *nexthdrp = nexthdr;
+ return start;
+}
+
+static int
+ipv6_prepare(struct sk_buff **pskb, unsigned int hooknum, unsigned int *dataoff,
+ u_int8_t *protonum)
+{
+ unsigned int extoff;
+ unsigned char pnum;
+ int protoff;
+
+ extoff = (u8*)((*pskb)->nh.ipv6h + 1) - (*pskb)->data;
+ pnum = (*pskb)->nh.ipv6h->nexthdr;
+
+ protoff = nf_ct_ipv6_skip_exthdr(*pskb, extoff, &pnum,
+ (*pskb)->len - extoff);
+
+ /*
+ * (protoff == (*pskb)->len) means that the packet has no data
+ * except IPv6 & ext headers, but it's tracked anyway. - YK
+ */
+ if ((protoff < 0) || (protoff > (*pskb)->len)) {
+ DEBUGP("ip6_conntrack_core: can't find proto in pkt\n");
+ NF_CT_STAT_INC(error);
+ NF_CT_STAT_INC(invalid);
+ return -NF_ACCEPT;
+ }
+
+ *dataoff = protoff;
+ *protonum = pnum;
+ return NF_ACCEPT;
+}
+
+static u_int32_t ipv6_get_features(const struct nf_conntrack_tuple *tuple)
+{
+ return NF_CT_F_BASIC;
+}
+
+static unsigned int ipv6_confirm(unsigned int hooknum,
+ struct sk_buff **pskb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ struct nf_conn *ct;
+ enum ip_conntrack_info ctinfo;
+
+ /* This is where we call the helper: as the packet goes out. */
+ ct = nf_ct_get(*pskb, &ctinfo);
+ if (ct && ct->helper) {
+ unsigned int ret, protoff;
+ unsigned int extoff = (u8*)((*pskb)->nh.ipv6h + 1)
+ - (*pskb)->data;
+ unsigned char pnum = (*pskb)->nh.ipv6h->nexthdr;
+
+ protoff = nf_ct_ipv6_skip_exthdr(*pskb, extoff, &pnum,
+ (*pskb)->len - extoff);
+ if (protoff < 0 || protoff > (*pskb)->len ||
+ pnum == NEXTHDR_FRAGMENT) {
+ DEBUGP("proto header not found\n");
+ return NF_ACCEPT;
+ }
+
+ ret = ct->helper->help(pskb, protoff, ct, ctinfo);
+ if (ret != NF_ACCEPT)
+ return ret;
+ }
+
+ /* We've seen it coming out the other side: confirm it */
+
+ return nf_conntrack_confirm(pskb);
+}
+
+extern struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb);
+extern void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
+ struct net_device *in,
+ struct net_device *out,
+ int (*okfn)(struct sk_buff *));
+static unsigned int ipv6_defrag(unsigned int hooknum,
+ struct sk_buff **pskb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ struct sk_buff *reasm;
+
+ /* Previously seen (loopback)? */
+ if ((*pskb)->nfct)
+ return NF_ACCEPT;
+
+ reasm = nf_ct_frag6_gather(*pskb);
+
+ /* queued */
+ if (reasm == NULL)
+ return NF_STOLEN;
+
+ /* error occurred or not fragmented */
+ if (reasm == *pskb)
+ return NF_ACCEPT;
+
+ nf_ct_frag6_output(hooknum, reasm, (struct net_device *)in,
+ (struct net_device *)out, okfn);
+
+ return NF_STOLEN;
+}
+
+static unsigned int ipv6_conntrack_in(unsigned int hooknum,
+ struct sk_buff **pskb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ struct sk_buff *reasm = (*pskb)->nfct_reasm;
+
+ /* This packet is fragmented and has reassembled packet. */
+ if (reasm) {
+ /* Reassembled packet isn't parsed yet ? */
+ if (!reasm->nfct) {
+ unsigned int ret;
+
+ ret = nf_conntrack_in(PF_INET6, hooknum, &reasm);
+ if (ret != NF_ACCEPT)
+ return ret;
+ }
+ nf_conntrack_get(reasm->nfct);
+ (*pskb)->nfct = reasm->nfct;
+ return NF_ACCEPT;
+ }
+
+ return nf_conntrack_in(PF_INET6, hooknum, pskb);
+}
+
+static unsigned int ipv6_conntrack_local(unsigned int hooknum,
+ struct sk_buff **pskb,
+ const struct net_device *in,
+ const struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ /* root is playing with raw sockets. */
+ if ((*pskb)->len < sizeof(struct ipv6hdr)) {
+ if (net_ratelimit())
+ printk("ipv6_conntrack_local: packet too short\n");
+ return NF_ACCEPT;
+ }
+ return ipv6_conntrack_in(hooknum, pskb, in, out, okfn);
+}
+
+/* Connection tracking may drop packets, but never alters them, so
+ make it the first hook. */
+static struct nf_hook_ops ipv6_conntrack_defrag_ops = {
+ .hook = ipv6_defrag,
+ .owner = THIS_MODULE,
+ .pf = PF_INET6,
+ .hooknum = NF_IP6_PRE_ROUTING,
+ .priority = NF_IP6_PRI_CONNTRACK_DEFRAG,
+};
+
+static struct nf_hook_ops ipv6_conntrack_in_ops = {
+ .hook = ipv6_conntrack_in,
+ .owner = THIS_MODULE,
+ .pf = PF_INET6,
+ .hooknum = NF_IP6_PRE_ROUTING,
+ .priority = NF_IP6_PRI_CONNTRACK,
+};
+
+static struct nf_hook_ops ipv6_conntrack_local_out_ops = {
+ .hook = ipv6_conntrack_local,
+ .owner = THIS_MODULE,
+ .pf = PF_INET6,
+ .hooknum = NF_IP6_LOCAL_OUT,
+ .priority = NF_IP6_PRI_CONNTRACK,
+};
+
+static struct nf_hook_ops ipv6_conntrack_defrag_local_out_ops = {
+ .hook = ipv6_defrag,
+ .owner = THIS_MODULE,
+ .pf = PF_INET6,
+ .hooknum = NF_IP6_LOCAL_OUT,
+ .priority = NF_IP6_PRI_CONNTRACK_DEFRAG,
+};
+
+/* Refragmenter; last chance. */
+static struct nf_hook_ops ipv6_conntrack_out_ops = {
+ .hook = ipv6_confirm,
+ .owner = THIS_MODULE,
+ .pf = PF_INET6,
+ .hooknum = NF_IP6_POST_ROUTING,
+ .priority = NF_IP6_PRI_LAST,
+};
+
+static struct nf_hook_ops ipv6_conntrack_local_in_ops = {
+ .hook = ipv6_confirm,
+ .owner = THIS_MODULE,
+ .pf = PF_INET6,
+ .hooknum = NF_IP6_LOCAL_IN,
+ .priority = NF_IP6_PRI_LAST-1,
+};
+
+#ifdef CONFIG_SYSCTL
+
+/* From nf_conntrack_proto_icmpv6.c */
+extern unsigned long nf_ct_icmpv6_timeout;
+
+/* From nf_conntrack_frag6.c */
+extern unsigned long nf_ct_frag6_timeout;
+extern unsigned long nf_ct_frag6_low_thresh;
+extern unsigned long nf_ct_frag6_high_thresh;
+
+static struct ctl_table_header *nf_ct_ipv6_sysctl_header;
+
+static ctl_table nf_ct_sysctl_table[] = {
+ {
+ .ctl_name = NET_NF_CONNTRACK_ICMPV6_TIMEOUT,
+ .procname = "nf_conntrack_icmpv6_timeout",
+ .data = &nf_ct_icmpv6_timeout,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_jiffies,
+ },
+ {
+ .ctl_name = NET_NF_CONNTRACK_FRAG6_TIMEOUT,
+ .procname = "nf_conntrack_frag6_timeout",
+ .data = &nf_ct_frag6_timeout,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_jiffies,
+ },
+ {
+ .ctl_name = NET_NF_CONNTRACK_FRAG6_LOW_THRESH,
+ .procname = "nf_conntrack_frag6_low_thresh",
+ .data = &nf_ct_frag6_low_thresh,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_jiffies,
+ },
+ {
+ .ctl_name = NET_NF_CONNTRACK_FRAG6_HIGH_THRESH,
+ .procname = "nf_conntrack_frag6_high_thresh",
+ .data = &nf_ct_frag6_high_thresh,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_jiffies,
+ },
+ { .ctl_name = 0 }
+};
+
+static ctl_table nf_ct_netfilter_table[] = {
+ {
+ .ctl_name = NET_NETFILTER,
+ .procname = "netfilter",
+ .mode = 0555,
+ .child = nf_ct_sysctl_table,
+ },
+ { .ctl_name = 0 }
+};
+
+static ctl_table nf_ct_net_table[] = {
+ {
+ .ctl_name = CTL_NET,
+ .procname = "net",
+ .mode = 0555,
+ .child = nf_ct_netfilter_table,
+ },
+ { .ctl_name = 0 }
+};
+#endif
+
+struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6 = {
+ .l3proto = PF_INET6,
+ .name = "ipv6",
+ .pkt_to_tuple = ipv6_pkt_to_tuple,
+ .invert_tuple = ipv6_invert_tuple,
+ .print_tuple = ipv6_print_tuple,
+ .print_conntrack = ipv6_print_conntrack,
+ .prepare = ipv6_prepare,
+ .get_features = ipv6_get_features,
+ .me = THIS_MODULE,
+};
+
+extern struct nf_conntrack_protocol nf_conntrack_protocol_tcp6;
+extern struct nf_conntrack_protocol nf_conntrack_protocol_udp6;
+extern struct nf_conntrack_protocol nf_conntrack_protocol_icmpv6;
+extern int nf_ct_frag6_init(void);
+extern void nf_ct_frag6_cleanup(void);
+static int init_or_cleanup(int init)
+{
+ int ret = 0;
+
+ if (!init) goto cleanup;
+
+ ret = nf_ct_frag6_init();
+ if (ret < 0) {
+ printk("nf_conntrack_ipv6: can't initialize frag6.\n");
+ goto cleanup_nothing;
+ }
+ ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_tcp6);
+ if (ret < 0) {
+ printk("nf_conntrack_ipv6: can't register tcp.\n");
+ goto cleanup_frag6;
+ }
+
+ ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_udp6);
+ if (ret < 0) {
+ printk("nf_conntrack_ipv6: can't register udp.\n");
+ goto cleanup_tcp;
+ }
+
+ ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_icmpv6);
+ if (ret < 0) {
+ printk("nf_conntrack_ipv6: can't register icmpv6.\n");
+ goto cleanup_udp;
+ }
+
+ ret = nf_conntrack_l3proto_register(&nf_conntrack_l3proto_ipv6);
+ if (ret < 0) {
+ printk("nf_conntrack_ipv6: can't register ipv6\n");
+ goto cleanup_icmpv6;
+ }
+
+ ret = nf_register_hook(&ipv6_conntrack_defrag_ops);
+ if (ret < 0) {
+ printk("nf_conntrack_ipv6: can't register pre-routing defrag "
+ "hook.\n");
+ goto cleanup_ipv6;
+ }
+
+ ret = nf_register_hook(&ipv6_conntrack_defrag_local_out_ops);
+ if (ret < 0) {
+ printk("nf_conntrack_ipv6: can't register local_out defrag "
+ "hook.\n");
+ goto cleanup_defragops;
+ }
+
+ ret = nf_register_hook(&ipv6_conntrack_in_ops);
+ if (ret < 0) {
+ printk("nf_conntrack_ipv6: can't register pre-routing hook.\n");
+ goto cleanup_defraglocalops;
+ }
+
+ ret = nf_register_hook(&ipv6_conntrack_local_out_ops);
+ if (ret < 0) {
+ printk("nf_conntrack_ipv6: can't register local out hook.\n");
+ goto cleanup_inops;
+ }
+
+ ret = nf_register_hook(&ipv6_conntrack_out_ops);
+ if (ret < 0) {
+ printk("nf_conntrack_ipv6: can't register post-routing hook.\n");
+ goto cleanup_inandlocalops;
+ }
+
+ ret = nf_register_hook(&ipv6_conntrack_local_in_ops);
+ if (ret < 0) {
+ printk("nf_conntrack_ipv6: can't register local in hook.\n");
+ goto cleanup_inoutandlocalops;
+ }
+
+#ifdef CONFIG_SYSCTL
+ nf_ct_ipv6_sysctl_header = register_sysctl_table(nf_ct_net_table, 0);
+ if (nf_ct_ipv6_sysctl_header == NULL) {
+ printk("nf_conntrack: can't register to sysctl.\n");
+ ret = -ENOMEM;
+ goto cleanup_localinops;
+ }
+#endif
+ return ret;
+
+ cleanup:
+ synchronize_net();
+#ifdef CONFIG_SYSCTL
+ unregister_sysctl_table(nf_ct_ipv6_sysctl_header);
+ cleanup_localinops:
+#endif
+ nf_unregister_hook(&ipv6_conntrack_local_in_ops);
+ cleanup_inoutandlocalops:
+ nf_unregister_hook(&ipv6_conntrack_out_ops);
+ cleanup_inandlocalops:
+ nf_unregister_hook(&ipv6_conntrack_local_out_ops);
+ cleanup_inops:
+ nf_unregister_hook(&ipv6_conntrack_in_ops);
+ cleanup_defraglocalops:
+ nf_unregister_hook(&ipv6_conntrack_defrag_local_out_ops);
+ cleanup_defragops:
+ nf_unregister_hook(&ipv6_conntrack_defrag_ops);
+ cleanup_ipv6:
+ nf_conntrack_l3proto_unregister(&nf_conntrack_l3proto_ipv6);
+ cleanup_icmpv6:
+ nf_conntrack_protocol_unregister(&nf_conntrack_protocol_icmpv6);
+ cleanup_udp:
+ nf_conntrack_protocol_unregister(&nf_conntrack_protocol_udp6);
+ cleanup_tcp:
+ nf_conntrack_protocol_unregister(&nf_conntrack_protocol_tcp6);
+ cleanup_frag6:
+ nf_ct_frag6_cleanup();
+ cleanup_nothing:
+ return ret;
+}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Yasuyuki KOZAKAI @USAGI <yasuyuki.kozakai@toshiba.co.jp>");
+
+static int __init init(void)
+{
+ need_nf_conntrack();
+ return init_or_cleanup(1);
+}
+
+static void __exit fini(void)
+{
+ init_or_cleanup(0);
+}
+
+module_init(init);
+module_exit(fini);
+
+void need_ip6_conntrack(void)
+{
+}
+
+EXPORT_SYMBOL(need_ip6_conntrack);
diff --git a/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
new file mode 100644
index 00000000000..c0f1da5497a
--- /dev/null
+++ b/net/ipv6/netfilter/nf_conntrack_proto_icmpv6.c
@@ -0,0 +1,272 @@
+/*
+ * Copyright (C)2003,2004 USAGI/WIDE Project
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Author:
+ * Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ *
+ * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ * - ICMPv6 tracking support. Derived from the original ip_conntrack code
+ * net/ipv4/netfilter/ip_conntrack_proto_icmp.c which had the following
+ * copyright information:
+ * (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
+ */
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/module.h>
+#include <linux/netfilter.h>
+#include <linux/in6.h>
+#include <linux/icmpv6.h>
+#include <linux/ipv6.h>
+#include <net/ipv6.h>
+#include <net/ip6_checksum.h>
+#include <linux/seq_file.h>
+#include <linux/netfilter_ipv6.h>
+#include <net/netfilter/nf_conntrack_tuple.h>
+#include <net/netfilter/nf_conntrack_protocol.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/ipv6/nf_conntrack_icmpv6.h>
+
+unsigned long nf_ct_icmpv6_timeout = 30*HZ;
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(format, args...)
+#endif
+
+static int icmpv6_pkt_to_tuple(const struct sk_buff *skb,
+ unsigned int dataoff,
+ struct nf_conntrack_tuple *tuple)
+{
+ struct icmp6hdr _hdr, *hp;
+
+ hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
+ if (hp == NULL)
+ return 0;
+ tuple->dst.u.icmp.type = hp->icmp6_type;
+ tuple->src.u.icmp.id = hp->icmp6_identifier;
+ tuple->dst.u.icmp.code = hp->icmp6_code;
+
+ return 1;
+}
+
+static int icmpv6_invert_tuple(struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_tuple *orig)
+{
+ /* Add 1; spaces filled with 0. */
+ static u_int8_t invmap[] = {
+ [ICMPV6_ECHO_REQUEST - 128] = ICMPV6_ECHO_REPLY + 1,
+ [ICMPV6_ECHO_REPLY - 128] = ICMPV6_ECHO_REQUEST + 1,
+ [ICMPV6_NI_QUERY - 128] = ICMPV6_NI_QUERY + 1,
+ [ICMPV6_NI_REPLY - 128] = ICMPV6_NI_REPLY +1
+ };
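+ /* invmap stores "inverse type + 1" so a zero entry means "no inverse
+ * known"; e.g. ICMPV6_ECHO_REQUEST (128) maps to ICMPV6_ECHO_REPLY
+ * (129) and vice versa. */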
+
+ __u8 type = orig->dst.u.icmp.type - 128;
+ if (type >= sizeof(invmap) || !invmap[type])
+ return 0;
+
+ tuple->src.u.icmp.id = orig->src.u.icmp.id;
+ tuple->dst.u.icmp.type = invmap[type] - 1;
+ tuple->dst.u.icmp.code = orig->dst.u.icmp.code;
+ return 1;
+}
+
+/* Print out the per-protocol part of the tuple. */
+static int icmpv6_print_tuple(struct seq_file *s,
+ const struct nf_conntrack_tuple *tuple)
+{
+ return seq_printf(s, "type=%u code=%u id=%u ",
+ tuple->dst.u.icmp.type,
+ tuple->dst.u.icmp.code,
+ ntohs(tuple->src.u.icmp.id));
+}
+
+/* Print out the private part of the conntrack. */
+static int icmpv6_print_conntrack(struct seq_file *s,
+ const struct nf_conn *conntrack)
+{
+ return 0;
+}
+
+/* Returns verdict for packet, or -1 for invalid. */
+static int icmpv6_packet(struct nf_conn *ct,
+ const struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ int pf,
+ unsigned int hooknum)
+{
+ /* Try to delete connection immediately after all replies:
+ won't actually vanish as we still have skb, and del_timer
+ means this will only run once even if count hits zero twice
+ (theoretically possible with SMP) */
+ if (CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY) {
+ if (atomic_dec_and_test(&ct->proto.icmp.count)
+ && del_timer(&ct->timeout))
+ ct->timeout.function((unsigned long)ct);
+ } else {
+ atomic_inc(&ct->proto.icmp.count);
+ nf_conntrack_event_cache(IPCT_PROTOINFO_VOLATILE, skb);
+ nf_ct_refresh_acct(ct, ctinfo, skb, nf_ct_icmpv6_timeout);
+ }
+
+ return NF_ACCEPT;
+}
+
+/* Called when a new connection for this protocol found. */
+static int icmpv6_new(struct nf_conn *conntrack,
+ const struct sk_buff *skb,
+ unsigned int dataoff)
+{
+ static u_int8_t valid_new[] = {
+ [ICMPV6_ECHO_REQUEST - 128] = 1,
+ [ICMPV6_NI_QUERY - 128] = 1
+ };
+
+ if (conntrack->tuplehash[0].tuple.dst.u.icmp.type - 128 >= sizeof(valid_new)
+ || !valid_new[conntrack->tuplehash[0].tuple.dst.u.icmp.type - 128]) {
+ /* Can't create a new ICMPv6 `conn' with this. */
+ DEBUGP("icmp: can't create new conn with type %u\n",
+ conntrack->tuplehash[0].tuple.dst.u.icmp.type);
+ NF_CT_DUMP_TUPLE(&conntrack->tuplehash[0].tuple);
+ return 0;
+ }
+ atomic_set(&conntrack->proto.icmp.count, 0);
+ return 1;
+}
+
+extern int
+nf_ct_ipv6_skip_exthdr(struct sk_buff *skb, int start, u8 *nexthdrp, int len);
+extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6;
+static int
+icmpv6_error_message(struct sk_buff *skb,
+ unsigned int icmp6off,
+ enum ip_conntrack_info *ctinfo,
+ unsigned int hooknum)
+{
+ struct nf_conntrack_tuple intuple, origtuple;
+ struct nf_conntrack_tuple_hash *h;
+ struct icmp6hdr _hdr, *hp;
+ unsigned int inip6off;
+ struct nf_conntrack_protocol *inproto;
+ u_int8_t inprotonum;
+ unsigned int inprotoff;
+
+ NF_CT_ASSERT(skb->nfct == NULL);
+
+ hp = skb_header_pointer(skb, icmp6off, sizeof(_hdr), &_hdr);
+ if (hp == NULL) {
+ DEBUGP("icmpv6_error: Can't get ICMPv6 hdr.\n");
+ return -NF_ACCEPT;
+ }
+
+ inip6off = icmp6off + sizeof(_hdr);
+ if (skb_copy_bits(skb, inip6off+offsetof(struct ipv6hdr, nexthdr),
+ &inprotonum, sizeof(inprotonum)) != 0) {
+ DEBUGP("icmpv6_error: Can't get nexthdr in inner IPv6 header.\n");
+ return -NF_ACCEPT;
+ }
+ inprotoff = nf_ct_ipv6_skip_exthdr(skb,
+ inip6off + sizeof(struct ipv6hdr),
+ &inprotonum,
+ skb->len - inip6off
+ - sizeof(struct ipv6hdr));
+
+ if ((inprotoff < 0) || (inprotoff > skb->len) ||
+ (inprotonum == NEXTHDR_FRAGMENT)) {
+ DEBUGP("icmpv6_error: Can't get protocol header in ICMPv6 payload.\n");
+ return -NF_ACCEPT;
+ }
+
+ inproto = nf_ct_find_proto(PF_INET6, inprotonum);
+
+ /* Are they talking about one of our connections? */
+ if (!nf_ct_get_tuple(skb, inip6off, inprotoff, PF_INET6, inprotonum,
+ &origtuple, &nf_conntrack_l3proto_ipv6, inproto)) {
+ DEBUGP("icmpv6_error: Can't get tuple\n");
+ return -NF_ACCEPT;
+ }
+
+ /* Ordinarily, we'd expect the inverted tupleproto, but it's
+ been preserved inside the ICMP. */
+ if (!nf_ct_invert_tuple(&intuple, &origtuple,
+ &nf_conntrack_l3proto_ipv6, inproto)) {
+ DEBUGP("icmpv6_error: Can't invert tuple\n");
+ return -NF_ACCEPT;
+ }
+
+ *ctinfo = IP_CT_RELATED;
+
+ h = nf_conntrack_find_get(&intuple, NULL);
+ if (!h) {
+ DEBUGP("icmpv6_error: no match\n");
+ return -NF_ACCEPT;
+ } else {
+ if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY)
+ *ctinfo += IP_CT_IS_REPLY;
+ }
+
+ /* Update skb to refer to this connection */
+ skb->nfct = &nf_ct_tuplehash_to_ctrack(h)->ct_general;
+ skb->nfctinfo = *ctinfo;
+ return -NF_ACCEPT;
+}
+
+static int
+icmpv6_error(struct sk_buff *skb, unsigned int dataoff,
+ enum ip_conntrack_info *ctinfo, int pf, unsigned int hooknum)
+{
+ struct icmp6hdr _ih, *icmp6h;
+
+ icmp6h = skb_header_pointer(skb, dataoff, sizeof(_ih), &_ih);
+ if (icmp6h == NULL) {
+ if (LOG_INVALID(IPPROTO_ICMPV6))
+ nf_log_packet(PF_INET6, 0, skb, NULL, NULL, NULL,
+ "nf_ct_icmpv6: short packet ");
+ return -NF_ACCEPT;
+ }
+
+ if (hooknum != NF_IP6_PRE_ROUTING)
+ goto skipped;
+
+ /* Ignore it if the checksum's bogus. */
+ if (csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr,
+ skb->len - dataoff, IPPROTO_ICMPV6,
+ skb_checksum(skb, dataoff,
+ skb->len - dataoff, 0))) {
+ nf_log_packet(PF_INET6, 0, skb, NULL, NULL, NULL,
+ "nf_ct_icmpv6: ICMPv6 checksum failed\n");
+ return -NF_ACCEPT;
+ }
+
+skipped:
+
+ /* Not an error message (ICMPv6 types >= 128 are informational)? */
+ if (icmp6h->icmp6_type >= 128)
+ return NF_ACCEPT;
+
+ return icmpv6_error_message(skb, dataoff, ctinfo, hooknum);
+}
+
+struct nf_conntrack_protocol nf_conntrack_protocol_icmpv6 =
+{
+ .l3proto = PF_INET6,
+ .proto = IPPROTO_ICMPV6,
+ .name = "icmpv6",
+ .pkt_to_tuple = icmpv6_pkt_to_tuple,
+ .invert_tuple = icmpv6_invert_tuple,
+ .print_tuple = icmpv6_print_tuple,
+ .print_conntrack = icmpv6_print_conntrack,
+ .packet = icmpv6_packet,
+ .new = icmpv6_new,
+ .error = icmpv6_error,
+};
+
+EXPORT_SYMBOL(nf_conntrack_protocol_icmpv6);
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
new file mode 100644
index 00000000000..7640b9bb769
--- /dev/null
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -0,0 +1,885 @@
+/*
+ * IPv6 fragment reassembly for connection tracking
+ *
+ * Copyright (C)2004 USAGI/WIDE Project
+ *
+ * Author:
+ * Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ *
+ * Based on: net/ipv6/reassembly.c
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/socket.h>
+#include <linux/sockios.h>
+#include <linux/jiffies.h>
+#include <linux/net.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/in6.h>
+#include <linux/ipv6.h>
+#include <linux/icmpv6.h>
+#include <linux/random.h>
+#include <linux/jhash.h>
+
+#include <net/sock.h>
+#include <net/snmp.h>
+
+#include <net/ipv6.h>
+#include <net/protocol.h>
+#include <net/transp_v6.h>
+#include <net/rawv6.h>
+#include <net/ndisc.h>
+#include <net/addrconf.h>
+#include <linux/sysctl.h>
+#include <linux/netfilter.h>
+#include <linux/netfilter_ipv6.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(format, args...)
+#endif
+
+#define NF_CT_FRAG6_HIGH_THRESH 262144 /* == 256*1024 */
+#define NF_CT_FRAG6_LOW_THRESH 196608 /* == 192*1024 */
+#define NF_CT_FRAG6_TIMEOUT IPV6_FRAG_TIMEOUT
+
+int nf_ct_frag6_high_thresh = 256*1024;
+int nf_ct_frag6_low_thresh = 192*1024;
+int nf_ct_frag6_timeout = IPV6_FRAG_TIMEOUT;
+
+struct nf_ct_frag6_skb_cb
+{
+ struct inet6_skb_parm h;
+ int offset;
+ struct sk_buff *orig;
+};
+
+#define NFCT_FRAG6_CB(skb) ((struct nf_ct_frag6_skb_cb*)((skb)->cb))
+
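+/* Per-datagram reassembly state, keyed by (fragment id, saddr, daddr).
+ * "meat" counts the bytes queued so far, while "len" is the total length
+ * learned from the final fragment; reassembly is attempted once both
+ * FIRST_IN and LAST_IN are set and meat == len. */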
+struct nf_ct_frag6_queue
+{
+ struct nf_ct_frag6_queue *next;
+ struct list_head lru_list; /* lru list member */
+
+ __u32 id; /* fragment id */
+ struct in6_addr saddr;
+ struct in6_addr daddr;
+
+ spinlock_t lock;
+ atomic_t refcnt;
+ struct timer_list timer; /* expire timer */
+ struct sk_buff *fragments;
+ int len;
+ int meat;
+ struct timeval stamp;
+ unsigned int csum;
+ __u8 last_in; /* has first/last segment arrived? */
+#define COMPLETE 4
+#define FIRST_IN 2
+#define LAST_IN 1
+ __u16 nhoffset;
+ struct nf_ct_frag6_queue **pprev;
+};
+
+/* Hash table. */
+
+#define FRAG6Q_HASHSZ 64
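+/* Must stay a power of two: ip6qhashfn() reduces the hash with
+ * "& (FRAG6Q_HASHSZ - 1)". */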
+
+static struct nf_ct_frag6_queue *nf_ct_frag6_hash[FRAG6Q_HASHSZ];
+static rwlock_t nf_ct_frag6_lock = RW_LOCK_UNLOCKED;
+static u32 nf_ct_frag6_hash_rnd;
+static LIST_HEAD(nf_ct_frag6_lru_list);
+int nf_ct_frag6_nqueues = 0;
+
+static __inline__ void __fq_unlink(struct nf_ct_frag6_queue *fq)
+{
+ if (fq->next)
+ fq->next->pprev = fq->pprev;
+ *fq->pprev = fq->next;
+ list_del(&fq->lru_list);
+ nf_ct_frag6_nqueues--;
+}
+
+static __inline__ void fq_unlink(struct nf_ct_frag6_queue *fq)
+{
+ write_lock(&nf_ct_frag6_lock);
+ __fq_unlink(fq);
+ write_unlock(&nf_ct_frag6_lock);
+}
+
+static unsigned int ip6qhashfn(u32 id, struct in6_addr *saddr,
+ struct in6_addr *daddr)
+{
+ u32 a, b, c;
+
+ a = saddr->s6_addr32[0];
+ b = saddr->s6_addr32[1];
+ c = saddr->s6_addr32[2];
+
+ a += JHASH_GOLDEN_RATIO;
+ b += JHASH_GOLDEN_RATIO;
+ c += nf_ct_frag6_hash_rnd;
+ __jhash_mix(a, b, c);
+
+ a += saddr->s6_addr32[3];
+ b += daddr->s6_addr32[0];
+ c += daddr->s6_addr32[1];
+ __jhash_mix(a, b, c);
+
+ a += daddr->s6_addr32[2];
+ b += daddr->s6_addr32[3];
+ c += id;
+ __jhash_mix(a, b, c);
+
+ return c & (FRAG6Q_HASHSZ - 1);
+}
+
+static struct timer_list nf_ct_frag6_secret_timer;
+int nf_ct_frag6_secret_interval = 10 * 60 * HZ;
+
+static void nf_ct_frag6_secret_rebuild(unsigned long dummy)
+{
+ unsigned long now = jiffies;
+ int i;
+
+ write_lock(&nf_ct_frag6_lock);
+ get_random_bytes(&nf_ct_frag6_hash_rnd, sizeof(u32));
+ for (i = 0; i < FRAG6Q_HASHSZ; i++) {
+ struct nf_ct_frag6_queue *q;
+
+ q = nf_ct_frag6_hash[i];
+ while (q) {
+ struct nf_ct_frag6_queue *next = q->next;
+ unsigned int hval = ip6qhashfn(q->id,
+ &q->saddr,
+ &q->daddr);
+
+ if (hval != i) {
+ /* Unlink. */
+ if (q->next)
+ q->next->pprev = q->pprev;
+ *q->pprev = q->next;
+
+ /* Relink to new hash chain. */
+ if ((q->next = nf_ct_frag6_hash[hval]) != NULL)
+ q->next->pprev = &q->next;
+ nf_ct_frag6_hash[hval] = q;
+ q->pprev = &nf_ct_frag6_hash[hval];
+ }
+
+ q = next;
+ }
+ }
+ write_unlock(&nf_ct_frag6_lock);
+
+ mod_timer(&nf_ct_frag6_secret_timer, now + nf_ct_frag6_secret_interval);
+}
+
+atomic_t nf_ct_frag6_mem = ATOMIC_INIT(0);
+
+/* Memory Tracking Functions. */
+static inline void frag_kfree_skb(struct sk_buff *skb)
+{
+ atomic_sub(skb->truesize, &nf_ct_frag6_mem);
+ if (NFCT_FRAG6_CB(skb)->orig)
+ kfree_skb(NFCT_FRAG6_CB(skb)->orig);
+
+ kfree_skb(skb);
+}
+
+static inline void frag_free_queue(struct nf_ct_frag6_queue *fq)
+{
+ atomic_sub(sizeof(struct nf_ct_frag6_queue), &nf_ct_frag6_mem);
+ kfree(fq);
+}
+
+static inline struct nf_ct_frag6_queue *frag_alloc_queue(void)
+{
+ struct nf_ct_frag6_queue *fq = kmalloc(sizeof(struct nf_ct_frag6_queue), GFP_ATOMIC);
+
+ if (!fq)
+ return NULL;
+ atomic_add(sizeof(struct nf_ct_frag6_queue), &nf_ct_frag6_mem);
+ return fq;
+}
+
+/* Destruction primitives. */
+
+/* Complete destruction of fq. */
+static void nf_ct_frag6_destroy(struct nf_ct_frag6_queue *fq)
+{
+ struct sk_buff *fp;
+
+ BUG_TRAP(fq->last_in&COMPLETE);
+ BUG_TRAP(del_timer(&fq->timer) == 0);
+
+ /* Release all fragment data. */
+ fp = fq->fragments;
+ while (fp) {
+ struct sk_buff *xp = fp->next;
+
+ frag_kfree_skb(fp);
+ fp = xp;
+ }
+
+ frag_free_queue(fq);
+}
+
+static __inline__ void fq_put(struct nf_ct_frag6_queue *fq)
+{
+ if (atomic_dec_and_test(&fq->refcnt))
+ nf_ct_frag6_destroy(fq);
+}
+
+/* Kill fq entry. It is not destroyed immediately,
+ * because the caller (and possibly others) still holds a reference.
+ */
+static __inline__ void fq_kill(struct nf_ct_frag6_queue *fq)
+{
+ if (del_timer(&fq->timer))
+ atomic_dec(&fq->refcnt);
+
+ if (!(fq->last_in & COMPLETE)) {
+ fq_unlink(fq);
+ atomic_dec(&fq->refcnt);
+ fq->last_in |= COMPLETE;
+ }
+}
+
+static void nf_ct_frag6_evictor(void)
+{
+ struct nf_ct_frag6_queue *fq;
+ struct list_head *tmp;
+
+ for (;;) {
+ if (atomic_read(&nf_ct_frag6_mem) <= nf_ct_frag6_low_thresh)
+ return;
+ read_lock(&nf_ct_frag6_lock);
+ if (list_empty(&nf_ct_frag6_lru_list)) {
+ read_unlock(&nf_ct_frag6_lock);
+ return;
+ }
+ tmp = nf_ct_frag6_lru_list.next;
+ fq = list_entry(tmp, struct nf_ct_frag6_queue, lru_list);
+ atomic_inc(&fq->refcnt);
+ read_unlock(&nf_ct_frag6_lock);
+
+ spin_lock(&fq->lock);
+ if (!(fq->last_in&COMPLETE))
+ fq_kill(fq);
+ spin_unlock(&fq->lock);
+
+ fq_put(fq);
+ }
+}
+
+static void nf_ct_frag6_expire(unsigned long data)
+{
+ struct nf_ct_frag6_queue *fq = (struct nf_ct_frag6_queue *) data;
+
+ spin_lock(&fq->lock);
+
+ if (fq->last_in & COMPLETE)
+ goto out;
+
+ fq_kill(fq);
+
+out:
+ spin_unlock(&fq->lock);
+ fq_put(fq);
+}
+
+/* Creation primitives. */
+
+
+static struct nf_ct_frag6_queue *nf_ct_frag6_intern(unsigned int hash,
+ struct nf_ct_frag6_queue *fq_in)
+{
+ struct nf_ct_frag6_queue *fq;
+
+ write_lock(&nf_ct_frag6_lock);
+#ifdef CONFIG_SMP
+ for (fq = nf_ct_frag6_hash[hash]; fq; fq = fq->next) {
+ if (fq->id == fq_in->id &&
+ !ipv6_addr_cmp(&fq_in->saddr, &fq->saddr) &&
+ !ipv6_addr_cmp(&fq_in->daddr, &fq->daddr)) {
+ atomic_inc(&fq->refcnt);
+ write_unlock(&nf_ct_frag6_lock);
+ fq_in->last_in |= COMPLETE;
+ fq_put(fq_in);
+ return fq;
+ }
+ }
+#endif
+ fq = fq_in;
+
+ if (!mod_timer(&fq->timer, jiffies + nf_ct_frag6_timeout))
+ atomic_inc(&fq->refcnt);
+
+ atomic_inc(&fq->refcnt);
+ if ((fq->next = nf_ct_frag6_hash[hash]) != NULL)
+ fq->next->pprev = &fq->next;
+ nf_ct_frag6_hash[hash] = fq;
+ fq->pprev = &nf_ct_frag6_hash[hash];
+ INIT_LIST_HEAD(&fq->lru_list);
+ list_add_tail(&fq->lru_list, &nf_ct_frag6_lru_list);
+ nf_ct_frag6_nqueues++;
+ write_unlock(&nf_ct_frag6_lock);
+ return fq;
+}
+
+
+static struct nf_ct_frag6_queue *
+nf_ct_frag6_create(unsigned int hash, u32 id, struct in6_addr *src, struct in6_addr *dst)
+{
+ struct nf_ct_frag6_queue *fq;
+
+ if ((fq = frag_alloc_queue()) == NULL) {
+ DEBUGP("Can't alloc new queue\n");
+ goto oom;
+ }
+
+ memset(fq, 0, sizeof(struct nf_ct_frag6_queue));
+
+ fq->id = id;
+ ipv6_addr_copy(&fq->saddr, src);
+ ipv6_addr_copy(&fq->daddr, dst);
+
+ init_timer(&fq->timer);
+ fq->timer.function = nf_ct_frag6_expire;
+ fq->timer.data = (long) fq;
+ fq->lock = SPIN_LOCK_UNLOCKED;
+ atomic_set(&fq->refcnt, 1);
+
+ return nf_ct_frag6_intern(hash, fq);
+
+oom:
+ return NULL;
+}
+
+static __inline__ struct nf_ct_frag6_queue *
+fq_find(u32 id, struct in6_addr *src, struct in6_addr *dst)
+{
+ struct nf_ct_frag6_queue *fq;
+ unsigned int hash = ip6qhashfn(id, src, dst);
+
+ read_lock(&nf_ct_frag6_lock);
+ for (fq = nf_ct_frag6_hash[hash]; fq; fq = fq->next) {
+ if (fq->id == id &&
+ !ipv6_addr_cmp(src, &fq->saddr) &&
+ !ipv6_addr_cmp(dst, &fq->daddr)) {
+ atomic_inc(&fq->refcnt);
+ read_unlock(&nf_ct_frag6_lock);
+ return fq;
+ }
+ }
+ read_unlock(&nf_ct_frag6_lock);
+
+ return nf_ct_frag6_create(hash, id, src, dst);
+}
+
+
+static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb,
+ struct frag_hdr *fhdr, int nhoff)
+{
+ struct sk_buff *prev, *next;
+ int offset, end;
+
+ if (fq->last_in & COMPLETE) {
+ DEBUGP("Allready completed\n");
+ goto err;
+ }
+
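+ /* The fragment offset field counts 8-octet units in the upper 13 bits;
+ * masking off the low three (flag) bits of the host-order value yields
+ * the offset in bytes directly. */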
+ offset = ntohs(fhdr->frag_off) & ~0x7;
+ end = offset + (ntohs(skb->nh.ipv6h->payload_len) -
+ ((u8 *) (fhdr + 1) - (u8 *) (skb->nh.ipv6h + 1)));
+
+ if ((unsigned int)end > IPV6_MAXPLEN) {
+ DEBUGP("offset is too large.\n");
+ return -1;
+ }
+
+ if (skb->ip_summed == CHECKSUM_HW)
+ skb->csum = csum_sub(skb->csum,
+ csum_partial(skb->nh.raw,
+ (u8*)(fhdr + 1) - skb->nh.raw,
+ 0));
+
+ /* Is this the final fragment? */
+ if (!(fhdr->frag_off & htons(IP6_MF))) {
+ /* If we already have some bits beyond end
+ * or have different end, the segment is corrupted.
+ */
+ if (end < fq->len ||
+ ((fq->last_in & LAST_IN) && end != fq->len)) {
+ DEBUGP("already received last fragment\n");
+ goto err;
+ }
+ fq->last_in |= LAST_IN;
+ fq->len = end;
+ } else {
+ /* Check if the fragment is rounded to 8 bytes.
+ * Required by the RFC.
+ */
+ if (end & 0x7) {
+ /* RFC2460 says always send parameter problem in
+ * this case. -DaveM
+ */
+ DEBUGP("the end of this fragment is not rounded to 8 bytes.\n");
+ return -1;
+ }
+ if (end > fq->len) {
+ /* Some bits beyond end -> corruption. */
+ if (fq->last_in & LAST_IN) {
+ DEBUGP("last packet already reached.\n");
+ goto err;
+ }
+ fq->len = end;
+ }
+ }
+
+ if (end == offset)
+ goto err;
+
+ /* Point into the IP datagram 'data' part. */
+ if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data)) {
+ DEBUGP("queue: message is too short.\n");
+ goto err;
+ }
+ if (end-offset < skb->len) {
+ if (pskb_trim(skb, end - offset)) {
+ DEBUGP("Can't trim\n");
+ goto err;
+ }
+ if (skb->ip_summed != CHECKSUM_UNNECESSARY)
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+
+ /* Find out which fragments are in front and at the back of us
+ * in the chain of fragments so far. We must know where to put
+ * this fragment, right?
+ */
+ prev = NULL;
+ for (next = fq->fragments; next != NULL; next = next->next) {
+ if (NFCT_FRAG6_CB(next)->offset >= offset)
+ break; /* bingo! */
+ prev = next;
+ }
+
+ /* We found where to put this one. Check for overlap with
+ * preceding fragment, and, if needed, align things so that
+ * any overlaps are eliminated.
+ */
+ if (prev) {
+ int i = (NFCT_FRAG6_CB(prev)->offset + prev->len) - offset;
+
+ if (i > 0) {
+ offset += i;
+ if (end <= offset) {
+ DEBUGP("overlap\n");
+ goto err;
+ }
+ if (!pskb_pull(skb, i)) {
+ DEBUGP("Can't pull\n");
+ goto err;
+ }
+ if (skb->ip_summed != CHECKSUM_UNNECESSARY)
+ skb->ip_summed = CHECKSUM_NONE;
+ }
+ }
+
+ /* Look for overlap with succeeding segments.
+ * If we can merge fragments, do it.
+ */
+ while (next && NFCT_FRAG6_CB(next)->offset < end) {
+ /* overlap is 'i' bytes */
+ int i = end - NFCT_FRAG6_CB(next)->offset;
+
+ if (i < next->len) {
+ /* Eat head of the next overlapped fragment
+ * and leave the loop. The next ones cannot overlap.
+ */
+ DEBUGP("Eat head of the overlapped parts.: %d", i);
+ if (!pskb_pull(next, i))
+ goto err;
+
+ /* next fragment */
+ NFCT_FRAG6_CB(next)->offset += i;
+ fq->meat -= i;
+ if (next->ip_summed != CHECKSUM_UNNECESSARY)
+ next->ip_summed = CHECKSUM_NONE;
+ break;
+ } else {
+ struct sk_buff *free_it = next;
+
+ /* The old fragment is completely overridden by the
+ * new one; drop it.
+ */
+ next = next->next;
+
+ if (prev)
+ prev->next = next;
+ else
+ fq->fragments = next;
+
+ fq->meat -= free_it->len;
+ frag_kfree_skb(free_it);
+ }
+ }
+
+ NFCT_FRAG6_CB(skb)->offset = offset;
+
+ /* Insert this fragment in the chain of fragments. */
+ skb->next = next;
+ if (prev)
+ prev->next = skb;
+ else
+ fq->fragments = skb;
+
+ skb->dev = NULL;
+ skb_get_timestamp(skb, &fq->stamp);
+ fq->meat += skb->len;
+ atomic_add(skb->truesize, &nf_ct_frag6_mem);
+
+ /* The first fragment.
+ * nhoffset is obtained from the first fragment, of course.
+ */
+ if (offset == 0) {
+ fq->nhoffset = nhoff;
+ fq->last_in |= FIRST_IN;
+ }
+ write_lock(&nf_ct_frag6_lock);
+ list_move_tail(&fq->lru_list, &nf_ct_frag6_lru_list);
+ write_unlock(&nf_ct_frag6_lock);
+ return 0;
+
+err:
+ return -1;
+}
+
+/*
+ * Check if this packet is complete.
+ * Returns NULL on any failure, or a pointer to the current
+ * nexthdr field in the reassembled frame.
+ *
+ * It is called with locked fq, and caller must check that
+ * queue is eligible for reassembly, i.e. it is not COMPLETE and
+ * both the first and the last fragments have arrived and all the bits are here.
+ */
+static struct sk_buff *
+nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev)
+{
+ struct sk_buff *fp, *op, *head = fq->fragments;
+ int payload_len;
+
+ fq_kill(fq);
+
+ BUG_TRAP(head != NULL);
+ BUG_TRAP(NFCT_FRAG6_CB(head)->offset == 0);
+
+ /* Unfragmented part is taken from the first segment. */
+ payload_len = (head->data - head->nh.raw) - sizeof(struct ipv6hdr) + fq->len - sizeof(struct frag_hdr);
+ if (payload_len > IPV6_MAXPLEN) {
+ DEBUGP("payload len is too large.\n");
+ goto out_oversize;
+ }
+
+ /* Head of list must not be cloned. */
+ if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC)) {
+ DEBUGP("skb is cloned but can't expand head");
+ goto out_oom;
+ }
+
+ /* If the first fragment is fragmented itself, we split
+ * it to two chunks: the first with data and paged part
+ * and the second, holding only fragments. */
+ if (skb_shinfo(head)->frag_list) {
+ struct sk_buff *clone;
+ int i, plen = 0;
+
+ if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL) {
+ DEBUGP("Can't alloc skb\n");
+ goto out_oom;
+ }
+ clone->next = head->next;
+ head->next = clone;
+ skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
+ skb_shinfo(head)->frag_list = NULL;
+ for (i=0; i<skb_shinfo(head)->nr_frags; i++)
+ plen += skb_shinfo(head)->frags[i].size;
+ clone->len = clone->data_len = head->data_len - plen;
+ head->data_len -= clone->len;
+ head->len -= clone->len;
+ clone->csum = 0;
+ clone->ip_summed = head->ip_summed;
+
+ NFCT_FRAG6_CB(clone)->orig = NULL;
+ atomic_add(clone->truesize, &nf_ct_frag6_mem);
+ }
+
+ /* We have to remove fragment header from datagram and to relocate
+ * header in order to calculate ICV correctly. */
+ head->nh.raw[fq->nhoffset] = head->h.raw[0];
+ memmove(head->head + sizeof(struct frag_hdr), head->head,
+ (head->data - head->head) - sizeof(struct frag_hdr));
+ head->mac.raw += sizeof(struct frag_hdr);
+ head->nh.raw += sizeof(struct frag_hdr);
+
+ skb_shinfo(head)->frag_list = head->next;
+ head->h.raw = head->data;
+ skb_push(head, head->data - head->nh.raw);
+ atomic_sub(head->truesize, &nf_ct_frag6_mem);
+
+ for (fp=head->next; fp; fp = fp->next) {
+ head->data_len += fp->len;
+ head->len += fp->len;
+ if (head->ip_summed != fp->ip_summed)
+ head->ip_summed = CHECKSUM_NONE;
+ else if (head->ip_summed == CHECKSUM_HW)
+ head->csum = csum_add(head->csum, fp->csum);
+ head->truesize += fp->truesize;
+ atomic_sub(fp->truesize, &nf_ct_frag6_mem);
+ }
+
+ head->next = NULL;
+ head->dev = dev;
+ skb_set_timestamp(head, &fq->stamp);
+ head->nh.ipv6h->payload_len = htons(payload_len);
+
+ /* Yes, and fold redundant checksum back. 8) */
+ if (head->ip_summed == CHECKSUM_HW)
+ head->csum = csum_partial(head->nh.raw, head->h.raw-head->nh.raw, head->csum);
+
+ fq->fragments = NULL;
+
+ /* all original skbs are linked into the NFCT_FRAG6_CB(head).orig */
+ fp = skb_shinfo(head)->frag_list;
+ if (NFCT_FRAG6_CB(fp)->orig == NULL)
+ /* in the code above, the head skb was divided into two skbs. */
+ fp = fp->next;
+
+ op = NFCT_FRAG6_CB(head)->orig;
+ for (; fp; fp = fp->next) {
+ struct sk_buff *orig = NFCT_FRAG6_CB(fp)->orig;
+
+ op->next = orig;
+ op = orig;
+ NFCT_FRAG6_CB(fp)->orig = NULL;
+ }
+
+ return head;
+
+out_oversize:
+ if (net_ratelimit())
+ printk(KERN_DEBUG "nf_ct_frag6_reasm: payload len = %d\n", payload_len);
+ goto out_fail;
+out_oom:
+ if (net_ratelimit())
+ printk(KERN_DEBUG "nf_ct_frag6_reasm: no memory for reassembly\n");
+out_fail:
+ return NULL;
+}
+
+/*
+ * find the header just before Fragment Header.
+ *
+ * on success, return 0 and set ...
+ * (*prevhdrp): the protocol number ("Next Header" value) identifying
+ * the header just before the Fragment Header.
+ * (*prevhoff): the offset of the "Next Header" field in the header
+ * just before the Fragment Header.
+ * (*fhoff) : the offset of the Fragment Header.
+ *
+ * Based on ipv6_skip_exthdr() in net/ipv6/exthdrs.c
+ *
+ */
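+/* Example: for [IPv6 hdr][Hop-by-Hop][Fragment][TCP], *fhoff ends up at
+ * the Fragment header and *prevhoff at the Next Header byte of the
+ * Hop-by-Hop header, which is the byte nf_ct_frag6_reasm() rewrites
+ * when it strips the Fragment header. */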
+static int
+find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff)
+{
+ u8 nexthdr = skb->nh.ipv6h->nexthdr;
+ u8 prev_nhoff = (u8 *)&skb->nh.ipv6h->nexthdr - skb->data;
+ int start = (u8 *)(skb->nh.ipv6h+1) - skb->data;
+ int len = skb->len - start;
+ u8 prevhdr = NEXTHDR_IPV6;
+
+ while (nexthdr != NEXTHDR_FRAGMENT) {
+ struct ipv6_opt_hdr hdr;
+ int hdrlen;
+
+ if (!ipv6_ext_hdr(nexthdr)) {
+ return -1;
+ }
+ if (len < (int)sizeof(struct ipv6_opt_hdr)) {
+ DEBUGP("too short\n");
+ return -1;
+ }
+ if (nexthdr == NEXTHDR_NONE) {
+ DEBUGP("next header is none\n");
+ return -1;
+ }
+ if (skb_copy_bits(skb, start, &hdr, sizeof(hdr)))
+ BUG();
+ if (nexthdr == NEXTHDR_AUTH)
+ hdrlen = (hdr.hdrlen+2)<<2;
+ else
+ hdrlen = ipv6_optlen(&hdr);
+
+ prevhdr = nexthdr;
+ prev_nhoff = start;
+
+ nexthdr = hdr.nexthdr;
+ len -= hdrlen;
+ start += hdrlen;
+ }
+
+ if (len < 0)
+ return -1;
+
+ *prevhdrp = prevhdr;
+ *prevhoff = prev_nhoff;
+ *fhoff = start;
+
+ return 0;
+}
+
+struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb)
+{
+ struct sk_buff *clone;
+ struct net_device *dev = skb->dev;
+ struct frag_hdr *fhdr;
+ struct nf_ct_frag6_queue *fq;
+ struct ipv6hdr *hdr;
+ int fhoff, nhoff;
+ u8 prevhdr;
+ struct sk_buff *ret_skb = NULL;
+
+ /* Jumbo payload inhibits frag. header */
+ if (skb->nh.ipv6h->payload_len == 0) {
+ DEBUGP("payload len = 0\n");
+ return skb;
+ }
+
+ if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0)
+ return skb;
+
+ clone = skb_clone(skb, GFP_ATOMIC);
+ if (clone == NULL) {
+ DEBUGP("Can't clone skb\n");
+ return skb;
+ }
+
+ NFCT_FRAG6_CB(clone)->orig = skb;
+
+ if (!pskb_may_pull(clone, fhoff + sizeof(*fhdr))) {
+ DEBUGP("message is too short.\n");
+ goto ret_orig;
+ }
+
+ clone->h.raw = clone->data + fhoff;
+ hdr = clone->nh.ipv6h;
+ fhdr = (struct frag_hdr *)clone->h.raw;
+
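+ /* 0xFFF9 covers the 13-bit fragment offset (0xFFF8) plus the M
+ * ("more fragments") flag (0x0001); if all of these are zero the
+ * packet is not actually fragmented. */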
+ if (!(fhdr->frag_off & htons(0xFFF9))) {
+ DEBUGP("Invalid fragment offset\n");
+ /* It is not a fragmented frame */
+ goto ret_orig;
+ }
+
+ if (atomic_read(&nf_ct_frag6_mem) > nf_ct_frag6_high_thresh)
+ nf_ct_frag6_evictor();
+
+ fq = fq_find(fhdr->identification, &hdr->saddr, &hdr->daddr);
+ if (fq == NULL) {
+ DEBUGP("Can't find and can't create new queue\n");
+ goto ret_orig;
+ }
+
+ spin_lock(&fq->lock);
+
+ if (nf_ct_frag6_queue(fq, clone, fhdr, nhoff) < 0) {
+ spin_unlock(&fq->lock);
+ DEBUGP("Can't insert skb to queue\n");
+ fq_put(fq);
+ goto ret_orig;
+ }
+
+ if (fq->last_in == (FIRST_IN|LAST_IN) && fq->meat == fq->len) {
+ ret_skb = nf_ct_frag6_reasm(fq, dev);
+ if (ret_skb == NULL)
+ DEBUGP("Can't reassemble fragmented packets\n");
+ }
+ spin_unlock(&fq->lock);
+
+ fq_put(fq);
+ return ret_skb;
+
+ret_orig:
+ kfree_skb(clone);
+ return skb;
+}
+
+void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb,
+ struct net_device *in, struct net_device *out,
+ int (*okfn)(struct sk_buff *))
+{
+ struct sk_buff *s, *s2;
+
+ for (s = NFCT_FRAG6_CB(skb)->orig; s;) {
+ nf_conntrack_put_reasm(s->nfct_reasm);
+ nf_conntrack_get_reasm(skb);
+ s->nfct_reasm = skb;
+
+ s2 = s->next;
+ NF_HOOK_THRESH(PF_INET6, hooknum, s, in, out, okfn,
+ NF_IP6_PRI_CONNTRACK_DEFRAG + 1);
+ s = s2;
+ }
+ nf_conntrack_put_reasm(skb);
+}
+
+int nf_ct_frag6_kfree_frags(struct sk_buff *skb)
+{
+ struct sk_buff *s, *s2;
+
+ for (s = NFCT_FRAG6_CB(skb)->orig; s; s = s2) {
+
+ s2 = s->next;
+ kfree_skb(s);
+ }
+
+ kfree_skb(skb);
+
+ return 0;
+}
+
+int nf_ct_frag6_init(void)
+{
+ nf_ct_frag6_hash_rnd = (u32) ((num_physpages ^ (num_physpages>>7)) ^
+ (jiffies ^ (jiffies >> 6)));
+
+ init_timer(&nf_ct_frag6_secret_timer);
+ nf_ct_frag6_secret_timer.function = nf_ct_frag6_secret_rebuild;
+ nf_ct_frag6_secret_timer.expires = jiffies
+ + nf_ct_frag6_secret_interval;
+ add_timer(&nf_ct_frag6_secret_timer);
+
+ return 0;
+}
+
+void nf_ct_frag6_cleanup(void)
+{
+ del_timer(&nf_ct_frag6_secret_timer);
+ nf_ct_frag6_evictor();
+}
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index a1265a320b1..8e9628f1c4c 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -174,8 +174,10 @@ int ipv6_raw_deliver(struct sk_buff *skb, int nexthdr)
struct sk_buff *clone = skb_clone(skb, GFP_ATOMIC);
/* Not releasing hash table! */
- if (clone)
+ if (clone) {
+ nf_reset(clone);
rawv6_rcv(sk, clone);
+ }
}
sk = __raw_v6_lookup(sk_next(sk), nexthdr, daddr, saddr,
IP6CB(skb)->iif);
@@ -296,13 +298,10 @@ void rawv6_err(struct sock *sk, struct sk_buff *skb,
static inline int rawv6_rcv_skb(struct sock * sk, struct sk_buff * skb)
{
if ((raw6_sk(sk)->checksum || sk->sk_filter) &&
- skb->ip_summed != CHECKSUM_UNNECESSARY) {
- if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum))) {
- /* FIXME: increment a raw6 drops counter here */
- kfree_skb(skb);
- return 0;
- }
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ skb_checksum_complete(skb)) {
+ /* FIXME: increment a raw6 drops counter here */
+ kfree_skb(skb);
+ return 0;
}
/* Charge it to the socket. */
@@ -335,32 +334,25 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
if (!rp->checksum)
skb->ip_summed = CHECKSUM_UNNECESSARY;
- if (skb->ip_summed != CHECKSUM_UNNECESSARY) {
- if (skb->ip_summed == CHECKSUM_HW) {
- skb_postpull_rcsum(skb, skb->nh.raw,
- skb->h.raw - skb->nh.raw);
+ if (skb->ip_summed == CHECKSUM_HW) {
+ skb_postpull_rcsum(skb, skb->nh.raw,
+ skb->h.raw - skb->nh.raw);
+ if (!csum_ipv6_magic(&skb->nh.ipv6h->saddr,
+ &skb->nh.ipv6h->daddr,
+ skb->len, inet->num, skb->csum))
skb->ip_summed = CHECKSUM_UNNECESSARY;
- if (csum_ipv6_magic(&skb->nh.ipv6h->saddr,
- &skb->nh.ipv6h->daddr,
- skb->len, inet->num, skb->csum)) {
- LIMIT_NETDEBUG(KERN_DEBUG "raw v6 hw csum failure.\n");
- skb->ip_summed = CHECKSUM_NONE;
- }
- }
- if (skb->ip_summed == CHECKSUM_NONE)
- skb->csum = ~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
- &skb->nh.ipv6h->daddr,
- skb->len, inet->num, 0);
}
+ if (skb->ip_summed != CHECKSUM_UNNECESSARY)
+ skb->csum = ~csum_ipv6_magic(&skb->nh.ipv6h->saddr,
+ &skb->nh.ipv6h->daddr,
+ skb->len, inet->num, 0);
if (inet->hdrincl) {
- if (skb->ip_summed != CHECKSUM_UNNECESSARY &&
- (unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum))) {
+ if (skb_checksum_complete(skb)) {
/* FIXME: increment a raw6 drops counter here */
kfree_skb(skb);
return 0;
}
- skb->ip_summed = CHECKSUM_UNNECESSARY;
}
rawv6_rcv_skb(sk, skb);
@@ -405,7 +397,7 @@ static int rawv6_recvmsg(struct kiocb *iocb, struct sock *sk,
if (skb->ip_summed==CHECKSUM_UNNECESSARY) {
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
} else if (msg->msg_flags&MSG_TRUNC) {
- if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum)))
+ if (__skb_checksum_complete(skb))
goto csum_copy_err;
err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied);
} else {
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 227e99ed510..f7f42c3e96c 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1710,7 +1710,7 @@ static void fib6_dump_end(struct netlink_callback *cb)
static int fib6_dump_done(struct netlink_callback *cb)
{
fib6_dump_end(cb);
- return cb->done(cb);
+ return cb->done ? cb->done(cb) : 0;
}
int inet6_dump_fib(struct sk_buff *skb, struct netlink_callback *cb)
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index d746d3b27ef..62c0e5bd931 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1401,20 +1401,18 @@ out:
static int tcp_v6_checksum_init(struct sk_buff *skb)
{
if (skb->ip_summed == CHECKSUM_HW) {
- skb->ip_summed = CHECKSUM_UNNECESSARY;
if (!tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
- &skb->nh.ipv6h->daddr,skb->csum))
+ &skb->nh.ipv6h->daddr,skb->csum)) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
return 0;
- LIMIT_NETDEBUG(KERN_DEBUG "hw tcp v6 csum failed\n");
+ }
}
+
+ skb->csum = ~tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
+ &skb->nh.ipv6h->daddr, 0);
+
if (skb->len <= 76) {
- if (tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
- &skb->nh.ipv6h->daddr,skb_checksum(skb, 0, skb->len, 0)))
- return -1;
- skb->ip_summed = CHECKSUM_UNNECESSARY;
- } else {
- skb->csum = ~tcp_v6_check(skb->h.th,skb->len,&skb->nh.ipv6h->saddr,
- &skb->nh.ipv6h->daddr,0);
+ return __skb_checksum_complete(skb);
}
return 0;
}
@@ -1575,7 +1573,7 @@ static int tcp_v6_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
goto discard_it;
if ((skb->ip_summed != CHECKSUM_UNNECESSARY &&
- tcp_v6_checksum_init(skb) < 0))
+ tcp_v6_checksum_init(skb)))
goto bad_packet;
th = skb->h.th;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index bf9519341fd..e671153b47b 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -248,7 +248,7 @@ try_again:
err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov,
copied);
} else if (msg->msg_flags&MSG_TRUNC) {
- if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum)))
+ if (__skb_checksum_complete(skb))
goto csum_copy_err;
err = skb_copy_datagram_iovec(skb, sizeof(struct udphdr), msg->msg_iov,
copied);
@@ -363,13 +363,10 @@ static inline int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb)
return -1;
}
- if (skb->ip_summed != CHECKSUM_UNNECESSARY) {
- if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum))) {
- UDP6_INC_STATS_BH(UDP_MIB_INERRORS);
- kfree_skb(skb);
- return 0;
- }
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (skb_checksum_complete(skb)) {
+ UDP6_INC_STATS_BH(UDP_MIB_INERRORS);
+ kfree_skb(skb);
+ return 0;
}
if (sock_queue_rcv_skb(sk,skb)<0) {
@@ -491,13 +488,10 @@ static int udpv6_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
uh = skb->h.uh;
}
- if (skb->ip_summed==CHECKSUM_HW) {
+ if (skb->ip_summed == CHECKSUM_HW &&
+ !csum_ipv6_magic(saddr, daddr, ulen, IPPROTO_UDP, skb->csum))
skb->ip_summed = CHECKSUM_UNNECESSARY;
- if (csum_ipv6_magic(saddr, daddr, ulen, IPPROTO_UDP, skb->csum)) {
- LIMIT_NETDEBUG(KERN_DEBUG "udp v6 hw csum failure.\n");
- skb->ip_summed = CHECKSUM_NONE;
- }
- }
+
if (skb->ip_summed != CHECKSUM_UNNECESSARY)
skb->csum = ~csum_ipv6_magic(saddr, daddr, ulen, IPPROTO_UDP, 0);
@@ -521,8 +515,7 @@ static int udpv6_rcv(struct sk_buff **pskb, unsigned int *nhoffp)
if (!xfrm6_policy_check(NULL, XFRM_POLICY_IN, skb))
goto discard;
- if (skb->ip_summed != CHECKSUM_UNNECESSARY &&
- (unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum)))
+ if (skb_checksum_complete(skb))
goto discard;
UDP6_INC_STATS_BH(UDP_MIB_NOPORTS);
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 8296b38bf27..a84f9221e5f 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -1,3 +1,6 @@
+menu "Core Netfilter Configuration"
+ depends on NET && NETFILTER
+
config NETFILTER_NETLINK
tristate "Netfilter netlink interface"
help
@@ -22,3 +25,74 @@ config NETFILTER_NETLINK_LOG
and is also scheduled to replace the old syslog-based ipt_LOG
and ip6t_LOG modules.
+config NF_CONNTRACK
+ tristate "Layer 3 Independent Connection tracking (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && IP_NF_CONNTRACK=n
+ default n
+ ---help---
+ Connection tracking keeps a record of what packets have passed
+ through your machine, in order to figure out how they are related
+ into connections.
+
+ Layer 3 independent connection tracking is an experimental scheme
+ which generalizes ip_conntrack to support other layer 3 protocols.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+config NF_CT_ACCT
+ bool "Connection tracking flow accounting"
+ depends on NF_CONNTRACK
+ help
+ If this option is enabled, the connection tracking code will
+ keep per-flow packet and byte counters.
+
+ Those counters can be used for flow-based accounting or the
+ `connbytes' match.
+
+ If unsure, say `N'.
+
+config NF_CONNTRACK_MARK
+ bool 'Connection mark tracking support'
+ depends on NF_CONNTRACK
+ help
+ This option enables support for connection marks, used by the
+ `CONNMARK' target and `connmark' match. Similar to the mark value
+ of packets, but this mark value is kept in the conntrack session
+ instead of the individual packets.
+
+config NF_CONNTRACK_EVENTS
+ bool "Connection tracking events"
+ depends on NF_CONNTRACK
+ help
+ If this option is enabled, the connection tracking code will
+ provide a notifier chain that can be used by other kernel code
+ to get notified about changes in the connection tracking state.
+
+ If unsure, say `N'.
+
+config NF_CT_PROTO_SCTP
+ tristate 'SCTP protocol on new connection tracking support (EXPERIMENTAL)'
+ depends on EXPERIMENTAL && NF_CONNTRACK
+ default n
+ help
+ With this option enabled, the layer 3 independent connection
+ tracking code will be able to do state tracking on SCTP connections.
+
+ If you want to compile it as a module, say M here and read
+ Documentation/modules.txt. If unsure, say `N'.
+
+config NF_CONNTRACK_FTP
+ tristate "FTP support on new connection tracking (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && NF_CONNTRACK
+ help
+ Tracking FTP connections is problematic: special helpers are
+ required to track them and to do masquerading and other forms
+ of Network Address Translation on them.
+
+ This is FTP support on Layer 3 independent connection tracking.
+ Layer 3 independent connection tracking is an experimental scheme
+ which generalizes ip_conntrack to support other layer 3 protocols.
+
+ To compile it as a module, choose M here. If unsure, say N.
+
+endmenu
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index b3b44f8b415..55f019ad2c0 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -5,3 +5,11 @@ obj-$(CONFIG_NETFILTER) = netfilter.o
obj-$(CONFIG_NETFILTER_NETLINK) += nfnetlink.o
obj-$(CONFIG_NETFILTER_NETLINK_QUEUE) += nfnetlink_queue.o
obj-$(CONFIG_NETFILTER_NETLINK_LOG) += nfnetlink_log.o
+
+nf_conntrack-objs := nf_conntrack_core.o nf_conntrack_standalone.o nf_conntrack_l3proto_generic.o nf_conntrack_proto_generic.o nf_conntrack_proto_tcp.o nf_conntrack_proto_udp.o
+
+obj-$(CONFIG_NF_CONNTRACK) += nf_conntrack.o
+obj-$(CONFIG_NF_CONNTRACK_FTP) += nf_conntrack_ftp.o
+
+# SCTP protocol connection tracking
+obj-$(CONFIG_NF_CT_PROTO_SCTP) += nf_conntrack_proto_sctp.o
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
new file mode 100644
index 00000000000..9a67c796b38
--- /dev/null
+++ b/net/netfilter/nf_conntrack_core.c
@@ -0,0 +1,1538 @@
+/* Connection state tracking for netfilter. This is separated from,
+ but required by, the NAT layer; it can also be used by an iptables
+ extension. */
+
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2005 Netfilter Core Team <coreteam@netfilter.org>
+ * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * 23 Apr 2001: Harald Welte <laforge@gnumonks.org>
+ * - new API and handling of conntrack/nat helpers
+ * - now capable of multiple expectations for one master
+ * 16 Jul 2002: Harald Welte <laforge@gnumonks.org>
+ * - add usage/reference counts to ip_conntrack_expect
+ * - export ip_conntrack[_expect]_{find_get,put} functions
+ * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ * - generalize the L3 protocol dependent part.
+ * 23 Mar 2004: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ * - add support for various sizes of conntrack structures.
+ *
+ * Derived from net/ipv4/netfilter/ip_conntrack_core.c
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/netfilter.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/proc_fs.h>
+#include <linux/vmalloc.h>
+#include <linux/stddef.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/jhash.h>
+#include <linux/err.h>
+#include <linux/percpu.h>
+#include <linux/moduleparam.h>
+#include <linux/notifier.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/socket.h>
+
+/* This rwlock protects the main hash table, protocol/helper/expected
+ registrations, conntrack timers */
+#define ASSERT_READ_LOCK(x)
+#define ASSERT_WRITE_LOCK(x)
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_l3proto.h>
+#include <net/netfilter/nf_conntrack_protocol.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <linux/netfilter_ipv4/listhelp.h>
+
+#define NF_CONNTRACK_VERSION "0.4.1"
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(format, args...)
+#endif
+
+DEFINE_RWLOCK(nf_conntrack_lock);
+
+/* nf_conntrack_standalone needs this */
+atomic_t nf_conntrack_count = ATOMIC_INIT(0);
+
+void (*nf_conntrack_destroyed)(struct nf_conn *conntrack) = NULL;
+LIST_HEAD(nf_conntrack_expect_list);
+struct nf_conntrack_protocol **nf_ct_protos[PF_MAX];
+struct nf_conntrack_l3proto *nf_ct_l3protos[PF_MAX];
+static LIST_HEAD(helpers);
+unsigned int nf_conntrack_htable_size = 0;
+int nf_conntrack_max;
+struct list_head *nf_conntrack_hash;
+static kmem_cache_t *nf_conntrack_expect_cachep;
+struct nf_conn nf_conntrack_untracked;
+unsigned int nf_ct_log_invalid;
+static LIST_HEAD(unconfirmed);
+static int nf_conntrack_vmalloc;
+
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+struct notifier_block *nf_conntrack_chain;
+struct notifier_block *nf_conntrack_expect_chain;
+
+DEFINE_PER_CPU(struct nf_conntrack_ecache, nf_conntrack_ecache);
+
+/* deliver cached events and clear cache entry - must be called with locally
+ * disabled softirqs */
+static inline void
+__nf_ct_deliver_cached_events(struct nf_conntrack_ecache *ecache)
+{
+ DEBUGP("ecache: delivering events for %p\n", ecache->ct);
+ if (nf_ct_is_confirmed(ecache->ct) && !nf_ct_is_dying(ecache->ct)
+ && ecache->events)
+ notifier_call_chain(&nf_conntrack_chain, ecache->events,
+ ecache->ct);
+
+ ecache->events = 0;
+ nf_ct_put(ecache->ct);
+ ecache->ct = NULL;
+}
+
+/* Deliver all cached events for a particular conntrack. This is called
+ * by code prior to async packet handling for freeing the skb */
+void nf_ct_deliver_cached_events(const struct nf_conn *ct)
+{
+ struct nf_conntrack_ecache *ecache;
+
+ local_bh_disable();
+ ecache = &__get_cpu_var(nf_conntrack_ecache);
+ if (ecache->ct == ct)
+ __nf_ct_deliver_cached_events(ecache);
+ local_bh_enable();
+}
+
+/* Deliver cached events for old pending events, if current conntrack != old */
+void __nf_ct_event_cache_init(struct nf_conn *ct)
+{
+ struct nf_conntrack_ecache *ecache;
+
+ /* take care of delivering potentially old events */
+ ecache = &__get_cpu_var(nf_conntrack_ecache);
+ BUG_ON(ecache->ct == ct);
+ if (ecache->ct)
+ __nf_ct_deliver_cached_events(ecache);
+ /* initialize for this conntrack/packet */
+ ecache->ct = ct;
+ nf_conntrack_get(&ct->ct_general);
+}
+
+/* flush the event cache - touches other CPU's data and must not be called
+ * while packets are still passing through the code */
+static void nf_ct_event_cache_flush(void)
+{
+ struct nf_conntrack_ecache *ecache;
+ int cpu;
+
+ for_each_cpu(cpu) {
+ ecache = &per_cpu(nf_conntrack_ecache, cpu);
+ if (ecache->ct)
+ nf_ct_put(ecache->ct);
+ }
+}
+#else
+static inline void nf_ct_event_cache_flush(void) {}
+#endif /* CONFIG_NF_CONNTRACK_EVENTS */
+
+DEFINE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat);
+EXPORT_PER_CPU_SYMBOL(nf_conntrack_stat);
+
+/*
+ * This scheme offers various sizes of "struct nf_conn" depending on
+ * the features (helper, nat, ...)
+ */
+
+#define NF_CT_FEATURES_NAMELEN 256
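+/* One slab cache per feature combination, indexed by the NF_CT_F_*
+ * value passed to nf_conntrack_register_cache(). */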
+static struct {
+ /* name of slab cache. printed in /proc/slabinfo */
+ char *name;
+
+ /* size of slab cache */
+ size_t size;
+
+ /* slab cache pointer */
+ kmem_cache_t *cachep;
+
+ /* allocated slab cache + modules which use this slab cache */
+ int use;
+
+ /* Initialization */
+ int (*init_conntrack)(struct nf_conn *, u_int32_t);
+
+} nf_ct_cache[NF_CT_F_NUM];
+
+/* protect members of nf_ct_cache except of "use" */
+DEFINE_RWLOCK(nf_ct_cache_lock);
+
+/* This avoids calling kmem_cache_create() with the same name simultaneously */
+DECLARE_MUTEX(nf_ct_cache_mutex);
+
+extern struct nf_conntrack_protocol nf_conntrack_generic_protocol;
+struct nf_conntrack_protocol *
+nf_ct_find_proto(u_int16_t l3proto, u_int8_t protocol)
+{
+ if (unlikely(nf_ct_protos[l3proto] == NULL))
+ return &nf_conntrack_generic_protocol;
+
+ return nf_ct_protos[l3proto][protocol];
+}
+
+static int nf_conntrack_hash_rnd_initted;
+static unsigned int nf_conntrack_hash_rnd;
+
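+/* Fold the tuple (addresses, l3num, protocol, ports) together with a
+ * random value so that the bucket distribution is not predictable by
+ * remote hosts. */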
+static u_int32_t __hash_conntrack(const struct nf_conntrack_tuple *tuple,
+ unsigned int size, unsigned int rnd)
+{
+ unsigned int a, b;
+ a = jhash((void *)tuple->src.u3.all, sizeof(tuple->src.u3.all),
+ ((tuple->src.l3num) << 16) | tuple->dst.protonum);
+ b = jhash((void *)tuple->dst.u3.all, sizeof(tuple->dst.u3.all),
+ (tuple->src.u.all << 16) | tuple->dst.u.all);
+
+ return jhash_2words(a, b, rnd) % size;
+}
+
+static inline u_int32_t hash_conntrack(const struct nf_conntrack_tuple *tuple)
+{
+ return __hash_conntrack(tuple, nf_conntrack_htable_size,
+ nf_conntrack_hash_rnd);
+}
+
+/* Initialize "struct nf_conn" which has spaces for helper */
+static int
+init_conntrack_for_helper(struct nf_conn *conntrack, u_int32_t features)
+{
+
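+ /* Round conntrack->data up to the alignment of union nf_conntrack_help
+ * using the usual (addr + align - 1) & ~(align - 1) idiom; e.g. with an
+ * 8-byte alignment, address 0x...0c rounds up to 0x...10. */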
+ conntrack->help = (union nf_conntrack_help *)
+ (((unsigned long)conntrack->data
+ + (__alignof__(union nf_conntrack_help) - 1))
+ & (~((unsigned long)(__alignof__(union nf_conntrack_help) -1))));
+ return 0;
+}
+
+int nf_conntrack_register_cache(u_int32_t features, const char *name,
+ size_t size,
+ int (*init)(struct nf_conn *, u_int32_t))
+{
+ int ret = 0;
+ char *cache_name;
+ kmem_cache_t *cachep;
+
+ DEBUGP("nf_conntrack_register_cache: features=0x%x, name=%s, size=%d\n",
+ features, name, size);
+
+ if (features < NF_CT_F_BASIC || features >= NF_CT_F_NUM) {
+ DEBUGP("nf_conntrack_register_cache: invalid features.: 0x%x\n",
+ features);
+ return -EINVAL;
+ }
+
+ down(&nf_ct_cache_mutex);
+
+ write_lock_bh(&nf_ct_cache_lock);
+ /* e.g: multiple helpers are loaded */
+ if (nf_ct_cache[features].use > 0) {
+ DEBUGP("nf_conntrack_register_cache: already resisterd.\n");
+ if ((!strncmp(nf_ct_cache[features].name, name,
+ NF_CT_FEATURES_NAMELEN))
+ && nf_ct_cache[features].size == size
+ && nf_ct_cache[features].init_conntrack == init) {
+ DEBUGP("nf_conntrack_register_cache: reusing.\n");
+ nf_ct_cache[features].use++;
+ ret = 0;
+ } else
+ ret = -EBUSY;
+
+ write_unlock_bh(&nf_ct_cache_lock);
+ up(&nf_ct_cache_mutex);
+ return ret;
+ }
+ write_unlock_bh(&nf_ct_cache_lock);
+
+ /*
+ * The memory space for the name of the slab cache must stay alive
+ * until the cache is destroyed.
+ */
+ cache_name = kmalloc(sizeof(char)*NF_CT_FEATURES_NAMELEN, GFP_ATOMIC);
+ if (cache_name == NULL) {
+ DEBUGP("nf_conntrack_register_cache: can't alloc cache_name\n");
+ ret = -ENOMEM;
+ goto out_up_mutex;
+ }
+
+ if (strlcpy(cache_name, name, NF_CT_FEATURES_NAMELEN)
+ >= NF_CT_FEATURES_NAMELEN) {
+ printk("nf_conntrack_register_cache: name too long\n");
+ ret = -EINVAL;
+ goto out_free_name;
+ }
+
+ cachep = kmem_cache_create(cache_name, size, 0, 0,
+ NULL, NULL);
+ if (!cachep) {
+ printk("nf_conntrack_register_cache: Can't create slab cache "
+ "for the features = 0x%x\n", features);
+ ret = -ENOMEM;
+ goto out_free_name;
+ }
+
+ write_lock_bh(&nf_ct_cache_lock);
+ nf_ct_cache[features].use = 1;
+ nf_ct_cache[features].size = size;
+ nf_ct_cache[features].init_conntrack = init;
+ nf_ct_cache[features].cachep = cachep;
+ nf_ct_cache[features].name = cache_name;
+ write_unlock_bh(&nf_ct_cache_lock);
+
+ goto out_up_mutex;
+
+out_free_name:
+ kfree(cache_name);
+out_up_mutex:
+ up(&nf_ct_cache_mutex);
+ return ret;
+}
+
+/* FIXME: Currently, only nf_conntrack_cleanup() can call this function. */
+void nf_conntrack_unregister_cache(u_int32_t features)
+{
+ kmem_cache_t *cachep;
+ char *name;
+
+ /*
+ * This ensures that kmem_cache_create() isn't called before the
+ * slab cache is destroyed.
+ */
+ DEBUGP("nf_conntrack_unregister_cache: 0x%04x\n", features);
+ down(&nf_ct_cache_mutex);
+
+ write_lock_bh(&nf_ct_cache_lock);
+ if (--nf_ct_cache[features].use > 0) {
+ write_unlock_bh(&nf_ct_cache_lock);
+ up(&nf_ct_cache_mutex);
+ return;
+ }
+ cachep = nf_ct_cache[features].cachep;
+ name = nf_ct_cache[features].name;
+ nf_ct_cache[features].cachep = NULL;
+ nf_ct_cache[features].name = NULL;
+ nf_ct_cache[features].init_conntrack = NULL;
+ nf_ct_cache[features].size = 0;
+ write_unlock_bh(&nf_ct_cache_lock);
+
+ synchronize_net();
+
+ kmem_cache_destroy(cachep);
+ kfree(name);
+
+ up(&nf_ct_cache_mutex);
+}
+
+int
+nf_ct_get_tuple(const struct sk_buff *skb,
+ unsigned int nhoff,
+ unsigned int dataoff,
+ u_int16_t l3num,
+ u_int8_t protonum,
+ struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_l3proto *l3proto,
+ const struct nf_conntrack_protocol *protocol)
+{
+ NF_CT_TUPLE_U_BLANK(tuple);
+
+ tuple->src.l3num = l3num;
+ if (l3proto->pkt_to_tuple(skb, nhoff, tuple) == 0)
+ return 0;
+
+ tuple->dst.protonum = protonum;
+ tuple->dst.dir = IP_CT_DIR_ORIGINAL;
+
+ return protocol->pkt_to_tuple(skb, dataoff, tuple);
+}
+
+int
+nf_ct_invert_tuple(struct nf_conntrack_tuple *inverse,
+ const struct nf_conntrack_tuple *orig,
+ const struct nf_conntrack_l3proto *l3proto,
+ const struct nf_conntrack_protocol *protocol)
+{
+ NF_CT_TUPLE_U_BLANK(inverse);
+
+ inverse->src.l3num = orig->src.l3num;
+ if (l3proto->invert_tuple(inverse, orig) == 0)
+ return 0;
+
+ inverse->dst.dir = !orig->dst.dir;
+
+ inverse->dst.protonum = orig->dst.protonum;
+ return protocol->invert_tuple(inverse, orig);
+}
+
+/* nf_conntrack_expect helper functions */
+static void nf_ct_unlink_expect(struct nf_conntrack_expect *exp)
+{
+ ASSERT_WRITE_LOCK(&nf_conntrack_lock);
+ NF_CT_ASSERT(!timer_pending(&exp_timeout));
+ list_del(&exp->list);
+ NF_CT_STAT_INC(expect_delete);
+ exp->master->expecting--;
+ nf_conntrack_expect_put(exp);
+}
+
+static void expectation_timed_out(unsigned long ul_expect)
+{
+ struct nf_conntrack_expect *exp = (void *)ul_expect;
+
+ write_lock_bh(&nf_conntrack_lock);
+ nf_ct_unlink_expect(exp);
+ write_unlock_bh(&nf_conntrack_lock);
+ nf_conntrack_expect_put(exp);
+}
+
+/* If an expectation for this connection is found, it gets deleted from
+ * the global list and then returned. */
+static struct nf_conntrack_expect *
+find_expectation(const struct nf_conntrack_tuple *tuple)
+{
+ struct nf_conntrack_expect *i;
+
+ list_for_each_entry(i, &nf_conntrack_expect_list, list) {
+ /* If master is not in hash table yet (ie. packet hasn't left
+ this machine yet), how can other end know about expected?
+ Hence these are not the droids you are looking for (if
+ master ct never got confirmed, we'd hold a reference to it
+ and weird things would happen to future packets). */
+ if (nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask)
+ && nf_ct_is_confirmed(i->master)) {
+ if (i->flags & NF_CT_EXPECT_PERMANENT) {
+ atomic_inc(&i->use);
+ return i;
+ } else if (del_timer(&i->timeout)) {
+ nf_ct_unlink_expect(i);
+ return i;
+ }
+ }
+ }
+ return NULL;
+}
+
+/* delete all expectations for this conntrack */
+static void remove_expectations(struct nf_conn *ct)
+{
+ struct nf_conntrack_expect *i, *tmp;
+
+ /* Optimization: most connections never expect any others. */
+ if (ct->expecting == 0)
+ return;
+
+ list_for_each_entry_safe(i, tmp, &nf_conntrack_expect_list, list) {
+ if (i->master == ct && del_timer(&i->timeout)) {
+ nf_ct_unlink_expect(i);
+ nf_conntrack_expect_put(i);
+ }
+ }
+}
+
+static void
+clean_from_lists(struct nf_conn *ct)
+{
+ unsigned int ho, hr;
+
+ DEBUGP("clean_from_lists(%p)\n", ct);
+ ASSERT_WRITE_LOCK(&nf_conntrack_lock);
+
+ ho = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+ hr = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+ LIST_DELETE(&nf_conntrack_hash[ho], &ct->tuplehash[IP_CT_DIR_ORIGINAL]);
+ LIST_DELETE(&nf_conntrack_hash[hr], &ct->tuplehash[IP_CT_DIR_REPLY]);
+
+ /* Destroy all pending expectations */
+ remove_expectations(ct);
+}
+
+static void
+destroy_conntrack(struct nf_conntrack *nfct)
+{
+ struct nf_conn *ct = (struct nf_conn *)nfct;
+ struct nf_conntrack_l3proto *l3proto;
+ struct nf_conntrack_protocol *proto;
+
+ DEBUGP("destroy_conntrack(%p)\n", ct);
+ NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
+ NF_CT_ASSERT(!timer_pending(&ct->timeout));
+
+ nf_conntrack_event(IPCT_DESTROY, ct);
+ set_bit(IPS_DYING_BIT, &ct->status);
+
+ /* To make sure we don't get any weird locking issues here:
+ * destroy_conntrack() MUST NOT be called with a write lock
+ * to nf_conntrack_lock!!! -HW */
+ l3proto = nf_ct_find_l3proto(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num);
+ if (l3proto && l3proto->destroy)
+ l3proto->destroy(ct);
+
+ proto = nf_ct_find_proto(ct->tuplehash[IP_CT_DIR_REPLY].tuple.src.l3num,
+ ct->tuplehash[IP_CT_DIR_REPLY].tuple.dst.protonum);
+ if (proto && proto->destroy)
+ proto->destroy(ct);
+
+ if (nf_conntrack_destroyed)
+ nf_conntrack_destroyed(ct);
+
+ write_lock_bh(&nf_conntrack_lock);
+ /* Expectations will have been removed in clean_from_lists,
+ * except TFTP can create an expectation on the first packet,
+ * before connection is in the list, so we need to clean here,
+ * too. */
+ remove_expectations(ct);
+
+ /* We overload first tuple to link into unconfirmed list. */
+ if (!nf_ct_is_confirmed(ct)) {
+ BUG_ON(list_empty(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list));
+ list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list);
+ }
+
+ NF_CT_STAT_INC(delete);
+ write_unlock_bh(&nf_conntrack_lock);
+
+ if (ct->master)
+ nf_ct_put(ct->master);
+
+ DEBUGP("destroy_conntrack: returning ct=%p to slab\n", ct);
+ nf_conntrack_free(ct);
+}
+
+static void death_by_timeout(unsigned long ul_conntrack)
+{
+ struct nf_conn *ct = (void *)ul_conntrack;
+
+ write_lock_bh(&nf_conntrack_lock);
+ /* Inside lock so preempt is disabled on module removal path.
+ * Otherwise we can get spurious warnings. */
+ NF_CT_STAT_INC(delete_list);
+ clean_from_lists(ct);
+ write_unlock_bh(&nf_conntrack_lock);
+ nf_ct_put(ct);
+}
+
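+/* True if this hash entry matches the tuple and does not belong to
+ * ignored_conntrack (so a conntrack can be skipped when checking whether
+ * its own tuple is already taken). */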
+static inline int
+conntrack_tuple_cmp(const struct nf_conntrack_tuple_hash *i,
+ const struct nf_conntrack_tuple *tuple,
+ const struct nf_conn *ignored_conntrack)
+{
+ ASSERT_READ_LOCK(&nf_conntrack_lock);
+ return nf_ct_tuplehash_to_ctrack(i) != ignored_conntrack
+ && nf_ct_tuple_equal(tuple, &i->tuple);
+}
+
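+/* Hash lookup without taking a reference; the caller must hold
+ * nf_conntrack_lock. */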
+static struct nf_conntrack_tuple_hash *
+__nf_conntrack_find(const struct nf_conntrack_tuple *tuple,
+ const struct nf_conn *ignored_conntrack)
+{
+ struct nf_conntrack_tuple_hash *h;
+ unsigned int hash = hash_conntrack(tuple);
+
+ ASSERT_READ_LOCK(&nf_conntrack_lock);
+ list_for_each_entry(h, &nf_conntrack_hash[hash], list) {
+ if (conntrack_tuple_cmp(h, tuple, ignored_conntrack)) {
+ NF_CT_STAT_INC(found);
+ return h;
+ }
+ NF_CT_STAT_INC(searched);
+ }
+
+ return NULL;
+}
+
+/* Find a connection corresponding to a tuple. */
+struct nf_conntrack_tuple_hash *
+nf_conntrack_find_get(const struct nf_conntrack_tuple *tuple,
+ const struct nf_conn *ignored_conntrack)
+{
+ struct nf_conntrack_tuple_hash *h;
+
+ read_lock_bh(&nf_conntrack_lock);
+ h = __nf_conntrack_find(tuple, ignored_conntrack);
+ if (h)
+ atomic_inc(&nf_ct_tuplehash_to_ctrack(h)->ct_general.use);
+ read_unlock_bh(&nf_conntrack_lock);
+
+ return h;
+}
+
+/* Confirm a connection given skb; places it in hash table */
+int
+__nf_conntrack_confirm(struct sk_buff **pskb)
+{
+ unsigned int hash, repl_hash;
+ struct nf_conn *ct;
+ enum ip_conntrack_info ctinfo;
+
+ ct = nf_ct_get(*pskb, &ctinfo);
+
+ /* ipt_REJECT uses nf_conntrack_attach to attach related
+ ICMP/TCP RST packets in the other direction. The actual packet
+ which created the connection will be IP_CT_NEW, or IP_CT_RELATED
+ for an expected connection. */
+ if (CTINFO2DIR(ctinfo) != IP_CT_DIR_ORIGINAL)
+ return NF_ACCEPT;
+
+ hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
+ repl_hash = hash_conntrack(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
+
+ /* We're not in hash table, and we refuse to set up related
+ connections for unconfirmed conns. But packet copies and
+ REJECT will give spurious warnings here. */
+ /* NF_CT_ASSERT(atomic_read(&ct->ct_general.use) == 1); */
+
+ /* No external references means no one else could have
+ confirmed us. */
+ NF_CT_ASSERT(!nf_ct_is_confirmed(ct));
+ DEBUGP("Confirming conntrack %p\n", ct);
+
+ write_lock_bh(&nf_conntrack_lock);
+
+ /* See if there's one in the list already, including reverse:
+ NAT could have grabbed it without realizing, since we're
+ not in the hash. If there is, we lost the race. */
+ if (!LIST_FIND(&nf_conntrack_hash[hash],
+ conntrack_tuple_cmp,
+ struct nf_conntrack_tuple_hash *,
+ &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple, NULL)
+ && !LIST_FIND(&nf_conntrack_hash[repl_hash],
+ conntrack_tuple_cmp,
+ struct nf_conntrack_tuple_hash *,
+ &ct->tuplehash[IP_CT_DIR_REPLY].tuple, NULL)) {
+ /* Remove from unconfirmed list */
+ list_del(&ct->tuplehash[IP_CT_DIR_ORIGINAL].list);
+
+ list_prepend(&nf_conntrack_hash[hash],
+ &ct->tuplehash[IP_CT_DIR_ORIGINAL]);
+ list_prepend(&nf_conntrack_hash[repl_hash],
+ &ct->tuplehash[IP_CT_DIR_REPLY]);
+ /* Timer relative to confirmation time, not original
+ setting time, otherwise we'd get timer wrap in
+ weird delay cases. */
+ ct->timeout.expires += jiffies;
+ add_timer(&ct->timeout);
+ atomic_inc(&ct->ct_general.use);
+ set_bit(IPS_CONFIRMED_BIT, &ct->status);
+ NF_CT_STAT_INC(insert);
+ write_unlock_bh(&nf_conntrack_lock);
+ if (ct->helper)
+ nf_conntrack_event_cache(IPCT_HELPER, *pskb);
+#ifdef CONFIG_NF_NAT_NEEDED
+ if (test_bit(IPS_SRC_NAT_DONE_BIT, &ct->status) ||
+ test_bit(IPS_DST_NAT_DONE_BIT, &ct->status))
+ nf_conntrack_event_cache(IPCT_NATINFO, *pskb);
+#endif
+ nf_conntrack_event_cache(master_ct(ct) ?
+ IPCT_RELATED : IPCT_NEW, *pskb);
+ return NF_ACCEPT;
+ }
+
+ NF_CT_STAT_INC(insert_failed);
+ write_unlock_bh(&nf_conntrack_lock);
+ return NF_DROP;
+}
+
+/* Returns true if a connection corresponds to the tuple (required
+ for NAT). */
+int
+nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
+ const struct nf_conn *ignored_conntrack)
+{
+ struct nf_conntrack_tuple_hash *h;
+
+ read_lock_bh(&nf_conntrack_lock);
+ h = __nf_conntrack_find(tuple, ignored_conntrack);
+ read_unlock_bh(&nf_conntrack_lock);
+
+ return h != NULL;
+}
+
+/* There's a small race here where we may free a just-assured
+ connection. Too bad: we're in trouble anyway. */
+static inline int unreplied(const struct nf_conntrack_tuple_hash *i)
+{
+ return !(test_bit(IPS_ASSURED_BIT,
+ &nf_ct_tuplehash_to_ctrack(i)->status));
+}
+
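+/* Table full: try to drop an entry from this hash chain that is not yet
+ * assured, to make room for a new connection. Returns 1 on success. */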
+static int early_drop(struct list_head *chain)
+{
+ /* Traverse backwards: gives us oldest, which is roughly LRU */
+ struct nf_conntrack_tuple_hash *h;
+ struct nf_conn *ct = NULL;
+ int dropped = 0;
+
+ read_lock_bh(&nf_conntrack_lock);
+ h = LIST_FIND_B(chain, unreplied, struct nf_conntrack_tuple_hash *);
+ if (h) {
+ ct = nf_ct_tuplehash_to_ctrack(h);
+ atomic_inc(&ct->ct_general.use);
+ }
+ read_unlock_bh(&nf_conntrack_lock);
+
+ if (!ct)
+ return dropped;
+
+ if (del_timer(&ct->timeout)) {
+ death_by_timeout((unsigned long)ct);
+ dropped = 1;
+ NF_CT_STAT_INC(early_drop);
+ }
+ nf_ct_put(ct);
+ return dropped;
+}
+
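+/* Helpers are looked up by matching their tuple/mask against the reply
+ * tuple of the connection. Caller holds nf_conntrack_lock. */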
+static inline int helper_cmp(const struct nf_conntrack_helper *i,
+ const struct nf_conntrack_tuple *rtuple)
+{
+ return nf_ct_tuple_mask_cmp(rtuple, &i->tuple, &i->mask);
+}
+
+static struct nf_conntrack_helper *
+nf_ct_find_helper(const struct nf_conntrack_tuple *tuple)
+{
+ return LIST_FIND(&helpers, helper_cmp,
+ struct nf_conntrack_helper *,
+ tuple);
+}
+
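+/* Allocate a conntrack from the slab cache matching the features the
+ * connection needs (e.g. helper storage). When nf_conntrack_max is hit,
+ * try early_drop() and return ERR_PTR(-ENOMEM) if nothing can be evicted. */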
+static struct nf_conn *
+__nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
+ const struct nf_conntrack_tuple *repl,
+ const struct nf_conntrack_l3proto *l3proto)
+{
+ struct nf_conn *conntrack = NULL;
+ u_int32_t features = 0;
+
+ if (!nf_conntrack_hash_rnd_initted) {
+ get_random_bytes(&nf_conntrack_hash_rnd, 4);
+ nf_conntrack_hash_rnd_initted = 1;
+ }
+
+ if (nf_conntrack_max
+ && atomic_read(&nf_conntrack_count) >= nf_conntrack_max) {
+ unsigned int hash = hash_conntrack(orig);
+ /* Try dropping from this hash chain. */
+ if (!early_drop(&nf_conntrack_hash[hash])) {
+ if (net_ratelimit())
+ printk(KERN_WARNING
+ "nf_conntrack: table full, dropping"
+ " packet.\n");
+ return ERR_PTR(-ENOMEM);
+ }
+ }
+
+ /* find features needed by this conntrack. */
+ features = l3proto->get_features(orig);
+ read_lock_bh(&nf_conntrack_lock);
+ if (nf_ct_find_helper(repl) != NULL)
+ features |= NF_CT_F_HELP;
+ read_unlock_bh(&nf_conntrack_lock);
+
+ DEBUGP("nf_conntrack_alloc: features=0x%x\n", features);
+
+ read_lock_bh(&nf_ct_cache_lock);
+
+ if (!nf_ct_cache[features].use) {
+ DEBUGP("nf_conntrack_alloc: not supported features = 0x%x\n",
+ features);
+ goto out;
+ }
+
+ conntrack = kmem_cache_alloc(nf_ct_cache[features].cachep, GFP_ATOMIC);
+ if (conntrack == NULL) {
+ DEBUGP("nf_conntrack_alloc: Can't alloc conntrack from cache\n");
+ goto out;
+ }
+
+ memset(conntrack, 0, nf_ct_cache[features].size);
+ conntrack->features = features;
+ if (nf_ct_cache[features].init_conntrack &&
+ nf_ct_cache[features].init_conntrack(conntrack, features) < 0) {
+ DEBUGP("nf_conntrack_alloc: failed to init\n");
+ kmem_cache_free(nf_ct_cache[features].cachep, conntrack);
+ conntrack = NULL;
+ goto out;
+ }
+
+ atomic_set(&conntrack->ct_general.use, 1);
+ conntrack->ct_general.destroy = destroy_conntrack;
+ conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple = *orig;
+ conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *repl;
+ /* Don't set timer yet: wait for confirmation */
+ init_timer(&conntrack->timeout);
+ conntrack->timeout.data = (unsigned long)conntrack;
+ conntrack->timeout.function = death_by_timeout;
+
+ atomic_inc(&nf_conntrack_count);
+out:
+ read_unlock_bh(&nf_ct_cache_lock);
+ return conntrack;
+}
+
+struct nf_conn *nf_conntrack_alloc(const struct nf_conntrack_tuple *orig,
+ const struct nf_conntrack_tuple *repl)
+{
+ struct nf_conntrack_l3proto *l3proto;
+
+ l3proto = nf_ct_find_l3proto(orig->src.l3num);
+ return __nf_conntrack_alloc(orig, repl, l3proto);
+}
+
+void nf_conntrack_free(struct nf_conn *conntrack)
+{
+ u_int32_t features = conntrack->features;
+ NF_CT_ASSERT(features >= NF_CT_F_BASIC && features < NF_CT_F_NUM);
+ DEBUGP("nf_conntrack_free: features = 0x%x, conntrack=%p\n", features,
+ conntrack);
+ kmem_cache_free(nf_ct_cache[features].cachep, conntrack);
+ atomic_dec(&nf_conntrack_count);
+}
+
+/* Allocate a new conntrack: we return -ENOMEM if classification
+ failed due to stress. Otherwise it really is unclassifiable. */
+static struct nf_conntrack_tuple_hash *
+init_conntrack(const struct nf_conntrack_tuple *tuple,
+ struct nf_conntrack_l3proto *l3proto,
+ struct nf_conntrack_protocol *protocol,
+ struct sk_buff *skb,
+ unsigned int dataoff)
+{
+ struct nf_conn *conntrack;
+ struct nf_conntrack_tuple repl_tuple;
+ struct nf_conntrack_expect *exp;
+
+ if (!nf_ct_invert_tuple(&repl_tuple, tuple, l3proto, protocol)) {
+ DEBUGP("Can't invert tuple.\n");
+ return NULL;
+ }
+
+ conntrack = __nf_conntrack_alloc(tuple, &repl_tuple, l3proto);
+ if (conntrack == NULL || IS_ERR(conntrack)) {
+ DEBUGP("Can't allocate conntrack.\n");
+ return (struct nf_conntrack_tuple_hash *)conntrack;
+ }
+
+ if (!protocol->new(conntrack, skb, dataoff)) {
+ nf_conntrack_free(conntrack);
+ DEBUGP("init conntrack: can't track with proto module\n");
+ return NULL;
+ }
+
+ write_lock_bh(&nf_conntrack_lock);
+ exp = find_expectation(tuple);
+
+ if (exp) {
+ DEBUGP("conntrack: expectation arrives ct=%p exp=%p\n",
+ conntrack, exp);
+ /* Welcome, Mr. Bond. We've been expecting you... */
+ __set_bit(IPS_EXPECTED_BIT, &conntrack->status);
+ conntrack->master = exp->master;
+#ifdef CONFIG_NF_CONNTRACK_MARK
+ conntrack->mark = exp->master->mark;
+#endif
+ nf_conntrack_get(&conntrack->master->ct_general);
+ NF_CT_STAT_INC(expect_new);
+ } else {
+ conntrack->helper = nf_ct_find_helper(&repl_tuple);
+
+ NF_CT_STAT_INC(new);
+ }
+
+ /* Overload tuple linked list to put us in unconfirmed list. */
+ list_add(&conntrack->tuplehash[IP_CT_DIR_ORIGINAL].list, &unconfirmed);
+
+ write_unlock_bh(&nf_conntrack_lock);
+
+ if (exp) {
+ if (exp->expectfn)
+ exp->expectfn(conntrack, exp);
+ nf_conntrack_expect_put(exp);
+ }
+
+ return &conntrack->tuplehash[IP_CT_DIR_ORIGINAL];
+}
+
+/* On success, returns conntrack ptr, sets skb->nfct and ctinfo */
+static inline struct nf_conn *
+resolve_normal_ct(struct sk_buff *skb,
+ unsigned int dataoff,
+ u_int16_t l3num,
+ u_int8_t protonum,
+ struct nf_conntrack_l3proto *l3proto,
+ struct nf_conntrack_protocol *proto,
+ int *set_reply,
+ enum ip_conntrack_info *ctinfo)
+{
+ struct nf_conntrack_tuple tuple;
+ struct nf_conntrack_tuple_hash *h;
+ struct nf_conn *ct;
+
+ if (!nf_ct_get_tuple(skb, (unsigned int)(skb->nh.raw - skb->data),
+ dataoff, l3num, protonum, &tuple, l3proto,
+ proto)) {
+ DEBUGP("resolve_normal_ct: Can't get tuple\n");
+ return NULL;
+ }
+
+ /* look for tuple match */
+ h = nf_conntrack_find_get(&tuple, NULL);
+ if (!h) {
+ h = init_conntrack(&tuple, l3proto, proto, skb, dataoff);
+ if (!h)
+ return NULL;
+ if (IS_ERR(h))
+ return (void *)h;
+ }
+ ct = nf_ct_tuplehash_to_ctrack(h);
+
+ /* It exists; we have (non-exclusive) reference. */
+ if (NF_CT_DIRECTION(h) == IP_CT_DIR_REPLY) {
+ *ctinfo = IP_CT_ESTABLISHED + IP_CT_IS_REPLY;
+ /* Please set reply bit if this packet OK */
+ *set_reply = 1;
+ } else {
+ /* Once we've had two way comms, always ESTABLISHED. */
+ if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) {
+ DEBUGP("nf_conntrack_in: normal packet for %p\n", ct);
+ *ctinfo = IP_CT_ESTABLISHED;
+ } else if (test_bit(IPS_EXPECTED_BIT, &ct->status)) {
+ DEBUGP("nf_conntrack_in: related packet for %p\n", ct);
+ *ctinfo = IP_CT_RELATED;
+ } else {
+ DEBUGP("nf_conntrack_in: new packet for %p\n", ct);
+ *ctinfo = IP_CT_NEW;
+ }
+ *set_reply = 0;
+ }
+ skb->nfct = &ct->ct_general;
+ skb->nfctinfo = *ctinfo;
+ return ct;
+}
+
+unsigned int
+nf_conntrack_in(int pf, unsigned int hooknum, struct sk_buff **pskb)
+{
+ struct nf_conn *ct;
+ enum ip_conntrack_info ctinfo;
+ struct nf_conntrack_l3proto *l3proto;
+ struct nf_conntrack_protocol *proto;
+ unsigned int dataoff;
+ u_int8_t protonum;
+ int set_reply = 0;
+ int ret;
+
+ /* Previously seen (loopback or untracked)? Ignore. */
+ if ((*pskb)->nfct) {
+ NF_CT_STAT_INC(ignore);
+ return NF_ACCEPT;
+ }
+
+ l3proto = nf_ct_find_l3proto((u_int16_t)pf);
+ if ((ret = l3proto->prepare(pskb, hooknum, &dataoff, &protonum)) <= 0) {
+ DEBUGP("not prepared to track yet or error occured\n");
+ return -ret;
+ }
+
+ proto = nf_ct_find_proto((u_int16_t)pf, protonum);
+
+ /* It may be a special packet (error, unclean, ...); the
+ * inverse of the return code tells the netfilter
+ * core what to do with the packet. */
+ if (proto->error != NULL &&
+ (ret = proto->error(*pskb, dataoff, &ctinfo, pf, hooknum)) <= 0) {
+ NF_CT_STAT_INC(error);
+ NF_CT_STAT_INC(invalid);
+ return -ret;
+ }
+
+ ct = resolve_normal_ct(*pskb, dataoff, pf, protonum, l3proto, proto,
+ &set_reply, &ctinfo);
+ if (!ct) {
+ /* Not valid part of a connection */
+ NF_CT_STAT_INC(invalid);
+ return NF_ACCEPT;
+ }
+
+ if (IS_ERR(ct)) {
+ /* Too stressed to deal. */
+ NF_CT_STAT_INC(drop);
+ return NF_DROP;
+ }
+
+ NF_CT_ASSERT((*pskb)->nfct);
+
+ ret = proto->packet(ct, *pskb, dataoff, ctinfo, pf, hooknum);
+ if (ret < 0) {
+ /* Invalid: inverse of the return code tells
+ * the netfilter core what to do */
+ DEBUGP("nf_conntrack_in: Can't track with proto module\n");
+ nf_conntrack_put((*pskb)->nfct);
+ (*pskb)->nfct = NULL;
+ NF_CT_STAT_INC(invalid);
+ return -ret;
+ }
+
+ if (set_reply && !test_and_set_bit(IPS_SEEN_REPLY_BIT, &ct->status))
+ nf_conntrack_event_cache(IPCT_STATUS, *pskb);
+
+ return ret;
+}
+
+int nf_ct_invert_tuplepr(struct nf_conntrack_tuple *inverse,
+ const struct nf_conntrack_tuple *orig)
+{
+ return nf_ct_invert_tuple(inverse, orig,
+ nf_ct_find_l3proto(orig->src.l3num),
+ nf_ct_find_proto(orig->src.l3num,
+ orig->dst.protonum));
+}
+
+/* Would two expected things clash? */
+static inline int expect_clash(const struct nf_conntrack_expect *a,
+ const struct nf_conntrack_expect *b)
+{
+ /* Part covered by intersection of masks must be unequal,
+ otherwise they clash */
+ struct nf_conntrack_tuple intersect_mask;
+ int count;
+
+ intersect_mask.src.l3num = a->mask.src.l3num & b->mask.src.l3num;
+ intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all;
+ intersect_mask.dst.u.all = a->mask.dst.u.all & b->mask.dst.u.all;
+ intersect_mask.dst.protonum = a->mask.dst.protonum
+ & b->mask.dst.protonum;
+
+ for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){
+ intersect_mask.src.u3.all[count] =
+ a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
+ }
+
+ for (count = 0; count < NF_CT_TUPLE_L3SIZE; count++){
+ intersect_mask.dst.u3.all[count] =
+ a->mask.dst.u3.all[count] & b->mask.dst.u3.all[count];
+ }
+
+ return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask);
+}
+
+static inline int expect_matches(const struct nf_conntrack_expect *a,
+ const struct nf_conntrack_expect *b)
+{
+ return a->master == b->master
+ && nf_ct_tuple_equal(&a->tuple, &b->tuple)
+ && nf_ct_tuple_equal(&a->mask, &b->mask);
+}
+
+/* Generally a bad idea to call this: could have matched already. */
+void nf_conntrack_unexpect_related(struct nf_conntrack_expect *exp)
+{
+ struct nf_conntrack_expect *i;
+
+ write_lock_bh(&nf_conntrack_lock);
+ /* choose the oldest expectation to evict */
+ list_for_each_entry_reverse(i, &nf_conntrack_expect_list, list) {
+ if (expect_matches(i, exp) && del_timer(&i->timeout)) {
+ nf_ct_unlink_expect(i);
+ write_unlock_bh(&nf_conntrack_lock);
+ nf_conntrack_expect_put(i);
+ return;
+ }
+ }
+ write_unlock_bh(&nf_conntrack_lock);
+}
+
+/* We don't increase the master conntrack refcount for unfulfilled
+ * expectations. During the conntrack destruction, the expectations are
+ * always killed before the conntrack itself */
+struct nf_conntrack_expect *nf_conntrack_expect_alloc(struct nf_conn *me)
+{
+ struct nf_conntrack_expect *new;
+
+ new = kmem_cache_alloc(nf_conntrack_expect_cachep, GFP_ATOMIC);
+ if (!new) {
+ DEBUGP("expect_related: OOM allocating expect\n");
+ return NULL;
+ }
+ new->master = me;
+ atomic_set(&new->use, 1);
+ return new;
+}
+
+void nf_conntrack_expect_put(struct nf_conntrack_expect *exp)
+{
+ if (atomic_dec_and_test(&exp->use))
+ kmem_cache_free(nf_conntrack_expect_cachep, exp);
+}
+
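+/* Link a new expectation into the global list and start its timeout,
+ * which is taken from the master's helper. Caller holds nf_conntrack_lock. */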
+static void nf_conntrack_expect_insert(struct nf_conntrack_expect *exp)
+{
+ atomic_inc(&exp->use);
+ exp->master->expecting++;
+ list_add(&exp->list, &nf_conntrack_expect_list);
+
+ init_timer(&exp->timeout);
+ exp->timeout.data = (unsigned long)exp;
+ exp->timeout.function = expectation_timed_out;
+ exp->timeout.expires = jiffies + exp->master->helper->timeout * HZ;
+ add_timer(&exp->timeout);
+
+ atomic_inc(&exp->use);
+ NF_CT_STAT_INC(expect_create);
+}
+
+/* Race with expectations being used means we could have none to find; OK. */
+static void evict_oldest_expect(struct nf_conn *master)
+{
+ struct nf_conntrack_expect *i;
+
+ list_for_each_entry_reverse(i, &nf_conntrack_expect_list, list) {
+ if (i->master == master) {
+ if (del_timer(&i->timeout)) {
+ nf_ct_unlink_expect(i);
+ nf_conntrack_expect_put(i);
+ }
+ break;
+ }
+ }
+}
+
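+/* Restart an expectation's timer; returns 0 if it has already expired
+ * (the expectation is dying), 1 otherwise. */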
+static inline int refresh_timer(struct nf_conntrack_expect *i)
+{
+ if (!del_timer(&i->timeout))
+ return 0;
+
+ i->timeout.expires = jiffies + i->master->helper->timeout*HZ;
+ add_timer(&i->timeout);
+ return 1;
+}
+
+int nf_conntrack_expect_related(struct nf_conntrack_expect *expect)
+{
+ struct nf_conntrack_expect *i;
+ int ret;
+
+ DEBUGP("nf_conntrack_expect_related %p\n", related_to);
+ DEBUGP("tuple: "); NF_CT_DUMP_TUPLE(&expect->tuple);
+ DEBUGP("mask: "); NF_CT_DUMP_TUPLE(&expect->mask);
+
+ write_lock_bh(&nf_conntrack_lock);
+ list_for_each_entry(i, &nf_conntrack_expect_list, list) {
+ if (expect_matches(i, expect)) {
+ /* Refresh timer: if it's dying, ignore.. */
+ if (refresh_timer(i)) {
+ ret = 0;
+ goto out;
+ }
+ } else if (expect_clash(i, expect)) {
+ ret = -EBUSY;
+ goto out;
+ }
+ }
+ /* Will be over limit? */
+ if (expect->master->helper->max_expected &&
+ expect->master->expecting >= expect->master->helper->max_expected)
+ evict_oldest_expect(expect->master);
+
+ nf_conntrack_expect_insert(expect);
+ nf_conntrack_expect_event(IPEXP_NEW, expect);
+ ret = 0;
+out:
+ write_unlock_bh(&nf_conntrack_lock);
+ return ret;
+}
+
+/* Alter reply tuple (maybe alter helper). This is for NAT, and is
+ implicitly racy: see __nf_conntrack_confirm */
+void nf_conntrack_alter_reply(struct nf_conn *conntrack,
+ const struct nf_conntrack_tuple *newreply)
+{
+ write_lock_bh(&nf_conntrack_lock);
+ /* Should be unconfirmed, so not in hash table yet */
+ NF_CT_ASSERT(!nf_ct_is_confirmed(conntrack));
+
+ DEBUGP("Altering reply tuple of %p to ", conntrack);
+ NF_CT_DUMP_TUPLE(newreply);
+
+ conntrack->tuplehash[IP_CT_DIR_REPLY].tuple = *newreply;
+ if (!conntrack->master && conntrack->expecting == 0)
+ conntrack->helper = nf_ct_find_helper(newreply);
+ write_unlock_bh(&nf_conntrack_lock);
+}
+
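+/* Register a helper: make sure the slab cache with room for helper data
+ * exists, then add the helper to the global list. */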
+int nf_conntrack_helper_register(struct nf_conntrack_helper *me)
+{
+ int ret;
+ BUG_ON(me->timeout == 0);
+
+ ret = nf_conntrack_register_cache(NF_CT_F_HELP, "nf_conntrack:help",
+ sizeof(struct nf_conn)
+ + sizeof(union nf_conntrack_help)
+ + __alignof__(union nf_conntrack_help),
+ init_conntrack_for_helper);
+ if (ret < 0) {
+ printk(KERN_ERR "nf_conntrack_helper_reigster: Unable to create slab cache for conntracks\n");
+ return ret;
+ }
+ write_lock_bh(&nf_conntrack_lock);
+ list_prepend(&helpers, me);
+ write_unlock_bh(&nf_conntrack_lock);
+
+ return 0;
+}
+
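+/* Clear the helper pointer of a conntrack that uses the helper being
+ * unregistered. Always returns 0 so the LIST_FIND_W walk visits every
+ * entry. */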
+static inline int unhelp(struct nf_conntrack_tuple_hash *i,
+ const struct nf_conntrack_helper *me)
+{
+ if (nf_ct_tuplehash_to_ctrack(i)->helper == me) {
+ nf_conntrack_event(IPCT_HELPER, nf_ct_tuplehash_to_ctrack(i));
+ nf_ct_tuplehash_to_ctrack(i)->helper = NULL;
+ }
+ return 0;
+}
+
+void nf_conntrack_helper_unregister(struct nf_conntrack_helper *me)
+{
+ unsigned int i;
+ struct nf_conntrack_expect *exp, *tmp;
+
+ /* Need write lock here, to delete helper. */
+ write_lock_bh(&nf_conntrack_lock);
+ LIST_DELETE(&helpers, me);
+
+ /* Get rid of expectations */
+ list_for_each_entry_safe(exp, tmp, &nf_conntrack_expect_list, list) {
+ if (exp->master->helper == me && del_timer(&exp->timeout)) {
+ nf_ct_unlink_expect(exp);
+ nf_conntrack_expect_put(exp);
+ }
+ }
+
+ /* Get rid of expecteds, set helpers to NULL. */
+ LIST_FIND_W(&unconfirmed, unhelp, struct nf_conntrack_tuple_hash*, me);
+ for (i = 0; i < nf_conntrack_htable_size; i++)
+ LIST_FIND_W(&nf_conntrack_hash[i], unhelp,
+ struct nf_conntrack_tuple_hash *, me);
+ write_unlock_bh(&nf_conntrack_lock);
+
+ /* Someone could still be looking at the helper in a bh. */
+ synchronize_net();
+}
+
+/* Refresh conntrack for this many jiffies and do accounting if do_acct is 1 */
+void __nf_ct_refresh_acct(struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ const struct sk_buff *skb,
+ unsigned long extra_jiffies,
+ int do_acct)
+{
+ int event = 0;
+
+ NF_CT_ASSERT(ct->timeout.data == (unsigned long)ct);
+ NF_CT_ASSERT(skb);
+
+ write_lock_bh(&nf_conntrack_lock);
+
+ /* If not in hash table, timer will not be active yet */
+ if (!nf_ct_is_confirmed(ct)) {
+ ct->timeout.expires = extra_jiffies;
+ event = IPCT_REFRESH;
+ } else {
+ /* Need del_timer for race avoidance (may already be dying). */
+ if (del_timer(&ct->timeout)) {
+ ct->timeout.expires = jiffies + extra_jiffies;
+ add_timer(&ct->timeout);
+ event = IPCT_REFRESH;
+ }
+ }
+
+#ifdef CONFIG_NF_CT_ACCT
+ if (do_acct) {
+ ct->counters[CTINFO2DIR(ctinfo)].packets++;
+ ct->counters[CTINFO2DIR(ctinfo)].bytes +=
+ skb->len - (unsigned int)(skb->nh.raw - skb->data);
+ if ((ct->counters[CTINFO2DIR(ctinfo)].packets & 0x80000000)
+ || (ct->counters[CTINFO2DIR(ctinfo)].bytes & 0x80000000))
+ event |= IPCT_COUNTER_FILLING;
+ }
+#endif
+
+ write_unlock_bh(&nf_conntrack_lock);
+
+ /* must be unlocked when calling event cache */
+ if (event)
+ nf_conntrack_event_cache(event, skb);
+}
+
+/* Used by ipt_REJECT and ip6t_REJECT. */
+void __nf_conntrack_attach(struct sk_buff *nskb, struct sk_buff *skb)
+{
+ struct nf_conn *ct;
+ enum ip_conntrack_info ctinfo;
+
+ /* This ICMP is in the reverse direction to the packet which caused it */
+ ct = nf_ct_get(skb, &ctinfo);
+ if (CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL)
+ ctinfo = IP_CT_RELATED + IP_CT_IS_REPLY;
+ else
+ ctinfo = IP_CT_RELATED;
+
+ /* Attach to new skbuff, and increment count */
+ nskb->nfct = &ct->ct_general;
+ nskb->nfctinfo = ctinfo;
+ nf_conntrack_get(nskb->nfct);
+}
+
+static inline int
+do_iter(const struct nf_conntrack_tuple_hash *i,
+ int (*iter)(struct nf_conn *i, void *data),
+ void *data)
+{
+ return iter(nf_ct_tuplehash_to_ctrack(i), data);
+}
+
+/* Bring out ya dead! */
+static struct nf_conntrack_tuple_hash *
+get_next_corpse(int (*iter)(struct nf_conn *i, void *data),
+ void *data, unsigned int *bucket)
+{
+ struct nf_conntrack_tuple_hash *h = NULL;
+
+ write_lock_bh(&nf_conntrack_lock);
+ for (; *bucket < nf_conntrack_htable_size; (*bucket)++) {
+ h = LIST_FIND_W(&nf_conntrack_hash[*bucket], do_iter,
+ struct nf_conntrack_tuple_hash *, iter, data);
+ if (h)
+ break;
+ }
+ if (!h)
+ h = LIST_FIND_W(&unconfirmed, do_iter,
+ struct nf_conntrack_tuple_hash *, iter, data);
+ if (h)
+ atomic_inc(&nf_ct_tuplehash_to_ctrack(h)->ct_general.use);
+ write_unlock_bh(&nf_conntrack_lock);
+
+ return h;
+}
+
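+/* Walk all conntracks (hashed and unconfirmed) and kill every entry for
+ * which iter() returns true. */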
+void
+nf_ct_iterate_cleanup(int (*iter)(struct nf_conn *i, void *data), void *data)
+{
+ struct nf_conntrack_tuple_hash *h;
+ unsigned int bucket = 0;
+
+ while ((h = get_next_corpse(iter, data, &bucket)) != NULL) {
+ struct nf_conn *ct = nf_ct_tuplehash_to_ctrack(h);
+ /* Time to push up daisies... */
+ if (del_timer(&ct->timeout))
+ death_by_timeout((unsigned long)ct);
+ /* ... else the timer will get him soon. */
+
+ nf_ct_put(ct);
+ }
+}
+
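+/* Iterator that matches everything; used to flush the whole table. */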
+static int kill_all(struct nf_conn *i, void *data)
+{
+ return 1;
+}
+
+static void free_conntrack_hash(struct list_head *hash, int vmalloced, int size)
+{
+ if (vmalloced)
+ vfree(hash);
+ else
+ free_pages((unsigned long)hash,
+ get_order(sizeof(struct list_head) * size));
+}
+
+/* Mishearing the voices in his head, our hero wonders how he's
+ supposed to kill the mall. */
+void nf_conntrack_cleanup(void)
+{
+ int i;
+
+ /* This makes sure all current packets have passed through
+ netfilter framework. Roll on, two-stage module
+ delete... */
+ synchronize_net();
+
+ nf_ct_event_cache_flush();
+ i_see_dead_people:
+ nf_ct_iterate_cleanup(kill_all, NULL);
+ if (atomic_read(&nf_conntrack_count) != 0) {
+ schedule();
+ goto i_see_dead_people;
+ }
+
+ for (i = 0; i < NF_CT_F_NUM; i++) {
+ if (nf_ct_cache[i].use == 0)
+ continue;
+
+ NF_CT_ASSERT(nf_ct_cache[i].use == 1);
+ nf_ct_cache[i].use = 1;
+ nf_conntrack_unregister_cache(i);
+ }
+ kmem_cache_destroy(nf_conntrack_expect_cachep);
+ free_conntrack_hash(nf_conntrack_hash, nf_conntrack_vmalloc,
+ nf_conntrack_htable_size);
+}
+
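+/* Allocate the hash table, preferring contiguous pages and falling back
+ * to vmalloc, then initialise every bucket. */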
+static struct list_head *alloc_hashtable(int size, int *vmalloced)
+{
+ struct list_head *hash;
+ unsigned int i;
+
+ *vmalloced = 0;
+ hash = (void*)__get_free_pages(GFP_KERNEL,
+ get_order(sizeof(struct list_head)
+ * size));
+ if (!hash) {
+ *vmalloced = 1;
+ printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
+ hash = vmalloc(sizeof(struct list_head) * size);
+ }
+
+ if (hash)
+ for (i = 0; i < size; i++)
+ INIT_LIST_HEAD(&hash[i]);
+
+ return hash;
+}
+
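+/* Handler for the "hashsize" module parameter: allocate a new table,
+ * rehash every entry into it under the write lock, switch the globals
+ * over and free the old table. */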
+int set_hashsize(const char *val, struct kernel_param *kp)
+{
+ int i, bucket, hashsize, vmalloced;
+ int old_vmalloced, old_size;
+ int rnd;
+ struct list_head *hash, *old_hash;
+ struct nf_conntrack_tuple_hash *h;
+
+ /* On boot, we can set this without any fancy locking. */
+ if (!nf_conntrack_htable_size)
+ return param_set_uint(val, kp);
+
+ hashsize = simple_strtol(val, NULL, 0);
+ if (!hashsize)
+ return -EINVAL;
+
+ hash = alloc_hashtable(hashsize, &vmalloced);
+ if (!hash)
+ return -ENOMEM;
+
+ /* We have to rehash for the new table anyway, so we can also
+ * use a new random seed */
+ get_random_bytes(&rnd, 4);
+
+ write_lock_bh(&nf_conntrack_lock);
+ for (i = 0; i < nf_conntrack_htable_size; i++) {
+ while (!list_empty(&nf_conntrack_hash[i])) {
+ h = list_entry(nf_conntrack_hash[i].next,
+ struct nf_conntrack_tuple_hash, list);
+ list_del(&h->list);
+ bucket = __hash_conntrack(&h->tuple, hashsize, rnd);
+ list_add_tail(&h->list, &hash[bucket]);
+ }
+ }
+ old_size = nf_conntrack_htable_size;
+ old_vmalloced = nf_conntrack_vmalloc;
+ old_hash = nf_conntrack_hash;
+
+ nf_conntrack_htable_size = hashsize;
+ nf_conntrack_vmalloc = vmalloced;
+ nf_conntrack_hash = hash;
+ nf_conntrack_hash_rnd = rnd;
+ write_unlock_bh(&nf_conntrack_lock);
+
+ free_conntrack_hash(old_hash, old_vmalloced, old_size);
+ return 0;
+}
+
+module_param_call(hashsize, set_hashsize, param_get_uint,
+ &nf_conntrack_htable_size, 0600);
+
+int __init nf_conntrack_init(void)
+{
+ unsigned int i;
+ int ret;
+
+ /* Idea from tcp.c: use 1/16384 of memory. On i386: 32MB
+ * machine has 256 buckets. >= 1GB machines have 8192 buckets. */
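+ /* Illustrative arithmetic (assuming an 8-byte struct list_head,
+ * as on i386): a 128MB machine gives (128MB / 16384) / 8 = 1024
+ * buckets, so nf_conntrack_max defaults to 8 * 1024 = 8192. */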
+ if (!nf_conntrack_htable_size) {
+ nf_conntrack_htable_size
+ = (((num_physpages << PAGE_SHIFT) / 16384)
+ / sizeof(struct list_head));
+ if (num_physpages > (1024 * 1024 * 1024 / PAGE_SIZE))
+ nf_conntrack_htable_size = 8192;
+ if (nf_conntrack_htable_size < 16)
+ nf_conntrack_htable_size = 16;
+ }
+ nf_conntrack_max = 8 * nf_conntrack_htable_size;
+
+ printk("nf_conntrack version %s (%u buckets, %d max)\n",
+ NF_CONNTRACK_VERSION, nf_conntrack_htable_size,
+ nf_conntrack_max);
+
+ nf_conntrack_hash = alloc_hashtable(nf_conntrack_htable_size,
+ &nf_conntrack_vmalloc);
+ if (!nf_conntrack_hash) {
+ printk(KERN_ERR "Unable to create nf_conntrack_hash\n");
+ goto err_out;
+ }
+
+ ret = nf_conntrack_register_cache(NF_CT_F_BASIC, "nf_conntrack:basic",
+ sizeof(struct nf_conn), NULL);
+ if (ret < 0) {
+ printk(KERN_ERR "Unable to create nf_conn slab cache\n");
+ goto err_free_hash;
+ }
+
+ nf_conntrack_expect_cachep = kmem_cache_create("nf_conntrack_expect",
+ sizeof(struct nf_conntrack_expect),
+ 0, 0, NULL, NULL);
+ if (!nf_conntrack_expect_cachep) {
+ printk(KERN_ERR "Unable to create nf_expect slab cache\n");
+ goto err_free_conntrack_slab;
+ }
+
+ /* Don't NEED lock here, but good form anyway. */
+ write_lock_bh(&nf_conntrack_lock);
+ for (i = 0; i < PF_MAX; i++)
+ nf_ct_l3protos[i] = &nf_conntrack_generic_l3proto;
+ write_unlock_bh(&nf_conntrack_lock);
+
+ /* Set up fake conntrack:
+ - to never be deleted, not in any hashes */
+ atomic_set(&nf_conntrack_untracked.ct_general.use, 1);
+ /* - and make it look like a confirmed connection */
+ set_bit(IPS_CONFIRMED_BIT, &nf_conntrack_untracked.status);
+
+ return ret;
+
+err_free_conntrack_slab:
+ nf_conntrack_unregister_cache(NF_CT_F_BASIC);
+err_free_hash:
+ free_conntrack_hash(nf_conntrack_hash, nf_conntrack_vmalloc,
+ nf_conntrack_htable_size);
+err_out:
+ return -ENOMEM;
+}
diff --git a/net/netfilter/nf_conntrack_ftp.c b/net/netfilter/nf_conntrack_ftp.c
new file mode 100644
index 00000000000..65080e269f2
--- /dev/null
+++ b/net/netfilter/nf_conntrack_ftp.c
@@ -0,0 +1,698 @@
+/* FTP extension for connection tracking. */
+
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
+ * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ * - enable working with Layer 3 protocol independent connection tracking.
+ * - track EPRT and EPSV commands with IPv6 address.
+ *
+ * Derived from net/ipv4/netfilter/ip_conntrack_ftp.c
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/netfilter.h>
+#include <linux/ip.h>
+#include <linux/ipv6.h>
+#include <linux/ctype.h>
+#include <net/checksum.h>
+#include <net/tcp.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <linux/netfilter/nf_conntrack_ftp.h>
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Rusty Russell <rusty@rustcorp.com.au>");
+MODULE_DESCRIPTION("ftp connection tracking helper");
+
+/* This is slow, but it's simple. --RR */
+static char *ftp_buffer;
+
+static DEFINE_SPINLOCK(nf_ftp_lock);
+
+#define MAX_PORTS 8
+static u_int16_t ports[MAX_PORTS];
+static unsigned int ports_c;
+module_param_array(ports, ushort, &ports_c, 0400);
+
+static int loose;
+module_param(loose, int, 0600);
+
+unsigned int (*nf_nat_ftp_hook)(struct sk_buff **pskb,
+ enum ip_conntrack_info ctinfo,
+ enum ip_ct_ftp_type type,
+ unsigned int matchoff,
+ unsigned int matchlen,
+ struct nf_conntrack_expect *exp,
+ u32 *seq);
+EXPORT_SYMBOL_GPL(nf_nat_ftp_hook);
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(format, args...)
+#endif
+
+static int try_rfc959(const char *, size_t, struct nf_conntrack_man *, char);
+static int try_eprt(const char *, size_t, struct nf_conntrack_man *, char);
+static int try_epsv_response(const char *, size_t, struct nf_conntrack_man *,
+ char);
+
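+/* Control-connection patterns we scan for: the direction each command or
+ * response appears in, the characters that bracket its address argument,
+ * and the parser used to extract the address and port. */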
+static struct ftp_search {
+ enum ip_conntrack_dir dir;
+ const char *pattern;
+ size_t plen;
+ char skip;
+ char term;
+ enum ip_ct_ftp_type ftptype;
+ int (*getnum)(const char *, size_t, struct nf_conntrack_man *, char);
+} search[] = {
+ {
+ IP_CT_DIR_ORIGINAL,
+ "PORT", sizeof("PORT") - 1, ' ', '\r',
+ IP_CT_FTP_PORT,
+ try_rfc959,
+ },
+ {
+ IP_CT_DIR_REPLY,
+ "227 ", sizeof("227 ") - 1, '(', ')',
+ IP_CT_FTP_PASV,
+ try_rfc959,
+ },
+ {
+ IP_CT_DIR_ORIGINAL,
+ "EPRT", sizeof("EPRT") - 1, ' ', '\r',
+ IP_CT_FTP_EPRT,
+ try_eprt,
+ },
+ {
+ IP_CT_DIR_REPLY,
+ "229 ", sizeof("229 ") - 1, '(', ')',
+ IP_CT_FTP_EPSV,
+ try_epsv_response,
+ },
+};
+
+/* This code is based on inet_pton() in glibc-2.2.4 */
+static int
+get_ipv6_addr(const char *src, size_t dlen, struct in6_addr *dst, u_int8_t term)
+{
+ static const char xdigits[] = "0123456789abcdef";
+ u_int8_t tmp[16], *tp, *endp, *colonp;
+ int ch, saw_xdigit;
+ u_int32_t val;
+ size_t clen = 0;
+
+ tp = memset(tmp, '\0', sizeof(tmp));
+ endp = tp + sizeof(tmp);
+ colonp = NULL;
+
+ /* Leading :: requires some special handling. */
+ if (*src == ':'){
+ if (*++src != ':') {
+ DEBUGP("invalid \":\" at the head of addr\n");
+ return 0;
+ }
+ clen++;
+ }
+
+ saw_xdigit = 0;
+ val = 0;
+ while ((clen < dlen) && (*src != term)) {
+ const char *pch;
+
+ ch = tolower(*src++);
+ clen++;
+
+ pch = strchr(xdigits, ch);
+ if (pch != NULL) {
+ val <<= 4;
+ val |= (pch - xdigits);
+ if (val > 0xffff)
+ return 0;
+
+ saw_xdigit = 1;
+ continue;
+ }
+ if (ch != ':') {
+ DEBUGP("get_ipv6_addr: invalid char. \'%c\'\n", ch);
+ return 0;
+ }
+
+ if (!saw_xdigit) {
+ if (colonp) {
+ DEBUGP("invalid location of \"::\".\n");
+ return 0;
+ }
+ colonp = tp;
+ continue;
+ } else if (*src == term) {
+ DEBUGP("trancated IPv6 addr\n");
+ return 0;
+ }
+
+ if (tp + 2 > endp)
+ return 0;
+ *tp++ = (u_int8_t) (val >> 8) & 0xff;
+ *tp++ = (u_int8_t) val & 0xff;
+
+ saw_xdigit = 0;
+ val = 0;
+ continue;
+ }
+ if (saw_xdigit) {
+ if (tp + 2 > endp)
+ return 0;
+ *tp++ = (u_int8_t) (val >> 8) & 0xff;
+ *tp++ = (u_int8_t) val & 0xff;
+ }
+ if (colonp != NULL) {
+ /*
+ * Since some memmove()'s erroneously fail to handle
+ * overlapping regions, we'll do the shift by hand.
+ */
+ const int n = tp - colonp;
+ int i;
+
+ if (tp == endp)
+ return 0;
+
+ for (i = 1; i <= n; i++) {
+ endp[- i] = colonp[n - i];
+ colonp[n - i] = 0;
+ }
+ tp = endp;
+ }
+ if (tp != endp || (*src != term))
+ return 0;
+
+ memcpy(dst->s6_addr, tmp, sizeof(dst->s6_addr));
+ return clen;
+}
+
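+/* Parse up to array_size numbers separated by 'sep' and ended by 'term';
+ * returns the length of the number sequence up to the terminator, or 0
+ * on failure. */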
+static int try_number(const char *data, size_t dlen, u_int32_t array[],
+ int array_size, char sep, char term)
+{
+ u_int32_t i, len;
+
+ memset(array, 0, sizeof(array[0])*array_size);
+
+ /* Keep data pointing at next char. */
+ for (i = 0, len = 0; len < dlen && i < array_size; len++, data++) {
+ if (*data >= '0' && *data <= '9') {
+ array[i] = array[i]*10 + *data - '0';
+ }
+ else if (*data == sep)
+ i++;
+ else {
+ /* Unexpected character; true if it's the
+ terminator and we're finished. */
+ if (*data == term && i == array_size - 1)
+ return len;
+
+ DEBUGP("Char %u (got %u nums) `%u' unexpected\n",
+ len, i, *data);
+ return 0;
+ }
+ }
+ DEBUGP("Failed to fill %u numbers separated by %c\n", array_size, sep);
+
+ return 0;
+}
+
+/* Returns 0, or length of numbers: 192,168,1,1,5,6 */
+static int try_rfc959(const char *data, size_t dlen,
+ struct nf_conntrack_man *cmd, char term)
+{
+ int length;
+ u_int32_t array[6];
+
+ length = try_number(data, dlen, array, 6, ',', term);
+ if (length == 0)
+ return 0;
+
+ cmd->u3.ip = htonl((array[0] << 24) | (array[1] << 16) |
+ (array[2] << 8) | array[3]);
+ cmd->u.tcp.port = htons((array[4] << 8) | array[5]);
+ return length;
+}
+
+/* Grab port: number up to delimiter */
+static int get_port(const char *data, int start, size_t dlen, char delim,
+ u_int16_t *port)
+{
+ u_int16_t tmp_port = 0;
+ int i;
+
+ for (i = start; i < dlen; i++) {
+ /* Finished? */
+ if (data[i] == delim) {
+ if (tmp_port == 0)
+ break;
+ *port = htons(tmp_port);
+ DEBUGP("get_port: return %d\n", tmp_port);
+ return i + 1;
+ }
+ else if (data[i] >= '0' && data[i] <= '9')
+ tmp_port = tmp_port*10 + data[i] - '0';
+ else { /* Some other crap */
+ DEBUGP("get_port: invalid char.\n");
+ break;
+ }
+ }
+ return 0;
+}
+
+/* Returns 0, or length of numbers: |1|132.235.1.2|6275| or |2|3ffe::1|6275| */
+static int try_eprt(const char *data, size_t dlen, struct nf_conntrack_man *cmd,
+ char term)
+{
+ char delim;
+ int length;
+
+ /* First character is delimiter, then "1" for IPv4 or "2" for IPv6,
+ then delimiter again. */
+ if (dlen <= 3) {
+ DEBUGP("EPRT: too short\n");
+ return 0;
+ }
+ delim = data[0];
+ if (isdigit(delim) || delim < 33 || delim > 126 || data[2] != delim) {
+ DEBUGP("try_eprt: invalid delimitter.\n");
+ return 0;
+ }
+
+ if ((cmd->l3num == PF_INET && data[1] != '1') ||
+ (cmd->l3num == PF_INET6 && data[1] != '2')) {
+ DEBUGP("EPRT: invalid protocol number.\n");
+ return 0;
+ }
+
+ DEBUGP("EPRT: Got %c%c%c\n", delim, data[1], delim);
+
+ if (data[1] == '1') {
+ u_int32_t array[4];
+
+ /* Now we have IP address. */
+ length = try_number(data + 3, dlen - 3, array, 4, '.', delim);
+ if (length != 0)
+ cmd->u3.ip = htonl((array[0] << 24) | (array[1] << 16)
+ | (array[2] << 8) | array[3]);
+ } else {
+ /* Now we have IPv6 address. */
+ length = get_ipv6_addr(data + 3, dlen - 3,
+ (struct in6_addr *)cmd->u3.ip6, delim);
+ }
+
+ if (length == 0)
+ return 0;
+ DEBUGP("EPRT: Got IP address!\n");
+ /* Start offset includes initial "|1|", and trailing delimiter */
+ return get_port(data, 3 + length + 1, dlen, delim, &cmd->u.tcp.port);
+}
+
+/* Returns 0, or length of numbers: |||6446| */
+static int try_epsv_response(const char *data, size_t dlen,
+ struct nf_conntrack_man *cmd, char term)
+{
+ char delim;
+
+ /* Three delimiters. */
+ if (dlen <= 3) return 0;
+ delim = data[0];
+ if (isdigit(delim) || delim < 33 || delim > 126
+ || data[1] != delim || data[2] != delim)
+ return 0;
+
+ return get_port(data, 3, dlen, delim, &cmd->u.tcp.port);
+}
+
+/* Return 1 for match, 0 for accept, -1 for partial. */
+static int find_pattern(const char *data, size_t dlen,
+ const char *pattern, size_t plen,
+ char skip, char term,
+ unsigned int *numoff,
+ unsigned int *numlen,
+ struct nf_conntrack_man *cmd,
+ int (*getnum)(const char *, size_t,
+ struct nf_conntrack_man *, char))
+{
+ size_t i;
+
+ DEBUGP("find_pattern `%s': dlen = %u\n", pattern, dlen);
+ if (dlen == 0)
+ return 0;
+
+ if (dlen <= plen) {
+ /* Short packet: try for partial? */
+ if (strnicmp(data, pattern, dlen) == 0)
+ return -1;
+ else return 0;
+ }
+
+ if (strnicmp(data, pattern, plen) != 0) {
+#if 0
+ size_t i;
+
+ DEBUGP("ftp: string mismatch\n");
+ for (i = 0; i < plen; i++) {
+ DEBUGP("ftp:char %u `%c'(%u) vs `%c'(%u)\n",
+ i, data[i], data[i],
+ pattern[i], pattern[i]);
+ }
+#endif
+ return 0;
+ }
+
+ DEBUGP("Pattern matches!\n");
+ /* Now we've found the constant string, try to skip
+ to the 'skip' character */
+ for (i = plen; data[i] != skip; i++)
+ if (i == dlen - 1) return -1;
+
+ /* Skip over the last character */
+ i++;
+
+ DEBUGP("Skipped up to `%c'!\n", skip);
+
+ *numoff = i;
+ *numlen = getnum(data + i, dlen - i, cmd, term);
+ if (!*numlen)
+ return -1;
+
+ DEBUGP("Match succeeded!\n");
+ return 1;
+}
+
+/* Look up to see if we're just after a \n. */
+static int find_nl_seq(u32 seq, const struct ip_ct_ftp_master *info, int dir)
+{
+ unsigned int i;
+
+ for (i = 0; i < info->seq_aft_nl_num[dir]; i++)
+ if (info->seq_aft_nl[dir][i] == seq)
+ return 1;
+ return 0;
+}
+
+/* We don't update if it's older than what we have. */
+static void update_nl_seq(u32 nl_seq, struct ip_ct_ftp_master *info, int dir,
+ struct sk_buff *skb)
+{
+ unsigned int i, oldest = NUM_SEQ_TO_REMEMBER;
+
+ /* Look for oldest: if we find exact match, we're done. */
+ for (i = 0; i < info->seq_aft_nl_num[dir]; i++) {
+ if (info->seq_aft_nl[dir][i] == nl_seq)
+ return;
+
+ if (oldest == info->seq_aft_nl_num[dir]
+ || before(info->seq_aft_nl[dir][i], oldest))
+ oldest = i;
+ }
+
+ if (info->seq_aft_nl_num[dir] < NUM_SEQ_TO_REMEMBER) {
+ info->seq_aft_nl[dir][info->seq_aft_nl_num[dir]++] = nl_seq;
+ nf_conntrack_event_cache(IPCT_HELPINFO_VOLATILE, skb);
+ } else if (oldest != NUM_SEQ_TO_REMEMBER) {
+ info->seq_aft_nl[dir][oldest] = nl_seq;
+ nf_conntrack_event_cache(IPCT_HELPINFO_VOLATILE, skb);
+ }
+}
+
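+/* The FTP helper itself: scan the control connection for PORT/PASV/EPRT/
+ * EPSV and, when an address and port are found, set up an expectation for
+ * the data connection (the NAT hook, if present, mangles the packet and
+ * registers the expectation itself). */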
+static int help(struct sk_buff **pskb,
+ unsigned int protoff,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo)
+{
+ unsigned int dataoff, datalen;
+ struct tcphdr _tcph, *th;
+ char *fb_ptr;
+ int ret;
+ u32 seq;
+ int dir = CTINFO2DIR(ctinfo);
+ unsigned int matchlen, matchoff;
+ struct ip_ct_ftp_master *ct_ftp_info = &ct->help->ct_ftp_info;
+ struct nf_conntrack_expect *exp;
+ struct nf_conntrack_man cmd = {};
+
+ unsigned int i;
+ int found = 0, ends_in_nl;
+
+ /* Until there's been traffic both ways, don't look in packets. */
+ if (ctinfo != IP_CT_ESTABLISHED
+ && ctinfo != IP_CT_ESTABLISHED+IP_CT_IS_REPLY) {
+ DEBUGP("ftp: Conntrackinfo = %u\n", ctinfo);
+ return NF_ACCEPT;
+ }
+
+ th = skb_header_pointer(*pskb, protoff, sizeof(_tcph), &_tcph);
+ if (th == NULL)
+ return NF_ACCEPT;
+
+ dataoff = protoff + th->doff * 4;
+ /* No data? */
+ if (dataoff >= (*pskb)->len) {
+ DEBUGP("ftp: dataoff(%u) >= skblen(%u)\n", dataoff,
+ (*pskb)->len);
+ return NF_ACCEPT;
+ }
+ datalen = (*pskb)->len - dataoff;
+
+ spin_lock_bh(&nf_ftp_lock);
+ fb_ptr = skb_header_pointer(*pskb, dataoff, datalen, ftp_buffer);
+ BUG_ON(fb_ptr == NULL);
+
+ ends_in_nl = (fb_ptr[datalen - 1] == '\n');
+ seq = ntohl(th->seq) + datalen;
+
+ /* Look up to see if we're just after a \n. */
+ if (!find_nl_seq(ntohl(th->seq), ct_ftp_info, dir)) {
+ /* Now if this ends in \n, update ftp info. */
+ DEBUGP("nf_conntrack_ftp_help: wrong seq pos %s(%u) or %s(%u)\n",
+ ct_ftp_info->seq_aft_nl_num[dir] > 0 ? "" : "(UNSET)",
+ ct_ftp_info->seq_aft_nl[dir][0],
+ ct_ftp_info->seq_aft_nl_num[dir] > 1 ? "" : "(UNSET)",
+ ct_ftp_info->seq_aft_nl[dir][1]);
+ ret = NF_ACCEPT;
+ goto out_update_nl;
+ }
+
+ /* Initialize IP/IPv6 addr to expected address (it's not mentioned
+ in EPSV responses) */
+ cmd.l3num = ct->tuplehash[dir].tuple.src.l3num;
+ memcpy(cmd.u3.all, &ct->tuplehash[dir].tuple.src.u3.all,
+ sizeof(cmd.u3.all));
+
+ for (i = 0; i < ARRAY_SIZE(search); i++) {
+ if (search[i].dir != dir) continue;
+
+ found = find_pattern(fb_ptr, datalen,
+ search[i].pattern,
+ search[i].plen,
+ search[i].skip,
+ search[i].term,
+ &matchoff, &matchlen,
+ &cmd,
+ search[i].getnum);
+ if (found) break;
+ }
+ if (found == -1) {
+ /* We don't usually drop packets. After all, this is
+ connection tracking, not packet filtering.
+ However, it is necessary for accurate tracking in
+ this case. */
+ if (net_ratelimit())
+ printk("conntrack_ftp: partial %s %u+%u\n",
+ search[i].pattern,
+ ntohl(th->seq), datalen);
+ ret = NF_DROP;
+ goto out;
+ } else if (found == 0) { /* No match */
+ ret = NF_ACCEPT;
+ goto out_update_nl;
+ }
+
+ DEBUGP("conntrack_ftp: match `%.*s' (%u bytes at %u)\n",
+ (int)matchlen, fb_ptr + matchoff,
+ matchlen, ntohl(th->seq) + matchoff);
+
+ exp = nf_conntrack_expect_alloc(ct);
+ if (exp == NULL) {
+ ret = NF_DROP;
+ goto out;
+ }
+
+ /* We refer to the reverse direction ("!dir") tuples here,
+ * because we're expecting something in the other direction.
+ * Doesn't matter unless NAT is happening. */
+ exp->tuple.dst.u3 = ct->tuplehash[!dir].tuple.dst.u3;
+
+ /* Update the ftp info */
+ if ((cmd.l3num == ct->tuplehash[dir].tuple.src.l3num) &&
+ memcmp(&cmd.u3.all, &ct->tuplehash[dir].tuple.src.u3.all,
+ sizeof(cmd.u3.all))) {
+ /* Enrico Scholz's passive FTP to partially RNAT'd ftp
+ server: it really wants us to connect to a
+ different IP address. Simply don't record it for
+ NAT. */
+ if (cmd.l3num == PF_INET) {
+ DEBUGP("conntrack_ftp: NOT RECORDING: %u,%u,%u,%u != %u.%u.%u.%u\n",
+ NIPQUAD(cmd.u3.ip),
+ NIPQUAD(ct->tuplehash[dir].tuple.src.u3.ip));
+ } else {
+ DEBUGP("conntrack_ftp: NOT RECORDING: %x:%x:%x:%x:%x:%x:%x:%x != %x:%x:%x:%x:%x:%x:%x:%x\n",
+ NIP6(*((struct in6_addr *)cmd.u3.ip6)),
+ NIP6(*((struct in6_addr *)ct->tuplehash[dir]
+ .tuple.src.u3.ip6)));
+ }
+
+ /* Thanks to Cristiano Lincoln Mattos
+ <lincoln@cesar.org.br> for reporting this potential
+ problem (DMZ machines opening holes to internal
+ networks, or the packet filter itself). */
+ if (!loose) {
+ ret = NF_ACCEPT;
+ goto out_put_expect;
+ }
+ memcpy(&exp->tuple.dst.u3, &cmd.u3.all,
+ sizeof(exp->tuple.dst.u3));
+ }
+
+ exp->tuple.src.u3 = ct->tuplehash[!dir].tuple.src.u3;
+ exp->tuple.src.l3num = cmd.l3num;
+ exp->tuple.src.u.tcp.port = 0;
+ exp->tuple.dst.u.tcp.port = cmd.u.tcp.port;
+ exp->tuple.dst.protonum = IPPROTO_TCP;
+
+ exp->mask = (struct nf_conntrack_tuple)
+ { .src = { .l3num = 0xFFFF,
+ .u = { .tcp = { 0 }},
+ },
+ .dst = { .protonum = 0xFF,
+ .u = { .tcp = { 0xFFFF }},
+ },
+ };
+ if (cmd.l3num == PF_INET) {
+ exp->mask.src.u3.ip = 0xFFFFFFFF;
+ exp->mask.dst.u3.ip = 0xFFFFFFFF;
+ } else {
+ memset(exp->mask.src.u3.ip6, 0xFF,
+ sizeof(exp->mask.src.u3.ip6));
+ memset(exp->mask.dst.u3.ip6, 0xFF,
+ sizeof(exp->mask.src.u3.ip6));
+ }
+
+ exp->expectfn = NULL;
+ exp->flags = 0;
+
+ /* Now, NAT might want to mangle the packet, and register the
+ * (possibly changed) expectation itself. */
+ if (nf_nat_ftp_hook)
+ ret = nf_nat_ftp_hook(pskb, ctinfo, search[i].ftptype,
+ matchoff, matchlen, exp, &seq);
+ else {
+ /* Can't expect this? Best to drop packet now. */
+ if (nf_conntrack_expect_related(exp) != 0)
+ ret = NF_DROP;
+ else
+ ret = NF_ACCEPT;
+ }
+
+out_put_expect:
+ nf_conntrack_expect_put(exp);
+
+out_update_nl:
+ /* Now if this ends in \n, update ftp info. Seq may have been
+ * adjusted by NAT code. */
+ if (ends_in_nl)
+ update_nl_seq(seq, ct_ftp_info, dir, *pskb);
+ out:
+ spin_unlock_bh(&nf_ftp_lock);
+ return ret;
+}
+
+static struct nf_conntrack_helper ftp[MAX_PORTS][2];
+static char ftp_names[MAX_PORTS][2][sizeof("ftp-65535")];
+
+/* don't make this __exit, since it's called from __init ! */
+static void fini(void)
+{
+ int i, j;
+ for (i = 0; i < ports_c; i++) {
+ for (j = 0; j < 2; j++) {
+ if (ftp[i][j].me == NULL)
+ continue;
+
+ DEBUGP("nf_ct_ftp: unregistering helper for pf: %d "
+ "port: %d\n",
+ ftp[i][j].tuple.src.l3num, ports[i]);
+ nf_conntrack_helper_unregister(&ftp[i][j]);
+ }
+ }
+
+ kfree(ftp_buffer);
+}
+
+static int __init init(void)
+{
+ int i, j = -1, ret = 0;
+ char *tmpname;
+
+ ftp_buffer = kmalloc(65536, GFP_KERNEL);
+ if (!ftp_buffer)
+ return -ENOMEM;
+
+ if (ports_c == 0)
+ ports[ports_c++] = FTP_PORT;
+
+ /* FIXME should be configurable whether IPv4 and IPv6 FTP connections
+ are tracked or not - YK */
+ for (i = 0; i < ports_c; i++) {
+ memset(&ftp[i], 0, sizeof(struct nf_conntrack_helper));
+
+ ftp[i][0].tuple.src.l3num = PF_INET;
+ ftp[i][1].tuple.src.l3num = PF_INET6;
+ for (j = 0; j < 2; j++) {
+ ftp[i][j].tuple.src.u.tcp.port = htons(ports[i]);
+ ftp[i][j].tuple.dst.protonum = IPPROTO_TCP;
+ ftp[i][j].mask.src.u.tcp.port = 0xFFFF;
+ ftp[i][j].mask.dst.protonum = 0xFF;
+ ftp[i][j].max_expected = 1;
+ ftp[i][j].timeout = 5 * 60; /* 5 Minutes */
+ ftp[i][j].me = THIS_MODULE;
+ ftp[i][j].help = help;
+ tmpname = &ftp_names[i][j][0];
+ if (ports[i] == FTP_PORT)
+ sprintf(tmpname, "ftp");
+ else
+ sprintf(tmpname, "ftp-%d", ports[i]);
+ ftp[i][j].name = tmpname;
+
+ DEBUGP("nf_ct_ftp: registering helper for pf: %d "
+ "port: %d\n",
+ ftp[i][j].tuple.src.l3num, ports[i]);
+ ret = nf_conntrack_helper_register(&ftp[i][j]);
+ if (ret) {
+ printk("nf_ct_ftp: failed to register helper "
+ " for pf: %d port: %d\n",
+ ftp[i][j].tuple.src.l3num, ports[i]);
+ fini();
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
+module_init(init);
+module_exit(fini);
diff --git a/net/netfilter/nf_conntrack_l3proto_generic.c b/net/netfilter/nf_conntrack_l3proto_generic.c
new file mode 100644
index 00000000000..7de4f06c63c
--- /dev/null
+++ b/net/netfilter/nf_conntrack_l3proto_generic.c
@@ -0,0 +1,98 @@
+/*
+ * (C) 2003,2004 USAGI/WIDE Project <http://www.linux-ipv6.org>
+ *
+ * Based largely upon the original ip_conntrack code which
+ * had the following copyright information:
+ *
+ * (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Author:
+ * Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/ip.h>
+#include <linux/netfilter.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/icmp.h>
+#include <linux/sysctl.h>
+#include <net/ip.h>
+
+#include <linux/netfilter_ipv4.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_protocol.h>
+#include <net/netfilter/nf_conntrack_l3proto.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(format, args...)
+#endif
+
+DECLARE_PER_CPU(struct nf_conntrack_stat, nf_conntrack_stat);
+
+static int generic_pkt_to_tuple(const struct sk_buff *skb, unsigned int nhoff,
+ struct nf_conntrack_tuple *tuple)
+{
+ memset(&tuple->src.u3, 0, sizeof(tuple->src.u3));
+ memset(&tuple->dst.u3, 0, sizeof(tuple->dst.u3));
+
+ return 1;
+}
+
+static int generic_invert_tuple(struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_tuple *orig)
+{
+ memset(&tuple->src.u3, 0, sizeof(tuple->src.u3));
+ memset(&tuple->dst.u3, 0, sizeof(tuple->dst.u3));
+
+ return 1;
+}
+
+static int generic_print_tuple(struct seq_file *s,
+ const struct nf_conntrack_tuple *tuple)
+{
+ return 0;
+}
+
+static int generic_print_conntrack(struct seq_file *s,
+ const struct nf_conn *conntrack)
+{
+ return 0;
+}
+
+static int
+generic_prepare(struct sk_buff **pskb, unsigned int hooknum,
+ unsigned int *dataoff, u_int8_t *protonum)
+{
+ /* Never track !!! */
+ return -NF_ACCEPT;
+}
+
+
+static u_int32_t generic_get_features(const struct nf_conntrack_tuple *tuple)
+
+{
+ return NF_CT_F_BASIC;
+}
+
+struct nf_conntrack_l3proto nf_conntrack_generic_l3proto = {
+ .l3proto = PF_UNSPEC,
+ .name = "unknown",
+ .pkt_to_tuple = generic_pkt_to_tuple,
+ .invert_tuple = generic_invert_tuple,
+ .print_tuple = generic_print_tuple,
+ .print_conntrack = generic_print_conntrack,
+ .prepare = generic_prepare,
+ .get_features = generic_get_features,
+ .me = THIS_MODULE,
+};
diff --git a/net/netfilter/nf_conntrack_proto_generic.c b/net/netfilter/nf_conntrack_proto_generic.c
new file mode 100644
index 00000000000..36425f6c833
--- /dev/null
+++ b/net/netfilter/nf_conntrack_proto_generic.c
@@ -0,0 +1,85 @@
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ * - enable working with L3 protocol independent connection tracking.
+ *
+ * Derived from net/ipv4/netfilter/ip_conntrack_proto_generic.c
+ */
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/netfilter.h>
+#include <net/netfilter/nf_conntrack_protocol.h>
+
+unsigned long nf_ct_generic_timeout = 600*HZ;
+
+static int generic_pkt_to_tuple(const struct sk_buff *skb,
+ unsigned int dataoff,
+ struct nf_conntrack_tuple *tuple)
+{
+ tuple->src.u.all = 0;
+ tuple->dst.u.all = 0;
+
+ return 1;
+}
+
+static int generic_invert_tuple(struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_tuple *orig)
+{
+ tuple->src.u.all = 0;
+ tuple->dst.u.all = 0;
+
+ return 1;
+}
+
+/* Print out the per-protocol part of the tuple. */
+static int generic_print_tuple(struct seq_file *s,
+ const struct nf_conntrack_tuple *tuple)
+{
+ return 0;
+}
+
+/* Print out the private part of the conntrack. */
+static int generic_print_conntrack(struct seq_file *s,
+ const struct nf_conn *state)
+{
+ return 0;
+}
+
+/* Returns verdict for packet, or -1 for invalid. */
+static int packet(struct nf_conn *conntrack,
+ const struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ int pf,
+ unsigned int hooknum)
+{
+ nf_ct_refresh_acct(conntrack, ctinfo, skb, nf_ct_generic_timeout);
+ return NF_ACCEPT;
+}
+
+/* Called when a new connection for this protocol is found. */
+static int new(struct nf_conn *conntrack, const struct sk_buff *skb,
+ unsigned int dataoff)
+{
+ return 1;
+}
+
+struct nf_conntrack_protocol nf_conntrack_generic_protocol =
+{
+ .l3proto = PF_UNSPEC,
+ .proto = 0,
+ .name = "unknown",
+ .pkt_to_tuple = generic_pkt_to_tuple,
+ .invert_tuple = generic_invert_tuple,
+ .print_tuple = generic_print_tuple,
+ .print_conntrack = generic_print_conntrack,
+ .packet = packet,
+ .new = new,
+};
diff --git a/net/netfilter/nf_conntrack_proto_sctp.c b/net/netfilter/nf_conntrack_proto_sctp.c
new file mode 100644
index 00000000000..3a600f77b4e
--- /dev/null
+++ b/net/netfilter/nf_conntrack_proto_sctp.c
@@ -0,0 +1,670 @@
+/*
+ * Connection tracking protocol helper module for SCTP.
+ *
+ * SCTP is defined in RFC 2960. References to various sections in this code
+ * are to this RFC.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * 17 Oct 2004: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ * - enable working with L3 protocol independent connection tracking.
+ *
+ * Derived from net/ipv4/ip_conntrack_sctp.c
+ */
+
+/*
+ * Added support for proc manipulation of timeouts.
+ */
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/netfilter.h>
+#include <linux/module.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/sctp.h>
+#include <linux/string.h>
+#include <linux/seq_file.h>
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_protocol.h>
+
+#if 0
+#define DEBUGP(format, ...) printk(format, ## __VA_ARGS__)
+#else
+#define DEBUGP(format, args...)
+#endif
+
+/* Protects conntrack->proto.sctp */
+static DEFINE_RWLOCK(sctp_lock);
+
+/* FIXME: Examine ipfilter's timeouts and conntrack transitions more
+ closely. They're more complex. --RR
+
+ And so for me for SCTP :D -Kiran */
+
+static const char *sctp_conntrack_names[] = {
+ "NONE",
+ "CLOSED",
+ "COOKIE_WAIT",
+ "COOKIE_ECHOED",
+ "ESTABLISHED",
+ "SHUTDOWN_SENT",
+ "SHUTDOWN_RECD",
+ "SHUTDOWN_ACK_SENT",
+};
+
+#define SECS * HZ
+#define MINS * 60 SECS
+#define HOURS * 60 MINS
+#define DAYS * 24 HOURS
+
+static unsigned long nf_ct_sctp_timeout_closed = 10 SECS;
+static unsigned long nf_ct_sctp_timeout_cookie_wait = 3 SECS;
+static unsigned long nf_ct_sctp_timeout_cookie_echoed = 3 SECS;
+static unsigned long nf_ct_sctp_timeout_established = 5 DAYS;
+static unsigned long nf_ct_sctp_timeout_shutdown_sent = 300 SECS / 1000;
+static unsigned long nf_ct_sctp_timeout_shutdown_recd = 300 SECS / 1000;
+static unsigned long nf_ct_sctp_timeout_shutdown_ack_sent = 3 SECS;
+
+static unsigned long * sctp_timeouts[]
+= { NULL, /* SCTP_CONNTRACK_NONE */
+ &nf_ct_sctp_timeout_closed, /* SCTP_CONNTRACK_CLOSED */
+ &nf_ct_sctp_timeout_cookie_wait, /* SCTP_CONNTRACK_COOKIE_WAIT */
+ &nf_ct_sctp_timeout_cookie_echoed, /* SCTP_CONNTRACK_COOKIE_ECHOED */
+ &nf_ct_sctp_timeout_established, /* SCTP_CONNTRACK_ESTABLISHED */
+ &nf_ct_sctp_timeout_shutdown_sent, /* SCTP_CONNTRACK_SHUTDOWN_SENT */
+ &nf_ct_sctp_timeout_shutdown_recd, /* SCTP_CONNTRACK_SHUTDOWN_RECD */
+ &nf_ct_sctp_timeout_shutdown_ack_sent /* SCTP_CONNTRACK_SHUTDOWN_ACK_SENT */
+ };
+
+#define sNO SCTP_CONNTRACK_NONE
+#define sCL SCTP_CONNTRACK_CLOSED
+#define sCW SCTP_CONNTRACK_COOKIE_WAIT
+#define sCE SCTP_CONNTRACK_COOKIE_ECHOED
+#define sES SCTP_CONNTRACK_ESTABLISHED
+#define sSS SCTP_CONNTRACK_SHUTDOWN_SENT
+#define sSR SCTP_CONNTRACK_SHUTDOWN_RECD
+#define sSA SCTP_CONNTRACK_SHUTDOWN_ACK_SENT
+#define sIV SCTP_CONNTRACK_MAX
+
+/*
+ These are the descriptions of the states:
+
+NOTE: These state names are tantalizingly similar to the states of an
+SCTP endpoint. But the interpretation of the states is a little different,
+considering that these are the states of the connection and not of an end
+point. Please note the subtleties. -Kiran
+
+NONE - Nothing so far.
+COOKIE WAIT - We have seen an INIT chunk in the original direction, or also
+ an INIT_ACK chunk in the reply direction.
+COOKIE ECHOED - We have seen a COOKIE_ECHO chunk in the original direction.
+ESTABLISHED - We have seen a COOKIE_ACK in the reply direction.
+SHUTDOWN_SENT - We have seen a SHUTDOWN chunk in the original direction.
+SHUTDOWN_RECD - We have seen a SHUTDOWN chunk in the reply direction.
+SHUTDOWN_ACK_SENT - We have seen a SHUTDOWN_ACK chunk in the direction opposite
+ to that of the SHUTDOWN chunk.
+CLOSED - We have seen a SHUTDOWN_COMPLETE chunk in the direction of
+ the SHUTDOWN chunk. Connection is closed.
+*/
+
+/* TODO
+ - I have assumed that the first INIT is in the original direction.
+ This messes things up when an INIT comes in the reply direction in
+ CLOSED state.
+ - Check the error type in the reply dir before transitioning from
+ COOKIE ECHOED to CLOSED.
+ - Sec 5.2.4 of RFC 2960
+ - Multi Homing support.
+*/
+
+/* SCTP conntrack state transitions */
+static enum sctp_conntrack sctp_conntracks[2][9][SCTP_CONNTRACK_MAX] = {
+ {
+/* ORIGINAL */
+/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA */
+/* init */ {sCW, sCW, sCW, sCE, sES, sSS, sSR, sSA},
+/* init_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},
+/* abort */ {sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
+/* shutdown */ {sCL, sCL, sCW, sCE, sSS, sSS, sSR, sSA},
+/* shutdown_ack */ {sSA, sCL, sCW, sCE, sES, sSA, sSA, sSA},
+/* error */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Cant have Stale cookie*/
+/* cookie_echo */ {sCL, sCL, sCE, sCE, sES, sSS, sSR, sSA},/* 5.2.4 - Big TODO */
+/* cookie_ack */ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Cant come in orig dir */
+/* shutdown_comp*/ {sCL, sCL, sCW, sCE, sES, sSS, sSR, sCL}
+ },
+ {
+/* REPLY */
+/* sNO, sCL, sCW, sCE, sES, sSS, sSR, sSA */
+/* init */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* INIT in sCL Big TODO */
+/* init_ack */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},
+/* abort */ {sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL},
+/* shutdown */ {sIV, sCL, sCW, sCE, sSR, sSS, sSR, sSA},
+/* shutdown_ack */ {sIV, sCL, sCW, sCE, sES, sSA, sSA, sSA},
+/* error */ {sIV, sCL, sCW, sCL, sES, sSS, sSR, sSA},
+/* cookie_echo */ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sSA},/* Cant come in reply dir */
+/* cookie_ack */ {sIV, sCL, sCW, sES, sES, sSS, sSR, sSA},
+/* shutdown_comp*/ {sIV, sCL, sCW, sCE, sES, sSS, sSR, sCL}
+ }
+};
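+
+/* The table is indexed as sctp_conntracks[direction][chunk index][current
+   state]; the chunk index for each chunk type is assigned in new_state()
+   below.  E.g. an INIT (index 0) seen in the ORIGINAL direction while in
+   CLOSED (sCL) moves the connection to COOKIE_WAIT (sCW). */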
+
+static int sctp_pkt_to_tuple(const struct sk_buff *skb,
+ unsigned int dataoff,
+ struct nf_conntrack_tuple *tuple)
+{
+ sctp_sctphdr_t _hdr, *hp;
+
+ DEBUGP(__FUNCTION__);
+ DEBUGP("\n");
+
+ /* Actually only need first 8 bytes. */
+ hp = skb_header_pointer(skb, dataoff, 8, &_hdr);
+ if (hp == NULL)
+ return 0;
+
+ tuple->src.u.sctp.port = hp->source;
+ tuple->dst.u.sctp.port = hp->dest;
+ return 1;
+}
+
+static int sctp_invert_tuple(struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_tuple *orig)
+{
+ DEBUGP(__FUNCTION__);
+ DEBUGP("\n");
+
+ tuple->src.u.sctp.port = orig->dst.u.sctp.port;
+ tuple->dst.u.sctp.port = orig->src.u.sctp.port;
+ return 1;
+}
+
+/* Print out the per-protocol part of the tuple. */
+static int sctp_print_tuple(struct seq_file *s,
+ const struct nf_conntrack_tuple *tuple)
+{
+ DEBUGP(__FUNCTION__);
+ DEBUGP("\n");
+
+ return seq_printf(s, "sport=%hu dport=%hu ",
+ ntohs(tuple->src.u.sctp.port),
+ ntohs(tuple->dst.u.sctp.port));
+}
+
+/* Print out the private part of the conntrack. */
+static int sctp_print_conntrack(struct seq_file *s,
+ const struct nf_conn *conntrack)
+{
+ enum sctp_conntrack state;
+
+ DEBUGP(__FUNCTION__);
+ DEBUGP("\n");
+
+ read_lock_bh(&sctp_lock);
+ state = conntrack->proto.sctp.state;
+ read_unlock_bh(&sctp_lock);
+
+ return seq_printf(s, "%s ", sctp_conntrack_names[state]);
+}
+
+#define for_each_sctp_chunk(skb, sch, _sch, offset, dataoff, count) \
+for (offset = dataoff + sizeof(sctp_sctphdr_t), count = 0; \
+ offset < skb->len && \
+ (sch = skb_header_pointer(skb, offset, sizeof(_sch), &_sch)); \
+ offset += (htons(sch->length) + 3) & ~3, count++)
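+
+/* Note: the chunk length field does not cover the padding that rounds each
+   chunk up to a 4-byte boundary on the wire (RFC 2960), hence the
+   "(length + 3) & ~3" when stepping to the next chunk. */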
+
+/* Some validity checks to make sure the chunks are fine */
+static int do_basic_checks(struct nf_conn *conntrack,
+ const struct sk_buff *skb,
+ unsigned int dataoff,
+ char *map)
+{
+ u_int32_t offset, count;
+ sctp_chunkhdr_t _sch, *sch;
+ int flag;
+
+ DEBUGP(__FUNCTION__);
+ DEBUGP("\n");
+
+ flag = 0;
+
+ for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
+ DEBUGP("Chunk Num: %d Type: %d\n", count, sch->type);
+
+ if (sch->type == SCTP_CID_INIT
+ || sch->type == SCTP_CID_INIT_ACK
+ || sch->type == SCTP_CID_SHUTDOWN_COMPLETE) {
+ flag = 1;
+ }
+
+ /* Cookie Ack/Echo chunks not the first OR
+ Init / Init Ack / Shutdown compl chunks not the only chunks */
+ if ((sch->type == SCTP_CID_COOKIE_ACK
+ || sch->type == SCTP_CID_COOKIE_ECHO
+ || flag)
+ && count !=0 ) {
+ DEBUGP("Basic checks failed\n");
+ return 1;
+ }
+
+ if (map) {
+ set_bit(sch->type, (void *)map);
+ }
+ }
+
+ DEBUGP("Basic checks passed\n");
+ return 0;
+}
+
+static int new_state(enum ip_conntrack_dir dir,
+ enum sctp_conntrack cur_state,
+ int chunk_type)
+{
+ int i;
+
+ DEBUGP(__FUNCTION__);
+ DEBUGP("\n");
+
+ DEBUGP("Chunk type: %d\n", chunk_type);
+
+ switch (chunk_type) {
+ case SCTP_CID_INIT:
+ DEBUGP("SCTP_CID_INIT\n");
+ i = 0; break;
+ case SCTP_CID_INIT_ACK:
+ DEBUGP("SCTP_CID_INIT_ACK\n");
+ i = 1; break;
+ case SCTP_CID_ABORT:
+ DEBUGP("SCTP_CID_ABORT\n");
+ i = 2; break;
+ case SCTP_CID_SHUTDOWN:
+ DEBUGP("SCTP_CID_SHUTDOWN\n");
+ i = 3; break;
+ case SCTP_CID_SHUTDOWN_ACK:
+ DEBUGP("SCTP_CID_SHUTDOWN_ACK\n");
+ i = 4; break;
+ case SCTP_CID_ERROR:
+ DEBUGP("SCTP_CID_ERROR\n");
+ i = 5; break;
+ case SCTP_CID_COOKIE_ECHO:
+ DEBUGP("SCTP_CID_COOKIE_ECHO\n");
+ i = 6; break;
+ case SCTP_CID_COOKIE_ACK:
+ DEBUGP("SCTP_CID_COOKIE_ACK\n");
+ i = 7; break;
+ case SCTP_CID_SHUTDOWN_COMPLETE:
+ DEBUGP("SCTP_CID_SHUTDOWN_COMPLETE\n");
+ i = 8; break;
+ default:
+ /* Other chunks like DATA, SACK, HEARTBEAT and
+ its ACK do not cause a change in state */
+ DEBUGP("Unknown chunk type, Will stay in %s\n",
+ sctp_conntrack_names[cur_state]);
+ return cur_state;
+ }
+
+ DEBUGP("dir: %d cur_state: %s chunk_type: %d new_state: %s\n",
+ dir, sctp_conntrack_names[cur_state], chunk_type,
+ sctp_conntrack_names[sctp_conntracks[dir][i][cur_state]]);
+
+ return sctp_conntracks[dir][i][cur_state];
+}
+
+/* Returns verdict for packet, or -1 for invalid. */
+static int sctp_packet(struct nf_conn *conntrack,
+ const struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ int pf,
+ unsigned int hooknum)
+{
+ enum sctp_conntrack newconntrack, oldsctpstate;
+ sctp_sctphdr_t _sctph, *sh;
+ sctp_chunkhdr_t _sch, *sch;
+ u_int32_t offset, count;
+ char map[256 / sizeof (char)] = {0};
+
+ DEBUGP(__FUNCTION__);
+ DEBUGP("\n");
+
+ sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph);
+ if (sh == NULL)
+ return -1;
+
+ if (do_basic_checks(conntrack, skb, dataoff, map) != 0)
+ return -1;
+
+ /* Check the verification tag (Sec 8.5) */
+ if (!test_bit(SCTP_CID_INIT, (void *)map)
+ && !test_bit(SCTP_CID_SHUTDOWN_COMPLETE, (void *)map)
+ && !test_bit(SCTP_CID_COOKIE_ECHO, (void *)map)
+ && !test_bit(SCTP_CID_ABORT, (void *)map)
+ && !test_bit(SCTP_CID_SHUTDOWN_ACK, (void *)map)
+ && (sh->vtag != conntrack->proto.sctp.vtag[CTINFO2DIR(ctinfo)])) {
+ DEBUGP("Verification tag check failed\n");
+ return -1;
+ }
+
+ oldsctpstate = newconntrack = SCTP_CONNTRACK_MAX;
+ for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
+ write_lock_bh(&sctp_lock);
+
+ /* Special cases of Verification tag check (Sec 8.5.1) */
+ if (sch->type == SCTP_CID_INIT) {
+ /* Sec 8.5.1 (A) */
+ if (sh->vtag != 0) {
+ write_unlock_bh(&sctp_lock);
+ return -1;
+ }
+ } else if (sch->type == SCTP_CID_ABORT) {
+ /* Sec 8.5.1 (B) */
+ if (!(sh->vtag == conntrack->proto.sctp.vtag[CTINFO2DIR(ctinfo)])
+ && !(sh->vtag == conntrack->proto.sctp.vtag
+ [1 - CTINFO2DIR(ctinfo)])) {
+ write_unlock_bh(&sctp_lock);
+ return -1;
+ }
+ } else if (sch->type == SCTP_CID_SHUTDOWN_COMPLETE) {
+ /* Sec 8.5.1 (C) */
+ if (!(sh->vtag == conntrack->proto.sctp.vtag[CTINFO2DIR(ctinfo)])
+ && !(sh->vtag == conntrack->proto.sctp.vtag
+ [1 - CTINFO2DIR(ctinfo)]
+ && (sch->flags & 1))) {
+ write_unlock_bh(&sctp_lock);
+ return -1;
+ }
+ } else if (sch->type == SCTP_CID_COOKIE_ECHO) {
+ /* Sec 8.5.1 (D) */
+ if (!(sh->vtag == conntrack->proto.sctp.vtag[CTINFO2DIR(ctinfo)])) {
+ write_unlock_bh(&sctp_lock);
+ return -1;
+ }
+ }
+
+ oldsctpstate = conntrack->proto.sctp.state;
+ newconntrack = new_state(CTINFO2DIR(ctinfo), oldsctpstate, sch->type);
+
+ /* Invalid */
+ if (newconntrack == SCTP_CONNTRACK_MAX) {
+ DEBUGP("nf_conntrack_sctp: Invalid dir=%i ctype=%u conntrack=%u\n",
+ CTINFO2DIR(ctinfo), sch->type, oldsctpstate);
+ write_unlock_bh(&sctp_lock);
+ return -1;
+ }
+
+ /* If it is an INIT or an INIT ACK note down the vtag */
+ if (sch->type == SCTP_CID_INIT
+ || sch->type == SCTP_CID_INIT_ACK) {
+ sctp_inithdr_t _inithdr, *ih;
+
+ ih = skb_header_pointer(skb, offset + sizeof(sctp_chunkhdr_t),
+ sizeof(_inithdr), &_inithdr);
+ if (ih == NULL) {
+ write_unlock_bh(&sctp_lock);
+ return -1;
+ }
+ DEBUGP("Setting vtag %x for dir %d\n",
+ ih->init_tag, !CTINFO2DIR(ctinfo));
+ conntrack->proto.sctp.vtag[!CTINFO2DIR(ctinfo)] = ih->init_tag;
+ }
+
+ conntrack->proto.sctp.state = newconntrack;
+ if (oldsctpstate != newconntrack)
+ nf_conntrack_event_cache(IPCT_PROTOINFO, skb);
+ write_unlock_bh(&sctp_lock);
+ }
+
+ nf_ct_refresh_acct(conntrack, ctinfo, skb, *sctp_timeouts[newconntrack]);
+
+ if (oldsctpstate == SCTP_CONNTRACK_COOKIE_ECHOED
+ && CTINFO2DIR(ctinfo) == IP_CT_DIR_REPLY
+ && newconntrack == SCTP_CONNTRACK_ESTABLISHED) {
+ DEBUGP("Setting assured bit\n");
+ set_bit(IPS_ASSURED_BIT, &conntrack->status);
+ nf_conntrack_event_cache(IPCT_STATUS, skb);
+ }
+
+ return NF_ACCEPT;
+}
+
+/* Called when a new connection for this protocol found. */
+static int sctp_new(struct nf_conn *conntrack, const struct sk_buff *skb,
+ unsigned int dataoff)
+{
+ enum sctp_conntrack newconntrack;
+ sctp_sctphdr_t _sctph, *sh;
+ sctp_chunkhdr_t _sch, *sch;
+ u_int32_t offset, count;
+ char map[256 / sizeof (char)] = {0};
+
+ DEBUGP(__FUNCTION__);
+ DEBUGP("\n");
+
+ sh = skb_header_pointer(skb, dataoff, sizeof(_sctph), &_sctph);
+ if (sh == NULL)
+ return 0;
+
+ if (do_basic_checks(conntrack, skb, dataoff, map) != 0)
+ return 0;
+
+ /* If an OOTB packet has any of these chunks discard (Sec 8.4) */
+ if ((test_bit (SCTP_CID_ABORT, (void *)map))
+ || (test_bit (SCTP_CID_SHUTDOWN_COMPLETE, (void *)map))
+ || (test_bit (SCTP_CID_COOKIE_ACK, (void *)map))) {
+ return 0;
+ }
+
+ newconntrack = SCTP_CONNTRACK_MAX;
+ for_each_sctp_chunk (skb, sch, _sch, offset, dataoff, count) {
+ /* Don't need lock here: this conntrack not in circulation yet */
+ newconntrack = new_state(IP_CT_DIR_ORIGINAL,
+ SCTP_CONNTRACK_NONE, sch->type);
+
+ /* Invalid: delete conntrack */
+ if (newconntrack == SCTP_CONNTRACK_MAX) {
+ DEBUGP("nf_conntrack_sctp: invalid new deleting.\n");
+ return 0;
+ }
+
+ /* Copy the vtag into the state info */
+ if (sch->type == SCTP_CID_INIT) {
+ if (sh->vtag == 0) {
+ sctp_inithdr_t _inithdr, *ih;
+
+ ih = skb_header_pointer(skb, offset + sizeof(sctp_chunkhdr_t),
+ sizeof(_inithdr), &_inithdr);
+ if (ih == NULL)
+ return 0;
+
+ DEBUGP("Setting vtag %x for new conn\n",
+ ih->init_tag);
+
+ conntrack->proto.sctp.vtag[IP_CT_DIR_REPLY] =
+ ih->init_tag;
+ } else {
+ /* Sec 8.5.1 (A) */
+ return 0;
+ }
+ }
+		/* If it is a shutdown ack OOTB packet, we expect a return
+		   shutdown complete, otherwise an ABORT (Sec 8.4 (5) and (8)) */
+ else {
+ DEBUGP("Setting vtag %x for new conn OOTB\n",
+ sh->vtag);
+ conntrack->proto.sctp.vtag[IP_CT_DIR_REPLY] = sh->vtag;
+ }
+
+ conntrack->proto.sctp.state = newconntrack;
+ }
+
+ return 1;
+}
+
+struct nf_conntrack_protocol nf_conntrack_protocol_sctp4 = {
+ .l3proto = PF_INET,
+ .proto = IPPROTO_SCTP,
+ .name = "sctp",
+ .pkt_to_tuple = sctp_pkt_to_tuple,
+ .invert_tuple = sctp_invert_tuple,
+ .print_tuple = sctp_print_tuple,
+ .print_conntrack = sctp_print_conntrack,
+ .packet = sctp_packet,
+ .new = sctp_new,
+ .destroy = NULL,
+ .me = THIS_MODULE
+};
+
+struct nf_conntrack_protocol nf_conntrack_protocol_sctp6 = {
+ .l3proto = PF_INET6,
+ .proto = IPPROTO_SCTP,
+ .name = "sctp",
+ .pkt_to_tuple = sctp_pkt_to_tuple,
+ .invert_tuple = sctp_invert_tuple,
+ .print_tuple = sctp_print_tuple,
+ .print_conntrack = sctp_print_conntrack,
+ .packet = sctp_packet,
+ .new = sctp_new,
+ .destroy = NULL,
+ .me = THIS_MODULE
+};
+
+#ifdef CONFIG_SYSCTL
+static ctl_table nf_ct_sysctl_table[] = {
+ {
+ .ctl_name = NET_NF_CONNTRACK_SCTP_TIMEOUT_CLOSED,
+ .procname = "nf_conntrack_sctp_timeout_closed",
+ .data = &nf_ct_sctp_timeout_closed,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_jiffies,
+ },
+ {
+ .ctl_name = NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_WAIT,
+ .procname = "nf_conntrack_sctp_timeout_cookie_wait",
+ .data = &nf_ct_sctp_timeout_cookie_wait,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_jiffies,
+ },
+ {
+ .ctl_name = NET_NF_CONNTRACK_SCTP_TIMEOUT_COOKIE_ECHOED,
+ .procname = "nf_conntrack_sctp_timeout_cookie_echoed",
+ .data = &nf_ct_sctp_timeout_cookie_echoed,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_jiffies,
+ },
+ {
+ .ctl_name = NET_NF_CONNTRACK_SCTP_TIMEOUT_ESTABLISHED,
+ .procname = "nf_conntrack_sctp_timeout_established",
+ .data = &nf_ct_sctp_timeout_established,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_jiffies,
+ },
+ {
+ .ctl_name = NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_SENT,
+ .procname = "nf_conntrack_sctp_timeout_shutdown_sent",
+ .data = &nf_ct_sctp_timeout_shutdown_sent,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_jiffies,
+ },
+ {
+ .ctl_name = NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_RECD,
+ .procname = "nf_conntrack_sctp_timeout_shutdown_recd",
+ .data = &nf_ct_sctp_timeout_shutdown_recd,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_jiffies,
+ },
+ {
+ .ctl_name = NET_NF_CONNTRACK_SCTP_TIMEOUT_SHUTDOWN_ACK_SENT,
+ .procname = "nf_conntrack_sctp_timeout_shutdown_ack_sent",
+ .data = &nf_ct_sctp_timeout_shutdown_ack_sent,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_jiffies,
+ },
+ { .ctl_name = 0 }
+};
+
+static ctl_table nf_ct_netfilter_table[] = {
+ {
+ .ctl_name = NET_NETFILTER,
+ .procname = "netfilter",
+ .mode = 0555,
+ .child = nf_ct_sysctl_table,
+ },
+ { .ctl_name = 0 }
+};
+
+static ctl_table nf_ct_net_table[] = {
+ {
+ .ctl_name = CTL_NET,
+ .procname = "net",
+ .mode = 0555,
+ .child = nf_ct_netfilter_table,
+ },
+ { .ctl_name = 0 }
+};
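+
+/* The three tables above chain into one hierarchy, so the timeouts show up
+   under /proc/sys/net/netfilter/, e.g.
+   /proc/sys/net/netfilter/nf_conntrack_sctp_timeout_established.
+   proc_dointvec_jiffies converts between seconds (user space) and jiffies
+   (the values stored above). */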
+
+static struct ctl_table_header *nf_ct_sysctl_header;
+#endif
+
+int __init init(void)
+{
+ int ret;
+
+ ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_sctp4);
+ if (ret) {
+ printk("nf_conntrack_proto_sctp4: protocol register failed\n");
+ goto out;
+ }
+ ret = nf_conntrack_protocol_register(&nf_conntrack_protocol_sctp6);
+ if (ret) {
+ printk("nf_conntrack_proto_sctp6: protocol register failed\n");
+ goto cleanup_sctp4;
+ }
+
+#ifdef CONFIG_SYSCTL
+	nf_ct_sysctl_header = register_sysctl_table(nf_ct_net_table, 0);
+	if (nf_ct_sysctl_header == NULL) {
+		printk("nf_conntrack_proto_sctp: can't register to sysctl.\n");
+		ret = -ENOMEM;
+		goto cleanup;
+	}
+#endif
+
+ return ret;
+
+#ifdef CONFIG_SYSCTL
+ cleanup:
+ nf_conntrack_protocol_unregister(&nf_conntrack_protocol_sctp6);
+#endif
+ cleanup_sctp4:
+ nf_conntrack_protocol_unregister(&nf_conntrack_protocol_sctp4);
+ out:
+ DEBUGP("SCTP conntrack module loading %s\n",
+ ret ? "failed": "succeeded");
+ return ret;
+}
+
+void __exit fini(void)
+{
+ nf_conntrack_protocol_unregister(&nf_conntrack_protocol_sctp6);
+ nf_conntrack_protocol_unregister(&nf_conntrack_protocol_sctp4);
+#ifdef CONFIG_SYSCTL
+ unregister_sysctl_table(nf_ct_sysctl_header);
+#endif
+ DEBUGP("SCTP conntrack module unloaded\n");
+}
+
+module_init(init);
+module_exit(fini);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Kiran Kumar Immidi");
+MODULE_DESCRIPTION("Netfilter connection tracking protocol helper for SCTP");
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
new file mode 100644
index 00000000000..83d90dd624f
--- /dev/null
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -0,0 +1,1162 @@
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>:
+ * - Real stateful connection tracking
+ * - Modified state transitions table
+ * - Window scaling support added
+ * - SACK support added
+ *
+ * Willy Tarreau:
+ * - State table bugfixes
+ * - More robust state changes
+ * - Tuning timer parameters
+ *
+ * 27 Oct 2004: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ *	- generalized Layer 3 protocol part.
+ *
+ * Derived from net/ipv4/netfilter/ip_conntrack_proto_tcp.c
+ *
+ * version 2.2
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/netfilter.h>
+#include <linux/module.h>
+#include <linux/in.h>
+#include <linux/tcp.h>
+#include <linux/spinlock.h>
+#include <linux/skbuff.h>
+#include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
+
+#include <net/tcp.h>
+
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv6.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_protocol.h>
+
+#if 0
+#define DEBUGP printk
+#define DEBUGP_VARS
+#else
+#define DEBUGP(format, args...)
+#endif
+
+/* Protects conntrack->proto.tcp */
+static DEFINE_RWLOCK(tcp_lock);
+
+/* "Be conservative in what you do,
+ be liberal in what you accept from others."
+ If it's non-zero, we mark only out of window RST segments as INVALID. */
+int nf_ct_tcp_be_liberal = 0;
+
+/* When connection is picked up from the middle, how many packets are required
+ to pass in each direction when we assume we are in sync - if any side uses
+ window scaling, we lost the game.
+ If it is set to zero, we disable picking up already established
+ connections. */
+int nf_ct_tcp_loose = 3;
+
+/* Max number of the retransmitted packets without receiving an (acceptable)
+ ACK from the destination. If this number is reached, a shorter timer
+ will be started. */
+int nf_ct_tcp_max_retrans = 3;
+
+/* FIXME: Examine ipfilter's timeouts and conntrack transitions more
+   closely. They're more complex. --RR */
+
+static const char *tcp_conntrack_names[] = {
+ "NONE",
+ "SYN_SENT",
+ "SYN_RECV",
+ "ESTABLISHED",
+ "FIN_WAIT",
+ "CLOSE_WAIT",
+ "LAST_ACK",
+ "TIME_WAIT",
+ "CLOSE",
+ "LISTEN"
+};
+
+#define SECS * HZ
+#define MINS * 60 SECS
+#define HOURS * 60 MINS
+#define DAYS * 24 HOURS
+
+unsigned long nf_ct_tcp_timeout_syn_sent = 2 MINS;
+unsigned long nf_ct_tcp_timeout_syn_recv = 60 SECS;
+unsigned long nf_ct_tcp_timeout_established = 5 DAYS;
+unsigned long nf_ct_tcp_timeout_fin_wait = 2 MINS;
+unsigned long nf_ct_tcp_timeout_close_wait = 60 SECS;
+unsigned long nf_ct_tcp_timeout_last_ack = 30 SECS;
+unsigned long nf_ct_tcp_timeout_time_wait = 2 MINS;
+unsigned long nf_ct_tcp_timeout_close = 10 SECS;
+
+/* RFC1122 says the R2 limit should be at least 100 seconds.
+ Linux uses 15 packets as limit, which corresponds
+ to ~13-30min depending on RTO. */
+unsigned long nf_ct_tcp_timeout_max_retrans = 5 MINS;
+
+static unsigned long * tcp_timeouts[]
+= { NULL, /* TCP_CONNTRACK_NONE */
+ &nf_ct_tcp_timeout_syn_sent, /* TCP_CONNTRACK_SYN_SENT, */
+ &nf_ct_tcp_timeout_syn_recv, /* TCP_CONNTRACK_SYN_RECV, */
+ &nf_ct_tcp_timeout_established, /* TCP_CONNTRACK_ESTABLISHED, */
+ &nf_ct_tcp_timeout_fin_wait, /* TCP_CONNTRACK_FIN_WAIT, */
+ &nf_ct_tcp_timeout_close_wait, /* TCP_CONNTRACK_CLOSE_WAIT, */
+ &nf_ct_tcp_timeout_last_ack, /* TCP_CONNTRACK_LAST_ACK, */
+ &nf_ct_tcp_timeout_time_wait, /* TCP_CONNTRACK_TIME_WAIT, */
+ &nf_ct_tcp_timeout_close, /* TCP_CONNTRACK_CLOSE, */
+ NULL, /* TCP_CONNTRACK_LISTEN */
+ };
+
+#define sNO TCP_CONNTRACK_NONE
+#define sSS TCP_CONNTRACK_SYN_SENT
+#define sSR TCP_CONNTRACK_SYN_RECV
+#define sES TCP_CONNTRACK_ESTABLISHED
+#define sFW TCP_CONNTRACK_FIN_WAIT
+#define sCW TCP_CONNTRACK_CLOSE_WAIT
+#define sLA TCP_CONNTRACK_LAST_ACK
+#define sTW TCP_CONNTRACK_TIME_WAIT
+#define sCL TCP_CONNTRACK_CLOSE
+#define sLI TCP_CONNTRACK_LISTEN
+#define sIV TCP_CONNTRACK_MAX
+#define sIG TCP_CONNTRACK_IGNORE
+
+/* What TCP flags are set from RST/SYN/FIN/ACK. */
+enum tcp_bit_set {
+ TCP_SYN_SET,
+ TCP_SYNACK_SET,
+ TCP_FIN_SET,
+ TCP_ACK_SET,
+ TCP_RST_SET,
+ TCP_NONE_SET,
+};
+
+/*
+ * The TCP state transition table needs a few words...
+ *
+ * We are the man in the middle. All the packets go through us
+ * but might get lost in transit to the destination.
+ * It is assumed that the destinations can't receive segments
+ * we haven't seen.
+ *
+ * The checked segment is in window, but our windows are *not*
+ * equivalent to those of the sender/receiver. We always
+ * try to guess the state of the current sender.
+ *
+ * The meanings of the states are:
+ *
+ * NONE: initial state
+ * SYN_SENT: SYN-only packet seen
+ * SYN_RECV: SYN-ACK packet seen
+ * ESTABLISHED: ACK packet seen
+ * FIN_WAIT: FIN packet seen
+ * CLOSE_WAIT: ACK seen (after FIN)
+ * LAST_ACK: FIN seen (after FIN)
+ * TIME_WAIT: last ACK seen
+ * CLOSE: closed connection
+ *
+ * LISTEN state is not used.
+ *
+ * Packets marked as IGNORED (sIG):
+ *	the packet may be either valid or invalid, and the
+ *	receiver may send back a connection-closing RST or
+ *	a SYN/ACK.
+ *
+ * Packets marked as INVALID (sIV):
+ *	the packet is invalid, or we do not support the
+ *	request (simultaneous open).
+ */
+static enum tcp_conntrack tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
+ {
+/* ORIGINAL */
+/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */
+/*syn*/ { sSS, sSS, sIG, sIG, sIG, sIG, sIG, sSS, sSS, sIV },
+/*
+ * sNO -> sSS Initialize a new connection
+ * sSS -> sSS Retransmitted SYN
+ * sSR -> sIG Late retransmitted SYN?
+ * sES -> sIG Error: SYNs in window outside the SYN_SENT state
+ * are errors. Receiver will reply with RST
+ * and close the connection.
+ * Or we are not in sync and hold a dead connection.
+ * sFW -> sIG
+ * sCW -> sIG
+ * sLA -> sIG
+ * sTW -> sSS Reopened connection (RFC 1122).
+ * sCL -> sSS
+ */
+/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */
+/*synack*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV },
+/*
+ * A SYN/ACK from the client is always invalid:
+ * - either it tries to set up a simultaneous open, which is
+ * not supported;
+ * - or the firewall has just been inserted between the two hosts
+ * during the session set-up. The SYN will be retransmitted
+ * by the true client (or it'll time out).
+ */
+/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */
+/*fin*/ { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
+/*
+ * sNO -> sIV Too late and no reason to do anything...
+ *	sSS -> sIV	Client might not send FIN in this state:
+ * we enforce waiting for a SYN/ACK reply first.
+ * sSR -> sFW Close started.
+ * sES -> sFW
+ * sFW -> sLA FIN seen in both directions, waiting for
+ * the last ACK.
+ *			Might be a retransmitted FIN as well...
+ * sCW -> sLA
+ * sLA -> sLA Retransmitted FIN. Remain in the same state.
+ * sTW -> sTW
+ * sCL -> sCL
+ */
+/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */
+/*ack*/ { sES, sIV, sES, sES, sCW, sCW, sTW, sTW, sCL, sIV },
+/*
+ * sNO -> sES Assumed.
+ * sSS -> sIV ACK is invalid: we haven't seen a SYN/ACK yet.
+ * sSR -> sES Established state is reached.
+ * sES -> sES :-)
+ * sFW -> sCW Normal close request answered by ACK.
+ * sCW -> sCW
+ * sLA -> sTW Last ACK detected.
+ * sTW -> sTW Retransmitted last ACK. Remain in the same state.
+ * sCL -> sCL
+ */
+/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */
+/*rst*/ { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV },
+/*none*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
+ },
+ {
+/* REPLY */
+/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */
+/*syn*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV },
+/*
+ * sNO -> sIV Never reached.
+ * sSS -> sIV Simultaneous open, not supported
+ * sSR -> sIV Simultaneous open, not supported.
+ * sES -> sIV Server may not initiate a connection.
+ * sFW -> sIV
+ * sCW -> sIV
+ * sLA -> sIV
+ * sTW -> sIV Reopened connection, but server may not do it.
+ * sCL -> sIV
+ */
+/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */
+/*synack*/ { sIV, sSR, sSR, sIG, sIG, sIG, sIG, sIG, sIG, sIV },
+/*
+ * sSS -> sSR Standard open.
+ * sSR -> sSR Retransmitted SYN/ACK.
+ * sES -> sIG Late retransmitted SYN/ACK?
+ * sFW -> sIG Might be SYN/ACK answering ignored SYN
+ * sCW -> sIG
+ * sLA -> sIG
+ * sTW -> sIG
+ * sCL -> sIG
+ */
+/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */
+/*fin*/ { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV },
+/*
+ * sSS -> sIV Server might not send FIN in this state.
+ * sSR -> sFW Close started.
+ * sES -> sFW
+ * sFW -> sLA FIN seen in both directions.
+ * sCW -> sLA
+ * sLA -> sLA Retransmitted FIN.
+ * sTW -> sTW
+ * sCL -> sCL
+ */
+/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */
+/*ack*/ { sIV, sIV, sSR, sES, sCW, sCW, sTW, sTW, sCL, sIV },
+/*
+ * sSS -> sIV Might be a half-open connection.
+ * sSR -> sSR Might answer late resent SYN.
+ * sES -> sES :-)
+ * sFW -> sCW Normal close request answered by ACK.
+ * sCW -> sCW
+ * sLA -> sTW Last ACK detected.
+ * sTW -> sTW Retransmitted last ACK.
+ * sCL -> sCL
+ */
+/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sLI */
+/*rst*/ { sIV, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sCL, sIV },
+/*none*/ { sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sIV }
+ }
+};
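+
+/* As a worked example of the table (and of the comments interleaved above):
+   a normal three-way handshake is syn in the ORIGINAL direction
+   (sNO -> sSS), synack in the REPLY direction (sSS -> sSR), then ack in the
+   ORIGINAL direction (sSR -> sES), i.e. ESTABLISHED. */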
+
+static int tcp_pkt_to_tuple(const struct sk_buff *skb,
+ unsigned int dataoff,
+ struct nf_conntrack_tuple *tuple)
+{
+ struct tcphdr _hdr, *hp;
+
+ /* Actually only need first 8 bytes. */
+ hp = skb_header_pointer(skb, dataoff, 8, &_hdr);
+ if (hp == NULL)
+ return 0;
+
+ tuple->src.u.tcp.port = hp->source;
+ tuple->dst.u.tcp.port = hp->dest;
+
+ return 1;
+}
+
+static int tcp_invert_tuple(struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_tuple *orig)
+{
+ tuple->src.u.tcp.port = orig->dst.u.tcp.port;
+ tuple->dst.u.tcp.port = orig->src.u.tcp.port;
+ return 1;
+}
+
+/* Print out the per-protocol part of the tuple. */
+static int tcp_print_tuple(struct seq_file *s,
+ const struct nf_conntrack_tuple *tuple)
+{
+ return seq_printf(s, "sport=%hu dport=%hu ",
+ ntohs(tuple->src.u.tcp.port),
+ ntohs(tuple->dst.u.tcp.port));
+}
+
+/* Print out the private part of the conntrack. */
+static int tcp_print_conntrack(struct seq_file *s,
+ const struct nf_conn *conntrack)
+{
+ enum tcp_conntrack state;
+
+ read_lock_bh(&tcp_lock);
+ state = conntrack->proto.tcp.state;
+ read_unlock_bh(&tcp_lock);
+
+ return seq_printf(s, "%s ", tcp_conntrack_names[state]);
+}
+
+static unsigned int get_conntrack_index(const struct tcphdr *tcph)
+{
+ if (tcph->rst) return TCP_RST_SET;
+ else if (tcph->syn) return (tcph->ack ? TCP_SYNACK_SET : TCP_SYN_SET);
+ else if (tcph->fin) return TCP_FIN_SET;
+ else if (tcph->ack) return TCP_ACK_SET;
+ else return TCP_NONE_SET;
+}
+
+/* TCP connection tracking based on 'Real Stateful TCP Packet Filtering
+ in IP Filter' by Guido van Rooij.
+
+ http://www.nluug.nl/events/sane2000/papers.html
+ http://www.iae.nl/users/guido/papers/tcp_filtering.ps.gz
+
+ The boundaries and the conditions are changed according to RFC793:
+ the packet must intersect the window (i.e. segments may be
+ after the right or before the left edge) and thus receivers may ACK
+ segments after the right edge of the window.
+
+ td_maxend = max(sack + max(win,1)) seen in reply packets
+ td_maxwin = max(max(win, 1)) + (sack - ack) seen in sent packets
+ td_maxwin += seq + len - sender.td_maxend
+ if seq + len > sender.td_maxend
+ td_end = max(seq + len) seen in sent packets
+
+ I. Upper bound for valid data: seq <= sender.td_maxend
+ II. Lower bound for valid data: seq + len >= sender.td_end - receiver.td_maxwin
+ III. Upper bound for valid ack: sack <= receiver.td_end
+ IV. Lower bound for valid ack: ack >= receiver.td_end - MAXACKWINDOW
+
+ where sack is the highest right edge of sack block found in the packet.
+
+ The upper bound limit for a valid ack is not ignored -
+   we don't have to deal with fragments.
+*/
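+
+/* A small numeric illustration of bounds I-IV (hypothetical values, not
+   taken from any trace): with sender.td_end = 1000, sender.td_maxend = 2000,
+   receiver.td_end = 5000 and receiver.td_maxwin = 500, a segment with
+   seq = 1200, seq + len = 1400 and ack = sack = 4800 satisfies all four
+   bounds and is accepted, while a segment with seq = 2500 already fails
+   bound I. */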
+
+static inline __u32 segment_seq_plus_len(__u32 seq,
+ size_t len,
+ unsigned int dataoff,
+ struct tcphdr *tcph)
+{
+ /* XXX Should I use payload length field in IP/IPv6 header ?
+ * - YK */
+ return (seq + len - dataoff - tcph->doff*4
+ + (tcph->syn ? 1 : 0) + (tcph->fin ? 1 : 0));
+}
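+
+/* E.g. a bare SYN carrying no data and no options (doff == 5, so
+   len == dataoff + 20) yields seq + 1: SYN and FIN each consume one unit of
+   sequence space. */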
+
+/* Fixme: what about big packets? */
+#define MAXACKWINCONST 66000
+#define MAXACKWINDOW(sender) \
+ ((sender)->td_maxwin > MAXACKWINCONST ? (sender)->td_maxwin \
+ : MAXACKWINCONST)
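+
+/* E.g. with td_maxwin == 5840 the acceptable ack window is widened to 66000,
+   so moderately delayed ACKs from a small-window peer are still accepted. */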
+
+/*
+ * Simplified tcp_parse_options routine from tcp_input.c
+ */
+static void tcp_options(const struct sk_buff *skb,
+ unsigned int dataoff,
+ struct tcphdr *tcph,
+ struct ip_ct_tcp_state *state)
+{
+ unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
+ unsigned char *ptr;
+ int length = (tcph->doff*4) - sizeof(struct tcphdr);
+
+ if (!length)
+ return;
+
+ ptr = skb_header_pointer(skb, dataoff + sizeof(struct tcphdr),
+ length, buff);
+ BUG_ON(ptr == NULL);
+
+ state->td_scale =
+ state->flags = 0;
+
+ while (length > 0) {
+ int opcode=*ptr++;
+ int opsize;
+
+ switch (opcode) {
+ case TCPOPT_EOL:
+ return;
+ case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */
+ length--;
+ continue;
+ default:
+ opsize=*ptr++;
+ if (opsize < 2) /* "silly options" */
+ return;
+ if (opsize > length)
+ break; /* don't parse partial options */
+
+ if (opcode == TCPOPT_SACK_PERM
+ && opsize == TCPOLEN_SACK_PERM)
+ state->flags |= IP_CT_TCP_FLAG_SACK_PERM;
+ else if (opcode == TCPOPT_WINDOW
+ && opsize == TCPOLEN_WINDOW) {
+ state->td_scale = *(u_int8_t *)ptr;
+
+ if (state->td_scale > 14) {
+ /* See RFC1323 */
+ state->td_scale = 14;
+ }
+ state->flags |=
+ IP_CT_TCP_FLAG_WINDOW_SCALE;
+ }
+ ptr += opsize - 2;
+ length -= opsize;
+ }
+ }
+}
+
+static void tcp_sack(const struct sk_buff *skb, unsigned int dataoff,
+ struct tcphdr *tcph, __u32 *sack)
+{
+ unsigned char buff[(15 * 4) - sizeof(struct tcphdr)];
+ unsigned char *ptr;
+ int length = (tcph->doff*4) - sizeof(struct tcphdr);
+ __u32 tmp;
+
+ if (!length)
+ return;
+
+ ptr = skb_header_pointer(skb, dataoff + sizeof(struct tcphdr),
+ length, buff);
+ BUG_ON(ptr == NULL);
+
+ /* Fast path for timestamp-only option */
+ if (length == TCPOLEN_TSTAMP_ALIGNED*4
+ && *(__u32 *)ptr ==
+ __constant_ntohl((TCPOPT_NOP << 24)
+ | (TCPOPT_NOP << 16)
+ | (TCPOPT_TIMESTAMP << 8)
+ | TCPOLEN_TIMESTAMP))
+ return;
+
+ while (length > 0) {
+ int opcode = *ptr++;
+ int opsize, i;
+
+ switch (opcode) {
+ case TCPOPT_EOL:
+ return;
+ case TCPOPT_NOP: /* Ref: RFC 793 section 3.1 */
+ length--;
+ continue;
+ default:
+ opsize = *ptr++;
+ if (opsize < 2) /* "silly options" */
+ return;
+ if (opsize > length)
+ break; /* don't parse partial options */
+
+ if (opcode == TCPOPT_SACK
+ && opsize >= (TCPOLEN_SACK_BASE
+ + TCPOLEN_SACK_PERBLOCK)
+ && !((opsize - TCPOLEN_SACK_BASE)
+ % TCPOLEN_SACK_PERBLOCK)) {
+ for (i = 0;
+ i < (opsize - TCPOLEN_SACK_BASE);
+ i += TCPOLEN_SACK_PERBLOCK) {
+ memcpy(&tmp, (__u32 *)(ptr + i) + 1,
+ sizeof(__u32));
+ tmp = ntohl(tmp);
+
+ if (after(tmp, *sack))
+ *sack = tmp;
+ }
+ return;
+ }
+ ptr += opsize - 2;
+ length -= opsize;
+ }
+ }
+}
+
+static int tcp_in_window(struct ip_ct_tcp *state,
+ enum ip_conntrack_dir dir,
+ unsigned int index,
+ const struct sk_buff *skb,
+ unsigned int dataoff,
+ struct tcphdr *tcph,
+ int pf)
+{
+ struct ip_ct_tcp_state *sender = &state->seen[dir];
+ struct ip_ct_tcp_state *receiver = &state->seen[!dir];
+ __u32 seq, ack, sack, end, win, swin;
+ int res;
+
+ /*
+ * Get the required data from the packet.
+ */
+ seq = ntohl(tcph->seq);
+ ack = sack = ntohl(tcph->ack_seq);
+ win = ntohs(tcph->window);
+ end = segment_seq_plus_len(seq, skb->len, dataoff, tcph);
+
+ if (receiver->flags & IP_CT_TCP_FLAG_SACK_PERM)
+ tcp_sack(skb, dataoff, tcph, &sack);
+
+ DEBUGP("tcp_in_window: START\n");
+ DEBUGP("tcp_in_window: src=%u.%u.%u.%u:%hu dst=%u.%u.%u.%u:%hu "
+ "seq=%u ack=%u sack=%u win=%u end=%u\n",
+ NIPQUAD(iph->saddr), ntohs(tcph->source),
+ NIPQUAD(iph->daddr), ntohs(tcph->dest),
+ seq, ack, sack, win, end);
+ DEBUGP("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i "
+ "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
+ sender->td_end, sender->td_maxend, sender->td_maxwin,
+ sender->td_scale,
+ receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
+ receiver->td_scale);
+
+ if (sender->td_end == 0) {
+ /*
+ * Initialize sender data.
+ */
+ if (tcph->syn && tcph->ack) {
+ /*
+ * Outgoing SYN-ACK in reply to a SYN.
+ */
+ sender->td_end =
+ sender->td_maxend = end;
+ sender->td_maxwin = (win == 0 ? 1 : win);
+
+ tcp_options(skb, dataoff, tcph, sender);
+ /*
+ * RFC 1323:
+ * Both sides must send the Window Scale option
+ * to enable window scaling in either direction.
+ */
+ if (!(sender->flags & IP_CT_TCP_FLAG_WINDOW_SCALE
+ && receiver->flags & IP_CT_TCP_FLAG_WINDOW_SCALE))
+ sender->td_scale =
+ receiver->td_scale = 0;
+ } else {
+ /*
+ * We are in the middle of a connection,
+ * its history is lost for us.
+ * Let's try to use the data from the packet.
+ */
+ sender->td_end = end;
+ sender->td_maxwin = (win == 0 ? 1 : win);
+ sender->td_maxend = end + sender->td_maxwin;
+ }
+ } else if (((state->state == TCP_CONNTRACK_SYN_SENT
+ && dir == IP_CT_DIR_ORIGINAL)
+ || (state->state == TCP_CONNTRACK_SYN_RECV
+ && dir == IP_CT_DIR_REPLY))
+ && after(end, sender->td_end)) {
+ /*
+ * RFC 793: "if a TCP is reinitialized ... then it need
+ * not wait at all; it must only be sure to use sequence
+ * numbers larger than those recently used."
+ */
+ sender->td_end =
+ sender->td_maxend = end;
+ sender->td_maxwin = (win == 0 ? 1 : win);
+
+ tcp_options(skb, dataoff, tcph, sender);
+ }
+
+ if (!(tcph->ack)) {
+ /*
+ * If there is no ACK, just pretend it was set and OK.
+ */
+ ack = sack = receiver->td_end;
+ } else if (((tcp_flag_word(tcph) & (TCP_FLAG_ACK|TCP_FLAG_RST)) ==
+ (TCP_FLAG_ACK|TCP_FLAG_RST))
+ && (ack == 0)) {
+ /*
+		 * Broken TCP stacks that set the ACK flag in RST packets
+		 * with a zero ack value.
+ */
+ ack = sack = receiver->td_end;
+ }
+
+ if (seq == end
+ && (!tcph->rst
+ || (seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT)))
+ /*
+		 * Packet contains no data: we assume it is valid
+ * and check the ack value only.
+ * However RST segments are always validated by their
+ * SEQ number, except when seq == 0 (reset sent answering
+		 * SYN).
+ */
+ seq = end = sender->td_end;
+
+ DEBUGP("tcp_in_window: src=%u.%u.%u.%u:%hu dst=%u.%u.%u.%u:%hu "
+ "seq=%u ack=%u sack =%u win=%u end=%u\n",
+ NIPQUAD(iph->saddr), ntohs(tcph->source),
+ NIPQUAD(iph->daddr), ntohs(tcph->dest),
+ seq, ack, sack, win, end);
+ DEBUGP("tcp_in_window: sender end=%u maxend=%u maxwin=%u scale=%i "
+ "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
+ sender->td_end, sender->td_maxend, sender->td_maxwin,
+ sender->td_scale,
+ receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
+ receiver->td_scale);
+
+ DEBUGP("tcp_in_window: I=%i II=%i III=%i IV=%i\n",
+ before(seq, sender->td_maxend + 1),
+ after(end, sender->td_end - receiver->td_maxwin - 1),
+ before(sack, receiver->td_end + 1),
+ after(ack, receiver->td_end - MAXACKWINDOW(sender)));
+
+ if (sender->loose || receiver->loose ||
+ (before(seq, sender->td_maxend + 1) &&
+ after(end, sender->td_end - receiver->td_maxwin - 1) &&
+ before(sack, receiver->td_end + 1) &&
+ after(ack, receiver->td_end - MAXACKWINDOW(sender)))) {
+ /*
+ * Take into account window scaling (RFC 1323).
+ */
+ if (!tcph->syn)
+ win <<= sender->td_scale;
+
+ /*
+ * Update sender data.
+ */
+ swin = win + (sack - ack);
+ if (sender->td_maxwin < swin)
+ sender->td_maxwin = swin;
+ if (after(end, sender->td_end))
+ sender->td_end = end;
+ /*
+ * Update receiver data.
+ */
+ if (after(end, sender->td_maxend))
+ receiver->td_maxwin += end - sender->td_maxend;
+ if (after(sack + win, receiver->td_maxend - 1)) {
+ receiver->td_maxend = sack + win;
+ if (win == 0)
+ receiver->td_maxend++;
+ }
+
+ /*
+ * Check retransmissions.
+ */
+ if (index == TCP_ACK_SET) {
+ if (state->last_dir == dir
+ && state->last_seq == seq
+ && state->last_ack == ack
+ && state->last_end == end)
+ state->retrans++;
+ else {
+ state->last_dir = dir;
+ state->last_seq = seq;
+ state->last_ack = ack;
+ state->last_end = end;
+ state->retrans = 0;
+ }
+ }
+ /*
+ * Close the window of disabled window tracking :-)
+ */
+ if (sender->loose)
+ sender->loose--;
+
+ res = 1;
+ } else {
+ if (LOG_INVALID(IPPROTO_TCP))
+ nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
+ "nf_ct_tcp: %s ",
+ before(seq, sender->td_maxend + 1) ?
+ after(end, sender->td_end - receiver->td_maxwin - 1) ?
+ before(sack, receiver->td_end + 1) ?
+ after(ack, receiver->td_end - MAXACKWINDOW(sender)) ? "BUG"
+ : "ACK is under the lower bound (possible overly delayed ACK)"
+ : "ACK is over the upper bound (ACKed data not seen yet)"
+ : "SEQ is under the lower bound (already ACKed data retransmitted)"
+ : "SEQ is over the upper bound (over the window of the receiver)");
+
+ res = nf_ct_tcp_be_liberal;
+ }
+
+ DEBUGP("tcp_in_window: res=%i sender end=%u maxend=%u maxwin=%u "
+ "receiver end=%u maxend=%u maxwin=%u\n",
+ res, sender->td_end, sender->td_maxend, sender->td_maxwin,
+ receiver->td_end, receiver->td_maxend, receiver->td_maxwin);
+
+ return res;
+}
+
+#ifdef CONFIG_IP_NF_NAT_NEEDED
+/* Update sender->td_end after NAT successfully mangled the packet */
+/* Caller must linearize skb at tcp header. */
+void nf_conntrack_tcp_update(struct sk_buff *skb,
+ unsigned int dataoff,
+ struct nf_conn *conntrack,
+ int dir)
+{
+ struct tcphdr *tcph = (void *)skb->data + dataoff;
+ __u32 end;
+#ifdef DEBUGP_VARS
+ struct ip_ct_tcp_state *sender = &conntrack->proto.tcp.seen[dir];
+ struct ip_ct_tcp_state *receiver = &conntrack->proto.tcp.seen[!dir];
+#endif
+
+ end = segment_seq_plus_len(ntohl(tcph->seq), skb->len, dataoff, tcph);
+
+ write_lock_bh(&tcp_lock);
+ /*
+	 * We only have to worry about the ack in the reply packet...
+ */
+ if (after(end, conntrack->proto.tcp.seen[dir].td_end))
+ conntrack->proto.tcp.seen[dir].td_end = end;
+ conntrack->proto.tcp.last_end = end;
+ write_unlock_bh(&tcp_lock);
+ DEBUGP("tcp_update: sender end=%u maxend=%u maxwin=%u scale=%i "
+ "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
+ sender->td_end, sender->td_maxend, sender->td_maxwin,
+ sender->td_scale,
+ receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
+ receiver->td_scale);
+}
+
+#endif
+
+#define TH_FIN 0x01
+#define TH_SYN 0x02
+#define TH_RST 0x04
+#define TH_PUSH 0x08
+#define TH_ACK 0x10
+#define TH_URG 0x20
+#define TH_ECE 0x40
+#define TH_CWR 0x80
+
+/* table of valid flag combinations - ECE and CWR are always valid */
+static u8 tcp_valid_flags[(TH_FIN|TH_SYN|TH_RST|TH_PUSH|TH_ACK|TH_URG) + 1] =
+{
+ [TH_SYN] = 1,
+ [TH_SYN|TH_ACK] = 1,
+ [TH_SYN|TH_ACK|TH_PUSH] = 1,
+ [TH_RST] = 1,
+ [TH_RST|TH_ACK] = 1,
+ [TH_RST|TH_ACK|TH_PUSH] = 1,
+ [TH_FIN|TH_ACK] = 1,
+ [TH_ACK] = 1,
+ [TH_ACK|TH_PUSH] = 1,
+ [TH_ACK|TH_URG] = 1,
+ [TH_ACK|TH_URG|TH_PUSH] = 1,
+ [TH_FIN|TH_ACK|TH_PUSH] = 1,
+ [TH_FIN|TH_ACK|TH_URG] = 1,
+ [TH_FIN|TH_ACK|TH_URG|TH_PUSH] = 1,
+};
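+
+/* Used in tcp_error() below: the flag byte is masked with ~(TH_ECE|TH_CWR)
+   and looked up here, so e.g. a bare SYN or a FIN|ACK passes while a
+   SYN|FIN combination is rejected as invalid. */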
+
+/* Protect conntrack against broken packets. Code taken from ipt_unclean.c. */
+static int tcp_error(struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info *ctinfo,
+ int pf,
+ unsigned int hooknum,
+ int(*csum)(const struct sk_buff *,unsigned int))
+{
+ struct tcphdr _tcph, *th;
+ unsigned int tcplen = skb->len - dataoff;
+ u_int8_t tcpflags;
+
+	/* Smaller than minimal TCP header? */
+ th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
+ if (th == NULL) {
+ if (LOG_INVALID(IPPROTO_TCP))
+ nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
+ "nf_ct_tcp: short packet ");
+ return -NF_ACCEPT;
+ }
+
+ /* Not whole TCP header or malformed packet */
+ if (th->doff*4 < sizeof(struct tcphdr) || tcplen < th->doff*4) {
+ if (LOG_INVALID(IPPROTO_TCP))
+ nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
+ "nf_ct_tcp: truncated/malformed packet ");
+ return -NF_ACCEPT;
+ }
+
+ /* Checksum invalid? Ignore.
+ * We skip checking packets on the outgoing path
+	 * because the semantics of CHECKSUM_HW are different there
+ * and moreover root might send raw packets.
+ */
+ /* FIXME: Source route IP option packets --RR */
+ if (((pf == PF_INET && hooknum == NF_IP_PRE_ROUTING) ||
+ (pf == PF_INET6 && hooknum == NF_IP6_PRE_ROUTING))
+ && skb->ip_summed != CHECKSUM_UNNECESSARY
+ && csum(skb, dataoff)) {
+ if (LOG_INVALID(IPPROTO_TCP))
+ nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
+ "nf_ct_tcp: bad TCP checksum ");
+ return -NF_ACCEPT;
+ }
+
+ /* Check TCP flags. */
+ tcpflags = (((u_int8_t *)th)[13] & ~(TH_ECE|TH_CWR));
+ if (!tcp_valid_flags[tcpflags]) {
+ if (LOG_INVALID(IPPROTO_TCP))
+ nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
+ "nf_ct_tcp: invalid TCP flag combination ");
+ return -NF_ACCEPT;
+ }
+
+ return NF_ACCEPT;
+}
+
+static int csum4(const struct sk_buff *skb, unsigned int dataoff)
+{
+ return csum_tcpudp_magic(skb->nh.iph->saddr, skb->nh.iph->daddr,
+ skb->len - dataoff, IPPROTO_TCP,
+ skb->ip_summed == CHECKSUM_HW ? skb->csum
+ : skb_checksum(skb, dataoff,
+ skb->len - dataoff, 0));
+}
+
+static int csum6(const struct sk_buff *skb, unsigned int dataoff)
+{
+ return csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr,
+ skb->len - dataoff, IPPROTO_TCP,
+ skb->ip_summed == CHECKSUM_HW ? skb->csum
+ : skb_checksum(skb, dataoff, skb->len - dataoff,
+ 0));
+}
+
+static int tcp_error4(struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info *ctinfo,
+ int pf,
+ unsigned int hooknum)
+{
+ return tcp_error(skb, dataoff, ctinfo, pf, hooknum, csum4);
+}
+
+static int tcp_error6(struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info *ctinfo,
+ int pf,
+ unsigned int hooknum)
+{
+ return tcp_error(skb, dataoff, ctinfo, pf, hooknum, csum6);
+}
+
+/* Returns verdict for packet, or -1 for invalid. */
+static int tcp_packet(struct nf_conn *conntrack,
+ const struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ int pf,
+ unsigned int hooknum)
+{
+ enum tcp_conntrack new_state, old_state;
+ enum ip_conntrack_dir dir;
+ struct tcphdr *th, _tcph;
+ unsigned long timeout;
+ unsigned int index;
+
+ th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
+ BUG_ON(th == NULL);
+
+ write_lock_bh(&tcp_lock);
+ old_state = conntrack->proto.tcp.state;
+ dir = CTINFO2DIR(ctinfo);
+ index = get_conntrack_index(th);
+ new_state = tcp_conntracks[dir][index][old_state];
+
+ switch (new_state) {
+ case TCP_CONNTRACK_IGNORE:
+ /* Either SYN in ORIGINAL
+ * or SYN/ACK in REPLY. */
+ if (index == TCP_SYNACK_SET
+ && conntrack->proto.tcp.last_index == TCP_SYN_SET
+ && conntrack->proto.tcp.last_dir != dir
+ && ntohl(th->ack_seq) ==
+ conntrack->proto.tcp.last_end) {
+ /* This SYN/ACK acknowledges a SYN that we earlier
+ * ignored as invalid. This means that the client and
+ * the server are both in sync, while the firewall is
+ * not. We kill this session and block the SYN/ACK so
+ * that the client cannot but retransmit its SYN and
+ * thus initiate a clean new session.
+ */
+ write_unlock_bh(&tcp_lock);
+ if (LOG_INVALID(IPPROTO_TCP))
+ nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
+ "nf_ct_tcp: killing out of sync session ");
+ if (del_timer(&conntrack->timeout))
+ conntrack->timeout.function((unsigned long)
+ conntrack);
+ return -NF_DROP;
+ }
+ conntrack->proto.tcp.last_index = index;
+ conntrack->proto.tcp.last_dir = dir;
+ conntrack->proto.tcp.last_seq = ntohl(th->seq);
+ conntrack->proto.tcp.last_end =
+ segment_seq_plus_len(ntohl(th->seq), skb->len, dataoff, th);
+
+ write_unlock_bh(&tcp_lock);
+ if (LOG_INVALID(IPPROTO_TCP))
+ nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
+				"nf_ct_tcp: invalid packet ignored ");
+ return NF_ACCEPT;
+ case TCP_CONNTRACK_MAX:
+ /* Invalid packet */
+ DEBUGP("nf_ct_tcp: Invalid dir=%i index=%u ostate=%u\n",
+ dir, get_conntrack_index(th),
+ old_state);
+ write_unlock_bh(&tcp_lock);
+ if (LOG_INVALID(IPPROTO_TCP))
+ nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
+ "nf_ct_tcp: invalid state ");
+ return -NF_ACCEPT;
+ case TCP_CONNTRACK_SYN_SENT:
+ if (old_state < TCP_CONNTRACK_TIME_WAIT)
+ break;
+ if ((conntrack->proto.tcp.seen[dir].flags &
+ IP_CT_TCP_FLAG_CLOSE_INIT)
+ || after(ntohl(th->seq),
+ conntrack->proto.tcp.seen[dir].td_end)) {
+ /* Attempt to reopen a closed connection.
+ * Delete this connection and look up again. */
+ write_unlock_bh(&tcp_lock);
+ if (del_timer(&conntrack->timeout))
+ conntrack->timeout.function((unsigned long)
+ conntrack);
+ return -NF_REPEAT;
+ }
+ case TCP_CONNTRACK_CLOSE:
+ if (index == TCP_RST_SET
+ && test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)
+ && conntrack->proto.tcp.last_index == TCP_SYN_SET
+ && ntohl(th->ack_seq) == conntrack->proto.tcp.last_end) {
+			/* RST sent to invalid SYN we had let through.
+ * SYN was in window then, tear down connection.
+ * We skip window checking, because packet might ACK
+ * segments we ignored in the SYN. */
+ goto in_window;
+ }
+		/* Just fall through */
+ default:
+ /* Keep compilers happy. */
+ break;
+ }
+
+ if (!tcp_in_window(&conntrack->proto.tcp, dir, index,
+ skb, dataoff, th, pf)) {
+ write_unlock_bh(&tcp_lock);
+ return -NF_ACCEPT;
+ }
+ in_window:
+ /* From now on we have got in-window packets */
+ conntrack->proto.tcp.last_index = index;
+
+ DEBUGP("tcp_conntracks: src=%u.%u.%u.%u:%hu dst=%u.%u.%u.%u:%hu "
+ "syn=%i ack=%i fin=%i rst=%i old=%i new=%i\n",
+ NIPQUAD(iph->saddr), ntohs(th->source),
+ NIPQUAD(iph->daddr), ntohs(th->dest),
+ (th->syn ? 1 : 0), (th->ack ? 1 : 0),
+ (th->fin ? 1 : 0), (th->rst ? 1 : 0),
+ old_state, new_state);
+
+ conntrack->proto.tcp.state = new_state;
+ if (old_state != new_state
+ && (new_state == TCP_CONNTRACK_FIN_WAIT
+ || new_state == TCP_CONNTRACK_CLOSE))
+ conntrack->proto.tcp.seen[dir].flags |= IP_CT_TCP_FLAG_CLOSE_INIT;
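+	/* Too many unanswered retransmissions: fall back to the shorter
+	 * retransmission timeout instead of the per-state timeout. */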
+ timeout = conntrack->proto.tcp.retrans >= nf_ct_tcp_max_retrans
+ && *tcp_timeouts[new_state] > nf_ct_tcp_timeout_max_retrans
+ ? nf_ct_tcp_timeout_max_retrans : *tcp_timeouts[new_state];
+ write_unlock_bh(&tcp_lock);
+
+ nf_conntrack_event_cache(IPCT_PROTOINFO_VOLATILE, skb);
+ if (new_state != old_state)
+ nf_conntrack_event_cache(IPCT_PROTOINFO, skb);
+
+ if (!test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)) {
+ /* If only reply is a RST, we can consider ourselves not to
+ have an established connection: this is a fairly common
+ problem case, so we can delete the conntrack
+ immediately. --RR */
+ if (th->rst) {
+ if (del_timer(&conntrack->timeout))
+ conntrack->timeout.function((unsigned long)
+ conntrack);
+ return NF_ACCEPT;
+ }
+ } else if (!test_bit(IPS_ASSURED_BIT, &conntrack->status)
+ && (old_state == TCP_CONNTRACK_SYN_RECV
+ || old_state == TCP_CONNTRACK_ESTABLISHED)
+ && new_state == TCP_CONNTRACK_ESTABLISHED) {
+		/* Set ASSURED if we see a valid ack in ESTABLISHED
+ after SYN_RECV or a valid answer for a picked up
+ connection. */
+ set_bit(IPS_ASSURED_BIT, &conntrack->status);
+ nf_conntrack_event_cache(IPCT_STATUS, skb);
+ }
+ nf_ct_refresh_acct(conntrack, ctinfo, skb, timeout);
+
+ return NF_ACCEPT;
+}
+
+/* Called when a new connection for this protocol found. */
+static int tcp_new(struct nf_conn *conntrack,
+ const struct sk_buff *skb,
+ unsigned int dataoff)
+{
+ enum tcp_conntrack new_state;
+ struct tcphdr *th, _tcph;
+#ifdef DEBUGP_VARS
+ struct ip_ct_tcp_state *sender = &conntrack->proto.tcp.seen[0];
+ struct ip_ct_tcp_state *receiver = &conntrack->proto.tcp.seen[1];
+#endif
+
+ th = skb_header_pointer(skb, dataoff, sizeof(_tcph), &_tcph);
+ BUG_ON(th == NULL);
+
+ /* Don't need lock here: this conntrack not in circulation yet */
+ new_state
+ = tcp_conntracks[0][get_conntrack_index(th)]
+ [TCP_CONNTRACK_NONE];
+
+ /* Invalid: delete conntrack */
+ if (new_state >= TCP_CONNTRACK_MAX) {
+ DEBUGP("nf_ct_tcp: invalid new deleting.\n");
+ return 0;
+ }
+
+ if (new_state == TCP_CONNTRACK_SYN_SENT) {
+ /* SYN packet */
+ conntrack->proto.tcp.seen[0].td_end =
+ segment_seq_plus_len(ntohl(th->seq), skb->len,
+ dataoff, th);
+ conntrack->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
+ if (conntrack->proto.tcp.seen[0].td_maxwin == 0)
+ conntrack->proto.tcp.seen[0].td_maxwin = 1;
+ conntrack->proto.tcp.seen[0].td_maxend =
+ conntrack->proto.tcp.seen[0].td_end;
+
+ tcp_options(skb, dataoff, th, &conntrack->proto.tcp.seen[0]);
+ conntrack->proto.tcp.seen[1].flags = 0;
+ conntrack->proto.tcp.seen[0].loose =
+ conntrack->proto.tcp.seen[1].loose = 0;
+ } else if (nf_ct_tcp_loose == 0) {
+ /* Don't try to pick up connections. */
+ return 0;
+ } else {
+ /*
+ * We are in the middle of a connection,
+ * its history is lost for us.
+ * Let's try to use the data from the packet.
+ */
+ conntrack->proto.tcp.seen[0].td_end =
+ segment_seq_plus_len(ntohl(th->seq), skb->len,
+ dataoff, th);
+ conntrack->proto.tcp.seen[0].td_maxwin = ntohs(th->window);
+ if (conntrack->proto.tcp.seen[0].td_maxwin == 0)
+ conntrack->proto.tcp.seen[0].td_maxwin = 1;
+ conntrack->proto.tcp.seen[0].td_maxend =
+ conntrack->proto.tcp.seen[0].td_end +
+ conntrack->proto.tcp.seen[0].td_maxwin;
+ conntrack->proto.tcp.seen[0].td_scale = 0;
+
+ /* We assume SACK. Should we assume window scaling too? */
+ conntrack->proto.tcp.seen[0].flags =
+ conntrack->proto.tcp.seen[1].flags = IP_CT_TCP_FLAG_SACK_PERM;
+ conntrack->proto.tcp.seen[0].loose =
+ conntrack->proto.tcp.seen[1].loose = nf_ct_tcp_loose;
+ }
+
+ conntrack->proto.tcp.seen[1].td_end = 0;
+ conntrack->proto.tcp.seen[1].td_maxend = 0;
+ conntrack->proto.tcp.seen[1].td_maxwin = 1;
+ conntrack->proto.tcp.seen[1].td_scale = 0;
+
+ /* tcp_packet will set them */
+ conntrack->proto.tcp.state = TCP_CONNTRACK_NONE;
+ conntrack->proto.tcp.last_index = TCP_NONE_SET;
+
+ DEBUGP("tcp_new: sender end=%u maxend=%u maxwin=%u scale=%i "
+ "receiver end=%u maxend=%u maxwin=%u scale=%i\n",
+ sender->td_end, sender->td_maxend, sender->td_maxwin,
+ sender->td_scale,
+ receiver->td_end, receiver->td_maxend, receiver->td_maxwin,
+ receiver->td_scale);
+ return 1;
+}
+
+struct nf_conntrack_protocol nf_conntrack_protocol_tcp4 =
+{
+ .l3proto = PF_INET,
+ .proto = IPPROTO_TCP,
+ .name = "tcp",
+ .pkt_to_tuple = tcp_pkt_to_tuple,
+ .invert_tuple = tcp_invert_tuple,
+ .print_tuple = tcp_print_tuple,
+ .print_conntrack = tcp_print_conntrack,
+ .packet = tcp_packet,
+ .new = tcp_new,
+ .error = tcp_error4,
+};
+
+struct nf_conntrack_protocol nf_conntrack_protocol_tcp6 =
+{
+ .l3proto = PF_INET6,
+ .proto = IPPROTO_TCP,
+ .name = "tcp",
+ .pkt_to_tuple = tcp_pkt_to_tuple,
+ .invert_tuple = tcp_invert_tuple,
+ .print_tuple = tcp_print_tuple,
+ .print_conntrack = tcp_print_conntrack,
+ .packet = tcp_packet,
+ .new = tcp_new,
+ .error = tcp_error6,
+};
+
+EXPORT_SYMBOL(nf_conntrack_protocol_tcp4);
+EXPORT_SYMBOL(nf_conntrack_protocol_tcp6);
diff --git a/net/netfilter/nf_conntrack_proto_udp.c b/net/netfilter/nf_conntrack_proto_udp.c
new file mode 100644
index 00000000000..3cae7ce420d
--- /dev/null
+++ b/net/netfilter/nf_conntrack_proto_udp.c
@@ -0,0 +1,216 @@
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ * - enable working with Layer 3 protocol independent connection tracking.
+ *
+ * Derived from net/ipv4/netfilter/ip_conntrack_proto_udp.c
+ */
+
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/timer.h>
+#include <linux/module.h>
+#include <linux/netfilter.h>
+#include <linux/udp.h>
+#include <linux/seq_file.h>
+#include <linux/skbuff.h>
+#include <linux/ipv6.h>
+#include <net/ip6_checksum.h>
+#include <net/checksum.h>
+#include <linux/netfilter_ipv4.h>
+#include <linux/netfilter_ipv6.h>
+#include <net/netfilter/nf_conntrack_protocol.h>
+
+unsigned long nf_ct_udp_timeout = 30*HZ;
+unsigned long nf_ct_udp_timeout_stream = 180*HZ;
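+/* 30 seconds for a single unreplied datagram, 180 seconds once traffic has
+   been seen in both directions; see udp_packet() below. */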
+
+static int udp_pkt_to_tuple(const struct sk_buff *skb,
+ unsigned int dataoff,
+ struct nf_conntrack_tuple *tuple)
+{
+ struct udphdr _hdr, *hp;
+
+ /* Actually only need first 8 bytes. */
+ hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
+ if (hp == NULL)
+ return 0;
+
+ tuple->src.u.udp.port = hp->source;
+ tuple->dst.u.udp.port = hp->dest;
+
+ return 1;
+}
+
+static int udp_invert_tuple(struct nf_conntrack_tuple *tuple,
+ const struct nf_conntrack_tuple *orig)
+{
+ tuple->src.u.udp.port = orig->dst.u.udp.port;
+ tuple->dst.u.udp.port = orig->src.u.udp.port;
+ return 1;
+}
+
+/* Print out the per-protocol part of the tuple. */
+static int udp_print_tuple(struct seq_file *s,
+ const struct nf_conntrack_tuple *tuple)
+{
+ return seq_printf(s, "sport=%hu dport=%hu ",
+ ntohs(tuple->src.u.udp.port),
+ ntohs(tuple->dst.u.udp.port));
+}
+
+/* Print out the private part of the conntrack. */
+static int udp_print_conntrack(struct seq_file *s,
+ const struct nf_conn *conntrack)
+{
+ return 0;
+}
+
+/* Returns verdict for packet, and may modify conntrack type */
+static int udp_packet(struct nf_conn *conntrack,
+ const struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info ctinfo,
+ int pf,
+ unsigned int hooknum)
+{
+ /* If we've seen traffic both ways, this is some kind of UDP
+ stream. Extend timeout. */
+ if (test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)) {
+ nf_ct_refresh_acct(conntrack, ctinfo, skb,
+ nf_ct_udp_timeout_stream);
+ /* Also, more likely to be important, and not a probe */
+ if (!test_and_set_bit(IPS_ASSURED_BIT, &conntrack->status))
+ nf_conntrack_event_cache(IPCT_STATUS, skb);
+ } else
+ nf_ct_refresh_acct(conntrack, ctinfo, skb, nf_ct_udp_timeout);
+
+ return NF_ACCEPT;
+}
+
+/* Called when a new connection for this protocol found. */
+static int udp_new(struct nf_conn *conntrack, const struct sk_buff *skb,
+ unsigned int dataoff)
+{
+ return 1;
+}
+
+static int udp_error(struct sk_buff *skb, unsigned int dataoff,
+ enum ip_conntrack_info *ctinfo,
+ int pf,
+ unsigned int hooknum,
+ int (*csum)(const struct sk_buff *, unsigned int))
+{
+ unsigned int udplen = skb->len - dataoff;
+ struct udphdr _hdr, *hdr;
+
+ /* Header is too small? */
+ hdr = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr);
+ if (hdr == NULL) {
+ if (LOG_INVALID(IPPROTO_UDP))
+ nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
+ "nf_ct_udp: short packet ");
+ return -NF_ACCEPT;
+ }
+
+ /* Truncated/malformed packets */
+ if (ntohs(hdr->len) > udplen || ntohs(hdr->len) < sizeof(*hdr)) {
+ if (LOG_INVALID(IPPROTO_UDP))
+ nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
+ "nf_ct_udp: truncated/malformed packet ");
+ return -NF_ACCEPT;
+ }
+
+ /* Packet with no checksum */
+ if (!hdr->check)
+ return NF_ACCEPT;
+
+ /* Checksum invalid? Ignore.
+ * We skip checking packets on the outgoing path
+	 * because the semantics of CHECKSUM_HW are different there
+ * and moreover root might send raw packets.
+ * FIXME: Source route IP option packets --RR */
+ if (((pf == PF_INET && hooknum == NF_IP_PRE_ROUTING) ||
+ (pf == PF_INET6 && hooknum == NF_IP6_PRE_ROUTING))
+ && skb->ip_summed != CHECKSUM_UNNECESSARY
+ && csum(skb, dataoff)) {
+ if (LOG_INVALID(IPPROTO_UDP))
+ nf_log_packet(pf, 0, skb, NULL, NULL, NULL,
+ "nf_ct_udp: bad UDP checksum ");
+ return -NF_ACCEPT;
+ }
+
+ return NF_ACCEPT;
+}
+
+static int csum4(const struct sk_buff *skb, unsigned int dataoff)
+{
+ return csum_tcpudp_magic(skb->nh.iph->saddr, skb->nh.iph->daddr,
+ skb->len - dataoff, IPPROTO_UDP,
+ skb->ip_summed == CHECKSUM_HW ? skb->csum
+ : skb_checksum(skb, dataoff,
+ skb->len - dataoff, 0));
+}
+
+static int csum6(const struct sk_buff *skb, unsigned int dataoff)
+{
+ return csum_ipv6_magic(&skb->nh.ipv6h->saddr, &skb->nh.ipv6h->daddr,
+ skb->len - dataoff, IPPROTO_UDP,
+ skb->ip_summed == CHECKSUM_HW ? skb->csum
+ : skb_checksum(skb, dataoff, skb->len - dataoff,
+ 0));
+}
+
+static int udp_error4(struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info *ctinfo,
+ int pf,
+ unsigned int hooknum)
+{
+ return udp_error(skb, dataoff, ctinfo, pf, hooknum, csum4);
+}
+
+static int udp_error6(struct sk_buff *skb,
+ unsigned int dataoff,
+ enum ip_conntrack_info *ctinfo,
+ int pf,
+ unsigned int hooknum)
+{
+ return udp_error(skb, dataoff, ctinfo, pf, hooknum, csum6);
+}
+
+struct nf_conntrack_protocol nf_conntrack_protocol_udp4 =
+{
+ .l3proto = PF_INET,
+ .proto = IPPROTO_UDP,
+ .name = "udp",
+ .pkt_to_tuple = udp_pkt_to_tuple,
+ .invert_tuple = udp_invert_tuple,
+ .print_tuple = udp_print_tuple,
+ .print_conntrack = udp_print_conntrack,
+ .packet = udp_packet,
+ .new = udp_new,
+ .error = udp_error4,
+};
+
+struct nf_conntrack_protocol nf_conntrack_protocol_udp6 =
+{
+ .l3proto = PF_INET6,
+ .proto = IPPROTO_UDP,
+ .name = "udp",
+ .pkt_to_tuple = udp_pkt_to_tuple,
+ .invert_tuple = udp_invert_tuple,
+ .print_tuple = udp_print_tuple,
+ .print_conntrack = udp_print_conntrack,
+ .packet = udp_packet,
+ .new = udp_new,
+ .error = udp_error6,
+};
+
+EXPORT_SYMBOL(nf_conntrack_protocol_udp4);
+EXPORT_SYMBOL(nf_conntrack_protocol_udp6);
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
new file mode 100644
index 00000000000..45224db4fe2
--- /dev/null
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -0,0 +1,869 @@
+/* This file contains all the functions required for the standalone
+ nf_conntrack module.
+
+ These are not required by the compatibility layer.
+*/
+
+/* (C) 1999-2001 Paul `Rusty' Russell
+ * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * 16 Dec 2003: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
+ * - generalize L3 protocol dependent part.
+ *
+ * Derived from net/ipv4/netfilter/ip_conntrack_standalone.c
+ */
+
+#include <linux/config.h>
+#include <linux/types.h>
+#include <linux/netfilter.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/percpu.h>
+#include <linux/netdevice.h>
+#ifdef CONFIG_SYSCTL
+#include <linux/sysctl.h>
+#endif
+
+#define ASSERT_READ_LOCK(x)
+#define ASSERT_WRITE_LOCK(x)
+
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_l3proto.h>
+#include <net/netfilter/nf_conntrack_protocol.h>
+#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <linux/netfilter_ipv4/listhelp.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(format, args...)
+#endif
+
+MODULE_LICENSE("GPL");
+
+extern atomic_t nf_conntrack_count;
+DECLARE_PER_CPU(struct ip_conntrack_stat, nf_conntrack_stat);
+
+static int kill_l3proto(struct nf_conn *i, void *data)
+{
+ return (i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num ==
+ ((struct nf_conntrack_l3proto *)data)->l3proto);
+}
+
+static int kill_proto(struct nf_conn *i, void *data)
+{
+ struct nf_conntrack_protocol *proto;
+ proto = (struct nf_conntrack_protocol *)data;
+ return (i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum ==
+ proto->proto) &&
+ (i->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num ==
+ proto->l3proto);
+}
+
+#ifdef CONFIG_PROC_FS
+static int
+print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple,
+ struct nf_conntrack_l3proto *l3proto,
+ struct nf_conntrack_protocol *proto)
+{
+ return l3proto->print_tuple(s, tuple) || proto->print_tuple(s, tuple);
+}
+
+#ifdef CONFIG_NF_CT_ACCT
+static unsigned int
+seq_print_counters(struct seq_file *s,
+ const struct ip_conntrack_counter *counter)
+{
+ return seq_printf(s, "packets=%llu bytes=%llu ",
+ (unsigned long long)counter->packets,
+ (unsigned long long)counter->bytes);
+}
+#else
+#define seq_print_counters(x, y) 0
+#endif
+
+struct ct_iter_state {
+ unsigned int bucket;
+};
+
+static struct list_head *ct_get_first(struct seq_file *seq)
+{
+ struct ct_iter_state *st = seq->private;
+
+ for (st->bucket = 0;
+ st->bucket < nf_conntrack_htable_size;
+ st->bucket++) {
+ if (!list_empty(&nf_conntrack_hash[st->bucket]))
+ return nf_conntrack_hash[st->bucket].next;
+ }
+ return NULL;
+}
+
+static struct list_head *ct_get_next(struct seq_file *seq, struct list_head *head)
+{
+ struct ct_iter_state *st = seq->private;
+
+ head = head->next;
+ while (head == &nf_conntrack_hash[st->bucket]) {
+ if (++st->bucket >= nf_conntrack_htable_size)
+ return NULL;
+ head = nf_conntrack_hash[st->bucket].next;
+ }
+ return head;
+}
+
+static struct list_head *ct_get_idx(struct seq_file *seq, loff_t pos)
+{
+ struct list_head *head = ct_get_first(seq);
+
+ if (head)
+ while (pos && (head = ct_get_next(seq, head)))
+ pos--;
+ return pos ? NULL : head;
+}
+
+static void *ct_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ read_lock_bh(&nf_conntrack_lock);
+ return ct_get_idx(seq, *pos);
+}
+
+static void *ct_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ (*pos)++;
+ return ct_get_next(s, v);
+}
+
+static void ct_seq_stop(struct seq_file *s, void *v)
+{
+ read_unlock_bh(&nf_conntrack_lock);
+}
+
+/* return 0 on success, -ENOSPC in case of error */
+static int ct_seq_show(struct seq_file *s, void *v)
+{
+ const struct nf_conntrack_tuple_hash *hash = v;
+ const struct nf_conn *conntrack = nf_ct_tuplehash_to_ctrack(hash);
+ struct nf_conntrack_l3proto *l3proto;
+ struct nf_conntrack_protocol *proto;
+
+ ASSERT_READ_LOCK(&nf_conntrack_lock);
+ NF_CT_ASSERT(conntrack);
+
+ /* we only want to print DIR_ORIGINAL */
+ if (NF_CT_DIRECTION(hash))
+ return 0;
+
+ l3proto = nf_ct_find_l3proto(conntrack->tuplehash[IP_CT_DIR_ORIGINAL]
+ .tuple.src.l3num);
+
+ NF_CT_ASSERT(l3proto);
+ proto = nf_ct_find_proto(conntrack->tuplehash[IP_CT_DIR_ORIGINAL]
+ .tuple.src.l3num,
+ conntrack->tuplehash[IP_CT_DIR_ORIGINAL]
+ .tuple.dst.protonum);
+ NF_CT_ASSERT(proto);
+
+ if (seq_printf(s, "%-8s %u %-8s %u %ld ",
+ l3proto->name,
+ conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple.src.l3num,
+ proto->name,
+ conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple.dst.protonum,
+ timer_pending(&conntrack->timeout)
+ ? (long)(conntrack->timeout.expires - jiffies)/HZ : 0) != 0)
+ return -ENOSPC;
+
+ if (l3proto->print_conntrack(s, conntrack))
+ return -ENOSPC;
+
+ if (proto->print_conntrack(s, conntrack))
+ return -ENOSPC;
+
+ if (print_tuple(s, &conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+ l3proto, proto))
+ return -ENOSPC;
+
+ if (seq_print_counters(s, &conntrack->counters[IP_CT_DIR_ORIGINAL]))
+ return -ENOSPC;
+
+ if (!(test_bit(IPS_SEEN_REPLY_BIT, &conntrack->status)))
+ if (seq_printf(s, "[UNREPLIED] "))
+ return -ENOSPC;
+
+ if (print_tuple(s, &conntrack->tuplehash[IP_CT_DIR_REPLY].tuple,
+ l3proto, proto))
+ return -ENOSPC;
+
+ if (seq_print_counters(s, &conntrack->counters[IP_CT_DIR_REPLY]))
+ return -ENOSPC;
+
+ if (test_bit(IPS_ASSURED_BIT, &conntrack->status))
+ if (seq_printf(s, "[ASSURED] "))
+ return -ENOSPC;
+
+#if defined(CONFIG_NF_CONNTRACK_MARK)
+ if (seq_printf(s, "mark=%u ", conntrack->mark))
+ return -ENOSPC;
+#endif
+
+ if (seq_printf(s, "use=%u\n", atomic_read(&conntrack->ct_general.use)))
+ return -ENOSPC;
+
+ return 0;
+}
+
+static struct seq_operations ct_seq_ops = {
+ .start = ct_seq_start,
+ .next = ct_seq_next,
+ .stop = ct_seq_stop,
+ .show = ct_seq_show
+};
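[Editor's note, not part of the patch: ct_seq_show() above produces one line per tracked connection in /proc/net/nf_conntrack. The line below is purely illustrative (addresses, ports and counters are invented, and the counter and mark fields only appear with CONFIG_NF_CT_ACCT and CONFIG_NF_CONNTRACK_MARK enabled); the per-protocol print_tuple/print_conntrack callbacks supply the state and src=/dst=/sport=/dport= fields:

    ipv4     2 tcp      6 431999 ESTABLISHED src=10.0.0.2 dst=10.0.0.1 sport=34567 dport=22 packets=13 bytes=1432 src=10.0.0.1 dst=10.0.0.2 sport=22 dport=34567 packets=10 bytes=1976 [ASSURED] mark=0 use=1]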
+
+static int ct_open(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq;
+ struct ct_iter_state *st;
+ int ret;
+
+ st = kmalloc(sizeof(struct ct_iter_state), GFP_KERNEL);
+ if (st == NULL)
+ return -ENOMEM;
+ ret = seq_open(file, &ct_seq_ops);
+ if (ret)
+ goto out_free;
+ seq = file->private_data;
+ seq->private = st;
+ memset(st, 0, sizeof(struct ct_iter_state));
+ return ret;
+out_free:
+ kfree(st);
+ return ret;
+}
+
+static struct file_operations ct_file_ops = {
+ .owner = THIS_MODULE,
+ .open = ct_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private,
+};
+
+/* expects */
+static void *exp_seq_start(struct seq_file *s, loff_t *pos)
+{
+ struct list_head *e = &nf_conntrack_expect_list;
+ loff_t i;
+
+ /* The seq_file API strangely calls stop even if start fails,
+ * thus we need to grab the lock here since stop unlocks. */
+ read_lock_bh(&nf_conntrack_lock);
+
+ if (list_empty(e))
+ return NULL;
+
+ for (i = 0; i <= *pos; i++) {
+ e = e->next;
+ if (e == &nf_conntrack_expect_list)
+ return NULL;
+ }
+ return e;
+}
+
+static void *exp_seq_next(struct seq_file *s, void *v, loff_t *pos)
+{
+ struct list_head *e = v;
+
+ ++*pos;
+ e = e->next;
+
+ if (e == &nf_conntrack_expect_list)
+ return NULL;
+
+ return e;
+}
+
+static void exp_seq_stop(struct seq_file *s, void *v)
+{
+ read_unlock_bh(&nf_conntrack_lock);
+}
+
+static int exp_seq_show(struct seq_file *s, void *v)
+{
+ struct nf_conntrack_expect *expect = v;
+
+ if (expect->timeout.function)
+ seq_printf(s, "%ld ", timer_pending(&expect->timeout)
+ ? (long)(expect->timeout.expires - jiffies)/HZ : 0);
+ else
+ seq_printf(s, "- ");
+ seq_printf(s, "l3proto = %u proto=%u ",
+ expect->tuple.src.l3num,
+ expect->tuple.dst.protonum);
+ print_tuple(s, &expect->tuple,
+ nf_ct_find_l3proto(expect->tuple.src.l3num),
+ nf_ct_find_proto(expect->tuple.src.l3num,
+ expect->tuple.dst.protonum));
+ return seq_putc(s, '\n');
+}
+
+static struct seq_operations exp_seq_ops = {
+ .start = exp_seq_start,
+ .next = exp_seq_next,
+ .stop = exp_seq_stop,
+ .show = exp_seq_show
+};
+
+static int exp_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &exp_seq_ops);
+}
+
+static struct file_operations exp_file_ops = {
+ .owner = THIS_MODULE,
+ .open = exp_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release
+};
+
+static void *ct_cpu_seq_start(struct seq_file *seq, loff_t *pos)
+{
+ int cpu;
+
+ if (*pos == 0)
+ return SEQ_START_TOKEN;
+
+ for (cpu = *pos-1; cpu < NR_CPUS; ++cpu) {
+ if (!cpu_possible(cpu))
+ continue;
+ *pos = cpu + 1;
+ return &per_cpu(nf_conntrack_stat, cpu);
+ }
+
+ return NULL;
+}
+
+static void *ct_cpu_seq_next(struct seq_file *seq, void *v, loff_t *pos)
+{
+ int cpu;
+
+ for (cpu = *pos; cpu < NR_CPUS; ++cpu) {
+ if (!cpu_possible(cpu))
+ continue;
+ *pos = cpu + 1;
+ return &per_cpu(nf_conntrack_stat, cpu);
+ }
+
+ return NULL;
+}
+
+static void ct_cpu_seq_stop(struct seq_file *seq, void *v)
+{
+}
+
+static int ct_cpu_seq_show(struct seq_file *seq, void *v)
+{
+ unsigned int nr_conntracks = atomic_read(&nf_conntrack_count);
+ struct ip_conntrack_stat *st = v;
+
+ if (v == SEQ_START_TOKEN) {
+ seq_printf(seq, "entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete\n");
+ return 0;
+ }
+
+ seq_printf(seq, "%08x %08x %08x %08x %08x %08x %08x %08x "
+ "%08x %08x %08x %08x %08x %08x %08x %08x \n",
+ nr_conntracks,
+ st->searched,
+ st->found,
+ st->new,
+ st->invalid,
+ st->ignore,
+ st->delete,
+ st->delete_list,
+ st->insert,
+ st->insert_failed,
+ st->drop,
+ st->early_drop,
+ st->error,
+
+ st->expect_new,
+ st->expect_create,
+ st->expect_delete
+ );
+ return 0;
+}
+
+static struct seq_operations ct_cpu_seq_ops = {
+ .start = ct_cpu_seq_start,
+ .next = ct_cpu_seq_next,
+ .stop = ct_cpu_seq_stop,
+ .show = ct_cpu_seq_show,
+};
+
+static int ct_cpu_seq_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &ct_cpu_seq_ops);
+}
+
+static struct file_operations ct_cpu_seq_fops = {
+ .owner = THIS_MODULE,
+ .open = ct_cpu_seq_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release_private,
+};
+#endif /* CONFIG_PROC_FS */
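[Editor's note, not part of the patch: ct_cpu_seq_show() above emits one hexadecimal row per possible CPU under /proc/net/stat/nf_conntrack, preceded by the header line; the first column (entries) repeats the global conntrack count on every row. An illustrative reading with invented values:

    entries searched found new invalid ignore delete delete_list insert insert_failed drop early_drop icmp_error expect_new expect_create expect_delete
    0000012a 000043c1 00003e80 000001f4 00000002 00000000 000001d9 000001d9 000001f4 00000000 00000000 00000000 00000000 00000000 00000000 00000000]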
+
+/* Sysctl support */
+
+#ifdef CONFIG_SYSCTL
+
+/* From nf_conntrack_core.c */
+extern int nf_conntrack_max;
+extern unsigned int nf_conntrack_htable_size;
+
+/* From nf_conntrack_proto_tcp.c */
+extern unsigned long nf_ct_tcp_timeout_syn_sent;
+extern unsigned long nf_ct_tcp_timeout_syn_recv;
+extern unsigned long nf_ct_tcp_timeout_established;
+extern unsigned long nf_ct_tcp_timeout_fin_wait;
+extern unsigned long nf_ct_tcp_timeout_close_wait;
+extern unsigned long nf_ct_tcp_timeout_last_ack;
+extern unsigned long nf_ct_tcp_timeout_time_wait;
+extern unsigned long nf_ct_tcp_timeout_close;
+extern unsigned long nf_ct_tcp_timeout_max_retrans;
+extern int nf_ct_tcp_loose;
+extern int nf_ct_tcp_be_liberal;
+extern int nf_ct_tcp_max_retrans;
+
+/* From nf_conntrack_proto_udp.c */
+extern unsigned long nf_ct_udp_timeout;
+extern unsigned long nf_ct_udp_timeout_stream;
+
+/* From nf_conntrack_proto_generic.c */
+extern unsigned long nf_ct_generic_timeout;
+
+/* Log invalid packets of a given protocol */
+static int log_invalid_proto_min = 0;
+static int log_invalid_proto_max = 255;
+
+static struct ctl_table_header *nf_ct_sysctl_header;
+
+static ctl_table nf_ct_sysctl_table[] = {
+ {
+ .ctl_name = NET_NF_CONNTRACK_MAX,
+ .procname = "nf_conntrack_max",
+ .data = &nf_conntrack_max,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+ {
+ .ctl_name = NET_NF_CONNTRACK_COUNT,
+ .procname = "nf_conntrack_count",
+ .data = &nf_conntrack_count,
+ .maxlen = sizeof(int),
+ .mode = 0444,
+ .proc_handler = &proc_dointvec,
+ },
+ {
+ .ctl_name = NET_NF_CONNTRACK_BUCKETS,
+ .procname = "nf_conntrack_buckets",
+ .data = &nf_conntrack_htable_size,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0444,
+ .proc_handler = &proc_dointvec,
+ },
+ {
+ .ctl_name = NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_SENT,
+ .procname = "nf_conntrack_tcp_timeout_syn_sent",
+ .data = &nf_ct_tcp_timeout_syn_sent,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_jiffies,
+ },
+ {
+ .ctl_name = NET_NF_CONNTRACK_TCP_TIMEOUT_SYN_RECV,
+ .procname = "nf_conntrack_tcp_timeout_syn_recv",
+ .data = &nf_ct_tcp_timeout_syn_recv,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_jiffies,
+ },
+ {
+ .ctl_name = NET_NF_CONNTRACK_TCP_TIMEOUT_ESTABLISHED,
+ .procname = "nf_conntrack_tcp_timeout_established",
+ .data = &nf_ct_tcp_timeout_established,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_jiffies,
+ },
+ {
+ .ctl_name = NET_NF_CONNTRACK_TCP_TIMEOUT_FIN_WAIT,
+ .procname = "nf_conntrack_tcp_timeout_fin_wait",
+ .data = &nf_ct_tcp_timeout_fin_wait,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_jiffies,
+ },
+ {
+ .ctl_name = NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE_WAIT,
+ .procname = "nf_conntrack_tcp_timeout_close_wait",
+ .data = &nf_ct_tcp_timeout_close_wait,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_jiffies,
+ },
+ {
+ .ctl_name = NET_NF_CONNTRACK_TCP_TIMEOUT_LAST_ACK,
+ .procname = "nf_conntrack_tcp_timeout_last_ack",
+ .data = &nf_ct_tcp_timeout_last_ack,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_jiffies,
+ },
+ {
+ .ctl_name = NET_NF_CONNTRACK_TCP_TIMEOUT_TIME_WAIT,
+ .procname = "nf_conntrack_tcp_timeout_time_wait",
+ .data = &nf_ct_tcp_timeout_time_wait,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_jiffies,
+ },
+ {
+ .ctl_name = NET_NF_CONNTRACK_TCP_TIMEOUT_CLOSE,
+ .procname = "nf_conntrack_tcp_timeout_close",
+ .data = &nf_ct_tcp_timeout_close,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_jiffies,
+ },
+ {
+ .ctl_name = NET_NF_CONNTRACK_UDP_TIMEOUT,
+ .procname = "nf_conntrack_udp_timeout",
+ .data = &nf_ct_udp_timeout,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_jiffies,
+ },
+ {
+ .ctl_name = NET_NF_CONNTRACK_UDP_TIMEOUT_STREAM,
+ .procname = "nf_conntrack_udp_timeout_stream",
+ .data = &nf_ct_udp_timeout_stream,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_jiffies,
+ },
+ {
+ .ctl_name = NET_NF_CONNTRACK_GENERIC_TIMEOUT,
+ .procname = "nf_conntrack_generic_timeout",
+ .data = &nf_ct_generic_timeout,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_jiffies,
+ },
+ {
+ .ctl_name = NET_NF_CONNTRACK_LOG_INVALID,
+ .procname = "nf_conntrack_log_invalid",
+ .data = &nf_ct_log_invalid,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_minmax,
+ .strategy = &sysctl_intvec,
+ .extra1 = &log_invalid_proto_min,
+ .extra2 = &log_invalid_proto_max,
+ },
+ {
+ .ctl_name = NET_NF_CONNTRACK_TCP_TIMEOUT_MAX_RETRANS,
+ .procname = "nf_conntrack_tcp_timeout_max_retrans",
+ .data = &nf_ct_tcp_timeout_max_retrans,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec_jiffies,
+ },
+ {
+ .ctl_name = NET_NF_CONNTRACK_TCP_LOOSE,
+ .procname = "nf_conntrack_tcp_loose",
+ .data = &nf_ct_tcp_loose,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+ {
+ .ctl_name = NET_NF_CONNTRACK_TCP_BE_LIBERAL,
+ .procname = "nf_conntrack_tcp_be_liberal",
+ .data = &nf_ct_tcp_be_liberal,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+ {
+ .ctl_name = NET_NF_CONNTRACK_TCP_MAX_RETRANS,
+ .procname = "nf_conntrack_tcp_max_retrans",
+ .data = &nf_ct_tcp_max_retrans,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+
+ { .ctl_name = 0 }
+};
+
+#define NET_NF_CONNTRACK_MAX 2089
+
+static ctl_table nf_ct_netfilter_table[] = {
+ {
+ .ctl_name = NET_NETFILTER,
+ .procname = "netfilter",
+ .mode = 0555,
+ .child = nf_ct_sysctl_table,
+ },
+ {
+ .ctl_name = NET_NF_CONNTRACK_MAX,
+ .procname = "nf_conntrack_max",
+ .data = &nf_conntrack_max,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+ { .ctl_name = 0 }
+};
+
+static ctl_table nf_ct_net_table[] = {
+ {
+ .ctl_name = CTL_NET,
+ .procname = "net",
+ .mode = 0555,
+ .child = nf_ct_netfilter_table,
+ },
+ { .ctl_name = 0 }
+};
+EXPORT_SYMBOL(nf_ct_log_invalid);
+#endif /* CONFIG_SYSCTL */
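[Editor's note, not part of the patch: with CONFIG_SYSCTL enabled, the tables above surface the tunables under /proc/sys. The resulting paths include, for example:

    /proc/sys/net/nf_conntrack_max
    /proc/sys/net/netfilter/nf_conntrack_max
    /proc/sys/net/netfilter/nf_conntrack_count        (read-only, 0444)
    /proc/sys/net/netfilter/nf_conntrack_buckets      (read-only, 0444)
    /proc/sys/net/netfilter/nf_conntrack_tcp_timeout_established
    /proc/sys/net/netfilter/nf_conntrack_udp_timeout_stream

and so on for the remaining entries. A timeout can be adjusted at run time with, e.g., `sysctl -w net.netfilter.nf_conntrack_tcp_timeout_established=7200`; the seconds-to-jiffies conversion is handled by proc_dointvec_jiffies.]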
+
+static int init_or_cleanup(int init)
+{
+#ifdef CONFIG_PROC_FS
+ struct proc_dir_entry *proc, *proc_exp, *proc_stat;
+#endif
+ int ret = 0;
+
+ if (!init) goto cleanup;
+
+ ret = nf_conntrack_init();
+ if (ret < 0)
+ goto cleanup_nothing;
+
+#ifdef CONFIG_PROC_FS
+ proc = proc_net_fops_create("nf_conntrack", 0440, &ct_file_ops);
+ if (!proc) goto cleanup_init;
+
+ proc_exp = proc_net_fops_create("nf_conntrack_expect", 0440,
+ &exp_file_ops);
+ if (!proc_exp) goto cleanup_proc;
+
+ proc_stat = create_proc_entry("nf_conntrack", S_IRUGO, proc_net_stat);
+ if (!proc_stat)
+ goto cleanup_proc_exp;
+
+ proc_stat->proc_fops = &ct_cpu_seq_fops;
+ proc_stat->owner = THIS_MODULE;
+#endif
+#ifdef CONFIG_SYSCTL
+ nf_ct_sysctl_header = register_sysctl_table(nf_ct_net_table, 0);
+ if (nf_ct_sysctl_header == NULL) {
+ printk("nf_conntrack: can't register to sysctl.\n");
+ ret = -ENOMEM;
+ goto cleanup_proc_stat;
+ }
+#endif
+
+ return ret;
+
+ cleanup:
+#ifdef CONFIG_SYSCTL
+ unregister_sysctl_table(nf_ct_sysctl_header);
+ cleanup_proc_stat:
+#endif
+#ifdef CONFIG_PROC_FS
+ proc_net_remove("nf_conntrack_stat");
+ cleanup_proc_exp:
+ proc_net_remove("nf_conntrack_expect");
+ cleanup_proc:
+ proc_net_remove("nf_conntrack");
+ cleanup_init:
+#endif /* CONFIG_PROC_FS */
+ nf_conntrack_cleanup();
+ cleanup_nothing:
+ return ret;
+}
+
+int nf_conntrack_l3proto_register(struct nf_conntrack_l3proto *proto)
+{
+ int ret = 0;
+
+ write_lock_bh(&nf_conntrack_lock);
+ if (nf_ct_l3protos[proto->l3proto] != &nf_conntrack_generic_l3proto) {
+ ret = -EBUSY;
+ goto out;
+ }
+ nf_ct_l3protos[proto->l3proto] = proto;
+out:
+ write_unlock_bh(&nf_conntrack_lock);
+
+ return ret;
+}
+
+void nf_conntrack_l3proto_unregister(struct nf_conntrack_l3proto *proto)
+{
+ write_lock_bh(&nf_conntrack_lock);
+ nf_ct_l3protos[proto->l3proto] = &nf_conntrack_generic_l3proto;
+ write_unlock_bh(&nf_conntrack_lock);
+
+ /* Somebody could be still looking at the proto in bh. */
+ synchronize_net();
+
+ /* Remove all conntrack entries for this protocol */
+ nf_ct_iterate_cleanup(kill_l3proto, proto);
+}
+
+/* FIXME: Allow NULL functions and sub in pointers to generic for
+ them. --RR */
+int nf_conntrack_protocol_register(struct nf_conntrack_protocol *proto)
+{
+ int ret = 0;
+
+retry:
+ write_lock_bh(&nf_conntrack_lock);
+ if (nf_ct_protos[proto->l3proto]) {
+ if (nf_ct_protos[proto->l3proto][proto->proto]
+ != &nf_conntrack_generic_protocol) {
+ ret = -EBUSY;
+ goto out_unlock;
+ }
+ } else {
+ /* l3proto may be loaded later. */
+ struct nf_conntrack_protocol **proto_array;
+ int i;
+
+ write_unlock_bh(&nf_conntrack_lock);
+
+ proto_array = (struct nf_conntrack_protocol **)
+ kmalloc(MAX_NF_CT_PROTO *
+ sizeof(struct nf_conntrack_protocol *),
+ GFP_KERNEL);
+ if (proto_array == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ for (i = 0; i < MAX_NF_CT_PROTO; i++)
+ proto_array[i] = &nf_conntrack_generic_protocol;
+
+ write_lock_bh(&nf_conntrack_lock);
+ if (nf_ct_protos[proto->l3proto]) {
+ /* bad timing, but no problem */
+ write_unlock_bh(&nf_conntrack_lock);
+ kfree(proto_array);
+ } else {
+ nf_ct_protos[proto->l3proto] = proto_array;
+ write_unlock_bh(&nf_conntrack_lock);
+ }
+
+ /*
+ * We retry just once, because the array is never freed until
+ * nf_conntrack.ko is unloaded.
+ */
+ goto retry;
+ }
+
+ nf_ct_protos[proto->l3proto][proto->proto] = proto;
+
+out_unlock:
+ write_unlock_bh(&nf_conntrack_lock);
+out:
+ return ret;
+}
+
+void nf_conntrack_protocol_unregister(struct nf_conntrack_protocol *proto)
+{
+ write_lock_bh(&nf_conntrack_lock);
+ nf_ct_protos[proto->l3proto][proto->proto]
+ = &nf_conntrack_generic_protocol;
+ write_unlock_bh(&nf_conntrack_lock);
+
+ /* Somebody could be still looking at the proto in bh. */
+ synchronize_net();
+
+ /* Remove all conntrack entries for this protocol */
+ nf_ct_iterate_cleanup(kill_proto, proto);
+}
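[Editor's note, not part of the patch: as a usage sketch of the registration API above, a module could install one of the trackers defined earlier in this patch roughly as follows. The module itself is hypothetical; the choice of the exported IPv4/UDP tracker is illustrative only.

    #include <linux/module.h>
    #include <net/netfilter/nf_conntrack_protocol.h>

    extern struct nf_conntrack_protocol nf_conntrack_protocol_udp4;

    static int __init example_init(void)
    {
            /* fails with -EBUSY if a non-generic tracker already occupies
             * the IPv4/UDP slot in nf_ct_protos[] */
            return nf_conntrack_protocol_register(&nf_conntrack_protocol_udp4);
    }

    static void __exit example_exit(void)
    {
            /* restores nf_conntrack_generic_protocol and flushes all
             * matching entries via kill_proto() */
            nf_conntrack_protocol_unregister(&nf_conntrack_protocol_udp4);
    }

    module_init(example_init);
    module_exit(example_exit);
    MODULE_LICENSE("GPL");]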
+
+static int __init init(void)
+{
+ return init_or_cleanup(1);
+}
+
+static void __exit fini(void)
+{
+ init_or_cleanup(0);
+}
+
+module_init(init);
+module_exit(fini);
+
+/* Some modules need us, but don't depend directly on any symbol.
+ They should call this. */
+void need_nf_conntrack(void)
+{
+}
+
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+EXPORT_SYMBOL_GPL(nf_conntrack_chain);
+EXPORT_SYMBOL_GPL(nf_conntrack_expect_chain);
+EXPORT_SYMBOL_GPL(nf_conntrack_register_notifier);
+EXPORT_SYMBOL_GPL(nf_conntrack_unregister_notifier);
+EXPORT_SYMBOL_GPL(__nf_ct_event_cache_init);
+EXPORT_PER_CPU_SYMBOL_GPL(nf_conntrack_ecache);
+EXPORT_SYMBOL_GPL(nf_ct_deliver_cached_events);
+#endif
+EXPORT_SYMBOL(nf_conntrack_l3proto_register);
+EXPORT_SYMBOL(nf_conntrack_l3proto_unregister);
+EXPORT_SYMBOL(nf_conntrack_protocol_register);
+EXPORT_SYMBOL(nf_conntrack_protocol_unregister);
+EXPORT_SYMBOL(nf_ct_invert_tuplepr);
+EXPORT_SYMBOL(nf_conntrack_alter_reply);
+EXPORT_SYMBOL(nf_conntrack_destroyed);
+EXPORT_SYMBOL(need_nf_conntrack);
+EXPORT_SYMBOL(nf_conntrack_helper_register);
+EXPORT_SYMBOL(nf_conntrack_helper_unregister);
+EXPORT_SYMBOL(nf_ct_iterate_cleanup);
+EXPORT_SYMBOL(__nf_ct_refresh_acct);
+EXPORT_SYMBOL(nf_ct_protos);
+EXPORT_SYMBOL(nf_ct_find_proto);
+EXPORT_SYMBOL(nf_ct_l3protos);
+EXPORT_SYMBOL(nf_conntrack_expect_alloc);
+EXPORT_SYMBOL(nf_conntrack_expect_put);
+EXPORT_SYMBOL(nf_conntrack_expect_related);
+EXPORT_SYMBOL(nf_conntrack_unexpect_related);
+EXPORT_SYMBOL(nf_conntrack_tuple_taken);
+EXPORT_SYMBOL(nf_conntrack_htable_size);
+EXPORT_SYMBOL(nf_conntrack_lock);
+EXPORT_SYMBOL(nf_conntrack_hash);
+EXPORT_SYMBOL(nf_conntrack_untracked);
+EXPORT_SYMBOL_GPL(nf_conntrack_find_get);
+#ifdef CONFIG_IP_NF_NAT_NEEDED
+EXPORT_SYMBOL(nf_conntrack_tcp_update);
+#endif
+EXPORT_SYMBOL(__nf_conntrack_confirm);
+EXPORT_SYMBOL(nf_ct_get_tuple);
+EXPORT_SYMBOL(nf_ct_invert_tuple);
+EXPORT_SYMBOL(nf_conntrack_in);
+EXPORT_SYMBOL(__nf_conntrack_attach);
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 4bc27a6334c..83f4c53030f 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -128,7 +128,7 @@ void __nfa_fill(struct sk_buff *skb, int attrtype, int attrlen,
memset(NFA_DATA(nfa) + attrlen, 0, NFA_ALIGN(size) - size);
}
-int nfattr_parse(struct nfattr *tb[], int maxattr, struct nfattr *nfa, int len)
+void nfattr_parse(struct nfattr *tb[], int maxattr, struct nfattr *nfa, int len)
{
memset(tb, 0, sizeof(struct nfattr *) * maxattr);
@@ -138,8 +138,6 @@ int nfattr_parse(struct nfattr *tb[], int maxattr, struct nfattr *nfa, int len)
tb[flavor-1] = nfa;
nfa = NFA_NEXT(nfa, len);
}
-
- return 0;
}
/**
@@ -242,15 +240,18 @@ static inline int nfnetlink_rcv_msg(struct sk_buff *skb,
ss = nfnetlink_get_subsys(type);
if (!ss) {
#ifdef CONFIG_KMOD
- /* don't call nfnl_shunlock, since it would reenter
- * with further packet processing */
- up(&nfnl_sem);
- request_module("nfnetlink-subsys-%d", NFNL_SUBSYS_ID(type));
- nfnl_shlock();
- ss = nfnetlink_get_subsys(type);
+ if (cap_raised(NETLINK_CB(skb).eff_cap, CAP_NET_ADMIN)) {
+ /* don't call nfnl_shunlock, since it would reenter
+ * with further packet processing */
+ up(&nfnl_sem);
+ request_module("nfnetlink-subsys-%d",
+ NFNL_SUBSYS_ID(type));
+ nfnl_shlock();
+ ss = nfnetlink_get_subsys(type);
+ }
if (!ss)
#endif
- goto err_inval;
+ goto err_inval;
}
nc = nfnetlink_find_client(type, ss);
diff --git a/net/netlink/Makefile b/net/netlink/Makefile
index 39d9c2dcd03..e3589c2de49 100644
--- a/net/netlink/Makefile
+++ b/net/netlink/Makefile
@@ -2,4 +2,4 @@
# Makefile for the netlink driver.
#
-obj-y := af_netlink.o
+obj-y := af_netlink.o attr.o genetlink.o
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 5ca283537bc..8c38ee6d255 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -58,6 +58,7 @@
#include <net/sock.h>
#include <net/scm.h>
+#include <net/netlink.h>
#define Nprintk(a...)
#define NLGRPSZ(x) (ALIGN(x, sizeof(unsigned long) * 8) / 8)
@@ -427,7 +428,8 @@ static int netlink_release(struct socket *sock)
spin_lock(&nlk->cb_lock);
if (nlk->cb) {
- nlk->cb->done(nlk->cb);
+ if (nlk->cb->done)
+ nlk->cb->done(nlk->cb);
netlink_destroy_callback(nlk->cb);
nlk->cb = NULL;
}
@@ -1322,7 +1324,8 @@ static int netlink_dump(struct sock *sk)
skb_queue_tail(&sk->sk_receive_queue, skb);
sk->sk_data_ready(sk, skb->len);
- cb->done(cb);
+ if (cb->done)
+ cb->done(cb);
nlk->cb = NULL;
spin_unlock(&nlk->cb_lock);
@@ -1409,6 +1412,94 @@ void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err)
netlink_unicast(in_skb->sk, skb, NETLINK_CB(in_skb).pid, MSG_DONTWAIT);
}
+static int netlink_rcv_skb(struct sk_buff *skb, int (*cb)(struct sk_buff *,
+ struct nlmsghdr *, int *))
+{
+ unsigned int total_len;
+ struct nlmsghdr *nlh;
+ int err;
+
+ while (skb->len >= nlmsg_total_size(0)) {
+ nlh = (struct nlmsghdr *) skb->data;
+
+ if (skb->len < nlh->nlmsg_len)
+ return 0;
+
+ total_len = min(NLMSG_ALIGN(nlh->nlmsg_len), skb->len);
+
+ if (cb(skb, nlh, &err) < 0) {
+ /* Not an error, but we have to interrupt processing
+ * here. Note that in this case we do not pull the
+ * message from the skb; it will be processed later.
+ */
+ if (err == 0)
+ return -1;
+ netlink_ack(skb, nlh, err);
+ } else if (nlh->nlmsg_flags & NLM_F_ACK)
+ netlink_ack(skb, nlh, 0);
+
+ skb_pull(skb, total_len);
+ }
+
+ return 0;
+}
+
+/**
+ * netlink_run_queue - Process netlink receive queue.
+ * @sk: Netlink socket containing the queue
+ * @qlen: Place to store queue length upon entry
+ * @cb: Callback function invoked for each netlink message found
+ *
+ * Processes as much as there was in the queue upon entry and invokes
+ * a callback function for each netlink message found. The callback
+ * function may refuse a message by returning a negative error code
+ * while setting the error pointer to 0, in which case this function
+ * returns with a qlen != 0.
+ *
+ * qlen must be initialized to 0 before the initial call; afterwards
+ * the function may be called repeatedly until qlen reaches 0.
+ */
+void netlink_run_queue(struct sock *sk, unsigned int *qlen,
+ int (*cb)(struct sk_buff *, struct nlmsghdr *, int *))
+{
+ struct sk_buff *skb;
+
+ if (!*qlen || *qlen > skb_queue_len(&sk->sk_receive_queue))
+ *qlen = skb_queue_len(&sk->sk_receive_queue);
+
+ for (; *qlen; (*qlen)--) {
+ skb = skb_dequeue(&sk->sk_receive_queue);
+ if (netlink_rcv_skb(skb, cb)) {
+ if (skb->len)
+ skb_queue_head(&sk->sk_receive_queue, skb);
+ else {
+ kfree_skb(skb);
+ (*qlen)--;
+ }
+ break;
+ }
+
+ kfree_skb(skb);
+ }
+}
+
+/**
+ * netlink_queue_skip - Skip netlink message while processing queue.
+ * @nlh: Netlink message to be skipped
+ * @skb: Socket buffer containing the netlink messages.
+ *
+ * Pulls the given netlink message off the socket buffer so the next
+ * call to netlink_run_queue() will not reconsider the message.
+ */
+void netlink_queue_skip(struct nlmsghdr *nlh, struct sk_buff *skb)
+{
+ int msglen = NLMSG_ALIGN(nlh->nlmsg_len);
+
+ if (msglen > skb->len)
+ msglen = skb->len;
+
+ skb_pull(skb, msglen);
+}
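[Editor's note, not part of the patch: a sketch of how a kernel-side netlink receiver is expected to use these helpers. The my_* names and the semaphore are hypothetical; the pattern mirrors the genl_rcv() and xfrm_netlink_rcv() callers elsewhere in this patch.

    static DECLARE_MUTEX(my_sem);           /* serializes message processing */

    static int my_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp)
    {
            /* handle one message; returning <0 with *errp set to 0 stops
             * processing and leaves the message queued for a later pass */
            return 0;
    }

    static void my_rcv(struct sock *sk, int len)
    {
            unsigned int qlen = 0;

            do {
                    down(&my_sem);
                    netlink_run_queue(sk, &qlen, &my_rcv_msg);
                    up(&my_sem);
            } while (qlen);
    }]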
#ifdef CONFIG_PROC_FS
struct nl_seq_iter {
@@ -1657,6 +1748,8 @@ out:
core_initcall(netlink_proto_init);
EXPORT_SYMBOL(netlink_ack);
+EXPORT_SYMBOL(netlink_run_queue);
+EXPORT_SYMBOL(netlink_queue_skip);
EXPORT_SYMBOL(netlink_broadcast);
EXPORT_SYMBOL(netlink_dump_start);
EXPORT_SYMBOL(netlink_kernel_create);
diff --git a/net/netlink/attr.c b/net/netlink/attr.c
new file mode 100644
index 00000000000..fffef4ab276
--- /dev/null
+++ b/net/netlink/attr.c
@@ -0,0 +1,328 @@
+/*
+ * NETLINK Netlink attributes
+ *
+ * Authors: Thomas Graf <tgraf@suug.ch>
+ * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/jiffies.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <net/netlink.h>
+
+static u16 nla_attr_minlen[NLA_TYPE_MAX+1] __read_mostly = {
+ [NLA_U8] = sizeof(u8),
+ [NLA_U16] = sizeof(u16),
+ [NLA_U32] = sizeof(u32),
+ [NLA_U64] = sizeof(u64),
+ [NLA_STRING] = 1,
+ [NLA_NESTED] = NLA_HDRLEN,
+};
+
+static int validate_nla(struct nlattr *nla, int maxtype,
+ struct nla_policy *policy)
+{
+ struct nla_policy *pt;
+ int minlen = 0;
+
+ if (nla->nla_type <= 0 || nla->nla_type > maxtype)
+ return 0;
+
+ pt = &policy[nla->nla_type];
+
+ BUG_ON(pt->type > NLA_TYPE_MAX);
+
+ if (pt->minlen)
+ minlen = pt->minlen;
+ else if (pt->type != NLA_UNSPEC)
+ minlen = nla_attr_minlen[pt->type];
+
+ if (pt->type == NLA_FLAG && nla_len(nla) > 0)
+ return -ERANGE;
+
+ if (nla_len(nla) < minlen)
+ return -ERANGE;
+
+ return 0;
+}
+
+/**
+ * nla_validate - Validate a stream of attributes
+ * @head: head of attribute stream
+ * @len: length of attribute stream
+ * @maxtype: maximum attribute type to be expected
+ * @policy: validation policy
+ *
+ * Validates all attributes in the specified attribute stream against the
+ * specified policy. Attributes with a type exceeding maxtype will be
+ * ignored. See documentation of struct nla_policy for more details.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int nla_validate(struct nlattr *head, int len, int maxtype,
+ struct nla_policy *policy)
+{
+ struct nlattr *nla;
+ int rem, err;
+
+ nla_for_each_attr(nla, head, len, rem) {
+ err = validate_nla(nla, maxtype, policy);
+ if (err < 0)
+ goto errout;
+ }
+
+ err = 0;
+errout:
+ return err;
+}
+
+/**
+ * nla_parse - Parse a stream of attributes into a tb buffer
+ * @tb: destination array with maxtype+1 elements
+ * @maxtype: maximum attribute type to be expected
+ * @head: head of attribute stream
+ * @len: length of attribute stream
+ * @policy: validation policy, may be NULL
+ *
+ * Parses a stream of attributes and stores a pointer to each attribute in
+ * the tb array accessible via the attribute type. Attributes with a type
+ * exceeding maxtype will be silently ignored for backwards compatibility
+ * reasons. policy may be set to NULL if no validation is required.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, int len,
+ struct nla_policy *policy)
+{
+ struct nlattr *nla;
+ int rem, err;
+
+ memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1));
+
+ nla_for_each_attr(nla, head, len, rem) {
+ u16 type = nla->nla_type;
+
+ if (type > 0 && type <= maxtype) {
+ if (policy) {
+ err = validate_nla(nla, maxtype, policy);
+ if (err < 0)
+ goto errout;
+ }
+
+ tb[type] = nla;
+ }
+ }
+
+ if (unlikely(rem > 0))
+ printk(KERN_WARNING "netlink: %d bytes leftover after parsing "
+ "attributes.\n", rem);
+
+ err = 0;
+errout:
+ return err;
+}
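[Editor's note, not part of the patch: an illustration of the parsing side of this API. Everything prefixed MY_ is invented for the example; a caller declares a policy and a tb[] array sized maxtype+1. Handlers sitting behind a family header can let nlmsg_parse() do the offset arithmetic instead, as the generic netlink code later in this patch does.

    enum {
            MY_ATTR_UNSPEC,
            MY_ATTR_PORT,           /* NLA_U16 */
            MY_ATTR_NAME,           /* NLA_STRING */
            __MY_ATTR_MAX,
    };
    #define MY_ATTR_MAX (__MY_ATTR_MAX - 1)

    static struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
            [MY_ATTR_PORT] = { .type = NLA_U16 },
            [MY_ATTR_NAME] = { .type = NLA_STRING },
    };

    static int my_parse_attrs(struct nlattr *head, int len)
    {
            struct nlattr *tb[MY_ATTR_MAX + 1];
            int err;

            err = nla_parse(tb, MY_ATTR_MAX, head, len, my_policy);
            if (err < 0)
                    return err;

            if (tb[MY_ATTR_PORT])
                    printk(KERN_DEBUG "port=%u\n", nla_get_u16(tb[MY_ATTR_PORT]));
            if (tb[MY_ATTR_NAME]) {
                    char name[16];

                    nla_strlcpy(name, tb[MY_ATTR_NAME], sizeof(name));
                    printk(KERN_DEBUG "name=%s\n", name);
            }
            return 0;
    }]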
+
+/**
+ * nla_find - Find a specific attribute in a stream of attributes
+ * @head: head of attribute stream
+ * @len: length of attribute stream
+ * @attrtype: type of attribute to look for
+ *
+ * Returns the first attribute in the stream matching the specified type.
+ */
+struct nlattr *nla_find(struct nlattr *head, int len, int attrtype)
+{
+ struct nlattr *nla;
+ int rem;
+
+ nla_for_each_attr(nla, head, len, rem)
+ if (nla->nla_type == attrtype)
+ return nla;
+
+ return NULL;
+}
+
+/**
+ * nla_strlcpy - Copy string attribute payload into a sized buffer
+ * @dst: where to copy the string to
+ * @nla: netlink attribute to copy the string from
+ * @dstsize: size of destination buffer
+ *
+ * Copies at most dstsize - 1 bytes into the destination buffer.
+ * The result is always a valid NUL-terminated string. Unlike
+ * strlcpy the destination buffer is always padded out.
+ *
+ * Returns the length of the source buffer.
+ */
+size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize)
+{
+ size_t srclen = nla_len(nla);
+ char *src = nla_data(nla);
+
+ if (srclen > 0 && src[srclen - 1] == '\0')
+ srclen--;
+
+ if (dstsize > 0) {
+ size_t len = (srclen >= dstsize) ? dstsize - 1 : srclen;
+
+ memset(dst, 0, dstsize);
+ memcpy(dst, src, len);
+ }
+
+ return srclen;
+}
+
+/**
+ * nla_memcpy - Copy a netlink attribute into another memory area
+ * @dest: where to copy to
+ * @src: netlink attribute to copy from
+ * @count: size of the destination area
+ *
+ * Note: The number of bytes copied is limited by the length of
+ * the attribute's payload.
+ *
+ * Returns the number of bytes copied.
+ */
+int nla_memcpy(void *dest, struct nlattr *src, int count)
+{
+ int minlen = min_t(int, count, nla_len(src));
+
+ memcpy(dest, nla_data(src), minlen);
+
+ return minlen;
+}
+
+/**
+ * nla_memcmp - Compare an attribute with sized memory area
+ * @nla: netlink attribute
+ * @data: memory area
+ * @size: size of memory area
+ */
+int nla_memcmp(const struct nlattr *nla, const void *data,
+ size_t size)
+{
+ int d = nla_len(nla) - size;
+
+ if (d == 0)
+ d = memcmp(nla_data(nla), data, size);
+
+ return d;
+}
+
+/**
+ * nla_strcmp - Compare a string attribute against a string
+ * @nla: netlink string attribute
+ * @str: another string
+ */
+int nla_strcmp(const struct nlattr *nla, const char *str)
+{
+ int len = strlen(str) + 1;
+ int d = nla_len(nla) - len;
+
+ if (d == 0)
+ d = memcmp(nla_data(nla), str, len);
+
+ return d;
+}
+
+/**
+ * __nla_reserve - reserve room for attribute on the skb
+ * @skb: socket buffer to reserve room on
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ *
+ * Adds a netlink attribute header to a socket buffer and reserves
+ * room for the payload but does not copy it.
+ *
+ * The caller is responsible for ensuring that the skb provides enough
+ * tailroom for the attribute header and payload.
+ */
+struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
+{
+ struct nlattr *nla;
+
+ nla = (struct nlattr *) skb_put(skb, nla_total_size(attrlen));
+ nla->nla_type = attrtype;
+ nla->nla_len = nla_attr_size(attrlen);
+
+ memset((unsigned char *) nla + nla->nla_len, 0, nla_padlen(attrlen));
+
+ return nla;
+}
+
+/**
+ * nla_reserve - reserve room for attribute on the skb
+ * @skb: socket buffer to reserve room on
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ *
+ * Adds a netlink attribute header to a socket buffer and reserves
+ * room for the payload but does not copy it.
+ *
+ * Returns NULL if the tailroom of the skb is insufficient to store
+ * the attribute header and payload.
+ */
+struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen)
+{
+ if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen)))
+ return NULL;
+
+ return __nla_reserve(skb, attrtype, attrlen);
+}
+
+/**
+ * __nla_put - Add a netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ * @data: head of attribute payload
+ *
+ * The caller is responsible for ensuring that the skb provides enough
+ * tailroom for the attribute header and payload.
+ */
+void __nla_put(struct sk_buff *skb, int attrtype, int attrlen,
+ const void *data)
+{
+ struct nlattr *nla;
+
+ nla = __nla_reserve(skb, attrtype, attrlen);
+ memcpy(nla_data(nla), data, attrlen);
+}
+
+
+/**
+ * nla_put - Add a netlink attribute to a socket buffer
+ * @skb: socket buffer to add attribute to
+ * @attrtype: attribute type
+ * @attrlen: length of attribute payload
+ * @data: head of attribute payload
+ *
+ * Returns -1 if the tailroom of the skb is insufficient to store
+ * the attribute header and payload.
+ */
+int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data)
+{
+ if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen)))
+ return -1;
+
+ __nla_put(skb, attrtype, attrlen, data);
+ return 0;
+}
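[Editor's note, not part of the patch: the construction side is symmetric. A hedged sketch, with MY_ATTR_* as in the parsing example above and an illustrative buffer size:

    static struct sk_buff *my_build_msg(u16 port, const char *name)
    {
            struct sk_buff *skb = nlmsg_new(NLMSG_GOODSIZE);

            if (skb == NULL)
                    return NULL;

            if (nla_put(skb, MY_ATTR_PORT, sizeof(port), &port) < 0 ||
                nla_put(skb, MY_ATTR_NAME, strlen(name) + 1, name) < 0) {
                    /* tailroom exhausted: nla_put() refused the attribute */
                    nlmsg_free(skb);
                    return NULL;
            }
            return skb;
    }]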
+
+
+EXPORT_SYMBOL(nla_validate);
+EXPORT_SYMBOL(nla_parse);
+EXPORT_SYMBOL(nla_find);
+EXPORT_SYMBOL(nla_strlcpy);
+EXPORT_SYMBOL(__nla_reserve);
+EXPORT_SYMBOL(nla_reserve);
+EXPORT_SYMBOL(__nla_put);
+EXPORT_SYMBOL(nla_put);
+EXPORT_SYMBOL(nla_memcpy);
+EXPORT_SYMBOL(nla_memcmp);
+EXPORT_SYMBOL(nla_strcmp);
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
new file mode 100644
index 00000000000..287cfcc5695
--- /dev/null
+++ b/net/netlink/genetlink.c
@@ -0,0 +1,579 @@
+/*
+ * NETLINK Generic Netlink Family
+ *
+ * Authors: Jamal Hadi Salim
+ * Thomas Graf <tgraf@suug.ch>
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/string.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <net/genetlink.h>
+
+struct sock *genl_sock = NULL;
+
+static DECLARE_MUTEX(genl_sem); /* serialization of message processing */
+
+static void genl_lock(void)
+{
+ down(&genl_sem);
+}
+
+static int genl_trylock(void)
+{
+ return down_trylock(&genl_sem);
+}
+
+static void genl_unlock(void)
+{
+ up(&genl_sem);
+
+ if (genl_sock && genl_sock->sk_receive_queue.qlen)
+ genl_sock->sk_data_ready(genl_sock, 0);
+}
+
+#define GENL_FAM_TAB_SIZE 16
+#define GENL_FAM_TAB_MASK (GENL_FAM_TAB_SIZE - 1)
+
+static struct list_head family_ht[GENL_FAM_TAB_SIZE];
+
+static int genl_ctrl_event(int event, void *data);
+
+static inline unsigned int genl_family_hash(unsigned int id)
+{
+ return id & GENL_FAM_TAB_MASK;
+}
+
+static inline struct list_head *genl_family_chain(unsigned int id)
+{
+ return &family_ht[genl_family_hash(id)];
+}
+
+static struct genl_family *genl_family_find_byid(unsigned int id)
+{
+ struct genl_family *f;
+
+ list_for_each_entry(f, genl_family_chain(id), family_list)
+ if (f->id == id)
+ return f;
+
+ return NULL;
+}
+
+static struct genl_family *genl_family_find_byname(char *name)
+{
+ struct genl_family *f;
+ int i;
+
+ for (i = 0; i < GENL_FAM_TAB_SIZE; i++)
+ list_for_each_entry(f, genl_family_chain(i), family_list)
+ if (strcmp(f->name, name) == 0)
+ return f;
+
+ return NULL;
+}
+
+static struct genl_ops *genl_get_cmd(u8 cmd, struct genl_family *family)
+{
+ struct genl_ops *ops;
+
+ list_for_each_entry(ops, &family->ops_list, ops_list)
+ if (ops->cmd == cmd)
+ return ops;
+
+ return NULL;
+}
+
+/* Of course we are going to have problems once we hit
+ * 2^16 alive types, but that can only happen by year 2K
+*/
+static inline u16 genl_generate_id(void)
+{
+ static u16 id_gen_idx;
+ int overflowed = 0;
+
+ do {
+ if (id_gen_idx == 0)
+ id_gen_idx = GENL_MIN_ID;
+
+ if (++id_gen_idx > GENL_MAX_ID) {
+ if (!overflowed) {
+ overflowed = 1;
+ id_gen_idx = 0;
+ continue;
+ } else
+ return 0;
+ }
+
+ } while (genl_family_find_byid(id_gen_idx));
+
+ return id_gen_idx;
+}
+
+/**
+ * genl_register_ops - register generic netlink operations
+ * @family: generic netlink family
+ * @ops: operations to be registered
+ *
+ * Registers the specified operations and assigns them to the specified
+ * family. Either a doit or dumpit callback must be specified or the
+ * operation will fail. Only one operation structure per command
+ * identifier may be registered.
+ *
+ * See include/net/genetlink.h for more documentation on the operations
+ * structure.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int genl_register_ops(struct genl_family *family, struct genl_ops *ops)
+{
+ int err = -EINVAL;
+
+ if (ops->dumpit == NULL && ops->doit == NULL)
+ goto errout;
+
+ if (genl_get_cmd(ops->cmd, family)) {
+ err = -EEXIST;
+ goto errout;
+ }
+
+ genl_lock();
+ list_add_tail(&ops->ops_list, &family->ops_list);
+ genl_unlock();
+
+ genl_ctrl_event(CTRL_CMD_NEWOPS, ops);
+ err = 0;
+errout:
+ return err;
+}
+
+/**
+ * genl_unregister_ops - unregister generic netlink operations
+ * @family: generic netlink family
+ * @ops: operations to be unregistered
+ *
+ * Unregisters the specified operations and unassigns them from the
+ * specified family. The operation blocks until the current message
+ * processing has finished and doesn't start again until the
+ * unregister process has finished.
+ *
+ * Note: It is not necessary to unregister all operations before
+ * unregistering the family, unregistering the family will cause
+ * all assigned operations to be unregistered automatically.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int genl_unregister_ops(struct genl_family *family, struct genl_ops *ops)
+{
+ struct genl_ops *rc;
+
+ genl_lock();
+ list_for_each_entry(rc, &family->ops_list, ops_list) {
+ if (rc == ops) {
+ list_del(&ops->ops_list);
+ genl_unlock();
+ genl_ctrl_event(CTRL_CMD_DELOPS, ops);
+ return 0;
+ }
+ }
+ genl_unlock();
+
+ return -ENOENT;
+}
+
+/**
+ * genl_register_family - register a generic netlink family
+ * @family: generic netlink family
+ *
+ * Registers the specified family after validating it. Only one
+ * family may be registered with the same family name or identifier.
+ * The family id may equal GENL_ID_GENERATE, causing a unique id to
+ * be automatically generated and assigned.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int genl_register_family(struct genl_family *family)
+{
+ int err = -EINVAL;
+
+ if (family->id && family->id < GENL_MIN_ID)
+ goto errout;
+
+ if (family->id > GENL_MAX_ID)
+ goto errout;
+
+ INIT_LIST_HEAD(&family->ops_list);
+
+ genl_lock();
+
+ if (genl_family_find_byname(family->name)) {
+ err = -EEXIST;
+ goto errout_locked;
+ }
+
+ if (genl_family_find_byid(family->id)) {
+ err = -EEXIST;
+ goto errout_locked;
+ }
+
+ if (!try_module_get(family->owner)) {
+ err = -EBUSY;
+ goto errout_locked;
+ }
+
+ if (family->id == GENL_ID_GENERATE) {
+ u16 newid = genl_generate_id();
+
+ if (!newid) {
+ err = -ENOMEM;
+ goto errout_locked;
+ }
+
+ family->id = newid;
+ }
+
+ if (family->maxattr) {
+ family->attrbuf = kmalloc((family->maxattr+1) *
+ sizeof(struct nlattr *), GFP_KERNEL);
+ if (family->attrbuf == NULL) {
+ err = -ENOMEM;
+ goto errout;
+ }
+ } else
+ family->attrbuf = NULL;
+
+ list_add_tail(&family->family_list, genl_family_chain(family->id));
+ genl_unlock();
+
+ genl_ctrl_event(CTRL_CMD_NEWFAMILY, family);
+
+ return 0;
+
+errout_locked:
+ genl_unlock();
+errout:
+ return err;
+}
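[Editor's note, not part of the patch: putting the two registration calls together, a minimal client family might look like the sketch below. All my_* names, the command number and the policy are hypothetical; the nlctrl family at the end of this file is the in-tree reference.

    #define MY_ATTR_PORT    1
    #define MY_ATTR_MAX     1

    static struct nla_policy my_policy[MY_ATTR_MAX + 1] = {
            [MY_ATTR_PORT] = { .type = NLA_U16 },
    };

    static int my_doit(struct sk_buff *skb, struct genl_info *info)
    {
            /* info->attrs[] was filled in against my_policy before doit ran */
            return 0;
    }

    static struct genl_ops my_ops = {
            .cmd    = 1,                    /* hypothetical command id */
            .doit   = my_doit,
            .policy = my_policy,
    };

    static struct genl_family my_family = {
            .id      = GENL_ID_GENERATE,    /* let the core pick an id */
            .name    = "my_family",
            .version = 0x1,
            .maxattr = MY_ATTR_MAX,
            .owner   = THIS_MODULE,
    };

    static int __init my_genl_init(void)
    {
            int err = genl_register_family(&my_family);

            if (err)
                    return err;
            err = genl_register_ops(&my_family, &my_ops);
            if (err)
                    genl_unregister_family(&my_family);
            return err;
    }]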
+
+/**
+ * genl_unregister_family - unregister generic netlink family
+ * @family: generic netlink family
+ *
+ * Unregisters the specified family.
+ *
+ * Returns 0 on success or a negative error code.
+ */
+int genl_unregister_family(struct genl_family *family)
+{
+ struct genl_family *rc;
+
+ genl_lock();
+
+ list_for_each_entry(rc, genl_family_chain(family->id), family_list) {
+ if (family->id != rc->id || strcmp(rc->name, family->name))
+ continue;
+
+ list_del(&rc->family_list);
+ INIT_LIST_HEAD(&family->ops_list);
+ genl_unlock();
+
+ module_put(family->owner);
+ kfree(family->attrbuf);
+ genl_ctrl_event(CTRL_CMD_DELFAMILY, family);
+ return 0;
+ }
+
+ genl_unlock();
+
+ return -ENOENT;
+}
+
+static inline int genl_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh,
+ int *errp)
+{
+ struct genl_ops *ops;
+ struct genl_family *family;
+ struct genl_info info;
+ struct genlmsghdr *hdr = nlmsg_data(nlh);
+ int hdrlen, err = -EINVAL;
+
+ if (!(nlh->nlmsg_flags & NLM_F_REQUEST))
+ goto ignore;
+
+ if (nlh->nlmsg_type < NLMSG_MIN_TYPE)
+ goto ignore;
+
+ family = genl_family_find_byid(nlh->nlmsg_type);
+ if (family == NULL) {
+ err = -ENOENT;
+ goto errout;
+ }
+
+ hdrlen = GENL_HDRLEN + family->hdrsize;
+ if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
+ goto errout;
+
+ ops = genl_get_cmd(hdr->cmd, family);
+ if (ops == NULL) {
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+
+ if ((ops->flags & GENL_ADMIN_PERM) && security_netlink_recv(skb)) {
+ err = -EPERM;
+ goto errout;
+ }
+
+ if (nlh->nlmsg_flags & NLM_F_DUMP) {
+ if (ops->dumpit == NULL) {
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+
+ *errp = err = netlink_dump_start(genl_sock, skb, nlh,
+ ops->dumpit, NULL);
+ if (err == 0)
+ skb_pull(skb, min(NLMSG_ALIGN(nlh->nlmsg_len),
+ skb->len));
+ return -1;
+ }
+
+ if (ops->doit == NULL) {
+ err = -EOPNOTSUPP;
+ goto errout;
+ }
+
+ if (family->attrbuf) {
+ err = nlmsg_parse(nlh, hdrlen, family->attrbuf, family->maxattr,
+ ops->policy);
+ if (err < 0)
+ goto errout;
+ }
+
+ info.snd_seq = nlh->nlmsg_seq;
+ info.snd_pid = NETLINK_CB(skb).pid;
+ info.nlhdr = nlh;
+ info.genlhdr = nlmsg_data(nlh);
+ info.userhdr = nlmsg_data(nlh) + GENL_HDRLEN;
+ info.attrs = family->attrbuf;
+
+ *errp = err = ops->doit(skb, &info);
+ return err;
+
+ignore:
+ return 0;
+
+errout:
+ *errp = err;
+ return -1;
+}
+
+static void genl_rcv(struct sock *sk, int len)
+{
+ unsigned int qlen = 0;
+
+ do {
+ if (genl_trylock())
+ return;
+ netlink_run_queue(sk, &qlen, &genl_rcv_msg);
+ genl_unlock();
+ } while (qlen && genl_sock && genl_sock->sk_receive_queue.qlen);
+}
+
+/**************************************************************************
+ * Controller
+ **************************************************************************/
+
+static int ctrl_fill_info(struct genl_family *family, u32 pid, u32 seq,
+ u32 flags, struct sk_buff *skb, u8 cmd)
+{
+ void *hdr;
+
+ hdr = genlmsg_put(skb, pid, seq, GENL_ID_CTRL, 0, flags, cmd,
+ family->version);
+ if (hdr == NULL)
+ return -1;
+
+ NLA_PUT_STRING(skb, CTRL_ATTR_FAMILY_NAME, family->name);
+ NLA_PUT_U16(skb, CTRL_ATTR_FAMILY_ID, family->id);
+
+ return genlmsg_end(skb, hdr);
+
+nla_put_failure:
+ return genlmsg_cancel(skb, hdr);
+}
+
+static int ctrl_dumpfamily(struct sk_buff *skb, struct netlink_callback *cb)
+{
+
+ int i, n = 0;
+ struct genl_family *rt;
+ int chains_to_skip = cb->args[0];
+ int fams_to_skip = cb->args[1];
+
+ for (i = 0; i < GENL_FAM_TAB_SIZE; i++) {
+ if (i < chains_to_skip)
+ continue;
+ n = 0;
+ list_for_each_entry(rt, genl_family_chain(i), family_list) {
+ if (++n < fams_to_skip)
+ continue;
+ if (ctrl_fill_info(rt, NETLINK_CB(cb->skb).pid,
+ cb->nlh->nlmsg_seq, NLM_F_MULTI,
+ skb, CTRL_CMD_NEWFAMILY) < 0)
+ goto errout;
+ }
+
+ fams_to_skip = 0;
+ }
+
+errout:
+ cb->args[0] = i;
+ cb->args[1] = n;
+
+ return skb->len;
+}
+
+static struct sk_buff *ctrl_build_msg(struct genl_family *family, u32 pid,
+ int seq, int cmd)
+{
+ struct sk_buff *skb;
+ int err;
+
+ skb = nlmsg_new(NLMSG_GOODSIZE);
+ if (skb == NULL)
+ return ERR_PTR(-ENOBUFS);
+
+ err = ctrl_fill_info(family, pid, seq, 0, skb, cmd);
+ if (err < 0) {
+ nlmsg_free(skb);
+ return ERR_PTR(err);
+ }
+
+ return skb;
+}
+
+static struct nla_policy ctrl_policy[CTRL_ATTR_MAX+1] __read_mostly = {
+ [CTRL_ATTR_FAMILY_ID] = { .type = NLA_U16 },
+ [CTRL_ATTR_FAMILY_NAME] = { .type = NLA_STRING },
+};
+
+static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
+{
+ struct sk_buff *msg;
+ struct genl_family *res = NULL;
+ int err = -EINVAL;
+
+ if (info->attrs[CTRL_ATTR_FAMILY_ID]) {
+ u16 id = nla_get_u16(info->attrs[CTRL_ATTR_FAMILY_ID]);
+ res = genl_family_find_byid(id);
+ }
+
+ if (info->attrs[CTRL_ATTR_FAMILY_NAME]) {
+ char name[GENL_NAMSIZ];
+
+ if (nla_strlcpy(name, info->attrs[CTRL_ATTR_FAMILY_NAME],
+ GENL_NAMSIZ) >= GENL_NAMSIZ)
+ goto errout;
+
+ res = genl_family_find_byname(name);
+ }
+
+ if (res == NULL) {
+ err = -ENOENT;
+ goto errout;
+ }
+
+ msg = ctrl_build_msg(res, info->snd_pid, info->snd_seq,
+ CTRL_CMD_NEWFAMILY);
+ if (IS_ERR(msg)) {
+ err = PTR_ERR(msg);
+ goto errout;
+ }
+
+ err = genlmsg_unicast(msg, info->snd_pid);
+errout:
+ return err;
+}
+
+static int genl_ctrl_event(int event, void *data)
+{
+ struct sk_buff *msg;
+
+ if (genl_sock == NULL)
+ return 0;
+
+ switch (event) {
+ case CTRL_CMD_NEWFAMILY:
+ case CTRL_CMD_DELFAMILY:
+ msg = ctrl_build_msg(data, 0, 0, event);
+ if (IS_ERR(msg))
+ return PTR_ERR(msg);
+
+ genlmsg_multicast(msg, 0, GENL_ID_CTRL);
+ break;
+ }
+
+ return 0;
+}
+
+static struct genl_ops genl_ctrl_ops = {
+ .cmd = CTRL_CMD_GETFAMILY,
+ .doit = ctrl_getfamily,
+ .dumpit = ctrl_dumpfamily,
+ .policy = ctrl_policy,
+};
+
+static struct genl_family genl_ctrl = {
+ .id = GENL_ID_CTRL,
+ .name = "nlctrl",
+ .version = 0x1,
+ .maxattr = CTRL_ATTR_MAX,
+ .owner = THIS_MODULE,
+};
+
+static int __init genl_init(void)
+{
+ int i, err;
+
+ for (i = 0; i < GENL_FAM_TAB_SIZE; i++)
+ INIT_LIST_HEAD(&family_ht[i]);
+
+ err = genl_register_family(&genl_ctrl);
+ if (err < 0)
+ goto errout;
+
+ err = genl_register_ops(&genl_ctrl, &genl_ctrl_ops);
+ if (err < 0)
+ goto errout_register;
+
+ netlink_set_nonroot(NETLINK_GENERIC, NL_NONROOT_RECV);
+ genl_sock = netlink_kernel_create(NETLINK_GENERIC, GENL_MAX_ID,
+ genl_rcv, THIS_MODULE);
+ if (genl_sock == NULL) {
+ panic("GENL: Cannot initialize generic netlink\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+
+errout_register:
+ genl_unregister_family(&genl_ctrl);
+errout:
+ panic("GENL: Cannot register controller: %d\n", err);
+ return err;
+}
+
+subsys_initcall(genl_init);
+
+EXPORT_SYMBOL(genl_sock);
+EXPORT_SYMBOL(genl_register_ops);
+EXPORT_SYMBOL(genl_unregister_ops);
+EXPORT_SYMBOL(genl_register_family);
+EXPORT_SYMBOL(genl_unregister_family);
diff --git a/net/rxrpc/transport.c b/net/rxrpc/transport.c
index 122c086ee2d..dbe6105e83a 100644
--- a/net/rxrpc/transport.c
+++ b/net/rxrpc/transport.c
@@ -23,6 +23,7 @@
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
+#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/ip.h>
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
@@ -475,15 +476,11 @@ void rxrpc_trans_receive_packet(struct rxrpc_transport *trans)
/* we'll probably need to checksum it (didn't call
* sock_recvmsg) */
- if (pkt->ip_summed != CHECKSUM_UNNECESSARY) {
- if ((unsigned short)
- csum_fold(skb_checksum(pkt, 0, pkt->len,
- pkt->csum))) {
- kfree_skb(pkt);
- rxrpc_krxiod_queue_transport(trans);
- _leave(" CSUM failed");
- return;
- }
+ if (skb_checksum_complete(pkt)) {
+ kfree_skb(pkt);
+ rxrpc_krxiod_queue_transport(trans);
+ _leave(" CSUM failed");
+ return;
}
addr = pkt->nh.iph->saddr;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 702ede309b0..61c3abeacca 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -55,6 +55,7 @@ static void call_bind(struct rpc_task *task);
static void call_bind_status(struct rpc_task *task);
static void call_transmit(struct rpc_task *task);
static void call_status(struct rpc_task *task);
+static void call_transmit_status(struct rpc_task *task);
static void call_refresh(struct rpc_task *task);
static void call_refreshresult(struct rpc_task *task);
static void call_timeout(struct rpc_task *task);
@@ -672,6 +673,18 @@ call_allocate(struct rpc_task *task)
rpc_exit(task, -ERESTARTSYS);
}
+static inline int
+rpc_task_need_encode(struct rpc_task *task)
+{
+ return task->tk_rqstp->rq_snd_buf.len == 0;
+}
+
+static inline void
+rpc_task_force_reencode(struct rpc_task *task)
+{
+ task->tk_rqstp->rq_snd_buf.len = 0;
+}
+
/*
* 3. Encode arguments of an RPC call
*/
@@ -867,12 +880,14 @@ call_transmit(struct rpc_task *task)
if (task->tk_status != 0)
return;
/* Encode here so that rpcsec_gss can use correct sequence number. */
- if (task->tk_rqstp->rq_bytes_sent == 0) {
+ if (rpc_task_need_encode(task)) {
+ task->tk_rqstp->rq_bytes_sent = 0;
call_encode(task);
/* Did the encode result in an error condition? */
if (task->tk_status != 0)
goto out_nosend;
}
+ task->tk_action = call_transmit_status;
xprt_transmit(task);
if (task->tk_status < 0)
return;
@@ -884,6 +899,7 @@ call_transmit(struct rpc_task *task)
out_nosend:
/* release socket write lock before attempting to handle error */
xprt_abort_transmit(task);
+ rpc_task_force_reencode(task);
}
/*
@@ -915,7 +931,6 @@ call_status(struct rpc_task *task)
break;
case -ECONNREFUSED:
case -ENOTCONN:
- req->rq_bytes_sent = 0;
if (clnt->cl_autobind)
clnt->cl_port = 0;
task->tk_action = call_bind;
@@ -937,7 +952,18 @@ call_status(struct rpc_task *task)
}
/*
- * 6a. Handle RPC timeout
+ * 6a. Handle transmission errors.
+ */
+static void
+call_transmit_status(struct rpc_task *task)
+{
+ if (task->tk_status != -EAGAIN)
+ rpc_task_force_reencode(task);
+ call_status(task);
+}
+
+/*
+ * 6b. Handle RPC timeout
* We do not release the request slot, so we keep using the
* same XID for all retransmits.
*/
diff --git a/net/sunrpc/socklib.c b/net/sunrpc/socklib.c
index 8f97e90f36c..eb330d4f66d 100644
--- a/net/sunrpc/socklib.c
+++ b/net/sunrpc/socklib.c
@@ -6,6 +6,9 @@
* Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
*/
+#include <linux/compiler.h>
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
#include <linux/types.h>
#include <linux/pagemap.h>
#include <linux/udp.h>
@@ -165,6 +168,8 @@ int csum_partial_copy_to_xdr(struct xdr_buf *xdr, struct sk_buff *skb)
return -1;
if ((unsigned short)csum_fold(desc.csum))
return -1;
+ if (unlikely(skb->ip_summed == CHECKSUM_HW))
+ netdev_rx_csum_fault(skb->dev);
return 0;
no_checksum:
if (xdr_partial_copy_from_skb(xdr, 0, &desc, skb_read_bits) < 0)
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index f16e7cdd615..e50e7cf4373 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -623,12 +623,9 @@ svc_udp_recvfrom(struct svc_rqst *rqstp)
/* we can use it in-place */
rqstp->rq_arg.head[0].iov_base = skb->data + sizeof(struct udphdr);
rqstp->rq_arg.head[0].iov_len = len;
- if (skb->ip_summed != CHECKSUM_UNNECESSARY) {
- if ((unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum))) {
- skb_free_datagram(svsk->sk_sk, skb);
- return 0;
- }
- skb->ip_summed = CHECKSUM_UNNECESSARY;
+ if (skb_checksum_complete(skb)) {
+ skb_free_datagram(svsk->sk_sk, skb);
+ return 0;
}
rqstp->rq_skbuff = skb;
}
diff --git a/net/xfrm/xfrm_user.c b/net/xfrm/xfrm_user.c
index c35336a0f71..0cdd9a07e04 100644
--- a/net/xfrm/xfrm_user.c
+++ b/net/xfrm/xfrm_user.c
@@ -18,7 +18,6 @@
#include <linux/string.h>
#include <linux/net.h>
#include <linux/skbuff.h>
-#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/pfkeyv2.h>
#include <linux/ipsec.h>
@@ -26,6 +25,7 @@
#include <linux/security.h>
#include <net/sock.h>
#include <net/xfrm.h>
+#include <net/netlink.h>
#include <asm/uaccess.h>
static struct sock *xfrm_nl;
@@ -948,11 +948,6 @@ static struct xfrm_link {
[XFRM_MSG_FLUSHPOLICY - XFRM_MSG_BASE] = { .doit = xfrm_flush_policy },
};
-static int xfrm_done(struct netlink_callback *cb)
-{
- return 0;
-}
-
static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *errp)
{
struct rtattr *xfrma[XFRMA_MAX];
@@ -984,20 +979,15 @@ static int xfrm_user_rcv_msg(struct sk_buff *skb, struct nlmsghdr *nlh, int *err
if ((type == (XFRM_MSG_GETSA - XFRM_MSG_BASE) ||
type == (XFRM_MSG_GETPOLICY - XFRM_MSG_BASE)) &&
(nlh->nlmsg_flags & NLM_F_DUMP)) {
- u32 rlen;
-
if (link->dump == NULL)
goto err_einval;
if ((*errp = netlink_dump_start(xfrm_nl, skb, nlh,
- link->dump,
- xfrm_done)) != 0) {
+ link->dump, NULL)) != 0) {
return -1;
}
- rlen = NLMSG_ALIGN(nlh->nlmsg_len);
- if (rlen > skb->len)
- rlen = skb->len;
- skb_pull(skb, rlen);
+
+ netlink_queue_skip(nlh, skb);
return -1;
}
@@ -1032,60 +1022,13 @@ err_einval:
return -1;
}
-static int xfrm_user_rcv_skb(struct sk_buff *skb)
-{
- int err;
- struct nlmsghdr *nlh;
-
- while (skb->len >= NLMSG_SPACE(0)) {
- u32 rlen;
-
- nlh = (struct nlmsghdr *) skb->data;
- if (nlh->nlmsg_len < sizeof(*nlh) ||
- skb->len < nlh->nlmsg_len)
- return 0;
- rlen = NLMSG_ALIGN(nlh->nlmsg_len);
- if (rlen > skb->len)
- rlen = skb->len;
- if (xfrm_user_rcv_msg(skb, nlh, &err) < 0) {
- if (err == 0)
- return -1;
- netlink_ack(skb, nlh, err);
- } else if (nlh->nlmsg_flags & NLM_F_ACK)
- netlink_ack(skb, nlh, 0);
- skb_pull(skb, rlen);
- }
-
- return 0;
-}
-
static void xfrm_netlink_rcv(struct sock *sk, int len)
{
- unsigned int qlen = skb_queue_len(&sk->sk_receive_queue);
+ unsigned int qlen = 0;
do {
- struct sk_buff *skb;
-
down(&xfrm_cfg_sem);
-
- if (qlen > skb_queue_len(&sk->sk_receive_queue))
- qlen = skb_queue_len(&sk->sk_receive_queue);
-
- for (; qlen; qlen--) {
- skb = skb_dequeue(&sk->sk_receive_queue);
- if (xfrm_user_rcv_skb(skb)) {
- if (skb->len)
- skb_queue_head(&sk->sk_receive_queue,
- skb);
- else {
- kfree_skb(skb);
- qlen--;
- }
- break;
- }
- kfree_skb(skb);
- }
-
+ netlink_run_queue(sk, &qlen, &xfrm_user_rcv_msg);
up(&xfrm_cfg_sem);
} while (qlen);
diff --git a/sound/pci/ad1889.c b/sound/pci/ad1889.c
index e72ccd1a004..1fdae678a34 100644
--- a/sound/pci/ad1889.c
+++ b/sound/pci/ad1889.c
@@ -1067,7 +1067,6 @@ MODULE_DEVICE_TABLE(pci, snd_ad1889_ids);
static struct pci_driver ad1889_pci = {
.name = "AD1889 Audio",
- .owner = THIS_MODULE,
.id_table = snd_ad1889_ids,
.probe = snd_ad1889_probe,
.remove = __devexit_p(snd_ad1889_remove),
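
Every remaining hunk below makes the same mechanical change across the ALSA PCI drivers: the
explicit .owner = THIS_MODULE initializer is dropped from each driver's struct pci_driver,
since the PCI core now records the owning module at registration time (pci_register_driver()
supplies THIS_MODULE on the caller's behalf). A sketch of the resulting pattern, using
placeholder example_* names rather than any driver from the patch:

	#include <linux/init.h>
	#include <linux/module.h>
	#include <linux/pci.h>

	/* Illustration only: match-anything ID table for the placeholder driver. */
	static struct pci_device_id example_pci_ids[] = {
		{ PCI_DEVICE(PCI_ANY_ID, PCI_ANY_ID) },
		{ }
	};

	static int __devinit example_probe(struct pci_dev *pci, const struct pci_device_id *id)
	{
		return 0;				/* card setup would go here */
	}

	static void __devexit example_remove(struct pci_dev *pci)
	{
	}

	static struct pci_driver example_driver = {
		.name     = "Example PCI audio",
		/* no .owner: pci_register_driver() sets THIS_MODULE */
		.id_table = example_pci_ids,
		.probe    = example_probe,
		.remove   = __devexit_p(example_remove),
	};

	static int __init example_init(void)
	{
		return pci_register_driver(&example_driver);
	}

	static void __exit example_exit(void)
	{
		pci_unregister_driver(&example_driver);
	}

	module_init(example_init);
	module_exit(example_exit);
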
diff --git a/sound/pci/ali5451/ali5451.c b/sound/pci/ali5451/ali5451.c
index 4e76c4a636d..feffbe73387 100644
--- a/sound/pci/ali5451/ali5451.c
+++ b/sound/pci/ali5451/ali5451.c
@@ -2403,7 +2403,6 @@ static void __devexit snd_ali_remove(struct pci_dev *pci)
static struct pci_driver driver = {
.name = "ALI 5451",
- .owner = THIS_MODULE,
.id_table = snd_ali_ids,
.probe = snd_ali_probe,
.remove = __devexit_p(snd_ali_remove),
diff --git a/sound/pci/als4000.c b/sound/pci/als4000.c
index 7c61561f297..1904df65026 100644
--- a/sound/pci/als4000.c
+++ b/sound/pci/als4000.c
@@ -768,7 +768,6 @@ static void __devexit snd_card_als4000_remove(struct pci_dev *pci)
static struct pci_driver driver = {
.name = "ALS4000",
- .owner = THIS_MODULE,
.id_table = snd_als4000_ids,
.probe = snd_card_als4000_probe,
.remove = __devexit_p(snd_card_als4000_remove),
diff --git a/sound/pci/atiixp.c b/sound/pci/atiixp.c
index f5dad9248e3..8bae10d9352 100644
--- a/sound/pci/atiixp.c
+++ b/sound/pci/atiixp.c
@@ -1635,7 +1635,6 @@ static void __devexit snd_atiixp_remove(struct pci_dev *pci)
static struct pci_driver driver = {
.name = "ATI IXP AC97 controller",
- .owner = THIS_MODULE,
.id_table = snd_atiixp_ids,
.probe = snd_atiixp_probe,
.remove = __devexit_p(snd_atiixp_remove),
diff --git a/sound/pci/atiixp_modem.c b/sound/pci/atiixp_modem.c
index 0cf20207957..3174b662541 100644
--- a/sound/pci/atiixp_modem.c
+++ b/sound/pci/atiixp_modem.c
@@ -1309,7 +1309,6 @@ static void __devexit snd_atiixp_remove(struct pci_dev *pci)
static struct pci_driver driver = {
.name = "ATI IXP MC97 controller",
- .owner = THIS_MODULE,
.id_table = snd_atiixp_ids,
.probe = snd_atiixp_probe,
.remove = __devexit_p(snd_atiixp_remove),
diff --git a/sound/pci/au88x0/au88x0.c b/sound/pci/au88x0/au88x0.c
index 6af3b13f2fd..d965609d8b3 100644
--- a/sound/pci/au88x0/au88x0.c
+++ b/sound/pci/au88x0/au88x0.c
@@ -373,7 +373,6 @@ static void __devexit snd_vortex_remove(struct pci_dev *pci)
// pci_driver definition
static struct pci_driver driver = {
.name = CARD_NAME_SHORT,
- .owner = THIS_MODULE,
.id_table = snd_vortex_ids,
.probe = snd_vortex_probe,
.remove = __devexit_p(snd_vortex_remove),
diff --git a/sound/pci/azt3328.c b/sound/pci/azt3328.c
index da99b1be2e8..ab737d6df41 100644
--- a/sound/pci/azt3328.c
+++ b/sound/pci/azt3328.c
@@ -1838,7 +1838,6 @@ snd_azf3328_remove(struct pci_dev *pci)
static struct pci_driver driver = {
.name = "AZF3328",
- .owner = THIS_MODULE,
.id_table = snd_azf3328_ids,
.probe = snd_azf3328_probe,
.remove = __devexit_p(snd_azf3328_remove),
diff --git a/sound/pci/bt87x.c b/sound/pci/bt87x.c
index 01d98eeb242..8feca228c6d 100644
--- a/sound/pci/bt87x.c
+++ b/sound/pci/bt87x.c
@@ -897,14 +897,13 @@ static void __devexit snd_bt87x_remove(struct pci_dev *pci)
/* default entries for all Bt87x cards - it's not exported */
/* driver_data is set to 0 to call detection */
static struct pci_device_id snd_bt87x_default_ids[] = {
- BT_DEVICE(878, PCI_ANY_ID, PCI_ANY_ID, 0),
- BT_DEVICE(879, PCI_ANY_ID, PCI_ANY_ID, 0),
+ BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_878, PCI_ANY_ID, PCI_ANY_ID, 0),
+ BT_DEVICE(PCI_DEVICE_ID_BROOKTREE_879, PCI_ANY_ID, PCI_ANY_ID, 0),
{ }
};
static struct pci_driver driver = {
.name = "Bt87x",
- .owner = THIS_MODULE,
.id_table = snd_bt87x_ids,
.probe = snd_bt87x_probe,
.remove = __devexit_p(snd_bt87x_remove),
diff --git a/sound/pci/ca0106/ca0106_main.c b/sound/pci/ca0106/ca0106_main.c
index ee58d16002e..389d967c97f 100644
--- a/sound/pci/ca0106/ca0106_main.c
+++ b/sound/pci/ca0106/ca0106_main.c
@@ -1499,7 +1499,6 @@ MODULE_DEVICE_TABLE(pci, snd_ca0106_ids);
// pci_driver definition
static struct pci_driver driver = {
.name = "CA0106",
- .owner = THIS_MODULE,
.id_table = snd_ca0106_ids,
.probe = snd_ca0106_probe,
.remove = __devexit_p(snd_ca0106_remove),
diff --git a/sound/pci/cmipci.c b/sound/pci/cmipci.c
index 57e8e433d56..db605373b3b 100644
--- a/sound/pci/cmipci.c
+++ b/sound/pci/cmipci.c
@@ -3053,7 +3053,6 @@ static void __devexit snd_cmipci_remove(struct pci_dev *pci)
static struct pci_driver driver = {
.name = "C-Media PCI",
- .owner = THIS_MODULE,
.id_table = snd_cmipci_ids,
.probe = snd_cmipci_probe,
.remove = __devexit_p(snd_cmipci_remove),
diff --git a/sound/pci/cs4281.c b/sound/pci/cs4281.c
index aea2c47712f..034ff3755a3 100644
--- a/sound/pci/cs4281.c
+++ b/sound/pci/cs4281.c
@@ -2106,7 +2106,6 @@ static int cs4281_resume(snd_card_t *card)
static struct pci_driver driver = {
.name = "CS4281",
- .owner = THIS_MODULE,
.id_table = snd_cs4281_ids,
.probe = snd_cs4281_probe,
.remove = __devexit_p(snd_cs4281_remove),
diff --git a/sound/pci/cs46xx/cs46xx.c b/sound/pci/cs46xx/cs46xx.c
index 32b4f8465ce..b9fff4ee6f9 100644
--- a/sound/pci/cs46xx/cs46xx.c
+++ b/sound/pci/cs46xx/cs46xx.c
@@ -163,7 +163,6 @@ static void __devexit snd_card_cs46xx_remove(struct pci_dev *pci)
static struct pci_driver driver = {
.name = "Sound Fusion CS46xx",
- .owner = THIS_MODULE,
.id_table = snd_cs46xx_ids,
.probe = snd_card_cs46xx_probe,
.remove = __devexit_p(snd_card_cs46xx_remove),
diff --git a/sound/pci/emu10k1/emu10k1.c b/sound/pci/emu10k1/emu10k1.c
index dd1ea9d3b81..78270f8710f 100644
--- a/sound/pci/emu10k1/emu10k1.c
+++ b/sound/pci/emu10k1/emu10k1.c
@@ -223,7 +223,6 @@ static void __devexit snd_card_emu10k1_remove(struct pci_dev *pci)
static struct pci_driver driver = {
.name = "EMU10K1_Audigy",
- .owner = THIS_MODULE,
.id_table = snd_emu10k1_ids,
.probe = snd_card_emu10k1_probe,
.remove = __devexit_p(snd_card_emu10k1_remove),
diff --git a/sound/pci/emu10k1/emu10k1x.c b/sound/pci/emu10k1/emu10k1x.c
index cbb689474e7..795577716a5 100644
--- a/sound/pci/emu10k1/emu10k1x.c
+++ b/sound/pci/emu10k1/emu10k1x.c
@@ -1613,7 +1613,6 @@ MODULE_DEVICE_TABLE(pci, snd_emu10k1x_ids);
// pci_driver definition
static struct pci_driver driver = {
.name = "EMU10K1X",
- .owner = THIS_MODULE,
.id_table = snd_emu10k1x_ids,
.probe = snd_emu10k1x_probe,
.remove = __devexit_p(snd_emu10k1x_remove),
diff --git a/sound/pci/ens1370.c b/sound/pci/ens1370.c
index 92ff7c510f2..2daa575f43f 100644
--- a/sound/pci/ens1370.c
+++ b/sound/pci/ens1370.c
@@ -2386,7 +2386,6 @@ static void __devexit snd_audiopci_remove(struct pci_dev *pci)
static struct pci_driver driver = {
.name = DRIVER_NAME,
- .owner = THIS_MODULE,
.id_table = snd_audiopci_ids,
.probe = snd_audiopci_probe,
.remove = __devexit_p(snd_audiopci_remove),
diff --git a/sound/pci/es1938.c b/sound/pci/es1938.c
index 78f90defcab..c134f48152b 100644
--- a/sound/pci/es1938.c
+++ b/sound/pci/es1938.c
@@ -1758,7 +1758,6 @@ static void __devexit snd_es1938_remove(struct pci_dev *pci)
static struct pci_driver driver = {
.name = "ESS ES1938 (Solo-1)",
- .owner = THIS_MODULE,
.id_table = snd_es1938_ids,
.probe = snd_es1938_probe,
.remove = __devexit_p(snd_es1938_remove),
diff --git a/sound/pci/es1968.c b/sound/pci/es1968.c
index ac8294e21cc..50079dc9074 100644
--- a/sound/pci/es1968.c
+++ b/sound/pci/es1968.c
@@ -2761,7 +2761,6 @@ static void __devexit snd_es1968_remove(struct pci_dev *pci)
static struct pci_driver driver = {
.name = "ES1968 (ESS Maestro)",
- .owner = THIS_MODULE,
.id_table = snd_es1968_ids,
.probe = snd_es1968_probe,
.remove = __devexit_p(snd_es1968_remove),
diff --git a/sound/pci/fm801.c b/sound/pci/fm801.c
index 4c7c8d225c7..4e1d3434888 100644
--- a/sound/pci/fm801.c
+++ b/sound/pci/fm801.c
@@ -1459,7 +1459,6 @@ static void __devexit snd_card_fm801_remove(struct pci_dev *pci)
static struct pci_driver driver = {
.name = "FM801",
- .owner = THIS_MODULE,
.id_table = snd_fm801_ids,
.probe = snd_card_fm801_probe,
.remove = __devexit_p(snd_card_fm801_remove),
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 9d1412a9f2f..ed525c03c99 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1616,7 +1616,6 @@ MODULE_DEVICE_TABLE(pci, azx_ids);
/* pci_driver definition */
static struct pci_driver driver = {
.name = "HDA Intel",
- .owner = THIS_MODULE,
.id_table = azx_ids,
.probe = azx_probe,
.remove = __devexit_p(azx_remove),
diff --git a/sound/pci/ice1712/ice1712.c b/sound/pci/ice1712/ice1712.c
index 5aca37798c3..bd71bf42454 100644
--- a/sound/pci/ice1712/ice1712.c
+++ b/sound/pci/ice1712/ice1712.c
@@ -2735,7 +2735,6 @@ static void __devexit snd_ice1712_remove(struct pci_dev *pci)
static struct pci_driver driver = {
.name = "ICE1712",
- .owner = THIS_MODULE,
.id_table = snd_ice1712_ids,
.probe = snd_ice1712_probe,
.remove = __devexit_p(snd_ice1712_remove),
diff --git a/sound/pci/ice1712/ice1724.c b/sound/pci/ice1712/ice1724.c
index 5b4293f5a65..0b5389ee26d 100644
--- a/sound/pci/ice1712/ice1724.c
+++ b/sound/pci/ice1712/ice1724.c
@@ -2332,7 +2332,6 @@ static void __devexit snd_vt1724_remove(struct pci_dev *pci)
static struct pci_driver driver = {
.name = "ICE1724",
- .owner = THIS_MODULE,
.id_table = snd_vt1724_ids,
.probe = snd_vt1724_probe,
.remove = __devexit_p(snd_vt1724_remove),
diff --git a/sound/pci/intel8x0.c b/sound/pci/intel8x0.c
index 0801083f32d..cf7801d2dd1 100644
--- a/sound/pci/intel8x0.c
+++ b/sound/pci/intel8x0.c
@@ -2876,7 +2876,6 @@ static void __devexit snd_intel8x0_remove(struct pci_dev *pci)
static struct pci_driver driver = {
.name = "Intel ICH",
- .owner = THIS_MODULE,
.id_table = snd_intel8x0_ids,
.probe = snd_intel8x0_probe,
.remove = __devexit_p(snd_intel8x0_remove),
diff --git a/sound/pci/intel8x0m.c b/sound/pci/intel8x0m.c
index acfb197a833..a42091860da 100644
--- a/sound/pci/intel8x0m.c
+++ b/sound/pci/intel8x0m.c
@@ -1317,7 +1317,6 @@ static void __devexit snd_intel8x0m_remove(struct pci_dev *pci)
static struct pci_driver driver = {
.name = "Intel ICH Modem",
- .owner = THIS_MODULE,
.id_table = snd_intel8x0m_ids,
.probe = snd_intel8x0m_probe,
.remove = __devexit_p(snd_intel8x0m_remove),
diff --git a/sound/pci/korg1212/korg1212.c b/sound/pci/korg1212/korg1212.c
index 5561fd4091e..a110d664f62 100644
--- a/sound/pci/korg1212/korg1212.c
+++ b/sound/pci/korg1212/korg1212.c
@@ -2534,7 +2534,6 @@ static void __devexit snd_korg1212_remove(struct pci_dev *pci)
static struct pci_driver driver = {
.name = "korg1212",
- .owner = THIS_MODULE,
.id_table = snd_korg1212_ids,
.probe = snd_korg1212_probe,
.remove = __devexit_p(snd_korg1212_remove),
diff --git a/sound/pci/maestro3.c b/sound/pci/maestro3.c
index 99eb76c56f8..ede7a75bfe0 100644
--- a/sound/pci/maestro3.c
+++ b/sound/pci/maestro3.c
@@ -2858,7 +2858,6 @@ static void __devexit snd_m3_remove(struct pci_dev *pci)
static struct pci_driver driver = {
.name = "Maestro3",
- .owner = THIS_MODULE,
.id_table = snd_m3_ids,
.probe = snd_m3_probe,
.remove = __devexit_p(snd_m3_remove),
diff --git a/sound/pci/mixart/mixart.c b/sound/pci/mixart/mixart.c
index c341c99ec78..b3090a13eda 100644
--- a/sound/pci/mixart/mixart.c
+++ b/sound/pci/mixart/mixart.c
@@ -1423,7 +1423,6 @@ static void __devexit snd_mixart_remove(struct pci_dev *pci)
static struct pci_driver driver = {
.name = "Digigram miXart",
- .owner = THIS_MODULE,
.id_table = snd_mixart_ids,
.probe = snd_mixart_probe,
.remove = __devexit_p(snd_mixart_remove),
diff --git a/sound/pci/nm256/nm256.c b/sound/pci/nm256/nm256.c
index e7aa1517845..089d23b4a00 100644
--- a/sound/pci/nm256/nm256.c
+++ b/sound/pci/nm256/nm256.c
@@ -1673,7 +1673,6 @@ static void __devexit snd_nm256_remove(struct pci_dev *pci)
static struct pci_driver driver = {
.name = "NeoMagic 256",
- .owner = THIS_MODULE,
.id_table = snd_nm256_ids,
.probe = snd_nm256_probe,
.remove = __devexit_p(snd_nm256_remove),
diff --git a/sound/pci/rme32.c b/sound/pci/rme32.c
index e6627b0e38e..783df7625c1 100644
--- a/sound/pci/rme32.c
+++ b/sound/pci/rme32.c
@@ -2012,7 +2012,6 @@ static void __devexit snd_rme32_remove(struct pci_dev *pci)
static struct pci_driver driver = {
.name = "RME Digi32",
- .owner = THIS_MODULE,
.id_table = snd_rme32_ids,
.probe = snd_rme32_probe,
.remove = __devexit_p(snd_rme32_remove),
diff --git a/sound/pci/rme96.c b/sound/pci/rme96.c
index 0eddeb16a10..6d422ef6499 100644
--- a/sound/pci/rme96.c
+++ b/sound/pci/rme96.c
@@ -2413,7 +2413,6 @@ static void __devexit snd_rme96_remove(struct pci_dev *pci)
static struct pci_driver driver = {
.name = "RME Digi96",
- .owner = THIS_MODULE,
.id_table = snd_rme96_ids,
.probe = snd_rme96_probe,
.remove = __devexit_p(snd_rme96_remove),
diff --git a/sound/pci/rme9652/hdsp.c b/sound/pci/rme9652/hdsp.c
index 845158b01b0..d15ffb3e9b0 100644
--- a/sound/pci/rme9652/hdsp.c
+++ b/sound/pci/rme9652/hdsp.c
@@ -5062,7 +5062,6 @@ static void __devexit snd_hdsp_remove(struct pci_dev *pci)
static struct pci_driver driver = {
.name = "RME Hammerfall DSP",
- .owner = THIS_MODULE,
.id_table = snd_hdsp_ids,
.probe = snd_hdsp_probe,
.remove = __devexit_p(snd_hdsp_remove),
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 60a1141f132..a1aef6f6767 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -3639,7 +3639,6 @@ static void __devexit snd_hdspm_remove(struct pci_dev *pci)
static struct pci_driver driver = {
.name = "RME Hammerfall DSP MADI",
- .owner = THIS_MODULE,
.id_table = snd_hdspm_ids,
.probe = snd_hdspm_probe,
.remove = __devexit_p(snd_hdspm_remove),
diff --git a/sound/pci/rme9652/rme9652.c b/sound/pci/rme9652/rme9652.c
index 59fcef9b6b8..f9d0c126c21 100644
--- a/sound/pci/rme9652/rme9652.c
+++ b/sound/pci/rme9652/rme9652.c
@@ -2654,7 +2654,6 @@ static void __devexit snd_rme9652_remove(struct pci_dev *pci)
static struct pci_driver driver = {
.name = "RME Digi9652 (Hammerfall)",
- .owner = THIS_MODULE,
.id_table = snd_rme9652_ids,
.probe = snd_rme9652_probe,
.remove = __devexit_p(snd_rme9652_remove),
diff --git a/sound/pci/sonicvibes.c b/sound/pci/sonicvibes.c
index 9a35474aad0..e92ef3ae2ca 100644
--- a/sound/pci/sonicvibes.c
+++ b/sound/pci/sonicvibes.c
@@ -1502,7 +1502,6 @@ static void __devexit snd_sonic_remove(struct pci_dev *pci)
static struct pci_driver driver = {
.name = "S3 SonicVibes",
- .owner = THIS_MODULE,
.id_table = snd_sonic_ids,
.probe = snd_sonic_probe,
.remove = __devexit_p(snd_sonic_remove),
diff --git a/sound/pci/trident/trident.c b/sound/pci/trident/trident.c
index a8ca8e17853..940d531575c 100644
--- a/sound/pci/trident/trident.c
+++ b/sound/pci/trident/trident.c
@@ -177,7 +177,6 @@ static void __devexit snd_trident_remove(struct pci_dev *pci)
static struct pci_driver driver = {
.name = "Trident4DWaveAudio",
- .owner = THIS_MODULE,
.id_table = snd_trident_ids,
.probe = snd_trident_probe,
.remove = __devexit_p(snd_trident_remove),
diff --git a/sound/pci/via82xx.c b/sound/pci/via82xx.c
index 523eace250f..fad2a2413bf 100644
--- a/sound/pci/via82xx.c
+++ b/sound/pci/via82xx.c
@@ -2478,7 +2478,6 @@ static void __devexit snd_via82xx_remove(struct pci_dev *pci)
static struct pci_driver driver = {
.name = "VIA 82xx Audio",
- .owner = THIS_MODULE,
.id_table = snd_via82xx_ids,
.probe = snd_via82xx_probe,
.remove = __devexit_p(snd_via82xx_remove),
diff --git a/sound/pci/via82xx_modem.c b/sound/pci/via82xx_modem.c
index 011f0fb63bf..b83660bd05b 100644
--- a/sound/pci/via82xx_modem.c
+++ b/sound/pci/via82xx_modem.c
@@ -1198,7 +1198,6 @@ static void __devexit snd_via82xx_remove(struct pci_dev *pci)
static struct pci_driver driver = {
.name = "VIA 82xx Modem",
- .owner = THIS_MODULE,
.id_table = snd_via82xx_modem_ids,
.probe = snd_via82xx_probe,
.remove = __devexit_p(snd_via82xx_remove),
diff --git a/sound/pci/vx222/vx222.c b/sound/pci/vx222/vx222.c
index 2a7ad9dec02..dca6bd2c758 100644
--- a/sound/pci/vx222/vx222.c
+++ b/sound/pci/vx222/vx222.c
@@ -252,7 +252,6 @@ static void __devexit snd_vx222_remove(struct pci_dev *pci)
static struct pci_driver driver = {
.name = "Digigram VX222",
- .owner = THIS_MODULE,
.id_table = snd_vx222_ids,
.probe = snd_vx222_probe,
.remove = __devexit_p(snd_vx222_remove),
diff --git a/sound/pci/ymfpci/ymfpci.c b/sound/pci/ymfpci/ymfpci.c
index 1bbba32517f..d013237205d 100644
--- a/sound/pci/ymfpci/ymfpci.c
+++ b/sound/pci/ymfpci/ymfpci.c
@@ -344,7 +344,6 @@ static void __devexit snd_card_ymfpci_remove(struct pci_dev *pci)
static struct pci_driver driver = {
.name = "Yamaha DS-XG PCI",
- .owner = THIS_MODULE,
.id_table = snd_ymfpci_ids,
.probe = snd_card_ymfpci_probe,
.remove = __devexit_p(snd_card_ymfpci_remove),