-rw-r--r--Documentation/filesystems/00-INDEX2
-rw-r--r--Documentation/filesystems/ceph.txt11
-rw-r--r--Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe.txt54
-rw-r--r--MAINTAINERS14
-rw-r--r--Makefile2
-rw-r--r--arch/arm/include/asm/cacheflush.h38
-rw-r--r--arch/arm/include/asm/clkdev.h1
-rw-r--r--arch/arm/include/asm/irq.h1
-rw-r--r--arch/arm/include/asm/outercache.h75
-rw-r--r--arch/arm/include/asm/system.h16
-rw-r--r--arch/arm/kernel/kprobes.c10
-rw-r--r--arch/arm/lib/memmove.S4
-rw-r--r--arch/arm/mm/Kconfig13
-rw-r--r--arch/arm/mm/cache-l2x0.c10
-rw-r--r--arch/arm/vfp/vfpmodule.c2
-rw-r--r--arch/cris/arch-v32/drivers/pci/bios.c2
-rw-r--r--arch/frv/mb93090-mb00/pci-frv.c6
-rw-r--r--arch/microblaze/Kconfig3
-rw-r--r--arch/microblaze/Makefile4
-rw-r--r--arch/microblaze/boot/Makefile6
-rw-r--r--arch/microblaze/include/asm/processor.h1
-rw-r--r--arch/microblaze/include/asm/segment.h49
-rw-r--r--arch/microblaze/include/asm/thread_info.h5
-rw-r--r--arch/microblaze/include/asm/tlbflush.h3
-rw-r--r--arch/microblaze/include/asm/uaccess.h447
-rw-r--r--arch/microblaze/kernel/dma.c2
-rw-r--r--arch/microblaze/kernel/head.S12
-rw-r--r--arch/microblaze/kernel/hw_exception_handler.S112
-rw-r--r--arch/microblaze/kernel/misc.S15
-rw-r--r--arch/microblaze/kernel/process.c10
-rw-r--r--arch/microblaze/kernel/setup.c24
-rw-r--r--arch/microblaze/kernel/traps.c6
-rw-r--r--arch/microblaze/lib/Makefile3
-rw-r--r--arch/microblaze/lib/fastcopy.S6
-rw-r--r--arch/microblaze/lib/memcpy.c2
-rw-r--r--arch/microblaze/lib/memset.c15
-rw-r--r--arch/microblaze/lib/uaccess.c48
-rw-r--r--arch/microblaze/lib/uaccess_old.S45
-rw-r--r--arch/microblaze/mm/fault.c24
-rw-r--r--arch/microblaze/mm/init.c9
-rw-r--r--arch/microblaze/mm/pgtable.c2
-rw-r--r--arch/powerpc/include/asm/asm-compat.h2
-rw-r--r--arch/powerpc/kernel/misc.S26
-rw-r--r--arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c2
-rw-r--r--arch/sh/kernel/return_address.c3
-rw-r--r--arch/sh/mm/tlb-pteaex.c28
-rw-r--r--arch/sh/mm/tlb-sh3.c19
-rw-r--r--arch/sh/mm/tlb-sh4.c28
-rw-r--r--arch/sh/mm/tlbflush_32.c28
-rw-r--r--arch/sparc/configs/sparc64_defconfig28
-rw-r--r--arch/sparc/include/asm/stat.h4
-rw-r--r--arch/sparc/kernel/helpers.S75
-rw-r--r--arch/sparc/kernel/perf_event.c2
-rw-r--r--arch/sparc/kernel/ptrace_32.c4
-rw-r--r--arch/sparc/kernel/ptrace_64.c4
-rw-r--r--arch/sparc/kernel/sysfs.c4
-rw-r--r--arch/sparc/kernel/us2e_cpufreq.c8
-rw-r--r--arch/sparc/kernel/us3_cpufreq.c8
-rw-r--r--arch/sparc/mm/init_64.c2
-rw-r--r--arch/x86/include/asm/fixmap.h6
-rw-r--r--arch/x86/include/asm/hw_irq.h1
-rw-r--r--arch/x86/include/asm/msr-index.h2
-rw-r--r--arch/x86/kernel/apic/io_apic.c8
-rw-r--r--arch/x86/kernel/cpu/perf_event.c54
-rw-r--r--arch/x86/kernel/cpu/perf_event_amd.c80
-rw-r--r--arch/x86/kernel/dumpstack.h5
-rw-r--r--arch/x86/kernel/head32.c4
-rw-r--r--arch/x86/kernel/head64.c3
-rw-r--r--arch/x86/kernel/irqinit.c22
-rw-r--r--arch/x86/kernel/kgdb.c2
-rw-r--r--arch/x86/kernel/process.c32
-rw-r--r--arch/x86/kernel/setup.c10
-rw-r--r--arch/x86/kernel/smpboot.c6
-rw-r--r--arch/x86/kernel/vmlinux.lds.S2
-rw-r--r--arch/x86/mm/init.c32
-rw-r--r--arch/x86/pci/acpi.c22
-rw-r--r--arch/x86/pci/i386.c5
-rw-r--r--drivers/ata/pata_via.c4
-rw-r--r--drivers/base/power/main.c31
-rw-r--r--drivers/char/tty_io.c2
-rw-r--r--drivers/firewire/core-device.c103
-rw-r--r--drivers/firewire/core-iso.c5
-rw-r--r--drivers/firewire/ohci.c4
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c1
-rw-r--r--drivers/gpu/drm/drm_edid.c9
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c2
-rw-r--r--drivers/gpu/drm/drm_fops.c16
-rw-r--r--drivers/gpu/drm/nouveau/Makefile2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.c28
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bios.h3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_bo.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_connector.c2
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_dma.c5
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.c10
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drv.h6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_irq.c609
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_state.c5
-rw-r--r--drivers/gpu/drm/nouveau/nv04_crtc.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c6
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c4
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fb.c32
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_graph.c22
-rw-r--r--drivers/gpu/drm/nouveau/nv50_grctx.c13
-rw-r--r--drivers/gpu/drm/radeon/Makefile2
-rw-r--r--drivers/gpu/drm/radeon/atom.c91
-rw-r--r--drivers/gpu/drm/radeon/atom.h8
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c98
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c6
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c11
-rw-r--r--drivers/gpu/drm/radeon/r100.c25
-rw-r--r--drivers/gpu/drm/radeon/r200.c1
-rw-r--r--drivers/gpu/drm/radeon/r300.c5
-rw-r--r--drivers/gpu/drm/radeon/r420.c2
-rw-r--r--drivers/gpu/drm/radeon/r520.c9
-rw-r--r--drivers/gpu/drm/radeon/r600.c30
-rw-r--r--drivers/gpu/drm/radeon/r600_audio.c52
-rw-r--r--drivers/gpu/drm/radeon/r600_blit_shaders.c35
-rw-r--r--drivers/gpu/drm/radeon/r600_cp.c3
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c70
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c191
-rw-r--r--drivers/gpu/drm/radeon/r600_reg.h10
-rw-r--r--drivers/gpu/drm/radeon/r600d.h49
-rw-r--r--drivers/gpu/drm/radeon/radeon.h66
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.c772
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h545
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c461
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c237
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c68
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_encoders.c121
-rw-r--r--drivers/gpu/drm/radeon/radeon_i2c.c153
-rw-r--r--drivers/gpu/drm/radeon/radeon_irq_kms.c22
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_crtc.c8
-rw-r--r--drivers/gpu/drm/radeon/radeon_legacy_tv.c29
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h12
-rw-r--r--drivers/gpu/drm/radeon/radeon_object.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c46
-rw-r--r--drivers/gpu/drm/radeon/radeon_reg.h1
-rw-r--r--drivers/gpu/drm/radeon/reg_srcs/r60075
-rw-r--r--drivers/gpu/drm/radeon/rs400.c7
-rw-r--r--drivers/gpu/drm/radeon/rs600.c33
-rw-r--r--drivers/gpu/drm/radeon/rs600d.h53
-rw-r--r--drivers/gpu/drm/radeon/rs690.c122
-rw-r--r--drivers/gpu/drm/radeon/rs690d.h3
-rw-r--r--drivers/gpu/drm/radeon/rv515.c45
-rw-r--r--drivers/gpu/drm/radeon/rv770.c31
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c4
-rw-r--r--drivers/gpu/drm/ttm/ttm_memory.c18
-rw-r--r--drivers/gpu/drm/ttm/ttm_tt.c23
-rw-r--r--drivers/gpu/drm/vmwgfx/Kconfig2
-rw-r--r--drivers/hid/hid-gyration.c5
-rw-r--r--drivers/hid/usbhid/hid-quirks.c1
-rw-r--r--drivers/hwmon/Kconfig4
-rw-r--r--drivers/hwmon/coretemp.c4
-rw-r--r--drivers/hwmon/w83793.c2
-rw-r--r--drivers/ide/ide-probe.c12
-rw-r--r--drivers/ide/via82cxxx.c57
-rw-r--r--drivers/isdn/hisax/avma1_cs.c12
-rw-r--r--drivers/isdn/hisax/elsa_cs.c12
-rw-r--r--drivers/isdn/hisax/sedlbauer_cs.c12
-rw-r--r--drivers/isdn/hisax/teles_cs.c12
-rw-r--r--drivers/misc/kgdbts.c6
-rw-r--r--drivers/net/atlx/atl1.c2
-rw-r--r--drivers/net/benet/be_ethtool.c2
-rw-r--r--drivers/net/bnx2.c14
-rw-r--r--drivers/net/bonding/bond_main.c40
-rw-r--r--drivers/net/can/bfin_can.c97
-rw-r--r--drivers/net/e1000/e1000.h1
-rw-r--r--drivers/net/e1000/e1000_main.c9
-rw-r--r--drivers/net/e1000e/e1000.h1
-rw-r--r--drivers/net/e1000e/netdev.c11
-rw-r--r--drivers/net/gianfar.c5
-rw-r--r--drivers/net/gianfar.h6
-rw-r--r--drivers/net/igb/e1000_mac.c6
-rw-r--r--drivers/net/igb/igb.h1
-rw-r--r--drivers/net/igb/igb_main.c22
-rw-r--r--drivers/net/igbvf/igbvf.h1
-rw-r--r--drivers/net/igbvf/netdev.c11
-rw-r--r--drivers/net/ixgbe/ixgbe.h7
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c21
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.c33
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c43
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h1
-rw-r--r--drivers/net/ixgbevf/ixgbevf_main.c3
-rw-r--r--drivers/net/ksz884x.c2
-rw-r--r--drivers/net/netxen/netxen_nic.h4
-rw-r--r--drivers/net/netxen/netxen_nic_ctx.c14
-rw-r--r--drivers/net/netxen/netxen_nic_init.c2
-rw-r--r--drivers/net/netxen/netxen_nic_main.c49
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c3
-rw-r--r--drivers/net/r8169.c54
-rw-r--r--drivers/net/tulip/uli526x.c8
-rw-r--r--drivers/net/via-velocity.c2
-rw-r--r--drivers/of/fdt.c7
-rw-r--r--drivers/pci/hotplug/pciehp_hpc.c5
-rw-r--r--drivers/pci/ioapic.c9
-rw-r--r--drivers/pci/pci.c44
-rw-r--r--drivers/pci/probe.c53
-rw-r--r--drivers/pci/quirks.c29
-rw-r--r--drivers/pci/setup-res.c14
-rw-r--r--drivers/pcmcia/at91_cf.c2
-rw-r--r--drivers/pcmcia/au1000_generic.c13
-rw-r--r--drivers/pcmcia/bfin_cf_pcmcia.c12
-rw-r--r--drivers/pcmcia/cs.c124
-rw-r--r--drivers/pcmcia/db1xxx_ss.c27
-rw-r--r--drivers/pcmcia/ds.c8
-rw-r--r--drivers/pcmcia/i82092.c16
-rw-r--r--drivers/pcmcia/i82365.c11
-rw-r--r--drivers/pcmcia/m32r_cfc.c11
-rw-r--r--drivers/pcmcia/m32r_pcc.c12
-rw-r--r--drivers/pcmcia/m8xx_pcmcia.c17
-rw-r--r--drivers/pcmcia/omap_cf.c12
-rw-r--r--drivers/pcmcia/pd6729.c80
-rw-r--r--drivers/pcmcia/pxa2xx_base.c8
-rw-r--r--drivers/pcmcia/rsrc_nonstatic.c19
-rw-r--r--drivers/pcmcia/sa1100_generic.c13
-rw-r--r--drivers/pcmcia/sa1111_generic.c12
-rw-r--r--drivers/pcmcia/tcic.c12
-rw-r--r--drivers/pcmcia/vrc4171_card.c13
-rw-r--r--drivers/pcmcia/yenta_socket.c17
-rw-r--r--drivers/platform/x86/Kconfig10
-rw-r--r--drivers/platform/x86/Makefile1
-rw-r--r--drivers/platform/x86/asus-laptop.c4
-rw-r--r--drivers/platform/x86/eeepc-wmi.c157
-rw-r--r--drivers/serial/serial_cs.c1
-rw-r--r--drivers/serial/sunsu.c4
-rw-r--r--drivers/staging/et131x/et1310_mac.c2
-rw-r--r--drivers/usb/gadget/at91_udc.c9
-rw-r--r--drivers/video/sunxvr500.c24
-rw-r--r--fs/ceph/addr.c10
-rw-r--r--fs/ceph/auth_x.c53
-rw-r--r--fs/ceph/caps.c73
-rw-r--r--fs/ceph/dir.c4
-rw-r--r--fs/ceph/inode.c16
-rw-r--r--fs/ceph/mds_client.c43
-rw-r--r--fs/ceph/messenger.c19
-rw-r--r--fs/ceph/messenger.h1
-rw-r--r--fs/ceph/osd_client.c29
-rw-r--r--fs/ceph/osd_client.h2
-rw-r--r--fs/ceph/osdmap.c17
-rw-r--r--fs/ceph/snap.c6
-rw-r--r--fs/ext3/ialloc.c4
-rw-r--r--fs/ext3/inode.c2
-rw-r--r--fs/ext4/ialloc.c4
-rw-r--r--fs/ext4/inode.c4
-rw-r--r--fs/ext4/super.c29
-rw-r--r--fs/fat/namei_vfat.c6
-rw-r--r--fs/fscache/object.c6
-rw-r--r--fs/fscache/operation.c4
-rw-r--r--fs/logfs/dev_bdev.c9
-rw-r--r--fs/logfs/dir.c4
-rw-r--r--fs/logfs/journal.c7
-rw-r--r--fs/logfs/logfs.h1
-rw-r--r--fs/logfs/readwrite.c13
-rw-r--r--fs/logfs/segment.c54
-rw-r--r--fs/logfs/super.c15
-rw-r--r--fs/namei.c18
-rw-r--r--fs/nilfs2/segbuf.c8
-rw-r--r--fs/nilfs2/segment.c15
-rw-r--r--fs/ocfs2/acl.c77
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c4
-rw-r--r--fs/ocfs2/inode.c15
-rw-r--r--fs/ocfs2/localalloc.c10
-rw-r--r--fs/ocfs2/locks.c2
-rw-r--r--fs/ocfs2/namei.c28
-rw-r--r--fs/ocfs2/ocfs2.h14
-rw-r--r--fs/ocfs2/refcounttree.c1
-rw-r--r--fs/ocfs2/suballoc.c129
-rw-r--r--fs/ocfs2/suballoc.h5
-rw-r--r--fs/ocfs2/xattr.c12
-rw-r--r--fs/proc/base.c5
-rw-r--r--fs/proc/task_mmu.c87
-rw-r--r--fs/reiserfs/super.c10
-rw-r--r--include/drm/drmP.h34
-rw-r--r--include/drm/drm_mem_util.h65
-rw-r--r--include/drm/drm_pciids.h1
-rw-r--r--include/drm/ttm/ttm_bo_driver.h1
-rw-r--r--include/linux/amba/bus.h3
-rw-r--r--include/linux/amba/pl061.h2
-rw-r--r--include/linux/clockchips.h2
-rw-r--r--include/linux/ext3_fs.h6
-rw-r--r--include/linux/ext3_fs_i.h2
-rw-r--r--include/linux/freezer.h7
-rw-r--r--include/linux/fscache-cache.h2
-rw-r--r--include/linux/ioport.h2
-rw-r--r--include/linux/netfilter_ipv6.h1
-rw-r--r--include/linux/perf_event.h21
-rw-r--r--include/linux/rcupdate.h23
-rw-r--r--include/linux/skbuff.h6
-rw-r--r--include/linux/socket.h1
-rw-r--r--include/linux/tracepoint.h2
-rw-r--r--include/pcmcia/ss.h6
-rw-r--r--kernel/cgroup_freezer.c9
-rw-r--r--kernel/cred.c6
-rw-r--r--kernel/early_res.c6
-rw-r--r--kernel/irq/chip.c35
-rw-r--r--kernel/irq/manage.c22
-rw-r--r--kernel/kgdb.c205
-rw-r--r--kernel/perf_event.c22
-rw-r--r--kernel/posix-cpu-timers.c10
-rw-r--r--kernel/power/process.c5
-rw-r--r--kernel/rcupdate.c23
-rw-r--r--kernel/resource.c44
-rw-r--r--kernel/sched.c14
-rw-r--r--kernel/sched_debug.c4
-rw-r--r--kernel/slow-work.c2
-rw-r--r--kernel/slow-work.h8
-rw-r--r--kernel/softlockup.c4
-rw-r--r--kernel/time/tick-oneshot.c52
-rw-r--r--kernel/time/timekeeping.c3
-rw-r--r--kernel/time/timer_list.c3
-rw-r--r--kernel/timer.c1
-rw-r--r--kernel/trace/ring_buffer.c22
-rw-r--r--kernel/trace/trace_clock.c4
-rw-r--r--kernel/trace/trace_event_perf.c11
-rw-r--r--mm/bootmem.c13
-rw-r--r--mm/nommu.c6
-rw-r--r--net/8021q/vlan.c2
-rw-r--r--net/8021q/vlan_dev.c71
-rw-r--r--net/core/netpoll.c7
-rw-r--r--net/ipv4/devinet.c2
-rw-r--r--net/ipv4/ipmr.c11
-rw-r--r--net/ipv4/route.c21
-rw-r--r--net/ipv6/addrconf.c2
-rw-r--r--net/ipv6/ip6mr.c11
-rw-r--r--net/ipv6/netfilter/ip6table_raw.c2
-rw-r--r--net/ipv6/route.c13
-rw-r--r--net/key/af_key.c8
-rw-r--r--net/netfilter/xt_hashlimit.c4
-rw-r--r--net/netfilter/xt_recent.c2
-rw-r--r--net/sched/Kconfig5
-rw-r--r--net/sched/cls_cgroup.c36
-rw-r--r--net/socket.c4
-rw-r--r--sound/core/pcm_lib.c6
-rw-r--r--sound/pci/ac97/ac97_patch.c2
-rw-r--r--sound/pci/hda/hda_intel.c1
-rw-r--r--sound/pci/hda/patch_realtek.c5
-rw-r--r--tools/perf/Makefile10
-rw-r--r--tools/perf/builtin-probe.c1
-rw-r--r--tools/perf/builtin-top.c13
-rw-r--r--tools/perf/util/probe-event.c2
-rw-r--r--tools/perf/util/probe-finder.c18
-rw-r--r--tools/perf/util/probe-finder.h1
-rw-r--r--tools/perf/util/scripting-engines/trace-event-python.c17
-rw-r--r--tools/perf/util/symbol.c18
-rw-r--r--tools/perf/util/symbol.h3
351 files changed, 5901 insertions, 3734 deletions
diff --git a/Documentation/filesystems/00-INDEX b/Documentation/filesystems/00-INDEX
index 3bae418c6ad..4303614b5ad 100644
--- a/Documentation/filesystems/00-INDEX
+++ b/Documentation/filesystems/00-INDEX
@@ -16,6 +16,8 @@ befs.txt
- information about the BeOS filesystem for Linux.
bfs.txt
- info for the SCO UnixWare Boot Filesystem (BFS).
+ceph.txt
+ - info for the Ceph Distributed File System
cifs.txt
- description of the CIFS filesystem.
coda.txt
diff --git a/Documentation/filesystems/ceph.txt b/Documentation/filesystems/ceph.txt
index 6e03917316b..0660c9f5dee 100644
--- a/Documentation/filesystems/ceph.txt
+++ b/Documentation/filesystems/ceph.txt
@@ -8,7 +8,7 @@ Basic features include:
* POSIX semantics
* Seamless scaling from 1 to many thousands of nodes
- * High availability and reliability. No single points of failure.
+ * High availability and reliability. No single point of failure.
* N-way replication of data across storage nodes
* Fast recovery from node failures
* Automatic rebalancing of data on node addition/removal
@@ -94,7 +94,7 @@ Mount Options
wsize=X
Specify the maximum write size in bytes. By default there is no
- maximu. Ceph will normally size writes based on the file stripe
+ maximum. Ceph will normally size writes based on the file stripe
size.
rsize=X
@@ -115,7 +115,7 @@ Mount Options
number of entries in that directory.
nocrc
- Disable CRC32C calculation for data writes. If set, the OSD
+ Disable CRC32C calculation for data writes. If set, the storage node
must rely on TCP's error correction to detect data corruption
in the data payload.
@@ -133,7 +133,8 @@ For more information on Ceph, see the home page at
http://ceph.newdream.net/
The Linux kernel client source tree is available at
- git://ceph.newdream.net/linux-ceph-client.git
+ git://ceph.newdream.net/git/ceph-client.git
+ git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
and the source for the full system is at
- git://ceph.newdream.net/ceph.git
+ git://ceph.newdream.net/git/ceph.git
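As an illustration of the wsize/rsize/nocrc options documented above, a Ceph filesystem could be mounted from C via mount(2) along these lines; the monitor address, mount point and option values are placeholders, not taken from this change:

#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* wsize/rsize in bytes; nocrc leaves corruption detection to TCP */
	const char *opts = "name=admin,wsize=1048576,rsize=524288,nocrc";

	if (mount("192.168.0.1:6789:/", "/mnt/ceph", "ceph", 0, opts)) {
		perror("mount");
		return 1;
	}
	return 0;
}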
diff --git a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe.txt b/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe.txt
index 6e37be1eeb2..4f8930263dd 100644
--- a/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe.txt
+++ b/Documentation/powerpc/dts-bindings/fsl/cpm_qe/qe.txt
@@ -21,6 +21,15 @@ Required properties:
- fsl,qe-num-snums: define how many serial number(SNUM) the QE can use for the
threads.
+Optional properties:
+- fsl,firmware-phandle:
+ Usage: required only if there is no fsl,qe-firmware child node
+ Value type: <phandle>
+ Definition: Points to a firmware node (see "QE Firmware Node" below)
+ that contains the firmware that should be uploaded for this QE.
+ The compatible property for the firmware node should say,
+ "fsl,qe-firmware".
+
Recommended properties
- brg-frequency : the internal clock source frequency for baud-rate
generators in Hz.
@@ -59,3 +68,48 @@ Example:
reg = <0 c000>;
};
};
+
+* QE Firmware Node
+
+This node defines a firmware binary that is embedded in the device tree, for
+the purpose of passing the firmware from bootloader to the kernel, or from
+the hypervisor to the guest.
+
+The firmware node itself contains the firmware binary contents, a compatible
+property, and any firmware-specific properties. The node should be placed
+inside a QE node that needs it. Doing so eliminates the need for a
+fsl,firmware-phandle property. Other QE nodes that need the same firmware
+should define an fsl,firmware-phandle property that points to the firmware node
+in the first QE node.
+
+The fsl,firmware property can be specified in the DTS (possibly using incbin)
+or can be inserted by the boot loader at boot time.
+
+Required properties:
+ - compatible
+ Usage: required
+ Value type: <string>
+ Definition: A standard property. Specify a string that indicates what
+ kind of firmware it is. For QE, this should be "fsl,qe-firmware".
+
+ - fsl,firmware
+ Usage: required
+ Value type: <prop-encoded-array>, encoded as an array of bytes
+ Definition: A standard property. This property contains the firmware
+ binary "blob".
+
+Example:
+ qe1@e0080000 {
+ compatible = "fsl,qe";
+ qe_firmware:qe-firmware {
+ compatible = "fsl,qe-firmware";
+ fsl,firmware = [0x70 0xcd 0x00 0x00 0x01 0x46 0x45 ...];
+ };
+ ...
+ };
+
+ qe2@e0090000 {
+ compatible = "fsl,qe";
+ fsl,firmware-phandle = <&qe_firmware>;
+ ...
+ };
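To make the binding above concrete, here is a hedged sketch (not code from this patch) of how a QE driver could resolve the firmware: prefer an embedded "fsl,qe-firmware" child node, fall back to the fsl,firmware-phandle reference, then read the fsl,firmware byte array. The helper name qe_find_firmware_blob is made up for illustration:

#include <linux/of.h>

static const u8 *qe_find_firmware_blob(struct device_node *qe, int *lenp)
{
	struct device_node *fw = NULL, *child;

	/* Prefer a firmware node embedded directly in this QE node... */
	for_each_child_of_node(qe, child)
		if (of_device_is_compatible(child, "fsl,qe-firmware")) {
			fw = child;
			break;
		}

	/* ...otherwise follow the reference into another QE node. */
	if (!fw)
		fw = of_parse_phandle(qe, "fsl,firmware-phandle", 0);
	if (!fw)
		return NULL;

	/* The blob itself is the "fsl,firmware" byte array; a real driver
	 * would of_node_put(fw) once it is finished with it. */
	return of_get_property(fw, "fsl,firmware", lenp);
}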
diff --git a/MAINTAINERS b/MAINTAINERS
index fbc3d653d52..3d29fa38988 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1443,7 +1443,7 @@ F: arch/powerpc/platforms/cell/
CEPH DISTRIBUTED FILE SYSTEM CLIENT
M: Sage Weil <sage@newdream.net>
-L: ceph-devel@lists.sourceforge.net
+L: ceph-devel@vger.kernel.org
W: http://ceph.newdream.net/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/sage/ceph-client.git
S: Supported
@@ -3083,6 +3083,7 @@ F: include/scsi/*iscsi*
ISDN SUBSYSTEM
M: Karsten Keil <isdn@linux-pingi.de>
L: isdn4linux@listserv.isdn4linux.de (subscribers-only)
+L: netdev@vger.kernel.org
W: http://www.isdn4linux.de
T: git git://git.kernel.org/pub/scm/linux/kernel/git/kkeil/isdn-2.6.git
S: Maintained
@@ -3269,6 +3270,16 @@ S: Maintained
F: include/linux/kexec.h
F: kernel/kexec.c
+KEYS/KEYRINGS:
+M: David Howells <dhowells@redhat.com>
+L: keyrings@linux-nfs.org
+S: Maintained
+F: Documentation/keys.txt
+F: include/linux/key.h
+F: include/linux/key-type.h
+F: include/keys/
+F: security/keys/
+
KGDB
M: Jason Wessel <jason.wessel@windriver.com>
L: kgdb-bugreport@lists.sourceforge.net
@@ -5423,7 +5434,6 @@ S: Maintained
F: sound/soc/codecs/twl4030*
TIPC NETWORK LAYER
-M: Per Liden <per.liden@ericsson.com>
M: Jon Maloy <jon.maloy@ericsson.com>
M: Allan Stephens <allan.stephens@windriver.com>
L: tipc-discussion@lists.sourceforge.net
diff --git a/Makefile b/Makefile
index a5ba759e0fd..67c1001cfbf 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 34
-EXTRAVERSION = -rc2
+EXTRAVERSION = -rc3
NAME = Man-Eating Seals of Antiquity
# *DOCUMENTATION*
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 72da7e045c6..0d08d4170b6 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -15,6 +15,7 @@
#include <asm/glue.h>
#include <asm/shmparam.h>
#include <asm/cachetype.h>
+#include <asm/outercache.h>
#define CACHE_COLOUR(vaddr) ((vaddr & (SHMLBA - 1)) >> PAGE_SHIFT)
@@ -219,12 +220,6 @@ struct cpu_cache_fns {
void (*dma_flush_range)(const void *, const void *);
};
-struct outer_cache_fns {
- void (*inv_range)(unsigned long, unsigned long);
- void (*clean_range)(unsigned long, unsigned long);
- void (*flush_range)(unsigned long, unsigned long);
-};
-
/*
* Select the calling method
*/
@@ -281,37 +276,6 @@ extern void dmac_flush_range(const void *, const void *);
#endif
-#ifdef CONFIG_OUTER_CACHE
-
-extern struct outer_cache_fns outer_cache;
-
-static inline void outer_inv_range(unsigned long start, unsigned long end)
-{
- if (outer_cache.inv_range)
- outer_cache.inv_range(start, end);
-}
-static inline void outer_clean_range(unsigned long start, unsigned long end)
-{
- if (outer_cache.clean_range)
- outer_cache.clean_range(start, end);
-}
-static inline void outer_flush_range(unsigned long start, unsigned long end)
-{
- if (outer_cache.flush_range)
- outer_cache.flush_range(start, end);
-}
-
-#else
-
-static inline void outer_inv_range(unsigned long start, unsigned long end)
-{ }
-static inline void outer_clean_range(unsigned long start, unsigned long end)
-{ }
-static inline void outer_flush_range(unsigned long start, unsigned long end)
-{ }
-
-#endif
-
/*
* Copy user data from/to a page which is mapped into a different
* processes address space. Really, we want to allow our "user
diff --git a/arch/arm/include/asm/clkdev.h b/arch/arm/include/asm/clkdev.h
index 7a0690da5e6..b56c1389b6f 100644
--- a/arch/arm/include/asm/clkdev.h
+++ b/arch/arm/include/asm/clkdev.h
@@ -13,6 +13,7 @@
#define __ASM_CLKDEV_H
struct clk;
+struct device;
struct clk_lookup {
struct list_head node;
diff --git a/arch/arm/include/asm/irq.h b/arch/arm/include/asm/irq.h
index 328f14a8b79..237282f7c76 100644
--- a/arch/arm/include/asm/irq.h
+++ b/arch/arm/include/asm/irq.h
@@ -17,6 +17,7 @@
#ifndef __ASSEMBLY__
struct irqaction;
+struct pt_regs;
extern void migrate_irqs(void);
extern void asm_do_IRQ(unsigned int, struct pt_regs *);
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
new file mode 100644
index 00000000000..25f76bae57a
--- /dev/null
+++ b/arch/arm/include/asm/outercache.h
@@ -0,0 +1,75 @@
+/*
+ * arch/arm/include/asm/outercache.h
+ *
+ * Copyright (C) 2010 ARM Ltd.
+ * Written by Catalin Marinas <catalin.marinas@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#ifndef __ASM_OUTERCACHE_H
+#define __ASM_OUTERCACHE_H
+
+struct outer_cache_fns {
+ void (*inv_range)(unsigned long, unsigned long);
+ void (*clean_range)(unsigned long, unsigned long);
+ void (*flush_range)(unsigned long, unsigned long);
+#ifdef CONFIG_OUTER_CACHE_SYNC
+ void (*sync)(void);
+#endif
+};
+
+#ifdef CONFIG_OUTER_CACHE
+
+extern struct outer_cache_fns outer_cache;
+
+static inline void outer_inv_range(unsigned long start, unsigned long end)
+{
+ if (outer_cache.inv_range)
+ outer_cache.inv_range(start, end);
+}
+static inline void outer_clean_range(unsigned long start, unsigned long end)
+{
+ if (outer_cache.clean_range)
+ outer_cache.clean_range(start, end);
+}
+static inline void outer_flush_range(unsigned long start, unsigned long end)
+{
+ if (outer_cache.flush_range)
+ outer_cache.flush_range(start, end);
+}
+
+#else
+
+static inline void outer_inv_range(unsigned long start, unsigned long end)
+{ }
+static inline void outer_clean_range(unsigned long start, unsigned long end)
+{ }
+static inline void outer_flush_range(unsigned long start, unsigned long end)
+{ }
+
+#endif
+
+#ifdef CONFIG_OUTER_CACHE_SYNC
+static inline void outer_sync(void)
+{
+ if (outer_cache.sync)
+ outer_cache.sync();
+}
+#else
+static inline void outer_sync(void)
+{ }
+#endif
+
+#endif /* __ASM_OUTERCACHE_H */
diff --git a/arch/arm/include/asm/system.h b/arch/arm/include/asm/system.h
index ca88e6a8470..4ace45ec3ef 100644
--- a/arch/arm/include/asm/system.h
+++ b/arch/arm/include/asm/system.h
@@ -60,6 +60,8 @@
#include <linux/linkage.h>
#include <linux/irqflags.h>
+#include <asm/outercache.h>
+
#define __exception __attribute__((section(".exception.text")))
struct thread_info;
@@ -137,10 +139,12 @@ extern unsigned int user_debug;
#define dmb() __asm__ __volatile__ ("" : : : "memory")
#endif
-#if __LINUX_ARM_ARCH__ >= 7 || defined(CONFIG_SMP)
-#define mb() dmb()
+#ifdef CONFIG_ARCH_HAS_BARRIERS
+#include <mach/barriers.h>
+#elif __LINUX_ARM_ARCH__ >= 7 || defined(CONFIG_SMP)
+#define mb() do { dsb(); outer_sync(); } while (0)
#define rmb() dmb()
-#define wmb() dmb()
+#define wmb() mb()
#else
#define mb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
#define rmb() do { if (arch_is_coherent()) dmb(); else barrier(); } while (0)
@@ -152,9 +156,9 @@ extern unsigned int user_debug;
#define smp_rmb() barrier()
#define smp_wmb() barrier()
#else
-#define smp_mb() mb()
-#define smp_rmb() rmb()
-#define smp_wmb() wmb()
+#define smp_mb() dmb()
+#define smp_rmb() dmb()
+#define smp_wmb() dmb()
#endif
#define read_barrier_depends() do { } while(0)
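The effect of the new mandatory barriers can be illustrated with a hypothetical driver fragment (the device layout, register offset and field names are made up): the descriptor write has to drain past the outer (L2) write buffer before the device is told to fetch it, which is what wmb() == dsb() + outer_sync() now guarantees on ARMv7/SMP, while smp_wmb() stays a cheaper dmb() because ordering against other CPUs does not need the outer cache drained.

#include <linux/io.h>
#include <linux/types.h>
#include <asm/system.h>

struct my_desc { u32 status; };		/* lives in coherent DMA memory */
struct my_dev {
	struct my_desc *desc;
	void __iomem *regs;
};

#define DESC_READY	0x1
#define KICK_REG	0x10

static void ring_doorbell(struct my_dev *dev, u32 desc_addr)
{
	dev->desc->status = DESC_READY;	/* update descriptor in memory */

	wmb();		/* dsb() + outer_sync(): visible to the device,
			   not just to other CPUs */

	writel(desc_addr, dev->regs + KICK_REG);	/* device fetches it */
}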
diff --git a/arch/arm/kernel/kprobes.c b/arch/arm/kernel/kprobes.c
index 60c62c377fa..610e0f561c3 100644
--- a/arch/arm/kernel/kprobes.c
+++ b/arch/arm/kernel/kprobes.c
@@ -393,6 +393,14 @@ void __kprobes jprobe_return(void)
/*
* Setup an empty pt_regs. Fill SP and PC fields as
* they're needed by longjmp_break_handler.
+ *
+ * We allocate some slack between the original SP and start of
+ * our fabricated regs. To be precise we want to have worst case
+ * covered which is STMFD with all 16 regs so we allocate 2 *
+ * sizeof(struct_pt_regs)).
+ *
+ * This is to prevent any simulated instruction from writing
+ * over the regs when they are accessing the stack.
*/
"sub sp, %0, %1 \n\t"
"ldr r0, ="__stringify(JPROBE_MAGIC_ADDR)"\n\t"
@@ -410,7 +418,7 @@ void __kprobes jprobe_return(void)
"ldmia sp, {r0 - pc} \n\t"
:
: "r" (kcb->jprobe_saved_regs.ARM_sp),
- "I" (sizeof(struct pt_regs)),
+ "I" (sizeof(struct pt_regs) * 2),
"J" (offsetof(struct pt_regs, ARM_sp)),
"J" (offsetof(struct pt_regs, ARM_pc)),
"J" (offsetof(struct pt_regs, ARM_cpsr))
diff --git a/arch/arm/lib/memmove.S b/arch/arm/lib/memmove.S
index 5025c863713..938fc14f962 100644
--- a/arch/arm/lib/memmove.S
+++ b/arch/arm/lib/memmove.S
@@ -74,7 +74,7 @@ ENTRY(memmove)
rsb ip, ip, #32
addne pc, pc, ip @ C is always clear here
b 7f
-6: nop
+6: W(nop)
W(ldr) r3, [r1, #-4]!
W(ldr) r4, [r1, #-4]!
W(ldr) r5, [r1, #-4]!
@@ -85,7 +85,7 @@ ENTRY(memmove)
add pc, pc, ip
nop
- nop
+ W(nop)
W(str) r3, [r0, #-4]!
W(str) r4, [r0, #-4]!
W(str) r5, [r0, #-4]!
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index c4ed9f93f64..5bd7c89a604 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -736,6 +736,12 @@ config NEEDS_SYSCALL_FOR_CMPXCHG
config OUTER_CACHE
bool
+config OUTER_CACHE_SYNC
+ bool
+ help
+ The outer cache has a outer_cache_fns.sync function pointer
+ that can be used to drain the write buffer of the outer cache.
+
config CACHE_FEROCEON_L2
bool "Enable the Feroceon L2 cache controller"
depends on ARCH_KIRKWOOD || ARCH_MV78XX0
@@ -757,6 +763,7 @@ config CACHE_L2X0
REALVIEW_EB_A9MP || ARCH_MX35 || ARCH_MX31 || MACH_REALVIEW_PBX || ARCH_NOMADIK || ARCH_OMAP4
default y
select OUTER_CACHE
+ select OUTER_CACHE_SYNC
help
This option enables the L2x0 PrimeCell.
@@ -781,3 +788,9 @@ config ARM_L1_CACHE_SHIFT
int
default 6 if ARM_L1_CACHE_SHIFT_6
default 5
+
+config ARCH_HAS_BARRIERS
+ bool
+ help
+ This option allows the use of custom mandatory barriers
+ included via the mach/barriers.h file.
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 07334632d3e..21ad68ba22b 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -93,6 +93,15 @@ static inline void l2x0_flush_line(unsigned long addr)
}
#endif
+static void l2x0_cache_sync(void)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&l2x0_lock, flags);
+ cache_sync();
+ spin_unlock_irqrestore(&l2x0_lock, flags);
+}
+
static inline void l2x0_inv_all(void)
{
unsigned long flags;
@@ -225,6 +234,7 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
outer_cache.inv_range = l2x0_inv_range;
outer_cache.clean_range = l2x0_clean_range;
outer_cache.flush_range = l2x0_flush_range;
+ outer_cache.sync = l2x0_cache_sync;
printk(KERN_INFO "L2X0 cache controller enabled\n");
}
diff --git a/arch/arm/vfp/vfpmodule.c b/arch/arm/vfp/vfpmodule.c
index 7f3f59fcaa2..a420cb94932 100644
--- a/arch/arm/vfp/vfpmodule.c
+++ b/arch/arm/vfp/vfpmodule.c
@@ -545,7 +545,7 @@ static int __init vfp_init(void)
*/
elf_hwcap |= HWCAP_VFP;
#ifdef CONFIG_VFPv3
- if (VFP_arch >= 3) {
+ if (VFP_arch >= 2) {
elf_hwcap |= HWCAP_VFPv3;
/*
diff --git a/arch/cris/arch-v32/drivers/pci/bios.c b/arch/cris/arch-v32/drivers/pci/bios.c
index d4b9c36ddc0..bc0cfdad1cb 100644
--- a/arch/cris/arch-v32/drivers/pci/bios.c
+++ b/arch/cris/arch-v32/drivers/pci/bios.c
@@ -50,7 +50,7 @@ pcibios_align_resource(void *data, const struct resource *res,
if ((res->flags & IORESOURCE_IO) && (start & 0x300))
start = (start + 0x3ff) & ~0x3ff;
- return start
+ return start;
}
int pcibios_enable_resources(struct pci_dev *dev, int mask)
diff --git a/arch/frv/mb93090-mb00/pci-frv.c b/arch/frv/mb93090-mb00/pci-frv.c
index 1ed15d7fea2..6b4fb28e9f9 100644
--- a/arch/frv/mb93090-mb00/pci-frv.c
+++ b/arch/frv/mb93090-mb00/pci-frv.c
@@ -41,7 +41,7 @@ pcibios_align_resource(void *data, const struct resource *res,
if ((res->flags & IORESOURCE_IO) && (start & 0x300))
start = (start + 0x3ff) & ~0x3ff;
- return start
+ return start;
}
@@ -94,8 +94,7 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
r = &dev->resource[idx];
if (!r->start)
continue;
- if (pci_claim_resource(dev, idx) < 0)
- printk(KERN_ERR "PCI: Cannot allocate resource region %d of bridge %s\n", idx, pci_name(dev));
+ pci_claim_resource(dev, idx);
}
}
pcibios_allocate_bus_resources(&bus->children);
@@ -125,7 +124,6 @@ static void __init pcibios_allocate_resources(int pass)
DBG("PCI: Resource %08lx-%08lx (f=%lx, d=%d, p=%d)\n",
r->start, r->end, r->flags, disabled, pass);
if (pci_claim_resource(dev, idx) < 0) {
- printk(KERN_ERR "PCI: Cannot allocate resource region %d of device %s\n", idx, pci_name(dev));
/* We'll assign a new address later */
r->end -= r->start;
r->start = 0;
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 203ec61c6d4..76818f92653 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -75,9 +75,6 @@ config LOCKDEP_SUPPORT
config HAVE_LATENCYTOP_SUPPORT
def_bool y
-config PCI
- def_bool n
-
config DTC
def_bool y
diff --git a/arch/microblaze/Makefile b/arch/microblaze/Makefile
index 836832dd9b2..72f6e858374 100644
--- a/arch/microblaze/Makefile
+++ b/arch/microblaze/Makefile
@@ -84,7 +84,7 @@ define archhelp
echo '* linux.bin - Create raw binary'
echo ' linux.bin.gz - Create compressed raw binary'
echo ' simpleImage.<dt> - ELF image with $(arch)/boot/dts/<dt>.dts linked in'
- echo ' - stripped elf with fdt blob
+ echo ' - stripped elf with fdt blob'
echo ' simpleImage.<dt>.unstrip - full ELF image with fdt blob'
echo ' *_defconfig - Select default config from arch/microblaze/configs'
echo ''
@@ -94,3 +94,5 @@ define archhelp
echo ' name of a dts file from the arch/microblaze/boot/dts/ directory'
echo ' (minus the .dts extension).'
endef
+
+MRPROPER_FILES += $(boot)/simpleImage.*
diff --git a/arch/microblaze/boot/Makefile b/arch/microblaze/boot/Makefile
index 902cf9846c3..57f50c2371c 100644
--- a/arch/microblaze/boot/Makefile
+++ b/arch/microblaze/boot/Makefile
@@ -23,8 +23,6 @@ $(obj)/system.dtb: $(obj)/$(DTB).dtb
endif
$(obj)/linux.bin: vmlinux FORCE
- [ -n $(CONFIG_INITRAMFS_SOURCE) ] && [ ! -e $(CONFIG_INITRAMFS_SOURCE) ] && \
- touch $(CONFIG_INITRAMFS_SOURCE) || echo "No CPIO image"
$(call if_changed,objcopy)
$(call if_changed,uimage)
@echo 'Kernel: $@ is ready' ' (#'`cat .version`')'
@@ -62,6 +60,4 @@ quiet_cmd_dtc = DTC $@
$(obj)/%.dtb: $(dtstree)/%.dts FORCE
$(call if_changed,dtc)
-clean-kernel += linux.bin linux.bin.gz simpleImage.*
-
-clean-files += *.dtb simpleImage.*.unstrip
+clean-files += *.dtb simpleImage.*.unstrip linux.bin.ub
diff --git a/arch/microblaze/include/asm/processor.h b/arch/microblaze/include/asm/processor.h
index 563c6b9453f..8eeb09211ec 100644
--- a/arch/microblaze/include/asm/processor.h
+++ b/arch/microblaze/include/asm/processor.h
@@ -14,7 +14,6 @@
#include <asm/ptrace.h>
#include <asm/setup.h>
#include <asm/registers.h>
-#include <asm/segment.h>
#include <asm/entry.h>
#include <asm/current.h>
diff --git a/arch/microblaze/include/asm/segment.h b/arch/microblaze/include/asm/segment.h
deleted file mode 100644
index 0e7102c3fb1..00000000000
--- a/arch/microblaze/include/asm/segment.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
- * Copyright (C) 2008-2009 PetaLogix
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#ifndef _ASM_MICROBLAZE_SEGMENT_H
-#define _ASM_MICROBLAZE_SEGMENT_H
-
-# ifndef __ASSEMBLY__
-
-typedef struct {
- unsigned long seg;
-} mm_segment_t;
-
-/*
- * On Microblaze the fs value is actually the top of the corresponding
- * address space.
- *
- * The fs value determines whether argument validity checking should be
- * performed or not. If get_fs() == USER_DS, checking is performed, with
- * get_fs() == KERNEL_DS, checking is bypassed.
- *
- * For historical reasons, these macros are grossly misnamed.
- *
- * For non-MMU arch like Microblaze, KERNEL_DS and USER_DS is equal.
- */
-# define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
-
-# ifndef CONFIG_MMU
-# define KERNEL_DS MAKE_MM_SEG(0)
-# define USER_DS KERNEL_DS
-# else
-# define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
-# define USER_DS MAKE_MM_SEG(TASK_SIZE - 1)
-# endif
-
-# define get_ds() (KERNEL_DS)
-# define get_fs() (current_thread_info()->addr_limit)
-# define set_fs(val) (current_thread_info()->addr_limit = (val))
-
-# define segment_eq(a, b) ((a).seg == (b).seg)
-
-# endif /* __ASSEMBLY__ */
-#endif /* _ASM_MICROBLAZE_SEGMENT_H */
diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h
index 6e92885d381..b2ca80f6464 100644
--- a/arch/microblaze/include/asm/thread_info.h
+++ b/arch/microblaze/include/asm/thread_info.h
@@ -19,7 +19,6 @@
#ifndef __ASSEMBLY__
# include <linux/types.h>
# include <asm/processor.h>
-# include <asm/segment.h>
/*
* low level task data that entry.S needs immediate access to
@@ -60,6 +59,10 @@ struct cpu_context {
__u32 fsr;
};
+typedef struct {
+ unsigned long seg;
+} mm_segment_t;
+
struct thread_info {
struct task_struct *task; /* main task structure */
struct exec_domain *exec_domain; /* execution domain */
diff --git a/arch/microblaze/include/asm/tlbflush.h b/arch/microblaze/include/asm/tlbflush.h
index bcb8b41d55a..2e1353c2d18 100644
--- a/arch/microblaze/include/asm/tlbflush.h
+++ b/arch/microblaze/include/asm/tlbflush.h
@@ -24,6 +24,7 @@ extern void _tlbie(unsigned long address);
extern void _tlbia(void);
#define __tlbia() { preempt_disable(); _tlbia(); preempt_enable(); }
+#define __tlbie(x) { _tlbie(x); }
static inline void local_flush_tlb_all(void)
{ __tlbia(); }
@@ -31,7 +32,7 @@ static inline void local_flush_tlb_mm(struct mm_struct *mm)
{ __tlbia(); }
static inline void local_flush_tlb_page(struct vm_area_struct *vma,
unsigned long vmaddr)
- { _tlbie(vmaddr); }
+ { __tlbie(vmaddr); }
static inline void local_flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{ __tlbia(); }
diff --git a/arch/microblaze/include/asm/uaccess.h b/arch/microblaze/include/asm/uaccess.h
index 371bd6e56d9..446bec29b14 100644
--- a/arch/microblaze/include/asm/uaccess.h
+++ b/arch/microblaze/include/asm/uaccess.h
@@ -22,101 +22,73 @@
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/pgtable.h>
-#include <asm/segment.h>
#include <linux/string.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
-#define __clear_user(addr, n) (memset((void *)(addr), 0, (n)), 0)
-
-#ifndef CONFIG_MMU
-
-extern int ___range_ok(unsigned long addr, unsigned long size);
-
-#define __range_ok(addr, size) \
- ___range_ok((unsigned long)(addr), (unsigned long)(size))
-
-#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
-#define __access_ok(add, size) (__range_ok((addr), (size)) == 0)
-
-/* Undefined function to trigger linker error */
-extern int bad_user_access_length(void);
-
-/* FIXME this is function for optimalization -> memcpy */
-#define __get_user(var, ptr) \
-({ \
- int __gu_err = 0; \
- switch (sizeof(*(ptr))) { \
- case 1: \
- case 2: \
- case 4: \
- (var) = *(ptr); \
- break; \
- case 8: \
- memcpy((void *) &(var), (ptr), 8); \
- break; \
- default: \
- (var) = 0; \
- __gu_err = __get_user_bad(); \
- break; \
- } \
- __gu_err; \
-})
+/*
+ * On Microblaze the fs value is actually the top of the corresponding
+ * address space.
+ *
+ * The fs value determines whether argument validity checking should be
+ * performed or not. If get_fs() == USER_DS, checking is performed, with
+ * get_fs() == KERNEL_DS, checking is bypassed.
+ *
+ * For historical reasons, these macros are grossly misnamed.
+ *
+ * For non-MMU arch like Microblaze, KERNEL_DS and USER_DS is equal.
+ */
+# define MAKE_MM_SEG(s) ((mm_segment_t) { (s) })
-#define __get_user_bad() (bad_user_access_length(), (-EFAULT))
+# ifndef CONFIG_MMU
+# define KERNEL_DS MAKE_MM_SEG(0)
+# define USER_DS KERNEL_DS
+# else
+# define KERNEL_DS MAKE_MM_SEG(0xFFFFFFFF)
+# define USER_DS MAKE_MM_SEG(TASK_SIZE - 1)
+# endif
-/* FIXME is not there defined __pu_val */
-#define __put_user(var, ptr) \
-({ \
- int __pu_err = 0; \
- switch (sizeof(*(ptr))) { \
- case 1: \
- case 2: \
- case 4: \
- *(ptr) = (var); \
- break; \
- case 8: { \
- typeof(*(ptr)) __pu_val = (var); \
- memcpy(ptr, &__pu_val, sizeof(__pu_val)); \
- } \
- break; \
- default: \
- __pu_err = __put_user_bad(); \
- break; \
- } \
- __pu_err; \
-})
+# define get_ds() (KERNEL_DS)
+# define get_fs() (current_thread_info()->addr_limit)
+# define set_fs(val) (current_thread_info()->addr_limit = (val))
-#define __put_user_bad() (bad_user_access_length(), (-EFAULT))
+# define segment_eq(a, b) ((a).seg == (b).seg)
-#define put_user(x, ptr) __put_user((x), (ptr))
-#define get_user(x, ptr) __get_user((x), (ptr))
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+struct exception_table_entry {
+ unsigned long insn, fixup;
+};
-#define copy_to_user(to, from, n) (memcpy((to), (from), (n)), 0)
-#define copy_from_user(to, from, n) (memcpy((to), (from), (n)), 0)
+/* Returns 0 if exception not found and fixup otherwise. */
+extern unsigned long search_exception_table(unsigned long);
-#define __copy_to_user(to, from, n) (copy_to_user((to), (from), (n)))
-#define __copy_from_user(to, from, n) (copy_from_user((to), (from), (n)))
-#define __copy_to_user_inatomic(to, from, n) \
- (__copy_to_user((to), (from), (n)))
-#define __copy_from_user_inatomic(to, from, n) \
- (__copy_from_user((to), (from), (n)))
+#ifndef CONFIG_MMU
-static inline unsigned long clear_user(void *addr, unsigned long size)
+/* Check against bounds of physical memory */
+static inline int ___range_ok(unsigned long addr, unsigned long size)
{
- if (access_ok(VERIFY_WRITE, addr, size))
- size = __clear_user(addr, size);
- return size;
+ return ((addr < memory_start) ||
+ ((addr + size) > memory_end));
}
-/* Returns 0 if exception not found and fixup otherwise. */
-extern unsigned long search_exception_table(unsigned long);
+#define __range_ok(addr, size) \
+ ___range_ok((unsigned long)(addr), (unsigned long)(size))
-extern long strncpy_from_user(char *dst, const char *src, long count);
-extern long strnlen_user(const char *src, long count);
+#define access_ok(type, addr, size) (__range_ok((addr), (size)) == 0)
-#else /* CONFIG_MMU */
+#else
/*
* Address is valid if:
@@ -129,24 +101,88 @@ extern long strnlen_user(const char *src, long count);
/* || printk("access_ok failed for %s at 0x%08lx (size %d), seg 0x%08x\n",
type?"WRITE":"READ",addr,size,get_fs().seg)) */
-/*
- * All the __XXX versions macros/functions below do not perform
- * access checking. It is assumed that the necessary checks have been
- * already performed before the finction (macro) is called.
- */
+#endif
-#define get_user(x, ptr) \
-({ \
- access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) \
- ? __get_user((x), (ptr)) : -EFAULT; \
-})
+#ifdef CONFIG_MMU
+# define __FIXUP_SECTION ".section .fixup,\"ax\"\n"
+# define __EX_TABLE_SECTION ".section __ex_table,\"a\"\n"
+#else
+# define __FIXUP_SECTION ".section .discard,\"ax\"\n"
+# define __EX_TABLE_SECTION ".section .discard,\"a\"\n"
+#endif
-#define put_user(x, ptr) \
-({ \
- access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) \
- ? __put_user((x), (ptr)) : -EFAULT; \
+extern unsigned long __copy_tofrom_user(void __user *to,
+ const void __user *from, unsigned long size);
+
+/* Return: number of not copied bytes, i.e. 0 if OK or non-zero if fail. */
+static inline unsigned long __must_check __clear_user(void __user *to,
+ unsigned long n)
+{
+ /* normal memset with two words to __ex_table */
+ __asm__ __volatile__ ( \
+ "1: sb r0, %2, r0;" \
+ " addik %0, %0, -1;" \
+ " bneid %0, 1b;" \
+ " addik %2, %2, 1;" \
+ "2: " \
+ __EX_TABLE_SECTION \
+ ".word 1b,2b;" \
+ ".previous;" \
+ : "=r"(n) \
+ : "0"(n), "r"(to)
+ );
+ return n;
+}
+
+static inline unsigned long __must_check clear_user(void __user *to,
+ unsigned long n)
+{
+ might_sleep();
+ if (unlikely(!access_ok(VERIFY_WRITE, to, n)))
+ return n;
+
+ return __clear_user(to, n);
+}
+
+/* put_user and get_user macros */
+extern long __user_bad(void);
+
+#define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \
+({ \
+ __asm__ __volatile__ ( \
+ "1:" insn " %1, %2, r0;" \
+ " addk %0, r0, r0;" \
+ "2: " \
+ __FIXUP_SECTION \
+ "3: brid 2b;" \
+ " addik %0, r0, %3;" \
+ ".previous;" \
+ __EX_TABLE_SECTION \
+ ".word 1b,3b;" \
+ ".previous;" \
+ : "=&r"(__gu_err), "=r"(__gu_val) \
+ : "r"(__gu_ptr), "i"(-EFAULT) \
+ ); \
})
+/**
+ * get_user: - Get a simple variable from user space.
+ * @x: Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+
#define __get_user(x, ptr) \
({ \
unsigned long __gu_val; \
@@ -163,30 +199,74 @@ extern long strnlen_user(const char *src, long count);
__get_user_asm("lw", (ptr), __gu_val, __gu_err); \
break; \
default: \
- __gu_val = 0; __gu_err = -EINVAL; \
+ /* __gu_val = 0; __gu_err = -EINVAL;*/ __gu_err = __user_bad();\
} \
x = (__typeof__(*(ptr))) __gu_val; \
__gu_err; \
})
-#define __get_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \
+
+#define get_user(x, ptr) \
({ \
- __asm__ __volatile__ ( \
- "1:" insn " %1, %2, r0; \
- addk %0, r0, r0; \
- 2: \
- .section .fixup,\"ax\"; \
- 3: brid 2b; \
- addik %0, r0, %3; \
- .previous; \
- .section __ex_table,\"a\"; \
- .word 1b,3b; \
- .previous;" \
- : "=r"(__gu_err), "=r"(__gu_val) \
- : "r"(__gu_ptr), "i"(-EFAULT) \
- ); \
+ access_ok(VERIFY_READ, (ptr), sizeof(*(ptr))) \
+ ? __get_user((x), (ptr)) : -EFAULT; \
+})
+
+#define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \
+({ \
+ __asm__ __volatile__ ( \
+ "1:" insn " %1, %2, r0;" \
+ " addk %0, r0, r0;" \
+ "2: " \
+ __FIXUP_SECTION \
+ "3: brid 2b;" \
+ " addik %0, r0, %3;" \
+ ".previous;" \
+ __EX_TABLE_SECTION \
+ ".word 1b,3b;" \
+ ".previous;" \
+ : "=&r"(__gu_err) \
+ : "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT) \
+ ); \
})
+#define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err) \
+({ \
+ __asm__ __volatile__ (" lwi %0, %1, 0;" \
+ "1: swi %0, %2, 0;" \
+ " lwi %0, %1, 4;" \
+ "2: swi %0, %2, 4;" \
+ " addk %0, r0, r0;" \
+ "3: " \
+ __FIXUP_SECTION \
+ "4: brid 3b;" \
+ " addik %0, r0, %3;" \
+ ".previous;" \
+ __EX_TABLE_SECTION \
+ ".word 1b,4b,2b,4b;" \
+ ".previous;" \
+ : "=&r"(__gu_err) \
+ : "r"(&__gu_val), "r"(__gu_ptr), "i"(-EFAULT) \
+ ); \
+})
+
+/**
+ * put_user: - Write a simple value into user space.
+ * @x: Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ */
+
#define __put_user(x, ptr) \
({ \
__typeof__(*(ptr)) volatile __gu_val = (x); \
@@ -195,7 +275,7 @@ extern long strnlen_user(const char *src, long count);
case 1: \
__put_user_asm("sb", (ptr), __gu_val, __gu_err); \
break; \
- case 2: \
+ case 2: \
__put_user_asm("sh", (ptr), __gu_val, __gu_err); \
break; \
case 4: \
@@ -205,121 +285,82 @@ extern long strnlen_user(const char *src, long count);
__put_user_asm_8((ptr), __gu_val, __gu_err); \
break; \
default: \
- __gu_err = -EINVAL; \
+ /*__gu_err = -EINVAL;*/ __gu_err = __user_bad(); \
} \
__gu_err; \
})
-#define __put_user_asm_8(__gu_ptr, __gu_val, __gu_err) \
-({ \
-__asm__ __volatile__ (" lwi %0, %1, 0; \
- 1: swi %0, %2, 0; \
- lwi %0, %1, 4; \
- 2: swi %0, %2, 4; \
- addk %0,r0,r0; \
- 3: \
- .section .fixup,\"ax\"; \
- 4: brid 3b; \
- addik %0, r0, %3; \
- .previous; \
- .section __ex_table,\"a\"; \
- .word 1b,4b,2b,4b; \
- .previous;" \
- : "=&r"(__gu_err) \
- : "r"(&__gu_val), \
- "r"(__gu_ptr), "i"(-EFAULT) \
- ); \
-})
+#ifndef CONFIG_MMU
-#define __put_user_asm(insn, __gu_ptr, __gu_val, __gu_err) \
-({ \
- __asm__ __volatile__ ( \
- "1:" insn " %1, %2, r0; \
- addk %0, r0, r0; \
- 2: \
- .section .fixup,\"ax\"; \
- 3: brid 2b; \
- addik %0, r0, %3; \
- .previous; \
- .section __ex_table,\"a\"; \
- .word 1b,3b; \
- .previous;" \
- : "=r"(__gu_err) \
- : "r"(__gu_val), "r"(__gu_ptr), "i"(-EFAULT) \
- ); \
-})
+#define put_user(x, ptr) __put_user((x), (ptr))
-/*
- * Return: number of not copied bytes, i.e. 0 if OK or non-zero if fail.
- */
-static inline int clear_user(char *to, int size)
-{
- if (size && access_ok(VERIFY_WRITE, to, size)) {
- __asm__ __volatile__ (" \
- 1: \
- sb r0, %2, r0; \
- addik %0, %0, -1; \
- bneid %0, 1b; \
- addik %2, %2, 1; \
- 2: \
- .section __ex_table,\"a\"; \
- .word 1b,2b; \
- .section .text;" \
- : "=r"(size) \
- : "0"(size), "r"(to)
- );
- }
- return size;
-}
+#else /* CONFIG_MMU */
-#define __copy_from_user(to, from, n) copy_from_user((to), (from), (n))
+#define put_user(x, ptr) \
+({ \
+ access_ok(VERIFY_WRITE, (ptr), sizeof(*(ptr))) \
+ ? __put_user((x), (ptr)) : -EFAULT; \
+})
+#endif /* CONFIG_MMU */
+
+/* copy_to_from_user */
+#define __copy_from_user(to, from, n) \
+ __copy_tofrom_user((__force void __user *)(to), \
+ (void __user *)(from), (n))
#define __copy_from_user_inatomic(to, from, n) \
copy_from_user((to), (from), (n))
-#define copy_to_user(to, from, n) \
- (access_ok(VERIFY_WRITE, (to), (n)) ? \
- __copy_tofrom_user((void __user *)(to), \
- (__force const void __user *)(from), (n)) \
- : -EFAULT)
+static inline long copy_from_user(void *to,
+ const void __user *from, unsigned long n)
+{
+ might_sleep();
+ if (access_ok(VERIFY_READ, from, n))
+ return __copy_from_user(to, from, n);
+ return n;
+}
-#define __copy_to_user(to, from, n) copy_to_user((to), (from), (n))
+#define __copy_to_user(to, from, n) \
+ __copy_tofrom_user((void __user *)(to), \
+ (__force const void __user *)(from), (n))
#define __copy_to_user_inatomic(to, from, n) copy_to_user((to), (from), (n))
-#define copy_from_user(to, from, n) \
- (access_ok(VERIFY_READ, (from), (n)) ? \
- __copy_tofrom_user((__force void __user *)(to), \
- (void __user *)(from), (n)) \
- : -EFAULT)
+static inline long copy_to_user(void __user *to,
+ const void *from, unsigned long n)
+{
+ might_sleep();
+ if (access_ok(VERIFY_WRITE, to, n))
+ return __copy_to_user(to, from, n);
+ return n;
+}
+/*
+ * Copy a null terminated string from userspace.
+ */
extern int __strncpy_user(char *to, const char __user *from, int len);
-extern int __strnlen_user(const char __user *sstr, int len);
-#define strncpy_from_user(to, from, len) \
- (access_ok(VERIFY_READ, from, 1) ? \
- __strncpy_user(to, from, len) : -EFAULT)
-#define strnlen_user(str, len) \
- (access_ok(VERIFY_READ, str, 1) ? __strnlen_user(str, len) : 0)
+#define __strncpy_from_user __strncpy_user
-#endif /* CONFIG_MMU */
-
-extern unsigned long __copy_tofrom_user(void __user *to,
- const void __user *from, unsigned long size);
+static inline long
+strncpy_from_user(char *dst, const char __user *src, long count)
+{
+ if (!access_ok(VERIFY_READ, src, 1))
+ return -EFAULT;
+ return __strncpy_from_user(dst, src, count);
+}
/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
+ * Return the size of a string (including the ending 0)
*
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
+ * Return 0 on exception, a value greater than N if too long
*/
-struct exception_table_entry {
- unsigned long insn, fixup;
-};
+extern int __strnlen_user(const char __user *sstr, int len);
+
+static inline long strnlen_user(const char __user *src, long n)
+{
+ if (!access_ok(VERIFY_READ, src, 1))
+ return 0;
+ return __strnlen_user(src, n);
+}
#endif /* __ASSEMBLY__ */
#endif /* __KERNEL__ */
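The calling conventions spelled out in the kernel-doc above (get_user()/put_user() return 0 or -EFAULT, the copy routines return the number of bytes left uncopied) are typically used like this; the struct and helper below are hypothetical, not part of this patch:

#include <linux/uaccess.h>
#include <linux/errno.h>

struct my_args {
	int in;
	int out;
};

static long my_do_ioctl(struct my_args __user *uarg)
{
	struct my_args args;
	int value;

	if (copy_from_user(&args, uarg, sizeof(args)))
		return -EFAULT;			/* non-zero: bytes left uncopied */

	if (get_user(value, &uarg->in))		/* 0 on success, -EFAULT on fault */
		return -EFAULT;

	args.out = value + args.in;		/* arbitrary work */

	if (copy_to_user(uarg, &args, sizeof(args)))
		return -EFAULT;

	return put_user(args.out, &uarg->out);	/* 0 or -EFAULT */
}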
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
index b1084974fcc..4d5b0311601 100644
--- a/arch/microblaze/kernel/dma.c
+++ b/arch/microblaze/kernel/dma.c
@@ -37,7 +37,7 @@ static inline void __dma_sync_page(unsigned long paddr, unsigned long offset,
static unsigned long get_dma_direct_offset(struct device *dev)
{
- if (dev)
+ if (likely(dev))
return (unsigned long)dev->archdata.dma_data;
return PCI_DRAM_OFFSET; /* FIXME Not sure if is correct */
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
index cb7815cfe5a..da6a5f5dc76 100644
--- a/arch/microblaze/kernel/head.S
+++ b/arch/microblaze/kernel/head.S
@@ -51,6 +51,12 @@ swapper_pg_dir:
.text
ENTRY(_start)
+#if CONFIG_KERNEL_BASE_ADDR == 0
+ brai TOPHYS(real_start)
+ .org 0x100
+real_start:
+#endif
+
mfs r1, rmsr
andi r1, r1, ~2
mts rmsr, r1
@@ -99,8 +105,8 @@ no_fdt_arg:
tophys(r4,r4) /* convert to phys address */
ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */
_copy_command_line:
- lbu r2, r5, r6 /* r7=r5+r6 - r5 contain pointer to command line */
- sb r2, r4, r6 /* addr[r4+r6]= r7*/
+ lbu r2, r5, r6 /* r2=r5+r6 - r5 contain pointer to command line */
+ sb r2, r4, r6 /* addr[r4+r6]= r2*/
addik r6, r6, 1 /* increment counting */
bgtid r3, _copy_command_line /* loop for all entries */
addik r3, r3, -1 /* descrement loop */
@@ -128,7 +134,7 @@ _copy_bram:
* virtual to physical.
*/
nop
- addik r3, r0, 63 /* Invalidate all TLB entries */
+ addik r3, r0, MICROBLAZE_TLB_SIZE -1 /* Invalidate all TLB entries */
_invalidate:
mts rtlbx, r3
mts rtlbhi, r0 /* flush: ensure V is clear */
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S
index 2b86c03aa84..995a2123635 100644
--- a/arch/microblaze/kernel/hw_exception_handler.S
+++ b/arch/microblaze/kernel/hw_exception_handler.S
@@ -313,13 +313,13 @@ _hw_exception_handler:
mfs r5, rmsr;
nop
swi r5, r1, 0;
- mfs r3, resr
+ mfs r4, resr
nop
- mfs r4, rear;
+ mfs r3, rear;
nop
#ifndef CONFIG_MMU
- andi r5, r3, 0x1000; /* Check ESR[DS] */
+ andi r5, r4, 0x1000; /* Check ESR[DS] */
beqi r5, not_in_delay_slot; /* Branch if ESR[DS] not set */
mfs r17, rbtr; /* ESR[DS] set - return address in BTR */
nop
@@ -327,13 +327,14 @@ not_in_delay_slot:
swi r17, r1, PT_R17
#endif
- andi r5, r3, 0x1F; /* Extract ESR[EXC] */
+ andi r5, r4, 0x1F; /* Extract ESR[EXC] */
#ifdef CONFIG_MMU
/* Calculate exception vector offset = r5 << 2 */
addk r6, r5, r5; /* << 1 */
addk r6, r6, r6; /* << 2 */
+#ifdef DEBUG
/* counting which exception happen */
lwi r5, r0, 0x200 + TOPHYS(r0_ram)
addi r5, r5, 1
@@ -341,6 +342,7 @@ not_in_delay_slot:
lwi r5, r6, 0x200 + TOPHYS(r0_ram)
addi r5, r5, 1
swi r5, r6, 0x200 + TOPHYS(r0_ram)
+#endif
/* end */
/* Load the HW Exception vector */
lwi r6, r6, TOPHYS(_MB_HW_ExceptionVectorTable)
@@ -376,7 +378,7 @@ handle_other_ex: /* Handle Other exceptions here */
swi r18, r1, PT_R18
or r5, r1, r0
- andi r6, r3, 0x1F; /* Load ESR[EC] */
+ andi r6, r4, 0x1F; /* Load ESR[EC] */
lwi r7, r0, PER_CPU(KM) /* MS: saving current kernel mode to regs */
swi r7, r1, PT_MODE
mfs r7, rfsr
@@ -426,11 +428,11 @@ handle_other_ex: /* Handle Other exceptions here */
*/
handle_unaligned_ex:
/* Working registers already saved: R3, R4, R5, R6
- * R3 = ESR
- * R4 = EAR
+ * R4 = ESR
+ * R3 = EAR
*/
#ifdef CONFIG_MMU
- andi r6, r3, 0x1000 /* Check ESR[DS] */
+ andi r6, r4, 0x1000 /* Check ESR[DS] */
beqi r6, _no_delayslot /* Branch if ESR[DS] not set */
mfs r17, rbtr; /* ESR[DS] set - return address in BTR */
nop
@@ -439,7 +441,7 @@ _no_delayslot:
RESTORE_STATE;
bri unaligned_data_trap
#endif
- andi r6, r3, 0x3E0; /* Mask and extract the register operand */
+ andi r6, r4, 0x3E0; /* Mask and extract the register operand */
srl r6, r6; /* r6 >> 5 */
srl r6, r6;
srl r6, r6;
@@ -448,33 +450,33 @@ _no_delayslot:
/* Store the register operand in a temporary location */
sbi r6, r0, TOPHYS(ex_reg_op);
- andi r6, r3, 0x400; /* Extract ESR[S] */
+ andi r6, r4, 0x400; /* Extract ESR[S] */
bnei r6, ex_sw;
ex_lw:
- andi r6, r3, 0x800; /* Extract ESR[W] */
+ andi r6, r4, 0x800; /* Extract ESR[W] */
beqi r6, ex_lhw;
- lbui r5, r4, 0; /* Exception address in r4 */
+ lbui r5, r3, 0; /* Exception address in r3 */
/* Load a word, byte-by-byte from destination address
and save it in tmp space */
sbi r5, r0, TOPHYS(ex_tmp_data_loc_0);
- lbui r5, r4, 1;
+ lbui r5, r3, 1;
sbi r5, r0, TOPHYS(ex_tmp_data_loc_1);
- lbui r5, r4, 2;
+ lbui r5, r3, 2;
sbi r5, r0, TOPHYS(ex_tmp_data_loc_2);
- lbui r5, r4, 3;
+ lbui r5, r3, 3;
sbi r5, r0, TOPHYS(ex_tmp_data_loc_3);
- /* Get the destination register value into r3 */
- lwi r3, r0, TOPHYS(ex_tmp_data_loc_0);
+ /* Get the destination register value into r4 */
+ lwi r4, r0, TOPHYS(ex_tmp_data_loc_0);
bri ex_lw_tail;
ex_lhw:
- lbui r5, r4, 0; /* Exception address in r4 */
+ lbui r5, r3, 0; /* Exception address in r3 */
/* Load a half-word, byte-by-byte from destination
address and save it in tmp space */
sbi r5, r0, TOPHYS(ex_tmp_data_loc_0);
- lbui r5, r4, 1;
+ lbui r5, r3, 1;
sbi r5, r0, TOPHYS(ex_tmp_data_loc_1);
- /* Get the destination register value into r3 */
- lhui r3, r0, TOPHYS(ex_tmp_data_loc_0);
+ /* Get the destination register value into r4 */
+ lhui r4, r0, TOPHYS(ex_tmp_data_loc_0);
ex_lw_tail:
/* Get the destination register number into r5 */
lbui r5, r0, TOPHYS(ex_reg_op);
@@ -502,25 +504,25 @@ ex_sw_tail:
andi r6, r6, 0x800; /* Extract ESR[W] */
beqi r6, ex_shw;
/* Get the word - delay slot */
- swi r3, r0, TOPHYS(ex_tmp_data_loc_0);
+ swi r4, r0, TOPHYS(ex_tmp_data_loc_0);
/* Store the word, byte-by-byte into destination address */
- lbui r3, r0, TOPHYS(ex_tmp_data_loc_0);
- sbi r3, r4, 0;
- lbui r3, r0, TOPHYS(ex_tmp_data_loc_1);
- sbi r3, r4, 1;
- lbui r3, r0, TOPHYS(ex_tmp_data_loc_2);
- sbi r3, r4, 2;
- lbui r3, r0, TOPHYS(ex_tmp_data_loc_3);
- sbi r3, r4, 3;
+ lbui r4, r0, TOPHYS(ex_tmp_data_loc_0);
+ sbi r4, r3, 0;
+ lbui r4, r0, TOPHYS(ex_tmp_data_loc_1);
+ sbi r4, r3, 1;
+ lbui r4, r0, TOPHYS(ex_tmp_data_loc_2);
+ sbi r4, r3, 2;
+ lbui r4, r0, TOPHYS(ex_tmp_data_loc_3);
+ sbi r4, r3, 3;
bri ex_handler_done;
ex_shw:
/* Store the lower half-word, byte-by-byte into destination address */
- swi r3, r0, TOPHYS(ex_tmp_data_loc_0);
- lbui r3, r0, TOPHYS(ex_tmp_data_loc_2);
- sbi r3, r4, 0;
- lbui r3, r0, TOPHYS(ex_tmp_data_loc_3);
- sbi r3, r4, 1;
+ swi r4, r0, TOPHYS(ex_tmp_data_loc_0);
+ lbui r4, r0, TOPHYS(ex_tmp_data_loc_2);
+ sbi r4, r3, 0;
+ lbui r4, r0, TOPHYS(ex_tmp_data_loc_3);
+ sbi r4, r3, 1;
ex_sw_end: /* Exception handling of store word, ends. */
ex_handler_done:
@@ -560,21 +562,16 @@ ex_handler_done:
*/
mfs r11, rpid
nop
- bri 4
- mfs r3, rear /* Get faulting address */
- nop
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
*/
- ori r4, r0, CONFIG_KERNEL_START
- cmpu r4, r3, r4
- bgti r4, ex3
+ ori r5, r0, CONFIG_KERNEL_START
+ cmpu r5, r3, r5
+ bgti r5, ex3
/* First, check if it was a zone fault (which means a user
* tried to access a kernel or read-protected page - always
* a SEGV). All other faults here must be stores, so no
* need to check ESR_S as well. */
- mfs r4, resr
- nop
andi r4, r4, 0x800 /* ESR_Z - zone protection */
bnei r4, ex2
@@ -589,8 +586,6 @@ ex_handler_done:
* tried to access a kernel or read-protected page - always
* a SEGV). All other faults here must be stores, so no
* need to check ESR_S as well. */
- mfs r4, resr
- nop
andi r4, r4, 0x800 /* ESR_Z */
bnei r4, ex2
/* get current task address */
@@ -665,8 +660,6 @@ ex_handler_done:
* R3 = ESR
*/
- mfs r3, rear /* Get faulting address */
- nop
RESTORE_STATE;
bri page_fault_instr_trap
@@ -677,18 +670,15 @@ ex_handler_done:
*/
handle_data_tlb_miss_exception:
/* Working registers already saved: R3, R4, R5, R6
- * R3 = ESR
+ * R3 = EAR, R4 = ESR
*/
mfs r11, rpid
nop
- bri 4
- mfs r3, rear /* Get faulting address */
- nop
/* If we are faulting a kernel address, we have to use the
* kernel page tables. */
- ori r4, r0, CONFIG_KERNEL_START
- cmpu r4, r3, r4
+ ori r6, r0, CONFIG_KERNEL_START
+ cmpu r4, r3, r6
bgti r4, ex5
ori r4, r0, swapper_pg_dir
mts rpid, r0 /* TLB will have 0 TID */
@@ -731,9 +721,8 @@ ex_handler_done:
* Many of these bits are software only. Bits we don't set
* here we (properly should) assume have the appropriate value.
*/
+ brid finish_tlb_load
andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */
-
- bri finish_tlb_load
ex7:
/* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
@@ -754,9 +743,6 @@ ex_handler_done:
*/
mfs r11, rpid
nop
- bri 4
- mfs r3, rear /* Get faulting address */
- nop
/* If we are faulting a kernel address, we have to use the
* kernel page tables.
@@ -792,7 +778,7 @@ ex_handler_done:
lwi r4, r5, 0 /* Get Linux PTE */
andi r6, r4, _PAGE_PRESENT
- beqi r6, ex7
+ beqi r6, ex10
ori r4, r4, _PAGE_ACCESSED
swi r4, r5, 0
@@ -805,9 +791,8 @@ ex_handler_done:
* Many of these bits are software only. Bits we don't set
* here we (properly should) assume have the appropriate value.
*/
+ brid finish_tlb_load
andni r4, r4, 0x0ce2 /* Make sure 20, 21 are zero */
-
- bri finish_tlb_load
ex10:
/* The bailout. Restore registers to pre-exception conditions
* and call the heavyweights to help us out.
@@ -837,9 +822,9 @@ ex_handler_done:
andi r5, r5, (MICROBLAZE_TLB_SIZE-1)
ori r6, r0, 1
cmp r31, r5, r6
- blti r31, sem
+ blti r31, ex12
addik r5, r6, 1
- sem:
+ ex12:
/* MS: save back current TLB index */
swi r5, r0, TOPHYS(tlb_index)
@@ -859,7 +844,6 @@ ex_handler_done:
nop
/* Done...restore registers and get out of here. */
- ex12:
mts rpid, r11
nop
bri 4
diff --git a/arch/microblaze/kernel/misc.S b/arch/microblaze/kernel/misc.S
index df16c6287a8..7cf86498326 100644
--- a/arch/microblaze/kernel/misc.S
+++ b/arch/microblaze/kernel/misc.S
@@ -26,9 +26,10 @@
* We avoid flushing the pinned 0, 1 and possibly 2 entries.
*/
.globl _tlbia;
+.type _tlbia, @function
.align 4;
_tlbia:
- addik r12, r0, 63 /* flush all entries (63 - 3) */
+ addik r12, r0, MICROBLAZE_TLB_SIZE - 1 /* flush all entries (63 - 3) */
/* isync */
_tlbia_1:
mts rtlbx, r12
@@ -41,11 +42,13 @@ _tlbia_1:
/* sync */
rtsd r15, 8
nop
+ .size _tlbia, . - _tlbia
/*
* Flush MMU TLB for a particular address (in r5)
*/
.globl _tlbie;
+.type _tlbie, @function
.align 4;
_tlbie:
mts rtlbsx, r5 /* look up the address in TLB */
@@ -59,17 +62,20 @@ _tlbie_1:
rtsd r15, 8
nop
+ .size _tlbie, . - _tlbie
+
/*
* Allocate TLB entry for early console
*/
.globl early_console_reg_tlb_alloc;
+.type early_console_reg_tlb_alloc, @function
.align 4;
early_console_reg_tlb_alloc:
/*
* Load a TLB entry for the UART, so that microblaze_progress() can use
* the UARTs nice and early. We use a 4k real==virtual mapping.
*/
- ori r4, r0, 63
+ ori r4, r0, MICROBLAZE_TLB_SIZE - 1
mts rtlbx, r4 /* TLB slot 2 */
or r4,r5,r0
@@ -86,6 +92,8 @@ early_console_reg_tlb_alloc:
rtsd r15, 8
nop
+ .size early_console_reg_tlb_alloc, . - early_console_reg_tlb_alloc
+
/*
* Copy a whole page (4096 bytes).
*/
@@ -104,6 +112,7 @@ early_console_reg_tlb_alloc:
#define DCACHE_LINE_BYTES (4 * 4)
.globl copy_page;
+.type copy_page, @function
.align 4;
copy_page:
ori r11, r0, (PAGE_SIZE/DCACHE_LINE_BYTES) - 1
@@ -118,3 +127,5 @@ _copy_page_loop:
addik r11, r11, -1
rtsd r15, 8
nop
+
+ .size copy_page, . - copy_page
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index 812f1bf06c9..09bed44dfcd 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -15,6 +15,7 @@
#include <linux/bitops.h>
#include <asm/system.h>
#include <asm/pgalloc.h>
+#include <asm/uaccess.h> /* for USER_DS macros */
#include <asm/cacheflush.h>
void show_regs(struct pt_regs *regs)
@@ -74,7 +75,10 @@ __setup("hlt", hlt_setup);
void default_idle(void)
{
- if (!hlt_counter) {
+ if (likely(hlt_counter)) {
+ while (!need_resched())
+ cpu_relax();
+ } else {
clear_thread_flag(TIF_POLLING_NRFLAG);
smp_mb__after_clear_bit();
local_irq_disable();
@@ -82,9 +86,7 @@ void default_idle(void)
cpu_sleep();
local_irq_enable();
set_thread_flag(TIF_POLLING_NRFLAG);
- } else
- while (!need_resched())
- cpu_relax();
+ }
}
void cpu_idle(void)
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
index f974ec7aa35..17c98dbcec8 100644
--- a/arch/microblaze/kernel/setup.c
+++ b/arch/microblaze/kernel/setup.c
@@ -92,6 +92,12 @@ inline unsigned get_romfs_len(unsigned *addr)
}
#endif /* CONFIG_MTD_UCLINUX_EBSS */
+#if defined(CONFIG_EARLY_PRINTK) && defined(CONFIG_SERIAL_UARTLITE_CONSOLE)
+#define eprintk early_printk
+#else
+#define eprintk printk
+#endif
+
void __init machine_early_init(const char *cmdline, unsigned int ram,
unsigned int fdt, unsigned int msr)
{
@@ -139,32 +145,32 @@ void __init machine_early_init(const char *cmdline, unsigned int ram,
setup_early_printk(NULL);
#endif
- early_printk("Ramdisk addr 0x%08x, ", ram);
+ eprintk("Ramdisk addr 0x%08x, ", ram);
if (fdt)
- early_printk("FDT at 0x%08x\n", fdt);
+ eprintk("FDT at 0x%08x\n", fdt);
else
- early_printk("Compiled-in FDT at 0x%08x\n",
+ eprintk("Compiled-in FDT at 0x%08x\n",
(unsigned int)_fdt_start);
#ifdef CONFIG_MTD_UCLINUX
- early_printk("Found romfs @ 0x%08x (0x%08x)\n",
+ eprintk("Found romfs @ 0x%08x (0x%08x)\n",
romfs_base, romfs_size);
- early_printk("#### klimit %p ####\n", old_klimit);
+ eprintk("#### klimit %p ####\n", old_klimit);
BUG_ON(romfs_size < 0); /* What else can we do? */
- early_printk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
+ eprintk("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
romfs_size, romfs_base, (unsigned)&_ebss);
- early_printk("New klimit: 0x%08x\n", (unsigned)klimit);
+ eprintk("New klimit: 0x%08x\n", (unsigned)klimit);
#endif
#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
if (msr)
- early_printk("!!!Your kernel has setup MSR instruction but "
+ eprintk("!!!Your kernel has setup MSR instruction but "
"CPU don't have it %d\n", msr);
#else
if (!msr)
- early_printk("!!!Your kernel not setup MSR instruction but "
+ eprintk("!!!Your kernel not setup MSR instruction but "
"CPU have it %d\n", msr);
#endif
diff --git a/arch/microblaze/kernel/traps.c b/arch/microblaze/kernel/traps.c
index eaaaf805f31..5e4570ef515 100644
--- a/arch/microblaze/kernel/traps.c
+++ b/arch/microblaze/kernel/traps.c
@@ -22,13 +22,11 @@ void trap_init(void)
__enable_hw_exceptions();
}
-static int kstack_depth_to_print = 24;
+static unsigned long kstack_depth_to_print = 24;
static int __init kstack_setup(char *s)
{
- kstack_depth_to_print = strict_strtoul(s, 0, NULL);
-
- return 1;
+ return !strict_strtoul(s, 0, &kstack_depth_to_print);
}
__setup("kstack=", kstack_setup);
diff --git a/arch/microblaze/lib/Makefile b/arch/microblaze/lib/Makefile
index b579db068c0..4dfe47d3cd9 100644
--- a/arch/microblaze/lib/Makefile
+++ b/arch/microblaze/lib/Makefile
@@ -10,5 +10,4 @@ else
lib-y += memcpy.o memmove.o
endif
-lib-$(CONFIG_NO_MMU) += uaccess.o
-lib-$(CONFIG_MMU) += uaccess_old.o
+lib-y += uaccess_old.o
diff --git a/arch/microblaze/lib/fastcopy.S b/arch/microblaze/lib/fastcopy.S
index 02e3ab4eddf..fdc48bb065d 100644
--- a/arch/microblaze/lib/fastcopy.S
+++ b/arch/microblaze/lib/fastcopy.S
@@ -30,8 +30,9 @@
*/
#include <linux/linkage.h>
-
+ .text
.globl memcpy
+ .type memcpy, @function
.ent memcpy
memcpy:
@@ -345,9 +346,11 @@ a_done:
rtsd r15, 8
nop
+.size memcpy, . - memcpy
.end memcpy
/*----------------------------------------------------------------------------*/
.globl memmove
+ .type memmove, @function
.ent memmove
memmove:
@@ -659,4 +662,5 @@ d_done:
rtsd r15, 8
nop
+.size memmove, . - memmove
.end memmove
diff --git a/arch/microblaze/lib/memcpy.c b/arch/microblaze/lib/memcpy.c
index cc2108b6b26..014bac92bdf 100644
--- a/arch/microblaze/lib/memcpy.c
+++ b/arch/microblaze/lib/memcpy.c
@@ -53,7 +53,7 @@ void *memcpy(void *v_dst, const void *v_src, __kernel_size_t c)
const uint32_t *i_src;
uint32_t *i_dst;
- if (c >= 4) {
+ if (likely(c >= 4)) {
unsigned value, buf_hold;
/* Align the dstination to a word boundry. */
diff --git a/arch/microblaze/lib/memset.c b/arch/microblaze/lib/memset.c
index 4df851d41a2..ecfb663e1fc 100644
--- a/arch/microblaze/lib/memset.c
+++ b/arch/microblaze/lib/memset.c
@@ -33,22 +33,23 @@
#ifdef __HAVE_ARCH_MEMSET
void *memset(void *v_src, int c, __kernel_size_t n)
{
-
char *src = v_src;
#ifdef CONFIG_OPT_LIB_FUNCTION
uint32_t *i_src;
- uint32_t w32;
+ uint32_t w32 = 0;
#endif
/* Truncate c to 8 bits */
c = (c & 0xFF);
#ifdef CONFIG_OPT_LIB_FUNCTION
- /* Make a repeating word out of it */
- w32 = c;
- w32 |= w32 << 8;
- w32 |= w32 << 16;
+ if (unlikely(c)) {
+ /* Make a repeating word out of it */
+ w32 = c;
+ w32 |= w32 << 8;
+ w32 |= w32 << 16;
+ }
- if (n >= 4) {
+ if (likely(n >= 4)) {
/* Align the destination to a word boundary */
/* This is done in an endian independant manner */
switch ((unsigned) src & 3) {
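The memset() change above only builds the repeating 32-bit fill word when the byte value is nonzero, so the common memset(p, 0, n) case skips the shifts entirely. A minimal standalone C sketch of that word-building step (illustration only, not taken from the kernel sources):

#include <stdint.h>

/* Replicate an 8-bit fill value into a 32-bit word: 0xAB -> 0xABABABAB. */
static uint32_t repeat_byte32(int c)
{
        uint32_t w32 = 0;

        c &= 0xFF;                      /* truncate to 8 bits, as memset() does */
        if (c) {                        /* zero needs no work, w32 is already 0 */
                w32 = c;
                w32 |= w32 << 8;        /* 0x000000AB -> 0x0000ABAB */
                w32 |= w32 << 16;       /* 0x0000ABAB -> 0xABABABAB */
        }
        return w32;
}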
diff --git a/arch/microblaze/lib/uaccess.c b/arch/microblaze/lib/uaccess.c
deleted file mode 100644
index a853fe089c4..00000000000
--- a/arch/microblaze/lib/uaccess.c
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (C) 2006 Atmark Techno, Inc.
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-
-#include <linux/string.h>
-#include <asm/uaccess.h>
-
-#include <asm/bug.h>
-
-long strnlen_user(const char __user *src, long count)
-{
- return strlen(src) + 1;
-}
-
-#define __do_strncpy_from_user(dst, src, count, res) \
- do { \
- char *tmp; \
- strncpy(dst, src, count); \
- for (tmp = dst; *tmp && count > 0; tmp++, count--) \
- ; \
- res = (tmp - dst); \
- } while (0)
-
-long __strncpy_from_user(char *dst, const char __user *src, long count)
-{
- long res;
- __do_strncpy_from_user(dst, src, count, res);
- return res;
-}
-
-long strncpy_from_user(char *dst, const char __user *src, long count)
-{
- long res = -EFAULT;
- if (access_ok(VERIFY_READ, src, 1))
- __do_strncpy_from_user(dst, src, count, res);
- return res;
-}
-
-unsigned long __copy_tofrom_user(void __user *to,
- const void __user *from, unsigned long size)
-{
- memcpy(to, from, size);
- return 0;
-}
diff --git a/arch/microblaze/lib/uaccess_old.S b/arch/microblaze/lib/uaccess_old.S
index 67f991c14b8..5810cec54a7 100644
--- a/arch/microblaze/lib/uaccess_old.S
+++ b/arch/microblaze/lib/uaccess_old.S
@@ -22,6 +22,7 @@
.text
.globl __strncpy_user;
+.type __strncpy_user, @function
.align 4;
__strncpy_user:
@@ -50,7 +51,7 @@ __strncpy_user:
3:
rtsd r15,8
nop
-
+ .size __strncpy_user, . - __strncpy_user
.section .fixup, "ax"
.align 2
@@ -72,6 +73,7 @@ __strncpy_user:
.text
.globl __strnlen_user;
+.type __strnlen_user, @function
.align 4;
__strnlen_user:
addik r3,r6,0
@@ -90,7 +92,7 @@ __strnlen_user:
3:
rtsd r15,8
nop
-
+ .size __strnlen_user, . - __strnlen_user
.section .fixup,"ax"
4:
@@ -108,6 +110,7 @@ __strnlen_user:
*/
.text
.globl __copy_tofrom_user;
+.type __copy_tofrom_user, @function
.align 4;
__copy_tofrom_user:
/*
@@ -116,20 +119,34 @@ __copy_tofrom_user:
* r7, r3 - count
* r4 - tempval
*/
- addik r3,r7,0
- beqi r3,3f
-1:
- lbu r4,r6,r0
- addik r6,r6,1
-2:
- sb r4,r5,r0
- addik r3,r3,-1
- bneid r3,1b
- addik r5,r5,1 /* delay slot */
+ beqid r7, 3f /* zero size is not likely */
+ andi r3, r7, 0x3 /* check whether count is a multiple of 4 */
+ bneid r3, 4f /* count not a multiple of 4 -> byte copy */
+ or r3, r5, r6 /* check whether to or from is unaligned */
+ andi r3, r3, 0x3 /* mask the alignment bits */
+ bneid r3, 1f /* unaligned -> jump to the byte loop */
+ or r3, r0, r0
+
+/* at least one 4 byte copy */
+5: lw r4, r6, r3
+6: sw r4, r5, r3
+ addik r7, r7, -4
+ bneid r7, 5b
+ addik r3, r3, 4
+ addik r3, r7, 0
+ rtsd r15, 8
+ nop
+4: or r3, r0, r0
+1: lbu r4,r6,r3
+2: sb r4,r5,r3
+ addik r7,r7,-1
+ bneid r7,1b
+ addik r3,r3,1 /* delay slot */
3:
+ addik r3,r7,0
rtsd r15,8
nop
-
+ .size __copy_tofrom_user, . - __copy_tofrom_user
.section __ex_table,"a"
- .word 1b,3b,2b,3b
+ .word 1b,3b,2b,3b,5b,3b,6b,3b
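The rewritten __copy_tofrom_user above adds a fast path: when the length is a non-zero multiple of 4 and both pointers are word aligned it copies whole words, otherwise it falls back to the old byte-by-byte loop, and in both cases r3 ends up holding the number of bytes left uncopied (0 on success). A rough C equivalent of that dispatch, for illustration only and without the exception-table fixups the assembly depends on:

#include <stdint.h>

/* Sketch: returns the number of bytes NOT copied (0 on success).
 * The real routine recovers from faults through its __ex_table
 * entries; this sketch simply assumes the accesses cannot fault. */
static unsigned long copy_tofrom_sketch(void *to, const void *from,
                                        unsigned long n)
{
        uintptr_t misaligned = ((uintptr_t)to | (uintptr_t)from) & 0x3;

        if (n == 0)
                return 0;

        if ((n & 0x3) == 0 && misaligned == 0) {
                uint32_t *d = to;               /* fast path: whole words */
                const uint32_t *s = from;

                for (; n; n -= 4)
                        *d++ = *s++;
        } else {
                char *d = to;                   /* slow path: single bytes */
                const char *s = from;

                for (; n; n--)
                        *d++ = *s++;
        }
        return n;       /* reaches 0 here; nonzero only after a fault */
}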
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index d9d249a66ff..7af87f4b2c2 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -106,7 +106,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
regs->esr = error_code;
/* On a kernel SLB miss we can only check for a valid exception entry */
- if (kernel_mode(regs) && (address >= TASK_SIZE)) {
+ if (unlikely(kernel_mode(regs) && (address >= TASK_SIZE))) {
printk(KERN_WARNING "kernel task_size exceed");
_exception(SIGSEGV, regs, code, address);
}
@@ -122,7 +122,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
}
#endif /* CONFIG_KGDB */
- if (in_atomic() || !mm) {
+ if (unlikely(in_atomic() || !mm)) {
if (kernel_mode(regs))
goto bad_area_nosemaphore;
@@ -150,7 +150,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
* source. If this is invalid we can skip the address space check,
* thus avoiding the deadlock.
*/
- if (!down_read_trylock(&mm->mmap_sem)) {
+ if (unlikely(!down_read_trylock(&mm->mmap_sem))) {
if (kernel_mode(regs) && !search_exception_tables(regs->pc))
goto bad_area_nosemaphore;
@@ -158,16 +158,16 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
}
vma = find_vma(mm, address);
- if (!vma)
+ if (unlikely(!vma))
goto bad_area;
if (vma->vm_start <= address)
goto good_area;
- if (!(vma->vm_flags & VM_GROWSDOWN))
+ if (unlikely(!(vma->vm_flags & VM_GROWSDOWN)))
goto bad_area;
- if (!is_write)
+ if (unlikely(!is_write))
goto bad_area;
/*
@@ -179,7 +179,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
* before setting the user r1. Thus we allow the stack to
* expand to 1MB without further checks.
*/
- if (address + 0x100000 < vma->vm_end) {
+ if (unlikely(address + 0x100000 < vma->vm_end)) {
/* get user regs even if this fault is in kernel mode */
struct pt_regs *uregs = current->thread.regs;
@@ -209,15 +209,15 @@ good_area:
code = SEGV_ACCERR;
/* a write */
- if (is_write) {
- if (!(vma->vm_flags & VM_WRITE))
+ if (unlikely(is_write)) {
+ if (unlikely(!(vma->vm_flags & VM_WRITE)))
goto bad_area;
/* a read */
} else {
/* protection fault */
- if (error_code & 0x08000000)
+ if (unlikely(error_code & 0x08000000))
goto bad_area;
- if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ if (unlikely(!(vma->vm_flags & (VM_READ | VM_EXEC))))
goto bad_area;
}
@@ -235,7 +235,7 @@ survive:
goto do_sigbus;
BUG();
}
- if (fault & VM_FAULT_MAJOR)
+ if (unlikely(fault & VM_FAULT_MAJOR))
current->maj_flt++;
else
current->min_flt++;
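The do_page_fault() changes above wrap every error path in unlikely() so the compiler keeps the fault-free path as the straight-line code. These hints are thin wrappers around GCC's __builtin_expect(); in simplified form (see include/linux/compiler.h for the real definitions):

/* The double negation turns any truth value into 0 or 1 so it can be
 * compared against the expected constant. */
#define likely(x)       __builtin_expect(!!(x), 1)
#define unlikely(x)     __builtin_expect(!!(x), 0)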
diff --git a/arch/microblaze/mm/init.c b/arch/microblaze/mm/init.c
index 1608e2e1a44..40bc10ede09 100644
--- a/arch/microblaze/mm/init.c
+++ b/arch/microblaze/mm/init.c
@@ -165,7 +165,6 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
for (addr = begin; addr < end; addr += PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
init_page_count(virt_to_page(addr));
- memset((void *)addr, 0xcc, PAGE_SIZE);
free_page(addr);
totalram_pages++;
}
@@ -208,14 +207,6 @@ void __init mem_init(void)
}
#ifndef CONFIG_MMU
-/* Check against bounds of physical memory */
-int ___range_ok(unsigned long addr, unsigned long size)
-{
- return ((addr < memory_start) ||
- ((addr + size) > memory_end));
-}
-EXPORT_SYMBOL(___range_ok);
-
int page_is_ram(unsigned long pfn)
{
return __range_ok(pfn, 0);
diff --git a/arch/microblaze/mm/pgtable.c b/arch/microblaze/mm/pgtable.c
index 63a6fd07c48..d31312cde6e 100644
--- a/arch/microblaze/mm/pgtable.c
+++ b/arch/microblaze/mm/pgtable.c
@@ -154,7 +154,7 @@ int map_page(unsigned long va, phys_addr_t pa, int flags)
err = 0;
set_pte_at(&init_mm, va, pg, pfn_pte(pa >> PAGE_SHIFT,
__pgprot(flags)));
- if (mem_init_done)
+ if (unlikely(mem_init_done))
flush_HPTE(0, va, pmd_val(*pd));
/* flush_HPTE(0, va, pg); */
}
diff --git a/arch/powerpc/include/asm/asm-compat.h b/arch/powerpc/include/asm/asm-compat.h
index c1b475a941e..a9b91ed3d4b 100644
--- a/arch/powerpc/include/asm/asm-compat.h
+++ b/arch/powerpc/include/asm/asm-compat.h
@@ -28,6 +28,7 @@
#define PPC_LLARX(t, a, b, eh) PPC_LDARX(t, a, b, eh)
#define PPC_STLCX stringify_in_c(stdcx.)
#define PPC_CNTLZL stringify_in_c(cntlzd)
+#define PPC_LR_STKOFF 16
/* Move to CR, single-entry optimized version. Only available
* on POWER4 and later.
@@ -51,6 +52,7 @@
#define PPC_STLCX stringify_in_c(stwcx.)
#define PPC_CNTLZL stringify_in_c(cntlzw)
#define PPC_MTOCRF stringify_in_c(mtcrf)
+#define PPC_LR_STKOFF 4
#endif
diff --git a/arch/powerpc/kernel/misc.S b/arch/powerpc/kernel/misc.S
index 2d29752cbe1..22e507c8a55 100644
--- a/arch/powerpc/kernel/misc.S
+++ b/arch/powerpc/kernel/misc.S
@@ -127,3 +127,29 @@ _GLOBAL(__setup_cpu_power7)
_GLOBAL(__restore_cpu_power7)
/* place holder */
blr
+
+/*
+ * Get a minimal set of registers for our caller's nth caller.
+ * r3 = regs pointer, r5 = n.
+ *
+ * We only get R1 (stack pointer), NIP (next instruction pointer)
+ * and LR (link register). These are all we can get in the
+ * general case without doing complicated stack unwinding, but
+ * fortunately they are enough to do a stack backtrace, which
+ * is all we need them for.
+ */
+_GLOBAL(perf_arch_fetch_caller_regs)
+ mr r6,r1
+ cmpwi r5,0
+ mflr r4
+ ble 2f
+ mtctr r5
+1: PPC_LL r6,0(r6)
+ bdnz 1b
+ PPC_LL r4,PPC_LR_STKOFF(r6)
+2: PPC_LL r7,0(r6)
+ PPC_LL r7,PPC_LR_STKOFF(r7)
+ PPC_STL r6,GPR1-STACK_FRAME_OVERHEAD(r3)
+ PPC_STL r4,_NIP-STACK_FRAME_OVERHEAD(r3)
+ PPC_STL r7,_LINK-STACK_FRAME_OVERHEAD(r3)
+ blr
diff --git a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
index 929d017535a..d4f8be307cd 100644
--- a/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
+++ b/arch/powerpc/platforms/52xx/mpc52xx_lpbfifo.c
@@ -481,6 +481,8 @@ mpc52xx_lpbfifo_probe(struct of_device *op, const struct of_device_id *match)
if (rc)
goto err_bcom_rx_irq;
+ lpbfifo.dma_irqs_enabled = 1;
+
/* Request the Bestcomm transmit (memory --> fifo) task and IRQ */
lpbfifo.bcom_tx_task =
bcom_gen_bd_tx_init(2, res.start + LPBFIFO_REG_FIFO_DATA,
diff --git a/arch/sh/kernel/return_address.c b/arch/sh/kernel/return_address.c
index df3ab581107..cbf1dd5372b 100644
--- a/arch/sh/kernel/return_address.c
+++ b/arch/sh/kernel/return_address.c
@@ -9,6 +9,7 @@
* for more details.
*/
#include <linux/kernel.h>
+#include <linux/module.h>
#include <asm/dwarf.h>
#ifdef CONFIG_DWARF_UNWINDER
@@ -52,3 +53,5 @@ void *return_address(unsigned int depth)
}
#endif
+
+EXPORT_SYMBOL_GPL(return_address);
diff --git a/arch/sh/mm/tlb-pteaex.c b/arch/sh/mm/tlb-pteaex.c
index bdd0982b56e..b71db6af806 100644
--- a/arch/sh/mm/tlb-pteaex.c
+++ b/arch/sh/mm/tlb-pteaex.c
@@ -77,3 +77,31 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page)
__raw_writel(asid, MMU_ITLB_ADDRESS_ARRAY2 | MMU_PAGE_ASSOC_BIT);
back_to_cached();
}
+
+void local_flush_tlb_all(void)
+{
+ unsigned long flags, status;
+ int i;
+
+ /*
+ * Flush all the TLB.
+ */
+ local_irq_save(flags);
+ jump_to_uncached();
+
+ status = __raw_readl(MMUCR);
+ status = ((status & MMUCR_URB) >> MMUCR_URB_SHIFT);
+
+ if (status == 0)
+ status = MMUCR_URB_NENTRIES;
+
+ for (i = 0; i < status; i++)
+ __raw_writel(0x0, MMU_UTLB_ADDRESS_ARRAY | (i << 8));
+
+ for (i = 0; i < 4; i++)
+ __raw_writel(0x0, MMU_ITLB_ADDRESS_ARRAY | (i << 8));
+
+ back_to_cached();
+ ctrl_barrier();
+ local_irq_restore(flags);
+}
diff --git a/arch/sh/mm/tlb-sh3.c b/arch/sh/mm/tlb-sh3.c
index 4f5f7cbdd50..7a940dbfc2e 100644
--- a/arch/sh/mm/tlb-sh3.c
+++ b/arch/sh/mm/tlb-sh3.c
@@ -77,3 +77,22 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page)
for (i = 0; i < ways; i++)
__raw_writel(data, addr + (i << 8));
}
+
+void local_flush_tlb_all(void)
+{
+ unsigned long flags, status;
+
+ /*
+ * Flush all the TLB.
+ *
+ * Write to the MMU control register's bit:
+ * TF-bit for SH-3, TI-bit for SH-4.
+ * It's the same position, bit #2.
+ */
+ local_irq_save(flags);
+ status = __raw_readl(MMUCR);
+ status |= 0x04;
+ __raw_writel(status, MMUCR);
+ ctrl_barrier();
+ local_irq_restore(flags);
+}
diff --git a/arch/sh/mm/tlb-sh4.c b/arch/sh/mm/tlb-sh4.c
index ccac77f504a..cfdf7930d29 100644
--- a/arch/sh/mm/tlb-sh4.c
+++ b/arch/sh/mm/tlb-sh4.c
@@ -80,3 +80,31 @@ void local_flush_tlb_one(unsigned long asid, unsigned long page)
__raw_writel(data, addr);
back_to_cached();
}
+
+void local_flush_tlb_all(void)
+{
+ unsigned long flags, status;
+ int i;
+
+ /*
+ * Flush all the TLB.
+ */
+ local_irq_save(flags);
+ jump_to_uncached();
+
+ status = __raw_readl(MMUCR);
+ status = ((status & MMUCR_URB) >> MMUCR_URB_SHIFT);
+
+ if (status == 0)
+ status = MMUCR_URB_NENTRIES;
+
+ for (i = 0; i < status; i++)
+ __raw_writel(0x0, MMU_UTLB_ADDRESS_ARRAY | (i << 8));
+
+ for (i = 0; i < 4; i++)
+ __raw_writel(0x0, MMU_ITLB_ADDRESS_ARRAY | (i << 8));
+
+ back_to_cached();
+ ctrl_barrier();
+ local_irq_restore(flags);
+}
diff --git a/arch/sh/mm/tlbflush_32.c b/arch/sh/mm/tlbflush_32.c
index 77dc5efa712..3fbe03ce8fe 100644
--- a/arch/sh/mm/tlbflush_32.c
+++ b/arch/sh/mm/tlbflush_32.c
@@ -119,31 +119,3 @@ void local_flush_tlb_mm(struct mm_struct *mm)
local_irq_restore(flags);
}
}
-
-void local_flush_tlb_all(void)
-{
- unsigned long flags, status;
- int i;
-
- /*
- * Flush all the TLB.
- */
- local_irq_save(flags);
- jump_to_uncached();
-
- status = __raw_readl(MMUCR);
- status = ((status & MMUCR_URB) >> MMUCR_URB_SHIFT);
-
- if (status == 0)
- status = MMUCR_URB_NENTRIES;
-
- for (i = 0; i < status; i++)
- __raw_writel(0x0, MMU_UTLB_ADDRESS_ARRAY | (i << 8));
-
- for (i = 0; i < 4; i++)
- __raw_writel(0x0, MMU_ITLB_ADDRESS_ARRAY | (i << 8));
-
- back_to_cached();
- ctrl_barrier();
- local_irq_restore(flags);
-}
diff --git a/arch/sparc/configs/sparc64_defconfig b/arch/sparc/configs/sparc64_defconfig
index 56e3163673e..259e3fd5099 100644
--- a/arch/sparc/configs/sparc64_defconfig
+++ b/arch/sparc/configs/sparc64_defconfig
@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
-# Linux kernel version: 2.6.33
-# Wed Mar 3 02:54:29 2010
+# Linux kernel version: 2.6.34-rc3
+# Sat Apr 3 15:49:56 2010
#
CONFIG_64BIT=y
CONFIG_SPARC=y
@@ -23,6 +23,7 @@ CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y
CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_MMU=y
+CONFIG_NEED_DMA_MAP_STATE=y
CONFIG_ARCH_NO_VIRT_TO_BUS=y
CONFIG_OF=y
CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y
@@ -439,6 +440,7 @@ CONFIG_MISC_DEVICES=y
# CONFIG_ENCLOSURE_SERVICES is not set
# CONFIG_HP_ILO is not set
# CONFIG_ISL29003 is not set
+# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_DS1682 is not set
# CONFIG_C2PORT is not set
@@ -511,6 +513,7 @@ CONFIG_BLK_DEV_IDEDMA=y
#
# SCSI device support
#
+CONFIG_SCSI_MOD=y
CONFIG_RAID_ATTRS=m
CONFIG_SCSI=y
CONFIG_SCSI_DMA=y
@@ -888,6 +891,7 @@ CONFIG_SERIAL_SUNHV=y
CONFIG_SERIAL_CORE=y
CONFIG_SERIAL_CORE_CONSOLE=y
# CONFIG_SERIAL_JSM is not set
+# CONFIG_SERIAL_TIMBERDALE is not set
# CONFIG_SERIAL_GRLIB_GAISLER_APBUART is not set
CONFIG_UNIX98_PTYS=y
# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set
@@ -935,6 +939,7 @@ CONFIG_I2C_ALGOBIT=y
#
# CONFIG_I2C_OCORES is not set
# CONFIG_I2C_SIMTEC is not set
+# CONFIG_I2C_XILINX is not set
#
# External I2C/SMBus adapter drivers
@@ -948,15 +953,9 @@ CONFIG_I2C_ALGOBIT=y
#
# CONFIG_I2C_PCA_PLATFORM is not set
# CONFIG_I2C_STUB is not set
-
-#
-# Miscellaneous I2C Chip support
-#
-# CONFIG_SENSORS_TSL2550 is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
-# CONFIG_I2C_DEBUG_CHIP is not set
# CONFIG_SPI is not set
#
@@ -982,10 +981,11 @@ CONFIG_HWMON=y
# CONFIG_SENSORS_ADM1029 is not set
# CONFIG_SENSORS_ADM1031 is not set
# CONFIG_SENSORS_ADM9240 is not set
+# CONFIG_SENSORS_ADT7411 is not set
# CONFIG_SENSORS_ADT7462 is not set
# CONFIG_SENSORS_ADT7470 is not set
-# CONFIG_SENSORS_ADT7473 is not set
# CONFIG_SENSORS_ADT7475 is not set
+# CONFIG_SENSORS_ASC7621 is not set
# CONFIG_SENSORS_ATXP1 is not set
# CONFIG_SENSORS_DS1621 is not set
# CONFIG_SENSORS_I5K_AMB is not set
@@ -1052,18 +1052,21 @@ CONFIG_SSB_POSSIBLE=y
# Multifunction device drivers
#
# CONFIG_MFD_CORE is not set
+# CONFIG_MFD_88PM860X is not set
# CONFIG_MFD_SM501 is not set
# CONFIG_HTC_PASIC3 is not set
# CONFIG_TWL4030_CORE is not set
# CONFIG_MFD_TMIO is not set
# CONFIG_PMIC_DA903X is not set
# CONFIG_PMIC_ADP5520 is not set
+# CONFIG_MFD_MAX8925 is not set
# CONFIG_MFD_WM8400 is not set
# CONFIG_MFD_WM831X is not set
# CONFIG_MFD_WM8350_I2C is not set
+# CONFIG_MFD_WM8994 is not set
# CONFIG_MFD_PCF50633 is not set
# CONFIG_AB3100_CORE is not set
-# CONFIG_MFD_88PM8607 is not set
+# CONFIG_LPC_SCH is not set
# CONFIG_REGULATOR is not set
# CONFIG_MEDIA_SUPPORT is not set
@@ -1113,6 +1116,7 @@ CONFIG_FB_FFB=y
# CONFIG_FB_LEO is not set
CONFIG_FB_XVR500=y
CONFIG_FB_XVR2500=y
+CONFIG_FB_XVR1000=y
# CONFIG_FB_S1D13XXX is not set
# CONFIG_FB_NVIDIA is not set
# CONFIG_FB_RIVA is not set
@@ -1430,7 +1434,6 @@ CONFIG_USB_STORAGE=m
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_LEGOTOWER is not set
# CONFIG_USB_LCD is not set
-# CONFIG_USB_BERRY_CHARGE is not set
# CONFIG_USB_LED is not set
# CONFIG_USB_CYPRESS_CY7C63 is not set
# CONFIG_USB_CYTHERM is not set
@@ -1443,7 +1446,6 @@ CONFIG_USB_STORAGE=m
# CONFIG_USB_IOWARRIOR is not set
# CONFIG_USB_TEST is not set
# CONFIG_USB_ISIGHTFW is not set
-# CONFIG_USB_VST is not set
# CONFIG_USB_GADGET is not set
#
@@ -1610,6 +1612,7 @@ CONFIG_MISC_FILESYSTEMS=y
# CONFIG_BEFS_FS is not set
# CONFIG_BFS_FS is not set
# CONFIG_EFS_FS is not set
+# CONFIG_LOGFS is not set
# CONFIG_CRAMFS is not set
# CONFIG_SQUASHFS is not set
# CONFIG_VXFS_FS is not set
@@ -1624,6 +1627,7 @@ CONFIG_NETWORK_FILESYSTEMS=y
# CONFIG_NFS_FS is not set
# CONFIG_NFSD is not set
# CONFIG_SMB_FS is not set
+# CONFIG_CEPH_FS is not set
# CONFIG_CIFS is not set
# CONFIG_NCP_FS is not set
# CONFIG_CODA_FS is not set
diff --git a/arch/sparc/include/asm/stat.h b/arch/sparc/include/asm/stat.h
index 39327d6a57e..a232e9e1f4e 100644
--- a/arch/sparc/include/asm/stat.h
+++ b/arch/sparc/include/asm/stat.h
@@ -53,8 +53,8 @@ struct stat {
ino_t st_ino;
mode_t st_mode;
short st_nlink;
- uid16_t st_uid;
- gid16_t st_gid;
+ unsigned short st_uid;
+ unsigned short st_gid;
unsigned short st_rdev;
off_t st_size;
time_t st_atime;
diff --git a/arch/sparc/kernel/helpers.S b/arch/sparc/kernel/helpers.S
index 314dd0c9fc5..92090cc9e82 100644
--- a/arch/sparc/kernel/helpers.S
+++ b/arch/sparc/kernel/helpers.S
@@ -46,6 +46,81 @@ stack_trace_flush:
nop
.size stack_trace_flush,.-stack_trace_flush
+#ifdef CONFIG_PERF_EVENTS
+ .globl perf_arch_fetch_caller_regs
+ .type perf_arch_fetch_caller_regs,#function
+perf_arch_fetch_caller_regs:
+ /* We always read the %pstate into %o5 since we will use
+ * that to construct a fake %tstate to store into the regs.
+ */
+ rdpr %pstate, %o5
+ brz,pn %o2, 50f
+ mov %o2, %g7
+
+ /* Turn off interrupts while we walk around the register
+ * window by hand.
+ */
+ wrpr %o5, PSTATE_IE, %pstate
+
+ /* The %canrestore tells us how many register windows are
+ * still live in the chip above us, past that we have to
+ * walk the frame as saved on the stack. We stash away
+ * the %cwp in %g1 so we can return back to the original
+ * register window.
+ */
+ rdpr %cwp, %g1
+ rdpr %canrestore, %g2
+ sub %g1, 1, %g3
+
+ /* We have the skip count in %g7, if it hits zero then
+ * %fp/%i7 are the registers we need. Otherwise if our
+ * %canrestore count maintained in %g2 hits zero we have
+ * to start traversing the stack.
+ */
+10: brz,pn %g2, 4f
+ sub %g2, 1, %g2
+ wrpr %g3, %cwp
+ subcc %g7, 1, %g7
+ bne,pt %xcc, 10b
+ sub %g3, 1, %g3
+
+ /* We found the values we need in the cpu's register
+ * windows.
+ */
+ mov %fp, %g3
+ ba,pt %xcc, 3f
+ mov %i7, %g2
+
+50: mov %fp, %g3
+ ba,pt %xcc, 2f
+ mov %i7, %g2
+
+ /* We hit the end of the valid register windows in the
+ * cpu, start traversing the stack frame.
+ */
+4: mov %fp, %g3
+
+20: ldx [%g3 + STACK_BIAS + RW_V9_I7], %g2
+ subcc %g7, 1, %g7
+ bne,pn %xcc, 20b
+ ldx [%g3 + STACK_BIAS + RW_V9_I6], %g3
+
+ /* Restore the current register window position and
+ * re-enable interrupts.
+ */
+3: wrpr %g1, %cwp
+ wrpr %o5, %pstate
+
+2: stx %g3, [%o0 + PT_V9_FP]
+ sllx %o5, 8, %o5
+ stx %o5, [%o0 + PT_V9_TSTATE]
+ stx %g2, [%o0 + PT_V9_TPC]
+ add %g2, 4, %g2
+ retl
+ stx %g2, [%o0 + PT_V9_TNPC]
+ .size perf_arch_fetch_caller_regs,.-perf_arch_fetch_caller_regs
+#endif /* CONFIG_PERF_EVENTS */
+
#ifdef CONFIG_SMP
.globl hard_smp_processor_id
.type hard_smp_processor_id,#function
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index 68cb9b42088..e2771939341 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -1337,7 +1337,7 @@ static void perf_callchain_user_32(struct pt_regs *regs,
callchain_store(entry, PERF_CONTEXT_USER);
callchain_store(entry, regs->tpc);
- ufp = regs->u_regs[UREG_I6];
+ ufp = regs->u_regs[UREG_I6] & 0xffffffffUL;
do {
struct sparc_stackf32 *usf, sf;
unsigned long pc;
diff --git a/arch/sparc/kernel/ptrace_32.c b/arch/sparc/kernel/ptrace_32.c
index 7e3dfd9bb97..e608f397e11 100644
--- a/arch/sparc/kernel/ptrace_32.c
+++ b/arch/sparc/kernel/ptrace_32.c
@@ -65,6 +65,7 @@ static int genregs32_get(struct task_struct *target,
*k++ = regs->u_regs[pos++];
reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
+ reg_window -= 16;
for (; count > 0 && pos < 32; count--) {
if (get_user(*k++, &reg_window[pos++]))
return -EFAULT;
@@ -76,6 +77,7 @@ static int genregs32_get(struct task_struct *target,
}
reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
+ reg_window -= 16;
for (; count > 0 && pos < 32; count--) {
if (get_user(reg, &reg_window[pos++]) ||
put_user(reg, u++))
@@ -141,6 +143,7 @@ static int genregs32_set(struct task_struct *target,
regs->u_regs[pos++] = *k++;
reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
+ reg_window -= 16;
for (; count > 0 && pos < 32; count--) {
if (put_user(*k++, &reg_window[pos++]))
return -EFAULT;
@@ -153,6 +156,7 @@ static int genregs32_set(struct task_struct *target,
}
reg_window = (unsigned long __user *) regs->u_regs[UREG_I6];
+ reg_window -= 16;
for (; count > 0 && pos < 32; count--) {
if (get_user(reg, u++) ||
put_user(reg, &reg_window[pos++]))
diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
index 2f6524d1a81..aa90da08bf6 100644
--- a/arch/sparc/kernel/ptrace_64.c
+++ b/arch/sparc/kernel/ptrace_64.c
@@ -492,6 +492,7 @@ static int genregs32_get(struct task_struct *target,
*k++ = regs->u_regs[pos++];
reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
+ reg_window -= 16;
if (target == current) {
for (; count > 0 && pos < 32; count--) {
if (get_user(*k++, &reg_window[pos++]))
@@ -516,6 +517,7 @@ static int genregs32_get(struct task_struct *target,
}
reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
+ reg_window -= 16;
if (target == current) {
for (; count > 0 && pos < 32; count--) {
if (get_user(reg, &reg_window[pos++]) ||
@@ -599,6 +601,7 @@ static int genregs32_set(struct task_struct *target,
regs->u_regs[pos++] = *k++;
reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
+ reg_window -= 16;
if (target == current) {
for (; count > 0 && pos < 32; count--) {
if (put_user(*k++, &reg_window[pos++]))
@@ -625,6 +628,7 @@ static int genregs32_set(struct task_struct *target,
}
reg_window = (compat_ulong_t __user *) regs->u_regs[UREG_I6];
+ reg_window -= 16;
if (target == current) {
for (; count > 0 && pos < 32; count--) {
if (get_user(reg, u++) ||
diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
index ca39c606fe8..1eb8b00aed7 100644
--- a/arch/sparc/kernel/sysfs.c
+++ b/arch/sparc/kernel/sysfs.c
@@ -107,12 +107,12 @@ static unsigned long run_on_cpu(unsigned long cpu,
unsigned long ret;
/* should return -EINVAL to userspace */
- if (set_cpus_allowed(current, cpumask_of_cpu(cpu)))
+ if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
return 0;
ret = func(arg);
- set_cpus_allowed(current, old_affinity);
+ set_cpus_allowed_ptr(current, &old_affinity);
return ret;
}
diff --git a/arch/sparc/kernel/us2e_cpufreq.c b/arch/sparc/kernel/us2e_cpufreq.c
index 791c15138f3..8f982b76c71 100644
--- a/arch/sparc/kernel/us2e_cpufreq.c
+++ b/arch/sparc/kernel/us2e_cpufreq.c
@@ -238,12 +238,12 @@ static unsigned int us2e_freq_get(unsigned int cpu)
return 0;
cpus_allowed = current->cpus_allowed;
- set_cpus_allowed(current, cpumask_of_cpu(cpu));
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
clock_tick = sparc64_get_clock_tick(cpu) / 1000;
estar = read_hbreg(HBIRD_ESTAR_MODE_ADDR);
- set_cpus_allowed(current, cpus_allowed);
+ set_cpus_allowed_ptr(current, &cpus_allowed);
return clock_tick / estar_to_divisor(estar);
}
@@ -259,7 +259,7 @@ static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index)
return;
cpus_allowed = current->cpus_allowed;
- set_cpus_allowed(current, cpumask_of_cpu(cpu));
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
new_freq = clock_tick = sparc64_get_clock_tick(cpu) / 1000;
new_bits = index_to_estar_mode(index);
@@ -281,7 +281,7 @@ static void us2e_set_cpu_divider_index(unsigned int cpu, unsigned int index)
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
- set_cpus_allowed(current, cpus_allowed);
+ set_cpus_allowed_ptr(current, &cpus_allowed);
}
static int us2e_freq_target(struct cpufreq_policy *policy,
diff --git a/arch/sparc/kernel/us3_cpufreq.c b/arch/sparc/kernel/us3_cpufreq.c
index 365b6464e2c..f35d1e79454 100644
--- a/arch/sparc/kernel/us3_cpufreq.c
+++ b/arch/sparc/kernel/us3_cpufreq.c
@@ -86,12 +86,12 @@ static unsigned int us3_freq_get(unsigned int cpu)
return 0;
cpus_allowed = current->cpus_allowed;
- set_cpus_allowed(current, cpumask_of_cpu(cpu));
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
reg = read_safari_cfg();
ret = get_current_freq(cpu, reg);
- set_cpus_allowed(current, cpus_allowed);
+ set_cpus_allowed_ptr(current, &cpus_allowed);
return ret;
}
@@ -106,7 +106,7 @@ static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index)
return;
cpus_allowed = current->cpus_allowed;
- set_cpus_allowed(current, cpumask_of_cpu(cpu));
+ set_cpus_allowed_ptr(current, cpumask_of(cpu));
new_freq = sparc64_get_clock_tick(cpu) / 1000;
switch (index) {
@@ -140,7 +140,7 @@ static void us3_set_cpu_divider_index(unsigned int cpu, unsigned int index)
cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
- set_cpus_allowed(current, cpus_allowed);
+ set_cpus_allowed_ptr(current, &cpus_allowed);
}
static int us3_freq_target(struct cpufreq_policy *policy,
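The sysfs and cpufreq hunks above all migrate the same pattern from set_cpus_allowed() to the pointer-based set_cpus_allowed_ptr()/cpumask_of() API: pin the current task to the target cpu, read its clock/divider state, then restore the saved affinity. A condensed sketch of that pattern (read_target_cpu_state() is a made-up placeholder and error handling is trimmed):

#include <linux/sched.h>
#include <linux/cpumask.h>

extern unsigned long read_target_cpu_state(void);      /* hypothetical */

/* Sketch only: run a short read on a specific cpu by temporarily
 * re-pinning the calling task, then restore its old affinity. */
static unsigned long run_on_cpu_sketch(unsigned int cpu)
{
        cpumask_t old_affinity = current->cpus_allowed; /* save the old mask */
        unsigned long val;

        if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
                return 0;                       /* could not migrate there */

        val = read_target_cpu_state();          /* placeholder for the work */

        set_cpus_allowed_ptr(current, &old_affinity);   /* put it back */
        return val;
}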
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 9245a822a2f..20beb806a53 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -2117,7 +2117,7 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node)
"node=%d entry=%lu/%lu\n", start, block, nr,
node,
addr >> VMEMMAP_CHUNK_SHIFT,
- VMEMMAP_SIZE >> VMEMMAP_CHUNK_SHIFT);
+ VMEMMAP_SIZE);
}
}
return 0;
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 635f03bb499..d07b44f7d1d 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -82,6 +82,9 @@ enum fixed_addresses {
#endif
FIX_DBGP_BASE,
FIX_EARLYCON_MEM_BASE,
+#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
+ FIX_OHCI1394_BASE,
+#endif
#ifdef CONFIG_X86_LOCAL_APIC
FIX_APIC_BASE, /* local (CPU) APIC) -- required for SMP or not */
#endif
@@ -132,9 +135,6 @@ enum fixed_addresses {
(__end_of_permanent_fixed_addresses & (TOTAL_FIX_BTMAPS - 1))
: __end_of_permanent_fixed_addresses,
FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
-#ifdef CONFIG_PROVIDE_OHCI1394_DMA_INIT
- FIX_OHCI1394_BASE,
-#endif
#ifdef CONFIG_X86_32
FIX_WP_TEST,
#endif
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index a929c9ede33..46c0fe05f23 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -133,6 +133,7 @@ extern void (*__initconst interrupt[NR_VECTORS-FIRST_EXTERNAL_VECTOR])(void);
typedef int vector_irq_t[NR_VECTORS];
DECLARE_PER_CPU(vector_irq_t, vector_irq);
+extern void setup_vector_irq(int cpu);
#ifdef CONFIG_X86_IO_APIC
extern void lock_vector_lock(void);
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 1cd58cdbc03..4604e6a54d3 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -105,6 +105,8 @@
#define MSR_AMD64_PATCH_LEVEL 0x0000008b
#define MSR_AMD64_NB_CFG 0xc001001f
#define MSR_AMD64_PATCH_LOADER 0xc0010020
+#define MSR_AMD64_OSVW_ID_LENGTH 0xc0010140
+#define MSR_AMD64_OSVW_STATUS 0xc0010141
#define MSR_AMD64_IBSFETCHCTL 0xc0011030
#define MSR_AMD64_IBSFETCHLINAD 0xc0011031
#define MSR_AMD64_IBSFETCHPHYSAD 0xc0011032
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index e4e0ddcb154..463de9a858a 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1268,6 +1268,14 @@ void __setup_vector_irq(int cpu)
/* Mark the inuse vectors */
for_each_irq_desc(irq, desc) {
cfg = desc->chip_data;
+
+ /*
+ * If it is a legacy IRQ handled by the legacy PIC, this cpu
+ * will be part of the irq_cfg's domain.
+ */
+ if (irq < legacy_pic->nr_legacy_irqs && !IO_APIC_IRQ(irq))
+ cpumask_set_cpu(cpu, cfg->domain);
+
if (!cpumask_test_cpu(cpu, cfg->domain))
continue;
vector = cfg->vector;
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 60398a0d947..53ea4cf1a87 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -28,6 +28,7 @@
#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
+#include <asm/compat.h>
static u64 perf_event_mask __read_mostly;
@@ -158,7 +159,7 @@ struct x86_pmu {
struct perf_event *event);
struct event_constraint *event_constraints;
- void (*cpu_prepare)(int cpu);
+ int (*cpu_prepare)(int cpu);
void (*cpu_starting)(int cpu);
void (*cpu_dying)(int cpu);
void (*cpu_dead)(int cpu);
@@ -1333,11 +1334,12 @@ static int __cpuinit
x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
unsigned int cpu = (long)hcpu;
+ int ret = NOTIFY_OK;
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
if (x86_pmu.cpu_prepare)
- x86_pmu.cpu_prepare(cpu);
+ ret = x86_pmu.cpu_prepare(cpu);
break;
case CPU_STARTING:
@@ -1350,6 +1352,7 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
x86_pmu.cpu_dying(cpu);
break;
+ case CPU_UP_CANCELED:
case CPU_DEAD:
if (x86_pmu.cpu_dead)
x86_pmu.cpu_dead(cpu);
@@ -1359,7 +1362,7 @@ x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
break;
}
- return NOTIFY_OK;
+ return ret;
}
static void __init pmu_check_apic(void)
@@ -1628,14 +1631,42 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
return len;
}
-static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
+#ifdef CONFIG_COMPAT
+static inline int
+perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
- unsigned long bytes;
+ /* 32-bit process in 64-bit kernel. */
+ struct stack_frame_ia32 frame;
+ const void __user *fp;
- bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));
+ if (!test_thread_flag(TIF_IA32))
+ return 0;
+
+ fp = compat_ptr(regs->bp);
+ while (entry->nr < PERF_MAX_STACK_DEPTH) {
+ unsigned long bytes;
+ frame.next_frame = 0;
+ frame.return_address = 0;
+
+ bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
+ if (bytes != sizeof(frame))
+ break;
+
+ if (fp < compat_ptr(regs->sp))
+ break;
- return bytes == sizeof(*frame);
+ callchain_store(entry, frame.return_address);
+ fp = compat_ptr(frame.next_frame);
+ }
+ return 1;
+}
+#else
+static inline int
+perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
+{
+ return 0;
}
+#endif
static void
perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
@@ -1651,11 +1682,16 @@ perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
callchain_store(entry, PERF_CONTEXT_USER);
callchain_store(entry, regs->ip);
+ if (perf_callchain_user32(regs, entry))
+ return;
+
while (entry->nr < PERF_MAX_STACK_DEPTH) {
+ unsigned long bytes;
frame.next_frame = NULL;
frame.return_address = 0;
- if (!copy_stack_frame(fp, &frame))
+ bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
+ if (bytes != sizeof(frame))
break;
if ((unsigned long)fp < regs->sp)
@@ -1702,7 +1738,6 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
return entry;
}
-#ifdef CONFIG_EVENT_TRACING
void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
{
regs->ip = ip;
@@ -1714,4 +1749,3 @@ void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int ski
regs->cs = __KERNEL_CS;
local_save_flags(regs->flags);
}
-#endif
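perf_callchain_user32() above walks a 32-bit task's user stack by chasing saved frame pointers, using the stack_frame_ia32 layout added to dumpstack.h further down in this patch. Stripped of the NMI-safe copy_from_user_nmi() and compat_ptr() handling, the walk reduces to the following sketch (illustration only; a real walker must never dereference the user pointer directly):

#include <stdint.h>

/* A saved ia32 frame: the caller's frame pointer followed by the
 * return address, as pushed by the usual function prologue. */
struct stack_frame_ia32 {
        uint32_t next_frame;
        uint32_t return_address;
};

/* Record up to 'max' return addresses starting from frame pointer 'fp'. */
static int walk_user_frames(const struct stack_frame_ia32 *fp,
                            uint32_t *ips, int max)
{
        int n = 0;

        while (fp && n < max) {
                struct stack_frame_ia32 frame = *fp;    /* sketch: unchecked */

                if (!frame.return_address)
                        break;
                ips[n++] = frame.return_address;
                fp = (const struct stack_frame_ia32 *)(uintptr_t)frame.next_frame;
        }
        return n;
}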
diff --git a/arch/x86/kernel/cpu/perf_event_amd.c b/arch/x86/kernel/cpu/perf_event_amd.c
index b87e0b6970c..db6f7d4056e 100644
--- a/arch/x86/kernel/cpu/perf_event_amd.c
+++ b/arch/x86/kernel/cpu/perf_event_amd.c
@@ -137,6 +137,13 @@ static inline int amd_is_nb_event(struct hw_perf_event *hwc)
return (hwc->config & 0xe0) == 0xe0;
}
+static inline int amd_has_nb(struct cpu_hw_events *cpuc)
+{
+ struct amd_nb *nb = cpuc->amd_nb;
+
+ return nb && nb->nb_id != -1;
+}
+
static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
struct perf_event *event)
{
@@ -147,7 +154,7 @@ static void amd_put_event_constraints(struct cpu_hw_events *cpuc,
/*
* only care about NB events
*/
- if (!(nb && amd_is_nb_event(hwc)))
+ if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
return;
/*
@@ -214,7 +221,7 @@ amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
/*
* if not NB event or no NB, then no constraints
*/
- if (!(nb && amd_is_nb_event(hwc)))
+ if (!(amd_has_nb(cpuc) && amd_is_nb_event(hwc)))
return &unconstrained;
/*
@@ -293,51 +300,55 @@ static struct amd_nb *amd_alloc_nb(int cpu, int nb_id)
return nb;
}
-static void amd_pmu_cpu_online(int cpu)
+static int amd_pmu_cpu_prepare(int cpu)
+{
+ struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+
+ WARN_ON_ONCE(cpuc->amd_nb);
+
+ if (boot_cpu_data.x86_max_cores < 2)
+ return NOTIFY_OK;
+
+ cpuc->amd_nb = amd_alloc_nb(cpu, -1);
+ if (!cpuc->amd_nb)
+ return NOTIFY_BAD;
+
+ return NOTIFY_OK;
+}
+
+static void amd_pmu_cpu_starting(int cpu)
{
- struct cpu_hw_events *cpu1, *cpu2;
- struct amd_nb *nb = NULL;
+ struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+ struct amd_nb *nb;
int i, nb_id;
if (boot_cpu_data.x86_max_cores < 2)
return;
- /*
- * function may be called too early in the
- * boot process, in which case nb_id is bogus
- */
nb_id = amd_get_nb_id(cpu);
- if (nb_id == BAD_APICID)
- return;
-
- cpu1 = &per_cpu(cpu_hw_events, cpu);
- cpu1->amd_nb = NULL;
+ WARN_ON_ONCE(nb_id == BAD_APICID);
raw_spin_lock(&amd_nb_lock);
for_each_online_cpu(i) {
- cpu2 = &per_cpu(cpu_hw_events, i);
- nb = cpu2->amd_nb;
- if (!nb)
+ nb = per_cpu(cpu_hw_events, i).amd_nb;
+ if (WARN_ON_ONCE(!nb))
continue;
- if (nb->nb_id == nb_id)
- goto found;
- }
- nb = amd_alloc_nb(cpu, nb_id);
- if (!nb) {
- pr_err("perf_events: failed NB allocation for CPU%d\n", cpu);
- raw_spin_unlock(&amd_nb_lock);
- return;
+ if (nb->nb_id == nb_id) {
+ kfree(cpuc->amd_nb);
+ cpuc->amd_nb = nb;
+ break;
+ }
}
-found:
- nb->refcnt++;
- cpu1->amd_nb = nb;
+
+ cpuc->amd_nb->nb_id = nb_id;
+ cpuc->amd_nb->refcnt++;
raw_spin_unlock(&amd_nb_lock);
}
-static void amd_pmu_cpu_offline(int cpu)
+static void amd_pmu_cpu_dead(int cpu)
{
struct cpu_hw_events *cpuhw;
@@ -349,8 +360,10 @@ static void amd_pmu_cpu_offline(int cpu)
raw_spin_lock(&amd_nb_lock);
if (cpuhw->amd_nb) {
- if (--cpuhw->amd_nb->refcnt == 0)
- kfree(cpuhw->amd_nb);
+ struct amd_nb *nb = cpuhw->amd_nb;
+
+ if (nb->nb_id == -1 || --nb->refcnt == 0)
+ kfree(nb);
cpuhw->amd_nb = NULL;
}
@@ -379,8 +392,9 @@ static __initconst struct x86_pmu amd_pmu = {
.get_event_constraints = amd_get_event_constraints,
.put_event_constraints = amd_put_event_constraints,
- .cpu_prepare = amd_pmu_cpu_online,
- .cpu_dead = amd_pmu_cpu_offline,
+ .cpu_prepare = amd_pmu_cpu_prepare,
+ .cpu_starting = amd_pmu_cpu_starting,
+ .cpu_dead = amd_pmu_cpu_dead,
};
static __init int amd_pmu_init(void)
diff --git a/arch/x86/kernel/dumpstack.h b/arch/x86/kernel/dumpstack.h
index 29e5f7c845b..e39e77168a3 100644
--- a/arch/x86/kernel/dumpstack.h
+++ b/arch/x86/kernel/dumpstack.h
@@ -30,6 +30,11 @@ struct stack_frame {
unsigned long return_address;
};
+struct stack_frame_ia32 {
+ u32 next_frame;
+ u32 return_address;
+};
+
static inline unsigned long rewind_frame_pointer(int n)
{
struct stack_frame *frame;
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index adedeef1ded..b2e24603739 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -7,6 +7,7 @@
#include <linux/init.h>
#include <linux/start_kernel.h>
+#include <linux/mm.h>
#include <asm/setup.h>
#include <asm/sections.h>
@@ -44,9 +45,10 @@ void __init i386_start_kernel(void)
#ifdef CONFIG_BLK_DEV_INITRD
/* Reserve INITRD */
if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
+ /* Assume only end is not page aligned */
u64 ramdisk_image = boot_params.hdr.ramdisk_image;
u64 ramdisk_size = boot_params.hdr.ramdisk_size;
- u64 ramdisk_end = ramdisk_image + ramdisk_size;
+ u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size);
reserve_early(ramdisk_image, ramdisk_end, "RAMDISK");
}
#endif
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index b5a9896ca1e..7147143fd61 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -103,9 +103,10 @@ void __init x86_64_start_reservations(char *real_mode_data)
#ifdef CONFIG_BLK_DEV_INITRD
/* Reserve INITRD */
if (boot_params.hdr.type_of_loader && boot_params.hdr.ramdisk_image) {
+ /* Assume only end is not page aligned */
unsigned long ramdisk_image = boot_params.hdr.ramdisk_image;
unsigned long ramdisk_size = boot_params.hdr.ramdisk_size;
- unsigned long ramdisk_end = ramdisk_image + ramdisk_size;
+ unsigned long ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size);
reserve_early(ramdisk_image, ramdisk_end, "RAMDISK");
}
#endif
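Both early ramdisk reservations above (32-bit and 64-bit boot paths) now round the end address up with PAGE_ALIGN(), so the trailing partial page is covered by the early reservation even though only the end may be unaligned. PAGE_ALIGN() is the usual round-up-to-page-boundary macro; a self-contained sketch of the arithmetic with made-up addresses and 4 KiB pages:

#include <stdint.h>

#define PAGE_SIZE       4096ULL
#define PAGE_MASK       (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
        uint64_t ramdisk_image = 0x37f2a000ULL; /* made-up, page aligned base */
        uint64_t ramdisk_size  = 0x0012a830ULL; /* made-up, not a page multiple */
        uint64_t ramdisk_end   = PAGE_ALIGN(ramdisk_image + ramdisk_size);

        /* reserve_early(ramdisk_image, ramdisk_end, "RAMDISK") now covers
         * the partial last page instead of stopping in the middle of it. */
        return ramdisk_end == 0x38055000ULL ? 0 : 1;
}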
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index ef257fc2921..f01d390f9c5 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -141,6 +141,28 @@ void __init init_IRQ(void)
x86_init.irqs.intr_init();
}
+/*
+ * Setup the vector to irq mappings.
+ */
+void setup_vector_irq(int cpu)
+{
+#ifndef CONFIG_X86_IO_APIC
+ int irq;
+
+ /*
+ * On most platforms the legacy PIC delivers interrupts only to the
+ * boot cpu. But there are certain platforms where PIC interrupts are
+ * delivered to multiple cpus. If the legacy IRQ is handled by the
+ * legacy PIC, for the new cpu that is coming online, setup the static
+ * legacy vector to irq mapping:
+ */
+ for (irq = 0; irq < legacy_pic->nr_legacy_irqs; irq++)
+ per_cpu(vector_irq, cpu)[IRQ0_VECTOR + irq] = irq;
+#endif
+
+ __setup_vector_irq(cpu);
+}
+
static void __init smp_intr_init(void)
{
#ifdef CONFIG_SMP
diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
index bfba6019d76..b2258ca9100 100644
--- a/arch/x86/kernel/kgdb.c
+++ b/arch/x86/kernel/kgdb.c
@@ -618,8 +618,8 @@ int kgdb_arch_init(void)
* portion of kgdb because this operation requires mutexs to
* complete.
*/
+ hw_breakpoint_init(&attr);
attr.bp_addr = (unsigned long)kgdb_arch_init;
- attr.type = PERF_TYPE_BREAKPOINT;
attr.bp_len = HW_BREAKPOINT_LEN_1;
attr.bp_type = HW_BREAKPOINT_W;
attr.disabled = 1;
diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
index ad9540676fc..28ad9f4d8b9 100644
--- a/arch/x86/kernel/process.c
+++ b/arch/x86/kernel/process.c
@@ -526,21 +526,37 @@ static int __cpuinit mwait_usable(const struct cpuinfo_x86 *c)
}
/*
- * Check for AMD CPUs, which have potentially C1E support
+ * Check for AMD CPUs, where APIC timer interrupt does not wake up CPU from C1e.
+ * For more information see
+ * - Erratum #400 for NPT family 0xf and family 0x10 CPUs
+ * - Erratum #365 for family 0x11 (not affected because C1e not in use)
*/
static int __cpuinit check_c1e_idle(const struct cpuinfo_x86 *c)
{
+ u64 val;
if (c->x86_vendor != X86_VENDOR_AMD)
- return 0;
-
- if (c->x86 < 0x0F)
- return 0;
+ goto no_c1e_idle;
/* Family 0x0f models < rev F do not have C1E */
- if (c->x86 == 0x0f && c->x86_model < 0x40)
- return 0;
+ if (c->x86 == 0x0F && c->x86_model >= 0x40)
+ return 1;
- return 1;
+ if (c->x86 == 0x10) {
+ /*
+ * check OSVW bit for CPUs that are not affected
+ * by erratum #400
+ */
+ rdmsrl(MSR_AMD64_OSVW_ID_LENGTH, val);
+ if (val >= 2) {
+ rdmsrl(MSR_AMD64_OSVW_STATUS, val);
+ if (!(val & BIT(1)))
+ goto no_c1e_idle;
+ }
+ return 1;
+ }
+
+no_c1e_idle:
+ return 0;
}
static cpumask_var_t c1e_mask;
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 5d7ba1a449b..d76e18570c6 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -314,16 +314,17 @@ static void __init reserve_brk(void)
#define MAX_MAP_CHUNK (NR_FIX_BTMAPS << PAGE_SHIFT)
static void __init relocate_initrd(void)
{
-
+ /* Assume only end is not page aligned */
u64 ramdisk_image = boot_params.hdr.ramdisk_image;
u64 ramdisk_size = boot_params.hdr.ramdisk_size;
+ u64 area_size = PAGE_ALIGN(ramdisk_size);
u64 end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT;
u64 ramdisk_here;
unsigned long slop, clen, mapaddr;
char *p, *q;
/* We need to move the initrd down into lowmem */
- ramdisk_here = find_e820_area(0, end_of_lowmem, ramdisk_size,
+ ramdisk_here = find_e820_area(0, end_of_lowmem, area_size,
PAGE_SIZE);
if (ramdisk_here == -1ULL)
@@ -332,7 +333,7 @@ static void __init relocate_initrd(void)
/* Note: this includes all the lowmem currently occupied by
the initrd, we rely on that fact to keep the data intact. */
- reserve_early(ramdisk_here, ramdisk_here + ramdisk_size,
+ reserve_early(ramdisk_here, ramdisk_here + area_size,
"NEW RAMDISK");
initrd_start = ramdisk_here + PAGE_OFFSET;
initrd_end = initrd_start + ramdisk_size;
@@ -376,9 +377,10 @@ static void __init relocate_initrd(void)
static void __init reserve_initrd(void)
{
+ /* Assume only end is not page aligned */
u64 ramdisk_image = boot_params.hdr.ramdisk_image;
u64 ramdisk_size = boot_params.hdr.ramdisk_size;
- u64 ramdisk_end = ramdisk_image + ramdisk_size;
+ u64 ramdisk_end = PAGE_ALIGN(ramdisk_image + ramdisk_size);
u64 end_of_lowmem = max_low_pfn_mapped << PAGE_SHIFT;
if (!boot_params.hdr.type_of_loader ||
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index a02e80c3c54..6808b934d6c 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -242,12 +242,10 @@ static void __cpuinit smp_callin(void)
end_local_APIC_setup();
map_cpu_to_logical_apicid();
- notify_cpu_starting(cpuid);
-
/*
* Need to setup vector mappings before we enable interrupts.
*/
- __setup_vector_irq(smp_processor_id());
+ setup_vector_irq(smp_processor_id());
/*
* Get our bogomips.
*
@@ -264,6 +262,8 @@ static void __cpuinit smp_callin(void)
*/
smp_store_cpu_info(cpuid);
+ notify_cpu_starting(cpuid);
+
/*
* Allow the master to continue.
*/
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 44879df5569..2cc249718c4 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -291,8 +291,8 @@ SECTIONS
.smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
__smp_locks = .;
*(.smp_locks)
- __smp_locks_end = .;
. = ALIGN(PAGE_SIZE);
+ __smp_locks_end = .;
}
#ifdef CONFIG_X86_64
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index e71c5cbc8f3..452ee5b8f30 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -331,11 +331,23 @@ int devmem_is_allowed(unsigned long pagenr)
void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
- unsigned long addr = begin;
+ unsigned long addr;
+ unsigned long begin_aligned, end_aligned;
- if (addr >= end)
+ /* Make sure boundaries are page aligned */
+ begin_aligned = PAGE_ALIGN(begin);
+ end_aligned = end & PAGE_MASK;
+
+ if (WARN_ON(begin_aligned != begin || end_aligned != end)) {
+ begin = begin_aligned;
+ end = end_aligned;
+ }
+
+ if (begin >= end)
return;
+ addr = begin;
+
/*
* If debugging page accesses then do not free this memory but
* mark them not present - any buggy init-section access will
@@ -343,7 +355,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
*/
#ifdef CONFIG_DEBUG_PAGEALLOC
printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n",
- begin, PAGE_ALIGN(end));
+ begin, end);
set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
#else
/*
@@ -358,8 +370,7 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
for (; addr < end; addr += PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
init_page_count(virt_to_page(addr));
- memset((void *)(addr & ~(PAGE_SIZE-1)),
- POISON_FREE_INITMEM, PAGE_SIZE);
+ memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
free_page(addr);
totalram_pages++;
}
@@ -376,6 +387,15 @@ void free_initmem(void)
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
- free_init_pages("initrd memory", start, end);
+ /*
+ * end may not be page aligned, and we cannot align it here:
+ * the decompressor could be confused by an aligned initrd_end.
+ * We already reserved the trailing partial page in
+ * - i386_start_kernel()
+ * - x86_64_start_kernel()
+ * - relocate_initrd()
+ * so PAGE_ALIGN() is safe here and lets the partial page be freed.
+ */
+ free_init_pages("initrd memory", start, PAGE_ALIGN(end));
}
#endif
diff --git a/arch/x86/pci/acpi.c b/arch/x86/pci/acpi.c
index 6e22454bfaa..e31160216ef 100644
--- a/arch/x86/pci/acpi.c
+++ b/arch/x86/pci/acpi.c
@@ -122,8 +122,8 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
struct acpi_resource_address64 addr;
acpi_status status;
unsigned long flags;
- struct resource *root;
- u64 start, end;
+ struct resource *root, *conflict;
+ u64 start, end, max_len;
status = resource_to_addr(acpi_res, &addr);
if (!ACPI_SUCCESS(status))
@@ -140,6 +140,17 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
} else
return AE_OK;
+ max_len = addr.maximum - addr.minimum + 1;
+ if (addr.address_length > max_len) {
+ dev_printk(KERN_DEBUG, &info->bridge->dev,
+ "host bridge window length %#llx doesn't fit in "
+ "%#llx-%#llx, trimming\n",
+ (unsigned long long) addr.address_length,
+ (unsigned long long) addr.minimum,
+ (unsigned long long) addr.maximum);
+ addr.address_length = max_len;
+ }
+
start = addr.minimum + addr.translation_offset;
end = start + addr.address_length - 1;
@@ -157,9 +168,12 @@ setup_resource(struct acpi_resource *acpi_res, void *data)
return AE_OK;
}
- if (insert_resource(root, res)) {
+ conflict = insert_resource_conflict(root, res);
+ if (conflict) {
dev_err(&info->bridge->dev,
- "can't allocate host bridge window %pR\n", res);
+ "address space collision: host bridge window %pR "
+ "conflicts with %s %pR\n",
+ res, conflict->name, conflict);
} else {
pci_bus_add_resource(info->bus, res, 0);
info->res_num++;
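
For illustration, a small standalone sketch of the window-trimming arithmetic introduced above (max_len = maximum - minimum + 1, then clamping the claimed length); the sample window values are hypothetical.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t minimum = 0xa0000000, maximum = 0xafffffff;
	uint64_t address_length = 0x20000000;        /* firmware claims 512 MB */
	uint64_t max_len = maximum - minimum + 1;    /* window only holds 256 MB */

	if (address_length > max_len) {
		printf("window length %#llx doesn't fit in %#llx-%#llx, trimming\n",
		       (unsigned long long)address_length,
		       (unsigned long long)minimum,
		       (unsigned long long)maximum);
		address_length = max_len;
	}

	/* end is then computed from the trimmed length, as in the patch */
	printf("start %#llx end %#llx\n",
	       (unsigned long long)minimum,
	       (unsigned long long)(minimum + address_length - 1));
	return 0;
}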
diff --git a/arch/x86/pci/i386.c b/arch/x86/pci/i386.c
index dece3eb9c90..46fd43f7910 100644
--- a/arch/x86/pci/i386.c
+++ b/arch/x86/pci/i386.c
@@ -127,9 +127,6 @@ static void __init pcibios_allocate_bus_resources(struct list_head *bus_list)
continue;
if (!r->start ||
pci_claim_resource(dev, idx) < 0) {
- dev_info(&dev->dev,
- "can't reserve window %pR\n",
- r);
/*
* Something is wrong with the region.
* Invalidate the resource to prevent
@@ -181,8 +178,6 @@ static void __init pcibios_allocate_resources(int pass)
"BAR %d: reserving %pr (d=%d, p=%d)\n",
idx, r, disabled, pass);
if (pci_claim_resource(dev, idx) < 0) {
- dev_info(&dev->dev,
- "can't reserve %pR\n", r);
/* We'll assign a new address later */
r->end -= r->start;
r->start = 0;
diff --git a/drivers/ata/pata_via.c b/drivers/ata/pata_via.c
index 95d39c36ace..c59b40710fb 100644
--- a/drivers/ata/pata_via.c
+++ b/drivers/ata/pata_via.c
@@ -576,6 +576,10 @@ static int via_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
u8 rev = isa->revision;
pci_dev_put(isa);
+ if ((id->device == 0x0415 || id->device == 0x3164) &&
+ (config->id != id->device))
+ continue;
+
if (rev >= config->rev_min && rev <= config->rev_max)
break;
}
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index d477f4dc5e5..941fcb87e52 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -439,8 +439,23 @@ static int device_resume_noirq(struct device *dev, pm_message_t state)
if (dev->bus && dev->bus->pm) {
pm_dev_dbg(dev, state, "EARLY ");
error = pm_noirq_op(dev, dev->bus->pm, state);
+ if (error)
+ goto End;
}
+ if (dev->type && dev->type->pm) {
+ pm_dev_dbg(dev, state, "EARLY type ");
+ error = pm_noirq_op(dev, dev->type->pm, state);
+ if (error)
+ goto End;
+ }
+
+ if (dev->class && dev->class->pm) {
+ pm_dev_dbg(dev, state, "EARLY class ");
+ error = pm_noirq_op(dev, dev->class->pm, state);
+ }
+
+End:
TRACE_RESUME(error);
return error;
}
@@ -735,10 +750,26 @@ static int device_suspend_noirq(struct device *dev, pm_message_t state)
{
int error = 0;
+ if (dev->class && dev->class->pm) {
+ pm_dev_dbg(dev, state, "LATE class ");
+ error = pm_noirq_op(dev, dev->class->pm, state);
+ if (error)
+ goto End;
+ }
+
+ if (dev->type && dev->type->pm) {
+ pm_dev_dbg(dev, state, "LATE type ");
+ error = pm_noirq_op(dev, dev->type->pm, state);
+ if (error)
+ goto End;
+ }
+
if (dev->bus && dev->bus->pm) {
pm_dev_dbg(dev, state, "LATE ");
error = pm_noirq_op(dev, dev->bus->pm, state);
}
+
+End:
return error;
}
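
A minimal sketch, assuming stand-in types, of the ordering these hunks establish: resume_noirq walks bus -> type -> class while suspend_noirq walks the reverse, and both stop at the first error. The struct and callbacks below are illustrative, not the real driver-core API.

#include <stdio.h>

struct fake_dev {
	int (*bus_op)(void);
	int (*type_op)(void);
	int (*class_op)(void);
};

static int resume_noirq(struct fake_dev *dev)
{
	int error = 0;

	if (dev->bus_op && (error = dev->bus_op()))
		return error;
	if (dev->type_op && (error = dev->type_op()))
		return error;
	if (dev->class_op)
		error = dev->class_op();
	return error;
}

static int suspend_noirq(struct fake_dev *dev)
{
	int error = 0;

	if (dev->class_op && (error = dev->class_op()))
		return error;
	if (dev->type_op && (error = dev->type_op()))
		return error;
	if (dev->bus_op)
		error = dev->bus_op();
	return error;
}

static int ok(void) { return 0; }

int main(void)
{
	struct fake_dev dev = { ok, ok, ok };

	printf("resume: %d, suspend: %d\n",
	       resume_noirq(&dev), suspend_noirq(&dev));
	return 0;
}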
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index a42c466f709..6da962c9b21 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1423,6 +1423,8 @@ static void release_one_tty(struct work_struct *work)
list_del_init(&tty->tty_files);
file_list_unlock();
+ put_pid(tty->pgrp);
+ put_pid(tty->session);
free_tty_struct(tty);
}
diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
index 5db0518c66d..882472d1e14 100644
--- a/drivers/firewire/core-device.c
+++ b/drivers/firewire/core-device.c
@@ -126,97 +126,74 @@ int fw_csr_string(const u32 *directory, int key, char *buf, size_t size)
}
EXPORT_SYMBOL(fw_csr_string);
-static bool is_fw_unit(struct device *dev);
-
-static int match_unit_directory(const u32 *directory, u32 match_flags,
- const struct ieee1394_device_id *id)
+static void get_ids(const u32 *directory, int *id)
{
struct fw_csr_iterator ci;
- int key, value, match;
+ int key, value;
- match = 0;
fw_csr_iterator_init(&ci, directory);
while (fw_csr_iterator_next(&ci, &key, &value)) {
- if (key == CSR_VENDOR && value == id->vendor_id)
- match |= IEEE1394_MATCH_VENDOR_ID;
- if (key == CSR_MODEL && value == id->model_id)
- match |= IEEE1394_MATCH_MODEL_ID;
- if (key == CSR_SPECIFIER_ID && value == id->specifier_id)
- match |= IEEE1394_MATCH_SPECIFIER_ID;
- if (key == CSR_VERSION && value == id->version)
- match |= IEEE1394_MATCH_VERSION;
+ switch (key) {
+ case CSR_VENDOR: id[0] = value; break;
+ case CSR_MODEL: id[1] = value; break;
+ case CSR_SPECIFIER_ID: id[2] = value; break;
+ case CSR_VERSION: id[3] = value; break;
+ }
}
+}
+
+static void get_modalias_ids(struct fw_unit *unit, int *id)
+{
+ get_ids(&fw_parent_device(unit)->config_rom[5], id);
+ get_ids(unit->directory, id);
+}
+
+static bool match_ids(const struct ieee1394_device_id *id_table, int *id)
+{
+ int match = 0;
+
+ if (id[0] == id_table->vendor_id)
+ match |= IEEE1394_MATCH_VENDOR_ID;
+ if (id[1] == id_table->model_id)
+ match |= IEEE1394_MATCH_MODEL_ID;
+ if (id[2] == id_table->specifier_id)
+ match |= IEEE1394_MATCH_SPECIFIER_ID;
+ if (id[3] == id_table->version)
+ match |= IEEE1394_MATCH_VERSION;
- return (match & match_flags) == match_flags;
+ return (match & id_table->match_flags) == id_table->match_flags;
}
+static bool is_fw_unit(struct device *dev);
+
static int fw_unit_match(struct device *dev, struct device_driver *drv)
{
- struct fw_unit *unit = fw_unit(dev);
- struct fw_device *device;
- const struct ieee1394_device_id *id;
+ const struct ieee1394_device_id *id_table =
+ container_of(drv, struct fw_driver, driver)->id_table;
+ int id[] = {0, 0, 0, 0};
/* We only allow binding to fw_units. */
if (!is_fw_unit(dev))
return 0;
- device = fw_parent_device(unit);
- id = container_of(drv, struct fw_driver, driver)->id_table;
+ get_modalias_ids(fw_unit(dev), id);
- for (; id->match_flags != 0; id++) {
- if (match_unit_directory(unit->directory, id->match_flags, id))
+ for (; id_table->match_flags != 0; id_table++)
+ if (match_ids(id_table, id))
return 1;
- /* Also check vendor ID in the root directory. */
- if ((id->match_flags & IEEE1394_MATCH_VENDOR_ID) &&
- match_unit_directory(&device->config_rom[5],
- IEEE1394_MATCH_VENDOR_ID, id) &&
- match_unit_directory(unit->directory, id->match_flags
- & ~IEEE1394_MATCH_VENDOR_ID, id))
- return 1;
- }
-
return 0;
}
static int get_modalias(struct fw_unit *unit, char *buffer, size_t buffer_size)
{
- struct fw_device *device = fw_parent_device(unit);
- struct fw_csr_iterator ci;
+ int id[] = {0, 0, 0, 0};
- int key, value;
- int vendor = 0;
- int model = 0;
- int specifier_id = 0;
- int version = 0;
-
- fw_csr_iterator_init(&ci, &device->config_rom[5]);
- while (fw_csr_iterator_next(&ci, &key, &value)) {
- switch (key) {
- case CSR_VENDOR:
- vendor = value;
- break;
- case CSR_MODEL:
- model = value;
- break;
- }
- }
-
- fw_csr_iterator_init(&ci, unit->directory);
- while (fw_csr_iterator_next(&ci, &key, &value)) {
- switch (key) {
- case CSR_SPECIFIER_ID:
- specifier_id = value;
- break;
- case CSR_VERSION:
- version = value;
- break;
- }
- }
+ get_modalias_ids(unit, id);
return snprintf(buffer, buffer_size,
"ieee1394:ven%08Xmo%08Xsp%08Xver%08X",
- vendor, model, specifier_id, version);
+ id[0], id[1], id[2], id[3]);
}
static int fw_unit_uevent(struct device *dev, struct kobj_uevent_env *env)
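
The rewritten matching above collects the device's four CSR ids once and compares them against each table entry's match_flags. A compact sketch of that bitmask technique follows; the flag values and sample table are illustrative, not the IEEE 1394 ones.

#include <stdio.h>

#define MATCH_VENDOR  0x1
#define MATCH_MODEL   0x2
#define MATCH_VERSION 0x4

struct id_entry { unsigned match_flags; int vendor, model, version; };

static int matches(const struct id_entry *e, const int id[3])
{
	unsigned match = 0;

	if (id[0] == e->vendor)  match |= MATCH_VENDOR;
	if (id[1] == e->model)   match |= MATCH_MODEL;
	if (id[2] == e->version) match |= MATCH_VERSION;

	/* Every field the entry asks for must have matched. */
	return (match & e->match_flags) == e->match_flags;
}

int main(void)
{
	struct id_entry e = { MATCH_VENDOR | MATCH_VERSION, 0x1234, 0, 7 };
	int dev[3] = { 0x1234, 0x9999, 7 };   /* model differs, but isn't required */

	printf("match: %d\n", matches(&e, dev));
	return 0;
}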
diff --git a/drivers/firewire/core-iso.c b/drivers/firewire/core-iso.c
index 1c0b504a42f..99c20f1b613 100644
--- a/drivers/firewire/core-iso.c
+++ b/drivers/firewire/core-iso.c
@@ -331,8 +331,9 @@ void fw_iso_resource_manage(struct fw_card *card, int generation,
if (ret < 0)
*bandwidth = 0;
- if (allocate && ret < 0 && c >= 0) {
- deallocate_channel(card, irm_id, generation, c, buffer);
+ if (allocate && ret < 0) {
+ if (c >= 0)
+ deallocate_channel(card, irm_id, generation, c, buffer);
*channel = ret;
}
}
diff --git a/drivers/firewire/ohci.c b/drivers/firewire/ohci.c
index 75dc6988cff..e33917bf97d 100644
--- a/drivers/firewire/ohci.c
+++ b/drivers/firewire/ohci.c
@@ -231,6 +231,8 @@ static inline struct fw_ohci *fw_ohci(struct fw_card *card)
static char ohci_driver_name[] = KBUILD_MODNAME;
+#define PCI_DEVICE_ID_TI_TSB12LV22 0x8009
+
#define QUIRK_CYCLE_TIMER 1
#define QUIRK_RESET_PACKET 2
#define QUIRK_BE_HEADERS 4
@@ -239,6 +241,8 @@ static char ohci_driver_name[] = KBUILD_MODNAME;
static const struct {
unsigned short vendor, device, flags;
} ohci_quirks[] = {
+ {PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, QUIRK_CYCLE_TIMER |
+ QUIRK_RESET_PACKET},
{PCI_VENDOR_ID_TI, PCI_ANY_ID, QUIRK_RESET_PACKET},
{PCI_VENDOR_ID_AL, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
{PCI_VENDOR_ID_NEC, PCI_ANY_ID, QUIRK_CYCLE_TIMER},
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index f2aaf39be39..51103aa469f 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -104,6 +104,7 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
if (connector->status == connector_status_disconnected) {
DRM_DEBUG_KMS("%s is disconnected\n",
drm_get_connector_name(connector));
+ drm_mode_connector_update_edid_property(connector, NULL);
goto prune;
}
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index f97e7c42ac8..7e608f4a0df 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -707,15 +707,6 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
mode->vsync_end = mode->vsync_start + vsync_pulse_width;
mode->vtotal = mode->vdisplay + vblank;
- /* perform the basic check for the detailed timing */
- if (mode->hsync_end > mode->htotal ||
- mode->vsync_end > mode->vtotal) {
- drm_mode_destroy(dev, mode);
- DRM_DEBUG_KMS("Incorrect detailed timing. "
- "Sync is beyond the blank.\n");
- return NULL;
- }
-
/* Some EDIDs have bogus h/vtotal values */
if (mode->hsync_end > mode->htotal)
mode->htotal = mode->hsync_end + 1;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 50549703584..99487237111 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -283,6 +283,8 @@ static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
.help_msg = "force-fb(V)",
.action_msg = "Restore framebuffer console",
};
+#else
+static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = { };
#endif
static void drm_fb_helper_on(struct fb_info *info)
diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
index 08d14df3bb4..4804872f8b1 100644
--- a/drivers/gpu/drm/drm_fops.c
+++ b/drivers/gpu/drm/drm_fops.c
@@ -140,14 +140,16 @@ int drm_open(struct inode *inode, struct file *filp)
spin_unlock(&dev->count_lock);
}
out:
- mutex_lock(&dev->struct_mutex);
- if (minor->type == DRM_MINOR_LEGACY) {
- BUG_ON((dev->dev_mapping != NULL) &&
- (dev->dev_mapping != inode->i_mapping));
- if (dev->dev_mapping == NULL)
- dev->dev_mapping = inode->i_mapping;
+ if (!retcode) {
+ mutex_lock(&dev->struct_mutex);
+ if (minor->type == DRM_MINOR_LEGACY) {
+ if (dev->dev_mapping == NULL)
+ dev->dev_mapping = inode->i_mapping;
+ else if (dev->dev_mapping != inode->i_mapping)
+ retcode = -ENODEV;
+ }
+ mutex_unlock(&dev->struct_mutex);
}
- mutex_unlock(&dev->struct_mutex);
return retcode;
}
diff --git a/drivers/gpu/drm/nouveau/Makefile b/drivers/gpu/drm/nouveau/Makefile
index 32db806f3b5..7f0d807a0d0 100644
--- a/drivers/gpu/drm/nouveau/Makefile
+++ b/drivers/gpu/drm/nouveau/Makefile
@@ -12,7 +12,7 @@ nouveau-y := nouveau_drv.o nouveau_state.o nouveau_channel.o nouveau_mem.o \
nouveau_dp.o nouveau_grctx.o \
nv04_timer.o \
nv04_mc.o nv40_mc.o nv50_mc.o \
- nv04_fb.o nv10_fb.o nv40_fb.o \
+ nv04_fb.o nv10_fb.o nv40_fb.o nv50_fb.o \
nv04_fifo.o nv10_fifo.o nv40_fifo.o nv50_fifo.o \
nv04_graph.o nv10_graph.o nv20_graph.o \
nv40_graph.o nv50_graph.o \
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
index 75bceee7604..b5a9336a2e8 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
@@ -5211,6 +5211,21 @@ divine_connector_type(struct nvbios *bios, int index)
}
static void
+apply_dcb_connector_quirks(struct nvbios *bios, int idx)
+{
+ struct dcb_connector_table_entry *cte = &bios->dcb.connector.entry[idx];
+ struct drm_device *dev = bios->dev;
+
+ /* Gigabyte NX85T */
+ if ((dev->pdev->device == 0x0421) &&
+ (dev->pdev->subsystem_vendor == 0x1458) &&
+ (dev->pdev->subsystem_device == 0x344c)) {
+ if (cte->type == DCB_CONNECTOR_HDMI_1)
+ cte->type = DCB_CONNECTOR_DVI_I;
+ }
+}
+
+static void
parse_dcb_connector_table(struct nvbios *bios)
{
struct drm_device *dev = bios->dev;
@@ -5238,13 +5253,14 @@ parse_dcb_connector_table(struct nvbios *bios)
entry = conntab + conntab[1];
cte = &ct->entry[0];
for (i = 0; i < conntab[2]; i++, entry += conntab[3], cte++) {
+ cte->index = i;
if (conntab[3] == 2)
cte->entry = ROM16(entry[0]);
else
cte->entry = ROM32(entry[0]);
cte->type = (cte->entry & 0x000000ff) >> 0;
- cte->index = (cte->entry & 0x00000f00) >> 8;
+ cte->index2 = (cte->entry & 0x00000f00) >> 8;
switch (cte->entry & 0x00033000) {
case 0x00001000:
cte->gpio_tag = 0x07;
@@ -5266,6 +5282,8 @@ parse_dcb_connector_table(struct nvbios *bios)
if (cte->type == 0xff)
continue;
+ apply_dcb_connector_quirks(bios, i);
+
NV_INFO(dev, " %d: 0x%08x: type 0x%02x idx %d tag 0x%02x\n",
i, cte->entry, cte->type, cte->index, cte->gpio_tag);
@@ -5287,10 +5305,16 @@ parse_dcb_connector_table(struct nvbios *bios)
break;
default:
cte->type = divine_connector_type(bios, cte->index);
- NV_WARN(dev, "unknown type, using 0x%02x", cte->type);
+ NV_WARN(dev, "unknown type, using 0x%02x\n", cte->type);
break;
}
+ if (nouveau_override_conntype) {
+ int type = divine_connector_type(bios, cte->index);
+ if (type != cte->type)
+ NV_WARN(dev, " -> type 0x%02x\n", cte->type);
+ }
+
}
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.h b/drivers/gpu/drm/nouveau/nouveau_bios.h
index 9f688aa9a65..4f88e6924d2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bios.h
+++ b/drivers/gpu/drm/nouveau/nouveau_bios.h
@@ -72,9 +72,10 @@ enum dcb_connector_type {
};
struct dcb_connector_table_entry {
+ uint8_t index;
uint32_t entry;
enum dcb_connector_type type;
- uint8_t index;
+ uint8_t index2;
uint8_t gpio_tag;
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 028719fddf7..026612471c9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -439,8 +439,7 @@ nouveau_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
switch (bo->mem.mem_type) {
case TTM_PL_VRAM:
- nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT |
- TTM_PL_FLAG_SYSTEM);
+ nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_TT);
break;
default:
nouveau_bo_placement_set(nvbo, TTM_PL_FLAG_SYSTEM);
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 24327f468c4..14afe1e47e5 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -302,7 +302,7 @@ nouveau_connector_detect(struct drm_connector *connector)
detect_analog:
nv_encoder = find_encoder_by_type(connector, OUTPUT_ANALOG);
- if (!nv_encoder)
+ if (!nv_encoder && !nouveau_tv_disable)
nv_encoder = find_encoder_by_type(connector, OUTPUT_TV);
if (nv_encoder) {
struct drm_encoder *encoder = to_drm_encoder(nv_encoder);
diff --git a/drivers/gpu/drm/nouveau/nouveau_dma.c b/drivers/gpu/drm/nouveau/nouveau_dma.c
index c8482a108a7..65c441a1999 100644
--- a/drivers/gpu/drm/nouveau/nouveau_dma.c
+++ b/drivers/gpu/drm/nouveau/nouveau_dma.c
@@ -190,6 +190,11 @@ nv50_dma_push(struct nouveau_channel *chan, struct nouveau_bo *bo,
nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8);
chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max;
+
+ DRM_MEMORYBARRIER();
+ /* Flush writes. */
+ nouveau_bo_rd32(pb, 0);
+
nvchan_wr32(chan, 0x8c, chan->dma.ib_put);
chan->dma.ib_free--;
}
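
The hunk above inserts a barrier and a read-back before ringing the doorbell. A generic userspace sketch of that "flush posted writes, then notify" pattern follows; write_pushbuf(), read_pushbuf() and write_doorbell() are hypothetical stand-ins for the nouveau_bo_wr32/nouveau_bo_rd32/nvchan_wr32 accessors in the patch.

#include <stdint.h>
#include <stdio.h>

static uint32_t pushbuf[64];

static void write_pushbuf(unsigned idx, uint32_t value) { pushbuf[idx] = value; }
static uint32_t read_pushbuf(unsigned idx) { return pushbuf[idx]; }
static void write_doorbell(uint32_t put) { printf("doorbell <- %u\n", put); }

static void submit(uint32_t put, uint32_t lo, uint32_t hi)
{
	/* 1. Queue the new pushbuffer entry. */
	write_pushbuf(put * 2 + 0, lo);
	write_pushbuf(put * 2 + 1, hi);

	/* 2. Full memory barrier: order the entry writes before what follows
	 *    (the patch uses DRM_MEMORYBARRIER() here). */
	__sync_synchronize();

	/* 3. Read back from the buffer so posted writes are flushed before
	 *    the GPU sees the new put pointer (nouveau_bo_rd32(pb, 0) above). */
	(void)read_pushbuf(0);

	/* 4. Only now ring the doorbell. */
	write_doorbell(put);
}

int main(void)
{
	submit(1, 0x1000, 0x20);
	return 0;
}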
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.c b/drivers/gpu/drm/nouveau/nouveau_drv.c
index 30cc09e8a70..1de974acbc6 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.c
@@ -83,6 +83,14 @@ MODULE_PARM_DESC(nofbaccel, "Disable fbcon acceleration");
int nouveau_nofbaccel = 0;
module_param_named(nofbaccel, nouveau_nofbaccel, int, 0400);
+MODULE_PARM_DESC(override_conntype, "Ignore DCB connector type");
+int nouveau_override_conntype = 0;
+module_param_named(override_conntype, nouveau_override_conntype, int, 0400);
+
+MODULE_PARM_DESC(tv_disable, "Disable TV-out detection\n");
+int nouveau_tv_disable = 0;
+module_param_named(tv_disable, nouveau_tv_disable, int, 0400);
+
MODULE_PARM_DESC(tv_norm, "Default TV norm.\n"
"\t\tSupported: PAL, PAL-M, PAL-N, PAL-Nc, NTSC-M, NTSC-J,\n"
"\t\t\thd480i, hd480p, hd576i, hd576p, hd720p, hd1080i.\n"
@@ -154,9 +162,11 @@ nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state)
if (pm_state.event == PM_EVENT_PRETHAW)
return 0;
+ NV_INFO(dev, "Disabling fbcon acceleration...\n");
fbdev_flags = dev_priv->fbdev_info->flags;
dev_priv->fbdev_info->flags |= FBINFO_HWACCEL_DISABLED;
+ NV_INFO(dev, "Unpinning framebuffer(s)...\n");
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_framebuffer *nouveau_fb;
diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
index 4b9aaf2a8d0..d8b55901177 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
+++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
@@ -681,6 +681,7 @@ extern int nouveau_uscript_tmds;
extern int nouveau_vram_pushbuf;
extern int nouveau_vram_notify;
extern int nouveau_fbpercrtc;
+extern int nouveau_tv_disable;
extern char *nouveau_tv_norm;
extern int nouveau_reg_debug;
extern char *nouveau_vbios;
@@ -688,6 +689,7 @@ extern int nouveau_ctxfw;
extern int nouveau_ignorelid;
extern int nouveau_nofbaccel;
extern int nouveau_noaccel;
+extern int nouveau_override_conntype;
extern int nouveau_pci_suspend(struct pci_dev *pdev, pm_message_t pm_state);
extern int nouveau_pci_resume(struct pci_dev *pdev);
@@ -926,6 +928,10 @@ extern void nv40_fb_takedown(struct drm_device *);
extern void nv40_fb_set_region_tiling(struct drm_device *, int, uint32_t,
uint32_t, uint32_t);
+/* nv50_fb.c */
+extern int nv50_fb_init(struct drm_device *);
+extern void nv50_fb_takedown(struct drm_device *);
+
/* nv04_fifo.c */
extern int nv04_fifo_init(struct drm_device *);
extern void nv04_fifo_disable(struct drm_device *);
diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
index 95220ddebb4..2bd59a92fee 100644
--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
+++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
@@ -311,6 +311,31 @@ nouveau_print_bitfield_names_(uint32_t value,
#define nouveau_print_bitfield_names(val, namelist) \
nouveau_print_bitfield_names_((val), (namelist), ARRAY_SIZE(namelist))
+struct nouveau_enum_names {
+ uint32_t value;
+ const char *name;
+};
+
+static void
+nouveau_print_enum_names_(uint32_t value,
+ const struct nouveau_enum_names *namelist,
+ const int namelist_len)
+{
+ /*
+ * Caller must have already printed the KERN_* log level for us.
+ * Also the caller is responsible for adding the newline.
+ */
+ int i;
+ for (i = 0; i < namelist_len; ++i) {
+ if (value == namelist[i].value) {
+ printk("%s", namelist[i].name);
+ return;
+ }
+ }
+ printk("unknown value 0x%08x", value);
+}
+#define nouveau_print_enum_names(val, namelist) \
+ nouveau_print_enum_names_((val), (namelist), ARRAY_SIZE(namelist))
static int
nouveau_graph_chid_from_grctx(struct drm_device *dev)
@@ -427,14 +452,16 @@ nouveau_graph_dump_trap_info(struct drm_device *dev, const char *id,
struct drm_nouveau_private *dev_priv = dev->dev_private;
uint32_t nsource = trap->nsource, nstatus = trap->nstatus;
- NV_INFO(dev, "%s - nSource:", id);
- nouveau_print_bitfield_names(nsource, nsource_names);
- printk(", nStatus:");
- if (dev_priv->card_type < NV_10)
- nouveau_print_bitfield_names(nstatus, nstatus_names);
- else
- nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
- printk("\n");
+ if (dev_priv->card_type < NV_50) {
+ NV_INFO(dev, "%s - nSource:", id);
+ nouveau_print_bitfield_names(nsource, nsource_names);
+ printk(", nStatus:");
+ if (dev_priv->card_type < NV_10)
+ nouveau_print_bitfield_names(nstatus, nstatus_names);
+ else
+ nouveau_print_bitfield_names(nstatus, nstatus_names_nv10);
+ printk("\n");
+ }
NV_INFO(dev, "%s - Ch %d/%d Class 0x%04x Mthd 0x%04x "
"Data 0x%08x:0x%08x\n",
@@ -578,27 +605,502 @@ nouveau_pgraph_irq_handler(struct drm_device *dev)
}
static void
+nv50_pfb_vm_trap(struct drm_device *dev, int display, const char *name)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t trap[6];
+ int i, ch;
+ uint32_t idx = nv_rd32(dev, 0x100c90);
+ if (idx & 0x80000000) {
+ idx &= 0xffffff;
+ if (display) {
+ for (i = 0; i < 6; i++) {
+ nv_wr32(dev, 0x100c90, idx | i << 24);
+ trap[i] = nv_rd32(dev, 0x100c94);
+ }
+ for (ch = 0; ch < dev_priv->engine.fifo.channels; ch++) {
+ struct nouveau_channel *chan = dev_priv->fifos[ch];
+
+ if (!chan || !chan->ramin)
+ continue;
+
+ if (trap[1] == chan->ramin->instance >> 12)
+ break;
+ }
+ NV_INFO(dev, "%s - VM: Trapped %s at %02x%04x%04x status %08x %08x channel %d\n",
+ name, (trap[5]&0x100?"read":"write"),
+ trap[5]&0xff, trap[4]&0xffff,
+ trap[3]&0xffff, trap[0], trap[2], ch);
+ }
+ nv_wr32(dev, 0x100c90, idx | 0x80000000);
+ } else if (display) {
+ NV_INFO(dev, "%s - no VM fault?\n", name);
+ }
+}
+
+static struct nouveau_enum_names nv50_mp_exec_error_names[] =
+{
+ { 3, "STACK_UNDERFLOW" },
+ { 4, "QUADON_ACTIVE" },
+ { 8, "TIMEOUT" },
+ { 0x10, "INVALID_OPCODE" },
+ { 0x40, "BREAKPOINT" },
+};
+
+static void
+nv50_pgraph_mp_trap(struct drm_device *dev, int tpid, int display)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t units = nv_rd32(dev, 0x1540);
+ uint32_t addr, mp10, status, pc, oplow, ophigh;
+ int i;
+ int mps = 0;
+ for (i = 0; i < 4; i++) {
+ if (!(units & 1 << (i+24)))
+ continue;
+ if (dev_priv->chipset < 0xa0)
+ addr = 0x408200 + (tpid << 12) + (i << 7);
+ else
+ addr = 0x408100 + (tpid << 11) + (i << 7);
+ mp10 = nv_rd32(dev, addr + 0x10);
+ status = nv_rd32(dev, addr + 0x14);
+ if (!status)
+ continue;
+ if (display) {
+ nv_rd32(dev, addr + 0x20);
+ pc = nv_rd32(dev, addr + 0x24);
+ oplow = nv_rd32(dev, addr + 0x70);
+ ophigh = nv_rd32(dev, addr + 0x74);
+ NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - "
+ "TP %d MP %d: ", tpid, i);
+ nouveau_print_enum_names(status,
+ nv50_mp_exec_error_names);
+ printk(" at %06x warp %d, opcode %08x %08x\n",
+ pc&0xffffff, pc >> 24,
+ oplow, ophigh);
+ }
+ nv_wr32(dev, addr + 0x10, mp10);
+ nv_wr32(dev, addr + 0x14, 0);
+ mps++;
+ }
+ if (!mps && display)
+ NV_INFO(dev, "PGRAPH_TRAP_MP_EXEC - TP %d: "
+ "No MPs claiming errors?\n", tpid);
+}
+
+static void
+nv50_pgraph_tp_trap(struct drm_device *dev, int type, uint32_t ustatus_old,
+ uint32_t ustatus_new, int display, const char *name)
+{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ int tps = 0;
+ uint32_t units = nv_rd32(dev, 0x1540);
+ int i, r;
+ uint32_t ustatus_addr, ustatus;
+ for (i = 0; i < 16; i++) {
+ if (!(units & (1 << i)))
+ continue;
+ if (dev_priv->chipset < 0xa0)
+ ustatus_addr = ustatus_old + (i << 12);
+ else
+ ustatus_addr = ustatus_new + (i << 11);
+ ustatus = nv_rd32(dev, ustatus_addr) & 0x7fffffff;
+ if (!ustatus)
+ continue;
+ tps++;
+ switch (type) {
+ case 6: /* texture error... unknown for now */
+ nv50_pfb_vm_trap(dev, display, name);
+ if (display) {
+ NV_ERROR(dev, "magic set %d:\n", i);
+ for (r = ustatus_addr + 4; r <= ustatus_addr + 0x10; r += 4)
+ NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
+ nv_rd32(dev, r));
+ }
+ break;
+ case 7: /* MP error */
+ if (ustatus & 0x00010000) {
+ nv50_pgraph_mp_trap(dev, i, display);
+ ustatus &= ~0x00010000;
+ }
+ break;
+ case 8: /* TPDMA error */
+ {
+ uint32_t e0c = nv_rd32(dev, ustatus_addr + 4);
+ uint32_t e10 = nv_rd32(dev, ustatus_addr + 8);
+ uint32_t e14 = nv_rd32(dev, ustatus_addr + 0xc);
+ uint32_t e18 = nv_rd32(dev, ustatus_addr + 0x10);
+ uint32_t e1c = nv_rd32(dev, ustatus_addr + 0x14);
+ uint32_t e20 = nv_rd32(dev, ustatus_addr + 0x18);
+ uint32_t e24 = nv_rd32(dev, ustatus_addr + 0x1c);
+ nv50_pfb_vm_trap(dev, display, name);
+ /* 2d engine destination */
+ if (ustatus & 0x00000010) {
+ if (display) {
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - Unknown fault at address %02x%08x\n",
+ i, e14, e10);
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA_2D - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+ i, e0c, e18, e1c, e20, e24);
+ }
+ ustatus &= ~0x00000010;
+ }
+ /* Render target */
+ if (ustatus & 0x00000040) {
+ if (display) {
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - Unknown fault at address %02x%08x\n",
+ i, e14, e10);
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA_RT - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+ i, e0c, e18, e1c, e20, e24);
+ }
+ ustatus &= ~0x00000040;
+ }
+ /* CUDA memory: l[], g[] or stack. */
+ if (ustatus & 0x00000080) {
+ if (display) {
+ if (e18 & 0x80000000) {
+ /* g[] read fault? */
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global read fault at address %02x%08x\n",
+ i, e14, e10 | ((e18 >> 24) & 0x1f));
+ e18 &= ~0x1f000000;
+ } else if (e18 & 0xc) {
+ /* g[] write fault? */
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Global write fault at address %02x%08x\n",
+ i, e14, e10 | ((e18 >> 7) & 0x1f));
+ e18 &= ~0x00000f80;
+ } else {
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - Unknown CUDA fault at address %02x%08x\n",
+ i, e14, e10);
+ }
+ NV_INFO(dev, "PGRAPH_TRAP_TPDMA - TP %d - e0c: %08x, e18: %08x, e1c: %08x, e20: %08x, e24: %08x\n",
+ i, e0c, e18, e1c, e20, e24);
+ }
+ ustatus &= ~0x00000080;
+ }
+ }
+ break;
+ }
+ if (ustatus) {
+ if (display)
+ NV_INFO(dev, "%s - TP%d: Unhandled ustatus 0x%08x\n", name, i, ustatus);
+ }
+ nv_wr32(dev, ustatus_addr, 0xc0000000);
+ }
+
+ if (!tps && display)
+ NV_INFO(dev, "%s - No TPs claiming errors?\n", name);
+}
+
+static void
+nv50_pgraph_trap_handler(struct drm_device *dev)
+{
+ struct nouveau_pgraph_trap trap;
+ uint32_t status = nv_rd32(dev, 0x400108);
+ uint32_t ustatus;
+ int display = nouveau_ratelimit();
+
+
+ if (!status && display) {
+ nouveau_graph_trap_info(dev, &trap);
+ nouveau_graph_dump_trap_info(dev, "PGRAPH_TRAP", &trap);
+ NV_INFO(dev, "PGRAPH_TRAP - no units reporting traps?\n");
+ }
+
+ /* DISPATCH: Relays commands to other units and handles NOTIFY,
+ * COND, QUERY. If you get a trap from it, the command is still stuck
+ * in DISPATCH and you need to do something about it. */
+ if (status & 0x001) {
+ ustatus = nv_rd32(dev, 0x400804) & 0x7fffffff;
+ if (!ustatus && display) {
+ NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - no ustatus?\n");
+ }
+
+ /* Known to be triggered by screwed up NOTIFY and COND... */
+ if (ustatus & 0x00000001) {
+ nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_FAULT");
+ nv_wr32(dev, 0x400500, 0);
+ if (nv_rd32(dev, 0x400808) & 0x80000000) {
+ if (display) {
+ if (nouveau_graph_trapped_channel(dev, &trap.channel))
+ trap.channel = -1;
+ trap.class = nv_rd32(dev, 0x400814);
+ trap.mthd = nv_rd32(dev, 0x400808) & 0x1ffc;
+ trap.subc = (nv_rd32(dev, 0x400808) >> 16) & 0x7;
+ trap.data = nv_rd32(dev, 0x40080c);
+ trap.data2 = nv_rd32(dev, 0x400810);
+ nouveau_graph_dump_trap_info(dev,
+ "PGRAPH_TRAP_DISPATCH_FAULT", &trap);
+ NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400808: %08x\n", nv_rd32(dev, 0x400808));
+ NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - 400848: %08x\n", nv_rd32(dev, 0x400848));
+ }
+ nv_wr32(dev, 0x400808, 0);
+ } else if (display) {
+ NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_FAULT - No stuck command?\n");
+ }
+ nv_wr32(dev, 0x4008e8, nv_rd32(dev, 0x4008e8) & 3);
+ nv_wr32(dev, 0x400848, 0);
+ ustatus &= ~0x00000001;
+ }
+ if (ustatus & 0x00000002) {
+ nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_DISPATCH_QUERY");
+ nv_wr32(dev, 0x400500, 0);
+ if (nv_rd32(dev, 0x40084c) & 0x80000000) {
+ if (display) {
+ if (nouveau_graph_trapped_channel(dev, &trap.channel))
+ trap.channel = -1;
+ trap.class = nv_rd32(dev, 0x400814);
+ trap.mthd = nv_rd32(dev, 0x40084c) & 0x1ffc;
+ trap.subc = (nv_rd32(dev, 0x40084c) >> 16) & 0x7;
+ trap.data = nv_rd32(dev, 0x40085c);
+ trap.data2 = 0;
+ nouveau_graph_dump_trap_info(dev,
+ "PGRAPH_TRAP_DISPATCH_QUERY", &trap);
+ NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - 40084c: %08x\n", nv_rd32(dev, 0x40084c));
+ }
+ nv_wr32(dev, 0x40084c, 0);
+ } else if (display) {
+ NV_INFO(dev, "PGRAPH_TRAP_DISPATCH_QUERY - No stuck command?\n");
+ }
+ ustatus &= ~0x00000002;
+ }
+ if (ustatus && display)
+ NV_INFO(dev, "PGRAPH_TRAP_DISPATCH - Unhandled ustatus 0x%08x\n", ustatus);
+ nv_wr32(dev, 0x400804, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x001);
+ status &= ~0x001;
+ }
+
+ /* TRAPs other than dispatch use the "normal" trap regs. */
+ if (status && display) {
+ nouveau_graph_trap_info(dev, &trap);
+ nouveau_graph_dump_trap_info(dev,
+ "PGRAPH_TRAP", &trap);
+ }
+
+ /* M2MF: Memory to memory copy engine. */
+ if (status & 0x002) {
+ ustatus = nv_rd32(dev, 0x406800) & 0x7fffffff;
+ if (!ustatus && display) {
+ NV_INFO(dev, "PGRAPH_TRAP_M2MF - no ustatus?\n");
+ }
+ if (ustatus & 0x00000001) {
+ nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_NOTIFY");
+ ustatus &= ~0x00000001;
+ }
+ if (ustatus & 0x00000002) {
+ nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_IN");
+ ustatus &= ~0x00000002;
+ }
+ if (ustatus & 0x00000004) {
+ nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_M2MF_OUT");
+ ustatus &= ~0x00000004;
+ }
+ NV_INFO (dev, "PGRAPH_TRAP_M2MF - %08x %08x %08x %08x\n",
+ nv_rd32(dev, 0x406804),
+ nv_rd32(dev, 0x406808),
+ nv_rd32(dev, 0x40680c),
+ nv_rd32(dev, 0x406810));
+ if (ustatus && display)
+ NV_INFO(dev, "PGRAPH_TRAP_M2MF - Unhandled ustatus 0x%08x\n", ustatus);
+ /* No sane way found yet -- just reset the bugger. */
+ nv_wr32(dev, 0x400040, 2);
+ nv_wr32(dev, 0x400040, 0);
+ nv_wr32(dev, 0x406800, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x002);
+ status &= ~0x002;
+ }
+
+ /* VFETCH: Fetches data from vertex buffers. */
+ if (status & 0x004) {
+ ustatus = nv_rd32(dev, 0x400c04) & 0x7fffffff;
+ if (!ustatus && display) {
+ NV_INFO(dev, "PGRAPH_TRAP_VFETCH - no ustatus?\n");
+ }
+ if (ustatus & 0x00000001) {
+ nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_VFETCH_FAULT");
+ NV_INFO (dev, "PGRAPH_TRAP_VFETCH_FAULT - %08x %08x %08x %08x\n",
+ nv_rd32(dev, 0x400c00),
+ nv_rd32(dev, 0x400c08),
+ nv_rd32(dev, 0x400c0c),
+ nv_rd32(dev, 0x400c10));
+ ustatus &= ~0x00000001;
+ }
+ if (ustatus && display)
+ NV_INFO(dev, "PGRAPH_TRAP_VFETCH - Unhandled ustatus 0x%08x\n", ustatus);
+ nv_wr32(dev, 0x400c04, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x004);
+ status &= ~0x004;
+ }
+
+ /* STRMOUT: DirectX streamout / OpenGL transform feedback. */
+ if (status & 0x008) {
+ ustatus = nv_rd32(dev, 0x401800) & 0x7fffffff;
+ if (!ustatus && display) {
+ NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - no ustatus?\n");
+ }
+ if (ustatus & 0x00000001) {
+ nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_STRMOUT_FAULT");
+ NV_INFO (dev, "PGRAPH_TRAP_STRMOUT_FAULT - %08x %08x %08x %08x\n",
+ nv_rd32(dev, 0x401804),
+ nv_rd32(dev, 0x401808),
+ nv_rd32(dev, 0x40180c),
+ nv_rd32(dev, 0x401810));
+ ustatus &= ~0x00000001;
+ }
+ if (ustatus && display)
+ NV_INFO(dev, "PGRAPH_TRAP_STRMOUT - Unhandled ustatus 0x%08x\n", ustatus);
+ /* No sane way found yet -- just reset the bugger. */
+ nv_wr32(dev, 0x400040, 0x80);
+ nv_wr32(dev, 0x400040, 0);
+ nv_wr32(dev, 0x401800, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x008);
+ status &= ~0x008;
+ }
+
+ /* CCACHE: Handles code and c[] caches and fills them. */
+ if (status & 0x010) {
+ ustatus = nv_rd32(dev, 0x405018) & 0x7fffffff;
+ if (!ustatus && display) {
+ NV_INFO(dev, "PGRAPH_TRAP_CCACHE - no ustatus?\n");
+ }
+ if (ustatus & 0x00000001) {
+ nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_CCACHE_FAULT");
+ NV_INFO (dev, "PGRAPH_TRAP_CCACHE_FAULT - %08x %08x %08x %08x %08x %08x %08x\n",
+ nv_rd32(dev, 0x405800),
+ nv_rd32(dev, 0x405804),
+ nv_rd32(dev, 0x405808),
+ nv_rd32(dev, 0x40580c),
+ nv_rd32(dev, 0x405810),
+ nv_rd32(dev, 0x405814),
+ nv_rd32(dev, 0x40581c));
+ ustatus &= ~0x00000001;
+ }
+ if (ustatus && display)
+ NV_INFO(dev, "PGRAPH_TRAP_CCACHE - Unhandled ustatus 0x%08x\n", ustatus);
+ nv_wr32(dev, 0x405018, 0xc0000000);
+ nv_wr32(dev, 0x400108, 0x010);
+ status &= ~0x010;
+ }
+
+ /* Unknown, not seen yet... 0x402000 is the only trap status reg
+ * remaining, so try to handle it anyway. Perhaps related to that
+ * unknown DMA slot on tesla? */
+ if (status & 0x20) {
+ nv50_pfb_vm_trap(dev, display, "PGRAPH_TRAP_UNKC04");
+ ustatus = nv_rd32(dev, 0x402000) & 0x7fffffff;
+ if (display)
+ NV_INFO(dev, "PGRAPH_TRAP_UNKC04 - Unhandled ustatus 0x%08x\n", ustatus);
+ nv_wr32(dev, 0x402000, 0xc0000000);
+ /* no status modification on purpose */
+ }
+
+ /* TEXTURE: CUDA texturing units */
+ if (status & 0x040) {
+ nv50_pgraph_tp_trap (dev, 6, 0x408900, 0x408600, display,
+ "PGRAPH_TRAP_TEXTURE");
+ nv_wr32(dev, 0x400108, 0x040);
+ status &= ~0x040;
+ }
+
+ /* MP: CUDA execution engines. */
+ if (status & 0x080) {
+ nv50_pgraph_tp_trap (dev, 7, 0x408314, 0x40831c, display,
+ "PGRAPH_TRAP_MP");
+ nv_wr32(dev, 0x400108, 0x080);
+ status &= ~0x080;
+ }
+
+ /* TPDMA: Handles TP-initiated uncached memory accesses:
+ * l[], g[], stack, 2d surfaces, render targets. */
+ if (status & 0x100) {
+ nv50_pgraph_tp_trap (dev, 8, 0x408e08, 0x408708, display,
+ "PGRAPH_TRAP_TPDMA");
+ nv_wr32(dev, 0x400108, 0x100);
+ status &= ~0x100;
+ }
+
+ if (status) {
+ if (display)
+ NV_INFO(dev, "PGRAPH_TRAP - Unknown trap 0x%08x\n",
+ status);
+ nv_wr32(dev, 0x400108, status);
+ }
+}
+
+/* There must be a *lot* of these. Will take some time to gather them up. */
+static struct nouveau_enum_names nv50_data_error_names[] =
+{
+ { 4, "INVALID_VALUE" },
+ { 5, "INVALID_ENUM" },
+ { 8, "INVALID_OBJECT" },
+ { 0xc, "INVALID_BITFIELD" },
+ { 0x28, "MP_NO_REG_SPACE" },
+ { 0x2b, "MP_BLOCK_SIZE_MISMATCH" },
+};
+
+static void
nv50_pgraph_irq_handler(struct drm_device *dev)
{
+ struct nouveau_pgraph_trap trap;
+ int unhandled = 0;
uint32_t status;
while ((status = nv_rd32(dev, NV03_PGRAPH_INTR))) {
- uint32_t nsource = nv_rd32(dev, NV03_PGRAPH_NSOURCE);
-
+ /* NOTIFY: You've set a NOTIFY on a command and it's done. */
if (status & 0x00000001) {
- nouveau_pgraph_intr_notify(dev, nsource);
+ nouveau_graph_trap_info(dev, &trap);
+ if (nouveau_ratelimit())
+ nouveau_graph_dump_trap_info(dev,
+ "PGRAPH_NOTIFY", &trap);
status &= ~0x00000001;
nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000001);
}
- if (status & 0x00000010) {
- nouveau_pgraph_intr_error(dev, nsource |
- NV03_PGRAPH_NSOURCE_ILLEGAL_MTHD);
+ /* COMPUTE_QUERY: Purpose and exact cause unknown, happens
+ * when you write 0x200 to 0x50c0 method 0x31c. */
+ if (status & 0x00000002) {
+ nouveau_graph_trap_info(dev, &trap);
+ if (nouveau_ratelimit())
+ nouveau_graph_dump_trap_info(dev,
+ "PGRAPH_COMPUTE_QUERY", &trap);
+ status &= ~0x00000002;
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000002);
+ }
+ /* Unknown, never seen: 0x4 */
+
+ /* ILLEGAL_MTHD: You used a wrong method for this class. */
+ if (status & 0x00000010) {
+ nouveau_graph_trap_info(dev, &trap);
+ if (nouveau_pgraph_intr_swmthd(dev, &trap))
+ unhandled = 1;
+ if (unhandled && nouveau_ratelimit())
+ nouveau_graph_dump_trap_info(dev,
+ "PGRAPH_ILLEGAL_MTHD", &trap);
status &= ~0x00000010;
nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000010);
}
+ /* ILLEGAL_CLASS: You used a wrong class. */
+ if (status & 0x00000020) {
+ nouveau_graph_trap_info(dev, &trap);
+ if (nouveau_ratelimit())
+ nouveau_graph_dump_trap_info(dev,
+ "PGRAPH_ILLEGAL_CLASS", &trap);
+ status &= ~0x00000020;
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000020);
+ }
+
+ /* DOUBLE_NOTIFY: You tried to set a NOTIFY on another NOTIFY. */
+ if (status & 0x00000040) {
+ nouveau_graph_trap_info(dev, &trap);
+ if (nouveau_ratelimit())
+ nouveau_graph_dump_trap_info(dev,
+ "PGRAPH_DOUBLE_NOTIFY", &trap);
+ status &= ~0x00000040;
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0x00000040);
+ }
+
+ /* CONTEXT_SWITCH: PGRAPH needs us to load a new context */
if (status & 0x00001000) {
nv_wr32(dev, 0x400500, 0x00000000);
nv_wr32(dev, NV03_PGRAPH_INTR,
@@ -613,49 +1115,59 @@ nv50_pgraph_irq_handler(struct drm_device *dev)
status &= ~NV_PGRAPH_INTR_CONTEXT_SWITCH;
}
- if (status & 0x00100000) {
- nouveau_pgraph_intr_error(dev, nsource |
- NV03_PGRAPH_NSOURCE_DATA_ERROR);
+ /* BUFFER_NOTIFY: Your m2mf transfer finished */
+ if (status & 0x00010000) {
+ nouveau_graph_trap_info(dev, &trap);
+ if (nouveau_ratelimit())
+ nouveau_graph_dump_trap_info(dev,
+ "PGRAPH_BUFFER_NOTIFY", &trap);
+ status &= ~0x00010000;
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0x00010000);
+ }
+ /* DATA_ERROR: Invalid value for this method, or invalid
+ * state in current PGRAPH context for this operation */
+ if (status & 0x00100000) {
+ nouveau_graph_trap_info(dev, &trap);
+ if (nouveau_ratelimit()) {
+ nouveau_graph_dump_trap_info(dev,
+ "PGRAPH_DATA_ERROR", &trap);
+ NV_INFO (dev, "PGRAPH_DATA_ERROR - ");
+ nouveau_print_enum_names(nv_rd32(dev, 0x400110),
+ nv50_data_error_names);
+ printk("\n");
+ }
status &= ~0x00100000;
nv_wr32(dev, NV03_PGRAPH_INTR, 0x00100000);
}
+ /* TRAP: Something bad happened in the middle of command
+ * execution. Has a billion types, subtypes, and even
+ * subsubtypes. */
if (status & 0x00200000) {
- int r;
-
- nouveau_pgraph_intr_error(dev, nsource |
- NV03_PGRAPH_NSOURCE_PROTECTION_ERROR);
-
- NV_ERROR(dev, "magic set 1:\n");
- for (r = 0x408900; r <= 0x408910; r += 4)
- NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
- nv_rd32(dev, r));
- nv_wr32(dev, 0x408900,
- nv_rd32(dev, 0x408904) | 0xc0000000);
- for (r = 0x408e08; r <= 0x408e24; r += 4)
- NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
- nv_rd32(dev, r));
- nv_wr32(dev, 0x408e08,
- nv_rd32(dev, 0x408e08) | 0xc0000000);
-
- NV_ERROR(dev, "magic set 2:\n");
- for (r = 0x409900; r <= 0x409910; r += 4)
- NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
- nv_rd32(dev, r));
- nv_wr32(dev, 0x409900,
- nv_rd32(dev, 0x409904) | 0xc0000000);
- for (r = 0x409e08; r <= 0x409e24; r += 4)
- NV_ERROR(dev, "\t0x%08x: 0x%08x\n", r,
- nv_rd32(dev, r));
- nv_wr32(dev, 0x409e08,
- nv_rd32(dev, 0x409e08) | 0xc0000000);
-
+ nv50_pgraph_trap_handler(dev);
status &= ~0x00200000;
- nv_wr32(dev, NV03_PGRAPH_NSOURCE, nsource);
nv_wr32(dev, NV03_PGRAPH_INTR, 0x00200000);
}
+ /* Unknown, never seen: 0x00400000 */
+
+ /* SINGLE_STEP: Happens on every method if you turned on
+ * single stepping in 40008c */
+ if (status & 0x01000000) {
+ nouveau_graph_trap_info(dev, &trap);
+ if (nouveau_ratelimit())
+ nouveau_graph_dump_trap_info(dev,
+ "PGRAPH_SINGLE_STEP", &trap);
+ status &= ~0x01000000;
+ nv_wr32(dev, NV03_PGRAPH_INTR, 0x01000000);
+ }
+
+ /* 0x02000000 happens when you pause a ctxprog...
+ * but the only way I know that can happen is by
+ * poking the relevant MMIO register, and we don't
+ * do that. */
+
if (status) {
NV_INFO(dev, "Unhandled PGRAPH_INTR - 0x%08x\n",
status);
@@ -672,7 +1184,8 @@ nv50_pgraph_irq_handler(struct drm_device *dev)
}
nv_wr32(dev, NV03_PMC_INTR_0, NV_PMC_INTR_0_PGRAPH_PENDING);
- nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
+ if (nv_rd32(dev, 0x400824) & (1 << 31))
+ nv_wr32(dev, 0x400824, nv_rd32(dev, 0x400824) & ~(1 << 31));
}
static void
diff --git a/drivers/gpu/drm/nouveau/nouveau_state.c b/drivers/gpu/drm/nouveau/nouveau_state.c
index eb8f084d5f5..58b46807de2 100644
--- a/drivers/gpu/drm/nouveau/nouveau_state.c
+++ b/drivers/gpu/drm/nouveau/nouveau_state.c
@@ -35,7 +35,6 @@
#include "nouveau_drm.h"
#include "nv50_display.h"
-static int nouveau_stub_init(struct drm_device *dev) { return 0; }
static void nouveau_stub_takedown(struct drm_device *dev) {}
static int nouveau_init_engine_ptrs(struct drm_device *dev)
@@ -277,8 +276,8 @@ static int nouveau_init_engine_ptrs(struct drm_device *dev)
engine->timer.init = nv04_timer_init;
engine->timer.read = nv04_timer_read;
engine->timer.takedown = nv04_timer_takedown;
- engine->fb.init = nouveau_stub_init;
- engine->fb.takedown = nouveau_stub_takedown;
+ engine->fb.init = nv50_fb_init;
+ engine->fb.takedown = nv50_fb_takedown;
engine->graph.grclass = nv50_graph_grclass;
engine->graph.init = nv50_graph_init;
engine->graph.takedown = nv50_graph_takedown;
diff --git a/drivers/gpu/drm/nouveau/nv04_crtc.c b/drivers/gpu/drm/nouveau/nv04_crtc.c
index a1d1ebb073d..eba687f1099 100644
--- a/drivers/gpu/drm/nouveau/nv04_crtc.c
+++ b/drivers/gpu/drm/nouveau/nv04_crtc.c
@@ -230,9 +230,9 @@ nv_crtc_mode_set_vga(struct drm_crtc *crtc, struct drm_display_mode *mode)
struct drm_framebuffer *fb = crtc->fb;
/* Calculate our timings */
- int horizDisplay = (mode->crtc_hdisplay >> 3) - 1;
- int horizStart = (mode->crtc_hsync_start >> 3) - 1;
- int horizEnd = (mode->crtc_hsync_end >> 3) - 1;
+ int horizDisplay = (mode->crtc_hdisplay >> 3) - 1;
+ int horizStart = (mode->crtc_hsync_start >> 3) + 1;
+ int horizEnd = (mode->crtc_hsync_end >> 3) + 1;
int horizTotal = (mode->crtc_htotal >> 3) - 5;
int horizBlankStart = (mode->crtc_hdisplay >> 3) - 1;
int horizBlankEnd = (mode->crtc_htotal >> 3) - 1;
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 3da90c2c4e6..813b25cec72 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -118,8 +118,8 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
return;
}
- width = ALIGN(image->width, 32);
- dsize = (width * image->height) >> 5;
+ width = ALIGN(image->width, 8);
+ dsize = ALIGN(width * image->height, 32) >> 5;
if (info->fix.visual == FB_VISUAL_TRUECOLOR ||
info->fix.visual == FB_VISUAL_DIRECTCOLOR) {
@@ -136,8 +136,8 @@ nv04_fbcon_imageblit(struct fb_info *info, const struct fb_image *image)
((image->dx + image->width) & 0xffff));
OUT_RING(chan, bg);
OUT_RING(chan, fg);
- OUT_RING(chan, (image->height << 16) | image->width);
OUT_RING(chan, (image->height << 16) | width);
+ OUT_RING(chan, (image->height << 16) | image->width);
OUT_RING(chan, (image->dy << 16) | (image->dx & 0xffff));
while (dsize) {
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 61a89f2dc55..fac6c88a2b1 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -522,8 +522,8 @@ int nv50_display_create(struct drm_device *dev)
}
for (i = 0 ; i < dcb->connector.entries; i++) {
- if (i != 0 && dcb->connector.entry[i].index ==
- dcb->connector.entry[i - 1].index)
+ if (i != 0 && dcb->connector.entry[i].index2 ==
+ dcb->connector.entry[i - 1].index2)
continue;
nouveau_connector_create(dev, &dcb->connector.entry[i]);
}
diff --git a/drivers/gpu/drm/nouveau/nv50_fb.c b/drivers/gpu/drm/nouveau/nv50_fb.c
new file mode 100644
index 00000000000..a95e6941ba8
--- /dev/null
+++ b/drivers/gpu/drm/nouveau/nv50_fb.c
@@ -0,0 +1,32 @@
+#include "drmP.h"
+#include "drm.h"
+#include "nouveau_drv.h"
+#include "nouveau_drm.h"
+
+int
+nv50_fb_init(struct drm_device *dev)
+{
+ /* This is needed to get meaningful information from 100c90
+ * on traps. No idea what these values mean exactly. */
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+
+ switch (dev_priv->chipset) {
+ case 0x50:
+ nv_wr32(dev, 0x100c90, 0x0707ff);
+ break;
+ case 0xa5:
+ case 0xa8:
+ nv_wr32(dev, 0x100c90, 0x0d0fff);
+ break;
+ default:
+ nv_wr32(dev, 0x100c90, 0x1d07ff);
+ break;
+ }
+
+ return 0;
+}
+
+void
+nv50_fb_takedown(struct drm_device *dev)
+{
+}
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 993c7126fbd..25a3cd8794f 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -233,7 +233,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
BEGIN_RING(chan, NvSub2D, 0x0808, 3);
OUT_RING(chan, 0);
OUT_RING(chan, 0);
- OUT_RING(chan, 0);
+ OUT_RING(chan, 1);
BEGIN_RING(chan, NvSub2D, 0x081c, 1);
OUT_RING(chan, 1);
BEGIN_RING(chan, NvSub2D, 0x0840, 4);
diff --git a/drivers/gpu/drm/nouveau/nv50_graph.c b/drivers/gpu/drm/nouveau/nv50_graph.c
index 857a09671a3..c62b33a02f8 100644
--- a/drivers/gpu/drm/nouveau/nv50_graph.c
+++ b/drivers/gpu/drm/nouveau/nv50_graph.c
@@ -56,6 +56,10 @@ nv50_graph_init_intr(struct drm_device *dev)
static void
nv50_graph_init_regs__nv(struct drm_device *dev)
{
+ struct drm_nouveau_private *dev_priv = dev->dev_private;
+ uint32_t units = nv_rd32(dev, 0x1540);
+ int i;
+
NV_DEBUG(dev, "\n");
nv_wr32(dev, 0x400804, 0xc0000000);
@@ -65,6 +69,20 @@ nv50_graph_init_regs__nv(struct drm_device *dev)
nv_wr32(dev, 0x405018, 0xc0000000);
nv_wr32(dev, 0x402000, 0xc0000000);
+ for (i = 0; i < 16; i++) {
+ if (units & 1 << i) {
+ if (dev_priv->chipset < 0xa0) {
+ nv_wr32(dev, 0x408900 + (i << 12), 0xc0000000);
+ nv_wr32(dev, 0x408e08 + (i << 12), 0xc0000000);
+ nv_wr32(dev, 0x408314 + (i << 12), 0xc0000000);
+ } else {
+ nv_wr32(dev, 0x408600 + (i << 11), 0xc0000000);
+ nv_wr32(dev, 0x408708 + (i << 11), 0xc0000000);
+ nv_wr32(dev, 0x40831c + (i << 11), 0xc0000000);
+ }
+ }
+ }
+
nv_wr32(dev, 0x400108, 0xffffffff);
nv_wr32(dev, 0x400824, 0x00004000);
@@ -229,10 +247,6 @@ nv50_graph_create_context(struct nouveau_channel *chan)
nouveau_grctx_vals_load(dev, ctx);
}
nv_wo32(dev, ctx, 0x00000/4, chan->ramin->instance >> 12);
- if ((dev_priv->chipset & 0xf0) == 0xa0)
- nv_wo32(dev, ctx, 0x00004/4, 0x00000000);
- else
- nv_wo32(dev, ctx, 0x0011c/4, 0x00000000);
dev_priv->engine.instmem.finish_access(dev);
return 0;
diff --git a/drivers/gpu/drm/nouveau/nv50_grctx.c b/drivers/gpu/drm/nouveau/nv50_grctx.c
index d105fcd42ca..546b31949a3 100644
--- a/drivers/gpu/drm/nouveau/nv50_grctx.c
+++ b/drivers/gpu/drm/nouveau/nv50_grctx.c
@@ -64,6 +64,9 @@
#define CP_FLAG_ALWAYS ((2 * 32) + 13)
#define CP_FLAG_ALWAYS_FALSE 0
#define CP_FLAG_ALWAYS_TRUE 1
+#define CP_FLAG_INTR ((2 * 32) + 15)
+#define CP_FLAG_INTR_NOT_PENDING 0
+#define CP_FLAG_INTR_PENDING 1
#define CP_CTX 0x00100000
#define CP_CTX_COUNT 0x000f0000
@@ -214,6 +217,8 @@ nv50_grctx_init(struct nouveau_grctx *ctx)
cp_name(ctx, cp_setup_save);
cp_set (ctx, UNK1D, SET);
cp_wait(ctx, STATUS, BUSY);
+ cp_wait(ctx, INTR, PENDING);
+ cp_bra (ctx, STATUS, BUSY, cp_setup_save);
cp_set (ctx, UNK01, SET);
cp_set (ctx, SWAP_DIRECTION, SAVE);
@@ -269,7 +274,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
int offset, base;
uint32_t units = nv_rd32 (ctx->dev, 0x1540);
- /* 0800 */
+ /* 0800: DISPATCH */
cp_ctx(ctx, 0x400808, 7);
gr_def(ctx, 0x400814, 0x00000030);
cp_ctx(ctx, 0x400834, 0x32);
@@ -300,7 +305,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
gr_def(ctx, 0x400b20, 0x0001629d);
}
- /* 0C00 */
+ /* 0C00: VFETCH */
cp_ctx(ctx, 0x400c08, 0x2);
gr_def(ctx, 0x400c08, 0x0000fe0c);
@@ -326,7 +331,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
cp_ctx(ctx, 0x401540, 0x5);
gr_def(ctx, 0x401550, 0x00001018);
- /* 1800 */
+ /* 1800: STREAMOUT */
cp_ctx(ctx, 0x401814, 0x1);
gr_def(ctx, 0x401814, 0x000000ff);
if (dev_priv->chipset == 0x50) {
@@ -641,7 +646,7 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
if (dev_priv->chipset == 0x50)
cp_ctx(ctx, 0x4063e0, 0x1);
- /* 6800 */
+ /* 6800: M2MF */
if (dev_priv->chipset < 0x90) {
cp_ctx(ctx, 0x406814, 0x2b);
gr_def(ctx, 0x406818, 0x00000f80);
diff --git a/drivers/gpu/drm/radeon/Makefile b/drivers/gpu/drm/radeon/Makefile
index ed38262d998..3c91312dea9 100644
--- a/drivers/gpu/drm/radeon/Makefile
+++ b/drivers/gpu/drm/radeon/Makefile
@@ -50,7 +50,7 @@ $(obj)/r600_cs.o: $(obj)/r600_reg_safe.h
radeon-y := radeon_drv.o radeon_cp.o radeon_state.o radeon_mem.o \
radeon_irq.o r300_cmdbuf.o r600_cp.o
# add KMS driver
-radeon-y += radeon_device.o radeon_kms.o \
+radeon-y += radeon_device.o radeon_asic.o radeon_kms.o \
radeon_atombios.o radeon_agp.o atombios_crtc.o radeon_combios.o \
atom.o radeon_fence.o radeon_ttm.o radeon_object.o radeon_gart.o \
radeon_legacy_crtc.o radeon_legacy_encoders.o radeon_connectors.o \
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index d75788feac6..247f8ee7e94 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -52,15 +52,17 @@
typedef struct {
struct atom_context *ctx;
-
uint32_t *ps, *ws;
int ps_shift;
uint16_t start;
+ unsigned last_jump;
+ unsigned long last_jump_jiffies;
+ bool abort;
} atom_exec_context;
int atom_debug = 0;
-static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
-void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
+static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params);
+int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params);
static uint32_t atom_arg_mask[8] =
{ 0xFFFFFFFF, 0xFFFF, 0xFFFF00, 0xFFFF0000, 0xFF, 0xFF00, 0xFF0000,
@@ -604,12 +606,17 @@ static void atom_op_beep(atom_exec_context *ctx, int *ptr, int arg)
static void atom_op_calltable(atom_exec_context *ctx, int *ptr, int arg)
{
int idx = U8((*ptr)++);
+ int r = 0;
+
if (idx < ATOM_TABLE_NAMES_CNT)
SDEBUG(" table: %d (%s)\n", idx, atom_table_names[idx]);
else
SDEBUG(" table: %d\n", idx);
if (U16(ctx->ctx->cmd_table + 4 + 2 * idx))
- atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
+ r = atom_execute_table_locked(ctx->ctx, idx, ctx->ps + ctx->ps_shift);
+ if (r) {
+ ctx->abort = true;
+ }
}
static void atom_op_clear(atom_exec_context *ctx, int *ptr, int arg)
@@ -673,6 +680,8 @@ static void atom_op_eot(atom_exec_context *ctx, int *ptr, int arg)
static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
{
int execute = 0, target = U16(*ptr);
+ unsigned long cjiffies;
+
(*ptr) += 2;
switch (arg) {
case ATOM_COND_ABOVE:
@@ -700,8 +709,25 @@ static void atom_op_jump(atom_exec_context *ctx, int *ptr, int arg)
if (arg != ATOM_COND_ALWAYS)
SDEBUG(" taken: %s\n", execute ? "yes" : "no");
SDEBUG(" target: 0x%04X\n", target);
- if (execute)
+ if (execute) {
+ if (ctx->last_jump == (ctx->start + target)) {
+ cjiffies = jiffies;
+ if (time_after(cjiffies, ctx->last_jump_jiffies)) {
+ cjiffies -= ctx->last_jump_jiffies;
+ if ((jiffies_to_msecs(cjiffies) > 1000)) {
+ DRM_ERROR("atombios stuck in loop for more than 1sec aborting\n");
+ ctx->abort = true;
+ }
+ } else {
+ /* jiffies wrapped around; just wait a little longer */
+ ctx->last_jump_jiffies = jiffies;
+ }
+ } else {
+ ctx->last_jump = ctx->start + target;
+ ctx->last_jump_jiffies = jiffies;
+ }
*ptr = ctx->start + target;
+ }
}
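
The jump handler above guards against AtomBIOS tables that loop forever by remembering the last jump target and aborting once we have been jumping to it for more than a second. A userspace sketch of the same watchdog idea, using CLOCK_MONOTONIC in place of jiffies/time_after(); run_one_op() is a hypothetical stand-in for the interpreter step.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static double now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return ts.tv_sec * 1000.0 + ts.tv_nsec / 1e6;
}

int main(void)
{
	unsigned last_jump = 0;
	double last_jump_ms = 0;
	bool abort_exec = false;
	unsigned target = 42;           /* the interpreter keeps jumping here */

	while (!abort_exec) {
		/* ... run_one_op() would execute one opcode here ... */

		if (last_jump == target) {
			/* Same backward jump as last time: check how long we've spun. */
			if (now_ms() - last_jump_ms > 1000.0) {
				fprintf(stderr, "stuck in loop for more than 1s, aborting\n");
				abort_exec = true;
			}
		} else {
			/* New jump target: restart the timer. */
			last_jump = target;
			last_jump_ms = now_ms();
		}
	}
	return 0;
}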
static void atom_op_mask(atom_exec_context *ctx, int *ptr, int arg)
@@ -1104,7 +1130,7 @@ static struct {
atom_op_shr, ATOM_ARG_MC}, {
atom_op_debug, 0},};
-static void atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
+static int atom_execute_table_locked(struct atom_context *ctx, int index, uint32_t * params)
{
int base = CU16(ctx->cmd_table + 4 + 2 * index);
int len, ws, ps, ptr;
@@ -1112,7 +1138,7 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
atom_exec_context ectx;
if (!base)
- return;
+ return -EINVAL;
len = CU16(base + ATOM_CT_SIZE_PTR);
ws = CU8(base + ATOM_CT_WS_PTR);
@@ -1125,6 +1151,8 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
ectx.ps_shift = ps / 4;
ectx.start = base;
ectx.ps = params;
+ ectx.abort = false;
+ ectx.last_jump = 0;
if (ws)
ectx.ws = kzalloc(4 * ws, GFP_KERNEL);
else
@@ -1137,6 +1165,11 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
SDEBUG("%s @ 0x%04X\n", atom_op_names[op], ptr - 1);
else
SDEBUG("[%d] @ 0x%04X\n", op, ptr - 1);
+ if (ectx.abort) {
+ DRM_ERROR("atombios stuck executing %04X (len %d, WS %d, PS %d) @ 0x%04X\n",
+ base, len, ws, ps, ptr - 1);
+ return -EINVAL;
+ }
if (op < ATOM_OP_CNT && op > 0)
opcode_table[op].func(&ectx, &ptr,
@@ -1152,10 +1185,13 @@ static void atom_execute_table_locked(struct atom_context *ctx, int index, uint3
if (ws)
kfree(ectx.ws);
+ return 0;
}
-void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
+int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
{
+ int r;
+
mutex_lock(&ctx->mutex);
/* reset reg block */
ctx->reg_block = 0;
@@ -1163,8 +1199,9 @@ void atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
ctx->fb_base = 0;
/* reset io mode */
ctx->io_mode = ATOM_IO_MM;
- atom_execute_table_locked(ctx, index, params);
+ r = atom_execute_table_locked(ctx, index, params);
mutex_unlock(&ctx->mutex);
+ return r;
}
static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
@@ -1248,9 +1285,7 @@ int atom_asic_init(struct atom_context *ctx)
if (!CU16(ctx->cmd_table + 4 + 2 * ATOM_CMD_INIT))
return 1;
- atom_execute_table(ctx, ATOM_CMD_INIT, ps);
-
- return 0;
+ return atom_execute_table(ctx, ATOM_CMD_INIT, ps);
}
void atom_destroy(struct atom_context *ctx)
@@ -1260,12 +1295,16 @@ void atom_destroy(struct atom_context *ctx)
kfree(ctx);
}
-void atom_parse_data_header(struct atom_context *ctx, int index,
+bool atom_parse_data_header(struct atom_context *ctx, int index,
uint16_t * size, uint8_t * frev, uint8_t * crev,
uint16_t * data_start)
{
int offset = index * 2 + 4;
int idx = CU16(ctx->data_table + offset);
+ u16 *mdt = (u16 *)(ctx->bios + ctx->data_table + 4);
+
+ if (!mdt[index])
+ return false;
if (size)
*size = CU16(idx);
@@ -1274,38 +1313,42 @@ void atom_parse_data_header(struct atom_context *ctx, int index,
if (crev)
*crev = CU8(idx + 3);
*data_start = idx;
- return;
+ return true;
}
-void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
+bool atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t * frev,
uint8_t * crev)
{
int offset = index * 2 + 4;
int idx = CU16(ctx->cmd_table + offset);
+ u16 *mct = (u16 *)(ctx->bios + ctx->cmd_table + 4);
+
+ if (!mct[index])
+ return false;
if (frev)
*frev = CU8(idx + 2);
if (crev)
*crev = CU8(idx + 3);
- return;
+ return true;
}
int atom_allocate_fb_scratch(struct atom_context *ctx)
{
int index = GetIndexIntoMasterTable(DATA, VRAM_UsageByFirmware);
uint16_t data_offset;
- int usage_bytes;
+ int usage_bytes = 0;
struct _ATOM_VRAM_USAGE_BY_FIRMWARE *firmware_usage;
- atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset);
+ if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
+ firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
- firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset);
+ DRM_DEBUG("atom firmware requested %08x %dkb\n",
+ firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
+ firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
- DRM_DEBUG("atom firmware requested %08x %dkb\n",
- firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware,
- firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb);
-
- usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
+ usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024;
+ }
if (usage_bytes == 0)
usage_bytes = 20 * 1024;
/* allocate some scratch memory */
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index bc73781423a..cd1b64ab5ca 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -140,11 +140,13 @@ struct atom_context {
extern int atom_debug;
struct atom_context *atom_parse(struct card_info *, void *);
-void atom_execute_table(struct atom_context *, int, uint32_t *);
+int atom_execute_table(struct atom_context *, int, uint32_t *);
int atom_asic_init(struct atom_context *);
void atom_destroy(struct atom_context *);
-void atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, uint8_t *frev, uint8_t *crev, uint16_t *data_start);
-void atom_parse_cmd_header(struct atom_context *ctx, int index, uint8_t *frev, uint8_t *crev);
+bool atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size,
+ uint8_t *frev, uint8_t *crev, uint16_t *data_start);
+bool atom_parse_cmd_header(struct atom_context *ctx, int index,
+ uint8_t *frev, uint8_t *crev);
int atom_allocate_fb_scratch(struct atom_context *ctx);
#include "atom-types.h"
#include "atombios.h"
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index dd9fdf56061..fd4ef6d1884 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -353,12 +353,55 @@ static void atombios_crtc_set_timing(struct drm_crtc *crtc,
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
+static void atombios_disable_ss(struct drm_crtc *crtc)
+{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ u32 ss_cntl;
+
+ if (ASIC_IS_DCE4(rdev)) {
+ switch (radeon_crtc->pll_id) {
+ case ATOM_PPLL1:
+ ss_cntl = RREG32(EVERGREEN_P1PLL_SS_CNTL);
+ ss_cntl &= ~EVERGREEN_PxPLL_SS_EN;
+ WREG32(EVERGREEN_P1PLL_SS_CNTL, ss_cntl);
+ break;
+ case ATOM_PPLL2:
+ ss_cntl = RREG32(EVERGREEN_P2PLL_SS_CNTL);
+ ss_cntl &= ~EVERGREEN_PxPLL_SS_EN;
+ WREG32(EVERGREEN_P2PLL_SS_CNTL, ss_cntl);
+ break;
+ case ATOM_DCPLL:
+ case ATOM_PPLL_INVALID:
+ return;
+ }
+ } else if (ASIC_IS_AVIVO(rdev)) {
+ switch (radeon_crtc->pll_id) {
+ case ATOM_PPLL1:
+ ss_cntl = RREG32(AVIVO_P1PLL_INT_SS_CNTL);
+ ss_cntl &= ~1;
+ WREG32(AVIVO_P1PLL_INT_SS_CNTL, ss_cntl);
+ break;
+ case ATOM_PPLL2:
+ ss_cntl = RREG32(AVIVO_P2PLL_INT_SS_CNTL);
+ ss_cntl &= ~1;
+ WREG32(AVIVO_P2PLL_INT_SS_CNTL, ss_cntl);
+ break;
+ case ATOM_DCPLL:
+ case ATOM_PPLL_INVALID:
+ return;
+ }
+ }
+}
+
+
union atom_enable_ss {
ENABLE_LVDS_SS_PARAMETERS legacy;
ENABLE_SPREAD_SPECTRUM_ON_PPLL_PS_ALLOCATION v1;
};
-static void atombios_set_ss(struct drm_crtc *crtc, int enable)
+static void atombios_enable_ss(struct drm_crtc *crtc)
{
struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
struct drm_device *dev = crtc->dev;
@@ -387,9 +430,9 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable)
step = dig->ss->step;
delay = dig->ss->delay;
range = dig->ss->range;
- } else if (enable)
+ } else
return;
- } else if (enable)
+ } else
return;
break;
}
@@ -406,13 +449,13 @@ static void atombios_set_ss(struct drm_crtc *crtc, int enable)
args.v1.ucSpreadSpectrumDelay = delay;
args.v1.ucSpreadSpectrumRange = range;
args.v1.ucPpll = radeon_crtc->crtc_id ? ATOM_PPLL2 : ATOM_PPLL1;
- args.v1.ucEnable = enable;
+ args.v1.ucEnable = ATOM_ENABLE;
} else {
args.legacy.usSpreadSpectrumPercentage = cpu_to_le16(percentage);
args.legacy.ucSpreadSpectrumType = type;
args.legacy.ucSpreadSpectrumStepSize_Delay = (step & 3) << 2;
args.legacy.ucSpreadSpectrumStepSize_Delay |= (delay & 7) << 4;
- args.legacy.ucEnable = enable;
+ args.legacy.ucEnable = ATOM_ENABLE;
}
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
}
@@ -478,11 +521,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
/* DVO wants 2x pixel clock if the DVO chip is in 12 bit mode */
if (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_KLDSCP_DVO1)
adjusted_clock = mode->clock * 2;
- /* LVDS PLL quirks */
- if (encoder->encoder_type == DRM_MODE_ENCODER_LVDS) {
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- pll->algo = dig->pll_algo;
- }
} else {
if (encoder->encoder_type != DRM_MODE_ENCODER_DAC)
pll->flags |= RADEON_PLL_NO_ODD_POST_DIV;
@@ -503,8 +541,9 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
int index;
index = GetIndexIntoMasterTable(COMMAND, AdjustDisplayPll);
- atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
- &crev);
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
+ &crev))
+ return adjusted_clock;
memset(&args, 0, sizeof(args));
@@ -542,11 +581,16 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
}
} else if (radeon_encoder->devices & (ATOM_DEVICE_LCD_SUPPORT)) {
/* may want to enable SS on DP/eDP eventually */
- args.v3.sInput.ucDispPllConfig |=
- DISPPLL_CONFIG_SS_ENABLE;
- if (mode->clock > 165000)
+ /*args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_SS_ENABLE;*/
+ if (encoder_mode == ATOM_ENCODER_MODE_DP)
args.v3.sInput.ucDispPllConfig |=
- DISPPLL_CONFIG_DUAL_LINK;
+ DISPPLL_CONFIG_COHERENT_MODE;
+ else {
+ if (mode->clock > 165000)
+ args.v3.sInput.ucDispPllConfig |=
+ DISPPLL_CONFIG_DUAL_LINK;
+ }
}
atom_execute_table(rdev->mode_info.atom_context,
index, (uint32_t *)&args);
@@ -592,8 +636,9 @@ static void atombios_crtc_set_dcpll(struct drm_crtc *crtc)
memset(&args, 0, sizeof(args));
index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
- atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
- &crev);
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
+ &crev))
+ return;
switch (frev) {
case 1:
@@ -667,8 +712,9 @@ static void atombios_crtc_set_pll(struct drm_crtc *crtc, struct drm_display_mode
&ref_div, &post_div);
index = GetIndexIntoMasterTable(COMMAND, SetPixelClock);
- atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
- &crev);
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev,
+ &crev))
+ return;
switch (frev) {
case 1:
@@ -1083,15 +1129,12 @@ int atombios_crtc_mode_set(struct drm_crtc *crtc,
/* TODO color tiling */
- /* pick pll */
- radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
-
- atombios_set_ss(crtc, 0);
+ atombios_disable_ss(crtc);
/* always set DCPLL */
if (ASIC_IS_DCE4(rdev))
atombios_crtc_set_dcpll(crtc);
atombios_crtc_set_pll(crtc, adjusted_mode);
- atombios_set_ss(crtc, 1);
+ atombios_enable_ss(crtc);
if (ASIC_IS_DCE4(rdev))
atombios_set_crtc_dtd_timing(crtc, adjusted_mode);
@@ -1120,6 +1163,11 @@ static bool atombios_crtc_mode_fixup(struct drm_crtc *crtc,
static void atombios_crtc_prepare(struct drm_crtc *crtc)
{
+ struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
+
+ /* pick pll */
+ radeon_crtc->pll_id = radeon_atom_pick_pll(crtc);
+
atombios_lock_crtc(crtc, ATOM_ENABLE);
atombios_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
}
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 8a133bda00a..28b31c64f48 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -745,14 +745,14 @@ void dp_link_train(struct drm_encoder *encoder,
>> DP_TRAIN_PRE_EMPHASIS_SHIFT);
/* disable the training pattern on the sink */
+ dp_set_training(radeon_connector, DP_TRAINING_PATTERN_DISABLE);
+
+ /* disable the training pattern on the source */
if (ASIC_IS_DCE4(rdev))
atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_LINK_TRAINING_COMPLETE);
else
radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
dig_connector->dp_clock, enc_id, 0);
-
- radeon_dp_encoder_service(rdev, ATOM_DP_ACTION_TRAINING_COMPLETE,
- dig_connector->dp_clock, enc_id, 0);
}
int radeon_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode,
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index bd2e7aa85c1..647a0efdc35 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -25,6 +25,7 @@
#include <linux/platform_device.h>
#include "drmP.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "radeon_drm.h"
#include "rv770d.h"
#include "atom.h"
@@ -436,7 +437,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
int evergreen_mc_init(struct radeon_device *rdev)
{
- fixed20_12 a;
u32 tmp;
int chansize, numchan;
@@ -481,12 +481,8 @@ int evergreen_mc_init(struct radeon_device *rdev)
rdev->mc.real_vram_size = rdev->mc.aper_size;
}
r600_vram_gtt_location(rdev, &rdev->mc);
- /* FIXME: we should enforce default clock in case GPU is not in
- * default setup
- */
- a.full = rfixed_const(100);
- rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
- rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
+ radeon_update_bandwidth_info(rdev);
+
return 0;
}
@@ -746,6 +742,7 @@ int evergreen_init(struct radeon_device *rdev)
void evergreen_fini(struct radeon_device *rdev)
{
+ radeon_pm_fini(rdev);
evergreen_suspend(rdev);
#if 0
r600_blit_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 91eb762eb3f..3ae51ada1ab 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -31,6 +31,7 @@
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "r100d.h"
#include "rs100d.h"
#include "rv200d.h"
@@ -235,9 +236,9 @@ int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
void r100_pci_gart_fini(struct radeon_device *rdev)
{
+ radeon_gart_fini(rdev);
r100_pci_gart_disable(rdev);
radeon_gart_table_ram_free(rdev);
- radeon_gart_fini(rdev);
}
int r100_irq_set(struct radeon_device *rdev)
@@ -312,10 +313,12 @@ int r100_irq_process(struct radeon_device *rdev)
/* Vertical blank interrupts */
if (status & RADEON_CRTC_VBLANK_STAT) {
drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
if (status & RADEON_CRTC2_VBLANK_STAT) {
drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
if (status & RADEON_FP_DETECT_STAT) {
@@ -741,6 +744,8 @@ int r100_cp_init(struct radeon_device *rdev, unsigned ring_size)
udelay(10);
rdev->cp.rptr = RREG32(RADEON_CP_RB_RPTR);
rdev->cp.wptr = RREG32(RADEON_CP_RB_WPTR);
+ /* protect against crazy HW on resume */
+ rdev->cp.wptr &= rdev->cp.ptr_mask;
/* Set cp mode to bus mastering & enable cp*/
WREG32(RADEON_CP_CSQ_MODE,
REG_SET(RADEON_INDIRECT2_START, indirect2_start) |
@@ -1804,6 +1809,7 @@ void r100_set_common_regs(struct radeon_device *rdev)
{
struct drm_device *dev = rdev->ddev;
bool force_dac2 = false;
+ u32 tmp;
/* set these so they don't interfere with anything */
WREG32(RADEON_OV0_SCALE_CNTL, 0);
@@ -1875,6 +1881,12 @@ void r100_set_common_regs(struct radeon_device *rdev)
WREG32(RADEON_DISP_HW_DEBUG, disp_hw_debug);
WREG32(RADEON_DAC_CNTL2, dac2_cntl);
}
+
+ /* switch PM block to ACPI mode */
+ tmp = RREG32_PLL(RADEON_PLL_PWRMGT_CNTL);
+ tmp &= ~RADEON_PM_MODE_SEL;
+ WREG32_PLL(RADEON_PLL_PWRMGT_CNTL, tmp);
+
}
/*
@@ -2022,6 +2034,7 @@ void r100_mc_init(struct radeon_device *rdev)
radeon_vram_location(rdev, &rdev->mc, base);
if (!(rdev->flags & RADEON_IS_AGP))
radeon_gtt_location(rdev, &rdev->mc);
+ radeon_update_bandwidth_info(rdev);
}
@@ -2385,6 +2398,8 @@ void r100_bandwidth_update(struct radeon_device *rdev)
uint32_t pixel_bytes1 = 0;
uint32_t pixel_bytes2 = 0;
+ radeon_update_display_priority(rdev);
+
if (rdev->mode_info.crtcs[0]->base.enabled) {
mode1 = &rdev->mode_info.crtcs[0]->base.mode;
pixel_bytes1 = rdev->mode_info.crtcs[0]->base.fb->bits_per_pixel / 8;
@@ -2413,11 +2428,8 @@ void r100_bandwidth_update(struct radeon_device *rdev)
/*
* determine is there is enough bw for current mode
*/
- mclk_ff.full = rfixed_const(rdev->clock.default_mclk);
- temp_ff.full = rfixed_const(100);
- mclk_ff.full = rfixed_div(mclk_ff, temp_ff);
- sclk_ff.full = rfixed_const(rdev->clock.default_sclk);
- sclk_ff.full = rfixed_div(sclk_ff, temp_ff);
+ sclk_ff = rdev->pm.sclk;
+ mclk_ff = rdev->pm.mclk;
temp = (rdev->mc.vram_width / 8) * (rdev->mc.vram_is_ddr ? 2 : 1);
temp_ff.full = rfixed_const(temp);
@@ -3440,6 +3452,7 @@ int r100_suspend(struct radeon_device *rdev)
void r100_fini(struct radeon_device *rdev)
{
+ radeon_pm_fini(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index 1146c9909c2..85617c31121 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -30,6 +30,7 @@
#include "radeon_drm.h"
#include "radeon_reg.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "r100d.h"
#include "r200_reg_safe.h"
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index 4cef90cd74e..1023eeb6587 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -30,6 +30,7 @@
#include "drm.h"
#include "radeon_reg.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "radeon_drm.h"
#include "r100_track.h"
#include "r300d.h"
@@ -164,9 +165,9 @@ void rv370_pcie_gart_disable(struct radeon_device *rdev)
void rv370_pcie_gart_fini(struct radeon_device *rdev)
{
+ radeon_gart_fini(rdev);
rv370_pcie_gart_disable(rdev);
radeon_gart_table_vram_free(rdev);
- radeon_gart_fini(rdev);
}
void r300_fence_ring_emit(struct radeon_device *rdev,
@@ -481,6 +482,7 @@ void r300_mc_init(struct radeon_device *rdev)
radeon_vram_location(rdev, &rdev->mc, base);
if (!(rdev->flags & RADEON_IS_AGP))
radeon_gtt_location(rdev, &rdev->mc);
+ radeon_update_bandwidth_info(rdev);
}
void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes)
@@ -1334,6 +1336,7 @@ int r300_suspend(struct radeon_device *rdev)
void r300_fini(struct radeon_device *rdev)
{
+ radeon_pm_fini(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r420.c b/drivers/gpu/drm/radeon/r420.c
index c7593b8f58e..0b8603ca697 100644
--- a/drivers/gpu/drm/radeon/r420.c
+++ b/drivers/gpu/drm/radeon/r420.c
@@ -29,6 +29,7 @@
#include "drmP.h"
#include "radeon_reg.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "atom.h"
#include "r100d.h"
#include "r420d.h"
@@ -266,6 +267,7 @@ int r420_suspend(struct radeon_device *rdev)
void r420_fini(struct radeon_device *rdev)
{
+ radeon_pm_fini(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index 2b8a5dd1351..3c44b8d3931 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -27,6 +27,7 @@
*/
#include "drmP.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "atom.h"
#include "r520d.h"
@@ -121,19 +122,13 @@ static void r520_vram_get_type(struct radeon_device *rdev)
void r520_mc_init(struct radeon_device *rdev)
{
- fixed20_12 a;
r520_vram_get_type(rdev);
r100_vram_init_sizes(rdev);
radeon_vram_location(rdev, &rdev->mc, 0);
if (!(rdev->flags & RADEON_IS_AGP))
radeon_gtt_location(rdev, &rdev->mc);
- /* FIXME: we should enforce default clock in case GPU is not in
- * default setup
- */
- a.full = rfixed_const(100);
- rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
- rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
+ radeon_update_bandwidth_info(rdev);
}
void r520_mc_program(struct radeon_device *rdev)
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index c5229019729..5509354c7c8 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -31,6 +31,7 @@
#include "drmP.h"
#include "radeon_drm.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "radeon_mode.h"
#include "r600d.h"
#include "atom.h"
@@ -491,9 +492,9 @@ void r600_pcie_gart_disable(struct radeon_device *rdev)
void r600_pcie_gart_fini(struct radeon_device *rdev)
{
+ radeon_gart_fini(rdev);
r600_pcie_gart_disable(rdev);
radeon_gart_table_vram_free(rdev);
- radeon_gart_fini(rdev);
}
void r600_agp_enable(struct radeon_device *rdev)
@@ -675,7 +676,6 @@ void r600_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
int r600_mc_init(struct radeon_device *rdev)
{
- fixed20_12 a;
u32 tmp;
int chansize, numchan;
@@ -719,14 +719,10 @@ int r600_mc_init(struct radeon_device *rdev)
rdev->mc.real_vram_size = rdev->mc.aper_size;
}
r600_vram_gtt_location(rdev, &rdev->mc);
- /* FIXME: we should enforce default clock in case GPU is not in
- * default setup
- */
- a.full = rfixed_const(100);
- rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
- rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
+
if (rdev->flags & RADEON_IS_IGP)
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
+ radeon_update_bandwidth_info(rdev);
return 0;
}
@@ -1132,6 +1128,7 @@ void r600_gpu_init(struct radeon_device *rdev)
/* Setup pipes */
WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
+ WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
@@ -2119,6 +2116,7 @@ int r600_init(struct radeon_device *rdev)
void r600_fini(struct radeon_device *rdev)
{
+ radeon_pm_fini(rdev);
r600_audio_fini(rdev);
r600_blit_fini(rdev);
r600_cp_fini(rdev);
@@ -2398,19 +2396,19 @@ static void r600_disable_interrupt_state(struct radeon_device *rdev)
WREG32(DC_HPD4_INT_CONTROL, tmp);
if (ASIC_IS_DCE32(rdev)) {
tmp = RREG32(DC_HPD5_INT_CONTROL) & DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD5_INT_CONTROL, 0);
+ WREG32(DC_HPD5_INT_CONTROL, tmp);
tmp = RREG32(DC_HPD6_INT_CONTROL) & DC_HPDx_INT_POLARITY;
- WREG32(DC_HPD6_INT_CONTROL, 0);
+ WREG32(DC_HPD6_INT_CONTROL, tmp);
}
} else {
WREG32(DACA_AUTODETECT_INT_CONTROL, 0);
WREG32(DACB_AUTODETECT_INT_CONTROL, 0);
tmp = RREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
- WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, 0);
+ WREG32(DC_HOT_PLUG_DETECT1_INT_CONTROL, tmp);
tmp = RREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
- WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, 0);
+ WREG32(DC_HOT_PLUG_DETECT2_INT_CONTROL, tmp);
tmp = RREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL) & DC_HOT_PLUG_DETECTx_INT_POLARITY;
- WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, 0);
+ WREG32(DC_HOT_PLUG_DETECT3_INT_CONTROL, tmp);
}
}
@@ -2765,6 +2763,7 @@ restart_ih:
case 0: /* D1 vblank */
if (disp_int & LB_D1_VBLANK_INTERRUPT) {
drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
disp_int &= ~LB_D1_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D1 vblank\n");
@@ -2786,6 +2785,7 @@ restart_ih:
case 0: /* D2 vblank */
if (disp_int & LB_D2_VBLANK_INTERRUPT) {
drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
disp_int &= ~LB_D2_VBLANK_INTERRUPT;
DRM_DEBUG("IH: D2 vblank\n");
@@ -2834,14 +2834,14 @@ restart_ih:
break;
case 10:
if (disp_int_cont2 & DC_HPD5_INTERRUPT) {
- disp_int_cont &= ~DC_HPD5_INTERRUPT;
+ disp_int_cont2 &= ~DC_HPD5_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD5\n");
}
break;
case 12:
if (disp_int_cont2 & DC_HPD6_INTERRUPT) {
- disp_int_cont &= ~DC_HPD6_INTERRUPT;
+ disp_int_cont2 &= ~DC_HPD6_INTERRUPT;
queue_hotplug = true;
DRM_DEBUG("IH: HPD6\n");
}
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index db928016d03..dac7042b797 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -182,41 +182,6 @@ int r600_audio_init(struct radeon_device *rdev)
}
/*
- * determin how the encoders and audio interface is wired together
- */
-int r600_audio_tmds_index(struct drm_encoder *encoder)
-{
- struct drm_device *dev = encoder->dev;
- struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct drm_encoder *other;
-
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- return 0;
-
- case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- /* special case check if an TMDS1 is present */
- list_for_each_entry(other, &dev->mode_config.encoder_list, head) {
- if (to_radeon_encoder(other)->encoder_id ==
- ENCODER_OBJECT_ID_INTERNAL_TMDS1)
- return 1;
- }
- return 0;
-
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- return 1;
-
- default:
- DRM_ERROR("Unsupported encoder type 0x%02X\n",
- radeon_encoder->encoder_id);
- return -1;
- }
-}
-
-/*
* atach the audio codec to the clock source of the encoder
*/
void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
@@ -224,6 +189,7 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
int base_rate = 48000;
switch (radeon_encoder->encoder_id) {
@@ -231,32 +197,34 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
WREG32_P(R600_AUDIO_TIMING, 0, ~0x301);
break;
-
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
WREG32_P(R600_AUDIO_TIMING, 0x100, ~0x301);
break;
-
default:
DRM_ERROR("Unsupported encoder type 0x%02X\n",
radeon_encoder->encoder_id);
return;
}
- switch (r600_audio_tmds_index(encoder)) {
+ switch (dig->dig_encoder) {
case 0:
- WREG32(R600_AUDIO_PLL1_MUL, base_rate*50);
- WREG32(R600_AUDIO_PLL1_DIV, clock*100);
+ WREG32(R600_AUDIO_PLL1_MUL, base_rate * 50);
+ WREG32(R600_AUDIO_PLL1_DIV, clock * 100);
WREG32(R600_AUDIO_CLK_SRCSEL, 0);
break;
case 1:
- WREG32(R600_AUDIO_PLL2_MUL, base_rate*50);
- WREG32(R600_AUDIO_PLL2_DIV, clock*100);
+ WREG32(R600_AUDIO_PLL2_MUL, base_rate * 50);
+ WREG32(R600_AUDIO_PLL2_DIV, clock * 100);
WREG32(R600_AUDIO_CLK_SRCSEL, 1);
break;
+ default:
+ dev_err(rdev->dev, "Unsupported DIG on encoder 0x%02X\n",
+ radeon_encoder->encoder_id);
+ return;
}
}
diff --git a/drivers/gpu/drm/radeon/r600_blit_shaders.c b/drivers/gpu/drm/radeon/r600_blit_shaders.c
index a112c59f9d8..0271b53fa2d 100644
--- a/drivers/gpu/drm/radeon/r600_blit_shaders.c
+++ b/drivers/gpu/drm/radeon/r600_blit_shaders.c
@@ -1,7 +1,42 @@
+/*
+ * Copyright 2009 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Authors:
+ * Alex Deucher <alexander.deucher@amd.com>
+ */
#include <linux/types.h>
#include <linux/kernel.h>
+/*
+ * R6xx+ cards need to use the 3D engine to blit data which requires
+ * quite a bit of hw state setup. Rather than pull the whole 3D driver
+ * (which normally generates the 3D state) into the DRM, we opt to use
+ * statically generated state tables. The register state and shaders
+ * were hand generated to support blitting functionality. See the 3D
+ * driver or documentation for descriptions of the registers and
+ * shader instructions.
+ */
+
const u32 r6xx_default_state[] =
{
0xc0002400,
diff --git a/drivers/gpu/drm/radeon/r600_cp.c b/drivers/gpu/drm/radeon/r600_cp.c
index 40416c068d9..68e6f434930 100644
--- a/drivers/gpu/drm/radeon/r600_cp.c
+++ b/drivers/gpu/drm/radeon/r600_cp.c
@@ -1548,10 +1548,13 @@ static void r700_gfx_init(struct drm_device *dev,
RADEON_WRITE(R600_CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
RADEON_WRITE(R600_CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
+ RADEON_WRITE(R600_GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
RADEON_WRITE(R700_CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
RADEON_WRITE(R700_CGTS_SYS_TCC_DISABLE, 0);
RADEON_WRITE(R700_CGTS_TCC_DISABLE, 0);
+ RADEON_WRITE(R700_CGTS_USER_SYS_TCC_DISABLE, 0);
+ RADEON_WRITE(R700_CGTS_USER_TCC_DISABLE, 0);
num_qd_pipes =
R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & R600_INACTIVE_QD_PIPES_MASK) >> 8);
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index cd2c63bce50..c39c1bc1301 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -45,6 +45,7 @@ struct r600_cs_track {
u32 nbanks;
u32 npipes;
/* value we track */
+ u32 sq_config;
u32 nsamples;
u32 cb_color_base_last[8];
struct radeon_bo *cb_color_bo[8];
@@ -141,6 +142,8 @@ static void r600_cs_track_init(struct r600_cs_track *track)
{
int i;
+ /* assume DX9 mode */
+ track->sq_config = DX9_CONSTS;
for (i = 0; i < 8; i++) {
track->cb_color_base_last[i] = 0;
track->cb_color_size[i] = 0;
@@ -715,6 +718,9 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
tmp =radeon_get_ib_value(p, idx);
ib[idx] = 0;
break;
+ case SQ_CONFIG:
+ track->sq_config = radeon_get_ib_value(p, idx);
+ break;
case R_028800_DB_DEPTH_CONTROL:
track->db_depth_control = radeon_get_ib_value(p, idx);
break;
@@ -869,6 +875,54 @@ static inline int r600_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx
case SQ_PGM_START_VS:
case SQ_PGM_START_GS:
case SQ_PGM_START_PS:
+ case SQ_ALU_CONST_CACHE_GS_0:
+ case SQ_ALU_CONST_CACHE_GS_1:
+ case SQ_ALU_CONST_CACHE_GS_2:
+ case SQ_ALU_CONST_CACHE_GS_3:
+ case SQ_ALU_CONST_CACHE_GS_4:
+ case SQ_ALU_CONST_CACHE_GS_5:
+ case SQ_ALU_CONST_CACHE_GS_6:
+ case SQ_ALU_CONST_CACHE_GS_7:
+ case SQ_ALU_CONST_CACHE_GS_8:
+ case SQ_ALU_CONST_CACHE_GS_9:
+ case SQ_ALU_CONST_CACHE_GS_10:
+ case SQ_ALU_CONST_CACHE_GS_11:
+ case SQ_ALU_CONST_CACHE_GS_12:
+ case SQ_ALU_CONST_CACHE_GS_13:
+ case SQ_ALU_CONST_CACHE_GS_14:
+ case SQ_ALU_CONST_CACHE_GS_15:
+ case SQ_ALU_CONST_CACHE_PS_0:
+ case SQ_ALU_CONST_CACHE_PS_1:
+ case SQ_ALU_CONST_CACHE_PS_2:
+ case SQ_ALU_CONST_CACHE_PS_3:
+ case SQ_ALU_CONST_CACHE_PS_4:
+ case SQ_ALU_CONST_CACHE_PS_5:
+ case SQ_ALU_CONST_CACHE_PS_6:
+ case SQ_ALU_CONST_CACHE_PS_7:
+ case SQ_ALU_CONST_CACHE_PS_8:
+ case SQ_ALU_CONST_CACHE_PS_9:
+ case SQ_ALU_CONST_CACHE_PS_10:
+ case SQ_ALU_CONST_CACHE_PS_11:
+ case SQ_ALU_CONST_CACHE_PS_12:
+ case SQ_ALU_CONST_CACHE_PS_13:
+ case SQ_ALU_CONST_CACHE_PS_14:
+ case SQ_ALU_CONST_CACHE_PS_15:
+ case SQ_ALU_CONST_CACHE_VS_0:
+ case SQ_ALU_CONST_CACHE_VS_1:
+ case SQ_ALU_CONST_CACHE_VS_2:
+ case SQ_ALU_CONST_CACHE_VS_3:
+ case SQ_ALU_CONST_CACHE_VS_4:
+ case SQ_ALU_CONST_CACHE_VS_5:
+ case SQ_ALU_CONST_CACHE_VS_6:
+ case SQ_ALU_CONST_CACHE_VS_7:
+ case SQ_ALU_CONST_CACHE_VS_8:
+ case SQ_ALU_CONST_CACHE_VS_9:
+ case SQ_ALU_CONST_CACHE_VS_10:
+ case SQ_ALU_CONST_CACHE_VS_11:
+ case SQ_ALU_CONST_CACHE_VS_12:
+ case SQ_ALU_CONST_CACHE_VS_13:
+ case SQ_ALU_CONST_CACHE_VS_14:
+ case SQ_ALU_CONST_CACHE_VS_15:
r = r600_cs_packet_next_reloc(p, &reloc);
if (r) {
dev_warn(p->dev, "bad SET_CONTEXT_REG "
@@ -1226,13 +1280,15 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
}
break;
case PACKET3_SET_ALU_CONST:
- start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
- end_reg = 4 * pkt->count + start_reg - 4;
- if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
- (start_reg >= PACKET3_SET_ALU_CONST_END) ||
- (end_reg >= PACKET3_SET_ALU_CONST_END)) {
- DRM_ERROR("bad SET_ALU_CONST\n");
- return -EINVAL;
+ if (track->sq_config & DX9_CONSTS) {
+ start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
+ end_reg = 4 * pkt->count + start_reg - 4;
+ if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
+ (start_reg >= PACKET3_SET_ALU_CONST_END) ||
+ (end_reg >= PACKET3_SET_ALU_CONST_END)) {
+ DRM_ERROR("bad SET_ALU_CONST\n");
+ return -EINVAL;
+ }
}
break;
case PACKET3_SET_BOOL_CONST:
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index fcc949df0e5..029fa1406d1 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -42,13 +42,13 @@ enum r600_hdmi_color_format {
*/
enum r600_hdmi_iec_status_bits {
AUDIO_STATUS_DIG_ENABLE = 0x01,
- AUDIO_STATUS_V = 0x02,
- AUDIO_STATUS_VCFG = 0x04,
+ AUDIO_STATUS_V = 0x02,
+ AUDIO_STATUS_VCFG = 0x04,
AUDIO_STATUS_EMPHASIS = 0x08,
AUDIO_STATUS_COPYRIGHT = 0x10,
AUDIO_STATUS_NONAUDIO = 0x20,
AUDIO_STATUS_PROFESSIONAL = 0x40,
- AUDIO_STATUS_LEVEL = 0x80
+ AUDIO_STATUS_LEVEL = 0x80
};
struct {
@@ -85,7 +85,7 @@ struct {
static void r600_hdmi_calc_CTS(uint32_t clock, int *CTS, int N, int freq)
{
if (*CTS == 0)
- *CTS = clock*N/(128*freq)*1000;
+ *CTS = clock * N / (128 * freq) * 1000;
DRM_DEBUG("Using ACR timing N=%d CTS=%d for frequency %d\n",
N, *CTS, freq);
}
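
The helper above implements the HDMI ACR relation CTS = f_TMDS * N / (128 * f_s),
with clock assumed to be the mode clock in kHz as elsewhere in the driver (hence
the trailing * 1000). As a worked example for the common 74.25 MHz / 48 kHz case
with N = 6144: 74250 * 6144 / (128 * 48000) = 74 after integer truncation, so the
computed CTS becomes 74000 rather than the exact 74250.
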
@@ -131,11 +131,11 @@ static void r600_hdmi_infoframe_checksum(uint8_t packetType,
uint8_t length,
uint8_t *frame)
{
- int i;
- frame[0] = packetType + versionNumber + length;
- for (i = 1; i <= length; i++)
- frame[0] += frame[i];
- frame[0] = 0x100 - frame[0];
+ int i;
+ frame[0] = packetType + versionNumber + length;
+ for (i = 1; i <= length; i++)
+ frame[0] += frame[i];
+ frame[0] = 0x100 - frame[0];
}
/*
@@ -417,90 +417,141 @@ void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
WREG32_P(offset+R600_HDMI_CNTL, 0x04000000, ~0x04000000);
}
-/*
- * enable/disable the HDMI engine
- */
-void r600_hdmi_enable(struct drm_encoder *encoder, int enable)
+static int r600_hdmi_find_free_block(struct drm_device *dev)
+{
+ struct radeon_device *rdev = dev->dev_private;
+ struct drm_encoder *encoder;
+ struct radeon_encoder *radeon_encoder;
+ bool free_blocks[3] = { true, true, true };
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ radeon_encoder = to_radeon_encoder(encoder);
+ switch (radeon_encoder->hdmi_offset) {
+ case R600_HDMI_BLOCK1:
+ free_blocks[0] = false;
+ break;
+ case R600_HDMI_BLOCK2:
+ free_blocks[1] = false;
+ break;
+ case R600_HDMI_BLOCK3:
+ free_blocks[2] = false;
+ break;
+ }
+ }
+
+ if (rdev->family == CHIP_RS600 || rdev->family == CHIP_RS690) {
+ return free_blocks[0] ? R600_HDMI_BLOCK1 : 0;
+ } else if (rdev->family >= CHIP_R600) {
+ if (free_blocks[0])
+ return R600_HDMI_BLOCK1;
+ else if (free_blocks[1])
+ return R600_HDMI_BLOCK2;
+ }
+ return 0;
+}
+
+static void r600_hdmi_assign_block(struct drm_encoder *encoder)
{
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- uint32_t offset = to_radeon_encoder(encoder)->hdmi_offset;
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- if (!offset)
+ if (!dig) {
+ dev_err(rdev->dev, "Enabling HDMI on non-dig encoder\n");
return;
+ }
- DRM_DEBUG("%s HDMI interface @ 0x%04X\n", enable ? "Enabling" : "Disabling", offset);
-
- /* some version of atombios ignore the enable HDMI flag
- * so enabling/disabling HDMI was moved here for TMDS1+2 */
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- WREG32_P(AVIVO_TMDSA_CNTL, enable ? 0x4 : 0x0, ~0x4);
- WREG32(offset+R600_HDMI_ENABLE, enable ? 0x101 : 0x0);
- break;
-
- case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- WREG32_P(AVIVO_LVTMA_CNTL, enable ? 0x4 : 0x0, ~0x4);
- WREG32(offset+R600_HDMI_ENABLE, enable ? 0x105 : 0x0);
- break;
-
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- /* This part is doubtfull in my opinion */
- WREG32(offset+R600_HDMI_ENABLE, enable ? 0x110 : 0x0);
- break;
-
- default:
- DRM_ERROR("unknown HDMI output type\n");
- break;
+ if (ASIC_IS_DCE4(rdev)) {
+ /* TODO */
+ } else if (ASIC_IS_DCE3(rdev)) {
+ radeon_encoder->hdmi_offset = dig->dig_encoder ?
+ R600_HDMI_BLOCK3 : R600_HDMI_BLOCK1;
+ if (ASIC_IS_DCE32(rdev))
+ radeon_encoder->hdmi_config_offset = dig->dig_encoder ?
+ R600_HDMI_CONFIG2 : R600_HDMI_CONFIG1;
+ } else if (rdev->family >= CHIP_R600) {
+ radeon_encoder->hdmi_offset = r600_hdmi_find_free_block(dev);
}
}
/*
- * determin at which register offset the HDMI encoder is
+ * enable the HDMI engine
*/
-void r600_hdmi_init(struct drm_encoder *encoder)
+void r600_hdmi_enable(struct drm_encoder *encoder)
{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
- radeon_encoder->hdmi_offset = R600_HDMI_TMDS1;
- break;
-
- case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
- switch (r600_audio_tmds_index(encoder)) {
- case 0:
- radeon_encoder->hdmi_offset = R600_HDMI_TMDS1;
+ if (!radeon_encoder->hdmi_offset) {
+ r600_hdmi_assign_block(encoder);
+ if (!radeon_encoder->hdmi_offset) {
+ dev_warn(rdev->dev, "Could not find HDMI block for "
+ "0x%x encoder\n", radeon_encoder->encoder_id);
+ return;
+ }
+ }
+
+ if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
+ WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0x1, ~0x1);
+ } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
+ int offset = radeon_encoder->hdmi_offset;
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ WREG32_P(AVIVO_TMDSA_CNTL, 0x4, ~0x4);
+ WREG32(offset + R600_HDMI_ENABLE, 0x101);
break;
- case 1:
- radeon_encoder->hdmi_offset = R600_HDMI_TMDS2;
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ WREG32_P(AVIVO_LVTMA_CNTL, 0x4, ~0x4);
+ WREG32(offset + R600_HDMI_ENABLE, 0x105);
break;
default:
- radeon_encoder->hdmi_offset = 0;
+ dev_err(rdev->dev, "Unknown HDMI output type\n");
break;
}
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
- radeon_encoder->hdmi_offset = R600_HDMI_TMDS2;
- break;
+ }
- case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_LVTMA:
- radeon_encoder->hdmi_offset = R600_HDMI_DIG;
- break;
+ DRM_DEBUG("Enabling HDMI interface @ 0x%04X for encoder 0x%x\n",
+ radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
+}
- default:
- radeon_encoder->hdmi_offset = 0;
- break;
+/*
+ * disable the HDMI engine
+ */
+void r600_hdmi_disable(struct drm_encoder *encoder)
+{
+ struct drm_device *dev = encoder->dev;
+ struct radeon_device *rdev = dev->dev_private;
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+ if (!radeon_encoder->hdmi_offset) {
+ dev_err(rdev->dev, "Disabling not enabled HDMI\n");
+ return;
}
- DRM_DEBUG("using HDMI engine at offset 0x%04X for encoder 0x%x\n",
- radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
+ DRM_DEBUG("Disabling HDMI interface @ 0x%04X for encoder 0x%x\n",
+ radeon_encoder->hdmi_offset, radeon_encoder->encoder_id);
+
+ if (ASIC_IS_DCE32(rdev) && !ASIC_IS_DCE4(rdev)) {
+ WREG32_P(radeon_encoder->hdmi_config_offset + 0x4, 0, ~0x1);
+ } else if (rdev->family >= CHIP_R600 && !ASIC_IS_DCE3(rdev)) {
+ int offset = radeon_encoder->hdmi_offset;
+ switch (radeon_encoder->encoder_id) {
+ case ENCODER_OBJECT_ID_INTERNAL_KLDSCP_TMDS1:
+ WREG32_P(AVIVO_TMDSA_CNTL, 0, ~0x4);
+ WREG32(offset + R600_HDMI_ENABLE, 0);
+ break;
+ case ENCODER_OBJECT_ID_INTERNAL_LVTM1:
+ WREG32_P(AVIVO_LVTMA_CNTL, 0, ~0x4);
+ WREG32(offset + R600_HDMI_ENABLE, 0);
+ break;
+ default:
+ dev_err(rdev->dev, "Unknown HDMI output type\n");
+ break;
+ }
+ }
- /* TODO: make this configureable */
- radeon_encoder->hdmi_audio_workaround = 0;
+ radeon_encoder->hdmi_offset = 0;
+ radeon_encoder->hdmi_config_offset = 0;
}
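
The enable/disable split replaces the old r600_hdmi_enable(encoder, enable)
flag parameter. A hypothetical caller in an encoder dpms path (the function
below is illustrative and not part of this patch; only the two r600_hdmi_*
calls come from the code above):

	static void example_hdmi_dpms(struct drm_encoder *encoder, int mode)
	{
		if (mode == DRM_MODE_DPMS_ON)
			r600_hdmi_enable(encoder);	/* assigns an HDMI block on first use */
		else
			r600_hdmi_disable(encoder);	/* clears hdmi_offset/hdmi_config_offset */
	}
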
diff --git a/drivers/gpu/drm/radeon/r600_reg.h b/drivers/gpu/drm/radeon/r600_reg.h
index d0e28ffdeda..7b1d22370f6 100644
--- a/drivers/gpu/drm/radeon/r600_reg.h
+++ b/drivers/gpu/drm/radeon/r600_reg.h
@@ -152,9 +152,9 @@
#define R600_AUDIO_STATUS_BITS 0x73d8
/* HDMI base register addresses */
-#define R600_HDMI_TMDS1 0x7400
-#define R600_HDMI_TMDS2 0x7700
-#define R600_HDMI_DIG 0x7800
+#define R600_HDMI_BLOCK1 0x7400
+#define R600_HDMI_BLOCK2 0x7700
+#define R600_HDMI_BLOCK3 0x7800
/* HDMI registers */
#define R600_HDMI_ENABLE 0x00
@@ -185,4 +185,8 @@
#define R600_HDMI_AUDIO_DEBUG_2 0xe8
#define R600_HDMI_AUDIO_DEBUG_3 0xec
+/* HDMI additional config base register addresses */
+#define R600_HDMI_CONFIG1 0x7600
+#define R600_HDMI_CONFIG2 0x7a00
+
#endif
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 5b2e4d44282..59c1f8793e6 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -77,6 +77,55 @@
#define CB_COLOR0_FRAG 0x280e0
#define CB_COLOR0_MASK 0x28100
+#define SQ_ALU_CONST_CACHE_PS_0 0x28940
+#define SQ_ALU_CONST_CACHE_PS_1 0x28944
+#define SQ_ALU_CONST_CACHE_PS_2 0x28948
+#define SQ_ALU_CONST_CACHE_PS_3 0x2894c
+#define SQ_ALU_CONST_CACHE_PS_4 0x28950
+#define SQ_ALU_CONST_CACHE_PS_5 0x28954
+#define SQ_ALU_CONST_CACHE_PS_6 0x28958
+#define SQ_ALU_CONST_CACHE_PS_7 0x2895c
+#define SQ_ALU_CONST_CACHE_PS_8 0x28960
+#define SQ_ALU_CONST_CACHE_PS_9 0x28964
+#define SQ_ALU_CONST_CACHE_PS_10 0x28968
+#define SQ_ALU_CONST_CACHE_PS_11 0x2896c
+#define SQ_ALU_CONST_CACHE_PS_12 0x28970
+#define SQ_ALU_CONST_CACHE_PS_13 0x28974
+#define SQ_ALU_CONST_CACHE_PS_14 0x28978
+#define SQ_ALU_CONST_CACHE_PS_15 0x2897c
+#define SQ_ALU_CONST_CACHE_VS_0 0x28980
+#define SQ_ALU_CONST_CACHE_VS_1 0x28984
+#define SQ_ALU_CONST_CACHE_VS_2 0x28988
+#define SQ_ALU_CONST_CACHE_VS_3 0x2898c
+#define SQ_ALU_CONST_CACHE_VS_4 0x28990
+#define SQ_ALU_CONST_CACHE_VS_5 0x28994
+#define SQ_ALU_CONST_CACHE_VS_6 0x28998
+#define SQ_ALU_CONST_CACHE_VS_7 0x2899c
+#define SQ_ALU_CONST_CACHE_VS_8 0x289a0
+#define SQ_ALU_CONST_CACHE_VS_9 0x289a4
+#define SQ_ALU_CONST_CACHE_VS_10 0x289a8
+#define SQ_ALU_CONST_CACHE_VS_11 0x289ac
+#define SQ_ALU_CONST_CACHE_VS_12 0x289b0
+#define SQ_ALU_CONST_CACHE_VS_13 0x289b4
+#define SQ_ALU_CONST_CACHE_VS_14 0x289b8
+#define SQ_ALU_CONST_CACHE_VS_15 0x289bc
+#define SQ_ALU_CONST_CACHE_GS_0 0x289c0
+#define SQ_ALU_CONST_CACHE_GS_1 0x289c4
+#define SQ_ALU_CONST_CACHE_GS_2 0x289c8
+#define SQ_ALU_CONST_CACHE_GS_3 0x289cc
+#define SQ_ALU_CONST_CACHE_GS_4 0x289d0
+#define SQ_ALU_CONST_CACHE_GS_5 0x289d4
+#define SQ_ALU_CONST_CACHE_GS_6 0x289d8
+#define SQ_ALU_CONST_CACHE_GS_7 0x289dc
+#define SQ_ALU_CONST_CACHE_GS_8 0x289e0
+#define SQ_ALU_CONST_CACHE_GS_9 0x289e4
+#define SQ_ALU_CONST_CACHE_GS_10 0x289e8
+#define SQ_ALU_CONST_CACHE_GS_11 0x289ec
+#define SQ_ALU_CONST_CACHE_GS_12 0x289f0
+#define SQ_ALU_CONST_CACHE_GS_13 0x289f4
+#define SQ_ALU_CONST_CACHE_GS_14 0x289f8
+#define SQ_ALU_CONST_CACHE_GS_15 0x289fc
+
#define CONFIG_MEMSIZE 0x5428
#define CONFIG_CNTL 0x5424
#define CP_STAT 0x8680
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 829e26e8a4b..034218c3dbb 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -91,6 +91,8 @@ extern int radeon_tv;
extern int radeon_new_pll;
extern int radeon_dynpm;
extern int radeon_audio;
+extern int radeon_disp_priority;
+extern int radeon_hw_i2c;
/*
* Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -168,6 +170,7 @@ struct radeon_clock {
* Power management
*/
int radeon_pm_init(struct radeon_device *rdev);
+void radeon_pm_fini(struct radeon_device *rdev);
void radeon_pm_compute_clocks(struct radeon_device *rdev);
void radeon_combios_get_power_modes(struct radeon_device *rdev);
void radeon_atombios_get_power_modes(struct radeon_device *rdev);
@@ -687,6 +690,7 @@ struct radeon_pm {
bool downclocked;
int active_crtcs;
int req_vblank;
+ bool vblank_sync;
fixed20_12 max_bandwidth;
fixed20_12 igp_sideport_mclk;
fixed20_12 igp_system_mclk;
@@ -697,6 +701,7 @@ struct radeon_pm {
fixed20_12 ht_bandwidth;
fixed20_12 core_bandwidth;
fixed20_12 sclk;
+ fixed20_12 mclk;
fixed20_12 needed_bandwidth;
/* XXX: use a define for num power modes */
struct radeon_power_state power_state[8];
@@ -707,6 +712,7 @@ struct radeon_pm {
struct radeon_power_state *requested_power_state;
struct radeon_pm_clock_info *requested_clock_mode;
struct radeon_power_state *default_power_state;
+ struct radeon_i2c_chan *i2c_bus;
};
@@ -729,8 +735,6 @@ int radeon_debugfs_add_files(struct radeon_device *rdev,
struct drm_info_list *files,
unsigned nfiles);
int radeon_debugfs_fence_init(struct radeon_device *rdev);
-int r100_debugfs_rbbm_init(struct radeon_device *rdev);
-int r100_debugfs_cp_init(struct radeon_device *rdev);
/*
@@ -782,7 +786,7 @@ struct radeon_asic {
int (*set_surface_reg)(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
- int (*clear_surface_reg)(struct radeon_device *rdev, int reg);
+ void (*clear_surface_reg)(struct radeon_device *rdev, int reg);
void (*bandwidth_update)(struct radeon_device *rdev);
void (*hpd_init)(struct radeon_device *rdev);
void (*hpd_fini)(struct radeon_device *rdev);
@@ -862,6 +866,12 @@ union radeon_asic_config {
struct rv770_asic rv770;
};
+/*
+ * asic initialization from radeon_asic.c
+ */
+void radeon_agp_disable(struct radeon_device *rdev);
+int radeon_asic_init(struct radeon_device *rdev);
+
/*
* IOCTL.
@@ -1172,6 +1182,8 @@ extern void radeon_gart_restore(struct radeon_device *rdev);
extern int radeon_modeset_init(struct radeon_device *rdev);
extern void radeon_modeset_fini(struct radeon_device *rdev);
extern bool radeon_card_posted(struct radeon_device *rdev);
+extern void radeon_update_bandwidth_info(struct radeon_device *rdev);
+extern void radeon_update_display_priority(struct radeon_device *rdev);
extern bool radeon_boot_test_post_card(struct radeon_device *rdev);
extern int radeon_clocks_init(struct radeon_device *rdev);
extern void radeon_clocks_fini(struct radeon_device *rdev);
@@ -1188,51 +1200,6 @@ extern int radeon_resume_kms(struct drm_device *dev);
extern int radeon_suspend_kms(struct drm_device *dev, pm_message_t state);
/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
-struct r100_mc_save {
- u32 GENMO_WT;
- u32 CRTC_EXT_CNTL;
- u32 CRTC_GEN_CNTL;
- u32 CRTC2_GEN_CNTL;
- u32 CUR_OFFSET;
- u32 CUR2_OFFSET;
-};
-extern void r100_cp_disable(struct radeon_device *rdev);
-extern int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
-extern void r100_cp_fini(struct radeon_device *rdev);
-extern void r100_pci_gart_tlb_flush(struct radeon_device *rdev);
-extern int r100_pci_gart_init(struct radeon_device *rdev);
-extern void r100_pci_gart_fini(struct radeon_device *rdev);
-extern int r100_pci_gart_enable(struct radeon_device *rdev);
-extern void r100_pci_gart_disable(struct radeon_device *rdev);
-extern int r100_pci_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
-extern int r100_debugfs_mc_info_init(struct radeon_device *rdev);
-extern int r100_gui_wait_for_idle(struct radeon_device *rdev);
-extern void r100_ib_fini(struct radeon_device *rdev);
-extern int r100_ib_init(struct radeon_device *rdev);
-extern void r100_irq_disable(struct radeon_device *rdev);
-extern int r100_irq_set(struct radeon_device *rdev);
-extern void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save);
-extern void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save);
-extern void r100_vram_init_sizes(struct radeon_device *rdev);
-extern void r100_wb_disable(struct radeon_device *rdev);
-extern void r100_wb_fini(struct radeon_device *rdev);
-extern int r100_wb_init(struct radeon_device *rdev);
-extern void r100_hdp_reset(struct radeon_device *rdev);
-extern int r100_rb2d_reset(struct radeon_device *rdev);
-extern int r100_cp_reset(struct radeon_device *rdev);
-extern void r100_vga_render_disable(struct radeon_device *rdev);
-extern int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt,
- struct radeon_bo *robj);
-extern int r100_cs_parse_packet0(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt,
- const unsigned *auth, unsigned n,
- radeon_packet0_check_t check);
-extern int r100_cs_packet_parse(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt,
- unsigned idx);
-extern void r100_enable_bm(struct radeon_device *rdev);
-extern void r100_set_common_regs(struct radeon_device *rdev);
/* rv200,rv250,rv280 */
extern void r200_set_safe_registers(struct radeon_device *rdev);
@@ -1322,7 +1289,8 @@ extern int r600_audio_tmds_index(struct drm_encoder *encoder);
extern void r600_audio_set_clock(struct drm_encoder *encoder, int clock);
extern void r600_audio_fini(struct radeon_device *rdev);
extern void r600_hdmi_init(struct drm_encoder *encoder);
-extern void r600_hdmi_enable(struct drm_encoder *encoder, int enable);
+extern void r600_hdmi_enable(struct drm_encoder *encoder);
+extern void r600_hdmi_disable(struct drm_encoder *encoder);
extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
extern int r600_hdmi_buffer_status_changed(struct drm_encoder *encoder);
extern void r600_hdmi_update_audio_settings(struct drm_encoder *encoder,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.c b/drivers/gpu/drm/radeon/radeon_asic.c
new file mode 100644
index 00000000000..a4b4bc9fa32
--- /dev/null
+++ b/drivers/gpu/drm/radeon/radeon_asic.c
@@ -0,0 +1,772 @@
+/*
+ * Copyright 2008 Advanced Micro Devices, Inc.
+ * Copyright 2008 Red Hat Inc.
+ * Copyright 2009 Jerome Glisse.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
+ * Jerome Glisse
+ */
+
+#include <linux/console.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/radeon_drm.h>
+#include <linux/vgaarb.h>
+#include <linux/vga_switcheroo.h>
+#include "radeon_reg.h"
+#include "radeon.h"
+#include "radeon_asic.h"
+#include "atom.h"
+
+/*
+ * Registers accessors functions.
+ */
+static uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
+{
+ DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
+ BUG_ON(1);
+ return 0;
+}
+
+static void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
+{
+ DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
+ reg, v);
+ BUG_ON(1);
+}
+
+static void radeon_register_accessor_init(struct radeon_device *rdev)
+{
+ rdev->mc_rreg = &radeon_invalid_rreg;
+ rdev->mc_wreg = &radeon_invalid_wreg;
+ rdev->pll_rreg = &radeon_invalid_rreg;
+ rdev->pll_wreg = &radeon_invalid_wreg;
+ rdev->pciep_rreg = &radeon_invalid_rreg;
+ rdev->pciep_wreg = &radeon_invalid_wreg;
+
+ /* Don't change order as we are overriding accessor. */
+ if (rdev->family < CHIP_RV515) {
+ rdev->pcie_reg_mask = 0xff;
+ } else {
+ rdev->pcie_reg_mask = 0x7ff;
+ }
+ /* FIXME: not sure here */
+ if (rdev->family <= CHIP_R580) {
+ rdev->pll_rreg = &r100_pll_rreg;
+ rdev->pll_wreg = &r100_pll_wreg;
+ }
+ if (rdev->family >= CHIP_R420) {
+ rdev->mc_rreg = &r420_mc_rreg;
+ rdev->mc_wreg = &r420_mc_wreg;
+ }
+ if (rdev->family >= CHIP_RV515) {
+ rdev->mc_rreg = &rv515_mc_rreg;
+ rdev->mc_wreg = &rv515_mc_wreg;
+ }
+ if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
+ rdev->mc_rreg = &rs400_mc_rreg;
+ rdev->mc_wreg = &rs400_mc_wreg;
+ }
+ if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
+ rdev->mc_rreg = &rs690_mc_rreg;
+ rdev->mc_wreg = &rs690_mc_wreg;
+ }
+ if (rdev->family == CHIP_RS600) {
+ rdev->mc_rreg = &rs600_mc_rreg;
+ rdev->mc_wreg = &rs600_mc_wreg;
+ }
+ if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_RV740)) {
+ rdev->pciep_rreg = &r600_pciep_rreg;
+ rdev->pciep_wreg = &r600_pciep_wreg;
+ }
+}
+
+
+/* helper to disable agp */
+void radeon_agp_disable(struct radeon_device *rdev)
+{
+ rdev->flags &= ~RADEON_IS_AGP;
+ if (rdev->family >= CHIP_R600) {
+ DRM_INFO("Forcing AGP to PCIE mode\n");
+ rdev->flags |= RADEON_IS_PCIE;
+ } else if (rdev->family >= CHIP_RV515 ||
+ rdev->family == CHIP_RV380 ||
+ rdev->family == CHIP_RV410 ||
+ rdev->family == CHIP_R423) {
+ DRM_INFO("Forcing AGP to PCIE mode\n");
+ rdev->flags |= RADEON_IS_PCIE;
+ rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
+ rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
+ } else {
+ DRM_INFO("Forcing AGP to PCI mode\n");
+ rdev->flags |= RADEON_IS_PCI;
+ rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
+ rdev->asic->gart_set_page = &r100_pci_gart_set_page;
+ }
+ rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
+}
+
+/*
+ * ASIC
+ */
+static struct radeon_asic r100_asic = {
+ .init = &r100_init,
+ .fini = &r100_fini,
+ .suspend = &r100_suspend,
+ .resume = &r100_resume,
+ .vga_set_state = &r100_vga_set_state,
+ .gpu_reset = &r100_gpu_reset,
+ .gart_tlb_flush = &r100_pci_gart_tlb_flush,
+ .gart_set_page = &r100_pci_gart_set_page,
+ .cp_commit = &r100_cp_commit,
+ .ring_start = &r100_ring_start,
+ .ring_test = &r100_ring_test,
+ .ring_ib_execute = &r100_ring_ib_execute,
+ .irq_set = &r100_irq_set,
+ .irq_process = &r100_irq_process,
+ .get_vblank_counter = &r100_get_vblank_counter,
+ .fence_ring_emit = &r100_fence_ring_emit,
+ .cs_parse = &r100_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = NULL,
+ .copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_legacy_get_engine_clock,
+ .set_engine_clock = &radeon_legacy_set_engine_clock,
+ .get_memory_clock = &radeon_legacy_get_memory_clock,
+ .set_memory_clock = NULL,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = &radeon_legacy_set_clock_gating,
+ .set_surface_reg = r100_set_surface_reg,
+ .clear_surface_reg = r100_clear_surface_reg,
+ .bandwidth_update = &r100_bandwidth_update,
+ .hpd_init = &r100_hpd_init,
+ .hpd_fini = &r100_hpd_fini,
+ .hpd_sense = &r100_hpd_sense,
+ .hpd_set_polarity = &r100_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
+};
+
+static struct radeon_asic r200_asic = {
+ .init = &r100_init,
+ .fini = &r100_fini,
+ .suspend = &r100_suspend,
+ .resume = &r100_resume,
+ .vga_set_state = &r100_vga_set_state,
+ .gpu_reset = &r100_gpu_reset,
+ .gart_tlb_flush = &r100_pci_gart_tlb_flush,
+ .gart_set_page = &r100_pci_gart_set_page,
+ .cp_commit = &r100_cp_commit,
+ .ring_start = &r100_ring_start,
+ .ring_test = &r100_ring_test,
+ .ring_ib_execute = &r100_ring_ib_execute,
+ .irq_set = &r100_irq_set,
+ .irq_process = &r100_irq_process,
+ .get_vblank_counter = &r100_get_vblank_counter,
+ .fence_ring_emit = &r100_fence_ring_emit,
+ .cs_parse = &r100_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = &r200_copy_dma,
+ .copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_legacy_get_engine_clock,
+ .set_engine_clock = &radeon_legacy_set_engine_clock,
+ .get_memory_clock = &radeon_legacy_get_memory_clock,
+ .set_memory_clock = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = &radeon_legacy_set_clock_gating,
+ .set_surface_reg = r100_set_surface_reg,
+ .clear_surface_reg = r100_clear_surface_reg,
+ .bandwidth_update = &r100_bandwidth_update,
+ .hpd_init = &r100_hpd_init,
+ .hpd_fini = &r100_hpd_fini,
+ .hpd_sense = &r100_hpd_sense,
+ .hpd_set_polarity = &r100_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
+};
+
+static struct radeon_asic r300_asic = {
+ .init = &r300_init,
+ .fini = &r300_fini,
+ .suspend = &r300_suspend,
+ .resume = &r300_resume,
+ .vga_set_state = &r100_vga_set_state,
+ .gpu_reset = &r300_gpu_reset,
+ .gart_tlb_flush = &r100_pci_gart_tlb_flush,
+ .gart_set_page = &r100_pci_gart_set_page,
+ .cp_commit = &r100_cp_commit,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ring_ib_execute = &r100_ring_ib_execute,
+ .irq_set = &r100_irq_set,
+ .irq_process = &r100_irq_process,
+ .get_vblank_counter = &r100_get_vblank_counter,
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = &r200_copy_dma,
+ .copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_legacy_get_engine_clock,
+ .set_engine_clock = &radeon_legacy_set_engine_clock,
+ .get_memory_clock = &radeon_legacy_get_memory_clock,
+ .set_memory_clock = NULL,
+ .get_pcie_lanes = &rv370_get_pcie_lanes,
+ .set_pcie_lanes = &rv370_set_pcie_lanes,
+ .set_clock_gating = &radeon_legacy_set_clock_gating,
+ .set_surface_reg = r100_set_surface_reg,
+ .clear_surface_reg = r100_clear_surface_reg,
+ .bandwidth_update = &r100_bandwidth_update,
+ .hpd_init = &r100_hpd_init,
+ .hpd_fini = &r100_hpd_fini,
+ .hpd_sense = &r100_hpd_sense,
+ .hpd_set_polarity = &r100_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
+};
+
+static struct radeon_asic r300_asic_pcie = {
+ .init = &r300_init,
+ .fini = &r300_fini,
+ .suspend = &r300_suspend,
+ .resume = &r300_resume,
+ .vga_set_state = &r100_vga_set_state,
+ .gpu_reset = &r300_gpu_reset,
+ .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .gart_set_page = &rv370_pcie_gart_set_page,
+ .cp_commit = &r100_cp_commit,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ring_ib_execute = &r100_ring_ib_execute,
+ .irq_set = &r100_irq_set,
+ .irq_process = &r100_irq_process,
+ .get_vblank_counter = &r100_get_vblank_counter,
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = &r200_copy_dma,
+ .copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_legacy_get_engine_clock,
+ .set_engine_clock = &radeon_legacy_set_engine_clock,
+ .get_memory_clock = &radeon_legacy_get_memory_clock,
+ .set_memory_clock = NULL,
+ .set_pcie_lanes = &rv370_set_pcie_lanes,
+ .set_clock_gating = &radeon_legacy_set_clock_gating,
+ .set_surface_reg = r100_set_surface_reg,
+ .clear_surface_reg = r100_clear_surface_reg,
+ .bandwidth_update = &r100_bandwidth_update,
+ .hpd_init = &r100_hpd_init,
+ .hpd_fini = &r100_hpd_fini,
+ .hpd_sense = &r100_hpd_sense,
+ .hpd_set_polarity = &r100_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
+};
+
+static struct radeon_asic r420_asic = {
+ .init = &r420_init,
+ .fini = &r420_fini,
+ .suspend = &r420_suspend,
+ .resume = &r420_resume,
+ .vga_set_state = &r100_vga_set_state,
+ .gpu_reset = &r300_gpu_reset,
+ .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .gart_set_page = &rv370_pcie_gart_set_page,
+ .cp_commit = &r100_cp_commit,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ring_ib_execute = &r100_ring_ib_execute,
+ .irq_set = &r100_irq_set,
+ .irq_process = &r100_irq_process,
+ .get_vblank_counter = &r100_get_vblank_counter,
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = &r200_copy_dma,
+ .copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &rv370_get_pcie_lanes,
+ .set_pcie_lanes = &rv370_set_pcie_lanes,
+ .set_clock_gating = &radeon_atom_set_clock_gating,
+ .set_surface_reg = r100_set_surface_reg,
+ .clear_surface_reg = r100_clear_surface_reg,
+ .bandwidth_update = &r100_bandwidth_update,
+ .hpd_init = &r100_hpd_init,
+ .hpd_fini = &r100_hpd_fini,
+ .hpd_sense = &r100_hpd_sense,
+ .hpd_set_polarity = &r100_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
+};
+
+static struct radeon_asic rs400_asic = {
+ .init = &rs400_init,
+ .fini = &rs400_fini,
+ .suspend = &rs400_suspend,
+ .resume = &rs400_resume,
+ .vga_set_state = &r100_vga_set_state,
+ .gpu_reset = &r300_gpu_reset,
+ .gart_tlb_flush = &rs400_gart_tlb_flush,
+ .gart_set_page = &rs400_gart_set_page,
+ .cp_commit = &r100_cp_commit,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ring_ib_execute = &r100_ring_ib_execute,
+ .irq_set = &r100_irq_set,
+ .irq_process = &r100_irq_process,
+ .get_vblank_counter = &r100_get_vblank_counter,
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = &r200_copy_dma,
+ .copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_legacy_get_engine_clock,
+ .set_engine_clock = &radeon_legacy_set_engine_clock,
+ .get_memory_clock = &radeon_legacy_get_memory_clock,
+ .set_memory_clock = NULL,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = &radeon_legacy_set_clock_gating,
+ .set_surface_reg = r100_set_surface_reg,
+ .clear_surface_reg = r100_clear_surface_reg,
+ .bandwidth_update = &r100_bandwidth_update,
+ .hpd_init = &r100_hpd_init,
+ .hpd_fini = &r100_hpd_fini,
+ .hpd_sense = &r100_hpd_sense,
+ .hpd_set_polarity = &r100_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
+};
+
+static struct radeon_asic rs600_asic = {
+ .init = &rs600_init,
+ .fini = &rs600_fini,
+ .suspend = &rs600_suspend,
+ .resume = &rs600_resume,
+ .vga_set_state = &r100_vga_set_state,
+ .gpu_reset = &r300_gpu_reset,
+ .gart_tlb_flush = &rs600_gart_tlb_flush,
+ .gart_set_page = &rs600_gart_set_page,
+ .cp_commit = &r100_cp_commit,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ring_ib_execute = &r100_ring_ib_execute,
+ .irq_set = &rs600_irq_set,
+ .irq_process = &rs600_irq_process,
+ .get_vblank_counter = &rs600_get_vblank_counter,
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = &r200_copy_dma,
+ .copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = &radeon_atom_set_clock_gating,
+ .set_surface_reg = r100_set_surface_reg,
+ .clear_surface_reg = r100_clear_surface_reg,
+ .bandwidth_update = &rs600_bandwidth_update,
+ .hpd_init = &rs600_hpd_init,
+ .hpd_fini = &rs600_hpd_fini,
+ .hpd_sense = &rs600_hpd_sense,
+ .hpd_set_polarity = &rs600_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
+};
+
+static struct radeon_asic rs690_asic = {
+ .init = &rs690_init,
+ .fini = &rs690_fini,
+ .suspend = &rs690_suspend,
+ .resume = &rs690_resume,
+ .vga_set_state = &r100_vga_set_state,
+ .gpu_reset = &r300_gpu_reset,
+ .gart_tlb_flush = &rs400_gart_tlb_flush,
+ .gart_set_page = &rs400_gart_set_page,
+ .cp_commit = &r100_cp_commit,
+ .ring_start = &r300_ring_start,
+ .ring_test = &r100_ring_test,
+ .ring_ib_execute = &r100_ring_ib_execute,
+ .irq_set = &rs600_irq_set,
+ .irq_process = &rs600_irq_process,
+ .get_vblank_counter = &rs600_get_vblank_counter,
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = &r200_copy_dma,
+ .copy = &r200_copy_dma,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = &radeon_atom_set_clock_gating,
+ .set_surface_reg = r100_set_surface_reg,
+ .clear_surface_reg = r100_clear_surface_reg,
+ .bandwidth_update = &rs690_bandwidth_update,
+ .hpd_init = &rs600_hpd_init,
+ .hpd_fini = &rs600_hpd_fini,
+ .hpd_sense = &rs600_hpd_sense,
+ .hpd_set_polarity = &rs600_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
+};
+
+static struct radeon_asic rv515_asic = {
+ .init = &rv515_init,
+ .fini = &rv515_fini,
+ .suspend = &rv515_suspend,
+ .resume = &rv515_resume,
+ .vga_set_state = &r100_vga_set_state,
+ .gpu_reset = &rv515_gpu_reset,
+ .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .gart_set_page = &rv370_pcie_gart_set_page,
+ .cp_commit = &r100_cp_commit,
+ .ring_start = &rv515_ring_start,
+ .ring_test = &r100_ring_test,
+ .ring_ib_execute = &r100_ring_ib_execute,
+ .irq_set = &rs600_irq_set,
+ .irq_process = &rs600_irq_process,
+ .get_vblank_counter = &rs600_get_vblank_counter,
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = &r200_copy_dma,
+ .copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &rv370_get_pcie_lanes,
+ .set_pcie_lanes = &rv370_set_pcie_lanes,
+ .set_clock_gating = &radeon_atom_set_clock_gating,
+ .set_surface_reg = r100_set_surface_reg,
+ .clear_surface_reg = r100_clear_surface_reg,
+ .bandwidth_update = &rv515_bandwidth_update,
+ .hpd_init = &rs600_hpd_init,
+ .hpd_fini = &rs600_hpd_fini,
+ .hpd_sense = &rs600_hpd_sense,
+ .hpd_set_polarity = &rs600_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
+};
+
+static struct radeon_asic r520_asic = {
+ .init = &r520_init,
+ .fini = &rv515_fini,
+ .suspend = &rv515_suspend,
+ .resume = &r520_resume,
+ .vga_set_state = &r100_vga_set_state,
+ .gpu_reset = &rv515_gpu_reset,
+ .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
+ .gart_set_page = &rv370_pcie_gart_set_page,
+ .cp_commit = &r100_cp_commit,
+ .ring_start = &rv515_ring_start,
+ .ring_test = &r100_ring_test,
+ .ring_ib_execute = &r100_ring_ib_execute,
+ .irq_set = &rs600_irq_set,
+ .irq_process = &rs600_irq_process,
+ .get_vblank_counter = &rs600_get_vblank_counter,
+ .fence_ring_emit = &r300_fence_ring_emit,
+ .cs_parse = &r300_cs_parse,
+ .copy_blit = &r100_copy_blit,
+ .copy_dma = &r200_copy_dma,
+ .copy = &r100_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &rv370_get_pcie_lanes,
+ .set_pcie_lanes = &rv370_set_pcie_lanes,
+ .set_clock_gating = &radeon_atom_set_clock_gating,
+ .set_surface_reg = r100_set_surface_reg,
+ .clear_surface_reg = r100_clear_surface_reg,
+ .bandwidth_update = &rv515_bandwidth_update,
+ .hpd_init = &rs600_hpd_init,
+ .hpd_fini = &rs600_hpd_fini,
+ .hpd_sense = &rs600_hpd_sense,
+ .hpd_set_polarity = &rs600_hpd_set_polarity,
+ .ioctl_wait_idle = NULL,
+};
+
+static struct radeon_asic r600_asic = {
+ .init = &r600_init,
+ .fini = &r600_fini,
+ .suspend = &r600_suspend,
+ .resume = &r600_resume,
+ .cp_commit = &r600_cp_commit,
+ .vga_set_state = &r600_vga_set_state,
+ .gpu_reset = &r600_gpu_reset,
+ .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
+ .gart_set_page = &rs600_gart_set_page,
+ .ring_test = &r600_ring_test,
+ .ring_ib_execute = &r600_ring_ib_execute,
+ .irq_set = &r600_irq_set,
+ .irq_process = &r600_irq_process,
+ .get_vblank_counter = &rs600_get_vblank_counter,
+ .fence_ring_emit = &r600_fence_ring_emit,
+ .cs_parse = &r600_cs_parse,
+ .copy_blit = &r600_copy_blit,
+ .copy_dma = &r600_copy_blit,
+ .copy = &r600_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &rv370_get_pcie_lanes,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = NULL,
+ .set_surface_reg = r600_set_surface_reg,
+ .clear_surface_reg = r600_clear_surface_reg,
+ .bandwidth_update = &rv515_bandwidth_update,
+ .hpd_init = &r600_hpd_init,
+ .hpd_fini = &r600_hpd_fini,
+ .hpd_sense = &r600_hpd_sense,
+ .hpd_set_polarity = &r600_hpd_set_polarity,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
+};
+
+static struct radeon_asic rs780_asic = {
+ .init = &r600_init,
+ .fini = &r600_fini,
+ .suspend = &r600_suspend,
+ .resume = &r600_resume,
+ .cp_commit = &r600_cp_commit,
+ .vga_set_state = &r600_vga_set_state,
+ .gpu_reset = &r600_gpu_reset,
+ .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
+ .gart_set_page = &rs600_gart_set_page,
+ .ring_test = &r600_ring_test,
+ .ring_ib_execute = &r600_ring_ib_execute,
+ .irq_set = &r600_irq_set,
+ .irq_process = &r600_irq_process,
+ .get_vblank_counter = &rs600_get_vblank_counter,
+ .fence_ring_emit = &r600_fence_ring_emit,
+ .cs_parse = &r600_cs_parse,
+ .copy_blit = &r600_copy_blit,
+ .copy_dma = &r600_copy_blit,
+ .copy = &r600_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = NULL,
+ .set_memory_clock = NULL,
+ .get_pcie_lanes = NULL,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = NULL,
+ .set_surface_reg = r600_set_surface_reg,
+ .clear_surface_reg = r600_clear_surface_reg,
+ .bandwidth_update = &rs690_bandwidth_update,
+ .hpd_init = &r600_hpd_init,
+ .hpd_fini = &r600_hpd_fini,
+ .hpd_sense = &r600_hpd_sense,
+ .hpd_set_polarity = &r600_hpd_set_polarity,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
+};
+
+static struct radeon_asic rv770_asic = {
+ .init = &rv770_init,
+ .fini = &rv770_fini,
+ .suspend = &rv770_suspend,
+ .resume = &rv770_resume,
+ .cp_commit = &r600_cp_commit,
+ .gpu_reset = &rv770_gpu_reset,
+ .vga_set_state = &r600_vga_set_state,
+ .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
+ .gart_set_page = &rs600_gart_set_page,
+ .ring_test = &r600_ring_test,
+ .ring_ib_execute = &r600_ring_ib_execute,
+ .irq_set = &r600_irq_set,
+ .irq_process = &r600_irq_process,
+ .get_vblank_counter = &rs600_get_vblank_counter,
+ .fence_ring_emit = &r600_fence_ring_emit,
+ .cs_parse = &r600_cs_parse,
+ .copy_blit = &r600_copy_blit,
+ .copy_dma = &r600_copy_blit,
+ .copy = &r600_copy_blit,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .get_pcie_lanes = &rv370_get_pcie_lanes,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = &radeon_atom_set_clock_gating,
+ .set_surface_reg = r600_set_surface_reg,
+ .clear_surface_reg = r600_clear_surface_reg,
+ .bandwidth_update = &rv515_bandwidth_update,
+ .hpd_init = &r600_hpd_init,
+ .hpd_fini = &r600_hpd_fini,
+ .hpd_sense = &r600_hpd_sense,
+ .hpd_set_polarity = &r600_hpd_set_polarity,
+ .ioctl_wait_idle = r600_ioctl_wait_idle,
+};
+
+static struct radeon_asic evergreen_asic = {
+ .init = &evergreen_init,
+ .fini = &evergreen_fini,
+ .suspend = &evergreen_suspend,
+ .resume = &evergreen_resume,
+ .cp_commit = NULL,
+ .gpu_reset = &evergreen_gpu_reset,
+ .vga_set_state = &r600_vga_set_state,
+ .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
+ .gart_set_page = &rs600_gart_set_page,
+ .ring_test = NULL,
+ .ring_ib_execute = NULL,
+ .irq_set = NULL,
+ .irq_process = NULL,
+ .get_vblank_counter = NULL,
+ .fence_ring_emit = NULL,
+ .cs_parse = NULL,
+ .copy_blit = NULL,
+ .copy_dma = NULL,
+ .copy = NULL,
+ .get_engine_clock = &radeon_atom_get_engine_clock,
+ .set_engine_clock = &radeon_atom_set_engine_clock,
+ .get_memory_clock = &radeon_atom_get_memory_clock,
+ .set_memory_clock = &radeon_atom_set_memory_clock,
+ .set_pcie_lanes = NULL,
+ .set_clock_gating = NULL,
+ .set_surface_reg = r600_set_surface_reg,
+ .clear_surface_reg = r600_clear_surface_reg,
+ .bandwidth_update = &evergreen_bandwidth_update,
+ .hpd_init = &evergreen_hpd_init,
+ .hpd_fini = &evergreen_hpd_fini,
+ .hpd_sense = &evergreen_hpd_sense,
+ .hpd_set_polarity = &evergreen_hpd_set_polarity,
+};
+
+int radeon_asic_init(struct radeon_device *rdev)
+{
+ radeon_register_accessor_init(rdev);
+ switch (rdev->family) {
+ case CHIP_R100:
+ case CHIP_RV100:
+ case CHIP_RS100:
+ case CHIP_RV200:
+ case CHIP_RS200:
+ rdev->asic = &r100_asic;
+ break;
+ case CHIP_R200:
+ case CHIP_RV250:
+ case CHIP_RS300:
+ case CHIP_RV280:
+ rdev->asic = &r200_asic;
+ break;
+ case CHIP_R300:
+ case CHIP_R350:
+ case CHIP_RV350:
+ case CHIP_RV380:
+ if (rdev->flags & RADEON_IS_PCIE)
+ rdev->asic = &r300_asic_pcie;
+ else
+ rdev->asic = &r300_asic;
+ break;
+ case CHIP_R420:
+ case CHIP_R423:
+ case CHIP_RV410:
+ rdev->asic = &r420_asic;
+ break;
+ case CHIP_RS400:
+ case CHIP_RS480:
+ rdev->asic = &rs400_asic;
+ break;
+ case CHIP_RS600:
+ rdev->asic = &rs600_asic;
+ break;
+ case CHIP_RS690:
+ case CHIP_RS740:
+ rdev->asic = &rs690_asic;
+ break;
+ case CHIP_RV515:
+ rdev->asic = &rv515_asic;
+ break;
+ case CHIP_R520:
+ case CHIP_RV530:
+ case CHIP_RV560:
+ case CHIP_RV570:
+ case CHIP_R580:
+ rdev->asic = &r520_asic;
+ break;
+ case CHIP_R600:
+ case CHIP_RV610:
+ case CHIP_RV630:
+ case CHIP_RV620:
+ case CHIP_RV635:
+ case CHIP_RV670:
+ rdev->asic = &r600_asic;
+ break;
+ case CHIP_RS780:
+ case CHIP_RS880:
+ rdev->asic = &rs780_asic;
+ break;
+ case CHIP_RV770:
+ case CHIP_RV730:
+ case CHIP_RV710:
+ case CHIP_RV740:
+ rdev->asic = &rv770_asic;
+ break;
+ case CHIP_CEDAR:
+ case CHIP_REDWOOD:
+ case CHIP_JUNIPER:
+ case CHIP_CYPRESS:
+ case CHIP_HEMLOCK:
+ rdev->asic = &evergreen_asic;
+ break;
+ default:
+ /* FIXME: not supported yet */
+ return -EINVAL;
+ }
+
+ if (rdev->flags & RADEON_IS_IGP) {
+ rdev->asic->get_memory_clock = NULL;
+ rdev->asic->set_memory_clock = NULL;
+ }
+
+ /* set the number of crtcs */
+ if (rdev->flags & RADEON_SINGLE_CRTC)
+ rdev->num_crtc = 1;
+ else {
+ if (ASIC_IS_DCE4(rdev))
+ rdev->num_crtc = 6;
+ else
+ rdev->num_crtc = 2;
+ }
+
+ return 0;
+}
+
+/*
+ * Wrapper around modesetting bits. Move to radeon_clocks.c?
+ */
+int radeon_clocks_init(struct radeon_device *rdev)
+{
+ int r;
+
+ r = radeon_static_clocks_init(rdev->ddev);
+ if (r) {
+ return r;
+ }
+ DRM_INFO("Clocks initialized !\n");
+ return 0;
+}
+
+void radeon_clocks_fini(struct radeon_device *rdev)
+{
+}
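radeon_asic_init() above is plain dispatch-table selection: each chip family is mapped to one struct of function pointers, and the rest of the driver reaches the hardware only through rdev->asic. A minimal sketch of the calling side, for illustration only (the wrapper name mirrors the style of the accessor macros in radeon.h; radeon_hw_bringup() is a hypothetical caller, not part of this patch):

        /* hypothetical illustration of how the per-ASIC table is consumed */
        #define radeon_init(rdev) (rdev)->asic->init((rdev))

        static int radeon_hw_bringup(struct radeon_device *rdev)
        {
                int r;

                r = radeon_asic_init(rdev);     /* pick the table for rdev->family */
                if (r)
                        return r;               /* unknown family, -EINVAL above */
                return radeon_init(rdev);       /* dispatches to r100_init(), r600_init(), ... */
        }
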
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index d3a157b2bcb..a0b8280663d 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -45,10 +45,18 @@ void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
/*
* r100,rv100,rs100,rv200,rs200
*/
-extern int r100_init(struct radeon_device *rdev);
-extern void r100_fini(struct radeon_device *rdev);
-extern int r100_suspend(struct radeon_device *rdev);
-extern int r100_resume(struct radeon_device *rdev);
+struct r100_mc_save {
+ u32 GENMO_WT;
+ u32 CRTC_EXT_CNTL;
+ u32 CRTC_GEN_CNTL;
+ u32 CRTC2_GEN_CNTL;
+ u32 CUR_OFFSET;
+ u32 CUR2_OFFSET;
+};
+int r100_init(struct radeon_device *rdev);
+void r100_fini(struct radeon_device *rdev);
+int r100_suspend(struct radeon_device *rdev);
+int r100_resume(struct radeon_device *rdev);
uint32_t r100_mm_rreg(struct radeon_device *rdev, uint32_t reg);
void r100_mm_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void r100_vga_set_state(struct radeon_device *rdev, bool state);
@@ -73,7 +81,7 @@ int r100_copy_blit(struct radeon_device *rdev,
int r100_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
-int r100_clear_surface_reg(struct radeon_device *rdev, int reg);
+void r100_clear_surface_reg(struct radeon_device *rdev, int reg);
void r100_bandwidth_update(struct radeon_device *rdev);
void r100_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r100_ring_test(struct radeon_device *rdev);
@@ -82,44 +90,42 @@ void r100_hpd_fini(struct radeon_device *rdev);
bool r100_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void r100_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
-
-static struct radeon_asic r100_asic = {
- .init = &r100_init,
- .fini = &r100_fini,
- .suspend = &r100_suspend,
- .resume = &r100_resume,
- .vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r100_gpu_reset,
- .gart_tlb_flush = &r100_pci_gart_tlb_flush,
- .gart_set_page = &r100_pci_gart_set_page,
- .cp_commit = &r100_cp_commit,
- .ring_start = &r100_ring_start,
- .ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
- .irq_set = &r100_irq_set,
- .irq_process = &r100_irq_process,
- .get_vblank_counter = &r100_get_vblank_counter,
- .fence_ring_emit = &r100_fence_ring_emit,
- .cs_parse = &r100_cs_parse,
- .copy_blit = &r100_copy_blit,
- .copy_dma = NULL,
- .copy = &r100_copy_blit,
- .get_engine_clock = &radeon_legacy_get_engine_clock,
- .set_engine_clock = &radeon_legacy_set_engine_clock,
- .get_memory_clock = &radeon_legacy_get_memory_clock,
- .set_memory_clock = NULL,
- .get_pcie_lanes = NULL,
- .set_pcie_lanes = NULL,
- .set_clock_gating = &radeon_legacy_set_clock_gating,
- .set_surface_reg = r100_set_surface_reg,
- .clear_surface_reg = r100_clear_surface_reg,
- .bandwidth_update = &r100_bandwidth_update,
- .hpd_init = &r100_hpd_init,
- .hpd_fini = &r100_hpd_fini,
- .hpd_sense = &r100_hpd_sense,
- .hpd_set_polarity = &r100_hpd_set_polarity,
- .ioctl_wait_idle = NULL,
-};
+int r100_debugfs_rbbm_init(struct radeon_device *rdev);
+int r100_debugfs_cp_init(struct radeon_device *rdev);
+void r100_cp_disable(struct radeon_device *rdev);
+int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
+void r100_cp_fini(struct radeon_device *rdev);
+int r100_pci_gart_init(struct radeon_device *rdev);
+void r100_pci_gart_fini(struct radeon_device *rdev);
+int r100_pci_gart_enable(struct radeon_device *rdev);
+void r100_pci_gart_disable(struct radeon_device *rdev);
+int r100_debugfs_mc_info_init(struct radeon_device *rdev);
+int r100_gui_wait_for_idle(struct radeon_device *rdev);
+void r100_ib_fini(struct radeon_device *rdev);
+int r100_ib_init(struct radeon_device *rdev);
+void r100_irq_disable(struct radeon_device *rdev);
+void r100_mc_stop(struct radeon_device *rdev, struct r100_mc_save *save);
+void r100_mc_resume(struct radeon_device *rdev, struct r100_mc_save *save);
+void r100_vram_init_sizes(struct radeon_device *rdev);
+void r100_wb_disable(struct radeon_device *rdev);
+void r100_wb_fini(struct radeon_device *rdev);
+int r100_wb_init(struct radeon_device *rdev);
+void r100_hdp_reset(struct radeon_device *rdev);
+int r100_rb2d_reset(struct radeon_device *rdev);
+int r100_cp_reset(struct radeon_device *rdev);
+void r100_vga_render_disable(struct radeon_device *rdev);
+int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ struct radeon_bo *robj);
+int r100_cs_parse_packet0(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ const unsigned *auth, unsigned n,
+ radeon_packet0_check_t check);
+int r100_cs_packet_parse(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx);
+void r100_enable_bm(struct radeon_device *rdev);
+void r100_set_common_regs(struct radeon_device *rdev);
/*
* r200,rv250,rs300,rv280
@@ -129,43 +135,6 @@ extern int r200_copy_dma(struct radeon_device *rdev,
uint64_t dst_offset,
unsigned num_pages,
struct radeon_fence *fence);
-static struct radeon_asic r200_asic = {
- .init = &r100_init,
- .fini = &r100_fini,
- .suspend = &r100_suspend,
- .resume = &r100_resume,
- .vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r100_gpu_reset,
- .gart_tlb_flush = &r100_pci_gart_tlb_flush,
- .gart_set_page = &r100_pci_gart_set_page,
- .cp_commit = &r100_cp_commit,
- .ring_start = &r100_ring_start,
- .ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
- .irq_set = &r100_irq_set,
- .irq_process = &r100_irq_process,
- .get_vblank_counter = &r100_get_vblank_counter,
- .fence_ring_emit = &r100_fence_ring_emit,
- .cs_parse = &r100_cs_parse,
- .copy_blit = &r100_copy_blit,
- .copy_dma = &r200_copy_dma,
- .copy = &r100_copy_blit,
- .get_engine_clock = &radeon_legacy_get_engine_clock,
- .set_engine_clock = &radeon_legacy_set_engine_clock,
- .get_memory_clock = &radeon_legacy_get_memory_clock,
- .set_memory_clock = NULL,
- .set_pcie_lanes = NULL,
- .set_clock_gating = &radeon_legacy_set_clock_gating,
- .set_surface_reg = r100_set_surface_reg,
- .clear_surface_reg = r100_clear_surface_reg,
- .bandwidth_update = &r100_bandwidth_update,
- .hpd_init = &r100_hpd_init,
- .hpd_fini = &r100_hpd_fini,
- .hpd_sense = &r100_hpd_sense,
- .hpd_set_polarity = &r100_hpd_set_polarity,
- .ioctl_wait_idle = NULL,
-};
-
/*
* r300,r350,rv350,rv380
@@ -186,82 +155,6 @@ extern void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v
extern void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes);
extern int rv370_get_pcie_lanes(struct radeon_device *rdev);
-static struct radeon_asic r300_asic = {
- .init = &r300_init,
- .fini = &r300_fini,
- .suspend = &r300_suspend,
- .resume = &r300_resume,
- .vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r300_gpu_reset,
- .gart_tlb_flush = &r100_pci_gart_tlb_flush,
- .gart_set_page = &r100_pci_gart_set_page,
- .cp_commit = &r100_cp_commit,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
- .irq_set = &r100_irq_set,
- .irq_process = &r100_irq_process,
- .get_vblank_counter = &r100_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
- .cs_parse = &r300_cs_parse,
- .copy_blit = &r100_copy_blit,
- .copy_dma = &r200_copy_dma,
- .copy = &r100_copy_blit,
- .get_engine_clock = &radeon_legacy_get_engine_clock,
- .set_engine_clock = &radeon_legacy_set_engine_clock,
- .get_memory_clock = &radeon_legacy_get_memory_clock,
- .set_memory_clock = NULL,
- .get_pcie_lanes = &rv370_get_pcie_lanes,
- .set_pcie_lanes = &rv370_set_pcie_lanes,
- .set_clock_gating = &radeon_legacy_set_clock_gating,
- .set_surface_reg = r100_set_surface_reg,
- .clear_surface_reg = r100_clear_surface_reg,
- .bandwidth_update = &r100_bandwidth_update,
- .hpd_init = &r100_hpd_init,
- .hpd_fini = &r100_hpd_fini,
- .hpd_sense = &r100_hpd_sense,
- .hpd_set_polarity = &r100_hpd_set_polarity,
- .ioctl_wait_idle = NULL,
-};
-
-
-static struct radeon_asic r300_asic_pcie = {
- .init = &r300_init,
- .fini = &r300_fini,
- .suspend = &r300_suspend,
- .resume = &r300_resume,
- .vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r300_gpu_reset,
- .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
- .gart_set_page = &rv370_pcie_gart_set_page,
- .cp_commit = &r100_cp_commit,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
- .irq_set = &r100_irq_set,
- .irq_process = &r100_irq_process,
- .get_vblank_counter = &r100_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
- .cs_parse = &r300_cs_parse,
- .copy_blit = &r100_copy_blit,
- .copy_dma = &r200_copy_dma,
- .copy = &r100_copy_blit,
- .get_engine_clock = &radeon_legacy_get_engine_clock,
- .set_engine_clock = &radeon_legacy_set_engine_clock,
- .get_memory_clock = &radeon_legacy_get_memory_clock,
- .set_memory_clock = NULL,
- .set_pcie_lanes = &rv370_set_pcie_lanes,
- .set_clock_gating = &radeon_legacy_set_clock_gating,
- .set_surface_reg = r100_set_surface_reg,
- .clear_surface_reg = r100_clear_surface_reg,
- .bandwidth_update = &r100_bandwidth_update,
- .hpd_init = &r100_hpd_init,
- .hpd_fini = &r100_hpd_fini,
- .hpd_sense = &r100_hpd_sense,
- .hpd_set_polarity = &r100_hpd_set_polarity,
- .ioctl_wait_idle = NULL,
-};
-
/*
* r420,r423,rv410
*/
@@ -269,44 +162,6 @@ extern int r420_init(struct radeon_device *rdev);
extern void r420_fini(struct radeon_device *rdev);
extern int r420_suspend(struct radeon_device *rdev);
extern int r420_resume(struct radeon_device *rdev);
-static struct radeon_asic r420_asic = {
- .init = &r420_init,
- .fini = &r420_fini,
- .suspend = &r420_suspend,
- .resume = &r420_resume,
- .vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r300_gpu_reset,
- .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
- .gart_set_page = &rv370_pcie_gart_set_page,
- .cp_commit = &r100_cp_commit,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
- .irq_set = &r100_irq_set,
- .irq_process = &r100_irq_process,
- .get_vblank_counter = &r100_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
- .cs_parse = &r300_cs_parse,
- .copy_blit = &r100_copy_blit,
- .copy_dma = &r200_copy_dma,
- .copy = &r100_copy_blit,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = &radeon_atom_get_memory_clock,
- .set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = &rv370_get_pcie_lanes,
- .set_pcie_lanes = &rv370_set_pcie_lanes,
- .set_clock_gating = &radeon_atom_set_clock_gating,
- .set_surface_reg = r100_set_surface_reg,
- .clear_surface_reg = r100_clear_surface_reg,
- .bandwidth_update = &r100_bandwidth_update,
- .hpd_init = &r100_hpd_init,
- .hpd_fini = &r100_hpd_fini,
- .hpd_sense = &r100_hpd_sense,
- .hpd_set_polarity = &r100_hpd_set_polarity,
- .ioctl_wait_idle = NULL,
-};
-
/*
* rs400,rs480
@@ -319,44 +174,6 @@ void rs400_gart_tlb_flush(struct radeon_device *rdev);
int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr);
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs400_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
-static struct radeon_asic rs400_asic = {
- .init = &rs400_init,
- .fini = &rs400_fini,
- .suspend = &rs400_suspend,
- .resume = &rs400_resume,
- .vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r300_gpu_reset,
- .gart_tlb_flush = &rs400_gart_tlb_flush,
- .gart_set_page = &rs400_gart_set_page,
- .cp_commit = &r100_cp_commit,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
- .irq_set = &r100_irq_set,
- .irq_process = &r100_irq_process,
- .get_vblank_counter = &r100_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
- .cs_parse = &r300_cs_parse,
- .copy_blit = &r100_copy_blit,
- .copy_dma = &r200_copy_dma,
- .copy = &r100_copy_blit,
- .get_engine_clock = &radeon_legacy_get_engine_clock,
- .set_engine_clock = &radeon_legacy_set_engine_clock,
- .get_memory_clock = &radeon_legacy_get_memory_clock,
- .set_memory_clock = NULL,
- .get_pcie_lanes = NULL,
- .set_pcie_lanes = NULL,
- .set_clock_gating = &radeon_legacy_set_clock_gating,
- .set_surface_reg = r100_set_surface_reg,
- .clear_surface_reg = r100_clear_surface_reg,
- .bandwidth_update = &r100_bandwidth_update,
- .hpd_init = &r100_hpd_init,
- .hpd_fini = &r100_hpd_fini,
- .hpd_sense = &r100_hpd_sense,
- .hpd_set_polarity = &r100_hpd_set_polarity,
- .ioctl_wait_idle = NULL,
-};
-
/*
* rs600.
@@ -379,45 +196,6 @@ bool rs600_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void rs600_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
-static struct radeon_asic rs600_asic = {
- .init = &rs600_init,
- .fini = &rs600_fini,
- .suspend = &rs600_suspend,
- .resume = &rs600_resume,
- .vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r300_gpu_reset,
- .gart_tlb_flush = &rs600_gart_tlb_flush,
- .gart_set_page = &rs600_gart_set_page,
- .cp_commit = &r100_cp_commit,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
- .irq_set = &rs600_irq_set,
- .irq_process = &rs600_irq_process,
- .get_vblank_counter = &rs600_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
- .cs_parse = &r300_cs_parse,
- .copy_blit = &r100_copy_blit,
- .copy_dma = &r200_copy_dma,
- .copy = &r100_copy_blit,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = &radeon_atom_get_memory_clock,
- .set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = NULL,
- .set_pcie_lanes = NULL,
- .set_clock_gating = &radeon_atom_set_clock_gating,
- .set_surface_reg = r100_set_surface_reg,
- .clear_surface_reg = r100_clear_surface_reg,
- .bandwidth_update = &rs600_bandwidth_update,
- .hpd_init = &rs600_hpd_init,
- .hpd_fini = &rs600_hpd_fini,
- .hpd_sense = &rs600_hpd_sense,
- .hpd_set_polarity = &rs600_hpd_set_polarity,
- .ioctl_wait_idle = NULL,
-};
-
-
/*
* rs690,rs740
*/
@@ -428,44 +206,6 @@ int rs690_suspend(struct radeon_device *rdev);
uint32_t rs690_mc_rreg(struct radeon_device *rdev, uint32_t reg);
void rs690_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rs690_bandwidth_update(struct radeon_device *rdev);
-static struct radeon_asic rs690_asic = {
- .init = &rs690_init,
- .fini = &rs690_fini,
- .suspend = &rs690_suspend,
- .resume = &rs690_resume,
- .vga_set_state = &r100_vga_set_state,
- .gpu_reset = &r300_gpu_reset,
- .gart_tlb_flush = &rs400_gart_tlb_flush,
- .gart_set_page = &rs400_gart_set_page,
- .cp_commit = &r100_cp_commit,
- .ring_start = &r300_ring_start,
- .ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
- .irq_set = &rs600_irq_set,
- .irq_process = &rs600_irq_process,
- .get_vblank_counter = &rs600_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
- .cs_parse = &r300_cs_parse,
- .copy_blit = &r100_copy_blit,
- .copy_dma = &r200_copy_dma,
- .copy = &r200_copy_dma,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = &radeon_atom_get_memory_clock,
- .set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = NULL,
- .set_pcie_lanes = NULL,
- .set_clock_gating = &radeon_atom_set_clock_gating,
- .set_surface_reg = r100_set_surface_reg,
- .clear_surface_reg = r100_clear_surface_reg,
- .bandwidth_update = &rs690_bandwidth_update,
- .hpd_init = &rs600_hpd_init,
- .hpd_fini = &rs600_hpd_fini,
- .hpd_sense = &rs600_hpd_sense,
- .hpd_set_polarity = &rs600_hpd_set_polarity,
- .ioctl_wait_idle = NULL,
-};
-
/*
* rv515
@@ -481,87 +221,12 @@ void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
void rv515_bandwidth_update(struct radeon_device *rdev);
int rv515_resume(struct radeon_device *rdev);
int rv515_suspend(struct radeon_device *rdev);
-static struct radeon_asic rv515_asic = {
- .init = &rv515_init,
- .fini = &rv515_fini,
- .suspend = &rv515_suspend,
- .resume = &rv515_resume,
- .vga_set_state = &r100_vga_set_state,
- .gpu_reset = &rv515_gpu_reset,
- .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
- .gart_set_page = &rv370_pcie_gart_set_page,
- .cp_commit = &r100_cp_commit,
- .ring_start = &rv515_ring_start,
- .ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
- .irq_set = &rs600_irq_set,
- .irq_process = &rs600_irq_process,
- .get_vblank_counter = &rs600_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
- .cs_parse = &r300_cs_parse,
- .copy_blit = &r100_copy_blit,
- .copy_dma = &r200_copy_dma,
- .copy = &r100_copy_blit,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = &radeon_atom_get_memory_clock,
- .set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = &rv370_get_pcie_lanes,
- .set_pcie_lanes = &rv370_set_pcie_lanes,
- .set_clock_gating = &radeon_atom_set_clock_gating,
- .set_surface_reg = r100_set_surface_reg,
- .clear_surface_reg = r100_clear_surface_reg,
- .bandwidth_update = &rv515_bandwidth_update,
- .hpd_init = &rs600_hpd_init,
- .hpd_fini = &rs600_hpd_fini,
- .hpd_sense = &rs600_hpd_sense,
- .hpd_set_polarity = &rs600_hpd_set_polarity,
- .ioctl_wait_idle = NULL,
-};
-
/*
* r520,rv530,rv560,rv570,r580
*/
int r520_init(struct radeon_device *rdev);
int r520_resume(struct radeon_device *rdev);
-static struct radeon_asic r520_asic = {
- .init = &r520_init,
- .fini = &rv515_fini,
- .suspend = &rv515_suspend,
- .resume = &r520_resume,
- .vga_set_state = &r100_vga_set_state,
- .gpu_reset = &rv515_gpu_reset,
- .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
- .gart_set_page = &rv370_pcie_gart_set_page,
- .cp_commit = &r100_cp_commit,
- .ring_start = &rv515_ring_start,
- .ring_test = &r100_ring_test,
- .ring_ib_execute = &r100_ring_ib_execute,
- .irq_set = &rs600_irq_set,
- .irq_process = &rs600_irq_process,
- .get_vblank_counter = &rs600_get_vblank_counter,
- .fence_ring_emit = &r300_fence_ring_emit,
- .cs_parse = &r300_cs_parse,
- .copy_blit = &r100_copy_blit,
- .copy_dma = &r200_copy_dma,
- .copy = &r100_copy_blit,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = &radeon_atom_get_memory_clock,
- .set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = &rv370_get_pcie_lanes,
- .set_pcie_lanes = &rv370_set_pcie_lanes,
- .set_clock_gating = &radeon_atom_set_clock_gating,
- .set_surface_reg = r100_set_surface_reg,
- .clear_surface_reg = r100_clear_surface_reg,
- .bandwidth_update = &rv515_bandwidth_update,
- .hpd_init = &rs600_hpd_init,
- .hpd_fini = &rs600_hpd_fini,
- .hpd_sense = &rs600_hpd_sense,
- .hpd_set_polarity = &rs600_hpd_set_polarity,
- .ioctl_wait_idle = NULL,
-};
/*
* r600,rv610,rv630,rv620,rv635,rv670,rs780,rs880
@@ -591,7 +256,7 @@ int r600_gpu_reset(struct radeon_device *rdev);
int r600_set_surface_reg(struct radeon_device *rdev, int reg,
uint32_t tiling_flags, uint32_t pitch,
uint32_t offset, uint32_t obj_size);
-int r600_clear_surface_reg(struct radeon_device *rdev, int reg);
+void r600_clear_surface_reg(struct radeon_device *rdev, int reg);
void r600_ring_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
int r600_ring_test(struct radeon_device *rdev);
int r600_copy_blit(struct radeon_device *rdev,
@@ -604,43 +269,6 @@ void r600_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
extern void r600_ioctl_wait_idle(struct radeon_device *rdev, struct radeon_bo *bo);
-static struct radeon_asic r600_asic = {
- .init = &r600_init,
- .fini = &r600_fini,
- .suspend = &r600_suspend,
- .resume = &r600_resume,
- .cp_commit = &r600_cp_commit,
- .vga_set_state = &r600_vga_set_state,
- .gpu_reset = &r600_gpu_reset,
- .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
- .gart_set_page = &rs600_gart_set_page,
- .ring_test = &r600_ring_test,
- .ring_ib_execute = &r600_ring_ib_execute,
- .irq_set = &r600_irq_set,
- .irq_process = &r600_irq_process,
- .get_vblank_counter = &rs600_get_vblank_counter,
- .fence_ring_emit = &r600_fence_ring_emit,
- .cs_parse = &r600_cs_parse,
- .copy_blit = &r600_copy_blit,
- .copy_dma = &r600_copy_blit,
- .copy = &r600_copy_blit,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = &radeon_atom_get_memory_clock,
- .set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = &rv370_get_pcie_lanes,
- .set_pcie_lanes = NULL,
- .set_clock_gating = NULL,
- .set_surface_reg = r600_set_surface_reg,
- .clear_surface_reg = r600_clear_surface_reg,
- .bandwidth_update = &rv515_bandwidth_update,
- .hpd_init = &r600_hpd_init,
- .hpd_fini = &r600_hpd_fini,
- .hpd_sense = &r600_hpd_sense,
- .hpd_set_polarity = &r600_hpd_set_polarity,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
-};
-
/*
* rv770,rv730,rv710,rv740
*/
@@ -650,43 +278,6 @@ int rv770_suspend(struct radeon_device *rdev);
int rv770_resume(struct radeon_device *rdev);
int rv770_gpu_reset(struct radeon_device *rdev);
-static struct radeon_asic rv770_asic = {
- .init = &rv770_init,
- .fini = &rv770_fini,
- .suspend = &rv770_suspend,
- .resume = &rv770_resume,
- .cp_commit = &r600_cp_commit,
- .gpu_reset = &rv770_gpu_reset,
- .vga_set_state = &r600_vga_set_state,
- .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
- .gart_set_page = &rs600_gart_set_page,
- .ring_test = &r600_ring_test,
- .ring_ib_execute = &r600_ring_ib_execute,
- .irq_set = &r600_irq_set,
- .irq_process = &r600_irq_process,
- .get_vblank_counter = &rs600_get_vblank_counter,
- .fence_ring_emit = &r600_fence_ring_emit,
- .cs_parse = &r600_cs_parse,
- .copy_blit = &r600_copy_blit,
- .copy_dma = &r600_copy_blit,
- .copy = &r600_copy_blit,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = &radeon_atom_get_memory_clock,
- .set_memory_clock = &radeon_atom_set_memory_clock,
- .get_pcie_lanes = &rv370_get_pcie_lanes,
- .set_pcie_lanes = NULL,
- .set_clock_gating = &radeon_atom_set_clock_gating,
- .set_surface_reg = r600_set_surface_reg,
- .clear_surface_reg = r600_clear_surface_reg,
- .bandwidth_update = &rv515_bandwidth_update,
- .hpd_init = &r600_hpd_init,
- .hpd_fini = &r600_hpd_fini,
- .hpd_sense = &r600_hpd_sense,
- .hpd_set_polarity = &r600_hpd_set_polarity,
- .ioctl_wait_idle = r600_ioctl_wait_idle,
-};
-
/*
* evergreen
*/
@@ -701,40 +292,4 @@ void evergreen_hpd_fini(struct radeon_device *rdev);
bool evergreen_hpd_sense(struct radeon_device *rdev, enum radeon_hpd_id hpd);
void evergreen_hpd_set_polarity(struct radeon_device *rdev,
enum radeon_hpd_id hpd);
-
-static struct radeon_asic evergreen_asic = {
- .init = &evergreen_init,
- .fini = &evergreen_fini,
- .suspend = &evergreen_suspend,
- .resume = &evergreen_resume,
- .cp_commit = NULL,
- .gpu_reset = &evergreen_gpu_reset,
- .vga_set_state = &r600_vga_set_state,
- .gart_tlb_flush = &r600_pcie_gart_tlb_flush,
- .gart_set_page = &rs600_gart_set_page,
- .ring_test = NULL,
- .ring_ib_execute = NULL,
- .irq_set = NULL,
- .irq_process = NULL,
- .get_vblank_counter = NULL,
- .fence_ring_emit = NULL,
- .cs_parse = NULL,
- .copy_blit = NULL,
- .copy_dma = NULL,
- .copy = NULL,
- .get_engine_clock = &radeon_atom_get_engine_clock,
- .set_engine_clock = &radeon_atom_set_engine_clock,
- .get_memory_clock = &radeon_atom_get_memory_clock,
- .set_memory_clock = &radeon_atom_set_memory_clock,
- .set_pcie_lanes = NULL,
- .set_clock_gating = NULL,
- .set_surface_reg = r600_set_surface_reg,
- .clear_surface_reg = r600_clear_surface_reg,
- .bandwidth_update = &evergreen_bandwidth_update,
- .hpd_init = &evergreen_hpd_init,
- .hpd_fini = &evergreen_hpd_fini,
- .hpd_sense = &evergreen_hpd_sense,
- .hpd_set_polarity = &evergreen_hpd_set_polarity,
-};
-
#endif
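The header half of this patch removes the same static struct radeon_asic initializers that radeon_asic.c now carries, leaving radeon_asic.h with declarations only. Defining objects in a header gives every including .c file its own private copy, which is presumably why radeon_device.c also drops its #include "radeon_asic.h" later in this patch. A reduced sketch of the before/after shape (contents abbreviated, shown only as an illustration):

        /* before: definition lived in radeon_asic.h, duplicated per includer */
        static struct radeon_asic r100_asic = { .init = &r100_init, /* ... */ };

        /* after: the single definition lives in radeon_asic.c, and the header
         * only exposes the selection entry point */
        int radeon_asic_init(struct radeon_device *rdev);
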
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 93783b15c81..1fff95505cf 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -75,46 +75,45 @@ static inline struct radeon_i2c_bus_rec radeon_lookup_i2c_gpio(struct radeon_dev
memset(&i2c, 0, sizeof(struct radeon_i2c_bus_rec));
i2c.valid = false;
- atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset);
-
- i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
-
-
- for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
- gpio = &i2c_info->asGPIO_Info[i];
-
- if (gpio->sucI2cId.ucAccess == id) {
- i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
- i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
- i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
- i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
- i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
- i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
- i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
- i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
- i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
- i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
- i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
- i2c.en_data_mask = (1 << gpio->ucDataEnShift);
- i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
- i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
- i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
- i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
-
- if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
- i2c.hw_capable = true;
- else
- i2c.hw_capable = false;
-
- if (gpio->sucI2cId.ucAccess == 0xa0)
- i2c.mm_i2c = true;
- else
- i2c.mm_i2c = false;
-
- i2c.i2c_id = gpio->sucI2cId.ucAccess;
-
- i2c.valid = true;
- break;
+ if (atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) {
+ i2c_info = (struct _ATOM_GPIO_I2C_INFO *)(ctx->bios + data_offset);
+
+ for (i = 0; i < ATOM_MAX_SUPPORTED_DEVICE; i++) {
+ gpio = &i2c_info->asGPIO_Info[i];
+
+ if (gpio->sucI2cId.ucAccess == id) {
+ i2c.mask_clk_reg = le16_to_cpu(gpio->usClkMaskRegisterIndex) * 4;
+ i2c.mask_data_reg = le16_to_cpu(gpio->usDataMaskRegisterIndex) * 4;
+ i2c.en_clk_reg = le16_to_cpu(gpio->usClkEnRegisterIndex) * 4;
+ i2c.en_data_reg = le16_to_cpu(gpio->usDataEnRegisterIndex) * 4;
+ i2c.y_clk_reg = le16_to_cpu(gpio->usClkY_RegisterIndex) * 4;
+ i2c.y_data_reg = le16_to_cpu(gpio->usDataY_RegisterIndex) * 4;
+ i2c.a_clk_reg = le16_to_cpu(gpio->usClkA_RegisterIndex) * 4;
+ i2c.a_data_reg = le16_to_cpu(gpio->usDataA_RegisterIndex) * 4;
+ i2c.mask_clk_mask = (1 << gpio->ucClkMaskShift);
+ i2c.mask_data_mask = (1 << gpio->ucDataMaskShift);
+ i2c.en_clk_mask = (1 << gpio->ucClkEnShift);
+ i2c.en_data_mask = (1 << gpio->ucDataEnShift);
+ i2c.y_clk_mask = (1 << gpio->ucClkY_Shift);
+ i2c.y_data_mask = (1 << gpio->ucDataY_Shift);
+ i2c.a_clk_mask = (1 << gpio->ucClkA_Shift);
+ i2c.a_data_mask = (1 << gpio->ucDataA_Shift);
+
+ if (gpio->sucI2cId.sbfAccess.bfHW_Capable)
+ i2c.hw_capable = true;
+ else
+ i2c.hw_capable = false;
+
+ if (gpio->sucI2cId.ucAccess == 0xa0)
+ i2c.mm_i2c = true;
+ else
+ i2c.mm_i2c = false;
+
+ i2c.i2c_id = gpio->sucI2cId.ucAccess;
+
+ i2c.valid = true;
+ break;
+ }
}
}
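The radeon_atombios.c hunks in this patch all apply the same defensive rewrite: atom_parse_data_header() reports whether the requested table was found, and the table walk now runs only inside that check instead of dereferencing ctx->bios + data_offset unconditionally. Reduced to its core, the new pattern looks like this (sketch only, table layout and field handling elided):

        static bool lookup_sketch(struct atom_context *ctx, int index)
        {
                u16 data_offset;
                u8 frev, crev;

                /* bail out early when the BIOS carries no such table */
                if (!atom_parse_data_header(ctx, index, NULL, &frev, &crev, &data_offset))
                        return false;

                /* only now is ctx->bios + data_offset known to point at a table */
                /* ... walk the entries and fill in the result ... */
                return true;
        }
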
@@ -135,20 +134,21 @@ static inline struct radeon_gpio_rec radeon_lookup_gpio(struct radeon_device *rd
memset(&gpio, 0, sizeof(struct radeon_gpio_rec));
gpio.valid = false;
- atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset);
+ if (atom_parse_data_header(ctx, index, &size, NULL, NULL, &data_offset)) {
+ gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset);
- gpio_info = (struct _ATOM_GPIO_PIN_LUT *)(ctx->bios + data_offset);
+ num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) /
+ sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
- num_indices = (size - sizeof(ATOM_COMMON_TABLE_HEADER)) / sizeof(ATOM_GPIO_PIN_ASSIGNMENT);
-
- for (i = 0; i < num_indices; i++) {
- pin = &gpio_info->asGPIO_Pin[i];
- if (id == pin->ucGPIO_ID) {
- gpio.id = pin->ucGPIO_ID;
- gpio.reg = pin->usGpioPin_AIndex * 4;
- gpio.mask = (1 << pin->ucGpioPinBitShift);
- gpio.valid = true;
- break;
+ for (i = 0; i < num_indices; i++) {
+ pin = &gpio_info->asGPIO_Pin[i];
+ if (id == pin->ucGPIO_ID) {
+ gpio.id = pin->ucGPIO_ID;
+ gpio.reg = pin->usGpioPin_AIndex * 4;
+ gpio.mask = (1 << pin->ucGpioPinBitShift);
+ gpio.valid = true;
+ break;
+ }
}
}
@@ -264,6 +264,8 @@ static bool radeon_atom_apply_quirks(struct drm_device *dev,
if ((supported_device == ATOM_DEVICE_CRT1_SUPPORT) ||
(supported_device == ATOM_DEVICE_DFP2_SUPPORT))
return false;
+ if (supported_device == ATOM_DEVICE_CRT2_SUPPORT)
+ *line_mux = 0x90;
}
/* ASUS HD 3600 XT board lists the DVI port as HDMI */
@@ -395,9 +397,7 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
struct radeon_gpio_rec gpio;
struct radeon_hpd hpd;
- atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
-
- if (data_offset == 0)
+ if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
return false;
if (crev < 2)
@@ -449,37 +449,43 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
GetIndexIntoMasterTable(DATA,
IntegratedSystemInfo);
- atom_parse_data_header(ctx, index, &size, &frev,
- &crev, &igp_offset);
-
- if (crev >= 2) {
- igp_obj =
- (ATOM_INTEGRATED_SYSTEM_INFO_V2
- *) (ctx->bios + igp_offset);
-
- if (igp_obj) {
- uint32_t slot_config, ct;
-
- if (con_obj_num == 1)
- slot_config =
- igp_obj->
- ulDDISlot1Config;
- else
- slot_config =
- igp_obj->
- ulDDISlot2Config;
-
- ct = (slot_config >> 16) & 0xff;
- connector_type =
- object_connector_convert
- [ct];
- connector_object_id = ct;
- igp_lane_info =
- slot_config & 0xffff;
+ if (atom_parse_data_header(ctx, index, &size, &frev,
+ &crev, &igp_offset)) {
+
+ if (crev >= 2) {
+ igp_obj =
+ (ATOM_INTEGRATED_SYSTEM_INFO_V2
+ *) (ctx->bios + igp_offset);
+
+ if (igp_obj) {
+ uint32_t slot_config, ct;
+
+ if (con_obj_num == 1)
+ slot_config =
+ igp_obj->
+ ulDDISlot1Config;
+ else
+ slot_config =
+ igp_obj->
+ ulDDISlot2Config;
+
+ ct = (slot_config >> 16) & 0xff;
+ connector_type =
+ object_connector_convert
+ [ct];
+ connector_object_id = ct;
+ igp_lane_info =
+ slot_config & 0xffff;
+ } else
+ continue;
} else
continue;
- } else
- continue;
+ } else {
+ igp_lane_info = 0;
+ connector_type =
+ object_connector_convert[con_obj_id];
+ connector_object_id = con_obj_id;
+ }
} else {
igp_lane_info = 0;
connector_type =
@@ -627,20 +633,23 @@ static uint16_t atombios_get_connector_object_id(struct drm_device *dev,
uint8_t frev, crev;
ATOM_XTMDS_INFO *xtmds;
- atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
- xtmds = (ATOM_XTMDS_INFO *)(ctx->bios + data_offset);
+ if (atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset)) {
+ xtmds = (ATOM_XTMDS_INFO *)(ctx->bios + data_offset);
- if (xtmds->ucSupportedLink & ATOM_XTMDS_SUPPORTED_DUALLINK) {
- if (connector_type == DRM_MODE_CONNECTOR_DVII)
- return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I;
- else
- return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D;
- } else {
- if (connector_type == DRM_MODE_CONNECTOR_DVII)
- return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
- else
- return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D;
- }
+ if (xtmds->ucSupportedLink & ATOM_XTMDS_SUPPORTED_DUALLINK) {
+ if (connector_type == DRM_MODE_CONNECTOR_DVII)
+ return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_I;
+ else
+ return CONNECTOR_OBJECT_ID_DUAL_LINK_DVI_D;
+ } else {
+ if (connector_type == DRM_MODE_CONNECTOR_DVII)
+ return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_I;
+ else
+ return CONNECTOR_OBJECT_ID_SINGLE_LINK_DVI_D;
+ }
+ } else
+ return supported_devices_connector_object_id_convert
+ [connector_type];
} else {
return supported_devices_connector_object_id_convert
[connector_type];
@@ -672,7 +681,8 @@ bool radeon_get_atom_connector_info_from_supported_devices_table(struct
int i, j, max_device;
struct bios_connector bios_connectors[ATOM_MAX_SUPPORTED_DEVICE];
- atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset);
+ if (!atom_parse_data_header(ctx, index, &size, &frev, &crev, &data_offset))
+ return false;
supported_devices =
(union atom_supported_devices *)(ctx->bios + data_offset);
@@ -865,14 +875,11 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
struct radeon_pll *mpll = &rdev->clock.mpll;
uint16_t data_offset;
- atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
- &crev, &data_offset);
-
- firmware_info =
- (union firmware_info *)(mode_info->atom_context->bios +
- data_offset);
-
- if (firmware_info) {
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ firmware_info =
+ (union firmware_info *)(mode_info->atom_context->bios +
+ data_offset);
/* pixel clocks */
p1pll->reference_freq =
le16_to_cpu(firmware_info->info.usReferenceClock);
@@ -887,6 +894,20 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
p1pll->pll_out_max =
le32_to_cpu(firmware_info->info.ulMaxPixelClockPLL_Output);
+ if (crev >= 4) {
+ p1pll->lcd_pll_out_min =
+ le16_to_cpu(firmware_info->info_14.usLcdMinPixelClockPLL_Output) * 100;
+ if (p1pll->lcd_pll_out_min == 0)
+ p1pll->lcd_pll_out_min = p1pll->pll_out_min;
+ p1pll->lcd_pll_out_max =
+ le16_to_cpu(firmware_info->info_14.usLcdMaxPixelClockPLL_Output) * 100;
+ if (p1pll->lcd_pll_out_max == 0)
+ p1pll->lcd_pll_out_max = p1pll->pll_out_max;
+ } else {
+ p1pll->lcd_pll_out_min = p1pll->pll_out_min;
+ p1pll->lcd_pll_out_max = p1pll->pll_out_max;
+ }
+
if (p1pll->pll_out_min == 0) {
if (ASIC_IS_AVIVO(rdev))
p1pll->pll_out_min = 64800;
@@ -992,13 +1013,10 @@ bool radeon_atombios_sideport_present(struct radeon_device *rdev)
u8 frev, crev;
u16 data_offset;
- atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
- &crev, &data_offset);
-
- igp_info = (union igp_info *)(mode_info->atom_context->bios +
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ igp_info = (union igp_info *)(mode_info->atom_context->bios +
data_offset);
-
- if (igp_info) {
switch (crev) {
case 1:
if (igp_info->info.ucMemoryType & 0xf0)
@@ -1029,14 +1047,12 @@ bool radeon_atombios_get_tmds_info(struct radeon_encoder *encoder,
uint16_t maxfreq;
int i;
- atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
- &crev, &data_offset);
-
- tmds_info =
- (struct _ATOM_TMDS_INFO *)(mode_info->atom_context->bios +
- data_offset);
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ tmds_info =
+ (struct _ATOM_TMDS_INFO *)(mode_info->atom_context->bios +
+ data_offset);
- if (tmds_info) {
maxfreq = le16_to_cpu(tmds_info->usMaxFrequency);
for (i = 0; i < 4; i++) {
tmds->tmds_pll[i].freq =
@@ -1085,13 +1101,11 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
if (id > ATOM_MAX_SS_ENTRY)
return NULL;
- atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
- &crev, &data_offset);
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ ss_info =
+ (struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset);
- ss_info =
- (struct _ATOM_SPREAD_SPECTRUM_INFO *)(mode_info->atom_context->bios + data_offset);
-
- if (ss_info) {
ss =
kzalloc(sizeof(struct radeon_atom_ss), GFP_KERNEL);
@@ -1114,30 +1128,6 @@ static struct radeon_atom_ss *radeon_atombios_get_ss_info(struct
return ss;
}
-static void radeon_atom_apply_lvds_quirks(struct drm_device *dev,
- struct radeon_encoder_atom_dig *lvds)
-{
-
- /* Toshiba A300-1BU laptop panel doesn't like new pll divider algo */
- if ((dev->pdev->device == 0x95c4) &&
- (dev->pdev->subsystem_vendor == 0x1179) &&
- (dev->pdev->subsystem_device == 0xff50)) {
- if ((lvds->native_mode.hdisplay == 1280) &&
- (lvds->native_mode.vdisplay == 800))
- lvds->pll_algo = PLL_ALGO_LEGACY;
- }
-
- /* Dell Studio 15 laptop panel doesn't like new pll divider algo */
- if ((dev->pdev->device == 0x95c4) &&
- (dev->pdev->subsystem_vendor == 0x1028) &&
- (dev->pdev->subsystem_device == 0x029f)) {
- if ((lvds->native_mode.hdisplay == 1280) &&
- (lvds->native_mode.vdisplay == 800))
- lvds->pll_algo = PLL_ALGO_LEGACY;
- }
-
-}
-
union lvds_info {
struct _ATOM_LVDS_INFO info;
struct _ATOM_LVDS_INFO_V12 info_12;
@@ -1156,13 +1146,10 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
uint8_t frev, crev;
struct radeon_encoder_atom_dig *lvds = NULL;
- atom_parse_data_header(mode_info->atom_context, index, NULL, &frev,
- &crev, &data_offset);
-
- lvds_info =
- (union lvds_info *)(mode_info->atom_context->bios + data_offset);
-
- if (lvds_info) {
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ lvds_info =
+ (union lvds_info *)(mode_info->atom_context->bios + data_offset);
lvds =
kzalloc(sizeof(struct radeon_encoder_atom_dig), GFP_KERNEL);
@@ -1220,9 +1207,6 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct
lvds->pll_algo = PLL_ALGO_LEGACY;
}
- /* LVDS quirks */
- radeon_atom_apply_lvds_quirks(dev, lvds);
-
encoder->native_mode = lvds->native_mode;
}
return lvds;
@@ -1241,11 +1225,11 @@ radeon_atombios_get_primary_dac_info(struct radeon_encoder *encoder)
uint8_t bg, dac;
struct radeon_encoder_primary_dac *p_dac = NULL;
- atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
-
- dac_info = (struct _COMPASSIONATE_DATA *)(mode_info->atom_context->bios + data_offset);
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ dac_info = (struct _COMPASSIONATE_DATA *)
+ (mode_info->atom_context->bios + data_offset);
- if (dac_info) {
p_dac = kzalloc(sizeof(struct radeon_encoder_primary_dac), GFP_KERNEL);
if (!p_dac)
@@ -1270,7 +1254,9 @@ bool radeon_atom_get_tv_timings(struct radeon_device *rdev, int index,
u8 frev, crev;
u16 data_offset, misc;
- atom_parse_data_header(mode_info->atom_context, data_index, NULL, &frev, &crev, &data_offset);
+ if (!atom_parse_data_header(mode_info->atom_context, data_index, NULL,
+ &frev, &crev, &data_offset))
+ return false;
switch (crev) {
case 1:
@@ -1362,47 +1348,50 @@ radeon_atombios_get_tv_info(struct radeon_device *rdev)
struct _ATOM_ANALOG_TV_INFO *tv_info;
enum radeon_tv_std tv_std = TV_STD_NTSC;
- atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
- tv_info = (struct _ATOM_ANALOG_TV_INFO *)(mode_info->atom_context->bios + data_offset);
+ tv_info = (struct _ATOM_ANALOG_TV_INFO *)
+ (mode_info->atom_context->bios + data_offset);
- switch (tv_info->ucTV_BootUpDefaultStandard) {
- case ATOM_TV_NTSC:
- tv_std = TV_STD_NTSC;
- DRM_INFO("Default TV standard: NTSC\n");
- break;
- case ATOM_TV_NTSCJ:
- tv_std = TV_STD_NTSC_J;
- DRM_INFO("Default TV standard: NTSC-J\n");
- break;
- case ATOM_TV_PAL:
- tv_std = TV_STD_PAL;
- DRM_INFO("Default TV standard: PAL\n");
- break;
- case ATOM_TV_PALM:
- tv_std = TV_STD_PAL_M;
- DRM_INFO("Default TV standard: PAL-M\n");
- break;
- case ATOM_TV_PALN:
- tv_std = TV_STD_PAL_N;
- DRM_INFO("Default TV standard: PAL-N\n");
- break;
- case ATOM_TV_PALCN:
- tv_std = TV_STD_PAL_CN;
- DRM_INFO("Default TV standard: PAL-CN\n");
- break;
- case ATOM_TV_PAL60:
- tv_std = TV_STD_PAL_60;
- DRM_INFO("Default TV standard: PAL-60\n");
- break;
- case ATOM_TV_SECAM:
- tv_std = TV_STD_SECAM;
- DRM_INFO("Default TV standard: SECAM\n");
- break;
- default:
- tv_std = TV_STD_NTSC;
- DRM_INFO("Unknown TV standard; defaulting to NTSC\n");
- break;
+ switch (tv_info->ucTV_BootUpDefaultStandard) {
+ case ATOM_TV_NTSC:
+ tv_std = TV_STD_NTSC;
+ DRM_INFO("Default TV standard: NTSC\n");
+ break;
+ case ATOM_TV_NTSCJ:
+ tv_std = TV_STD_NTSC_J;
+ DRM_INFO("Default TV standard: NTSC-J\n");
+ break;
+ case ATOM_TV_PAL:
+ tv_std = TV_STD_PAL;
+ DRM_INFO("Default TV standard: PAL\n");
+ break;
+ case ATOM_TV_PALM:
+ tv_std = TV_STD_PAL_M;
+ DRM_INFO("Default TV standard: PAL-M\n");
+ break;
+ case ATOM_TV_PALN:
+ tv_std = TV_STD_PAL_N;
+ DRM_INFO("Default TV standard: PAL-N\n");
+ break;
+ case ATOM_TV_PALCN:
+ tv_std = TV_STD_PAL_CN;
+ DRM_INFO("Default TV standard: PAL-CN\n");
+ break;
+ case ATOM_TV_PAL60:
+ tv_std = TV_STD_PAL_60;
+ DRM_INFO("Default TV standard: PAL-60\n");
+ break;
+ case ATOM_TV_SECAM:
+ tv_std = TV_STD_SECAM;
+ DRM_INFO("Default TV standard: SECAM\n");
+ break;
+ default:
+ tv_std = TV_STD_NTSC;
+ DRM_INFO("Unknown TV standard; defaulting to NTSC\n");
+ break;
+ }
}
return tv_std;
}
@@ -1420,11 +1409,12 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
uint8_t bg, dac;
struct radeon_encoder_tv_dac *tv_dac = NULL;
- atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
- dac_info = (struct _COMPASSIONATE_DATA *)(mode_info->atom_context->bios + data_offset);
+ dac_info = (struct _COMPASSIONATE_DATA *)
+ (mode_info->atom_context->bios + data_offset);
- if (dac_info) {
tv_dac = kzalloc(sizeof(struct radeon_encoder_tv_dac), GFP_KERNEL);
if (!tv_dac)
@@ -1447,6 +1437,30 @@ radeon_atombios_get_tv_dac_info(struct radeon_encoder *encoder)
return tv_dac;
}
+static const char *thermal_controller_names[] = {
+ "NONE",
+ "LM63",
+ "ADM1032",
+ "ADM1030",
+ "MUA6649",
+ "LM64",
+ "F75375",
+ "ASC7512",
+};
+
+static const char *pp_lib_thermal_controller_names[] = {
+ "NONE",
+ "LM63",
+ "ADM1032",
+ "ADM1030",
+ "MUA6649",
+ "LM64",
+ "F75375",
+ "RV6xx",
+ "RV770",
+ "ADT7473",
+};
+
union power_info {
struct _ATOM_POWERPLAY_INFO info;
struct _ATOM_POWERPLAY_INFO_V2 info_2;
@@ -1466,15 +1480,22 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
struct _ATOM_PPLIB_STATE *power_state;
int num_modes = 0, i, j;
int state_index = 0, mode_index = 0;
-
- atom_parse_data_header(mode_info->atom_context, index, NULL, &frev, &crev, &data_offset);
-
- power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+ struct radeon_i2c_bus_rec i2c_bus;
rdev->pm.default_power_state = NULL;
- if (power_info) {
+ if (atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
if (frev < 4) {
+ /* add the i2c bus for thermal/fan chip */
+ if (power_info->info.ucOverdriveThermalController > 0) {
+ DRM_INFO("Possible %s thermal controller at 0x%02x\n",
+ thermal_controller_names[power_info->info.ucOverdriveThermalController],
+ power_info->info.ucOverdriveControllerAddress >> 1);
+ i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info.ucOverdriveI2cLine);
+ rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal");
+ }
num_modes = power_info->info.ucNumOfPowerModeEntries;
if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK)
num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK;
@@ -1684,6 +1705,24 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev)
}
}
} else if (frev == 4) {
+ /* add the i2c bus for thermal/fan chip */
+ /* no support for internal controller yet */
+ if (power_info->info_4.sThermalController.ucType > 0) {
+ if ((power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) ||
+ (power_info->info_4.sThermalController.ucType == ATOM_PP_THERMALCONTROLLER_RV770)) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (power_info->info_4.sThermalController.ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ } else {
+ DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
+ pp_lib_thermal_controller_names[power_info->info_4.sThermalController.ucType],
+ power_info->info_4.sThermalController.ucI2cAddress >> 1,
+ (power_info->info_4.sThermalController.ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ i2c_bus = radeon_lookup_i2c_gpio(rdev, power_info->info_4.sThermalController.ucI2cLine);
+ rdev->pm.i2c_bus = radeon_i2c_create(rdev->ddev, &i2c_bus, "Thermal");
+ }
+ }
for (i = 0; i < power_info->info_4.ucNumStates; i++) {
mode_index = 0;
power_state = (struct _ATOM_PPLIB_STATE *)
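
Besides the header-checking rewrite, the power-table hunk above teaches radeon_atombios_get_power_modes() to register an auxiliary i2c bus for an external thermal/fan controller: the chip is named via the two string tables added earlier, and the bus is created with radeon_lookup_i2c_gpio() plus radeon_i2c_create(). A hypothetical consumer-side helper, shown only to illustrate where the result ends up (not part of the patch):

        /* hypothetical: later code can test whether a thermal chip bus was set up */
        static bool radeon_pm_has_thermal_bus(struct radeon_device *rdev)
        {
                return rdev->pm.i2c_bus != NULL;
        }
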
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index e9ea38ece37..2becdeda68a 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -531,10 +531,7 @@ static struct radeon_i2c_bus_rec combios_setup_i2c_bus(struct radeon_device *rde
case CHIP_RS300:
switch (ddc_line) {
case RADEON_GPIO_DVI_DDC:
- /* in theory this should be hw capable,
- * but it doesn't seem to work
- */
- i2c.hw_capable = false;
+ i2c.hw_capable = true;
break;
default:
i2c.hw_capable = false;
@@ -633,6 +630,8 @@ bool radeon_combios_get_clock_info(struct drm_device *dev)
p1pll->reference_div = RBIOS16(pll_info + 0x10);
p1pll->pll_out_min = RBIOS32(pll_info + 0x12);
p1pll->pll_out_max = RBIOS32(pll_info + 0x16);
+ p1pll->lcd_pll_out_min = p1pll->pll_out_min;
+ p1pll->lcd_pll_out_max = p1pll->pll_out_max;
if (rev > 9) {
p1pll->pll_in_min = RBIOS32(pll_info + 0x36);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index ee0083f982d..60d59816b94 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -940,7 +940,7 @@ static void radeon_dp_connector_destroy(struct drm_connector *connector)
if (radeon_connector->edid)
kfree(radeon_connector->edid);
if (radeon_dig_connector->dp_i2c_bus)
- radeon_i2c_destroy_dp(radeon_dig_connector->dp_i2c_bus);
+ radeon_i2c_destroy(radeon_dig_connector->dp_i2c_bus);
kfree(radeon_connector->con_priv);
drm_sysfs_connector_remove(connector);
drm_connector_cleanup(connector);
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 70ba02ed772..f9b0fe002c0 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -193,9 +193,11 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
radeon_bo_list_fence(&parser->validated, parser->ib->fence);
}
radeon_bo_list_unreserve(&parser->validated);
- for (i = 0; i < parser->nrelocs; i++) {
- if (parser->relocs[i].gobj)
- drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
+ if (parser->relocs != NULL) {
+ for (i = 0; i < parser->nrelocs; i++) {
+ if (parser->relocs[i].gobj)
+ drm_gem_object_unreference_unlocked(parser->relocs[i].gobj);
+ }
}
kfree(parser->track);
kfree(parser->relocs);
@@ -243,7 +245,8 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
}
r = radeon_cs_parser_relocs(&parser);
if (r) {
- DRM_ERROR("Failed to parse relocation !\n");
+ if (r != -ERESTARTSYS)
+ DRM_ERROR("Failed to parse relocation %d!\n", r);
radeon_cs_parser_fini(&parser, r);
mutex_unlock(&rdev->cs_mutex);
return r;
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index e28e4ed5f72..60ec47b7164 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -33,7 +33,6 @@
#include <linux/vga_switcheroo.h>
#include "radeon_reg.h"
#include "radeon.h"
-#include "radeon_asic.h"
#include "atom.h"
/*
@@ -242,6 +241,36 @@ bool radeon_card_posted(struct radeon_device *rdev)
}
+void radeon_update_bandwidth_info(struct radeon_device *rdev)
+{
+ fixed20_12 a;
+ u32 sclk, mclk;
+
+ if (rdev->flags & RADEON_IS_IGP) {
+ sclk = radeon_get_engine_clock(rdev);
+ mclk = rdev->clock.default_mclk;
+
+ a.full = rfixed_const(100);
+ rdev->pm.sclk.full = rfixed_const(sclk);
+ rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
+ rdev->pm.mclk.full = rfixed_const(mclk);
+ rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a);
+
+ a.full = rfixed_const(16);
+ /* core_bandwidth = sclk(Mhz) * 16 */
+ rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
+ } else {
+ sclk = radeon_get_engine_clock(rdev);
+ mclk = radeon_get_memory_clock(rdev);
+
+ a.full = rfixed_const(100);
+ rdev->pm.sclk.full = rfixed_const(sclk);
+ rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
+ rdev->pm.mclk.full = rfixed_const(mclk);
+ rdev->pm.mclk.full = rfixed_div(rdev->pm.mclk, a);
+ }
+}
+
bool radeon_boot_test_post_card(struct radeon_device *rdev)
{
if (radeon_card_posted(rdev))
@@ -288,181 +317,6 @@ void radeon_dummy_page_fini(struct radeon_device *rdev)
}
-/*
- * Registers accessors functions.
- */
-uint32_t radeon_invalid_rreg(struct radeon_device *rdev, uint32_t reg)
-{
- DRM_ERROR("Invalid callback to read register 0x%04X\n", reg);
- BUG_ON(1);
- return 0;
-}
-
-void radeon_invalid_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
-{
- DRM_ERROR("Invalid callback to write register 0x%04X with 0x%08X\n",
- reg, v);
- BUG_ON(1);
-}
-
-void radeon_register_accessor_init(struct radeon_device *rdev)
-{
- rdev->mc_rreg = &radeon_invalid_rreg;
- rdev->mc_wreg = &radeon_invalid_wreg;
- rdev->pll_rreg = &radeon_invalid_rreg;
- rdev->pll_wreg = &radeon_invalid_wreg;
- rdev->pciep_rreg = &radeon_invalid_rreg;
- rdev->pciep_wreg = &radeon_invalid_wreg;
-
- /* Don't change order as we are overridding accessor. */
- if (rdev->family < CHIP_RV515) {
- rdev->pcie_reg_mask = 0xff;
- } else {
- rdev->pcie_reg_mask = 0x7ff;
- }
- /* FIXME: not sure here */
- if (rdev->family <= CHIP_R580) {
- rdev->pll_rreg = &r100_pll_rreg;
- rdev->pll_wreg = &r100_pll_wreg;
- }
- if (rdev->family >= CHIP_R420) {
- rdev->mc_rreg = &r420_mc_rreg;
- rdev->mc_wreg = &r420_mc_wreg;
- }
- if (rdev->family >= CHIP_RV515) {
- rdev->mc_rreg = &rv515_mc_rreg;
- rdev->mc_wreg = &rv515_mc_wreg;
- }
- if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480) {
- rdev->mc_rreg = &rs400_mc_rreg;
- rdev->mc_wreg = &rs400_mc_wreg;
- }
- if (rdev->family == CHIP_RS690 || rdev->family == CHIP_RS740) {
- rdev->mc_rreg = &rs690_mc_rreg;
- rdev->mc_wreg = &rs690_mc_wreg;
- }
- if (rdev->family == CHIP_RS600) {
- rdev->mc_rreg = &rs600_mc_rreg;
- rdev->mc_wreg = &rs600_mc_wreg;
- }
- if ((rdev->family >= CHIP_R600) && (rdev->family <= CHIP_RV740)) {
- rdev->pciep_rreg = &r600_pciep_rreg;
- rdev->pciep_wreg = &r600_pciep_wreg;
- }
-}
-
-
-/*
- * ASIC
- */
-int radeon_asic_init(struct radeon_device *rdev)
-{
- radeon_register_accessor_init(rdev);
- switch (rdev->family) {
- case CHIP_R100:
- case CHIP_RV100:
- case CHIP_RS100:
- case CHIP_RV200:
- case CHIP_RS200:
- rdev->asic = &r100_asic;
- break;
- case CHIP_R200:
- case CHIP_RV250:
- case CHIP_RS300:
- case CHIP_RV280:
- rdev->asic = &r200_asic;
- break;
- case CHIP_R300:
- case CHIP_R350:
- case CHIP_RV350:
- case CHIP_RV380:
- if (rdev->flags & RADEON_IS_PCIE)
- rdev->asic = &r300_asic_pcie;
- else
- rdev->asic = &r300_asic;
- break;
- case CHIP_R420:
- case CHIP_R423:
- case CHIP_RV410:
- rdev->asic = &r420_asic;
- break;
- case CHIP_RS400:
- case CHIP_RS480:
- rdev->asic = &rs400_asic;
- break;
- case CHIP_RS600:
- rdev->asic = &rs600_asic;
- break;
- case CHIP_RS690:
- case CHIP_RS740:
- rdev->asic = &rs690_asic;
- break;
- case CHIP_RV515:
- rdev->asic = &rv515_asic;
- break;
- case CHIP_R520:
- case CHIP_RV530:
- case CHIP_RV560:
- case CHIP_RV570:
- case CHIP_R580:
- rdev->asic = &r520_asic;
- break;
- case CHIP_R600:
- case CHIP_RV610:
- case CHIP_RV630:
- case CHIP_RV620:
- case CHIP_RV635:
- case CHIP_RV670:
- case CHIP_RS780:
- case CHIP_RS880:
- rdev->asic = &r600_asic;
- break;
- case CHIP_RV770:
- case CHIP_RV730:
- case CHIP_RV710:
- case CHIP_RV740:
- rdev->asic = &rv770_asic;
- break;
- case CHIP_CEDAR:
- case CHIP_REDWOOD:
- case CHIP_JUNIPER:
- case CHIP_CYPRESS:
- case CHIP_HEMLOCK:
- rdev->asic = &evergreen_asic;
- break;
- default:
- /* FIXME: not supported yet */
- return -EINVAL;
- }
-
- if (rdev->flags & RADEON_IS_IGP) {
- rdev->asic->get_memory_clock = NULL;
- rdev->asic->set_memory_clock = NULL;
- }
-
- return 0;
-}
-
-
-/*
- * Wrapper around modesetting bits.
- */
-int radeon_clocks_init(struct radeon_device *rdev)
-{
- int r;
-
- r = radeon_static_clocks_init(rdev->ddev);
- if (r) {
- return r;
- }
- DRM_INFO("Clocks initialized !\n");
- return 0;
-}
-
-void radeon_clocks_fini(struct radeon_device *rdev)
-{
-}
-
/* ATOM accessor methods */
static uint32_t cail_pll_read(struct card_info *info, uint32_t reg)
{
@@ -567,29 +421,6 @@ static unsigned int radeon_vga_set_decode(void *cookie, bool state)
return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
}
-void radeon_agp_disable(struct radeon_device *rdev)
-{
- rdev->flags &= ~RADEON_IS_AGP;
- if (rdev->family >= CHIP_R600) {
- DRM_INFO("Forcing AGP to PCIE mode\n");
- rdev->flags |= RADEON_IS_PCIE;
- } else if (rdev->family >= CHIP_RV515 ||
- rdev->family == CHIP_RV380 ||
- rdev->family == CHIP_RV410 ||
- rdev->family == CHIP_R423) {
- DRM_INFO("Forcing AGP to PCIE mode\n");
- rdev->flags |= RADEON_IS_PCIE;
- rdev->asic->gart_tlb_flush = &rv370_pcie_gart_tlb_flush;
- rdev->asic->gart_set_page = &rv370_pcie_gart_set_page;
- } else {
- DRM_INFO("Forcing AGP to PCI mode\n");
- rdev->flags |= RADEON_IS_PCI;
- rdev->asic->gart_tlb_flush = &r100_pci_gart_tlb_flush;
- rdev->asic->gart_set_page = &r100_pci_gart_set_page;
- }
- rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
-}
-
void radeon_check_arguments(struct radeon_device *rdev)
{
/* vramlimit must be a power of two */
@@ -731,6 +562,14 @@ int radeon_device_init(struct radeon_device *rdev,
return r;
radeon_check_arguments(rdev);
+ /* all of the newer IGP chips have an internal gart
+ * However some rs4xx report as AGP, so remove that here.
+ */
+ if ((rdev->family >= CHIP_RS400) &&
+ (rdev->flags & RADEON_IS_IGP)) {
+ rdev->flags &= ~RADEON_IS_AGP;
+ }
+
if (rdev->flags & RADEON_IS_AGP && radeon_agpmode == -1) {
radeon_agp_disable(rdev);
}
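Note: radeon_update_bandwidth_info() above does its math in the driver's 20.12 fixed-point format; the engine and memory clocks are reported in 10 kHz units, so dividing by rfixed_const(100) yields MHz, which is what the "sclk(Mhz)" comment refers to. A stand-alone sketch of the arithmetic; fx_const()/fx_div() below only model the driver's rfixed_const()/rfixed_div() helpers, and the 400 MHz clock is made up:

	#include <stdint.h>
	#include <stdio.h>

	/* 20.12 fixed point: 12 fractional bits, value = full / 4096 */
	static uint32_t fx_const(uint32_t a)           { return a << 12; }
	static uint32_t fx_div(uint32_t a, uint32_t b)
	{
		return (uint32_t)(((uint64_t)a << 12) / b);
	}

	int main(void)
	{
		uint32_t sclk_10khz = 40000; /* hypothetical 400 MHz engine clock */
		uint32_t sclk = fx_div(fx_const(sclk_10khz), fx_const(100));

		printf("sclk = %u.%03u MHz\n",
		       sclk >> 12, ((sclk & 0xfff) * 1000) >> 12);
		return 0;
	}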
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index ba8d806dcf3..b8d67282824 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -368,10 +368,9 @@ static bool radeon_setup_enc_conn(struct drm_device *dev)
if (rdev->bios) {
if (rdev->is_atom_bios) {
- if (rdev->family >= CHIP_R600)
+ ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
+ if (ret == false)
ret = radeon_get_atom_connector_info_from_object_table(dev);
- else
- ret = radeon_get_atom_connector_info_from_supported_devices_table(dev);
} else {
ret = radeon_get_legacy_connector_info_from_bios(dev);
if (ret == false)
@@ -469,10 +468,19 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll,
uint32_t best_error = 0xffffffff;
uint32_t best_vco_diff = 1;
uint32_t post_div;
+ u32 pll_out_min, pll_out_max;
DRM_DEBUG("PLL freq %llu %u %u\n", freq, pll->min_ref_div, pll->max_ref_div);
freq = freq * 1000;
+ if (pll->flags & RADEON_PLL_IS_LCD) {
+ pll_out_min = pll->lcd_pll_out_min;
+ pll_out_max = pll->lcd_pll_out_max;
+ } else {
+ pll_out_min = pll->pll_out_min;
+ pll_out_max = pll->pll_out_max;
+ }
+
if (pll->flags & RADEON_PLL_USE_REF_DIV)
min_ref_div = max_ref_div = pll->reference_div;
else {
@@ -536,10 +544,10 @@ static void radeon_compute_pll_legacy(struct radeon_pll *pll,
tmp = (uint64_t)pll->reference_freq * feedback_div;
vco = radeon_div(tmp, ref_div);
- if (vco < pll->pll_out_min) {
+ if (vco < pll_out_min) {
min_feed_div = feedback_div + 1;
continue;
- } else if (vco > pll->pll_out_max) {
+ } else if (vco > pll_out_max) {
max_feed_div = feedback_div;
continue;
}
@@ -675,6 +683,15 @@ calc_fb_ref_div(struct radeon_pll *pll,
{
fixed20_12 ffreq, max_error, error, pll_out, a;
u32 vco;
+ u32 pll_out_min, pll_out_max;
+
+ if (pll->flags & RADEON_PLL_IS_LCD) {
+ pll_out_min = pll->lcd_pll_out_min;
+ pll_out_max = pll->lcd_pll_out_max;
+ } else {
+ pll_out_min = pll->pll_out_min;
+ pll_out_max = pll->pll_out_max;
+ }
ffreq.full = rfixed_const(freq);
/* max_error = ffreq * 0.0025; */
@@ -686,7 +703,7 @@ calc_fb_ref_div(struct radeon_pll *pll,
vco = pll->reference_freq * (((*fb_div) * 10) + (*fb_div_frac));
vco = vco / ((*ref_div) * 10);
- if ((vco < pll->pll_out_min) || (vco > pll->pll_out_max))
+ if ((vco < pll_out_min) || (vco > pll_out_max))
continue;
/* pll_out = vco / post_div; */
@@ -714,6 +731,15 @@ static void radeon_compute_pll_new(struct radeon_pll *pll,
{
u32 fb_div = 0, fb_div_frac = 0, post_div = 0, ref_div = 0;
u32 best_freq = 0, vco_frequency;
+ u32 pll_out_min, pll_out_max;
+
+ if (pll->flags & RADEON_PLL_IS_LCD) {
+ pll_out_min = pll->lcd_pll_out_min;
+ pll_out_max = pll->lcd_pll_out_max;
+ } else {
+ pll_out_min = pll->pll_out_min;
+ pll_out_max = pll->pll_out_max;
+ }
/* freq = freq / 10; */
do_div(freq, 10);
@@ -724,7 +750,7 @@ static void radeon_compute_pll_new(struct radeon_pll *pll,
goto done;
vco_frequency = freq * post_div;
- if ((vco_frequency < pll->pll_out_min) || (vco_frequency > pll->pll_out_max))
+ if ((vco_frequency < pll_out_min) || (vco_frequency > pll_out_max))
goto done;
if (pll->flags & RADEON_PLL_USE_REF_DIV) {
@@ -749,7 +775,7 @@ static void radeon_compute_pll_new(struct radeon_pll *pll,
continue;
vco_frequency = freq * post_div;
- if ((vco_frequency < pll->pll_out_min) || (vco_frequency > pll->pll_out_max))
+ if ((vco_frequency < pll_out_min) || (vco_frequency > pll_out_max))
continue;
if (pll->flags & RADEON_PLL_USE_REF_DIV) {
ref_div = pll->reference_div;
@@ -945,6 +971,23 @@ static int radeon_modeset_create_props(struct radeon_device *rdev)
return 0;
}
+void radeon_update_display_priority(struct radeon_device *rdev)
+{
+ /* adjustment options for the display watermarks */
+ if ((radeon_disp_priority == 0) || (radeon_disp_priority > 2)) {
+ /* set display priority to high for r3xx, rv515 chips
+ * this avoids flickering due to underflow to the
+ * display controllers during heavy acceleration.
+ */
+ if (ASIC_IS_R300(rdev) || (rdev->family == CHIP_RV515))
+ rdev->disp_priority = 2;
+ else
+ rdev->disp_priority = 0;
+ } else
+ rdev->disp_priority = radeon_disp_priority;
+
+}
+
int radeon_modeset_init(struct radeon_device *rdev)
{
int i;
@@ -976,15 +1019,6 @@ int radeon_modeset_init(struct radeon_device *rdev)
radeon_combios_check_hardcoded_edid(rdev);
}
- if (rdev->flags & RADEON_SINGLE_CRTC)
- rdev->num_crtc = 1;
- else {
- if (ASIC_IS_DCE4(rdev))
- rdev->num_crtc = 6;
- else
- rdev->num_crtc = 2;
- }
-
/* allocate crtcs */
for (i = 0; i < rdev->num_crtc; i++) {
radeon_crtc_init(rdev->ddev, i);
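Note: the three PLL paths above (radeon_compute_pll_legacy, calc_fb_ref_div and radeon_compute_pll_new) now pick their VCO limits with the same four-line pattern keyed off the new RADEON_PLL_IS_LCD flag. A hypothetical helper, not in the patch, that captures the shared logic; it assumes the struct radeon_pll fields added in radeon_mode.h further down:

	static void radeon_pll_out_range(const struct radeon_pll *pll,
					 u32 *out_min, u32 *out_max)
	{
		/* LCD panels may carry their own VCO window from the BIOS */
		if (pll->flags & RADEON_PLL_IS_LCD) {
			*out_min = pll->lcd_pll_out_min;
			*out_max = pll->lcd_pll_out_max;
		} else {
			*out_min = pll->pll_out_min;
			*out_max = pll->pll_out_max;
		}
	}

Each caller would then fetch the pair once at the top of the function, exactly as the repeated blocks above do.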
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 6eec0ece6a6..055a51732dc 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -42,9 +42,10 @@
* KMS wrapper.
* - 2.0.0 - initial interface
* - 2.1.0 - add square tiling interface
+ * - 2.2.0 - add r6xx/r7xx const buffer support
*/
#define KMS_DRIVER_MAJOR 2
-#define KMS_DRIVER_MINOR 1
+#define KMS_DRIVER_MINOR 2
#define KMS_DRIVER_PATCHLEVEL 0
int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
int radeon_driver_unload_kms(struct drm_device *dev);
@@ -91,6 +92,8 @@ int radeon_tv = 1;
int radeon_new_pll = -1;
int radeon_dynpm = -1;
int radeon_audio = 1;
+int radeon_disp_priority = 0;
+int radeon_hw_i2c = 0;
MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -134,6 +137,12 @@ module_param_named(dynpm, radeon_dynpm, int, 0444);
MODULE_PARM_DESC(audio, "Audio enable (0 = disable)");
module_param_named(audio, radeon_audio, int, 0444);
+MODULE_PARM_DESC(disp_priority, "Display Priority (0 = auto, 1 = normal, 2 = high)");
+module_param_named(disp_priority, radeon_disp_priority, int, 0444);
+
+MODULE_PARM_DESC(hw_i2c, "hw i2c engine enable (0 = disable)");
+module_param_named(hw_i2c, radeon_hw_i2c, int, 0444);
+
static int radeon_suspend(struct drm_device *dev, pm_message_t state)
{
drm_radeon_private_t *dev_priv = dev->dev_private;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
index ec55f2b23c2..448eba89d1e 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.h
+++ b/drivers/gpu/drm/radeon/radeon_drv.h
@@ -107,9 +107,10 @@
* 1.30- Add support for occlusion queries
* 1.31- Add support for num Z pipes from GET_PARAM
* 1.32- fixes for rv740 setup
+ * 1.33- Add r6xx/r7xx const buffer support
*/
#define DRIVER_MAJOR 1
-#define DRIVER_MINOR 32
+#define DRIVER_MINOR 33
#define DRIVER_PATCHLEVEL 0
enum radeon_cp_microcode_version {
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index bc926ea0a53..52d6f96f274 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -302,7 +302,7 @@ static bool radeon_atom_mode_fixup(struct drm_encoder *encoder,
}
if (ASIC_IS_DCE3(rdev) &&
- (radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT))) {
+ (radeon_encoder->active_device & (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT))) {
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
radeon_dp_set_link_config(connector, mode);
}
@@ -519,7 +519,8 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
break;
}
- atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return;
switch (frev) {
case 1:
@@ -593,7 +594,6 @@ atombios_digital_setup(struct drm_encoder *encoder, int action)
}
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
- r600_hdmi_enable(encoder, hdmi_detected);
}
int
@@ -708,7 +708,7 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
struct radeon_connector_atom_dig *dig_connector =
radeon_get_atom_connector_priv_from_encoder(encoder);
union dig_encoder_control args;
- int index = 0, num = 0;
+ int index = 0;
uint8_t frev, crev;
if (!dig || !dig_connector)
@@ -724,9 +724,9 @@ atombios_dig_encoder_setup(struct drm_encoder *encoder, int action)
else
index = GetIndexIntoMasterTable(COMMAND, DIG1EncoderControl);
}
- num = dig->dig_encoder + 1;
- atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return;
args.v1.ucAction = action;
args.v1.usPixelClock = cpu_to_le16(radeon_encoder->pixel_clock / 10);
@@ -785,7 +785,7 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
struct drm_connector *connector;
struct radeon_connector *radeon_connector;
union dig_transmitter_control args;
- int index = 0, num = 0;
+ int index = 0;
uint8_t frev, crev;
bool is_dp = false;
int pll_id = 0;
@@ -814,7 +814,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
}
}
- atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return;
args.v1.ucAction = action;
if (action == ATOM_TRANSMITTER_ACTION_INIT) {
@@ -860,15 +861,12 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
args.v3.acConfig.ucTransmitterSel = 0;
- num = 0;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
args.v3.acConfig.ucTransmitterSel = 1;
- num = 1;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
args.v3.acConfig.ucTransmitterSel = 2;
- num = 2;
break;
}
@@ -879,23 +877,19 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
args.v3.acConfig.fCoherentMode = 1;
}
} else if (ASIC_IS_DCE32(rdev)) {
- if (dig->dig_encoder == 1)
- args.v2.acConfig.ucEncoderSel = 1;
+ args.v2.acConfig.ucEncoderSel = dig->dig_encoder;
if (dig_connector->linkb)
args.v2.acConfig.ucLinkSel = 1;
switch (radeon_encoder->encoder_id) {
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
args.v2.acConfig.ucTransmitterSel = 0;
- num = 0;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY1:
args.v2.acConfig.ucTransmitterSel = 1;
- num = 1;
break;
case ENCODER_OBJECT_ID_INTERNAL_UNIPHY2:
args.v2.acConfig.ucTransmitterSel = 2;
- num = 2;
break;
}
@@ -913,31 +907,25 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
else
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_DIG1_ENCODER;
- switch (radeon_encoder->encoder_id) {
- case ENCODER_OBJECT_ID_INTERNAL_UNIPHY:
- if (rdev->flags & RADEON_IS_IGP) {
- if (radeon_encoder->pixel_clock > 165000) {
- if (dig_connector->igp_lane_info & 0x3)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
- else if (dig_connector->igp_lane_info & 0xc)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
- } else {
- if (dig_connector->igp_lane_info & 0x1)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
- else if (dig_connector->igp_lane_info & 0x2)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
- else if (dig_connector->igp_lane_info & 0x4)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
- else if (dig_connector->igp_lane_info & 0x8)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
- }
+ if ((rdev->flags & RADEON_IS_IGP) &&
+ (radeon_encoder->encoder_id == ENCODER_OBJECT_ID_INTERNAL_UNIPHY)) {
+ if (is_dp || (radeon_encoder->pixel_clock <= 165000)) {
+ if (dig_connector->igp_lane_info & 0x1)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_3;
+ else if (dig_connector->igp_lane_info & 0x2)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_4_7;
+ else if (dig_connector->igp_lane_info & 0x4)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_11;
+ else if (dig_connector->igp_lane_info & 0x8)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_12_15;
+ } else {
+ if (dig_connector->igp_lane_info & 0x3)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_0_7;
+ else if (dig_connector->igp_lane_info & 0xc)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LANE_8_15;
}
- break;
}
- if (radeon_encoder->pixel_clock > 165000)
- args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
-
if (dig_connector->linkb)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_LINKB;
else
@@ -948,6 +936,8 @@ atombios_dig_transmitter_setup(struct drm_encoder *encoder, int action, uint8_t
else if (radeon_encoder->devices & (ATOM_DEVICE_DFP_SUPPORT)) {
if (dig->coherent_mode)
args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_COHERENT;
+ if (radeon_encoder->pixel_clock > 165000)
+ args.v1.ucConfig |= ATOM_TRANSMITTER_CONFIG_8LANE_LINK;
}
}
@@ -1054,16 +1044,25 @@ radeon_atom_encoder_dpms(struct drm_encoder *encoder, int mode)
if (is_dig) {
switch (mode) {
case DRM_MODE_DPMS_ON:
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
- {
+ if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
+
dp_link_train(encoder, connector);
+ if (ASIC_IS_DCE4(rdev))
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_ON);
}
+ if (!ASIC_IS_DCE4(rdev))
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
break;
case DRM_MODE_DPMS_STANDBY:
case DRM_MODE_DPMS_SUSPEND:
case DRM_MODE_DPMS_OFF:
- atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
+ if (!ASIC_IS_DCE4(rdev))
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_DISABLE_OUTPUT, 0, 0);
+ if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_DP) {
+ if (ASIC_IS_DCE4(rdev))
+ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_DP_VIDEO_OFF);
+ }
break;
}
} else {
@@ -1104,7 +1103,8 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
memset(&args, 0, sizeof(args));
- atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return;
switch (frev) {
case 1:
@@ -1216,6 +1216,9 @@ atombios_set_encoder_crtc_source(struct drm_encoder *encoder)
}
atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args);
+
+ /* update scratch regs with new routing */
+ radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
}
static void
@@ -1326,19 +1329,9 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
struct drm_device *dev = encoder->dev;
struct radeon_device *rdev = dev->dev_private;
struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
- struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
- if (radeon_encoder->active_device &
- (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
- struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
- if (dig)
- dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
- }
radeon_encoder->pixel_clock = adjusted_mode->clock;
- radeon_atombios_encoder_crtc_scratch_regs(encoder, radeon_crtc->crtc_id);
- atombios_set_encoder_crtc_source(encoder);
-
if (ASIC_IS_AVIVO(rdev)) {
if (radeon_encoder->active_device & (ATOM_DEVICE_CV_SUPPORT | ATOM_DEVICE_TV_SUPPORT))
atombios_yuv_setup(encoder, true);
@@ -1396,9 +1389,10 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
}
atombios_apply_encoder_quirks(encoder, adjusted_mode);
- /* XXX */
- if (!ASIC_IS_DCE4(rdev))
+ if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
+ r600_hdmi_enable(encoder);
r600_hdmi_setmode(encoder, adjusted_mode);
+ }
}
static bool
@@ -1418,7 +1412,8 @@ atombios_dac_load_detect(struct drm_encoder *encoder, struct drm_connector *conn
memset(&args, 0, sizeof(args));
- atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev);
+ if (!atom_parse_cmd_header(rdev->mode_info.atom_context, index, &frev, &crev))
+ return false;
args.sDacload.ucMisc = 0;
@@ -1492,8 +1487,20 @@ radeon_atom_dac_detect(struct drm_encoder *encoder, struct drm_connector *connec
static void radeon_atom_encoder_prepare(struct drm_encoder *encoder)
{
+ struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
+
+ if (radeon_encoder->active_device &
+ (ATOM_DEVICE_DFP_SUPPORT | ATOM_DEVICE_LCD_SUPPORT)) {
+ struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
+ if (dig)
+ dig->dig_encoder = radeon_atom_pick_dig_encoder(encoder);
+ }
+
radeon_atom_output_lock(encoder, true);
radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
+
+ /* this is needed for the pll/ss setup to work correctly in some cases */
+ atombios_set_encoder_crtc_source(encoder);
}
static void radeon_atom_encoder_commit(struct drm_encoder *encoder)
@@ -1509,6 +1516,8 @@ static void radeon_atom_encoder_disable(struct drm_encoder *encoder)
radeon_atom_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
if (radeon_encoder_is_digital(encoder)) {
+ if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI)
+ r600_hdmi_disable(encoder);
dig = radeon_encoder->enc_priv;
dig->dig_encoder = -1;
}
@@ -1659,6 +1668,4 @@ radeon_add_atom_encoder(struct drm_device *dev, uint32_t encoder_id, uint32_t su
drm_encoder_helper_add(encoder, &radeon_atom_dig_helper_funcs);
break;
}
-
- r600_hdmi_init(encoder);
}
diff --git a/drivers/gpu/drm/radeon/radeon_i2c.c b/drivers/gpu/drm/radeon/radeon_i2c.c
index 4ae50c19589..5def6f5dff3 100644
--- a/drivers/gpu/drm/radeon/radeon_i2c.c
+++ b/drivers/gpu/drm/radeon/radeon_i2c.c
@@ -59,6 +59,7 @@ bool radeon_ddc_probe(struct radeon_connector *radeon_connector)
return false;
}
+/* bit banging i2c */
static void radeon_i2c_do_lock(struct radeon_i2c_chan *i2c, int lock_state)
{
@@ -181,13 +182,30 @@ static void set_data(void *i2c_priv, int data)
WREG32(rec->en_data_reg, val);
}
+static int pre_xfer(struct i2c_adapter *i2c_adap)
+{
+ struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+
+ radeon_i2c_do_lock(i2c, 1);
+
+ return 0;
+}
+
+static void post_xfer(struct i2c_adapter *i2c_adap)
+{
+ struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
+
+ radeon_i2c_do_lock(i2c, 0);
+}
+
+/* hw i2c */
+
static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
{
- struct radeon_pll *spll = &rdev->clock.spll;
u32 sclk = radeon_get_engine_clock(rdev);
u32 prescale = 0;
- u32 n, m;
- u8 loop;
+ u32 nm;
+ u8 n, m, loop;
int i2c_clock;
switch (rdev->family) {
@@ -203,13 +221,15 @@ static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
case CHIP_R300:
case CHIP_R350:
case CHIP_RV350:
- n = (spll->reference_freq) / (4 * 6);
+ i2c_clock = 60;
+ nm = (sclk * 10) / (i2c_clock * 4);
for (loop = 1; loop < 255; loop++) {
- if ((loop * (loop - 1)) > n)
+ if ((nm / loop) < loop)
break;
}
- m = loop - 1;
- prescale = m | (loop << 8);
+ n = loop - 1;
+ m = loop - 2;
+ prescale = m | (n << 8);
break;
case CHIP_RV380:
case CHIP_RS400:
@@ -217,7 +237,6 @@ static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
case CHIP_R420:
case CHIP_R423:
case CHIP_RV410:
- sclk = radeon_get_engine_clock(rdev);
prescale = (((sclk * 10)/(4 * 128 * 100) + 1) << 8) + 128;
break;
case CHIP_RS600:
@@ -232,7 +251,6 @@ static u32 radeon_get_i2c_prescale(struct radeon_device *rdev)
case CHIP_RV570:
case CHIP_R580:
i2c_clock = 50;
- sclk = radeon_get_engine_clock(rdev);
if (rdev->family == CHIP_R520)
prescale = (127 << 8) + ((sclk * 10) / (4 * 127 * i2c_clock));
else
@@ -291,6 +309,7 @@ static int r100_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
prescale = radeon_get_i2c_prescale(rdev);
reg = ((prescale << RADEON_I2C_PRESCALE_SHIFT) |
+ RADEON_I2C_DRIVE_EN |
RADEON_I2C_START |
RADEON_I2C_STOP |
RADEON_I2C_GO);
@@ -757,26 +776,13 @@ done:
return ret;
}
-static int radeon_sw_i2c_xfer(struct i2c_adapter *i2c_adap,
+static int radeon_hw_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg *msgs, int num)
{
struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
- int ret;
-
- radeon_i2c_do_lock(i2c, 1);
- ret = i2c_transfer(&i2c->algo.radeon.bit_adapter, msgs, num);
- radeon_i2c_do_lock(i2c, 0);
-
- return ret;
-}
-
-static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap,
- struct i2c_msg *msgs, int num)
-{
- struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap);
struct radeon_device *rdev = i2c->dev->dev_private;
struct radeon_i2c_bus_rec *rec = &i2c->rec;
- int ret;
+ int ret = 0;
switch (rdev->family) {
case CHIP_R100:
@@ -797,16 +803,12 @@ static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap,
case CHIP_RV410:
case CHIP_RS400:
case CHIP_RS480:
- if (rec->hw_capable)
- ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
- else
- ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
+ ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
break;
case CHIP_RS600:
case CHIP_RS690:
case CHIP_RS740:
/* XXX fill in hw i2c implementation */
- ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
break;
case CHIP_RV515:
case CHIP_R520:
@@ -814,20 +816,16 @@ static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap,
case CHIP_RV560:
case CHIP_RV570:
case CHIP_R580:
- if (rec->hw_capable) {
- if (rec->mm_i2c)
- ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
- else
- ret = r500_hw_i2c_xfer(i2c_adap, msgs, num);
- } else
- ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
+ if (rec->mm_i2c)
+ ret = r100_hw_i2c_xfer(i2c_adap, msgs, num);
+ else
+ ret = r500_hw_i2c_xfer(i2c_adap, msgs, num);
break;
case CHIP_R600:
case CHIP_RV610:
case CHIP_RV630:
case CHIP_RV670:
/* XXX fill in hw i2c implementation */
- ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
break;
case CHIP_RV620:
case CHIP_RV635:
@@ -838,7 +836,6 @@ static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap,
case CHIP_RV710:
case CHIP_RV740:
/* XXX fill in hw i2c implementation */
- ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
break;
case CHIP_CEDAR:
case CHIP_REDWOOD:
@@ -846,7 +843,6 @@ static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap,
case CHIP_CYPRESS:
case CHIP_HEMLOCK:
/* XXX fill in hw i2c implementation */
- ret = radeon_sw_i2c_xfer(i2c_adap, msgs, num);
break;
default:
DRM_ERROR("i2c: unhandled radeon chip\n");
@@ -857,20 +853,21 @@ static int radeon_i2c_xfer(struct i2c_adapter *i2c_adap,
return ret;
}
-static u32 radeon_i2c_func(struct i2c_adapter *adap)
+static u32 radeon_hw_i2c_func(struct i2c_adapter *adap)
{
return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}
static const struct i2c_algorithm radeon_i2c_algo = {
- .master_xfer = radeon_i2c_xfer,
- .functionality = radeon_i2c_func,
+ .master_xfer = radeon_hw_i2c_xfer,
+ .functionality = radeon_hw_i2c_func,
};
struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
struct radeon_i2c_bus_rec *rec,
const char *name)
{
+ struct radeon_device *rdev = dev->dev_private;
struct radeon_i2c_chan *i2c;
int ret;
@@ -878,37 +875,43 @@ struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
if (i2c == NULL)
return NULL;
- /* set the internal bit adapter */
- i2c->algo.radeon.bit_adapter.owner = THIS_MODULE;
- i2c_set_adapdata(&i2c->algo.radeon.bit_adapter, i2c);
- sprintf(i2c->algo.radeon.bit_adapter.name, "Radeon internal i2c bit bus %s", name);
- i2c->algo.radeon.bit_adapter.algo_data = &i2c->algo.radeon.bit_data;
- i2c->algo.radeon.bit_data.setsda = set_data;
- i2c->algo.radeon.bit_data.setscl = set_clock;
- i2c->algo.radeon.bit_data.getsda = get_data;
- i2c->algo.radeon.bit_data.getscl = get_clock;
- i2c->algo.radeon.bit_data.udelay = 20;
- /* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always
- * make this, 2 jiffies is a lot more reliable */
- i2c->algo.radeon.bit_data.timeout = 2;
- i2c->algo.radeon.bit_data.data = i2c;
- ret = i2c_bit_add_bus(&i2c->algo.radeon.bit_adapter);
- if (ret) {
- DRM_ERROR("Failed to register internal bit i2c %s\n", name);
- goto out_free;
- }
- /* set the radeon i2c adapter */
- i2c->dev = dev;
i2c->rec = *rec;
i2c->adapter.owner = THIS_MODULE;
+ i2c->dev = dev;
i2c_set_adapdata(&i2c->adapter, i2c);
- sprintf(i2c->adapter.name, "Radeon i2c %s", name);
- i2c->adapter.algo_data = &i2c->algo.radeon;
- i2c->adapter.algo = &radeon_i2c_algo;
- ret = i2c_add_adapter(&i2c->adapter);
- if (ret) {
- DRM_ERROR("Failed to register i2c %s\n", name);
- goto out_free;
+ if (rec->mm_i2c ||
+ (rec->hw_capable &&
+ radeon_hw_i2c &&
+ ((rdev->family <= CHIP_RS480) ||
+ ((rdev->family >= CHIP_RV515) && (rdev->family <= CHIP_R580))))) {
+ /* set the radeon hw i2c adapter */
+ sprintf(i2c->adapter.name, "Radeon i2c hw bus %s", name);
+ i2c->adapter.algo = &radeon_i2c_algo;
+ ret = i2c_add_adapter(&i2c->adapter);
+ if (ret) {
+ DRM_ERROR("Failed to register hw i2c %s\n", name);
+ goto out_free;
+ }
+ } else {
+ /* set the radeon bit adapter */
+ sprintf(i2c->adapter.name, "Radeon i2c bit bus %s", name);
+ i2c->adapter.algo_data = &i2c->algo.bit;
+ i2c->algo.bit.pre_xfer = pre_xfer;
+ i2c->algo.bit.post_xfer = post_xfer;
+ i2c->algo.bit.setsda = set_data;
+ i2c->algo.bit.setscl = set_clock;
+ i2c->algo.bit.getsda = get_data;
+ i2c->algo.bit.getscl = get_clock;
+ i2c->algo.bit.udelay = 20;
+ /* vesa says 2.2 ms is enough, 1 jiffy doesn't seem to always
+ * make this, 2 jiffies is a lot more reliable */
+ i2c->algo.bit.timeout = 2;
+ i2c->algo.bit.data = i2c;
+ ret = i2c_bit_add_bus(&i2c->adapter);
+ if (ret) {
+ DRM_ERROR("Failed to register bit i2c %s\n", name);
+ goto out_free;
+ }
}
return i2c;
@@ -953,16 +956,6 @@ void radeon_i2c_destroy(struct radeon_i2c_chan *i2c)
{
if (!i2c)
return;
- i2c_del_adapter(&i2c->algo.radeon.bit_adapter);
- i2c_del_adapter(&i2c->adapter);
- kfree(i2c);
-}
-
-void radeon_i2c_destroy_dp(struct radeon_i2c_chan *i2c)
-{
- if (!i2c)
- return;
-
i2c_del_adapter(&i2c->adapter);
kfree(i2c);
}
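Note: radeon_i2c_create() now registers either the hardware i2c engine (only when the bus is hw_capable, the new radeon.hw_i2c module option is set and the family has a working implementation) or a plain i2c-algo-bit adapter whose pre_xfer/post_xfer hooks take over the GPIO mux locking that the removed radeon_sw_i2c_xfer wrapper used to do. Callers are unaffected: transfers still go through the i2c core. A kernel-context sketch, assuming an already registered struct radeon_i2c_chan *i2c and a read at the usual DDC EDID address; the buffer and error handling are illustrative:

	u8 edid[128];
	struct i2c_msg msg = {
		.addr  = 0x50,          /* DDC EDID EEPROM */
		.flags = I2C_M_RD,
		.len   = sizeof(edid),
		.buf   = edid,
	};

	/* the i2c core dispatches to either the hw engine xfer or the
	 * bit-banging algorithm, which calls pre_xfer/post_xfer itself */
	if (i2c_transfer(&i2c->adapter, &msg, 1) != 1)
		return -EIO;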
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 3cfd60fd008..a212041e8b0 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -67,9 +67,10 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev)
/* Disable *all* interrupts */
rdev->irq.sw_int = false;
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < rdev->num_crtc; i++)
rdev->irq.crtc_vblank_int[i] = false;
- }
+ for (i = 0; i < 6; i++)
+ rdev->irq.hpd[i] = false;
radeon_irq_set(rdev);
/* Clear bits */
radeon_irq_process(rdev);
@@ -95,34 +96,29 @@ void radeon_driver_irq_uninstall_kms(struct drm_device *dev)
}
/* Disable *all* interrupts */
rdev->irq.sw_int = false;
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < rdev->num_crtc; i++)
rdev->irq.crtc_vblank_int[i] = false;
+ for (i = 0; i < 6; i++)
rdev->irq.hpd[i] = false;
- }
radeon_irq_set(rdev);
}
int radeon_irq_kms_init(struct radeon_device *rdev)
{
int r = 0;
- int num_crtc = 2;
- if (rdev->flags & RADEON_SINGLE_CRTC)
- num_crtc = 1;
spin_lock_init(&rdev->irq.sw_lock);
- r = drm_vblank_init(rdev->ddev, num_crtc);
+ r = drm_vblank_init(rdev->ddev, rdev->num_crtc);
if (r) {
return r;
}
/* enable msi */
rdev->msi_enabled = 0;
- /* MSIs don't seem to work on my rs780;
- * not sure about rs880 or other rs780s.
- * Needs more investigation.
+ /* MSIs don't seem to work reliably on all IGP
+ * chips. Disable MSI on them for now.
*/
if ((rdev->family >= CHIP_RV380) &&
- (rdev->family != CHIP_RS780) &&
- (rdev->family != CHIP_RS880)) {
+ (!(rdev->flags & RADEON_IS_IGP))) {
int ret = pci_enable_msi(rdev->pdev);
if (!ret) {
rdev->msi_enabled = 1;
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
index df23d6a01d0..88865e38fe3 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_crtc.c
@@ -603,6 +603,10 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
? RADEON_CRTC2_INTERLACE_EN
: 0));
+ /* rs4xx chips seem to like to have the crtc enabled when the timing is set */
+ if ((rdev->family == CHIP_RS400) || (rdev->family == CHIP_RS480))
+ crtc2_gen_cntl |= RADEON_CRTC2_EN;
+
disp2_merge_cntl = RREG32(RADEON_DISP2_MERGE_CNTL);
disp2_merge_cntl &= ~RADEON_DISP2_RGB_OFFSET_EN;
@@ -630,6 +634,10 @@ static bool radeon_set_crtc_timing(struct drm_crtc *crtc, struct drm_display_mod
? RADEON_CRTC_INTERLACE_EN
: 0));
+ /* rs4xx chips seem to like to have the crtc enabled when the timing is set */
+ if ((rdev->family == CHIP_RS400) || (rdev->family == CHIP_RS480))
+ crtc_gen_cntl |= RADEON_CRTC_EN;
+
crtc_ext_cntl = RREG32(RADEON_CRTC_EXT_CNTL);
crtc_ext_cntl |= (RADEON_XCRT_CNT_EN |
RADEON_CRTC_VSYNC_DIS |
diff --git a/drivers/gpu/drm/radeon/radeon_legacy_tv.c b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
index 417684daef4..f2ed27c8055 100644
--- a/drivers/gpu/drm/radeon/radeon_legacy_tv.c
+++ b/drivers/gpu/drm/radeon/radeon_legacy_tv.c
@@ -57,6 +57,10 @@
#define NTSC_TV_PLL_N_14 693
#define NTSC_TV_PLL_P_14 7
+#define PAL_TV_PLL_M_14 19
+#define PAL_TV_PLL_N_14 353
+#define PAL_TV_PLL_P_14 5
+
#define VERT_LEAD_IN_LINES 2
#define FRAC_BITS 0xe
#define FRAC_MASK 0x3fff
@@ -205,9 +209,24 @@ static const struct radeon_tv_mode_constants available_tv_modes[] = {
630627, /* defRestart */
347, /* crtcPLL_N */
14, /* crtcPLL_M */
- 8, /* crtcPLL_postDiv */
+ 8, /* crtcPLL_postDiv */
1022, /* pixToTV */
},
+ { /* PAL timing for 14 Mhz ref clk */
+ 800, /* horResolution */
+ 600, /* verResolution */
+ TV_STD_PAL, /* standard */
+ 1131, /* horTotal */
+ 742, /* verTotal */
+ 813, /* horStart */
+ 840, /* horSyncStart */
+ 633, /* verSyncStart */
+ 708369, /* defRestart */
+ 211, /* crtcPLL_N */
+ 9, /* crtcPLL_M */
+ 8, /* crtcPLL_postDiv */
+ 759, /* pixToTV */
+ },
};
#define N_AVAILABLE_MODES ARRAY_SIZE(available_tv_modes)
@@ -242,7 +261,7 @@ static const struct radeon_tv_mode_constants *radeon_legacy_tv_get_std_mode(stru
if (pll->reference_freq == 2700)
const_ptr = &available_tv_modes[1];
else
- const_ptr = &available_tv_modes[1]; /* FIX ME */
+ const_ptr = &available_tv_modes[3];
}
return const_ptr;
}
@@ -685,9 +704,9 @@ void radeon_legacy_tv_mode_set(struct drm_encoder *encoder,
n = PAL_TV_PLL_N_27;
p = PAL_TV_PLL_P_27;
} else {
- m = PAL_TV_PLL_M_27;
- n = PAL_TV_PLL_N_27;
- p = PAL_TV_PLL_P_27;
+ m = PAL_TV_PLL_M_14;
+ n = PAL_TV_PLL_N_14;
+ p = PAL_TV_PLL_P_14;
}
}
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 1702b820aa4..0b8e32776b1 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -129,6 +129,7 @@ struct radeon_tmds_pll {
#define RADEON_PLL_USE_FRAC_FB_DIV (1 << 10)
#define RADEON_PLL_PREFER_CLOSEST_LOWER (1 << 11)
#define RADEON_PLL_USE_POST_DIV (1 << 12)
+#define RADEON_PLL_IS_LCD (1 << 13)
/* pll algo */
enum radeon_pll_algo {
@@ -149,6 +150,8 @@ struct radeon_pll {
uint32_t pll_in_max;
uint32_t pll_out_min;
uint32_t pll_out_max;
+ uint32_t lcd_pll_out_min;
+ uint32_t lcd_pll_out_max;
uint32_t best_vco;
/* divider limits */
@@ -170,17 +173,12 @@ struct radeon_pll {
enum radeon_pll_algo algo;
};
-struct i2c_algo_radeon_data {
- struct i2c_adapter bit_adapter;
- struct i2c_algo_bit_data bit_data;
-};
-
struct radeon_i2c_chan {
struct i2c_adapter adapter;
struct drm_device *dev;
union {
+ struct i2c_algo_bit_data bit;
struct i2c_algo_dp_aux_data dp;
- struct i2c_algo_radeon_data radeon;
} algo;
struct radeon_i2c_bus_rec rec;
};
@@ -342,6 +340,7 @@ struct radeon_encoder {
struct drm_display_mode native_mode;
void *enc_priv;
int hdmi_offset;
+ int hdmi_config_offset;
int hdmi_audio_workaround;
int hdmi_buffer_status;
};
@@ -431,7 +430,6 @@ extern struct radeon_i2c_chan *radeon_i2c_create(struct drm_device *dev,
struct radeon_i2c_bus_rec *rec,
const char *name);
extern void radeon_i2c_destroy(struct radeon_i2c_chan *i2c);
-extern void radeon_i2c_destroy_dp(struct radeon_i2c_chan *i2c);
extern void radeon_i2c_get_byte(struct radeon_i2c_chan *i2c_bus,
u8 slave_addr,
u8 addr,
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index fc9d00ac6b1..dc7e3f44913 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -185,8 +185,10 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr)
return 0;
}
radeon_ttm_placement_from_domain(bo, domain);
- /* force to pin into visible video ram */
- bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
+ if (domain == RADEON_GEM_DOMAIN_VRAM) {
+ /* force to pin into visible video ram */
+ bo->placement.lpfn = bo->rdev->mc.visible_vram_size >> PAGE_SHIFT;
+ }
for (i = 0; i < bo->placement.num_placement; i++)
bo->placements[i] |= TTM_PL_FLAG_NO_EVICT;
r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index d4d1c39a0e9..a4b57493aa7 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -28,6 +28,7 @@
#define RADEON_RECLOCK_DELAY_MS 200
#define RADEON_WAIT_VBLANK_TIMEOUT 200
+static bool radeon_pm_debug_check_in_vbl(struct radeon_device *rdev, bool finish);
static void radeon_pm_set_clocks_locked(struct radeon_device *rdev);
static void radeon_pm_set_clocks(struct radeon_device *rdev);
static void radeon_pm_idle_work_handler(struct work_struct *work);
@@ -179,6 +180,16 @@ static void radeon_get_power_state(struct radeon_device *rdev,
rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
}
+static inline void radeon_sync_with_vblank(struct radeon_device *rdev)
+{
+ if (rdev->pm.active_crtcs) {
+ rdev->pm.vblank_sync = false;
+ wait_event_timeout(
+ rdev->irq.vblank_queue, rdev->pm.vblank_sync,
+ msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
+ }
+}
+
static void radeon_set_power_state(struct radeon_device *rdev)
{
/* if *_clock_mode are the same, *_power_state are as well */
@@ -189,11 +200,28 @@ static void radeon_set_power_state(struct radeon_device *rdev)
rdev->pm.requested_clock_mode->sclk,
rdev->pm.requested_clock_mode->mclk,
rdev->pm.requested_power_state->non_clock_info.pcie_lanes);
+
/* set pcie lanes */
+ /* TODO */
+
/* set voltage */
+ /* TODO */
+
/* set engine clock */
+ radeon_sync_with_vblank(rdev);
+ radeon_pm_debug_check_in_vbl(rdev, false);
radeon_set_engine_clock(rdev, rdev->pm.requested_clock_mode->sclk);
+ radeon_pm_debug_check_in_vbl(rdev, true);
+
+#if 0
/* set memory clock */
+ if (rdev->asic->set_memory_clock) {
+ radeon_sync_with_vblank(rdev);
+ radeon_pm_debug_check_in_vbl(rdev, false);
+ radeon_set_memory_clock(rdev, rdev->pm.requested_clock_mode->mclk);
+ radeon_pm_debug_check_in_vbl(rdev, true);
+ }
+#endif
rdev->pm.current_power_state = rdev->pm.requested_power_state;
rdev->pm.current_clock_mode = rdev->pm.requested_clock_mode;
@@ -229,6 +257,12 @@ int radeon_pm_init(struct radeon_device *rdev)
return 0;
}
+void radeon_pm_fini(struct radeon_device *rdev)
+{
+ if (rdev->pm.i2c_bus)
+ radeon_i2c_destroy(rdev->pm.i2c_bus);
+}
+
void radeon_pm_compute_clocks(struct radeon_device *rdev)
{
struct drm_device *ddev = rdev->ddev;
@@ -245,7 +279,8 @@ void radeon_pm_compute_clocks(struct radeon_device *rdev)
list_for_each_entry(connector,
&ddev->mode_config.connector_list, head) {
if (connector->encoder &&
- connector->dpms != DRM_MODE_DPMS_OFF) {
+ connector->encoder->crtc &&
+ connector->dpms != DRM_MODE_DPMS_OFF) {
radeon_crtc = to_radeon_crtc(connector->encoder->crtc);
rdev->pm.active_crtcs |= (1 << radeon_crtc->crtc_id);
++count;
@@ -333,10 +368,7 @@ static void radeon_pm_set_clocks_locked(struct radeon_device *rdev)
break;
}
- /* check if we are in vblank */
- radeon_pm_debug_check_in_vbl(rdev, false);
radeon_set_power_state(rdev);
- radeon_pm_debug_check_in_vbl(rdev, true);
rdev->pm.planned_action = PM_ACTION_NONE;
}
@@ -353,10 +385,7 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
rdev->pm.req_vblank |= (1 << 1);
drm_vblank_get(rdev->ddev, 1);
}
- if (rdev->pm.active_crtcs)
- wait_event_interruptible_timeout(
- rdev->irq.vblank_queue, 0,
- msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));
+ radeon_pm_set_clocks_locked(rdev);
if (rdev->pm.req_vblank & (1 << 0)) {
rdev->pm.req_vblank &= ~(1 << 0);
drm_vblank_put(rdev->ddev, 0);
@@ -366,7 +395,6 @@ static void radeon_pm_set_clocks(struct radeon_device *rdev)
drm_vblank_put(rdev->ddev, 1);
}
- radeon_pm_set_clocks_locked(rdev);
mutex_unlock(&rdev->cp.mutex);
}
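Note: the reclocking path above now serializes with scanout. radeon_sync_with_vblank() clears pm.vblank_sync and sleeps on irq.vblank_queue; the vblank interrupt handlers (see the rs600.c hunk further down) set the flag and wake the queue, so the engine clock switch lands in the blanking interval or, at worst, after RADEON_WAIT_VBLANK_TIMEOUT ms. The handshake in isolation, using the same fields and primitives as the patch:

	/* reclock side: arm the flag, then wait for the next vblank */
	rdev->pm.vblank_sync = false;
	wait_event_timeout(rdev->irq.vblank_queue, rdev->pm.vblank_sync,
			   msecs_to_jiffies(RADEON_WAIT_VBLANK_TIMEOUT));

	/* interrupt side: signal the event and wake any waiter */
	rdev->pm.vblank_sync = true;
	wake_up(&rdev->irq.vblank_queue);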
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 5c0dc082d33..eabbc9cf30a 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -346,6 +346,7 @@
# define RADEON_TVPLL_PWRMGT_OFF (1 << 30)
# define RADEON_TVCLK_TURNOFF (1 << 31)
#define RADEON_PLL_PWRMGT_CNTL 0x0015 /* PLL */
+# define RADEON_PM_MODE_SEL (1 << 13)
# define RADEON_TCL_BYPASS_DISABLE (1 << 20)
#define RADEON_CLR_CMP_CLR_3D 0x1a24
#define RADEON_CLR_CMP_CLR_DST 0x15c8
diff --git a/drivers/gpu/drm/radeon/reg_srcs/r600 b/drivers/gpu/drm/radeon/reg_srcs/r600
index 8f414a5f520..af0da4ae3f5 100644
--- a/drivers/gpu/drm/radeon/reg_srcs/r600
+++ b/drivers/gpu/drm/radeon/reg_srcs/r600
@@ -26,20 +26,16 @@ r600 0x9400
0x00028408 VGT_INDX_OFFSET
0x00028AA0 VGT_INSTANCE_STEP_RATE_0
0x00028AA4 VGT_INSTANCE_STEP_RATE_1
-0x000088C0 VGT_LAST_COPY_STATE
0x00028400 VGT_MAX_VTX_INDX
-0x000088D8 VGT_MC_LAT_CNTL
0x00028404 VGT_MIN_VTX_INDX
0x00028A94 VGT_MULTI_PRIM_IB_RESET_EN
0x0002840C VGT_MULTI_PRIM_IB_RESET_INDX
0x00008970 VGT_NUM_INDICES
0x00008974 VGT_NUM_INSTANCES
0x00028A10 VGT_OUTPUT_PATH_CNTL
-0x00028C5C VGT_OUT_DEALLOC_CNTL
0x00028A84 VGT_PRIMITIVEID_EN
0x00008958 VGT_PRIMITIVE_TYPE
0x00028AB4 VGT_REUSE_OFF
-0x00028C58 VGT_VERTEX_REUSE_BLOCK_CNTL
0x00028AB8 VGT_VTX_CNT_EN
0x000088B0 VGT_VTX_VECT_EJECT_REG
0x00028810 PA_CL_CLIP_CNTL
@@ -280,7 +276,6 @@ r600 0x9400
0x00028E00 PA_SU_POLY_OFFSET_FRONT_SCALE
0x00028814 PA_SU_SC_MODE_CNTL
0x00028C08 PA_SU_VTX_CNTL
-0x00008C00 SQ_CONFIG
0x00008C04 SQ_GPR_RESOURCE_MGMT_1
0x00008C08 SQ_GPR_RESOURCE_MGMT_2
0x00008C10 SQ_STACK_RESOURCE_MGMT_1
@@ -320,18 +315,6 @@ r600 0x9400
0x000283FC SQ_VTX_SEMANTIC_31
0x000288E0 SQ_VTX_SEMANTIC_CLEAR
0x0003CFF4 SQ_VTX_START_INST_LOC
-0x0003C000 SQ_TEX_SAMPLER_WORD0_0
-0x0003C004 SQ_TEX_SAMPLER_WORD1_0
-0x0003C008 SQ_TEX_SAMPLER_WORD2_0
-0x00030000 SQ_ALU_CONSTANT0_0
-0x00030004 SQ_ALU_CONSTANT1_0
-0x00030008 SQ_ALU_CONSTANT2_0
-0x0003000C SQ_ALU_CONSTANT3_0
-0x0003E380 SQ_BOOL_CONST_0
-0x0003E384 SQ_BOOL_CONST_1
-0x0003E388 SQ_BOOL_CONST_2
-0x0003E200 SQ_LOOP_CONST_0
-0x0003E200 SQ_LOOP_CONST_DX10_0
0x000281C0 SQ_ALU_CONST_BUFFER_SIZE_GS_0
0x000281C4 SQ_ALU_CONST_BUFFER_SIZE_GS_1
0x000281C8 SQ_ALU_CONST_BUFFER_SIZE_GS_2
@@ -380,54 +363,6 @@ r600 0x9400
0x000281B4 SQ_ALU_CONST_BUFFER_SIZE_VS_13
0x000281B8 SQ_ALU_CONST_BUFFER_SIZE_VS_14
0x000281BC SQ_ALU_CONST_BUFFER_SIZE_VS_15
-0x000289C0 SQ_ALU_CONST_CACHE_GS_0
-0x000289C4 SQ_ALU_CONST_CACHE_GS_1
-0x000289C8 SQ_ALU_CONST_CACHE_GS_2
-0x000289CC SQ_ALU_CONST_CACHE_GS_3
-0x000289D0 SQ_ALU_CONST_CACHE_GS_4
-0x000289D4 SQ_ALU_CONST_CACHE_GS_5
-0x000289D8 SQ_ALU_CONST_CACHE_GS_6
-0x000289DC SQ_ALU_CONST_CACHE_GS_7
-0x000289E0 SQ_ALU_CONST_CACHE_GS_8
-0x000289E4 SQ_ALU_CONST_CACHE_GS_9
-0x000289E8 SQ_ALU_CONST_CACHE_GS_10
-0x000289EC SQ_ALU_CONST_CACHE_GS_11
-0x000289F0 SQ_ALU_CONST_CACHE_GS_12
-0x000289F4 SQ_ALU_CONST_CACHE_GS_13
-0x000289F8 SQ_ALU_CONST_CACHE_GS_14
-0x000289FC SQ_ALU_CONST_CACHE_GS_15
-0x00028940 SQ_ALU_CONST_CACHE_PS_0
-0x00028944 SQ_ALU_CONST_CACHE_PS_1
-0x00028948 SQ_ALU_CONST_CACHE_PS_2
-0x0002894C SQ_ALU_CONST_CACHE_PS_3
-0x00028950 SQ_ALU_CONST_CACHE_PS_4
-0x00028954 SQ_ALU_CONST_CACHE_PS_5
-0x00028958 SQ_ALU_CONST_CACHE_PS_6
-0x0002895C SQ_ALU_CONST_CACHE_PS_7
-0x00028960 SQ_ALU_CONST_CACHE_PS_8
-0x00028964 SQ_ALU_CONST_CACHE_PS_9
-0x00028968 SQ_ALU_CONST_CACHE_PS_10
-0x0002896C SQ_ALU_CONST_CACHE_PS_11
-0x00028970 SQ_ALU_CONST_CACHE_PS_12
-0x00028974 SQ_ALU_CONST_CACHE_PS_13
-0x00028978 SQ_ALU_CONST_CACHE_PS_14
-0x0002897C SQ_ALU_CONST_CACHE_PS_15
-0x00028980 SQ_ALU_CONST_CACHE_VS_0
-0x00028984 SQ_ALU_CONST_CACHE_VS_1
-0x00028988 SQ_ALU_CONST_CACHE_VS_2
-0x0002898C SQ_ALU_CONST_CACHE_VS_3
-0x00028990 SQ_ALU_CONST_CACHE_VS_4
-0x00028994 SQ_ALU_CONST_CACHE_VS_5
-0x00028998 SQ_ALU_CONST_CACHE_VS_6
-0x0002899C SQ_ALU_CONST_CACHE_VS_7
-0x000289A0 SQ_ALU_CONST_CACHE_VS_8
-0x000289A4 SQ_ALU_CONST_CACHE_VS_9
-0x000289A8 SQ_ALU_CONST_CACHE_VS_10
-0x000289AC SQ_ALU_CONST_CACHE_VS_11
-0x000289B0 SQ_ALU_CONST_CACHE_VS_12
-0x000289B4 SQ_ALU_CONST_CACHE_VS_13
-0x000289B8 SQ_ALU_CONST_CACHE_VS_14
-0x000289BC SQ_ALU_CONST_CACHE_VS_15
0x000288D8 SQ_PGM_CF_OFFSET_ES
0x000288DC SQ_PGM_CF_OFFSET_FS
0x000288D4 SQ_PGM_CF_OFFSET_GS
@@ -494,12 +429,7 @@ r600 0x9400
0x00028438 SX_ALPHA_REF
0x00028410 SX_ALPHA_TEST_CONTROL
0x00028350 SX_MISC
-0x0000A020 SMX_DC_CTL0
-0x0000A024 SMX_DC_CTL1
-0x0000A028 SMX_DC_CTL2
-0x00009608 TC_CNTL
0x00009604 TC_INVALIDATE
-0x00009490 TD_CNTL
0x00009400 TD_FILTER4
0x00009404 TD_FILTER4_1
0x00009408 TD_FILTER4_2
@@ -824,14 +754,9 @@ r600 0x9400
0x00028428 CB_FOG_GREEN
0x00028424 CB_FOG_RED
0x00008040 WAIT_UNTIL
-0x00008950 CC_GC_SHADER_PIPE_CONFIG
-0x00008954 GC_USER_SHADER_PIPE_CONFIG
0x00009714 VC_ENHANCE
0x00009830 DB_DEBUG
0x00009838 DB_WATERMARKS
0x00028D28 DB_SRESULTS_COMPARE_STATE0
0x00028D44 DB_ALPHA_TO_MASK
-0x00009504 TA_CNTL
0x00009700 VC_CNTL
-0x00009718 VC_CONFIG
-0x0000A02C SMX_DC_MC_INTF_CTL
diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c
index 626d51891ee..626aaf082b1 100644
--- a/drivers/gpu/drm/radeon/rs400.c
+++ b/drivers/gpu/drm/radeon/rs400.c
@@ -28,6 +28,7 @@
#include <linux/seq_file.h>
#include <drm/drmP.h>
#include "radeon.h"
+#include "radeon_asic.h"
#include "rs400d.h"
/* This files gather functions specifics to : rs400,rs480 */
@@ -202,9 +203,9 @@ void rs400_gart_disable(struct radeon_device *rdev)
void rs400_gart_fini(struct radeon_device *rdev)
{
+ radeon_gart_fini(rdev);
rs400_gart_disable(rdev);
radeon_gart_table_ram_free(rdev);
- radeon_gart_fini(rdev);
}
int rs400_gart_set_page(struct radeon_device *rdev, int i, uint64_t addr)
@@ -264,6 +265,7 @@ void rs400_mc_init(struct radeon_device *rdev)
base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16;
radeon_vram_location(rdev, &rdev->mc, base);
radeon_gtt_location(rdev, &rdev->mc);
+ radeon_update_bandwidth_info(rdev);
}
uint32_t rs400_mc_rreg(struct radeon_device *rdev, uint32_t reg)
@@ -388,6 +390,8 @@ static int rs400_startup(struct radeon_device *rdev)
{
int r;
+ r100_set_common_regs(rdev);
+
rs400_mc_program(rdev);
/* Resume clock */
r300_clock_startup(rdev);
@@ -453,6 +457,7 @@ int rs400_suspend(struct radeon_device *rdev)
void rs400_fini(struct radeon_device *rdev)
{
+ radeon_pm_fini(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 47f046b78c6..abf824c2123 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -37,6 +37,7 @@
*/
#include "drmP.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "atom.h"
#include "rs600d.h"
@@ -267,9 +268,9 @@ void rs600_gart_disable(struct radeon_device *rdev)
void rs600_gart_fini(struct radeon_device *rdev)
{
+ radeon_gart_fini(rdev);
rs600_gart_disable(rdev);
radeon_gart_table_vram_free(rdev);
- radeon_gart_fini(rdev);
}
#define R600_PTE_VALID (1 << 0)
@@ -392,10 +393,12 @@ int rs600_irq_process(struct radeon_device *rdev)
/* Vertical blank interrupts */
if (G_007EDC_LB_D1_VBLANK_INTERRUPT(r500_disp_int)) {
drm_handle_vblank(rdev->ddev, 0);
+ rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
if (G_007EDC_LB_D2_VBLANK_INTERRUPT(r500_disp_int)) {
drm_handle_vblank(rdev->ddev, 1);
+ rdev->pm.vblank_sync = true;
wake_up(&rdev->irq.vblank_queue);
}
if (G_007EDC_DC_HOT_PLUG_DETECT1_INTERRUPT(r500_disp_int)) {
@@ -472,13 +475,38 @@ void rs600_mc_init(struct radeon_device *rdev)
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
base = RREG32_MC(R_000004_MC_FB_LOCATION);
base = G_000004_MC_FB_START(base) << 16;
+ rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
radeon_vram_location(rdev, &rdev->mc, base);
radeon_gtt_location(rdev, &rdev->mc);
+ radeon_update_bandwidth_info(rdev);
}
void rs600_bandwidth_update(struct radeon_device *rdev)
{
- /* FIXME: implement, should this be like rs690 ? */
+ struct drm_display_mode *mode0 = NULL;
+ struct drm_display_mode *mode1 = NULL;
+ u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt;
+ /* FIXME: implement full support */
+
+ radeon_update_display_priority(rdev);
+
+ if (rdev->mode_info.crtcs[0]->base.enabled)
+ mode0 = &rdev->mode_info.crtcs[0]->base.mode;
+ if (rdev->mode_info.crtcs[1]->base.enabled)
+ mode1 = &rdev->mode_info.crtcs[1]->base.mode;
+
+ rs690_line_buffer_adjust(rdev, mode0, mode1);
+
+ if (rdev->disp_priority == 2) {
+ d1mode_priority_a_cnt = RREG32(R_006548_D1MODE_PRIORITY_A_CNT);
+ d2mode_priority_a_cnt = RREG32(R_006D48_D2MODE_PRIORITY_A_CNT);
+ d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
+ d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
+ WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+ WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
+ WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+ WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
+ }
}
uint32_t rs600_mc_rreg(struct radeon_device *rdev, uint32_t reg)
@@ -598,6 +626,7 @@ int rs600_suspend(struct radeon_device *rdev)
void rs600_fini(struct radeon_device *rdev)
{
+ radeon_pm_fini(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rs600d.h b/drivers/gpu/drm/radeon/rs600d.h
index c1c8f5885cb..e52d2695510 100644
--- a/drivers/gpu/drm/radeon/rs600d.h
+++ b/drivers/gpu/drm/radeon/rs600d.h
@@ -535,4 +535,57 @@
#define G_00016C_INVALIDATE_L1_TLB(x) (((x) >> 20) & 0x1)
#define C_00016C_INVALIDATE_L1_TLB 0xFFEFFFFF
+#define R_006548_D1MODE_PRIORITY_A_CNT 0x006548
+#define S_006548_D1MODE_PRIORITY_MARK_A(x) (((x) & 0x7FFF) << 0)
+#define G_006548_D1MODE_PRIORITY_MARK_A(x) (((x) >> 0) & 0x7FFF)
+#define C_006548_D1MODE_PRIORITY_MARK_A 0xFFFF8000
+#define S_006548_D1MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16)
+#define G_006548_D1MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1)
+#define C_006548_D1MODE_PRIORITY_A_OFF 0xFFFEFFFF
+#define S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20)
+#define G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1)
+#define C_006548_D1MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF
+#define S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24)
+#define G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1)
+#define C_006548_D1MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF
+#define R_00654C_D1MODE_PRIORITY_B_CNT 0x00654C
+#define S_00654C_D1MODE_PRIORITY_MARK_B(x) (((x) & 0x7FFF) << 0)
+#define G_00654C_D1MODE_PRIORITY_MARK_B(x) (((x) >> 0) & 0x7FFF)
+#define C_00654C_D1MODE_PRIORITY_MARK_B 0xFFFF8000
+#define S_00654C_D1MODE_PRIORITY_B_OFF(x) (((x) & 0x1) << 16)
+#define G_00654C_D1MODE_PRIORITY_B_OFF(x) (((x) >> 16) & 0x1)
+#define C_00654C_D1MODE_PRIORITY_B_OFF 0xFFFEFFFF
+#define S_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x) (((x) & 0x1) << 20)
+#define G_00654C_D1MODE_PRIORITY_B_ALWAYS_ON(x) (((x) >> 20) & 0x1)
+#define C_00654C_D1MODE_PRIORITY_B_ALWAYS_ON 0xFFEFFFFF
+#define S_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x) (((x) & 0x1) << 24)
+#define G_00654C_D1MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1)
+#define C_00654C_D1MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF
+#define R_006D48_D2MODE_PRIORITY_A_CNT 0x006D48
+#define S_006D48_D2MODE_PRIORITY_MARK_A(x) (((x) & 0x7FFF) << 0)
+#define G_006D48_D2MODE_PRIORITY_MARK_A(x) (((x) >> 0) & 0x7FFF)
+#define C_006D48_D2MODE_PRIORITY_MARK_A 0xFFFF8000
+#define S_006D48_D2MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16)
+#define G_006D48_D2MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1)
+#define C_006D48_D2MODE_PRIORITY_A_OFF 0xFFFEFFFF
+#define S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20)
+#define G_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1)
+#define C_006D48_D2MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF
+#define S_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24)
+#define G_006D48_D2MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1)
+#define C_006D48_D2MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF
+#define R_006D4C_D2MODE_PRIORITY_B_CNT 0x006D4C
+#define S_006D4C_D2MODE_PRIORITY_MARK_B(x) (((x) & 0x7FFF) << 0)
+#define G_006D4C_D2MODE_PRIORITY_MARK_B(x) (((x) >> 0) & 0x7FFF)
+#define C_006D4C_D2MODE_PRIORITY_MARK_B 0xFFFF8000
+#define S_006D4C_D2MODE_PRIORITY_B_OFF(x) (((x) & 0x1) << 16)
+#define G_006D4C_D2MODE_PRIORITY_B_OFF(x) (((x) >> 16) & 0x1)
+#define C_006D4C_D2MODE_PRIORITY_B_OFF 0xFFFEFFFF
+#define S_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x) (((x) & 0x1) << 20)
+#define G_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON(x) (((x) >> 20) & 0x1)
+#define C_006D4C_D2MODE_PRIORITY_B_ALWAYS_ON 0xFFEFFFFF
+#define S_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) & 0x1) << 24)
+#define G_006D4C_D2MODE_PRIORITY_B_FORCE_MASK(x) (((x) >> 24) & 0x1)
+#define C_006D4C_D2MODE_PRIORITY_B_FORCE_MASK 0xFEFFFFFF
+
#endif
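
As an aside, the R_/S_/G_/C_ macros added above follow the usual per-field pattern in these register headers: S_ builds the shifted field value, G_ extracts it, and C_ is the AND-mask that clears it. The small user-space sketch below (register value and field contents are invented for the example; only the macro shapes are taken from the header) shows the read-modify-write sequence the bandwidth-update code at the top of this section performs with them:

#include <stdio.h>

#define S_006548_D1MODE_PRIORITY_MARK_A(x)      (((x) & 0x7FFF) << 0)
#define G_006548_D1MODE_PRIORITY_MARK_A(x)      (((x) >> 0) & 0x7FFF)
#define C_006548_D1MODE_PRIORITY_MARK_A         0xFFFF8000
#define S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20)
#define G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1)

int main(void)
{
        unsigned int reg = 0;   /* stands in for RREG32(R_006548_D1MODE_PRIORITY_A_CNT) */

        reg &= C_006548_D1MODE_PRIORITY_MARK_A;           /* clear the mark field     */
        reg |= S_006548_D1MODE_PRIORITY_MARK_A(0x1234);   /* program an example mark  */
        reg |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);   /* force the "always on" bit */

        printf("mark=0x%04x always_on=%u raw=0x%08x\n",
               G_006548_D1MODE_PRIORITY_MARK_A(reg),
               G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(reg), reg);
        return 0;
}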
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 83b9174f76f..bbf3da790fd 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -27,6 +27,7 @@
*/
#include "drmP.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "atom.h"
#include "rs690d.h"
@@ -57,42 +58,57 @@ static void rs690_gpu_init(struct radeon_device *rdev)
}
}
+union igp_info {
+ struct _ATOM_INTEGRATED_SYSTEM_INFO info;
+ struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_v2;
+};
+
void rs690_pm_info(struct radeon_device *rdev)
{
int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo);
- struct _ATOM_INTEGRATED_SYSTEM_INFO *info;
- struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *info_v2;
- void *ptr;
+ union igp_info *info;
uint16_t data_offset;
uint8_t frev, crev;
fixed20_12 tmp;
- atom_parse_data_header(rdev->mode_info.atom_context, index, NULL,
- &frev, &crev, &data_offset);
- ptr = rdev->mode_info.atom_context->bios + data_offset;
- info = (struct _ATOM_INTEGRATED_SYSTEM_INFO *)ptr;
- info_v2 = (struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 *)ptr;
- /* Get various system informations from bios */
- switch (crev) {
- case 1:
- tmp.full = rfixed_const(100);
- rdev->pm.igp_sideport_mclk.full = rfixed_const(info->ulBootUpMemoryClock);
- rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
- rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->usK8MemoryClock));
- rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->usFSBClock));
- rdev->pm.igp_ht_link_width.full = rfixed_const(info->ucHTLinkWidth);
- break;
- case 2:
- tmp.full = rfixed_const(100);
- rdev->pm.igp_sideport_mclk.full = rfixed_const(info_v2->ulBootUpSidePortClock);
- rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
- rdev->pm.igp_system_mclk.full = rfixed_const(info_v2->ulBootUpUMAClock);
- rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
- rdev->pm.igp_ht_link_clk.full = rfixed_const(info_v2->ulHTLinkFreq);
- rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp);
- rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info_v2->usMinHTLinkWidth));
- break;
- default:
+ if (atom_parse_data_header(rdev->mode_info.atom_context, index, NULL,
+ &frev, &crev, &data_offset)) {
+ info = (union igp_info *)(rdev->mode_info.atom_context->bios + data_offset);
+
+ /* Get various system information from the BIOS */
+ switch (crev) {
+ case 1:
+ tmp.full = rfixed_const(100);
+ rdev->pm.igp_sideport_mclk.full = rfixed_const(info->info.ulBootUpMemoryClock);
+ rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
+ rdev->pm.igp_system_mclk.full = rfixed_const(le16_to_cpu(info->info.usK8MemoryClock));
+ rdev->pm.igp_ht_link_clk.full = rfixed_const(le16_to_cpu(info->info.usFSBClock));
+ rdev->pm.igp_ht_link_width.full = rfixed_const(info->info.ucHTLinkWidth);
+ break;
+ case 2:
+ tmp.full = rfixed_const(100);
+ rdev->pm.igp_sideport_mclk.full = rfixed_const(info->info_v2.ulBootUpSidePortClock);
+ rdev->pm.igp_sideport_mclk.full = rfixed_div(rdev->pm.igp_sideport_mclk, tmp);
+ rdev->pm.igp_system_mclk.full = rfixed_const(info->info_v2.ulBootUpUMAClock);
+ rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
+ rdev->pm.igp_ht_link_clk.full = rfixed_const(info->info_v2.ulHTLinkFreq);
+ rdev->pm.igp_ht_link_clk.full = rfixed_div(rdev->pm.igp_ht_link_clk, tmp);
+ rdev->pm.igp_ht_link_width.full = rfixed_const(le16_to_cpu(info->info_v2.usMinHTLinkWidth));
+ break;
+ default:
+ tmp.full = rfixed_const(100);
+ /* We assume the slowest possible clock, i.e. worst case */
+ /* DDR 333 MHz */
+ rdev->pm.igp_sideport_mclk.full = rfixed_const(333);
+ /* FIXME: system clock ? */
+ rdev->pm.igp_system_mclk.full = rfixed_const(100);
+ rdev->pm.igp_system_mclk.full = rfixed_div(rdev->pm.igp_system_mclk, tmp);
+ rdev->pm.igp_ht_link_clk.full = rfixed_const(200);
+ rdev->pm.igp_ht_link_width.full = rfixed_const(8);
+ DRM_ERROR("No integrated system info for your GPU, using safe default\n");
+ break;
+ }
+ } else {
tmp.full = rfixed_const(100);
/* We assume the slower possible clock ie worst case */
/* DDR 333Mhz */
@@ -103,7 +119,6 @@ void rs690_pm_info(struct radeon_device *rdev)
rdev->pm.igp_ht_link_clk.full = rfixed_const(200);
rdev->pm.igp_ht_link_width.full = rfixed_const(8);
DRM_ERROR("No integrated system info for your GPU, using safe default\n");
- break;
}
/* Compute various bandwidth */
/* k8_bandwidth = (memory_clk / 2) * 2 * 8 * 0.5 = memory_clk * 4 */
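
The union igp_info rework above replaces two differently-typed casts of the same BIOS pointer with a single union that is interpreted according to crev. A minimal user-space sketch of the same idiom follows; the struct layouts, field names and values are invented for illustration and are not the real ATOM tables:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct info_v1 { uint32_t boot_mclk;    uint16_t k8_mclk; };
struct info_v2 { uint32_t sideport_clk; uint32_t uma_clk; };

union igp_info_example {
        struct info_v1 v1;
        struct info_v2 v2;
};

int main(void)
{
        struct info_v2 raw = { .sideport_clk = 40000, .uma_clk = 66700 };
        union igp_info_example table;
        union igp_info_example *info = &table;    /* one pointer, one cast */
        uint8_t crev = 2;                         /* table revision from the data header */

        memcpy(&table, &raw, sizeof(raw));        /* stands in for the BIOS blob */

        switch (crev) {
        case 1:
                printf("v1 boot mclk: %u\n", (unsigned)info->v1.boot_mclk);
                break;
        case 2:
                printf("v2 sideport clk: %u\n", (unsigned)info->v2.sideport_clk);
                break;
        default:
                printf("unknown revision, falling back to safe defaults\n");
                break;
        }
        return 0;
}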
@@ -131,7 +146,6 @@ void rs690_pm_info(struct radeon_device *rdev)
void rs690_mc_init(struct radeon_device *rdev)
{
- fixed20_12 a;
u64 base;
rs400_gart_adjust_size(rdev);
@@ -145,18 +159,10 @@ void rs690_mc_init(struct radeon_device *rdev)
base = RREG32_MC(R_000100_MCCFG_FB_LOCATION);
base = G_000100_MC_FB_START(base) << 16;
rs690_pm_info(rdev);
- /* FIXME: we should enforce default clock in case GPU is not in
- * default setup
- */
- a.full = rfixed_const(100);
- rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
- rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
- a.full = rfixed_const(16);
- /* core_bandwidth = sclk(Mhz) * 16 */
- rdev->pm.core_bandwidth.full = rfixed_div(rdev->pm.sclk, a);
rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev);
radeon_vram_location(rdev, &rdev->mc, base);
radeon_gtt_location(rdev, &rdev->mc);
+ radeon_update_bandwidth_info(rdev);
}
void rs690_line_buffer_adjust(struct radeon_device *rdev,
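
The rfixed_* helpers used throughout these hunks (and inside the radeon_update_bandwidth_info() call that now replaces the open-coded sclk math) operate on 20.12 fixed-point values, judging by the fixed20_12 type. A toy re-implementation is sketched below; the default_sclk value is invented, and the MHz interpretation assumes, as the divide-by-100 in the removed code suggests, that the BIOS reports clocks in 10 kHz units:

#include <stdio.h>
#include <stdint.h>

typedef struct { uint32_t full; } fix20_12;       /* same shape as fixed20_12 */

static fix20_12 fx_const(uint32_t v)
{
        return (fix20_12){ .full = v << 12 };
}

static fix20_12 fx_div(fix20_12 a, fix20_12 b)
{
        return (fix20_12){ .full = (uint32_t)(((uint64_t)a.full << 12) / b.full) };
}

static uint32_t fx_trunc(fix20_12 a)
{
        return a.full >> 12;
}

int main(void)
{
        uint32_t default_sclk = 50000;            /* invented: 500 MHz in 10 kHz units */
        fix20_12 hundred = fx_const(100);
        fix20_12 sclk = fx_div(fx_const(default_sclk), hundred);

        printf("sclk = %u MHz (raw 20.12 value 0x%08x)\n",
               (unsigned)fx_trunc(sclk), (unsigned)sclk.full);
        return 0;
}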
@@ -394,10 +400,12 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
struct drm_display_mode *mode1 = NULL;
struct rs690_watermark wm0;
struct rs690_watermark wm1;
- u32 tmp;
+ u32 tmp, d1mode_priority_a_cnt, d2mode_priority_a_cnt;
fixed20_12 priority_mark02, priority_mark12, fill_rate;
fixed20_12 a, b;
+ radeon_update_display_priority(rdev);
+
if (rdev->mode_info.crtcs[0]->base.enabled)
mode0 = &rdev->mode_info.crtcs[0]->base.mode;
if (rdev->mode_info.crtcs[1]->base.enabled)
@@ -407,7 +415,8 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
* modes if the user specifies HIGH for displaypriority
* option.
*/
- if (rdev->disp_priority == 2) {
+ if ((rdev->disp_priority == 2) &&
+ ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740))) {
tmp = RREG32_MC(R_000104_MC_INIT_MISC_LAT_TIMER);
tmp &= C_000104_MC_DISP0R_INIT_LAT;
tmp &= C_000104_MC_DISP1R_INIT_LAT;
@@ -482,10 +491,16 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
priority_mark12.full = 0;
if (wm1.priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark_max.full;
- WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
- WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
- WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
- WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
+ d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
+ d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+ if (rdev->disp_priority == 2) {
+ d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
+ d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
+ }
+ WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+ WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
+ WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+ WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
} else if (mode0) {
if (rfixed_trunc(wm0.dbpp) > 64)
a.full = rfixed_mul(wm0.dbpp, wm0.num_line_pair);
@@ -512,8 +527,11 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
priority_mark02.full = 0;
if (wm0.priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark_max.full;
- WREG32(R_006548_D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
- WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
+ d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
+ if (rdev->disp_priority == 2)
+ d1mode_priority_a_cnt |= S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(1);
+ WREG32(R_006548_D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+ WREG32(R_00654C_D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
WREG32(R_006D48_D2MODE_PRIORITY_A_CNT,
S_006D48_D2MODE_PRIORITY_A_OFF(1));
WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT,
@@ -544,12 +562,15 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
priority_mark12.full = 0;
if (wm1.priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark_max.full;
+ d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+ if (rdev->disp_priority == 2)
+ d2mode_priority_a_cnt |= S_006D48_D2MODE_PRIORITY_A_ALWAYS_ON(1);
WREG32(R_006548_D1MODE_PRIORITY_A_CNT,
S_006548_D1MODE_PRIORITY_A_OFF(1));
WREG32(R_00654C_D1MODE_PRIORITY_B_CNT,
S_00654C_D1MODE_PRIORITY_B_OFF(1));
- WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
- WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
+ WREG32(R_006D48_D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+ WREG32(R_006D4C_D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
}
}
@@ -657,6 +678,7 @@ int rs690_suspend(struct radeon_device *rdev)
void rs690_fini(struct radeon_device *rdev)
{
+ radeon_pm_fini(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
diff --git a/drivers/gpu/drm/radeon/rs690d.h b/drivers/gpu/drm/radeon/rs690d.h
index 62d31e7a897..36e6398a98a 100644
--- a/drivers/gpu/drm/radeon/rs690d.h
+++ b/drivers/gpu/drm/radeon/rs690d.h
@@ -182,6 +182,9 @@
#define S_006548_D1MODE_PRIORITY_A_OFF(x) (((x) & 0x1) << 16)
#define G_006548_D1MODE_PRIORITY_A_OFF(x) (((x) >> 16) & 0x1)
#define C_006548_D1MODE_PRIORITY_A_OFF 0xFFFEFFFF
+#define S_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) & 0x1) << 20)
+#define G_006548_D1MODE_PRIORITY_A_ALWAYS_ON(x) (((x) >> 20) & 0x1)
+#define C_006548_D1MODE_PRIORITY_A_ALWAYS_ON 0xFFEFFFFF
#define S_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) & 0x1) << 24)
#define G_006548_D1MODE_PRIORITY_A_FORCE_MASK(x) (((x) >> 24) & 0x1)
#define C_006548_D1MODE_PRIORITY_A_FORCE_MASK 0xFEFFFFFF
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index bea747da123..1cf233f7e51 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -29,6 +29,7 @@
#include "drmP.h"
#include "rv515d.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "atom.h"
#include "rv515_reg_safe.h"
@@ -279,19 +280,13 @@ static void rv515_vram_get_type(struct radeon_device *rdev)
void rv515_mc_init(struct radeon_device *rdev)
{
- fixed20_12 a;
rv515_vram_get_type(rdev);
r100_vram_init_sizes(rdev);
radeon_vram_location(rdev, &rdev->mc, 0);
if (!(rdev->flags & RADEON_IS_AGP))
radeon_gtt_location(rdev, &rdev->mc);
- /* FIXME: we should enforce default clock in case GPU is not in
- * default setup
- */
- a.full = rfixed_const(100);
- rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
- rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
+ radeon_update_bandwidth_info(rdev);
}
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
@@ -539,6 +534,7 @@ void rv515_set_safe_registers(struct radeon_device *rdev)
void rv515_fini(struct radeon_device *rdev)
{
+ radeon_pm_fini(rdev);
r100_cp_fini(rdev);
r100_wb_fini(rdev);
r100_ib_fini(rdev);
@@ -1020,7 +1016,7 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
struct drm_display_mode *mode1 = NULL;
struct rv515_watermark wm0;
struct rv515_watermark wm1;
- u32 tmp;
+ u32 tmp, d1mode_priority_a_cnt, d2mode_priority_a_cnt;
fixed20_12 priority_mark02, priority_mark12, fill_rate;
fixed20_12 a, b;
@@ -1088,10 +1084,16 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
priority_mark12.full = 0;
if (wm1.priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark_max.full;
- WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
- WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
- WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
- WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
+ d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
+ d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+ if (rdev->disp_priority == 2) {
+ d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+ d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+ }
+ WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+ WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
+ WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+ WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
} else if (mode0) {
if (rfixed_trunc(wm0.dbpp) > 64)
a.full = rfixed_div(wm0.dbpp, wm0.num_line_pair);
@@ -1118,8 +1120,11 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
priority_mark02.full = 0;
if (wm0.priority_mark_max.full > priority_mark02.full)
priority_mark02.full = wm0.priority_mark_max.full;
- WREG32(D1MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark02));
- WREG32(D1MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark02));
+ d1mode_priority_a_cnt = rfixed_trunc(priority_mark02);
+ if (rdev->disp_priority == 2)
+ d1mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
+ WREG32(D1MODE_PRIORITY_A_CNT, d1mode_priority_a_cnt);
+ WREG32(D1MODE_PRIORITY_B_CNT, d1mode_priority_a_cnt);
WREG32(D2MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
WREG32(D2MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
} else {
@@ -1148,10 +1153,13 @@ void rv515_bandwidth_avivo_update(struct radeon_device *rdev)
priority_mark12.full = 0;
if (wm1.priority_mark_max.full > priority_mark12.full)
priority_mark12.full = wm1.priority_mark_max.full;
+ d2mode_priority_a_cnt = rfixed_trunc(priority_mark12);
+ if (rdev->disp_priority == 2)
+ d2mode_priority_a_cnt |= MODE_PRIORITY_ALWAYS_ON;
WREG32(D1MODE_PRIORITY_A_CNT, MODE_PRIORITY_OFF);
WREG32(D1MODE_PRIORITY_B_CNT, MODE_PRIORITY_OFF);
- WREG32(D2MODE_PRIORITY_A_CNT, rfixed_trunc(priority_mark12));
- WREG32(D2MODE_PRIORITY_B_CNT, rfixed_trunc(priority_mark12));
+ WREG32(D2MODE_PRIORITY_A_CNT, d2mode_priority_a_cnt);
+ WREG32(D2MODE_PRIORITY_B_CNT, d2mode_priority_a_cnt);
}
}
@@ -1161,6 +1169,8 @@ void rv515_bandwidth_update(struct radeon_device *rdev)
struct drm_display_mode *mode0 = NULL;
struct drm_display_mode *mode1 = NULL;
+ radeon_update_display_priority(rdev);
+
if (rdev->mode_info.crtcs[0]->base.enabled)
mode0 = &rdev->mode_info.crtcs[0]->base.mode;
if (rdev->mode_info.crtcs[1]->base.enabled)
@@ -1170,7 +1180,8 @@ void rv515_bandwidth_update(struct radeon_device *rdev)
* modes if the user specifies HIGH for displaypriority
* option.
*/
- if (rdev->disp_priority == 2) {
+ if ((rdev->disp_priority == 2) &&
+ (rdev->family == CHIP_RV515)) {
tmp = RREG32_MC(MC_MISC_LAT_TIMER);
tmp &= ~MC_DISP1R_INIT_LAT_MASK;
tmp &= ~MC_DISP0R_INIT_LAT_MASK;
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index 37887dee12a..9f37d2efb0a 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -29,6 +29,7 @@
#include <linux/platform_device.h>
#include "drmP.h"
#include "radeon.h"
+#include "radeon_asic.h"
#include "radeon_drm.h"
#include "rv770d.h"
#include "atom.h"
@@ -125,9 +126,9 @@ void rv770_pcie_gart_disable(struct radeon_device *rdev)
void rv770_pcie_gart_fini(struct radeon_device *rdev)
{
+ radeon_gart_fini(rdev);
rv770_pcie_gart_disable(rdev);
radeon_gart_table_vram_free(rdev);
- radeon_gart_fini(rdev);
}
@@ -647,10 +648,13 @@ static void rv770_gpu_init(struct radeon_device *rdev)
WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
+ WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
WREG32(CGTS_SYS_TCC_DISABLE, 0);
WREG32(CGTS_TCC_DISABLE, 0);
+ WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
+ WREG32(CGTS_USER_TCC_DISABLE, 0);
num_qd_pipes =
R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
@@ -864,7 +868,6 @@ static void rv770_gpu_init(struct radeon_device *rdev)
int rv770_mc_init(struct radeon_device *rdev)
{
- fixed20_12 a;
u32 tmp;
int chansize, numchan;
@@ -908,12 +911,8 @@ int rv770_mc_init(struct radeon_device *rdev)
rdev->mc.real_vram_size = rdev->mc.aper_size;
}
r600_vram_gtt_location(rdev, &rdev->mc);
- /* FIXME: we should enforce default clock in case GPU is not in
- * default setup
- */
- a.full = rfixed_const(100);
- rdev->pm.sclk.full = rfixed_const(rdev->clock.default_sclk);
- rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
+ radeon_update_bandwidth_info(rdev);
+
return 0;
}
@@ -1013,6 +1012,13 @@ int rv770_resume(struct radeon_device *rdev)
DRM_ERROR("radeon: failled testing IB (%d).\n", r);
return r;
}
+
+ r = r600_audio_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "radeon: audio init failed\n");
+ return r;
+ }
+
return r;
}
@@ -1021,6 +1027,7 @@ int rv770_suspend(struct radeon_device *rdev)
{
int r;
+ r600_audio_fini(rdev);
/* FIXME: we should wait for ring to be empty */
r700_cp_stop(rdev);
rdev->cp.ready = false;
@@ -1144,11 +1151,19 @@ int rv770_init(struct radeon_device *rdev)
}
}
}
+
+ r = r600_audio_init(rdev);
+ if (r) {
+ dev_err(rdev->dev, "radeon: audio init failed\n");
+ return r;
+ }
+
return 0;
}
void rv770_fini(struct radeon_device *rdev)
{
+ radeon_pm_fini(rdev);
r600_blit_fini(rdev);
r600_cp_fini(rdev);
r600_wb_fini(rdev);
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 89c38c49066..dd47b2a9a79 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1425,8 +1425,8 @@ int ttm_bo_global_init(struct ttm_global_reference *ref)
atomic_set(&glob->bo_count, 0);
- kobject_init(&glob->kobj, &ttm_bo_glob_kobj_type);
- ret = kobject_add(&glob->kobj, ttm_get_kobj(), "buffer_objects");
+ ret = kobject_init_and_add(
+ &glob->kobj, &ttm_bo_glob_kobj_type, ttm_get_kobj(), "buffer_objects");
if (unlikely(ret != 0))
kobject_put(&glob->kobj);
return ret;
diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
index eb143e04d40..c40e5f48e9a 100644
--- a/drivers/gpu/drm/ttm/ttm_memory.c
+++ b/drivers/gpu/drm/ttm/ttm_memory.c
@@ -260,8 +260,8 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
zone->used_mem = 0;
zone->glob = glob;
glob->zone_kernel = zone;
- kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
- ret = kobject_add(&zone->kobj, &glob->kobj, zone->name);
+ ret = kobject_init_and_add(
+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
if (unlikely(ret != 0)) {
kobject_put(&zone->kobj);
return ret;
@@ -296,8 +296,8 @@ static int ttm_mem_init_highmem_zone(struct ttm_mem_global *glob,
zone->used_mem = 0;
zone->glob = glob;
glob->zone_highmem = zone;
- kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
- ret = kobject_add(&zone->kobj, &glob->kobj, zone->name);
+ ret = kobject_init_and_add(
+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
if (unlikely(ret != 0)) {
kobject_put(&zone->kobj);
return ret;
@@ -343,8 +343,8 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
zone->used_mem = 0;
zone->glob = glob;
glob->zone_dma32 = zone;
- kobject_init(&zone->kobj, &ttm_mem_zone_kobj_type);
- ret = kobject_add(&zone->kobj, &glob->kobj, zone->name);
+ ret = kobject_init_and_add(
+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
if (unlikely(ret != 0)) {
kobject_put(&zone->kobj);
return ret;
@@ -365,10 +365,8 @@ int ttm_mem_global_init(struct ttm_mem_global *glob)
glob->swap_queue = create_singlethread_workqueue("ttm_swap");
INIT_WORK(&glob->work, ttm_shrink_work);
init_waitqueue_head(&glob->queue);
- kobject_init(&glob->kobj, &ttm_mem_glob_kobj_type);
- ret = kobject_add(&glob->kobj,
- ttm_get_kobj(),
- "memory_accounting");
+ ret = kobject_init_and_add(
+ &glob->kobj, &ttm_mem_glob_kobj_type, ttm_get_kobj(), "memory_accounting");
if (unlikely(ret != 0)) {
kobject_put(&glob->kobj);
return ret;
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index a759170763b..bab6cd8d8a1 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -28,13 +28,13 @@
* Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
*/
-#include <linux/vmalloc.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/swap.h>
#include "drm_cache.h"
+#include "drm_mem_util.h"
#include "ttm/ttm_module.h"
#include "ttm/ttm_bo_driver.h"
#include "ttm/ttm_placement.h"
@@ -43,32 +43,15 @@ static int ttm_tt_swapin(struct ttm_tt *ttm);
/**
* Allocates storage for pointers to the pages that back the ttm.
- *
- * Uses kmalloc if possible. Otherwise falls back to vmalloc.
*/
static void ttm_tt_alloc_page_directory(struct ttm_tt *ttm)
{
- unsigned long size = ttm->num_pages * sizeof(*ttm->pages);
- ttm->pages = NULL;
-
- if (size <= PAGE_SIZE)
- ttm->pages = kzalloc(size, GFP_KERNEL);
-
- if (!ttm->pages) {
- ttm->pages = vmalloc_user(size);
- if (ttm->pages)
- ttm->page_flags |= TTM_PAGE_FLAG_VMALLOC;
- }
+ ttm->pages = drm_calloc_large(ttm->num_pages, sizeof(*ttm->pages));
}
static void ttm_tt_free_page_directory(struct ttm_tt *ttm)
{
- if (ttm->page_flags & TTM_PAGE_FLAG_VMALLOC) {
- vfree(ttm->pages);
- ttm->page_flags &= ~TTM_PAGE_FLAG_VMALLOC;
- } else {
- kfree(ttm->pages);
- }
+ drm_free_large(ttm->pages);
ttm->pages = NULL;
}
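
The ttm_tt change above delegates the page-directory allocation to drm_calloc_large()/drm_free_large() instead of the local kzalloc/vmalloc fallback. One property a calloc-style (nmemb, size) interface buys is a single overflow check on the multiplication; the user-space helper below only illustrates that idea and is not the DRM implementation:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

static void *calloc_large(size_t nmemb, size_t size)
{
        if (size != 0 && nmemb > SIZE_MAX / size)
                return NULL;                    /* nmemb * size would overflow */
        return calloc(nmemb, size);             /* zeroed, like kzalloc/vmalloc_user */
}

int main(void)
{
        size_t num_pages = 1024;                /* e.g. a 4 MiB object's page directory */
        void **pages = calloc_large(num_pages, sizeof(*pages));

        if (!pages)
                return 1;
        printf("allocated %zu page pointers\n", num_pages);
        free(pages);                            /* drm_free_large() counterpart */
        return 0;
}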
diff --git a/drivers/gpu/drm/vmwgfx/Kconfig b/drivers/gpu/drm/vmwgfx/Kconfig
index f20b8bcbef3..30ad13344f7 100644
--- a/drivers/gpu/drm/vmwgfx/Kconfig
+++ b/drivers/gpu/drm/vmwgfx/Kconfig
@@ -1,6 +1,6 @@
config DRM_VMWGFX
tristate "DRM driver for VMware Virtual GPU"
- depends on DRM && PCI
+ depends on DRM && PCI && FB
select FB_DEFERRED_IO
select FB_CFB_FILLRECT
select FB_CFB_COPYAREA
diff --git a/drivers/hid/hid-gyration.c b/drivers/hid/hid-gyration.c
index cab13e8c7d2..62416e6baec 100644
--- a/drivers/hid/hid-gyration.c
+++ b/drivers/hid/hid-gyration.c
@@ -53,10 +53,13 @@ static int gyration_input_mapping(struct hid_device *hdev, struct hid_input *hi,
static int gyration_event(struct hid_device *hdev, struct hid_field *field,
struct hid_usage *usage, __s32 value)
{
- struct input_dev *input = field->hidinput->input;
+
+ if (!(hdev->claimed & HID_CLAIMED_INPUT) || !field->hidinput)
+ return 0;
if ((usage->hid & HID_USAGE_PAGE) == HID_UP_GENDESK &&
(usage->hid & 0xff) == 0x82) {
+ struct input_dev *input = field->hidinput->input;
input_event(input, usage->type, usage->code, 1);
input_sync(input);
input_event(input, usage->type, usage->code, 0);
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 928943c7ce9..e71e0057284 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -60,6 +60,7 @@ static const struct hid_blacklist {
{ USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_PRODIGE, USB_DEVICE_ID_PRODIGE_CORDLESS, HID_QUIRK_NOGET },
+ { USB_VENDOR_ID_QUANTA, USB_DEVICE_ID_PIXART_IMAGING_INC_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_SUN, USB_DEVICE_ID_RARITAN_KVM_DONGLE, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_TURBOX, USB_DEVICE_ID_TURBOX_KEYBOARD, HID_QUIRK_NOGET },
{ USB_VENDOR_ID_UCLOGIC, USB_DEVICE_ID_UCLOGIC_TABLET_PF1209, HID_QUIRK_MULTI_INPUT },
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index e4595e6147b..9be8e1754a0 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -217,8 +217,8 @@ config SENSORS_ASC7621
depends on HWMON && I2C
help
If you say yes here you get support for the aSC7621
- family of SMBus sensors chip found on most Intel X48, X38, 975,
- 965 and 945 desktop boards. Currently supported chips:
+ family of SMBus sensors chip found on most Intel X38, X48, X58,
+ 945, 965 and 975 desktop boards. Currently supported chips:
aSC7621
aSC7621a
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 2d7bceeed0b..e9b7fbc5a44 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -228,7 +228,7 @@ static int __devinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, struct device *
if (err) {
dev_warn(dev,
"Unable to access MSR 0xEE, for Tjmax, left"
- " at default");
+ " at default\n");
} else if (eax & 0x40000000) {
tjmax = tjmax_ee;
}
@@ -466,7 +466,7 @@ static int __init coretemp_init(void)
family 6 CPU */
if ((c->x86 == 0x6) && (c->x86_model > 0xf))
printk(KERN_WARNING DRVNAME ": Unknown CPU "
- "model %x\n", c->x86_model);
+ "model 0x%x\n", c->x86_model);
continue;
}
diff --git a/drivers/hwmon/w83793.c b/drivers/hwmon/w83793.c
index 9de81a4c15a..612807d9715 100644
--- a/drivers/hwmon/w83793.c
+++ b/drivers/hwmon/w83793.c
@@ -1294,7 +1294,7 @@ static int watchdog_close(struct inode *inode, struct file *filp)
static ssize_t watchdog_write(struct file *filp, const char __user *buf,
size_t count, loff_t *offset)
{
- size_t ret;
+ ssize_t ret;
struct w83793_data *data = filp->private_data;
if (count) {
diff --git a/drivers/ide/ide-probe.c b/drivers/ide/ide-probe.c
index fbedd35feb4..4c3d1bfec0c 100644
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -695,14 +695,8 @@ static int ide_probe_port(ide_hwif_t *hwif)
if (irqd)
disable_irq(hwif->irq);
- rc = ide_port_wait_ready(hwif);
- if (rc == -ENODEV) {
- printk(KERN_INFO "%s: no devices on the port\n", hwif->name);
- goto out;
- } else if (rc == -EBUSY)
- printk(KERN_ERR "%s: not ready before the probe\n", hwif->name);
- else
- rc = -ENODEV;
+ if (ide_port_wait_ready(hwif) == -EBUSY)
+ printk(KERN_DEBUG "%s: Wait for ready failed before probe !\n", hwif->name);
/*
* Second drive should only exist if first drive was found,
@@ -713,7 +707,7 @@ static int ide_probe_port(ide_hwif_t *hwif)
if (drive->dev_flags & IDE_DFLAG_PRESENT)
rc = 0;
}
-out:
+
/*
* Use cached IRQ number. It might be (and is...) changed by probe
* code above
diff --git a/drivers/ide/via82cxxx.c b/drivers/ide/via82cxxx.c
index e65d010b708..48fd4efc90a 100644
--- a/drivers/ide/via82cxxx.c
+++ b/drivers/ide/via82cxxx.c
@@ -110,7 +110,6 @@ struct via82cxxx_dev
{
struct via_isa_bridge *via_config;
unsigned int via_80w;
- u8 cached_device[2];
};
/**
@@ -403,66 +402,10 @@ static const struct ide_port_ops via_port_ops = {
.cable_detect = via82cxxx_cable_detect,
};
-static void via_write_devctl(ide_hwif_t *hwif, u8 ctl)
-{
- struct via82cxxx_dev *vdev = hwif->host->host_priv;
-
- outb(ctl, hwif->io_ports.ctl_addr);
- outb(vdev->cached_device[hwif->channel], hwif->io_ports.device_addr);
-}
-
-static void __via_dev_select(ide_drive_t *drive, u8 select)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct via82cxxx_dev *vdev = hwif->host->host_priv;
-
- outb(select, hwif->io_ports.device_addr);
- vdev->cached_device[hwif->channel] = select;
-}
-
-static void via_dev_select(ide_drive_t *drive)
-{
- __via_dev_select(drive, drive->select | ATA_DEVICE_OBS);
-}
-
-static void via_tf_load(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid)
-{
- ide_hwif_t *hwif = drive->hwif;
- struct ide_io_ports *io_ports = &hwif->io_ports;
-
- if (valid & IDE_VALID_FEATURE)
- outb(tf->feature, io_ports->feature_addr);
- if (valid & IDE_VALID_NSECT)
- outb(tf->nsect, io_ports->nsect_addr);
- if (valid & IDE_VALID_LBAL)
- outb(tf->lbal, io_ports->lbal_addr);
- if (valid & IDE_VALID_LBAM)
- outb(tf->lbam, io_ports->lbam_addr);
- if (valid & IDE_VALID_LBAH)
- outb(tf->lbah, io_ports->lbah_addr);
- if (valid & IDE_VALID_DEVICE)
- __via_dev_select(drive, tf->device);
-}
-
-const struct ide_tp_ops via_tp_ops = {
- .exec_command = ide_exec_command,
- .read_status = ide_read_status,
- .read_altstatus = ide_read_altstatus,
- .write_devctl = via_write_devctl,
-
- .dev_select = via_dev_select,
- .tf_load = via_tf_load,
- .tf_read = ide_tf_read,
-
- .input_data = ide_input_data,
- .output_data = ide_output_data,
-};
-
static const struct ide_port_info via82cxxx_chipset __devinitdata = {
.name = DRV_NAME,
.init_chipset = init_chipset_via82cxxx,
.enablebits = { { 0x40, 0x02, 0x02 }, { 0x40, 0x01, 0x01 } },
- .tp_ops = &via_tp_ops,
.port_ops = &via_port_ops,
.host_flags = IDE_HFLAG_PIO_NO_BLACKLIST |
IDE_HFLAG_POST_SET_MODE |
diff --git a/drivers/isdn/hisax/avma1_cs.c b/drivers/isdn/hisax/avma1_cs.c
index e5deb15cf40..8d1d63a02b3 100644
--- a/drivers/isdn/hisax/avma1_cs.c
+++ b/drivers/isdn/hisax/avma1_cs.c
@@ -50,7 +50,7 @@ module_param(isdnprot, int, 0);
handler.
*/
-static int avma1cs_config(struct pcmcia_device *link);
+static int avma1cs_config(struct pcmcia_device *link) __devinit ;
static void avma1cs_release(struct pcmcia_device *link);
/*
@@ -59,7 +59,7 @@ static void avma1cs_release(struct pcmcia_device *link);
needed to manage one actual PCMCIA card.
*/
-static void avma1cs_detach(struct pcmcia_device *p_dev);
+static void avma1cs_detach(struct pcmcia_device *p_dev) __devexit ;
/*
@@ -99,7 +99,7 @@ typedef struct local_info_t {
======================================================================*/
-static int avma1cs_probe(struct pcmcia_device *p_dev)
+static int __devinit avma1cs_probe(struct pcmcia_device *p_dev)
{
local_info_t *local;
@@ -140,7 +140,7 @@ static int avma1cs_probe(struct pcmcia_device *p_dev)
======================================================================*/
-static void avma1cs_detach(struct pcmcia_device *link)
+static void __devexit avma1cs_detach(struct pcmcia_device *link)
{
dev_dbg(&link->dev, "avma1cs_detach(0x%p)\n", link);
avma1cs_release(link);
@@ -174,7 +174,7 @@ static int avma1cs_configcheck(struct pcmcia_device *p_dev,
}
-static int avma1cs_config(struct pcmcia_device *link)
+static int __devinit avma1cs_config(struct pcmcia_device *link)
{
local_info_t *dev;
int i;
@@ -282,7 +282,7 @@ static struct pcmcia_driver avma1cs_driver = {
.name = "avma1_cs",
},
.probe = avma1cs_probe,
- .remove = avma1cs_detach,
+ .remove = __devexit_p(avma1cs_detach),
.id_table = avma1cs_ids,
};
diff --git a/drivers/isdn/hisax/elsa_cs.c b/drivers/isdn/hisax/elsa_cs.c
index c9a30b1c923..c9f2279e21f 100644
--- a/drivers/isdn/hisax/elsa_cs.c
+++ b/drivers/isdn/hisax/elsa_cs.c
@@ -76,7 +76,7 @@ module_param(protocol, int, 0);
handler.
*/
-static int elsa_cs_config(struct pcmcia_device *link);
+static int elsa_cs_config(struct pcmcia_device *link) __devinit ;
static void elsa_cs_release(struct pcmcia_device *link);
/*
@@ -85,7 +85,7 @@ static void elsa_cs_release(struct pcmcia_device *link);
needed to manage one actual PCMCIA card.
*/
-static void elsa_cs_detach(struct pcmcia_device *p_dev);
+static void elsa_cs_detach(struct pcmcia_device *p_dev) __devexit;
/*
A driver needs to provide a dev_node_t structure for each device
@@ -121,7 +121,7 @@ typedef struct local_info_t {
======================================================================*/
-static int elsa_cs_probe(struct pcmcia_device *link)
+static int __devinit elsa_cs_probe(struct pcmcia_device *link)
{
local_info_t *local;
@@ -166,7 +166,7 @@ static int elsa_cs_probe(struct pcmcia_device *link)
======================================================================*/
-static void elsa_cs_detach(struct pcmcia_device *link)
+static void __devexit elsa_cs_detach(struct pcmcia_device *link)
{
local_info_t *info = link->priv;
@@ -210,7 +210,7 @@ static int elsa_cs_configcheck(struct pcmcia_device *p_dev,
return -ENODEV;
}
-static int elsa_cs_config(struct pcmcia_device *link)
+static int __devinit elsa_cs_config(struct pcmcia_device *link)
{
local_info_t *dev;
int i;
@@ -327,7 +327,7 @@ static struct pcmcia_driver elsa_cs_driver = {
.name = "elsa_cs",
},
.probe = elsa_cs_probe,
- .remove = elsa_cs_detach,
+ .remove = __devexit_p(elsa_cs_detach),
.id_table = elsa_ids,
.suspend = elsa_suspend,
.resume = elsa_resume,
diff --git a/drivers/isdn/hisax/sedlbauer_cs.c b/drivers/isdn/hisax/sedlbauer_cs.c
index 7836ec3c7f8..71b3ddef03b 100644
--- a/drivers/isdn/hisax/sedlbauer_cs.c
+++ b/drivers/isdn/hisax/sedlbauer_cs.c
@@ -76,7 +76,7 @@ module_param(protocol, int, 0);
event handler.
*/
-static int sedlbauer_config(struct pcmcia_device *link);
+static int sedlbauer_config(struct pcmcia_device *link) __devinit ;
static void sedlbauer_release(struct pcmcia_device *link);
/*
@@ -85,7 +85,7 @@ static void sedlbauer_release(struct pcmcia_device *link);
needed to manage one actual PCMCIA card.
*/
-static void sedlbauer_detach(struct pcmcia_device *p_dev);
+static void sedlbauer_detach(struct pcmcia_device *p_dev) __devexit;
/*
You'll also need to prototype all the functions that will actually
@@ -129,7 +129,7 @@ typedef struct local_info_t {
======================================================================*/
-static int sedlbauer_probe(struct pcmcia_device *link)
+static int __devinit sedlbauer_probe(struct pcmcia_device *link)
{
local_info_t *local;
@@ -177,7 +177,7 @@ static int sedlbauer_probe(struct pcmcia_device *link)
======================================================================*/
-static void sedlbauer_detach(struct pcmcia_device *link)
+static void __devexit sedlbauer_detach(struct pcmcia_device *link)
{
dev_dbg(&link->dev, "sedlbauer_detach(0x%p)\n", link);
@@ -283,7 +283,7 @@ static int sedlbauer_config_check(struct pcmcia_device *p_dev,
-static int sedlbauer_config(struct pcmcia_device *link)
+static int __devinit sedlbauer_config(struct pcmcia_device *link)
{
local_info_t *dev = link->priv;
win_req_t *req;
@@ -441,7 +441,7 @@ static struct pcmcia_driver sedlbauer_driver = {
.name = "sedlbauer_cs",
},
.probe = sedlbauer_probe,
- .remove = sedlbauer_detach,
+ .remove = __devexit_p(sedlbauer_detach),
.id_table = sedlbauer_ids,
.suspend = sedlbauer_suspend,
.resume = sedlbauer_resume,
diff --git a/drivers/isdn/hisax/teles_cs.c b/drivers/isdn/hisax/teles_cs.c
index b0c5976cbdb..d010a0da8e1 100644
--- a/drivers/isdn/hisax/teles_cs.c
+++ b/drivers/isdn/hisax/teles_cs.c
@@ -57,7 +57,7 @@ module_param(protocol, int, 0);
handler.
*/
-static int teles_cs_config(struct pcmcia_device *link);
+static int teles_cs_config(struct pcmcia_device *link) __devinit ;
static void teles_cs_release(struct pcmcia_device *link);
/*
@@ -66,7 +66,7 @@ static void teles_cs_release(struct pcmcia_device *link);
needed to manage one actual PCMCIA card.
*/
-static void teles_detach(struct pcmcia_device *p_dev);
+static void teles_detach(struct pcmcia_device *p_dev) __devexit ;
/*
A linked list of "instances" of the teles_cs device. Each actual
@@ -112,7 +112,7 @@ typedef struct local_info_t {
======================================================================*/
-static int teles_probe(struct pcmcia_device *link)
+static int __devinit teles_probe(struct pcmcia_device *link)
{
local_info_t *local;
@@ -156,7 +156,7 @@ static int teles_probe(struct pcmcia_device *link)
======================================================================*/
-static void teles_detach(struct pcmcia_device *link)
+static void __devexit teles_detach(struct pcmcia_device *link)
{
local_info_t *info = link->priv;
@@ -200,7 +200,7 @@ static int teles_cs_configcheck(struct pcmcia_device *p_dev,
return -ENODEV;
}
-static int teles_cs_config(struct pcmcia_device *link)
+static int __devinit teles_cs_config(struct pcmcia_device *link)
{
local_info_t *dev;
int i;
@@ -319,7 +319,7 @@ static struct pcmcia_driver teles_cs_driver = {
.name = "teles_cs",
},
.probe = teles_probe,
- .remove = teles_detach,
+ .remove = __devexit_p(teles_detach),
.id_table = teles_ids,
.suspend = teles_suspend,
.resume = teles_resume,
diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
index fcb6ec1af17..72450237a0f 100644
--- a/drivers/misc/kgdbts.c
+++ b/drivers/misc/kgdbts.c
@@ -295,6 +295,10 @@ static int check_and_rewind_pc(char *put_str, char *arg)
/* On x86 a breakpoint stop requires it to be decremented */
if (addr + 1 == kgdbts_regs.ip)
offset = -1;
+#elif defined(CONFIG_SUPERH)
+ /* On SUPERH a breakpoint stop requires it to be decremented */
+ if (addr + 2 == kgdbts_regs.pc)
+ offset = -2;
#endif
if (strcmp(arg, "silent") &&
instruction_pointer(&kgdbts_regs) + offset != addr) {
@@ -305,6 +309,8 @@ static int check_and_rewind_pc(char *put_str, char *arg)
#ifdef CONFIG_X86
/* On x86 adjust the instruction pointer if needed */
kgdbts_regs.ip += offset;
+#elif defined(CONFIG_SUPERH)
+ kgdbts_regs.pc += offset;
#endif
return 0;
}
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 9ba547069db..0ebd8208f60 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -84,7 +84,7 @@
#define ATLX_DRIVER_VERSION "2.1.3"
MODULE_AUTHOR("Xiong Huang <xiong.huang@atheros.com>, \
- Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>");
+Chris Snook <csnook@redhat.com>, Jay Cliburn <jcliburn@gmail.com>");
MODULE_LICENSE("GPL");
MODULE_VERSION(ATLX_DRIVER_VERSION);
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index 9560d48944a..51e1065e789 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -490,7 +490,7 @@ be_test_ddr_dma(struct be_adapter *adapter)
{
int ret, i;
struct be_dma_mem ddrdma_cmd;
- u64 pattern[2] = {0x5a5a5a5a5a5a5a5a, 0xa5a5a5a5a5a5a5a5};
+ u64 pattern[2] = {0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL};
ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
ddrdma_cmd.va = pci_alloc_consistent(adapter->pdev, ddrdma_cmd.size,
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 381887ba677..a257babd1bb 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -246,6 +246,8 @@ static const struct flash_spec flash_5709 = {
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
+static void bnx2_init_napi(struct bnx2 *bp);
+
static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
u32 diff;
@@ -6197,6 +6199,7 @@ bnx2_open(struct net_device *dev)
bnx2_disable_int(bp);
bnx2_setup_int_mode(bp, disable_msi);
+ bnx2_init_napi(bp);
bnx2_napi_enable(bp);
rc = bnx2_alloc_mem(bp);
if (rc)
@@ -7643,9 +7646,11 @@ poll_bnx2(struct net_device *dev)
int i;
for (i = 0; i < bp->irq_nvecs; i++) {
- disable_irq(bp->irq_tbl[i].vector);
- bnx2_interrupt(bp->irq_tbl[i].vector, &bp->bnx2_napi[i]);
- enable_irq(bp->irq_tbl[i].vector);
+ struct bnx2_irq *irq = &bp->irq_tbl[i];
+
+ disable_irq(irq->vector);
+ irq->handler(irq->vector, &bp->bnx2_napi[i]);
+ enable_irq(irq->vector);
}
}
#endif
@@ -8207,7 +8212,7 @@ bnx2_init_napi(struct bnx2 *bp)
{
int i;
- for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
+ for (i = 0; i < bp->irq_nvecs; i++) {
struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
int (*poll)(struct napi_struct *, int);
@@ -8276,7 +8281,6 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
dev->ethtool_ops = &bnx2_ethtool_ops;
bp = netdev_priv(dev);
- bnx2_init_napi(bp);
pci_set_drvdata(pdev, dev);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 430c02267d7..5b92fbff431 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1235,6 +1235,11 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active)
write_lock_bh(&bond->curr_slave_lock);
}
}
+
+ /* resend IGMP joins since all were sent on curr_active_slave */
+ if (bond->params.mode == BOND_MODE_ROUNDROBIN) {
+ bond_resend_igmp_join_requests(bond);
+ }
}
/**
@@ -4138,22 +4143,41 @@ static int bond_xmit_roundrobin(struct sk_buff *skb, struct net_device *bond_dev
struct bonding *bond = netdev_priv(bond_dev);
struct slave *slave, *start_at;
int i, slave_no, res = 1;
+ struct iphdr *iph = ip_hdr(skb);
read_lock(&bond->lock);
if (!BOND_IS_OK(bond))
goto out;
-
/*
- * Concurrent TX may collide on rr_tx_counter; we accept that
- * as being rare enough not to justify using an atomic op here
+ * Start with the curr_active_slave that joined the bond as the
+ * default for sending IGMP traffic. For failover purposes one
+ * needs to maintain some consistency for the interface that will
+ * send the join/membership reports. The curr_active_slave found
+ * will send all of this type of traffic.
*/
- slave_no = bond->rr_tx_counter++ % bond->slave_cnt;
+ if ((skb->protocol == htons(ETH_P_IP)) &&
+ (iph->protocol == IPPROTO_IGMP)) {
- bond_for_each_slave(bond, slave, i) {
- slave_no--;
- if (slave_no < 0)
- break;
+ read_lock(&bond->curr_slave_lock);
+ slave = bond->curr_active_slave;
+ read_unlock(&bond->curr_slave_lock);
+
+ if (!slave)
+ goto out;
+ } else {
+ /*
+ * Concurrent TX may collide on rr_tx_counter; we accept
+ * that as being rare enough not to justify using an
+ * atomic op here.
+ */
+ slave_no = bond->rr_tx_counter++ % bond->slave_cnt;
+
+ bond_for_each_slave(bond, slave, i) {
+ slave_no--;
+ if (slave_no < 0)
+ break;
+ }
}
start_at = slave;
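
One detail worth keeping straight in the hunk above: skb->protocol holds a 16-bit big-endian ethertype, so it is compared against htons(ETH_P_IP), while iph->protocol is a single byte and is compared against IPPROTO_IGMP directly. A quick user-space check (Linux/POSIX headers assumed) makes the distinction visible on a little-endian host:

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>
#include <netinet/in.h>

int main(void)
{
        uint8_t ip_proto = IPPROTO_IGMP;          /* the byte found in iph->protocol */

        printf("IPPROTO_IGMP        = %u\n", (unsigned)IPPROTO_IGMP);
        printf("htons(IPPROTO_IGMP) = %u\n", (unsigned)htons(IPPROTO_IGMP));
        printf("byte == IPPROTO_IGMP        : %s\n",
               ip_proto == IPPROTO_IGMP ? "match" : "no match");
        printf("byte == htons(IPPROTO_IGMP) : %s\n",
               ip_proto == htons(IPPROTO_IGMP) ? "match" : "no match");
        return 0;
}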
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 866905fa411..03489864376 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -22,6 +22,7 @@
#include <linux/can/dev.h>
#include <linux/can/error.h>
+#include <asm/bfin_can.h>
#include <asm/portmux.h>
#define DRV_NAME "bfin_can"
@@ -29,90 +30,6 @@
#define TX_ECHO_SKB_MAX 1
/*
- * transmit and receive channels
- */
-#define TRANSMIT_CHL 24
-#define RECEIVE_STD_CHL 0
-#define RECEIVE_EXT_CHL 4
-#define RECEIVE_RTR_CHL 8
-#define RECEIVE_EXT_RTR_CHL 12
-#define MAX_CHL_NUMBER 32
-
-/*
- * bfin can registers layout
- */
-struct bfin_can_mask_regs {
- u16 aml;
- u16 dummy1;
- u16 amh;
- u16 dummy2;
-};
-
-struct bfin_can_channel_regs {
- u16 data[8];
- u16 dlc;
- u16 dummy1;
- u16 tsv;
- u16 dummy2;
- u16 id0;
- u16 dummy3;
- u16 id1;
- u16 dummy4;
-};
-
-struct bfin_can_regs {
- /*
- * global control and status registers
- */
- u16 mc1; /* offset 0 */
- u16 dummy1;
- u16 md1; /* offset 4 */
- u16 rsv1[13];
- u16 mbtif1; /* offset 0x20 */
- u16 dummy2;
- u16 mbrif1; /* offset 0x24 */
- u16 dummy3;
- u16 mbim1; /* offset 0x28 */
- u16 rsv2[11];
- u16 mc2; /* offset 0x40 */
- u16 dummy4;
- u16 md2; /* offset 0x44 */
- u16 dummy5;
- u16 trs2; /* offset 0x48 */
- u16 rsv3[11];
- u16 mbtif2; /* offset 0x60 */
- u16 dummy6;
- u16 mbrif2; /* offset 0x64 */
- u16 dummy7;
- u16 mbim2; /* offset 0x68 */
- u16 rsv4[11];
- u16 clk; /* offset 0x80 */
- u16 dummy8;
- u16 timing; /* offset 0x84 */
- u16 rsv5[3];
- u16 status; /* offset 0x8c */
- u16 dummy9;
- u16 cec; /* offset 0x90 */
- u16 dummy10;
- u16 gis; /* offset 0x94 */
- u16 dummy11;
- u16 gim; /* offset 0x98 */
- u16 rsv6[3];
- u16 ctrl; /* offset 0xa0 */
- u16 dummy12;
- u16 intr; /* offset 0xa4 */
- u16 rsv7[7];
- u16 esr; /* offset 0xb4 */
- u16 rsv8[37];
-
- /*
- * channel(mailbox) mask and message registers
- */
- struct bfin_can_mask_regs msk[MAX_CHL_NUMBER]; /* offset 0x100 */
- struct bfin_can_channel_regs chl[MAX_CHL_NUMBER]; /* offset 0x200 */
-};
-
-/*
* bfin can private data
*/
struct bfin_can_priv {
@@ -163,7 +80,7 @@ static int bfin_can_set_bittiming(struct net_device *dev)
if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
timing |= SAM;
- bfin_write16(&reg->clk, clk);
+ bfin_write16(&reg->clock, clk);
bfin_write16(&reg->timing, timing);
dev_info(dev->dev.parent, "setting CLOCK=0x%04x TIMING=0x%04x\n",
@@ -185,11 +102,11 @@ static void bfin_can_set_reset_mode(struct net_device *dev)
bfin_write16(&reg->gim, 0);
/* reset can and enter configuration mode */
- bfin_write16(&reg->ctrl, SRS | CCR);
+ bfin_write16(&reg->control, SRS | CCR);
SSYNC();
- bfin_write16(&reg->ctrl, CCR);
+ bfin_write16(&reg->control, CCR);
SSYNC();
- while (!(bfin_read16(&reg->ctrl) & CCA)) {
+ while (!(bfin_read16(&reg->control) & CCA)) {
udelay(10);
if (--timeout == 0) {
dev_err(dev->dev.parent,
@@ -244,7 +161,7 @@ static void bfin_can_set_normal_mode(struct net_device *dev)
/*
* leave configuration mode
*/
- bfin_write16(&reg->ctrl, bfin_read16(&reg->ctrl) & ~CCR);
+ bfin_write16(&reg->control, bfin_read16(&reg->control) & ~CCR);
while (bfin_read16(&reg->status) & CCA) {
udelay(10);
@@ -726,7 +643,7 @@ static int bfin_can_suspend(struct platform_device *pdev, pm_message_t mesg)
if (netif_running(dev)) {
/* enter sleep mode */
- bfin_write16(&reg->ctrl, bfin_read16(&reg->ctrl) | SMR);
+ bfin_write16(&reg->control, bfin_read16(&reg->control) | SMR);
SSYNC();
while (!(bfin_read16(&reg->intr) & SMACK)) {
udelay(10);
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 9902b33b716..2f29c213185 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -261,7 +261,6 @@ struct e1000_adapter {
/* TX */
struct e1000_tx_ring *tx_ring; /* One per active queue */
unsigned int restart_queue;
- unsigned long tx_queue_len;
u32 txd_cmd;
u32 tx_int_delay;
u32 tx_abs_int_delay;
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index 8be6faee43e..b15ece26ed8 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -383,8 +383,6 @@ static void e1000_configure(struct e1000_adapter *adapter)
adapter->alloc_rx_buf(adapter, ring,
E1000_DESC_UNUSED(ring));
}
-
- adapter->tx_queue_len = netdev->tx_queue_len;
}
int e1000_up(struct e1000_adapter *adapter)
@@ -503,7 +501,6 @@ void e1000_down(struct e1000_adapter *adapter)
del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
- netdev->tx_queue_len = adapter->tx_queue_len;
adapter->link_speed = 0;
adapter->link_duplex = 0;
netif_carrier_off(netdev);
@@ -2316,19 +2313,15 @@ static void e1000_watchdog(unsigned long data)
E1000_CTRL_RFCE) ? "RX" : ((ctrl &
E1000_CTRL_TFCE) ? "TX" : "None" )));
- /* tweak tx_queue_len according to speed/duplex
- * and adjust the timeout factor */
- netdev->tx_queue_len = adapter->tx_queue_len;
+ /* adjust timeout factor according to speed/duplex */
adapter->tx_timeout_factor = 1;
switch (adapter->link_speed) {
case SPEED_10:
txb2b = false;
- netdev->tx_queue_len = 10;
adapter->tx_timeout_factor = 16;
break;
case SPEED_100:
txb2b = false;
- netdev->tx_queue_len = 100;
/* maybe add some timeout factor ? */
break;
}
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index c2ec095d216..118bdf48359 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -279,7 +279,6 @@ struct e1000_adapter {
struct napi_struct napi;
- unsigned long tx_queue_len;
unsigned int restart_queue;
u32 txd_cmd;
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 88d54d3efce..e1cceb60657 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -2289,8 +2289,6 @@ static void e1000_configure_tx(struct e1000_adapter *adapter)
ew32(TCTL, tctl);
e1000e_config_collision_dist(hw);
-
- adapter->tx_queue_len = adapter->netdev->tx_queue_len;
}
/**
@@ -2877,7 +2875,6 @@ void e1000e_down(struct e1000_adapter *adapter)
del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
- netdev->tx_queue_len = adapter->tx_queue_len;
netif_carrier_off(netdev);
adapter->link_speed = 0;
adapter->link_duplex = 0;
@@ -3588,21 +3585,15 @@ static void e1000_watchdog_task(struct work_struct *work)
"link gets many collisions.\n");
}
- /*
- * tweak tx_queue_len according to speed/duplex
- * and adjust the timeout factor
- */
- netdev->tx_queue_len = adapter->tx_queue_len;
+ /* adjust timeout factor according to speed/duplex */
adapter->tx_timeout_factor = 1;
switch (adapter->link_speed) {
case SPEED_10:
txb2b = 0;
- netdev->tx_queue_len = 10;
adapter->tx_timeout_factor = 16;
break;
case SPEED_100:
txb2b = 0;
- netdev->tx_queue_len = 100;
adapter->tx_timeout_factor = 10;
break;
}
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index b6715553cf1..669de028d44 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -2393,6 +2393,7 @@ struct sk_buff * gfar_new_skb(struct net_device *dev)
* as many bytes as needed to align the data properly
*/
skb_reserve(skb, alignamount);
+ GFAR_CB(skb)->alignamount = alignamount;
return skb;
}
@@ -2533,13 +2534,13 @@ int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
newskb = skb;
else if (skb) {
/*
- * We need to reset ->data to what it
+ * We need to un-reserve() the skb to what it
* was before gfar_new_skb() re-aligned
* it to an RXBUF_ALIGNMENT boundary
* before we put the skb back on the
* recycle list.
*/
- skb->data = skb->head + NET_SKB_PAD;
+ skb_reserve(skb, -GFAR_CB(skb)->alignamount);
__skb_queue_head(&priv->rx_recycle, skb);
}
} else {
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 3d72dc43dca..17d25e71423 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -566,6 +566,12 @@ struct rxfcb {
u16 vlctl; /* VLAN control word */
};
+struct gianfar_skb_cb {
+ int alignamount;
+};
+
+#define GFAR_CB(skb) ((struct gianfar_skb_cb *)((skb)->cb))
+
struct rmon_mib
{
u32 tr64; /* 0x.680 - Transmit and Receive 64-byte Frame Counter */
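
The GFAR_CB() macro added above stashes driver-private state in the skb's 48-byte cb[] scratch area so the receive path can undo the alignment reserve later instead of recomputing it. A stripped-down user-space sketch of the cast-macro pattern follows; fake_skb is invented for the example, and only the cb[] idea mirrors struct sk_buff:

#include <stdio.h>
#include <string.h>

struct fake_skb {
        unsigned char *data;
        char cb[48];                    /* same size as sk_buff::cb */
};

struct gianfar_skb_cb_example {
        int alignamount;
};

#define GFAR_CB_EX(skb) ((struct gianfar_skb_cb_example *)((skb)->cb))

int main(void)
{
        struct fake_skb skb;

        memset(&skb, 0, sizeof(skb));
        GFAR_CB_EX(&skb)->alignamount = 6;      /* remember the alignment pad */
        printf("stored alignamount = %d\n", GFAR_CB_EX(&skb)->alignamount);
        return 0;
}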
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index 2a8a886b37e..be8d010e402 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -1367,7 +1367,8 @@ out:
* igb_enable_mng_pass_thru - Enable processing of ARP's
* @hw: pointer to the HW structure
*
- * Verifies the hardware needs to allow ARPs to be processed by the host.
+ * Verifies the hardware needs to leave the interface enabled so that frames can
+ * be directed to and from the management interface.
**/
bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
{
@@ -1380,8 +1381,7 @@ bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
manc = rd32(E1000_MANC);
- if (!(manc & E1000_MANC_RCV_TCO_EN) ||
- !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
+ if (!(manc & E1000_MANC_RCV_TCO_EN))
goto out;
if (hw->mac.arc_subsystem_valid) {
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index a1775705b24..3b772b822a5 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -267,7 +267,6 @@ struct igb_adapter {
/* TX */
struct igb_ring *tx_ring[16];
- unsigned long tx_queue_len;
u32 tx_timeout_count;
/* RX */
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 45a0e4fd587..01c65c7447e 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -1105,9 +1105,6 @@ static void igb_configure(struct igb_adapter *adapter)
struct igb_ring *ring = adapter->rx_ring[i];
igb_alloc_rx_buffers_adv(ring, igb_desc_unused(ring));
}
-
-
- adapter->tx_queue_len = netdev->tx_queue_len;
}
/**
@@ -1213,7 +1210,6 @@ void igb_down(struct igb_adapter *adapter)
del_timer_sync(&adapter->watchdog_timer);
del_timer_sync(&adapter->phy_info_timer);
- netdev->tx_queue_len = adapter->tx_queue_len;
netif_carrier_off(netdev);
/* record the stats before reset*/
@@ -3106,17 +3102,13 @@ static void igb_watchdog_task(struct work_struct *work)
((ctrl & E1000_CTRL_RFCE) ? "RX" :
((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
- /* tweak tx_queue_len according to speed/duplex and
- * adjust the timeout factor */
- netdev->tx_queue_len = adapter->tx_queue_len;
+ /* adjust timeout factor according to speed/duplex */
adapter->tx_timeout_factor = 1;
switch (adapter->link_speed) {
case SPEED_10:
- netdev->tx_queue_len = 10;
adapter->tx_timeout_factor = 14;
break;
case SPEED_100:
- netdev->tx_queue_len = 100;
/* maybe add some timeout factor ? */
break;
}
@@ -3963,7 +3955,7 @@ void igb_update_stats(struct igb_adapter *adapter)
struct net_device_stats *net_stats = igb_get_stats(adapter->netdev);
struct e1000_hw *hw = &adapter->hw;
struct pci_dev *pdev = adapter->pdev;
- u32 rnbc, reg;
+ u32 reg, mpc;
u16 phy_tmp;
int i;
u64 bytes, packets;
@@ -4021,7 +4013,9 @@ void igb_update_stats(struct igb_adapter *adapter)
adapter->stats.symerrs += rd32(E1000_SYMERRS);
adapter->stats.sec += rd32(E1000_SEC);
- adapter->stats.mpc += rd32(E1000_MPC);
+ mpc = rd32(E1000_MPC);
+ adapter->stats.mpc += mpc;
+ net_stats->rx_fifo_errors += mpc;
adapter->stats.scc += rd32(E1000_SCC);
adapter->stats.ecol += rd32(E1000_ECOL);
adapter->stats.mcc += rd32(E1000_MCC);
@@ -4036,9 +4030,7 @@ void igb_update_stats(struct igb_adapter *adapter)
adapter->stats.gptc += rd32(E1000_GPTC);
adapter->stats.gotc += rd32(E1000_GOTCL);
rd32(E1000_GOTCH); /* clear GOTCL */
- rnbc = rd32(E1000_RNBC);
- adapter->stats.rnbc += rnbc;
- net_stats->rx_fifo_errors += rnbc;
+ adapter->stats.rnbc += rd32(E1000_RNBC);
adapter->stats.ruc += rd32(E1000_RUC);
adapter->stats.rfc += rd32(E1000_RFC);
adapter->stats.rjc += rd32(E1000_RJC);
@@ -5110,7 +5102,7 @@ static void igb_receive_skb(struct igb_q_vector *q_vector,
{
struct igb_adapter *adapter = q_vector->adapter;
- if (vlan_tag)
+ if (vlan_tag && adapter->vlgrp)
vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
vlan_tag, skb);
else
diff --git a/drivers/net/igbvf/igbvf.h b/drivers/net/igbvf/igbvf.h
index a1774b29d22..debeee2dc71 100644
--- a/drivers/net/igbvf/igbvf.h
+++ b/drivers/net/igbvf/igbvf.h
@@ -198,7 +198,6 @@ struct igbvf_adapter {
struct igbvf_ring *tx_ring /* One per active queue */
____cacheline_aligned_in_smp;
- unsigned long tx_queue_len;
unsigned int restart_queue;
u32 txd_cmd;
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index a77afd8a14b..b41037ed808 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -1304,8 +1304,6 @@ static void igbvf_configure_tx(struct igbvf_adapter *adapter)
/* enable Report Status bit */
adapter->txd_cmd |= E1000_ADVTXD_DCMD_RS;
-
- adapter->tx_queue_len = adapter->netdev->tx_queue_len;
}
/**
@@ -1524,7 +1522,6 @@ void igbvf_down(struct igbvf_adapter *adapter)
del_timer_sync(&adapter->watchdog_timer);
- netdev->tx_queue_len = adapter->tx_queue_len;
netif_carrier_off(netdev);
/* record the stats before reset*/
@@ -1857,21 +1854,15 @@ static void igbvf_watchdog_task(struct work_struct *work)
&adapter->link_duplex);
igbvf_print_link_info(adapter);
- /*
- * tweak tx_queue_len according to speed/duplex
- * and adjust the timeout factor
- */
- netdev->tx_queue_len = adapter->tx_queue_len;
+ /* adjust timeout factor according to speed/duplex */
adapter->tx_timeout_factor = 1;
switch (adapter->link_speed) {
case SPEED_10:
txb2b = 0;
- netdev->tx_queue_len = 10;
adapter->tx_timeout_factor = 16;
break;
case SPEED_100:
txb2b = 0;
- netdev->tx_queue_len = 100;
/* maybe add some timeout factor ? */
break;
}
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 19e94ee155a..79c35ae3718 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -204,14 +204,17 @@ enum ixgbe_ring_f_enum {
#define IXGBE_MAX_FDIR_INDICES 64
#ifdef IXGBE_FCOE
#define IXGBE_MAX_FCOE_INDICES 8
+#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
+#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES)
+#else
+#define MAX_RX_QUEUES IXGBE_MAX_FDIR_INDICES
+#define MAX_TX_QUEUES IXGBE_MAX_FDIR_INDICES
#endif /* IXGBE_FCOE */
struct ixgbe_ring_feature {
int indices;
int mask;
} ____cacheline_internodealigned_in_smp;
-#define MAX_RX_QUEUES 128
-#define MAX_TX_QUEUES 128
#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \
? 8 : 1)
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 7949a446e4c..1959ef76c96 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -1853,6 +1853,26 @@ static void ixgbe_diag_test(struct net_device *netdev,
if (ixgbe_link_test(adapter, &data[4]))
eth_test->flags |= ETH_TEST_FL_FAILED;
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) {
+ int i;
+ for (i = 0; i < adapter->num_vfs; i++) {
+ if (adapter->vfinfo[i].clear_to_send) {
+ netdev_warn(netdev, "%s",
+ "offline diagnostic is not "
+ "supported when VFs are "
+ "present\n");
+ data[0] = 1;
+ data[1] = 1;
+ data[2] = 1;
+ data[3] = 1;
+ eth_test->flags |= ETH_TEST_FL_FAILED;
+ clear_bit(__IXGBE_TESTING,
+ &adapter->state);
+ goto skip_ol_tests;
+ }
+ }
+ }
+
if (if_running)
/* indicate we're in test mode */
dev_close(netdev);
@@ -1908,6 +1928,7 @@ skip_loopback:
clear_bit(__IXGBE_TESTING, &adapter->state);
}
+skip_ol_tests:
msleep_interruptible(4 * 1000);
}
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index 700cfc0aa1b..9276d5965b0 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -202,6 +202,15 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
addr = sg_dma_address(sg);
len = sg_dma_len(sg);
while (len) {
+ /* max number of buffers allowed in one DDP context */
+ if (j >= IXGBE_BUFFCNT_MAX) {
+ netif_err(adapter, drv, adapter->netdev,
+ "xid=%x:%d,%d,%d:addr=%llx "
+ "not enough descriptors\n",
+ xid, i, j, dmacount, (u64)addr);
+ goto out_noddp_free;
+ }
+
/* get the offset of length of current buffer */
thisoff = addr & ((dma_addr_t)bufflen - 1);
thislen = min((bufflen - thisoff), len);
@@ -227,20 +236,13 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
len -= thislen;
addr += thislen;
j++;
- /* max number of buffers allowed in one DDP context */
- if (j > IXGBE_BUFFCNT_MAX) {
- DPRINTK(DRV, ERR, "xid=%x:%d,%d,%d:addr=%llx "
- "not enough descriptors\n",
- xid, i, j, dmacount, (u64)addr);
- goto out_noddp_free;
- }
}
}
/* only the last buffer may have non-full bufflen */
lastsize = thisoff + thislen;
fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
- fcbuff |= (j << IXGBE_FCBUFF_BUFFCNT_SHIFT);
+ fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
fcbuff |= (IXGBE_FCBUFF_VALID);
@@ -520,6 +522,9 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
/* Enable L2 eth type filter for FCoE */
IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE),
(ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN));
+ /* Enable L2 eth type filter for FIP */
+ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP),
+ (ETH_P_FIP | IXGBE_ETQF_FILTER_EN));
if (adapter->ring_feature[RING_F_FCOE].indices) {
/* Use multiple rx queues for FCoE by redirection table */
for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
@@ -530,6 +535,12 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
}
IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
+ fcoe_i = f->mask;
+ fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
+ fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
+ IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
+ IXGBE_ETQS_QUEUE_EN |
+ (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
} else {
/* Use single rx queue for FCoE */
fcoe_i = f->mask;
@@ -539,6 +550,12 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
IXGBE_ETQS_QUEUE_EN |
(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
}
+ /* send FIP frames to the first FCoE queue */
+ fcoe_i = f->mask;
+ fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
+ IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
+ IXGBE_ETQS_QUEUE_EN |
+ (fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL,
IXGBE_FCRXCTRL_FCOELLI |
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index d75c46ff31f..0c553f6cb53 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -3056,6 +3056,14 @@ void ixgbe_reinit_locked(struct ixgbe_adapter *adapter)
while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state))
msleep(1);
ixgbe_down(adapter);
+ /*
+ * If SR-IOV enabled then wait a bit before bringing the adapter
+ * back up to give the VFs time to respond to the reset. The
+ * two second wait is based upon the watchdog timer cycle in
+ * the VF driver.
+ */
+ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+ msleep(2000);
ixgbe_up(adapter);
clear_bit(__IXGBE_RESETTING, &adapter->state);
}
@@ -3236,13 +3244,15 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
/* disable receive for all VFs and wait one second */
if (adapter->num_vfs) {
- for (i = 0 ; i < adapter->num_vfs; i++)
- adapter->vfinfo[i].clear_to_send = 0;
-
/* ping all the active vfs to let them know we are going down */
ixgbe_ping_all_vfs(adapter);
+
/* Disable all VFTE/VFRE TX/RX */
ixgbe_disable_tx_rx(adapter);
+
+ /* Mark all the VFs as inactive */
+ for (i = 0 ; i < adapter->num_vfs; i++)
+ adapter->vfinfo[i].clear_to_send = 0;
}
/* disable receives */
@@ -5638,7 +5648,8 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
#ifdef IXGBE_FCOE
if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
- (skb->protocol == htons(ETH_P_FCOE))) {
+ ((skb->protocol == htons(ETH_P_FCOE)) ||
+ (skb->protocol == htons(ETH_P_FIP)))) {
txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
txq += adapter->ring_feature[RING_F_FCOE].mask;
return txq;
@@ -5685,18 +5696,25 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
tx_ring = adapter->tx_ring[skb->queue_mapping];
- if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) &&
- (skb->protocol == htons(ETH_P_FCOE))) {
- tx_flags |= IXGBE_TX_FLAGS_FCOE;
#ifdef IXGBE_FCOE
+ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
#ifdef CONFIG_IXGBE_DCB
- tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
- << IXGBE_TX_FLAGS_VLAN_SHIFT);
- tx_flags |= ((adapter->fcoe.up << 13)
- << IXGBE_TX_FLAGS_VLAN_SHIFT);
-#endif
+ /* for FCoE with DCB, we force the priority to what
+ * was specified by the switch */
+ if ((skb->protocol == htons(ETH_P_FCOE)) ||
+ (skb->protocol == htons(ETH_P_FIP))) {
+ tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
+ << IXGBE_TX_FLAGS_VLAN_SHIFT);
+ tx_flags |= ((adapter->fcoe.up << 13)
+ << IXGBE_TX_FLAGS_VLAN_SHIFT);
+ }
#endif
+ /* flag for FCoE offloads */
+ if (skb->protocol == htons(ETH_P_FCOE))
+ tx_flags |= IXGBE_TX_FLAGS_FCOE;
}
+#endif
+
/* four things can cause us to need a context descriptor */
if (skb_is_gso(skb) ||
(skb->ip_summed == CHECKSUM_PARTIAL) ||
@@ -6051,7 +6069,6 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
indices += min_t(unsigned int, num_possible_cpus(),
IXGBE_MAX_FCOE_INDICES);
#endif
- indices = min_t(unsigned int, indices, MAX_TX_QUEUES);
netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices);
if (!netdev) {
err = -ENOMEM;
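
The ixgbe changes above extend the FCoE transmit path so that FIP (FCoE Initialization Protocol) frames are steered and prioritized the same way as FCoE data frames, both in ixgbe_select_queue() and in ixgbe_xmit_frame(). For reference, the two ethertypes involved are ETH_P_FCOE (0x8906) and ETH_P_FIP (0x8914); the sketch below only illustrates the protocol test, with a made-up helper name, and is not code from the patch.

    #include <stdio.h>
    #include <stdint.h>
    #include <arpa/inet.h>		/* htons() */

    #define ETH_P_FCOE 0x8906	/* Fibre Channel over Ethernet */
    #define ETH_P_FIP  0x8914	/* FCoE Initialization Protocol */

    /* Illustrative helper: should this skb->protocol value be routed to
     * the FCoE queue set? The patch applies this test in both the queue
     * selection and the transmit path. */
    static int is_fcoe_or_fip(uint16_t proto_be)
    {
    	return proto_be == htons(ETH_P_FCOE) || proto_be == htons(ETH_P_FIP);
    }

    int main(void)
    {
    	printf("FCoE steered to FCoE queues: %d\n", is_fcoe_or_fip(htons(ETH_P_FCOE)));
    	printf("FIP  steered to FCoE queues: %d\n", is_fcoe_or_fip(htons(ETH_P_FIP)));
    	printf("IPv4 steered to FCoE queues: %d\n", is_fcoe_or_fip(htons(0x0800)));
    	return 0;
    }
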
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index 0ed5ab37cc5..4ec6dc1a5b7 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -1298,6 +1298,7 @@
#define IXGBE_ETQF_FILTER_BCN 1
#define IXGBE_ETQF_FILTER_FCOE 2
#define IXGBE_ETQF_FILTER_1588 3
+#define IXGBE_ETQF_FILTER_FIP 4
/* VLAN Control Bit Masks */
#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */
#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */
diff --git a/drivers/net/ixgbevf/ixgbevf_main.c b/drivers/net/ixgbevf/ixgbevf_main.c
index d6cbd943a6f..1bbbef3ee3f 100644
--- a/drivers/net/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ixgbevf/ixgbevf_main.c
@@ -2943,9 +2943,10 @@ static int ixgbevf_tx_map(struct ixgbevf_adapter *adapter,
struct ixgbevf_tx_buffer *tx_buffer_info;
unsigned int len;
unsigned int total = skb->len;
- unsigned int offset = 0, size, count = 0, i;
+ unsigned int offset = 0, size, count = 0;
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
unsigned int f;
+ int i;
i = tx_ring->next_to_use;
diff --git a/drivers/net/ksz884x.c b/drivers/net/ksz884x.c
index 0f59099ee72..6c5327af1bf 100644
--- a/drivers/net/ksz884x.c
+++ b/drivers/net/ksz884x.c
@@ -6322,7 +6322,7 @@ static int netdev_set_eeprom(struct net_device *dev,
int len;
if (eeprom->magic != EEPROM_MAGIC)
- return 1;
+ return -EINVAL;
len = (eeprom->offset + eeprom->len + 1) / 2;
for (i = eeprom->offset / 2; i < len; i++)
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 144d2e88042..0f703838e21 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
#define _NETXEN_NIC_LINUX_MAJOR 4
#define _NETXEN_NIC_LINUX_MINOR 0
-#define _NETXEN_NIC_LINUX_SUBVERSION 72
-#define NETXEN_NIC_LINUX_VERSIONID "4.0.72"
+#define _NETXEN_NIC_LINUX_SUBVERSION 73
+#define NETXEN_NIC_LINUX_VERSIONID "4.0.73"
#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
#define _major(v) (((v) >> 24) & 0xff)
diff --git a/drivers/net/netxen/netxen_nic_ctx.c b/drivers/net/netxen/netxen_nic_ctx.c
index 2a8ef5fc966..f26e54716c8 100644
--- a/drivers/net/netxen/netxen_nic_ctx.c
+++ b/drivers/net/netxen/netxen_nic_ctx.c
@@ -669,13 +669,15 @@ int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
}
sds_ring->desc_head = (struct status_desc *)addr;
- sds_ring->crb_sts_consumer =
- netxen_get_ioaddr(adapter,
- recv_crb_registers[port].crb_sts_consumer[ring]);
+ if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
+ sds_ring->crb_sts_consumer =
+ netxen_get_ioaddr(adapter,
+ recv_crb_registers[port].crb_sts_consumer[ring]);
- sds_ring->crb_intr_mask =
- netxen_get_ioaddr(adapter,
- recv_crb_registers[port].sw_int_mask[ring]);
+ sds_ring->crb_intr_mask =
+ netxen_get_ioaddr(adapter,
+ recv_crb_registers[port].sw_int_mask[ring]);
+ }
}
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index 1c63610ead4..7eb925a9f36 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -761,7 +761,7 @@ nx_get_bios_version(struct netxen_adapter *adapter)
if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {
bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
+ NX_UNI_BIOS_VERSION_OFF));
- return (bios_ver << 24) + ((bios_ver >> 8) & 0xff00) +
+ return (bios_ver << 16) + ((bios_ver >> 8) & 0xff00) +
(bios_ver >> 24);
} else
return cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]);
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 08780ef1c1f..01808b28d1b 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -604,16 +604,14 @@ netxen_cleanup_pci_map(struct netxen_adapter *adapter)
static int
netxen_setup_pci_map(struct netxen_adapter *adapter)
{
- void __iomem *mem_ptr0 = NULL;
- void __iomem *mem_ptr1 = NULL;
- void __iomem *mem_ptr2 = NULL;
void __iomem *db_ptr = NULL;
resource_size_t mem_base, db_base;
- unsigned long mem_len, db_len = 0, pci_len0 = 0;
+ unsigned long mem_len, db_len = 0;
struct pci_dev *pdev = adapter->pdev;
int pci_func = adapter->ahw.pci_func;
+ struct netxen_hardware_context *ahw = &adapter->ahw;
int err = 0;
@@ -630,24 +628,40 @@ netxen_setup_pci_map(struct netxen_adapter *adapter)
/* 128 Meg of memory */
if (mem_len == NETXEN_PCI_128MB_SIZE) {
- mem_ptr0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE);
- mem_ptr1 = ioremap(mem_base + SECOND_PAGE_GROUP_START,
+
+ ahw->pci_base0 = ioremap(mem_base, FIRST_PAGE_GROUP_SIZE);
+ ahw->pci_base1 = ioremap(mem_base + SECOND_PAGE_GROUP_START,
SECOND_PAGE_GROUP_SIZE);
- mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START,
+ ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START,
THIRD_PAGE_GROUP_SIZE);
- pci_len0 = FIRST_PAGE_GROUP_SIZE;
+ if (ahw->pci_base0 == NULL || ahw->pci_base1 == NULL ||
+ ahw->pci_base2 == NULL) {
+ dev_err(&pdev->dev, "failed to map PCI bar 0\n");
+ err = -EIO;
+ goto err_out;
+ }
+
+ ahw->pci_len0 = FIRST_PAGE_GROUP_SIZE;
+
} else if (mem_len == NETXEN_PCI_32MB_SIZE) {
- mem_ptr1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE);
- mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START -
+
+ ahw->pci_base1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE);
+ ahw->pci_base2 = ioremap(mem_base + THIRD_PAGE_GROUP_START -
SECOND_PAGE_GROUP_START, THIRD_PAGE_GROUP_SIZE);
+ if (ahw->pci_base1 == NULL || ahw->pci_base2 == NULL) {
+ dev_err(&pdev->dev, "failed to map PCI bar 0\n");
+ err = -EIO;
+ goto err_out;
+ }
+
} else if (mem_len == NETXEN_PCI_2MB_SIZE) {
- mem_ptr0 = pci_ioremap_bar(pdev, 0);
- if (mem_ptr0 == NULL) {
+ ahw->pci_base0 = pci_ioremap_bar(pdev, 0);
+ if (ahw->pci_base0 == NULL) {
dev_err(&pdev->dev, "failed to map PCI bar 0\n");
return -EIO;
}
- pci_len0 = mem_len;
+ ahw->pci_len0 = mem_len;
} else {
return -EIO;
}
@@ -656,11 +670,6 @@ netxen_setup_pci_map(struct netxen_adapter *adapter)
dev_info(&pdev->dev, "%dMB memory map\n", (int)(mem_len>>20));
- adapter->ahw.pci_base0 = mem_ptr0;
- adapter->ahw.pci_len0 = pci_len0;
- adapter->ahw.pci_base1 = mem_ptr1;
- adapter->ahw.pci_base2 = mem_ptr2;
-
if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) {
adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter,
NETXEN_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func)));
@@ -1246,8 +1255,8 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
int pci_func_id = PCI_FUNC(pdev->devfn);
uint8_t revision_id;
- if (pdev->revision >= NX_P3_A0 && pdev->revision < NX_P3_B1) {
- pr_warning("%s: chip revisions between 0x%x-0x%x"
+ if (pdev->revision >= NX_P3_A0 && pdev->revision <= NX_P3_B1) {
+ pr_warning("%s: chip revisions between 0x%x-0x%x "
"will not be enabled.\n",
module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1);
return -ENODEV;
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 776cad2f571..1028fcb91a2 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1549,6 +1549,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x021b, 0x0101),
PCMCIA_PFC_DEVICE_MANF_CARD(0, 0x08a1, 0xc0ab),
PCMCIA_PFC_DEVICE_PROD_ID12(0, "AnyCom", "Fast Ethernet + 56K COMBO", 0x578ba6e7, 0xb0ac62c4),
+ PCMCIA_PFC_DEVICE_PROD_ID12(0, "ATKK", "LM33-PCM-T", 0xba9eb7e2, 0x077c174e),
PCMCIA_PFC_DEVICE_PROD_ID12(0, "D-Link", "DME336T", 0x1a424a1c, 0xb23897ff),
PCMCIA_PFC_DEVICE_PROD_ID12(0, "Grey Cell", "GCS3000", 0x2a151fac, 0x48b932ae),
PCMCIA_PFC_DEVICE_PROD_ID12(0, "Linksys", "EtherFast 10&100 + 56K PC Card (PCMLM56)", 0x0733cc81, 0xb3765033),
@@ -1740,7 +1741,7 @@ static struct pcmcia_device_id pcnet_ids[] = {
PCMCIA_MFC_DEVICE_CIS_PROD_ID12(0, "DAYNA COMMUNICATIONS", "LAN AND MODEM MULTIFUNCTION", 0x8fdf8f89, 0xdd5ed9e8, "cis/DP83903.cis"),
PCMCIA_MFC_DEVICE_CIS_PROD_ID4(0, "NSC MF LAN/Modem", 0x58fc6056, "cis/DP83903.cis"),
PCMCIA_MFC_DEVICE_CIS_MANF_CARD(0, 0x0175, 0x0000, "cis/DP83903.cis"),
- PCMCIA_DEVICE_CIS_MANF_CARD(0xc00f, 0x0002, "cis/LA-PCM.cis"),
+ PCMCIA_DEVICE_CIS_PROD_ID12("Allied Telesis,K.K", "Ethernet LAN Card", 0x2ad62f3c, 0x9fd2f0a2, "cis/LA-PCM.cis"),
PCMCIA_DEVICE_CIS_PROD_ID12("KTI", "PE520 PLUS", 0xad180345, 0x9d58d392, "cis/PE520.cis"),
PCMCIA_DEVICE_CIS_PROD_ID12("NDC", "Ethernet", 0x01c43ae1, 0x00b2e941, "cis/NE2K.cis"),
PCMCIA_DEVICE_CIS_PROD_ID12("PMX ", "PE-200", 0x34f3f1c8, 0x10b59f8c, "cis/PE-200.cis"),
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index 9d3ebf3e975..96740051cdc 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -186,8 +186,13 @@ static DEFINE_PCI_DEVICE_TABLE(rtl8169_pci_tbl) = {
MODULE_DEVICE_TABLE(pci, rtl8169_pci_tbl);
-static int rx_copybreak = 200;
-static int use_dac = -1;
+/*
+ * We set our copybreak very high so that we don't have
+ * to allocate 16k frames all the time (see the note in
+ * rtl8169_open()).
+ */
+static int rx_copybreak = 16383;
+static int use_dac;
static struct {
u32 msg_enable;
} debug = { -1 };
@@ -511,8 +516,7 @@ MODULE_DESCRIPTION("RealTek RTL-8169 Gigabit Ethernet driver");
module_param(rx_copybreak, int, 0);
MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
module_param(use_dac, int, 0);
-MODULE_PARM_DESC(use_dac, "Enable PCI DAC. -1 defaults on for PCI Express only."
-" Unsafe on 32 bit PCI slot.");
+MODULE_PARM_DESC(use_dac, "Enable PCI DAC. Unsafe on 32 bit PCI slot.");
module_param_named(debug, debug.msg_enable, int, 0);
MODULE_PARM_DESC(debug, "Debug verbosity level (0=none, ..., 16=all)");
MODULE_LICENSE("GPL");
@@ -2821,8 +2825,8 @@ static void rtl_rar_set(struct rtl8169_private *tp, u8 *addr)
spin_lock_irq(&tp->lock);
RTL_W8(Cfg9346, Cfg9346_Unlock);
- RTL_W32(MAC0, low);
RTL_W32(MAC4, high);
+ RTL_W32(MAC0, low);
RTL_W8(Cfg9346, Cfg9346_Lock);
spin_unlock_irq(&tp->lock);
@@ -2974,7 +2978,6 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
void __iomem *ioaddr;
unsigned int i;
int rc;
- int this_use_dac = use_dac;
if (netif_msg_drv(&debug)) {
printk(KERN_INFO "%s Gigabit Ethernet driver %s loaded\n",
@@ -3040,17 +3043,8 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
tp->cp_cmd = PCIMulRW | RxChkSum;
- tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
- if (!tp->pcie_cap)
- netif_info(tp, probe, dev, "no PCI Express capability\n");
-
- if (this_use_dac < 0)
- this_use_dac = tp->pcie_cap != 0;
-
if ((sizeof(dma_addr_t) > 4) &&
- this_use_dac &&
- !pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) {
- netif_info(tp, probe, dev, "using 64-bit DMA\n");
+ !pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) && use_dac) {
tp->cp_cmd |= PCIDAC;
dev->features |= NETIF_F_HIGHDMA;
} else {
@@ -3069,6 +3063,10 @@ rtl8169_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
goto err_out_free_res_4;
}
+ tp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
+ if (!tp->pcie_cap)
+ netif_info(tp, probe, dev, "no PCI Express capability\n");
+
RTL_W16(IntrMask, 0x0000);
/* Soft reset the chip. */
@@ -3224,9 +3222,13 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
}
static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
- struct net_device *dev)
+ unsigned int mtu)
{
- unsigned int max_frame = dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+ unsigned int max_frame = mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
+
+ if (max_frame != 16383)
+ printk(KERN_WARNING "WARNING! Changing the MTU on this NIC "
+ "may lead to frame reception errors!\n");
tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE;
}
@@ -3238,7 +3240,17 @@ static int rtl8169_open(struct net_device *dev)
int retval = -ENOMEM;
- rtl8169_set_rxbufsize(tp, dev);
+ /*
+ * Note that we use a magic value here: some rtl8169 hardware
+ * reports garbage sizes for received frames that are longer than
+ * the value programmed into the RxMaxSize register. To avoid this
+ * we effectively disable the size filter by setting RxMaxSize to
+ * its maximum, 16383, and allocating 16k receive buffers so the
+ * largest possible frame still fits; that is what the math below does.
+ */
+ rtl8169_set_rxbufsize(tp, 16383 - VLAN_ETH_HLEN - ETH_FCS_LEN);
/*
* Rx and Tx desscriptors needs 256 bytes alignment.
@@ -3891,7 +3903,7 @@ static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
rtl8169_down(dev);
- rtl8169_set_rxbufsize(tp, dev);
+ rtl8169_set_rxbufsize(tp, dev->mtu);
ret = rtl8169_init_ring(dev);
if (ret < 0)
@@ -4754,8 +4766,8 @@ static void rtl_set_rx_mode(struct net_device *dev)
mc_filter[1] = swab32(data);
}
- RTL_W32(MAR0 + 0, mc_filter[0]);
RTL_W32(MAR0 + 4, mc_filter[1]);
+ RTL_W32(MAR0 + 0, mc_filter[0]);
RTL_W32(RxConfig, tmp);
diff --git a/drivers/net/tulip/uli526x.c b/drivers/net/tulip/uli526x.c
index 0ab05af237e..a4f09d49053 100644
--- a/drivers/net/tulip/uli526x.c
+++ b/drivers/net/tulip/uli526x.c
@@ -851,13 +851,15 @@ static void uli526x_rx_packet(struct net_device *dev, struct uli526x_board_info
if ( !(rdes0 & 0x8000) ||
((db->cr6_data & CR6_PM) && (rxlen>6)) ) {
+ struct sk_buff *new_skb = NULL;
+
skb = rxptr->rx_skb_ptr;
/* Good packet, send to upper layer */
/* Short packet uses new SKB */
- if ( (rxlen < RX_COPY_SIZE) &&
- ( (skb = dev_alloc_skb(rxlen + 2) )
- != NULL) ) {
+ if ((rxlen < RX_COPY_SIZE) &&
+ (((new_skb = dev_alloc_skb(rxlen + 2)) != NULL))) {
+ skb = new_skb;
/* size less than COPY_SIZE, allocate a rxlen SKB */
skb_reserve(skb, 2); /* 16byte align */
memcpy(skb_put(skb, rxlen),
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index 3a486f3bad3..bc278d4ee89 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -812,7 +812,7 @@ static void set_mii_flow_control(struct velocity_info *vptr)
case FLOW_CNTL_TX_RX:
MII_REG_BITS_ON(ANAR_PAUSE, MII_REG_ANAR, vptr->mac_regs);
- MII_REG_BITS_ON(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
+ MII_REG_BITS_OFF(ANAR_ASMDIR, MII_REG_ANAR, vptr->mac_regs);
break;
case FLOW_CNTL_DISABLE:
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index 406757a9d7e..dee4fb56b09 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -376,8 +376,11 @@ unsigned long __init unflatten_dt_node(unsigned long mem,
if (!np->type)
np->type = "<NULL>";
}
- while (tag == OF_DT_BEGIN_NODE) {
- mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
+ while (tag == OF_DT_BEGIN_NODE || tag == OF_DT_NOP) {
+ if (tag == OF_DT_NOP)
+ *p += 4;
+ else
+ mem = unflatten_dt_node(mem, p, np, allnextpp, fpsize);
tag = be32_to_cpup((__be32 *)(*p));
}
if (tag != OF_DT_END_NODE) {
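
The fdt.c fix above lets unflatten_dt_node() tolerate OF_DT_NOP tokens between child nodes instead of dropping out of the child loop when it meets one. The sketch below is a deliberately simplified model of that loop (token values as defined for the flattened device tree format; real children can nest, which the kernel handles by recursion):

    #include <stdio.h>
    #include <stdint.h>

    #define OF_DT_BEGIN_NODE 0x1
    #define OF_DT_END_NODE   0x2
    #define OF_DT_NOP        0x4

    /* Simplified child loop: walk a token stream, skipping NOP padding and
     * counting each BEGIN_NODE (where the kernel would recurse). Assumes
     * flat children, i.e. BEGIN_NODE immediately followed by END_NODE. */
    static int count_children(const uint32_t *tok)
    {
    	int children = 0;
    	unsigned int pos = 0;

    	while (tok[pos] == OF_DT_BEGIN_NODE || tok[pos] == OF_DT_NOP) {
    		if (tok[pos] == OF_DT_NOP)
    			pos++;		/* the kernel advances *p by 4 bytes */
    		else {
    			children++;	/* kernel: mem = unflatten_dt_node(...) */
    			pos += 2;	/* skip BEGIN_NODE and its END_NODE */
    		}
    	}
    	return children;
    }

    int main(void)
    {
    	/* two children separated by NOP padding, then the parent's END_NODE */
    	uint32_t stream[] = { OF_DT_BEGIN_NODE, OF_DT_END_NODE, OF_DT_NOP,
    			      OF_DT_BEGIN_NODE, OF_DT_END_NODE, OF_DT_END_NODE };

    	printf("children found: %d\n", count_children(stream));	/* 2 */
    	return 0;
    }
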
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 40b48f569b1..9665d6b17a2 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -832,9 +832,8 @@ static inline void dbg_ctrl(struct controller *ctrl)
for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) {
if (!pci_resource_len(pdev, i))
continue;
- ctrl_info(ctrl, " PCI resource [%d] : 0x%llx@0x%llx\n",
- i, (unsigned long long)pci_resource_len(pdev, i),
- (unsigned long long)pci_resource_start(pdev, i));
+ ctrl_info(ctrl, " PCI resource [%d] : %pR\n",
+ i, &pdev->resource[i]);
}
ctrl_info(ctrl, "Slot Capabilities : 0x%08x\n", ctrl->slot_cap);
ctrl_info(ctrl, " Physical Slot Number : %d\n", PSN(ctrl));
diff --git a/drivers/pci/ioapic.c b/drivers/pci/ioapic.c
index 3e0d7b5dd1b..fb9fdf4a42b 100644
--- a/drivers/pci/ioapic.c
+++ b/drivers/pci/ioapic.c
@@ -31,9 +31,9 @@ static int ioapic_probe(struct pci_dev *dev, const struct pci_device_id *ent)
acpi_status status;
unsigned long long gsb;
struct ioapic *ioapic;
- u64 addr;
int ret;
char *type;
+ struct resource *res;
handle = DEVICE_ACPI_HANDLE(&dev->dev);
if (!handle)
@@ -69,13 +69,12 @@ static int ioapic_probe(struct pci_dev *dev, const struct pci_device_id *ent)
if (pci_request_region(dev, 0, type))
goto exit_disable;
- addr = pci_resource_start(dev, 0);
- if (acpi_register_ioapic(ioapic->handle, addr, ioapic->gsi_base))
+ res = &dev->resource[0];
+ if (acpi_register_ioapic(ioapic->handle, res->start, ioapic->gsi_base))
goto exit_release;
pci_set_drvdata(dev, ioapic);
- dev_info(&dev->dev, "%s at %#llx, GSI %u\n", type, addr,
- ioapic->gsi_base);
+ dev_info(&dev->dev, "%s at %pR, GSI %u\n", type, res, ioapic->gsi_base);
return 0;
exit_release:
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index cb1dd5f4988..1531f3a4987 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -2576,18 +2576,17 @@ EXPORT_SYMBOL_GPL(pci_reset_function);
*/
int pcix_get_max_mmrbc(struct pci_dev *dev)
{
- int err, cap;
+ int cap;
u32 stat;
cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
if (!cap)
return -EINVAL;
- err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat);
- if (err)
+ if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
return -EINVAL;
- return (stat & PCI_X_STATUS_MAX_READ) >> 12;
+ return 512 << ((stat & PCI_X_STATUS_MAX_READ) >> 21);
}
EXPORT_SYMBOL(pcix_get_max_mmrbc);
@@ -2600,18 +2599,17 @@ EXPORT_SYMBOL(pcix_get_max_mmrbc);
*/
int pcix_get_mmrbc(struct pci_dev *dev)
{
- int ret, cap;
- u32 cmd;
+ int cap;
+ u16 cmd;
cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
if (!cap)
return -EINVAL;
- ret = pci_read_config_dword(dev, cap + PCI_X_CMD, &cmd);
- if (!ret)
- ret = 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
+ if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
+ return -EINVAL;
- return ret;
+ return 512 << ((cmd & PCI_X_CMD_MAX_READ) >> 2);
}
EXPORT_SYMBOL(pcix_get_mmrbc);
@@ -2626,28 +2624,27 @@ EXPORT_SYMBOL(pcix_get_mmrbc);
*/
int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
{
- int cap, err = -EINVAL;
- u32 stat, cmd, v, o;
+ int cap;
+ u32 stat, v, o;
+ u16 cmd;
if (mmrbc < 512 || mmrbc > 4096 || !is_power_of_2(mmrbc))
- goto out;
+ return -EINVAL;
v = ffs(mmrbc) - 10;
cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
if (!cap)
- goto out;
+ return -EINVAL;
- err = pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat);
- if (err)
- goto out;
+ if (pci_read_config_dword(dev, cap + PCI_X_STATUS, &stat))
+ return -EINVAL;
if (v > (stat & PCI_X_STATUS_MAX_READ) >> 21)
return -E2BIG;
- err = pci_read_config_dword(dev, cap + PCI_X_CMD, &cmd);
- if (err)
- goto out;
+ if (pci_read_config_word(dev, cap + PCI_X_CMD, &cmd))
+ return -EINVAL;
o = (cmd & PCI_X_CMD_MAX_READ) >> 2;
if (o != v) {
@@ -2657,10 +2654,10 @@ int pcix_set_mmrbc(struct pci_dev *dev, int mmrbc)
cmd &= ~PCI_X_CMD_MAX_READ;
cmd |= v << 2;
- err = pci_write_config_dword(dev, cap + PCI_X_CMD, cmd);
+ if (pci_write_config_word(dev, cap + PCI_X_CMD, cmd))
+ return -EIO;
}
-out:
- return err;
+ return 0;
}
EXPORT_SYMBOL(pcix_set_mmrbc);
@@ -3023,7 +3020,6 @@ EXPORT_SYMBOL(pcim_pin_device);
EXPORT_SYMBOL(pci_disable_device);
EXPORT_SYMBOL(pci_find_capability);
EXPORT_SYMBOL(pci_bus_find_capability);
-EXPORT_SYMBOL(pci_register_set_vga_state);
EXPORT_SYMBOL(pci_release_regions);
EXPORT_SYMBOL(pci_request_regions);
EXPORT_SYMBOL(pci_request_regions_exclusive);
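
The pcix_*_mmrbc() rewrites above come down to two register layouts: the maximum memory read byte count supported by the device lives in bits 22:21 of the 32-bit PCI-X status register (mask 0x00600000), while the currently programmed value lives in bits 3:2 of the 16-bit PCI-X command register (mask 0x000c). Both fields encode 0..3 for 512/1024/2048/4096 bytes, which is why the fixed code decodes with 512 << field, shifts the status field by 21 rather than 12, and accesses the command register with word-sized config reads and writes. A small sketch of the encode/decode, assuming the standard PCI-X field definitions:

    #include <stdio.h>
    #include <stdint.h>

    #define PCI_X_CMD_MAX_READ	0x000c		/* bits 3:2 of the command register */
    #define PCI_X_STATUS_MAX_READ	0x00600000	/* bits 22:21 of the status register */

    /* decode a 2-bit field value (0..3) into a byte count */
    static int mmrbc_bytes(unsigned int field)
    {
    	return 512 << field;
    }

    /* encode a byte count (512..4096, power of two) into the field value,
     * mirroring the kernel's "ffs(mmrbc) - 10" */
    static unsigned int mmrbc_field(int mmrbc)
    {
    	return __builtin_ffs(mmrbc) - 10;
    }

    int main(void)
    {
    	uint32_t stat = 0x00400000;	/* example status: field = 2 -> 2048-byte max */
    	uint16_t cmd  = 0x0004;		/* example command: field = 1 -> 1024 bytes set */

    	printf("device max mmrbc: %d\n", mmrbc_bytes((stat & PCI_X_STATUS_MAX_READ) >> 21));
    	printf("current mmrbc:    %d\n", mmrbc_bytes((cmd & PCI_X_CMD_MAX_READ) >> 2));
    	printf("field for 4096:   %u\n", mmrbc_field(4096));	/* 3 */
    	return 0;
    }
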
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 2a943090a3b..882bd8d29fe 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -174,14 +174,19 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
pci_read_config_dword(dev, pos, &sz);
pci_write_config_dword(dev, pos, l);
+ if (!sz)
+ goto fail; /* BAR not implemented */
+
/*
* All bits set in sz means the device isn't working properly.
- * If the BAR isn't implemented, all bits must be 0. If it's a
- * memory BAR or a ROM, bit 0 must be clear; if it's an io BAR, bit
- * 1 must be clear.
+ * If it's a memory BAR or a ROM, bit 0 must be clear; if it's
+ * an io BAR, bit 1 must be clear.
*/
- if (!sz || sz == 0xffffffff)
+ if (sz == 0xffffffff) {
+ dev_err(&dev->dev, "reg %x: invalid size %#x; broken device?\n",
+ pos, sz);
goto fail;
+ }
/*
* I don't know how l can have all bits set. Copied from old code.
@@ -244,13 +249,17 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
pos, res);
}
} else {
- sz = pci_size(l, sz, mask);
+ u32 size = pci_size(l, sz, mask);
- if (!sz)
+ if (!size) {
+ dev_err(&dev->dev, "reg %x: invalid size "
+ "(l %#x sz %#x mask %#x); broken device?",
+ pos, l, sz, mask);
goto fail;
+ }
res->start = l;
- res->end = l + sz;
+ res->end = l + size;
dev_printk(KERN_DEBUG, &dev->dev, "reg %x: %pR\n", pos, res);
}
@@ -312,7 +321,7 @@ static void __devinit pci_read_bridge_io(struct pci_bus *child)
dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
} else {
dev_printk(KERN_DEBUG, &dev->dev,
- " bridge window [io %04lx - %04lx] reg reading\n",
+ " bridge window [io %#06lx-%#06lx] (disabled)\n",
base, limit);
}
}
@@ -336,7 +345,7 @@ static void __devinit pci_read_bridge_mmio(struct pci_bus *child)
dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
} else {
dev_printk(KERN_DEBUG, &dev->dev,
- " bridge window [mem 0x%08lx - 0x%08lx] reg reading\n",
+ " bridge window [mem %#010lx-%#010lx] (disabled)\n",
base, limit + 0xfffff);
}
}
@@ -387,7 +396,7 @@ static void __devinit pci_read_bridge_mmio_pref(struct pci_bus *child)
dev_printk(KERN_DEBUG, &dev->dev, " bridge window %pR\n", res);
} else {
dev_printk(KERN_DEBUG, &dev->dev,
- " bridge window [mem 0x%08lx - %08lx pref] reg reading\n",
+ " bridge window [mem %#010lx-%#010lx pref] (disabled)\n",
base, limit + 0xfffff);
}
}
@@ -673,16 +682,20 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
int is_cardbus = (dev->hdr_type == PCI_HEADER_TYPE_CARDBUS);
u32 buses, i, j = 0;
u16 bctl;
+ u8 primary, secondary, subordinate;
int broken = 0;
pci_read_config_dword(dev, PCI_PRIMARY_BUS, &buses);
+ primary = buses & 0xFF;
+ secondary = (buses >> 8) & 0xFF;
+ subordinate = (buses >> 16) & 0xFF;
- dev_dbg(&dev->dev, "scanning behind bridge, config %06x, pass %d\n",
- buses & 0xffffff, pass);
+ dev_dbg(&dev->dev, "scanning [bus %02x-%02x] behind bridge, pass %d\n",
+ secondary, subordinate, pass);
/* Check if setup is sensible at all */
if (!pass &&
- ((buses & 0xff) != bus->number || ((buses >> 8) & 0xff) <= bus->number)) {
+ (primary != bus->number || secondary <= bus->number)) {
dev_dbg(&dev->dev, "bus configuration invalid, reconfiguring\n");
broken = 1;
}
@@ -693,15 +706,15 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
- if ((buses & 0xffff00) && !pcibios_assign_all_busses() && !is_cardbus && !broken) {
- unsigned int cmax, busnr;
+ if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
+ !is_cardbus && !broken) {
+ unsigned int cmax;
/*
* Bus already configured by firmware, process it in the first
* pass and just note the configuration.
*/
if (pass)
goto out;
- busnr = (buses >> 8) & 0xFF;
/*
* If we already got to this bus through a different bridge,
@@ -710,13 +723,13 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max,
* However, we continue to descend down the hierarchy and
* scan remaining child buses.
*/
- child = pci_find_bus(pci_domain_nr(bus), busnr);
+ child = pci_find_bus(pci_domain_nr(bus), secondary);
if (!child) {
- child = pci_add_new_bus(bus, dev, busnr);
+ child = pci_add_new_bus(bus, dev, secondary);
if (!child)
goto out;
- child->primary = buses & 0xFF;
- child->subordinate = (buses >> 16) & 0xFF;
+ child->primary = primary;
+ child->subordinate = subordinate;
child->bridge_ctl = bctl;
}
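
The __pci_read_base() changes above separate a BAR that is simply not implemented (the size probe reads back 0) from a device that is broken (it reads back all ones), and complain only about the latter. The sizing itself is the usual write-all-ones-and-read-back trick: the extent decoded by the BAR is one less than the lowest set address bit of the masked readback. A rough model of that arithmetic, assuming the conventional 0xFFFFFFF0 address mask of a 32-bit memory BAR:

    #include <stdio.h>
    #include <stdint.h>

    #define PCI_BASE_ADDRESS_MEM_MASK 0xFFFFFFF0u

    /* Rough model of the BAR sizing math: given the value read back after
     * writing all ones, return the decoded extent minus one, or 0 if the
     * BAR is not implemented. */
    static uint32_t bar_extent(uint32_t readback, uint32_t mask)
    {
    	uint32_t size = readback & mask;	/* significant address bits */

    	if (!size)
    		return 0;			/* BAR not implemented */
    	return (size & ~(size - 1)) - 1;	/* lowest set bit, minus one */
    }

    int main(void)
    {
    	/* a 4 KiB memory BAR reads back 0xFFFFF000 after the all-ones write */
    	printf("0xFFFFF000 -> %u bytes\n",
    	       bar_extent(0xFFFFF000u, PCI_BASE_ADDRESS_MEM_MASK) + 1);
    	/* an unimplemented BAR reads back 0 and is now skipped early */
    	printf("0x00000000 -> extent %u (not implemented)\n",
    	       bar_extent(0x00000000u, PCI_BASE_ADDRESS_MEM_MASK));
    	return 0;
    }
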
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 81d19d5683a..27c0e6eb713 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -368,8 +368,9 @@ static void __devinit quirk_io_region(struct pci_dev *dev, unsigned region,
bus_region.end = res->end;
pcibios_bus_to_resource(dev, res, &bus_region);
- pci_claim_resource(dev, nr);
- dev_info(&dev->dev, "quirk: %pR claimed by %s\n", res, name);
+ if (pci_claim_resource(dev, nr) == 0)
+ dev_info(&dev->dev, "quirk: %pR claimed by %s\n",
+ res, name);
}
}
@@ -1977,11 +1978,25 @@ static void __devinit quirk_via_cx700_pci_parking_caching(struct pci_dev *dev)
/*
* Disable PCI Bus Parking and PCI Master read caching on CX700
* which causes unspecified timing errors with a VT6212L on the PCI
- * bus leading to USB2.0 packet loss. The defaults are that these
- * features are turned off but some BIOSes turn them on.
+ * bus leading to USB2.0 packet loss.
+ *
+ * This quirk is only enabled if a second (on the external PCI bus)
+ * VT6212L is found -- the CX700 core itself also contains a USB
+ * host controller with the same PCI ID as the VT6212L.
*/
+ /* Count VT6212L instances */
+ struct pci_dev *p = pci_get_device(PCI_VENDOR_ID_VIA,
+ PCI_DEVICE_ID_VIA_8235_USB_2, NULL);
uint8_t b;
+
+ /* p should contain the first (internal) VT6212L -- see if we have
+ an external one by searching again */
+ p = pci_get_device(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8235_USB_2, p);
+ if (!p)
+ return;
+ pci_dev_put(p);
+
if (pci_read_config_byte(dev, 0x76, &b) == 0) {
if (b & 0x40) {
/* Turn off PCI Bus Parking */
@@ -2008,7 +2023,7 @@ static void __devinit quirk_via_cx700_pci_parking_caching(struct pci_dev *dev)
}
}
}
-DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_VIA, 0x324e, quirk_via_cx700_pci_parking_caching);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0x324e, quirk_via_cx700_pci_parking_caching);
/*
* For Broadcom 5706, 5708, 5709 rev. A nics, any read beyond the
@@ -2108,6 +2123,10 @@ static void __devinit quirk_disable_msi(struct pci_dev *dev)
}
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE, quirk_disable_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, 0x9602, quirk_disable_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ASUSTEK, 0x9602, quirk_disable_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AI, 0x9602, quirk_disable_msi);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, 0xa238, quirk_disable_msi);
/* Go through the list of Hypertransport capabilities and
* return 1 if a HT MSI capability is found and enabled */
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 7d678bb15ff..17bed18d24a 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -93,8 +93,7 @@ void pci_update_resource(struct pci_dev *dev, int resno)
int pci_claim_resource(struct pci_dev *dev, int resource)
{
struct resource *res = &dev->resource[resource];
- struct resource *root;
- int err;
+ struct resource *root, *conflict;
root = pci_find_parent_resource(dev, res);
if (!root) {
@@ -103,12 +102,15 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
return -EINVAL;
}
- err = request_resource(root, res);
- if (err)
+ conflict = request_resource_conflict(root, res);
+ if (conflict) {
dev_err(&dev->dev,
- "address space collision: %pR already in use\n", res);
+ "address space collision: %pR conflicts with %s %pR\n",
+ res, conflict->name, conflict);
+ return -EBUSY;
+ }
- return err;
+ return 0;
}
EXPORT_SYMBOL(pci_claim_resource);
diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c
index 5d228071ec6..fb904f444d9 100644
--- a/drivers/pcmcia/at91_cf.c
+++ b/drivers/pcmcia/at91_cf.c
@@ -361,7 +361,6 @@ static int at91_cf_suspend(struct platform_device *pdev, pm_message_t mesg)
struct at91_cf_socket *cf = platform_get_drvdata(pdev);
struct at91_cf_data *board = cf->board;
- pcmcia_socket_dev_suspend(&pdev->dev);
if (device_may_wakeup(&pdev->dev)) {
enable_irq_wake(board->det_pin);
if (board->irq_pin)
@@ -381,7 +380,6 @@ static int at91_cf_resume(struct platform_device *pdev)
disable_irq_wake(board->irq_pin);
}
- pcmcia_socket_dev_resume(&pdev->dev);
return 0;
}
diff --git a/drivers/pcmcia/au1000_generic.c b/drivers/pcmcia/au1000_generic.c
index 171c8a65488..ac4d089430f 100644
--- a/drivers/pcmcia/au1000_generic.c
+++ b/drivers/pcmcia/au1000_generic.c
@@ -510,17 +510,6 @@ static int au1x00_drv_pcmcia_probe(struct platform_device *dev)
return ret;
}
-static int au1x00_drv_pcmcia_suspend(struct platform_device *dev,
- pm_message_t state)
-{
- return pcmcia_socket_dev_suspend(&dev->dev);
-}
-
-static int au1x00_drv_pcmcia_resume(struct platform_device *dev)
-{
- return pcmcia_socket_dev_resume(&dev->dev);
-}
-
static struct platform_driver au1x00_pcmcia_driver = {
.driver = {
.name = "au1x00-pcmcia",
@@ -528,8 +517,6 @@ static struct platform_driver au1x00_pcmcia_driver = {
},
.probe = au1x00_drv_pcmcia_probe,
.remove = au1x00_drv_pcmcia_remove,
- .suspend = au1x00_drv_pcmcia_suspend,
- .resume = au1x00_drv_pcmcia_resume,
};
diff --git a/drivers/pcmcia/bfin_cf_pcmcia.c b/drivers/pcmcia/bfin_cf_pcmcia.c
index 2482ce7ac6d..93f9ddeb0c3 100644
--- a/drivers/pcmcia/bfin_cf_pcmcia.c
+++ b/drivers/pcmcia/bfin_cf_pcmcia.c
@@ -300,16 +300,6 @@ static int __devexit bfin_cf_remove(struct platform_device *pdev)
return 0;
}
-static int bfin_cf_suspend(struct platform_device *pdev, pm_message_t mesg)
-{
- return pcmcia_socket_dev_suspend(&pdev->dev);
-}
-
-static int bfin_cf_resume(struct platform_device *pdev)
-{
- return pcmcia_socket_dev_resume(&pdev->dev);
-}
-
static struct platform_driver bfin_cf_driver = {
.driver = {
.name = (char *)driver_name,
@@ -317,8 +307,6 @@ static struct platform_driver bfin_cf_driver = {
},
.probe = bfin_cf_probe,
.remove = __devexit_p(bfin_cf_remove),
- .suspend = bfin_cf_suspend,
- .resume = bfin_cf_resume,
};
static int __init bfin_cf_init(void)
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index e679e708db6..75ed866e695 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -76,65 +76,6 @@ DECLARE_RWSEM(pcmcia_socket_list_rwsem);
EXPORT_SYMBOL(pcmcia_socket_list_rwsem);
-/*
- * Low-level PCMCIA socket drivers need to register with the PCCard
- * core using pcmcia_register_socket.
- *
- * socket drivers are expected to use the following callbacks in their
- * .drv struct:
- * - pcmcia_socket_dev_suspend
- * - pcmcia_socket_dev_resume
- * These functions check for the appropriate struct pcmcia_soket arrays,
- * and pass them to the low-level functions pcmcia_{suspend,resume}_socket
- */
-static int socket_early_resume(struct pcmcia_socket *skt);
-static int socket_late_resume(struct pcmcia_socket *skt);
-static int socket_resume(struct pcmcia_socket *skt);
-static int socket_suspend(struct pcmcia_socket *skt);
-
-static void pcmcia_socket_dev_run(struct device *dev,
- int (*cb)(struct pcmcia_socket *))
-{
- struct pcmcia_socket *socket;
-
- down_read(&pcmcia_socket_list_rwsem);
- list_for_each_entry(socket, &pcmcia_socket_list, socket_list) {
- if (socket->dev.parent != dev)
- continue;
- mutex_lock(&socket->skt_mutex);
- cb(socket);
- mutex_unlock(&socket->skt_mutex);
- }
- up_read(&pcmcia_socket_list_rwsem);
-}
-
-int pcmcia_socket_dev_suspend(struct device *dev)
-{
- pcmcia_socket_dev_run(dev, socket_suspend);
- return 0;
-}
-EXPORT_SYMBOL(pcmcia_socket_dev_suspend);
-
-void pcmcia_socket_dev_early_resume(struct device *dev)
-{
- pcmcia_socket_dev_run(dev, socket_early_resume);
-}
-EXPORT_SYMBOL(pcmcia_socket_dev_early_resume);
-
-void pcmcia_socket_dev_late_resume(struct device *dev)
-{
- pcmcia_socket_dev_run(dev, socket_late_resume);
-}
-EXPORT_SYMBOL(pcmcia_socket_dev_late_resume);
-
-int pcmcia_socket_dev_resume(struct device *dev)
-{
- pcmcia_socket_dev_run(dev, socket_resume);
- return 0;
-}
-EXPORT_SYMBOL(pcmcia_socket_dev_resume);
-
-
struct pcmcia_socket *pcmcia_get_socket(struct pcmcia_socket *skt)
{
struct device *dev = get_device(&skt->dev);
@@ -578,12 +519,18 @@ static int socket_early_resume(struct pcmcia_socket *skt)
static int socket_late_resume(struct pcmcia_socket *skt)
{
+ int ret;
+
mutex_lock(&skt->ops_mutex);
skt->state &= ~SOCKET_SUSPEND;
mutex_unlock(&skt->ops_mutex);
- if (!(skt->state & SOCKET_PRESENT))
- return socket_insert(skt);
+ if (!(skt->state & SOCKET_PRESENT)) {
+ ret = socket_insert(skt);
+ if (ret == -ENODEV)
+ ret = 0;
+ return ret;
+ }
if (skt->resume_status) {
socket_shutdown(skt);
@@ -919,11 +866,66 @@ static void pcmcia_release_socket_class(struct class *data)
}
+#ifdef CONFIG_PM
+
+static int __pcmcia_pm_op(struct device *dev,
+ int (*callback) (struct pcmcia_socket *skt))
+{
+ struct pcmcia_socket *s = container_of(dev, struct pcmcia_socket, dev);
+ int ret;
+
+ mutex_lock(&s->skt_mutex);
+ ret = callback(s);
+ mutex_unlock(&s->skt_mutex);
+
+ return ret;
+}
+
+static int pcmcia_socket_dev_suspend_noirq(struct device *dev)
+{
+ return __pcmcia_pm_op(dev, socket_suspend);
+}
+
+static int pcmcia_socket_dev_resume_noirq(struct device *dev)
+{
+ return __pcmcia_pm_op(dev, socket_early_resume);
+}
+
+static int pcmcia_socket_dev_resume(struct device *dev)
+{
+ return __pcmcia_pm_op(dev, socket_late_resume);
+}
+
+static const struct dev_pm_ops pcmcia_socket_pm_ops = {
+ /* dev_resume may be called with IRQs enabled */
+ SET_SYSTEM_SLEEP_PM_OPS(NULL,
+ pcmcia_socket_dev_resume)
+
+ /* late suspend must be called with IRQs disabled */
+ .suspend_noirq = pcmcia_socket_dev_suspend_noirq,
+ .freeze_noirq = pcmcia_socket_dev_suspend_noirq,
+ .poweroff_noirq = pcmcia_socket_dev_suspend_noirq,
+
+ /* early resume must be called with IRQs disabled */
+ .resume_noirq = pcmcia_socket_dev_resume_noirq,
+ .thaw_noirq = pcmcia_socket_dev_resume_noirq,
+ .restore_noirq = pcmcia_socket_dev_resume_noirq,
+};
+
+#define PCMCIA_SOCKET_CLASS_PM_OPS (&pcmcia_socket_pm_ops)
+
+#else /* CONFIG_PM */
+
+#define PCMCIA_SOCKET_CLASS_PM_OPS NULL
+
+#endif /* CONFIG_PM */
+
struct class pcmcia_socket_class = {
.name = "pcmcia_socket",
.dev_uevent = pcmcia_socket_uevent,
.dev_release = pcmcia_release_socket,
.class_release = pcmcia_release_socket_class,
+ .pm = PCMCIA_SOCKET_CLASS_PM_OPS,
};
EXPORT_SYMBOL(pcmcia_socket_class);
diff --git a/drivers/pcmcia/db1xxx_ss.c b/drivers/pcmcia/db1xxx_ss.c
index 9254ab0b29b..a520193b645 100644
--- a/drivers/pcmcia/db1xxx_ss.c
+++ b/drivers/pcmcia/db1xxx_ss.c
@@ -558,37 +558,10 @@ static int __devexit db1x_pcmcia_socket_remove(struct platform_device *pdev)
return 0;
}
-#ifdef CONFIG_PM
-static int db1x_pcmcia_suspend(struct device *dev)
-{
- return pcmcia_socket_dev_suspend(dev);
-}
-
-static int db1x_pcmcia_resume(struct device *dev)
-{
- return pcmcia_socket_dev_resume(dev);
-}
-
-static struct dev_pm_ops db1x_pcmcia_pmops = {
- .resume = db1x_pcmcia_resume,
- .suspend = db1x_pcmcia_suspend,
- .thaw = db1x_pcmcia_resume,
- .freeze = db1x_pcmcia_suspend,
-};
-
-#define DB1XXX_SS_PMOPS &db1x_pcmcia_pmops
-
-#else
-
-#define DB1XXX_SS_PMOPS NULL
-
-#endif
-
static struct platform_driver db1x_pcmcia_socket_driver = {
.driver = {
.name = "db1xxx_pcmcia",
.owner = THIS_MODULE,
- .pm = DB1XXX_SS_PMOPS
},
.probe = db1x_pcmcia_socket_probe,
.remove = __devexit_p(db1x_pcmcia_socket_remove),
diff --git a/drivers/pcmcia/ds.c b/drivers/pcmcia/ds.c
index ad93ebd7b2a..52d33b2a5bc 100644
--- a/drivers/pcmcia/ds.c
+++ b/drivers/pcmcia/ds.c
@@ -509,8 +509,12 @@ struct pcmcia_device *pcmcia_device_add(struct pcmcia_socket *s, unsigned int fu
p_dev->device_no = (s->device_count++);
mutex_unlock(&s->ops_mutex);
- /* max of 2 devices per card */
- if (p_dev->device_no >= 2)
+ /* max of 2 PFC devices */
+ if ((p_dev->device_no >= 2) && (function == 0))
+ goto err_free;
+
+ /* max of 4 devices overall */
+ if (p_dev->device_no >= 4)
goto err_free;
p_dev->socket = s;
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c
index f5da6265331..3003bb3dfcc 100644
--- a/drivers/pcmcia/i82092.c
+++ b/drivers/pcmcia/i82092.c
@@ -39,27 +39,11 @@ static struct pci_device_id i82092aa_pci_ids[] = {
};
MODULE_DEVICE_TABLE(pci, i82092aa_pci_ids);
-#ifdef CONFIG_PM
-static int i82092aa_socket_suspend (struct pci_dev *dev, pm_message_t state)
-{
- return pcmcia_socket_dev_suspend(&dev->dev);
-}
-
-static int i82092aa_socket_resume (struct pci_dev *dev)
-{
- return pcmcia_socket_dev_resume(&dev->dev);
-}
-#endif
-
static struct pci_driver i82092aa_pci_driver = {
.name = "i82092aa",
.id_table = i82092aa_pci_ids,
.probe = i82092aa_pci_probe,
.remove = __devexit_p(i82092aa_pci_remove),
-#ifdef CONFIG_PM
- .suspend = i82092aa_socket_suspend,
- .resume = i82092aa_socket_resume,
-#endif
};
diff --git a/drivers/pcmcia/i82365.c b/drivers/pcmcia/i82365.c
index c13fd936051..d53d9b5659c 100644
--- a/drivers/pcmcia/i82365.c
+++ b/drivers/pcmcia/i82365.c
@@ -1223,16 +1223,7 @@ static int pcic_init(struct pcmcia_socket *s)
return 0;
}
-static int i82365_drv_pcmcia_suspend(struct platform_device *dev,
- pm_message_t state)
-{
- return pcmcia_socket_dev_suspend(&dev->dev);
-}
-static int i82365_drv_pcmcia_resume(struct platform_device *dev)
-{
- return pcmcia_socket_dev_resume(&dev->dev);
-}
static struct pccard_operations pcic_operations = {
.init = pcic_init,
.get_status = pcic_get_status,
@@ -1248,8 +1239,6 @@ static struct platform_driver i82365_driver = {
.name = "i82365",
.owner = THIS_MODULE,
},
- .suspend = i82365_drv_pcmcia_suspend,
- .resume = i82365_drv_pcmcia_resume,
};
static struct platform_device *i82365_device;
diff --git a/drivers/pcmcia/m32r_cfc.c b/drivers/pcmcia/m32r_cfc.c
index 0ece2cd4a85..ab21264468d 100644
--- a/drivers/pcmcia/m32r_cfc.c
+++ b/drivers/pcmcia/m32r_cfc.c
@@ -685,16 +685,7 @@ static struct pccard_operations pcc_operations = {
.set_mem_map = pcc_set_mem_map,
};
-static int cfc_drv_pcmcia_suspend(struct platform_device *dev,
- pm_message_t state)
-{
- return pcmcia_socket_dev_suspend(&dev->dev);
-}
-static int cfc_drv_pcmcia_resume(struct platform_device *dev)
-{
- return pcmcia_socket_dev_resume(&dev->dev);
-}
/*====================================================================*/
static struct platform_driver pcc_driver = {
@@ -702,8 +693,6 @@ static struct platform_driver pcc_driver = {
.name = "cfc",
.owner = THIS_MODULE,
},
- .suspend = cfc_drv_pcmcia_suspend,
- .resume = cfc_drv_pcmcia_resume,
};
static struct platform_device pcc_device = {
diff --git a/drivers/pcmcia/m32r_pcc.c b/drivers/pcmcia/m32r_pcc.c
index 72844c5a6d0..0caf3db7c70 100644
--- a/drivers/pcmcia/m32r_pcc.c
+++ b/drivers/pcmcia/m32r_pcc.c
@@ -663,16 +663,6 @@ static struct pccard_operations pcc_operations = {
.set_mem_map = pcc_set_mem_map,
};
-static int pcc_drv_pcmcia_suspend(struct platform_device *dev,
- pm_message_t state)
-{
- return pcmcia_socket_dev_suspend(&dev->dev);
-}
-
-static int pcc_drv_pcmcia_resume(struct platform_device *dev)
-{
- return pcmcia_socket_dev_resume(&dev->dev);
-}
/*====================================================================*/
static struct platform_driver pcc_driver = {
@@ -680,8 +670,6 @@ static struct platform_driver pcc_driver = {
.name = "pcc",
.owner = THIS_MODULE,
},
- .suspend = pcc_drv_pcmcia_suspend,
- .resume = pcc_drv_pcmcia_resume,
};
static struct platform_device pcc_device = {
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
index 61c21591812..01ef7de1532 100644
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -1288,21 +1288,6 @@ static int m8xx_remove(struct of_device *ofdev)
return 0;
}
-#ifdef CONFIG_PM
-static int m8xx_suspend(struct platform_device *pdev, pm_message_t state)
-{
- return pcmcia_socket_dev_suspend(&pdev->dev);
-}
-
-static int m8xx_resume(struct platform_device *pdev)
-{
- return pcmcia_socket_dev_resume(&pdev->dev);
-}
-#else
-#define m8xx_suspend NULL
-#define m8xx_resume NULL
-#endif
-
static const struct of_device_id m8xx_pcmcia_match[] = {
{
.type = "pcmcia",
@@ -1318,8 +1303,6 @@ static struct of_platform_driver m8xx_pcmcia_driver = {
.match_table = m8xx_pcmcia_match,
.probe = m8xx_probe,
.remove = m8xx_remove,
- .suspend = m8xx_suspend,
- .resume = m8xx_resume,
};
static int __init m8xx_init(void)
diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
index 3ef99155239..9edc396577b 100644
--- a/drivers/pcmcia/omap_cf.c
+++ b/drivers/pcmcia/omap_cf.c
@@ -330,24 +330,12 @@ static int __exit omap_cf_remove(struct platform_device *pdev)
return 0;
}
-static int omap_cf_suspend(struct platform_device *pdev, pm_message_t mesg)
-{
- return pcmcia_socket_dev_suspend(&pdev->dev);
-}
-
-static int omap_cf_resume(struct platform_device *pdev)
-{
- return pcmcia_socket_dev_resume(&pdev->dev);
-}
-
static struct platform_driver omap_cf_driver = {
.driver = {
.name = (char *) driver_name,
.owner = THIS_MODULE,
},
.remove = __exit_p(omap_cf_remove),
- .suspend = omap_cf_suspend,
- .resume = omap_cf_resume,
};
static int __init omap_cf_init(void)
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index 7ba57a565cd..4a34268cc51 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -14,13 +14,13 @@
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/device.h>
+#include <linux/io.h>
#include <pcmcia/cs_types.h>
#include <pcmcia/ss.h>
#include <pcmcia/cs.h>
#include <asm/system.h>
-#include <asm/io.h>
#include "pd6729.h"
#include "i82365.h"
@@ -222,9 +222,9 @@ static irqreturn_t pd6729_interrupt(int irq, void *dev)
? SS_READY : 0;
}
- if (events) {
+ if (events)
pcmcia_parse_events(&socket[i].socket, events);
- }
+
active |= events;
}
@@ -256,9 +256,8 @@ static int pd6729_get_status(struct pcmcia_socket *sock, u_int *value)
status = indirect_read(socket, I365_STATUS);
*value = 0;
- if ((status & I365_CS_DETECT) == I365_CS_DETECT) {
+ if ((status & I365_CS_DETECT) == I365_CS_DETECT)
*value |= SS_DETECT;
- }
/*
* IO cards have a different meaning of bits 0,1
@@ -308,7 +307,7 @@ static int pd6729_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
socket->card_irq = state->io_irq;
reg = 0;
- /* The reset bit has "inverse" logic */
+ /* The reset bit has "inverse" logic */
if (!(state->flags & SS_RESET))
reg |= I365_PC_RESET;
if (state->flags & SS_IOCARD)
@@ -380,7 +379,7 @@ static int pd6729_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
indirect_write(socket, I365_POWER, reg);
if (irq_mode == 1) {
- /* all interrupts are to be done as PCI interrupts */
+ /* all interrupts are to be done as PCI interrupts */
data = PD67_EC1_INV_MGMT_IRQ | PD67_EC1_INV_CARD_IRQ;
} else
data = 0;
@@ -391,9 +390,9 @@ static int pd6729_set_socket(struct pcmcia_socket *sock, socket_state_t *state)
/* Enable specific interrupt events */
reg = 0x00;
- if (state->csc_mask & SS_DETECT) {
+ if (state->csc_mask & SS_DETECT)
reg |= I365_CSC_DETECT;
- }
+
if (state->flags & SS_IOCARD) {
if (state->csc_mask & SS_STSCHG)
reg |= I365_CSC_STSCHG;
@@ -450,9 +449,12 @@ static int pd6729_set_io_map(struct pcmcia_socket *sock,
ioctl = indirect_read(socket, I365_IOCTL) & ~I365_IOCTL_MASK(map);
- if (io->flags & MAP_0WS) ioctl |= I365_IOCTL_0WS(map);
- if (io->flags & MAP_16BIT) ioctl |= I365_IOCTL_16BIT(map);
- if (io->flags & MAP_AUTOSZ) ioctl |= I365_IOCTL_IOCS16(map);
+ if (io->flags & MAP_0WS)
+ ioctl |= I365_IOCTL_0WS(map);
+ if (io->flags & MAP_16BIT)
+ ioctl |= I365_IOCTL_16BIT(map);
+ if (io->flags & MAP_AUTOSZ)
+ ioctl |= I365_IOCTL_IOCS16(map);
indirect_write(socket, I365_IOCTL, ioctl);
@@ -497,7 +499,7 @@ static int pd6729_set_mem_map(struct pcmcia_socket *sock,
/* write the stop address */
- i= (mem->res->end >> 12) & 0x0fff;
+ i = (mem->res->end >> 12) & 0x0fff;
switch (to_cycles(mem->speed)) {
case 0:
break;
@@ -563,7 +565,7 @@ static int pd6729_init(struct pcmcia_socket *sock)
/* the pccard structure and its functions */
static struct pccard_operations pd6729_operations = {
- .init = pd6729_init,
+ .init = pd6729_init,
.get_status = pd6729_get_status,
.set_socket = pd6729_set_socket,
.set_io_map = pd6729_set_io_map,
@@ -578,8 +580,13 @@ static irqreturn_t pd6729_test(int irq, void *dev)
static int pd6729_check_irq(int irq)
{
- if (request_irq(irq, pd6729_test, IRQF_PROBE_SHARED, "x", pd6729_test)
- != 0) return -1;
+ int ret;
+
+ ret = request_irq(irq, pd6729_test, IRQF_PROBE_SHARED, "x",
+ pd6729_test);
+ if (ret)
+ return -1;
+
free_irq(irq, pd6729_test);
return 0;
}
@@ -591,7 +598,7 @@ static u_int __devinit pd6729_isa_scan(void)
if (irq_mode == 1) {
printk(KERN_INFO "pd6729: PCI card interrupts, "
- "PCI status changes\n");
+ "PCI status changes\n");
return 0;
}
@@ -607,9 +614,10 @@ static u_int __devinit pd6729_isa_scan(void)
if (mask & (1<<i))
printk("%s%d", ((mask & ((1<<i)-1)) ? "," : ""), i);
- if (mask == 0) printk("none!");
-
- printk(" polling status changes.\n");
+ if (mask == 0)
+ printk("none!");
+ else
+ printk(" polling status changes.\n");
return mask;
}
@@ -624,11 +632,16 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev,
socket = kzalloc(sizeof(struct pd6729_socket) * MAX_SOCKETS,
GFP_KERNEL);
- if (!socket)
+ if (!socket) {
+ dev_warn(&dev->dev, "failed to kzalloc socket.\n");
return -ENOMEM;
+ }
- if ((ret = pci_enable_device(dev)))
+ ret = pci_enable_device(dev);
+ if (ret) {
+ dev_warn(&dev->dev, "failed to enable pci_device.\n");
goto err_out_free_mem;
+ }
if (!pci_resource_start(dev, 0)) {
dev_warn(&dev->dev, "refusing to load the driver as the "
@@ -639,7 +652,7 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev,
dev_info(&dev->dev, "Cirrus PD6729 PCI to PCMCIA Bridge at 0x%llx "
"on irq %d\n",
(unsigned long long)pci_resource_start(dev, 0), dev->irq);
- /*
+ /*
* Since we have no memory BARs some firmware may not
* have had PCI_COMMAND_MEMORY enabled, yet the device needs it.
*/
@@ -685,8 +698,9 @@ static int __devinit pd6729_pci_probe(struct pci_dev *dev,
pci_set_drvdata(dev, socket);
if (irq_mode == 1) {
/* Register the interrupt handler */
- if ((ret = request_irq(dev->irq, pd6729_interrupt, IRQF_SHARED,
- "pd6729", socket))) {
+ ret = request_irq(dev->irq, pd6729_interrupt, IRQF_SHARED,
+ "pd6729", socket);
+ if (ret) {
dev_err(&dev->dev, "Failed to register irq %d\n",
dev->irq);
goto err_out_free_res;
@@ -750,18 +764,6 @@ static void __devexit pd6729_pci_remove(struct pci_dev *dev)
kfree(socket);
}
-#ifdef CONFIG_PM
-static int pd6729_socket_suspend(struct pci_dev *dev, pm_message_t state)
-{
- return pcmcia_socket_dev_suspend(&dev->dev);
-}
-
-static int pd6729_socket_resume(struct pci_dev *dev)
-{
- return pcmcia_socket_dev_resume(&dev->dev);
-}
-#endif
-
static struct pci_device_id pd6729_pci_ids[] = {
{
.vendor = PCI_VENDOR_ID_CIRRUS,
@@ -778,10 +780,6 @@ static struct pci_driver pd6729_pci_driver = {
.id_table = pd6729_pci_ids,
.probe = pd6729_pci_probe,
.remove = __devexit_p(pd6729_pci_remove),
-#ifdef CONFIG_PM
- .suspend = pd6729_socket_suspend,
- .resume = pd6729_socket_resume,
-#endif
};
static int pd6729_module_init(void)
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index 76e640bccde..0a876fabfe4 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -325,19 +325,13 @@ static int pxa2xx_drv_pcmcia_remove(struct platform_device *dev)
return 0;
}
-static int pxa2xx_drv_pcmcia_suspend(struct device *dev)
-{
- return pcmcia_socket_dev_suspend(dev);
-}
-
static int pxa2xx_drv_pcmcia_resume(struct device *dev)
{
pxa2xx_configure_sockets(dev);
- return pcmcia_socket_dev_resume(dev);
+ return 0;
}
static const struct dev_pm_ops pxa2xx_drv_pcmcia_pm_ops = {
- .suspend = pxa2xx_drv_pcmcia_suspend,
.resume = pxa2xx_drv_pcmcia_resume,
};
diff --git a/drivers/pcmcia/rsrc_nonstatic.c b/drivers/pcmcia/rsrc_nonstatic.c
index 4663b3fa9f9..2e47991eccf 100644
--- a/drivers/pcmcia/rsrc_nonstatic.c
+++ b/drivers/pcmcia/rsrc_nonstatic.c
@@ -810,6 +810,13 @@ static int adjust_io(struct pcmcia_socket *s, unsigned int action, unsigned long
unsigned long size = end - start + 1;
int ret = 0;
+#if defined(CONFIG_X86)
+ /* on x86, avoid anything < 0x100 for it is often used for
+ * legacy platform devices */
+ if (start < 0x100)
+ start = 0x100;
+#endif
+
if (end < start)
return -EINVAL;
@@ -867,10 +874,8 @@ static int nonstatic_autoadd_resources(struct pcmcia_socket *s)
if (res == &ioport_resource)
continue;
dev_printk(KERN_INFO, &s->cb_dev->dev,
- "pcmcia: parent PCI bridge I/O "
- "window: 0x%llx - 0x%llx\n",
- (unsigned long long)res->start,
- (unsigned long long)res->end);
+ "pcmcia: parent PCI bridge window: %pR\n",
+ res);
if (!adjust_io(s, ADD_MANAGED_RESOURCE, res->start, res->end))
done |= IORESOURCE_IO;
@@ -880,10 +885,8 @@ static int nonstatic_autoadd_resources(struct pcmcia_socket *s)
if (res == &iomem_resource)
continue;
dev_printk(KERN_INFO, &s->cb_dev->dev,
- "pcmcia: parent PCI bridge Memory "
- "window: 0x%llx - 0x%llx\n",
- (unsigned long long)res->start,
- (unsigned long long)res->end);
+ "pcmcia: parent PCI bridge window: %pR\n",
+ res);
if (!adjust_memory(s, ADD_MANAGED_RESOURCE, res->start, res->end))
done |= IORESOURCE_MEM;
}
diff --git a/drivers/pcmcia/sa1100_generic.c b/drivers/pcmcia/sa1100_generic.c
index 8db86b90c20..51889624142 100644
--- a/drivers/pcmcia/sa1100_generic.c
+++ b/drivers/pcmcia/sa1100_generic.c
@@ -95,17 +95,6 @@ static int sa11x0_drv_pcmcia_remove(struct platform_device *dev)
return 0;
}
-static int sa11x0_drv_pcmcia_suspend(struct platform_device *dev,
- pm_message_t state)
-{
- return pcmcia_socket_dev_suspend(&dev->dev);
-}
-
-static int sa11x0_drv_pcmcia_resume(struct platform_device *dev)
-{
- return pcmcia_socket_dev_resume(&dev->dev);
-}
-
static struct platform_driver sa11x0_pcmcia_driver = {
.driver = {
.name = "sa11x0-pcmcia",
@@ -113,8 +102,6 @@ static struct platform_driver sa11x0_pcmcia_driver = {
},
.probe = sa11x0_drv_pcmcia_probe,
.remove = sa11x0_drv_pcmcia_remove,
- .suspend = sa11x0_drv_pcmcia_suspend,
- .resume = sa11x0_drv_pcmcia_resume,
};
/* sa11x0_pcmcia_init()
diff --git a/drivers/pcmcia/sa1111_generic.c b/drivers/pcmcia/sa1111_generic.c
index db79ca61cf9..799e9793e49 100644
--- a/drivers/pcmcia/sa1111_generic.c
+++ b/drivers/pcmcia/sa1111_generic.c
@@ -213,16 +213,6 @@ static int __devexit pcmcia_remove(struct sa1111_dev *dev)
return 0;
}
-static int pcmcia_suspend(struct sa1111_dev *dev, pm_message_t state)
-{
- return pcmcia_socket_dev_suspend(&dev->dev);
-}
-
-static int pcmcia_resume(struct sa1111_dev *dev)
-{
- return pcmcia_socket_dev_resume(&dev->dev);
-}
-
static struct sa1111_driver pcmcia_driver = {
.drv = {
.name = "sa1111-pcmcia",
@@ -230,8 +220,6 @@ static struct sa1111_driver pcmcia_driver = {
.devid = SA1111_DEVID_PCMCIA,
.probe = pcmcia_probe,
.remove = __devexit_p(pcmcia_remove),
- .suspend = pcmcia_suspend,
- .resume = pcmcia_resume,
};
static int __init sa1111_drv_pcmcia_init(void)
diff --git a/drivers/pcmcia/tcic.c b/drivers/pcmcia/tcic.c
index 12c49ee135e..bac85f3236b 100644
--- a/drivers/pcmcia/tcic.c
+++ b/drivers/pcmcia/tcic.c
@@ -348,16 +348,6 @@ static int __init get_tcic_id(void)
return id;
}
-static int tcic_drv_pcmcia_suspend(struct platform_device *dev,
- pm_message_t state)
-{
- return pcmcia_socket_dev_suspend(&dev->dev);
-}
-
-static int tcic_drv_pcmcia_resume(struct platform_device *dev)
-{
- return pcmcia_socket_dev_resume(&dev->dev);
-}
/*====================================================================*/
static struct platform_driver tcic_driver = {
@@ -365,8 +355,6 @@ static struct platform_driver tcic_driver = {
.name = "tcic-pcmcia",
.owner = THIS_MODULE,
},
- .suspend = tcic_drv_pcmcia_suspend,
- .resume = tcic_drv_pcmcia_resume,
};
static struct platform_device tcic_device = {
diff --git a/drivers/pcmcia/vrc4171_card.c b/drivers/pcmcia/vrc4171_card.c
index aaccdb9f4ba..86e4a1a3c64 100644
--- a/drivers/pcmcia/vrc4171_card.c
+++ b/drivers/pcmcia/vrc4171_card.c
@@ -705,24 +705,11 @@ static int __devinit vrc4171_card_setup(char *options)
__setup("vrc4171_card=", vrc4171_card_setup);
-static int vrc4171_card_suspend(struct platform_device *dev,
- pm_message_t state)
-{
- return pcmcia_socket_dev_suspend(&dev->dev);
-}
-
-static int vrc4171_card_resume(struct platform_device *dev)
-{
- return pcmcia_socket_dev_resume(&dev->dev);
-}
-
static struct platform_driver vrc4171_card_driver = {
.driver = {
.name = vrc4171_card_name,
.owner = THIS_MODULE,
},
- .suspend = vrc4171_card_suspend,
- .resume = vrc4171_card_resume,
};
static int __devinit vrc4171_card_init(void)
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 418988ab6ed..f19ad02374d 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -1290,12 +1290,9 @@ static int yenta_dev_suspend_noirq(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct yenta_socket *socket = pci_get_drvdata(pdev);
- int ret;
-
- ret = pcmcia_socket_dev_suspend(dev);
if (!socket)
- return ret;
+ return 0;
if (socket->type && socket->type->save_state)
socket->type->save_state(socket);
@@ -1312,7 +1309,7 @@ static int yenta_dev_suspend_noirq(struct device *dev)
*/
/* pci_set_power_state(dev, 3); */
- return ret;
+ return 0;
}
static int yenta_dev_resume_noirq(struct device *dev)
@@ -1336,26 +1333,16 @@ static int yenta_dev_resume_noirq(struct device *dev)
if (socket->type && socket->type->restore_state)
socket->type->restore_state(socket);
- pcmcia_socket_dev_early_resume(dev);
- return 0;
-}
-
-static int yenta_dev_resume(struct device *dev)
-{
- pcmcia_socket_dev_late_resume(dev);
return 0;
}
static const struct dev_pm_ops yenta_pm_ops = {
.suspend_noirq = yenta_dev_suspend_noirq,
.resume_noirq = yenta_dev_resume_noirq,
- .resume = yenta_dev_resume,
.freeze_noirq = yenta_dev_suspend_noirq,
.thaw_noirq = yenta_dev_resume_noirq,
- .thaw = yenta_dev_resume,
.poweroff_noirq = yenta_dev_suspend_noirq,
.restore_noirq = yenta_dev_resume_noirq,
- .restore = yenta_dev_resume,
};
#define YENTA_PM_OPS (&yenta_pm_ops)
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index e631dbeafd7..7bec4588c26 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -385,6 +385,16 @@ config EEEPC_LAPTOP
If you have an Eee PC laptop, say Y or M here.
+config EEEPC_WMI
+ tristate "Eee PC WMI Hotkey Driver (EXPERIMENTAL)"
+ depends on ACPI_WMI
+ depends on INPUT
+ depends on EXPERIMENTAL
+ ---help---
+ Say Y here if you want to support WMI-based hotkeys on Eee PC laptops.
+
+ To compile this driver as a module, choose M here: the module will
+ be called eeepc-wmi.
config ACPI_WMI
tristate "WMI"
diff --git a/drivers/platform/x86/Makefile b/drivers/platform/x86/Makefile
index 9cd9fa0a27e..a906490e353 100644
--- a/drivers/platform/x86/Makefile
+++ b/drivers/platform/x86/Makefile
@@ -4,6 +4,7 @@
#
obj-$(CONFIG_ASUS_LAPTOP) += asus-laptop.o
obj-$(CONFIG_EEEPC_LAPTOP) += eeepc-laptop.o
+obj-$(CONFIG_EEEPC_WMI) += eeepc-wmi.o
obj-$(CONFIG_MSI_LAPTOP) += msi-laptop.o
obj-$(CONFIG_ACPI_CMPC) += classmate-laptop.o
obj-$(CONFIG_COMPAL_LAPTOP) += compal-laptop.o
diff --git a/drivers/platform/x86/asus-laptop.c b/drivers/platform/x86/asus-laptop.c
index db5f7db2ba3..475ab50732a 100644
--- a/drivers/platform/x86/asus-laptop.c
+++ b/drivers/platform/x86/asus-laptop.c
@@ -139,7 +139,7 @@ MODULE_PARM_DESC(bluetooth_status, "Set the wireless status on boot "
/* Backlight */
static acpi_handle lcd_switch_handle;
-static const char *lcd_switch_paths[] = {
+static char *lcd_switch_paths[] = {
"\\_SB.PCI0.SBRG.EC0._Q10", /* All new models */
"\\_SB.PCI0.ISA.EC0._Q10", /* A1x */
"\\_SB.PCI0.PX40.ECD0._Q10", /* L3C */
@@ -153,7 +153,7 @@ static const char *lcd_switch_paths[] = {
#define METHOD_SWITCH_DISPLAY "SDSP"
static acpi_handle display_get_handle;
-static const char *display_get_paths[] = {
+static char *display_get_paths[] = {
/* A6B, A6K A6R A7D F3JM L4R M6R A3G M6A M6V VX-1 V6J V6V W3Z */
"\\_SB.PCI0.P0P1.VGA.GETD",
/* A3E A4K, A4D A4L A6J A7J A8J Z71V M9V S5A M5A z33A W1Jc W2V G1 */
diff --git a/drivers/platform/x86/eeepc-wmi.c b/drivers/platform/x86/eeepc-wmi.c
new file mode 100644
index 00000000000..2466b7b7fb0
--- /dev/null
+++ b/drivers/platform/x86/eeepc-wmi.c
@@ -0,0 +1,157 @@
+/*
+ * Eee PC WMI hotkey driver
+ *
+ * Copyright(C) 2010 Intel Corporation.
+ *
+ * Portions based on wistron_btns.c:
+ * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz>
+ * Copyright (C) 2005 Bernhard Rosenkraenzer <bero@arklinux.org>
+ * Copyright (C) 2005 Dmitry Torokhov <dtor@mail.ru>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/input.h>
+#include <linux/input/sparse-keymap.h>
+#include <acpi/acpi_bus.h>
+#include <acpi/acpi_drivers.h>
+
+MODULE_AUTHOR("Yong Wang <yong.y.wang@intel.com>");
+MODULE_DESCRIPTION("Eee PC WMI Hotkey Driver");
+MODULE_LICENSE("GPL");
+
+#define EEEPC_WMI_EVENT_GUID "ABBC0F72-8EA1-11D1-00A0-C90629100000"
+
+MODULE_ALIAS("wmi:"EEEPC_WMI_EVENT_GUID);
+
+#define NOTIFY_BRNUP_MIN 0x11
+#define NOTIFY_BRNUP_MAX 0x1f
+#define NOTIFY_BRNDOWN_MIN 0x20
+#define NOTIFY_BRNDOWN_MAX 0x2e
+
+static const struct key_entry eeepc_wmi_keymap[] = {
+ /* Sleep already handled via generic ACPI code */
+ { KE_KEY, 0x5d, { KEY_WLAN } },
+ { KE_KEY, 0x32, { KEY_MUTE } },
+ { KE_KEY, 0x31, { KEY_VOLUMEDOWN } },
+ { KE_KEY, 0x30, { KEY_VOLUMEUP } },
+ { KE_IGNORE, NOTIFY_BRNDOWN_MIN, { KEY_BRIGHTNESSDOWN } },
+ { KE_IGNORE, NOTIFY_BRNUP_MIN, { KEY_BRIGHTNESSUP } },
+ { KE_KEY, 0xcc, { KEY_SWITCHVIDEOMODE } },
+ { KE_END, 0},
+};
+
+static struct input_dev *eeepc_wmi_input_dev;
+
+static void eeepc_wmi_notify(u32 value, void *context)
+{
+ struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL };
+ union acpi_object *obj;
+ acpi_status status;
+ int code;
+
+ status = wmi_get_event_data(value, &response);
+ if (status != AE_OK) {
+ pr_err("EEEPC WMI: bad event status 0x%x\n", status);
+ return;
+ }
+
+ obj = (union acpi_object *)response.pointer;
+
+ if (obj && obj->type == ACPI_TYPE_INTEGER) {
+ code = obj->integer.value;
+
+ if (code >= NOTIFY_BRNUP_MIN && code <= NOTIFY_BRNUP_MAX)
+ code = NOTIFY_BRNUP_MIN;
+ else if (code >= NOTIFY_BRNDOWN_MIN && code <= NOTIFY_BRNDOWN_MAX)
+ code = NOTIFY_BRNDOWN_MIN;
+
+ if (!sparse_keymap_report_event(eeepc_wmi_input_dev,
+ code, 1, true))
+ pr_info("EEEPC WMI: Unknown key %x pressed\n", code);
+ }
+
+ kfree(obj);
+}
+
+static int eeepc_wmi_input_setup(void)
+{
+ int err;
+
+ eeepc_wmi_input_dev = input_allocate_device();
+ if (!eeepc_wmi_input_dev)
+ return -ENOMEM;
+
+ eeepc_wmi_input_dev->name = "Eee PC WMI hotkeys";
+ eeepc_wmi_input_dev->phys = "wmi/input0";
+ eeepc_wmi_input_dev->id.bustype = BUS_HOST;
+
+ err = sparse_keymap_setup(eeepc_wmi_input_dev, eeepc_wmi_keymap, NULL);
+ if (err)
+ goto err_free_dev;
+
+ err = input_register_device(eeepc_wmi_input_dev);
+ if (err)
+ goto err_free_keymap;
+
+ return 0;
+
+err_free_keymap:
+ sparse_keymap_free(eeepc_wmi_input_dev);
+err_free_dev:
+ input_free_device(eeepc_wmi_input_dev);
+ return err;
+}
+
+static int __init eeepc_wmi_init(void)
+{
+ int err;
+ acpi_status status;
+
+ if (!wmi_has_guid(EEEPC_WMI_EVENT_GUID)) {
+ pr_warning("EEEPC WMI: No known WMI GUID found\n");
+ return -ENODEV;
+ }
+
+ err = eeepc_wmi_input_setup();
+ if (err)
+ return err;
+
+ status = wmi_install_notify_handler(EEEPC_WMI_EVENT_GUID,
+ eeepc_wmi_notify, NULL);
+ if (ACPI_FAILURE(status)) {
+ sparse_keymap_free(eeepc_wmi_input_dev);
+ input_unregister_device(eeepc_wmi_input_dev);
+ pr_err("EEEPC WMI: Unable to register notify handler - %d\n",
+ status);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static void __exit eeepc_wmi_exit(void)
+{
+ wmi_remove_notify_handler(EEEPC_WMI_EVENT_GUID);
+ sparse_keymap_free(eeepc_wmi_input_dev);
+ input_unregister_device(eeepc_wmi_input_dev);
+}
+
+module_init(eeepc_wmi_init);
+module_exit(eeepc_wmi_exit);
diff --git a/drivers/serial/serial_cs.c b/drivers/serial/serial_cs.c
index e91db4b3801..175d202ab37 100644
--- a/drivers/serial/serial_cs.c
+++ b/drivers/serial/serial_cs.c
@@ -745,6 +745,7 @@ static struct pcmcia_device_id serial_ids[] = {
PCMCIA_PFC_DEVICE_PROD_ID13(1, "Xircom", "REM10", 0x2e3ee845, 0x76df1d29),
PCMCIA_PFC_DEVICE_PROD_ID13(1, "Xircom", "XEM5600", 0x2e3ee845, 0xf1403719),
PCMCIA_PFC_DEVICE_PROD_ID12(1, "AnyCom", "Fast Ethernet + 56K COMBO", 0x578ba6e7, 0xb0ac62c4),
+ PCMCIA_PFC_DEVICE_PROD_ID12(1, "ATKK", "LM33-PCM-T", 0xba9eb7e2, 0x077c174e),
PCMCIA_PFC_DEVICE_PROD_ID12(1, "D-Link", "DME336T", 0x1a424a1c, 0xb23897ff),
PCMCIA_PFC_DEVICE_PROD_ID12(1, "Gateway 2000", "XJEM3336", 0xdd9989be, 0x662c394c),
PCMCIA_PFC_DEVICE_PROD_ID12(1, "Grey Cell", "GCS3000", 0x2a151fac, 0x48b932ae),
diff --git a/drivers/serial/sunsu.c b/drivers/serial/sunsu.c
index 170d3d68c8f..cbcfb1885f7 100644
--- a/drivers/serial/sunsu.c
+++ b/drivers/serial/sunsu.c
@@ -1453,8 +1453,10 @@ static int __devinit su_probe(struct of_device *op, const struct of_device_id *m
if (up->su_type == SU_PORT_KBD || up->su_type == SU_PORT_MS) {
err = sunsu_kbd_ms_init(up);
if (err) {
+ of_iounmap(&op->resource[0],
+ up->port.membase, up->reg_size);
kfree(up);
- goto out_unmap;
+ return err;
}
dev_set_drvdata(&op->dev, up);
diff --git a/drivers/staging/et131x/et1310_mac.c b/drivers/staging/et131x/et1310_mac.c
index a292b1edc41..737a9f5401d 100644
--- a/drivers/staging/et131x/et1310_mac.c
+++ b/drivers/staging/et131x/et1310_mac.c
@@ -226,7 +226,7 @@ void ConfigMACRegs2(struct et131x_adapter *etdev)
}
/* Enable TXMAC */
- ctl |= 0x05; /* TX mac enable, FC disable */
+ ctl |= 0x09; /* TX mac enable, FC disable */
writel(ctl, &etdev->regs->txmac.ctl);
/* Ready to start the RXDMA/TXDMA engine */
diff --git a/drivers/usb/gadget/at91_udc.c b/drivers/usb/gadget/at91_udc.c
index 12ac9cd32a0..df1bae9b048 100644
--- a/drivers/usb/gadget/at91_udc.c
+++ b/drivers/usb/gadget/at91_udc.c
@@ -1370,6 +1370,12 @@ static irqreturn_t at91_udc_irq (int irq, void *_udc)
{
struct at91_udc *udc = _udc;
u32 rescans = 5;
+ int disable_clock = 0;
+
+ if (!udc->clocked) {
+ clk_on(udc);
+ disable_clock = 1;
+ }
while (rescans--) {
u32 status;
@@ -1458,6 +1464,9 @@ static irqreturn_t at91_udc_irq (int irq, void *_udc)
}
}
+ if (disable_clock)
+ clk_off(udc);
+
return IRQ_HANDLED;
}
diff --git a/drivers/video/sunxvr500.c b/drivers/video/sunxvr500.c
index 4cd50497264..3803745d6ee 100644
--- a/drivers/video/sunxvr500.c
+++ b/drivers/video/sunxvr500.c
@@ -242,11 +242,27 @@ static int __devinit e3d_set_fbinfo(struct e3d_info *ep)
static int __devinit e3d_pci_register(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
+ struct device_node *of_node;
+ const char *device_type;
struct fb_info *info;
struct e3d_info *ep;
unsigned int line_length;
int err;
+ of_node = pci_device_to_OF_node(pdev);
+ if (!of_node) {
+ printk(KERN_ERR "e3d: Cannot find OF node of %s\n",
+ pci_name(pdev));
+ return -ENODEV;
+ }
+
+ device_type = of_get_property(of_node, "device_type", NULL);
+ if (!device_type) {
+ printk(KERN_INFO "e3d: Ignoring secondary output device "
+ "at %s\n", pci_name(pdev));
+ return -ENODEV;
+ }
+
err = pci_enable_device(pdev);
if (err < 0) {
printk(KERN_ERR "e3d: Cannot enable PCI device %s\n",
@@ -265,13 +281,7 @@ static int __devinit e3d_pci_register(struct pci_dev *pdev,
ep->info = info;
ep->pdev = pdev;
spin_lock_init(&ep->lock);
- ep->of_node = pci_device_to_OF_node(pdev);
- if (!ep->of_node) {
- printk(KERN_ERR "e3d: Cannot find OF node of %s\n",
- pci_name(pdev));
- err = -ENODEV;
- goto err_release_fb;
- }
+ ep->of_node = of_node;
/* Read the PCI base register of the frame buffer, which we
* need in order to interpret the RAMDAC_VID_*FB* values in
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 23bb0ceabe3..ce8ef610772 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -919,6 +919,10 @@ static int context_is_writeable_or_written(struct inode *inode,
/*
* We are only allowed to write into/dirty the page if the page is
* clean, or already dirty within the same snap context.
+ *
+ * called with page locked.
+ * return success with page locked,
+ * or any failure (including -EAGAIN) with page unlocked.
*/
static int ceph_update_writeable_page(struct file *file,
loff_t pos, unsigned len,
@@ -961,9 +965,11 @@ retry_locked:
snapc = ceph_get_snap_context((void *)page->private);
unlock_page(page);
ceph_queue_writeback(inode);
- wait_event_interruptible(ci->i_cap_wq,
+ r = wait_event_interruptible(ci->i_cap_wq,
context_is_writeable_or_written(inode, snapc));
ceph_put_snap_context(snapc);
+ if (r == -ERESTARTSYS)
+ return r;
return -EAGAIN;
}
@@ -1035,7 +1041,7 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
int r;
do {
- /* get a page*/
+ /* get a page */
page = grab_cache_page_write_begin(mapping, index, 0);
if (!page)
return -ENOMEM;
diff --git a/fs/ceph/auth_x.c b/fs/ceph/auth_x.c
index f0318427b6d..8d8a8496476 100644
--- a/fs/ceph/auth_x.c
+++ b/fs/ceph/auth_x.c
@@ -28,6 +28,12 @@ static int ceph_x_is_authenticated(struct ceph_auth_client *ac)
return (ac->want_keys & xi->have_keys) == ac->want_keys;
}
+static int ceph_x_encrypt_buflen(int ilen)
+{
+ return sizeof(struct ceph_x_encrypt_header) + ilen + 16 +
+ sizeof(u32);
+}
+
static int ceph_x_encrypt(struct ceph_crypto_key *secret,
void *ibuf, int ilen, void *obuf, size_t olen)
{
@@ -150,6 +156,11 @@ static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
struct timespec validity;
struct ceph_crypto_key old_key;
void *tp, *tpend;
+ struct ceph_timespec new_validity;
+ struct ceph_crypto_key new_session_key;
+ struct ceph_buffer *new_ticket_blob;
+ unsigned long new_expires, new_renew_after;
+ u64 new_secret_id;
ceph_decode_need(&p, end, sizeof(u32) + 1, bad);
@@ -182,16 +193,16 @@ static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
goto bad;
memcpy(&old_key, &th->session_key, sizeof(old_key));
- ret = ceph_crypto_key_decode(&th->session_key, &dp, dend);
+ ret = ceph_crypto_key_decode(&new_session_key, &dp, dend);
if (ret)
goto out;
- ceph_decode_copy(&dp, &th->validity, sizeof(th->validity));
- ceph_decode_timespec(&validity, &th->validity);
- th->expires = get_seconds() + validity.tv_sec;
- th->renew_after = th->expires - (validity.tv_sec / 4);
- dout(" expires=%lu renew_after=%lu\n", th->expires,
- th->renew_after);
+ ceph_decode_copy(&dp, &new_validity, sizeof(new_validity));
+ ceph_decode_timespec(&validity, &new_validity);
+ new_expires = get_seconds() + validity.tv_sec;
+ new_renew_after = new_expires - (validity.tv_sec / 4);
+ dout(" expires=%lu renew_after=%lu\n", new_expires,
+ new_renew_after);
/* ticket blob for service */
ceph_decode_8_safe(&p, end, is_enc, bad);
@@ -216,10 +227,21 @@ static int ceph_x_proc_ticket_reply(struct ceph_auth_client *ac,
dout(" ticket blob is %d bytes\n", dlen);
ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad);
struct_v = ceph_decode_8(&tp);
- th->secret_id = ceph_decode_64(&tp);
- ret = ceph_decode_buffer(&th->ticket_blob, &tp, tpend);
+ new_secret_id = ceph_decode_64(&tp);
+ ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend);
if (ret)
goto out;
+
+ /* all is well, update our ticket */
+ ceph_crypto_key_destroy(&th->session_key);
+ if (th->ticket_blob)
+ ceph_buffer_put(th->ticket_blob);
+ th->session_key = new_session_key;
+ th->ticket_blob = new_ticket_blob;
+ th->validity = new_validity;
+ th->secret_id = new_secret_id;
+ th->expires = new_expires;
+ th->renew_after = new_renew_after;
dout(" got ticket service %d (%s) secret_id %lld len %d\n",
type, ceph_entity_type_name(type), th->secret_id,
(int)th->ticket_blob->vec.iov_len);
@@ -242,7 +264,7 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
struct ceph_x_ticket_handler *th,
struct ceph_x_authorizer *au)
{
- int len;
+ int maxlen;
struct ceph_x_authorize_a *msg_a;
struct ceph_x_authorize_b msg_b;
void *p, *end;
@@ -253,15 +275,15 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
dout("build_authorizer for %s %p\n",
ceph_entity_type_name(th->service), au);
- len = sizeof(*msg_a) + sizeof(msg_b) + sizeof(u32) +
- ticket_blob_len + 16;
- dout(" need len %d\n", len);
- if (au->buf && au->buf->alloc_len < len) {
+ maxlen = sizeof(*msg_a) + sizeof(msg_b) +
+ ceph_x_encrypt_buflen(ticket_blob_len);
+ dout(" need len %d\n", maxlen);
+ if (au->buf && au->buf->alloc_len < maxlen) {
ceph_buffer_put(au->buf);
au->buf = NULL;
}
if (!au->buf) {
- au->buf = ceph_buffer_new(len, GFP_NOFS);
+ au->buf = ceph_buffer_new(maxlen, GFP_NOFS);
if (!au->buf)
return -ENOMEM;
}
@@ -296,6 +318,7 @@ static int ceph_x_build_authorizer(struct ceph_auth_client *ac,
au->buf->vec.iov_len = p - au->buf->vec.iov_base;
dout(" built authorizer nonce %llx len %d\n", au->nonce,
(int)au->buf->vec.iov_len);
+ BUG_ON(au->buf->vec.iov_len > maxlen);
return 0;
out_buf:
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index db122bb357b..7d0a0d0adc1 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1407,6 +1407,7 @@ static int try_nonblocking_invalidate(struct inode *inode)
*/
void ceph_check_caps(struct ceph_inode_info *ci, int flags,
struct ceph_mds_session *session)
+ __releases(session->s_mutex)
{
struct ceph_client *client = ceph_inode_to_client(&ci->vfs_inode);
struct ceph_mds_client *mdsc = &client->mdsc;
@@ -1414,7 +1415,6 @@ void ceph_check_caps(struct ceph_inode_info *ci, int flags,
struct ceph_cap *cap;
int file_wanted, used;
int took_snap_rwsem = 0; /* true if mdsc->snap_rwsem held */
- int drop_session_lock = session ? 0 : 1;
int issued, implemented, want, retain, revoking, flushing = 0;
int mds = -1; /* keep track of how far we've gone through i_caps list
to avoid an infinite loop on retry */
@@ -1639,7 +1639,7 @@ ack:
if (queue_invalidate)
ceph_queue_invalidate(inode);
- if (session && drop_session_lock)
+ if (session)
mutex_unlock(&session->s_mutex);
if (took_snap_rwsem)
up_read(&mdsc->snap_rwsem);
@@ -2195,18 +2195,19 @@ void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
* Handle a cap GRANT message from the MDS. (Note that a GRANT may
* actually be a revocation if it specifies a smaller cap set.)
*
- * caller holds s_mutex.
+ * caller holds s_mutex and i_lock, we drop both.
+ *
* return value:
* 0 - ok
* 1 - check_caps on auth cap only (writeback)
* 2 - check_caps (ack revoke)
*/
-static int handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
- struct ceph_mds_session *session,
- struct ceph_cap *cap,
- struct ceph_buffer *xattr_buf)
+static void handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
+ struct ceph_mds_session *session,
+ struct ceph_cap *cap,
+ struct ceph_buffer *xattr_buf)
__releases(inode->i_lock)
-
+ __releases(session->s_mutex)
{
struct ceph_inode_info *ci = ceph_inode(inode);
int mds = session->s_mds;
@@ -2216,7 +2217,7 @@ static int handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
u64 size = le64_to_cpu(grant->size);
u64 max_size = le64_to_cpu(grant->max_size);
struct timespec mtime, atime, ctime;
- int reply = 0;
+ int check_caps = 0;
int wake = 0;
int writeback = 0;
int revoked_rdcache = 0;
@@ -2329,11 +2330,12 @@ static int handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
if ((used & ~newcaps) & CEPH_CAP_FILE_BUFFER)
writeback = 1; /* will delay ack */
else if (dirty & ~newcaps)
- reply = 1; /* initiate writeback in check_caps */
+ check_caps = 1; /* initiate writeback in check_caps */
else if (((used & ~newcaps) & CEPH_CAP_FILE_CACHE) == 0 ||
revoked_rdcache)
- reply = 2; /* send revoke ack in check_caps */
+ check_caps = 2; /* send revoke ack in check_caps */
cap->issued = newcaps;
+ cap->implemented |= newcaps;
} else if (cap->issued == newcaps) {
dout("caps unchanged: %s -> %s\n",
ceph_cap_string(cap->issued), ceph_cap_string(newcaps));
@@ -2346,6 +2348,7 @@ static int handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
* pending revocation */
wake = 1;
}
+ BUG_ON(cap->issued & ~cap->implemented);
spin_unlock(&inode->i_lock);
if (writeback)
@@ -2359,7 +2362,14 @@ static int handle_cap_grant(struct inode *inode, struct ceph_mds_caps *grant,
ceph_queue_invalidate(inode);
if (wake)
wake_up(&ci->i_cap_wq);
- return reply;
+
+ if (check_caps == 1)
+ ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY,
+ session);
+ else if (check_caps == 2)
+ ceph_check_caps(ci, CHECK_CAPS_NODELAY, session);
+ else
+ mutex_unlock(&session->s_mutex);
}
/*
@@ -2548,9 +2558,8 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
ci->i_cap_exporting_issued = cap->issued;
}
__ceph_remove_cap(cap);
- } else {
- WARN_ON(!cap);
}
+ /* else, we already released it */
spin_unlock(&inode->i_lock);
}
@@ -2621,9 +2630,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
u64 cap_id;
u64 size, max_size;
u64 tid;
- int check_caps = 0;
void *snaptrace;
- int r;
dout("handle_caps from mds%d\n", mds);
@@ -2668,8 +2675,9 @@ void ceph_handle_caps(struct ceph_mds_session *session,
case CEPH_CAP_OP_IMPORT:
handle_cap_import(mdsc, inode, h, session,
snaptrace, le32_to_cpu(h->snap_trace_len));
- check_caps = 1; /* we may have sent a RELEASE to the old auth */
- goto done;
+ ceph_check_caps(ceph_inode(inode), CHECK_CAPS_NODELAY,
+ session);
+ goto done_unlocked;
}
/* the rest require a cap */
@@ -2686,16 +2694,8 @@ void ceph_handle_caps(struct ceph_mds_session *session,
switch (op) {
case CEPH_CAP_OP_REVOKE:
case CEPH_CAP_OP_GRANT:
- r = handle_cap_grant(inode, h, session, cap, msg->middle);
- if (r == 1)
- ceph_check_caps(ceph_inode(inode),
- CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY,
- session);
- else if (r == 2)
- ceph_check_caps(ceph_inode(inode),
- CHECK_CAPS_NODELAY,
- session);
- break;
+ handle_cap_grant(inode, h, session, cap, msg->middle);
+ goto done_unlocked;
case CEPH_CAP_OP_FLUSH_ACK:
handle_cap_flush_ack(inode, tid, h, session, cap);
@@ -2713,9 +2713,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
done:
mutex_unlock(&session->s_mutex);
-
- if (check_caps)
- ceph_check_caps(ceph_inode(inode), CHECK_CAPS_NODELAY, NULL);
+done_unlocked:
if (inode)
iput(inode);
return;
@@ -2838,11 +2836,18 @@ int ceph_encode_inode_release(void **p, struct inode *inode,
struct ceph_cap *cap;
struct ceph_mds_request_release *rel = *p;
int ret = 0;
-
- dout("encode_inode_release %p mds%d drop %s unless %s\n", inode,
- mds, ceph_cap_string(drop), ceph_cap_string(unless));
+ int used = 0;
spin_lock(&inode->i_lock);
+ used = __ceph_caps_used(ci);
+
+ dout("encode_inode_release %p mds%d used %s drop %s unless %s\n", inode,
+ mds, ceph_cap_string(used), ceph_cap_string(drop),
+ ceph_cap_string(unless));
+
+ /* only drop unused caps */
+ drop &= ~used;
+
cap = __get_cap_for_mds(ci, mds);
if (cap && __cap_is_valid(cap)) {
if (force ||
diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
index 5107384ee02..8a9116e15b7 100644
--- a/fs/ceph/dir.c
+++ b/fs/ceph/dir.c
@@ -288,8 +288,10 @@ more:
CEPH_MDS_OP_LSSNAP : CEPH_MDS_OP_READDIR;
/* discard old result, if any */
- if (fi->last_readdir)
+ if (fi->last_readdir) {
ceph_mdsc_put_request(fi->last_readdir);
+ fi->last_readdir = NULL;
+ }
/* requery frag tree, as the frag topology may have changed */
frag = ceph_choose_frag(ceph_inode(inode), frag, NULL, NULL);
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 7abe1aed819..aca82d55cc5 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -378,6 +378,22 @@ void ceph_destroy_inode(struct inode *inode)
ceph_queue_caps_release(inode);
+ /*
+ * we may still have a snap_realm reference if there are stray
+ * caps in i_cap_exporting_issued or i_snap_caps.
+ */
+ if (ci->i_snap_realm) {
+ struct ceph_mds_client *mdsc =
+ &ceph_client(ci->vfs_inode.i_sb)->mdsc;
+ struct ceph_snap_realm *realm = ci->i_snap_realm;
+
+ dout(" dropping residual ref to snap realm %p\n", realm);
+ spin_lock(&realm->inodes_with_caps_lock);
+ list_del_init(&ci->i_snap_realm_item);
+ spin_unlock(&realm->inodes_with_caps_lock);
+ ceph_put_snap_realm(mdsc, realm);
+ }
+
kfree(ci->i_symlink);
while ((n = rb_first(&ci->i_fragtree)) != NULL) {
frag = rb_entry(n, struct ceph_inode_frag, node);
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index a2600101ec2..5c7920be642 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -328,6 +328,8 @@ static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
struct ceph_mds_session *s;
s = kzalloc(sizeof(*s), GFP_NOFS);
+ if (!s)
+ return ERR_PTR(-ENOMEM);
s->s_mdsc = mdsc;
s->s_mds = mds;
s->s_state = CEPH_MDS_SESSION_NEW;
@@ -529,7 +531,7 @@ static void __unregister_request(struct ceph_mds_client *mdsc,
{
dout("__unregister_request %p tid %lld\n", req, req->r_tid);
rb_erase(&req->r_node, &mdsc->request_tree);
- ceph_mdsc_put_request(req);
+ RB_CLEAR_NODE(&req->r_node);
if (req->r_unsafe_dir) {
struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
@@ -538,6 +540,8 @@ static void __unregister_request(struct ceph_mds_client *mdsc,
list_del_init(&req->r_unsafe_dir_item);
spin_unlock(&ci->i_unsafe_lock);
}
+
+ ceph_mdsc_put_request(req);
}
/*
@@ -862,6 +866,7 @@ static int send_renew_caps(struct ceph_mds_client *mdsc,
if (time_after_eq(jiffies, session->s_cap_ttl) &&
time_after_eq(session->s_cap_ttl, session->s_renew_requested))
pr_info("mds%d caps stale\n", session->s_mds);
+ session->s_renew_requested = jiffies;
/* do not try to renew caps until a recovering mds has reconnected
* with its clients. */
@@ -874,7 +879,6 @@ static int send_renew_caps(struct ceph_mds_client *mdsc,
dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
ceph_mds_state_name(state));
- session->s_renew_requested = jiffies;
msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
++session->s_renew_seq);
if (IS_ERR(msg))
@@ -1566,8 +1570,13 @@ static int __do_request(struct ceph_mds_client *mdsc,
/* get, open session */
session = __ceph_lookup_mds_session(mdsc, mds);
- if (!session)
+ if (!session) {
session = register_session(mdsc, mds);
+ if (IS_ERR(session)) {
+ err = PTR_ERR(session);
+ goto finish;
+ }
+ }
dout("do_request mds%d session %p state %s\n", mds, session,
session_state_name(session->s_state));
if (session->s_state != CEPH_MDS_SESSION_OPEN &&
@@ -1770,7 +1779,7 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
dout("handle_reply %p\n", req);
/* correct session? */
- if (!req->r_session && req->r_session != session) {
+ if (req->r_session != session) {
pr_err("mdsc_handle_reply got %llu on session mds%d"
" not mds%d\n", tid, session->s_mds,
req->r_session ? req->r_session->s_mds : -1);
@@ -2682,29 +2691,41 @@ void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
*/
static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
{
- struct ceph_mds_request *req = NULL;
+ struct ceph_mds_request *req = NULL, *nextreq;
struct rb_node *n;
mutex_lock(&mdsc->mutex);
dout("wait_unsafe_requests want %lld\n", want_tid);
+restart:
req = __get_oldest_req(mdsc);
while (req && req->r_tid <= want_tid) {
+ /* find next request */
+ n = rb_next(&req->r_node);
+ if (n)
+ nextreq = rb_entry(n, struct ceph_mds_request, r_node);
+ else
+ nextreq = NULL;
if ((req->r_op & CEPH_MDS_OP_WRITE)) {
/* write op */
ceph_mdsc_get_request(req);
+ if (nextreq)
+ ceph_mdsc_get_request(nextreq);
mutex_unlock(&mdsc->mutex);
dout("wait_unsafe_requests wait on %llu (want %llu)\n",
req->r_tid, want_tid);
wait_for_completion(&req->r_safe_completion);
mutex_lock(&mdsc->mutex);
- n = rb_next(&req->r_node);
ceph_mdsc_put_request(req);
- } else {
- n = rb_next(&req->r_node);
+ if (!nextreq)
+ break; /* next request did not exist before we waited, so we're done */
+ if (RB_EMPTY_NODE(&nextreq->r_node)) {
+ /* next request was removed from tree */
+ ceph_mdsc_put_request(nextreq);
+ goto restart;
+ }
+ ceph_mdsc_put_request(nextreq); /* won't go away */
}
- if (!n)
- break;
- req = rb_entry(n, struct ceph_mds_request, r_node);
+ req = nextreq;
}
mutex_unlock(&mdsc->mutex);
dout("wait_unsafe_requests done\n");
diff --git a/fs/ceph/messenger.c b/fs/ceph/messenger.c
index 781656a49bf..a32f0f896d9 100644
--- a/fs/ceph/messenger.c
+++ b/fs/ceph/messenger.c
@@ -366,6 +366,14 @@ void ceph_con_open(struct ceph_connection *con, struct ceph_entity_addr *addr)
}
/*
+ * return true if this connection ever successfully opened
+ */
+bool ceph_con_opened(struct ceph_connection *con)
+{
+ return con->connect_seq > 0;
+}
+
+/*
* generic get/put
*/
struct ceph_connection *ceph_con_get(struct ceph_connection *con)
@@ -830,13 +838,6 @@ static void prepare_read_connect(struct ceph_connection *con)
con->in_base_pos = 0;
}
-static void prepare_read_connect_retry(struct ceph_connection *con)
-{
- dout("prepare_read_connect_retry %p\n", con);
- con->in_base_pos = strlen(CEPH_BANNER) + sizeof(con->actual_peer_addr)
- + sizeof(con->peer_addr_for_me);
-}
-
static void prepare_read_ack(struct ceph_connection *con)
{
dout("prepare_read_ack %p\n", con);
@@ -1146,7 +1147,7 @@ static int process_connect(struct ceph_connection *con)
}
con->auth_retry = 1;
prepare_write_connect(con->msgr, con, 0);
- prepare_read_connect_retry(con);
+ prepare_read_connect(con);
break;
case CEPH_MSGR_TAG_RESETSESSION:
@@ -1843,8 +1844,6 @@ static void ceph_fault(struct ceph_connection *con)
goto out;
}
- clear_bit(BUSY, &con->state); /* to avoid an improbable race */
-
mutex_lock(&con->mutex);
if (test_bit(CLOSED, &con->state))
goto out_unlock;
diff --git a/fs/ceph/messenger.h b/fs/ceph/messenger.h
index 4caaa591111..a343dae73cd 100644
--- a/fs/ceph/messenger.h
+++ b/fs/ceph/messenger.h
@@ -223,6 +223,7 @@ extern void ceph_con_init(struct ceph_messenger *msgr,
struct ceph_connection *con);
extern void ceph_con_open(struct ceph_connection *con,
struct ceph_entity_addr *addr);
+extern bool ceph_con_opened(struct ceph_connection *con);
extern void ceph_con_close(struct ceph_connection *con);
extern void ceph_con_send(struct ceph_connection *con, struct ceph_msg *msg);
extern void ceph_con_revoke(struct ceph_connection *con, struct ceph_msg *msg);
diff --git a/fs/ceph/osd_client.c b/fs/ceph/osd_client.c
index dbe63db9762..c7b4dedaace 100644
--- a/fs/ceph/osd_client.c
+++ b/fs/ceph/osd_client.c
@@ -413,11 +413,22 @@ static void remove_old_osds(struct ceph_osd_client *osdc, int remove_all)
*/
static int __reset_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
{
+ struct ceph_osd_request *req;
int ret = 0;
dout("__reset_osd %p osd%d\n", osd, osd->o_osd);
if (list_empty(&osd->o_requests)) {
__remove_osd(osdc, osd);
+ } else if (memcmp(&osdc->osdmap->osd_addr[osd->o_osd],
+ &osd->o_con.peer_addr,
+ sizeof(osd->o_con.peer_addr)) == 0 &&
+ !ceph_con_opened(&osd->o_con)) {
+ dout(" osd addr hasn't changed and connection never opened,"
+ " letting msgr retry");
+ /* touch each r_stamp for handle_timeout()'s benefit */
+ list_for_each_entry(req, &osd->o_requests, r_osd_item)
+ req->r_stamp = jiffies;
+ ret = -EAGAIN;
} else {
ceph_con_close(&osd->o_con);
ceph_con_open(&osd->o_con, &osdc->osdmap->osd_addr[osd->o_osd]);
@@ -633,7 +644,7 @@ static int __send_request(struct ceph_osd_client *osdc,
reqhead->flags |= cpu_to_le32(req->r_flags); /* e.g., RETRY */
reqhead->reassert_version = req->r_reassert_version;
- req->r_sent_stamp = jiffies;
+ req->r_stamp = jiffies;
list_move_tail(&osdc->req_lru, &req->r_req_lru_item);
ceph_msg_get(req->r_request); /* send consumes a ref */
@@ -660,7 +671,7 @@ static void handle_timeout(struct work_struct *work)
unsigned long timeout = osdc->client->mount_args->osd_timeout * HZ;
unsigned long keepalive =
osdc->client->mount_args->osd_keepalive_timeout * HZ;
- unsigned long last_sent = 0;
+ unsigned long last_stamp = 0;
struct rb_node *p;
struct list_head slow_osds;
@@ -697,12 +708,12 @@ static void handle_timeout(struct work_struct *work)
req = list_entry(osdc->req_lru.next, struct ceph_osd_request,
r_req_lru_item);
- if (time_before(jiffies, req->r_sent_stamp + timeout))
+ if (time_before(jiffies, req->r_stamp + timeout))
break;
- BUG_ON(req == last_req && req->r_sent_stamp == last_sent);
+ BUG_ON(req == last_req && req->r_stamp == last_stamp);
last_req = req;
- last_sent = req->r_sent_stamp;
+ last_stamp = req->r_stamp;
osd = req->r_osd;
BUG_ON(!osd);
@@ -718,7 +729,7 @@ static void handle_timeout(struct work_struct *work)
*/
INIT_LIST_HEAD(&slow_osds);
list_for_each_entry(req, &osdc->req_lru, r_req_lru_item) {
- if (time_before(jiffies, req->r_sent_stamp + keepalive))
+ if (time_before(jiffies, req->r_stamp + keepalive))
break;
osd = req->r_osd;
@@ -862,7 +873,9 @@ static int __kick_requests(struct ceph_osd_client *osdc,
dout("kick_requests osd%d\n", kickosd ? kickosd->o_osd : -1);
if (kickosd) {
- __reset_osd(osdc, kickosd);
+ err = __reset_osd(osdc, kickosd);
+ if (err == -EAGAIN)
+ return 1;
} else {
for (p = rb_first(&osdc->osds); p; p = n) {
struct ceph_osd *osd =
@@ -913,7 +926,7 @@ static int __kick_requests(struct ceph_osd_client *osdc,
kick:
dout("kicking %p tid %llu osd%d\n", req, req->r_tid,
- req->r_osd->o_osd);
+ req->r_osd ? req->r_osd->o_osd : -1);
req->r_flags |= CEPH_OSD_FLAG_RETRY;
err = __send_request(osdc, req);
if (err) {
diff --git a/fs/ceph/osd_client.h b/fs/ceph/osd_client.h
index 1b1a3ca43af..b0759911e7c 100644
--- a/fs/ceph/osd_client.h
+++ b/fs/ceph/osd_client.h
@@ -70,7 +70,7 @@ struct ceph_osd_request {
char r_oid[40]; /* object name */
int r_oid_len;
- unsigned long r_sent_stamp;
+ unsigned long r_stamp; /* send OR check time */
bool r_resend; /* msg send failed, needs retry */
struct ceph_file_layout r_file_layout;
diff --git a/fs/ceph/osdmap.c b/fs/ceph/osdmap.c
index b83f2692b83..d82fe87c2a6 100644
--- a/fs/ceph/osdmap.c
+++ b/fs/ceph/osdmap.c
@@ -480,6 +480,14 @@ static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, int id)
return NULL;
}
+void __decode_pool(void **p, struct ceph_pg_pool_info *pi)
+{
+ ceph_decode_copy(p, &pi->v, sizeof(pi->v));
+ calc_pg_masks(pi);
+ *p += le32_to_cpu(pi->v.num_snaps) * sizeof(u64);
+ *p += le32_to_cpu(pi->v.num_removed_snap_intervals) * sizeof(u64) * 2;
+}
+
/*
* decode a full map.
*/
@@ -526,12 +534,8 @@ struct ceph_osdmap *osdmap_decode(void **p, void *end)
ev, CEPH_PG_POOL_VERSION);
goto bad;
}
- ceph_decode_copy(p, &pi->v, sizeof(pi->v));
+ __decode_pool(p, pi);
__insert_pg_pool(&map->pg_pools, pi);
- calc_pg_masks(pi);
- *p += le32_to_cpu(pi->v.num_snaps) * sizeof(u64);
- *p += le32_to_cpu(pi->v.num_removed_snap_intervals)
- * sizeof(u64) * 2;
}
ceph_decode_32_safe(p, end, map->pool_max, bad);
@@ -714,8 +718,7 @@ struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
pi->id = pool;
__insert_pg_pool(&map->pg_pools, pi);
}
- ceph_decode_copy(p, &pi->v, sizeof(pi->v));
- calc_pg_masks(pi);
+ __decode_pool(p, pi);
}
/* old_pool */
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index bf2a5f3846a..df04e210a05 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -314,9 +314,9 @@ static int build_snap_context(struct ceph_snap_realm *realm)
because we rebuild_snap_realms() works _downward_ in
hierarchy after each update.) */
if (realm->cached_context &&
- realm->cached_context->seq <= realm->seq &&
+ realm->cached_context->seq == realm->seq &&
(!parent ||
- realm->cached_context->seq <= parent->cached_context->seq)) {
+ realm->cached_context->seq >= parent->cached_context->seq)) {
dout("build_snap_context %llx %p: %p seq %lld (%d snaps)"
" (unchanged)\n",
realm->ino, realm, realm->cached_context,
@@ -818,7 +818,9 @@ void ceph_handle_snap(struct ceph_mds_client *mdsc,
* queued (again) by ceph_update_snap_trace()
* below. Queue it _now_, under the old context.
*/
+ spin_lock(&realm->inodes_with_caps_lock);
list_del_init(&ci->i_snap_realm_item);
+ spin_unlock(&realm->inodes_with_caps_lock);
spin_unlock(&inode->i_lock);
ceph_queue_cap_snap(ci,
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index ef9008b885b..0d0e97ed3ff 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -582,7 +582,9 @@ got:
inode->i_generation = sbi->s_next_generation++;
spin_unlock(&sbi->s_next_gen_lock);
- ei->i_state = EXT3_STATE_NEW;
+ ei->i_state_flags = 0;
+ ext3_set_inode_state(inode, EXT3_STATE_NEW);
+
ei->i_extra_isize =
(EXT3_INODE_SIZE(inode->i_sb) > EXT3_GOOD_OLD_INODE_SIZE) ?
sizeof(struct ext3_inode) - EXT3_GOOD_OLD_INODE_SIZE : 0;
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 7f920b7263a..ea33bdf0a30 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -2811,7 +2811,7 @@ struct inode *ext3_iget(struct super_block *sb, unsigned long ino)
inode->i_mtime.tv_sec = (signed)le32_to_cpu(raw_inode->i_mtime);
inode->i_atime.tv_nsec = inode->i_ctime.tv_nsec = inode->i_mtime.tv_nsec = 0;
- ei->i_state = 0;
+ ei->i_state_flags = 0;
ei->i_dir_start_lookup = 0;
ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
/* We now have enough fields to check if the inode was active or not.
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 361c0b9962a..57f6eef6ccd 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -263,7 +263,7 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
ext4_group_t f;
f = ext4_flex_group(sbi, block_group);
- atomic_dec(&sbi->s_flex_groups[f].free_inodes);
+ atomic_dec(&sbi->s_flex_groups[f].used_dirs);
}
}
@@ -773,7 +773,7 @@ static int ext4_claim_inode(struct super_block *sb,
if (sbi->s_log_groups_per_flex) {
ext4_group_t f = ext4_flex_group(sbi, group);
- atomic_inc(&sbi->s_flex_groups[f].free_inodes);
+ atomic_inc(&sbi->s_flex_groups[f].used_dirs);
}
}
gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 986120f3006..11119e07233 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1035,7 +1035,7 @@ static int ext4_indirect_calc_metadata_amount(struct inode *inode,
sector_t lblock)
{
struct ext4_inode_info *ei = EXT4_I(inode);
- int dind_mask = EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1;
+ sector_t dind_mask = ~((sector_t)EXT4_ADDR_PER_BLOCK(inode->i_sb) - 1);
int blk_bits;
if (lblock < EXT4_NDIR_BLOCKS)
@@ -1050,7 +1050,7 @@ static int ext4_indirect_calc_metadata_amount(struct inode *inode,
}
ei->i_da_metadata_calc_last_lblock = lblock & dind_mask;
ei->i_da_metadata_calc_len = 1;
- blk_bits = roundup_pow_of_two(lblock + 1);
+ blk_bits = order_base_2(lblock);
return (blk_bits / EXT4_ADDR_PER_BLOCK_BITS(inode->i_sb)) + 1;
}
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index ba191dae873..e14d22c170d 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -68,7 +68,21 @@ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
static int ext4_unfreeze(struct super_block *sb);
static void ext4_write_super(struct super_block *sb);
static int ext4_freeze(struct super_block *sb);
+static int ext4_get_sb(struct file_system_type *fs_type, int flags,
+ const char *dev_name, void *data, struct vfsmount *mnt);
+#if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
+static struct file_system_type ext3_fs_type = {
+ .owner = THIS_MODULE,
+ .name = "ext3",
+ .get_sb = ext4_get_sb,
+ .kill_sb = kill_block_super,
+ .fs_flags = FS_REQUIRES_DEV,
+};
+#define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)
+#else
+#define IS_EXT3_SB(sb) (0)
+#endif
ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
struct ext4_group_desc *bg)
@@ -2539,7 +2553,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
* enable delayed allocation by default
* Use -o nodelalloc to turn it off
*/
- set_opt(sbi->s_mount_opt, DELALLOC);
+ if (!IS_EXT3_SB(sb))
+ set_opt(sbi->s_mount_opt, DELALLOC);
if (!parse_options((char *) data, sb, &journal_devnum,
&journal_ioprio, NULL, 0))
@@ -4068,7 +4083,7 @@ static int ext4_get_sb(struct file_system_type *fs_type, int flags,
return get_sb_bdev(fs_type, flags, dev_name, data, ext4_fill_super,mnt);
}
-#if !defined(CONTIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
+#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
static struct file_system_type ext2_fs_type = {
.owner = THIS_MODULE,
.name = "ext2",
@@ -4095,15 +4110,7 @@ static inline void register_as_ext2(void) { }
static inline void unregister_as_ext2(void) { }
#endif
-#if !defined(CONTIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
-static struct file_system_type ext3_fs_type = {
- .owner = THIS_MODULE,
- .name = "ext3",
- .get_sb = ext4_get_sb,
- .kill_sb = kill_block_super,
- .fs_flags = FS_REQUIRES_DEV,
-};
-
+#if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && defined(CONFIG_EXT4_USE_FOR_EXT23)
static inline void register_as_ext3(void)
{
int err = register_filesystem(&ext3_fs_type);
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index c1ef5015486..6fcc7e71fba 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -309,7 +309,7 @@ static int vfat_create_shortname(struct inode *dir, struct nls_table *nls,
{
struct fat_mount_options *opts = &MSDOS_SB(dir->i_sb)->options;
wchar_t *ip, *ext_start, *end, *name_start;
- unsigned char base[9], ext[4], buf[8], *p;
+ unsigned char base[9], ext[4], buf[5], *p;
unsigned char charbuf[NLS_MAX_CHARSET_SIZE];
int chl, chi;
int sz = 0, extlen, baselen, i, numtail_baselen, numtail2_baselen;
@@ -467,7 +467,7 @@ static int vfat_create_shortname(struct inode *dir, struct nls_table *nls,
return 0;
}
- i = jiffies & 0xffff;
+ i = jiffies;
sz = (jiffies >> 16) & 0x7;
if (baselen > 2) {
baselen = numtail2_baselen;
@@ -476,7 +476,7 @@ static int vfat_create_shortname(struct inode *dir, struct nls_table *nls,
name_res[baselen + 4] = '~';
name_res[baselen + 5] = '1' + sz;
while (1) {
- sprintf(buf, "%04X", i);
+ snprintf(buf, sizeof(buf), "%04X", i & 0xffff);
memcpy(&name_res[baselen], buf, 4);
if (vfat_find_form(dir, name_res) < 0)
break;
diff --git a/fs/fscache/object.c b/fs/fscache/object.c
index e513ac599c8..0b589a9b4ff 100644
--- a/fs/fscache/object.c
+++ b/fs/fscache/object.c
@@ -53,7 +53,7 @@ const char fscache_object_states_short[FSCACHE_OBJECT__NSTATES][5] = {
static void fscache_object_slow_work_put_ref(struct slow_work *);
static int fscache_object_slow_work_get_ref(struct slow_work *);
static void fscache_object_slow_work_execute(struct slow_work *);
-#ifdef CONFIG_SLOW_WORK_PROC
+#ifdef CONFIG_SLOW_WORK_DEBUG
static void fscache_object_slow_work_desc(struct slow_work *, struct seq_file *);
#endif
static void fscache_initialise_object(struct fscache_object *);
@@ -69,7 +69,7 @@ const struct slow_work_ops fscache_object_slow_work_ops = {
.get_ref = fscache_object_slow_work_get_ref,
.put_ref = fscache_object_slow_work_put_ref,
.execute = fscache_object_slow_work_execute,
-#ifdef CONFIG_SLOW_WORK_PROC
+#ifdef CONFIG_SLOW_WORK_DEBUG
.desc = fscache_object_slow_work_desc,
#endif
};
@@ -364,7 +364,7 @@ static void fscache_object_slow_work_execute(struct slow_work *work)
/*
* describe an object for slow-work debugging
*/
-#ifdef CONFIG_SLOW_WORK_PROC
+#ifdef CONFIG_SLOW_WORK_DEBUG
static void fscache_object_slow_work_desc(struct slow_work *work,
struct seq_file *m)
{
diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
index 313e79a1426..9f6c928d458 100644
--- a/fs/fscache/operation.c
+++ b/fs/fscache/operation.c
@@ -500,7 +500,7 @@ static void fscache_op_execute(struct slow_work *work)
/*
* describe an operation for slow-work debugging
*/
-#ifdef CONFIG_SLOW_WORK_PROC
+#ifdef CONFIG_SLOW_WORK_DEBUG
static void fscache_op_desc(struct slow_work *work, struct seq_file *m)
{
struct fscache_operation *op =
@@ -517,7 +517,7 @@ const struct slow_work_ops fscache_op_slow_work_ops = {
.get_ref = fscache_op_get_ref,
.put_ref = fscache_op_put_ref,
.execute = fscache_op_execute,
-#ifdef CONFIG_SLOW_WORK_PROC
+#ifdef CONFIG_SLOW_WORK_DEBUG
.desc = fscache_op_desc,
#endif
};
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
index 9718c22f186..a5d0c56d3eb 100644
--- a/fs/logfs/dev_bdev.c
+++ b/fs/logfs/dev_bdev.c
@@ -80,6 +80,7 @@ static void writeseg_end_io(struct bio *bio, int err)
prefetchw(&bvec->bv_page->flags);
end_page_writeback(page);
+ page_cache_release(page);
} while (bvec >= bio->bi_io_vec);
bio_put(bio);
if (atomic_dec_and_test(&super->s_pending_writes))
@@ -97,8 +98,10 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
int i;
+ if (max_pages > BIO_MAX_PAGES)
+ max_pages = BIO_MAX_PAGES;
bio = bio_alloc(GFP_NOFS, max_pages);
- BUG_ON(!bio); /* FIXME: handle this */
+ BUG_ON(!bio);
for (i = 0; i < nr_pages; i++) {
if (i >= max_pages) {
@@ -191,8 +194,10 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
unsigned int max_pages = queue_max_hw_sectors(q) >> (PAGE_SHIFT - 9);
int i;
+ if (max_pages > BIO_MAX_PAGES)
+ max_pages = BIO_MAX_PAGES;
bio = bio_alloc(GFP_NOFS, max_pages);
- BUG_ON(!bio); /* FIXME: handle this */
+ BUG_ON(!bio);
for (i = 0; i < nr_pages; i++) {
if (i >= max_pages) {
diff --git a/fs/logfs/dir.c b/fs/logfs/dir.c
index 56a8bfbb012..c76b4b5c7ff 100644
--- a/fs/logfs/dir.c
+++ b/fs/logfs/dir.c
@@ -303,12 +303,12 @@ static int __logfs_readdir(struct file *file, void *buf, filldir_t filldir)
(filler_t *)logfs_readpage, NULL);
if (IS_ERR(page))
return PTR_ERR(page);
- dd = kmap_atomic(page, KM_USER0);
+ dd = kmap(page);
BUG_ON(dd->namelen == 0);
full = filldir(buf, (char *)dd->name, be16_to_cpu(dd->namelen),
pos, be64_to_cpu(dd->ino), dd->type);
- kunmap_atomic(dd, KM_USER0);
+ kunmap(page);
page_cache_release(page);
if (full)
break;
diff --git a/fs/logfs/journal.c b/fs/logfs/journal.c
index 6ad30a4c905..d57c7b07b60 100644
--- a/fs/logfs/journal.c
+++ b/fs/logfs/journal.c
@@ -800,6 +800,7 @@ void do_logfs_journal_wl_pass(struct super_block *sb)
{
struct logfs_super *super = logfs_super(sb);
struct logfs_area *area = super->s_journal_area;
+ struct btree_head32 *head = &super->s_reserved_segments;
u32 segno, ec;
int i, err;
@@ -807,6 +808,7 @@ void do_logfs_journal_wl_pass(struct super_block *sb)
/* Drop old segments */
journal_for_each(i)
if (super->s_journal_seg[i]) {
+ btree_remove32(head, super->s_journal_seg[i]);
logfs_set_segment_unreserved(sb,
super->s_journal_seg[i],
super->s_journal_ec[i]);
@@ -819,8 +821,13 @@ void do_logfs_journal_wl_pass(struct super_block *sb)
super->s_journal_seg[i] = segno;
super->s_journal_ec[i] = ec;
logfs_set_segment_reserved(sb, segno);
+ err = btree_insert32(head, segno, (void *)1, GFP_KERNEL);
+ BUG_ON(err); /* mempool should prevent this */
+ err = logfs_erase_segment(sb, segno, 1);
+ BUG_ON(err); /* FIXME: remount-ro would be nicer */
}
/* Manually move journal_area */
+ freeseg(sb, area->a_segno);
area->a_segno = super->s_journal_seg[0];
area->a_is_open = 0;
area->a_used_bytes = 0;
diff --git a/fs/logfs/logfs.h b/fs/logfs/logfs.h
index 12977943137..b84b0eec602 100644
--- a/fs/logfs/logfs.h
+++ b/fs/logfs/logfs.h
@@ -587,6 +587,7 @@ void move_page_to_btree(struct page *page);
int logfs_init_mapping(struct super_block *sb);
void logfs_sync_area(struct logfs_area *area);
void logfs_sync_segments(struct super_block *sb);
+void freeseg(struct super_block *sb, u32 segno);
/* area handling */
int logfs_init_areas(struct super_block *sb);
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
index 7a23b3e7c0a..c3a3a6814b8 100644
--- a/fs/logfs/readwrite.c
+++ b/fs/logfs/readwrite.c
@@ -1594,7 +1594,6 @@ int logfs_delete(struct inode *inode, pgoff_t index,
return ret;
}
-/* Rewrite cannot mark the inode dirty but has to write it immediatly. */
int logfs_rewrite_block(struct inode *inode, u64 bix, u64 ofs,
gc_level_t gc_level, long flags)
{
@@ -1611,6 +1610,18 @@ int logfs_rewrite_block(struct inode *inode, u64 bix, u64 ofs,
if (level != 0)
alloc_indirect_block(inode, page, 0);
err = logfs_write_buf(inode, page, flags);
+ if (!err && shrink_level(gc_level) == 0) {
+ /* Rewrite cannot mark the inode dirty but has to
+ * write it immediately.
+ * Q: Can't we just create an alias for the inode
+ * instead? And if not, why not?
+ */
+ if (inode->i_ino == LOGFS_INO_MASTER)
+ logfs_write_anchor(inode->i_sb);
+ else {
+ err = __logfs_write_inode(inode, flags);
+ }
+ }
}
logfs_put_write_page(page);
return err;
diff --git a/fs/logfs/segment.c b/fs/logfs/segment.c
index 1a14f9910d5..0ecd8f07c11 100644
--- a/fs/logfs/segment.c
+++ b/fs/logfs/segment.c
@@ -93,50 +93,58 @@ void __logfs_buf_write(struct logfs_area *area, u64 ofs, void *buf, size_t len,
} while (len);
}
-/*
- * bdev_writeseg will write full pages. Memset the tail to prevent data leaks.
- */
-static void pad_wbuf(struct logfs_area *area, int final)
+static void pad_partial_page(struct logfs_area *area)
{
struct super_block *sb = area->a_sb;
- struct logfs_super *super = logfs_super(sb);
struct page *page;
u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes);
pgoff_t index = ofs >> PAGE_SHIFT;
long offset = ofs & (PAGE_SIZE-1);
u32 len = PAGE_SIZE - offset;
- if (len == PAGE_SIZE) {
- /* The math in this function can surely use some love */
- len = 0;
- }
- if (len) {
- BUG_ON(area->a_used_bytes >= super->s_segsize);
-
- page = get_mapping_page(area->a_sb, index, 0);
+ if (len % PAGE_SIZE) {
+ page = get_mapping_page(sb, index, 0);
BUG_ON(!page); /* FIXME: reserve a pool */
memset(page_address(page) + offset, 0xff, len);
SetPagePrivate(page);
page_cache_release(page);
}
+}
- if (!final)
- return;
+static void pad_full_pages(struct logfs_area *area)
+{
+ struct super_block *sb = area->a_sb;
+ struct logfs_super *super = logfs_super(sb);
+ u64 ofs = dev_ofs(sb, area->a_segno, area->a_used_bytes);
+ u32 len = super->s_segsize - area->a_used_bytes;
+ pgoff_t index = PAGE_CACHE_ALIGN(ofs) >> PAGE_CACHE_SHIFT;
+ pgoff_t no_indizes = len >> PAGE_CACHE_SHIFT;
+ struct page *page;
- area->a_used_bytes += len;
- for ( ; area->a_used_bytes < super->s_segsize;
- area->a_used_bytes += PAGE_SIZE) {
- /* Memset another page */
- index++;
- page = get_mapping_page(area->a_sb, index, 0);
+ while (no_indizes) {
+ page = get_mapping_page(sb, index, 0);
BUG_ON(!page); /* FIXME: reserve a pool */
- memset(page_address(page), 0xff, PAGE_SIZE);
+ SetPageUptodate(page);
+ memset(page_address(page), 0xff, PAGE_CACHE_SIZE);
SetPagePrivate(page);
page_cache_release(page);
+ index++;
+ no_indizes--;
}
}
/*
+ * bdev_writeseg will write full pages. Memset the tail to prevent data leaks.
+ * Also make sure we allocate (and memset) all pages for final writeout.
+ */
+static void pad_wbuf(struct logfs_area *area, int final)
+{
+ pad_partial_page(area);
+ if (final)
+ pad_full_pages(area);
+}
+
+/*
* We have to be careful with the alias tree. Since lookup is done by bix,
* it needs to be normalized, so 14, 15, 16, etc. all match when dealing with
* indirect blocks. So always use it through accessor functions.
@@ -683,7 +691,7 @@ int logfs_segment_delete(struct inode *inode, struct logfs_shadow *shadow)
return 0;
}
-static void freeseg(struct super_block *sb, u32 segno)
+void freeseg(struct super_block *sb, u32 segno)
{
struct logfs_super *super = logfs_super(sb);
struct address_space *mapping = super->s_mapping_inode->i_mapping;
diff --git a/fs/logfs/super.c b/fs/logfs/super.c
index c66beab78de..9d856c49afc 100644
--- a/fs/logfs/super.c
+++ b/fs/logfs/super.c
@@ -277,7 +277,7 @@ static int logfs_recover_sb(struct super_block *sb)
}
if (valid0 && valid1 && ds_cmp(ds0, ds1)) {
printk(KERN_INFO"Superblocks don't match - fixing.\n");
- return write_one_sb(sb, super->s_devops->find_last_sb);
+ return logfs_write_sb(sb);
}
/* If neither is valid now, something's wrong. Didn't we properly
* check them before?!? */
@@ -289,6 +289,10 @@ static int logfs_make_writeable(struct super_block *sb)
{
int err;
+ err = logfs_open_segfile(sb);
+ if (err)
+ return err;
+
/* Repair any broken superblock copies */
err = logfs_recover_sb(sb);
if (err)
@@ -299,10 +303,6 @@ static int logfs_make_writeable(struct super_block *sb)
if (err)
return err;
- err = logfs_open_segfile(sb);
- if (err)
- return err;
-
/* Do one GC pass before any data gets dirtied */
logfs_gc_pass(sb);
@@ -328,7 +328,7 @@ static int logfs_get_sb_final(struct super_block *sb, struct vfsmount *mnt)
sb->s_root = d_alloc_root(rootdir);
if (!sb->s_root)
- goto fail;
+ goto fail2;
super->s_erase_page = alloc_pages(GFP_KERNEL, 0);
if (!super->s_erase_page)
@@ -572,8 +572,7 @@ int logfs_get_sb_device(struct file_system_type *type, int flags,
return 0;
err1:
- up_write(&sb->s_umount);
- deactivate_super(sb);
+ deactivate_locked_super(sb);
return err;
err0:
kfree(super);
diff --git a/fs/namei.c b/fs/namei.c
index 1c0fca6e899..a7dce91a7e4 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1610,8 +1610,7 @@ exit:
static struct file *do_last(struct nameidata *nd, struct path *path,
int open_flag, int acc_mode,
- int mode, const char *pathname,
- int *want_dir)
+ int mode, const char *pathname)
{
struct dentry *dir = nd->path.dentry;
struct file *filp;
@@ -1642,7 +1641,7 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
if (nd->last.name[nd->last.len]) {
if (open_flag & O_CREAT)
goto exit;
- *want_dir = 1;
+ nd->flags |= LOOKUP_DIRECTORY;
}
/* just plain open? */
@@ -1656,8 +1655,10 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
if (path->dentry->d_inode->i_op->follow_link)
return NULL;
error = -ENOTDIR;
- if (*want_dir && !path->dentry->d_inode->i_op->lookup)
- goto exit_dput;
+ if (nd->flags & LOOKUP_DIRECTORY) {
+ if (!path->dentry->d_inode->i_op->lookup)
+ goto exit_dput;
+ }
path_to_nameidata(path, nd);
audit_inode(pathname, nd->path.dentry);
goto ok;
@@ -1766,7 +1767,6 @@ struct file *do_filp_open(int dfd, const char *pathname,
int count = 0;
int flag = open_to_namei_flags(open_flag);
int force_reval = 0;
- int want_dir = open_flag & O_DIRECTORY;
if (!(open_flag & O_CREAT))
mode = 0;
@@ -1828,7 +1828,9 @@ reval:
if (open_flag & O_EXCL)
nd.flags |= LOOKUP_EXCL;
}
- filp = do_last(&nd, &path, open_flag, acc_mode, mode, pathname, &want_dir);
+ if (open_flag & O_DIRECTORY)
+ nd.flags |= LOOKUP_DIRECTORY;
+ filp = do_last(&nd, &path, open_flag, acc_mode, mode, pathname);
while (unlikely(!filp)) { /* trailing symlink */
struct path holder;
struct inode *inode = path.dentry->d_inode;
@@ -1866,7 +1868,7 @@ reval:
}
holder = path;
nd.flags &= ~LOOKUP_PARENT;
- filp = do_last(&nd, &path, open_flag, acc_mode, mode, pathname, &want_dir);
+ filp = do_last(&nd, &path, open_flag, acc_mode, mode, pathname);
if (inode->i_op->put_link)
inode->i_op->put_link(holder.dentry, &nd, cookie);
path_put(&holder);
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index 636eaafd6ea..6129a431aa3 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -323,14 +323,14 @@ int nilfs_write_logs(struct list_head *logs, struct the_nilfs *nilfs)
int nilfs_wait_on_logs(struct list_head *logs)
{
struct nilfs_segment_buffer *segbuf;
- int err;
+ int err, ret = 0;
list_for_each_entry(segbuf, logs, sb_list) {
err = nilfs_segbuf_wait(segbuf);
- if (err)
- return err;
+ if (err && !ret)
+ ret = err;
}
- return 0;
+ return ret;
}
/*
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 69576a95e13..c161d89061b 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -1510,6 +1510,12 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
if (mode != SC_LSEG_SR || sci->sc_stage.scnt < NILFS_ST_CPFILE)
break;
+ nilfs_clear_logs(&sci->sc_segbufs);
+
+ err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
+ if (unlikely(err))
+ return err;
+
if (sci->sc_stage.flags & NILFS_CF_SUFREED) {
err = nilfs_sufile_cancel_freev(nilfs->ns_sufile,
sci->sc_freesegs,
@@ -1517,12 +1523,6 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci,
NULL);
WARN_ON(err); /* do not happen */
}
- nilfs_clear_logs(&sci->sc_segbufs);
-
- err = nilfs_segctor_extend_segments(sci, nilfs, nadd);
- if (unlikely(err))
- return err;
-
nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA);
sci->sc_stage = prev_stage;
}
@@ -1897,8 +1897,7 @@ static void nilfs_segctor_abort_construction(struct nilfs_sc_info *sci,
list_splice_tail_init(&sci->sc_write_logs, &logs);
ret = nilfs_wait_on_logs(&logs);
- if (ret)
- nilfs_abort_logs(&logs, NULL, sci->sc_super_root, ret);
+ nilfs_abort_logs(&logs, NULL, sci->sc_super_root, ret ? : err);
list_splice_tail_init(&sci->sc_segbufs, &logs);
nilfs_cancel_segusage(&logs, nilfs->ns_sufile);
diff --git a/fs/ocfs2/acl.c b/fs/ocfs2/acl.c
index 0501974bedd..8ccf0f8c9cc 100644
--- a/fs/ocfs2/acl.c
+++ b/fs/ocfs2/acl.c
@@ -30,6 +30,8 @@
#include "alloc.h"
#include "dlmglue.h"
#include "file.h"
+#include "inode.h"
+#include "journal.h"
#include "ocfs2_fs.h"
#include "xattr.h"
@@ -166,6 +168,60 @@ static struct posix_acl *ocfs2_get_acl(struct inode *inode, int type)
}
/*
+ * Helper function to set i_mode in memory and disk. Some call paths
+ * will not have di_bh or a journal handle to pass, in which case it
+ * will create its own.
+ */
+static int ocfs2_acl_set_mode(struct inode *inode, struct buffer_head *di_bh,
+ handle_t *handle, umode_t new_mode)
+{
+ int ret, commit_handle = 0;
+ struct ocfs2_dinode *di;
+
+ if (di_bh == NULL) {
+ ret = ocfs2_read_inode_block(inode, &di_bh);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
+ }
+ } else
+ get_bh(di_bh);
+
+ if (handle == NULL) {
+ handle = ocfs2_start_trans(OCFS2_SB(inode->i_sb),
+ OCFS2_INODE_UPDATE_CREDITS);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ mlog_errno(ret);
+ goto out_brelse;
+ }
+
+ commit_handle = 1;
+ }
+
+ di = (struct ocfs2_dinode *)di_bh->b_data;
+ ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out_commit;
+ }
+
+ inode->i_mode = new_mode;
+ di->i_mode = cpu_to_le16(inode->i_mode);
+
+ ocfs2_journal_dirty(handle, di_bh);
+
+out_commit:
+ if (commit_handle)
+ ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
+out_brelse:
+ brelse(di_bh);
+out:
+ return ret;
+}
+
+/*
* Set the access or default ACL of an inode.
*/
static int ocfs2_set_acl(handle_t *handle,
@@ -193,9 +249,14 @@ static int ocfs2_set_acl(handle_t *handle,
if (ret < 0)
return ret;
else {
- inode->i_mode = mode;
if (ret == 0)
acl = NULL;
+
+ ret = ocfs2_acl_set_mode(inode, di_bh,
+ handle, mode);
+ if (ret)
+ return ret;
+
}
}
break;
@@ -283,6 +344,7 @@ int ocfs2_init_acl(handle_t *handle,
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct posix_acl *acl = NULL;
int ret = 0;
+ mode_t mode;
if (!S_ISLNK(inode->i_mode)) {
if (osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) {
@@ -291,12 +353,17 @@ int ocfs2_init_acl(handle_t *handle,
if (IS_ERR(acl))
return PTR_ERR(acl);
}
- if (!acl)
- inode->i_mode &= ~current_umask();
+ if (!acl) {
+ mode = inode->i_mode & ~current_umask();
+ ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
+ if (ret) {
+ mlog_errno(ret);
+ goto cleanup;
+ }
+ }
}
if ((osb->s_mount_opt & OCFS2_MOUNT_POSIX_ACL) && acl) {
struct posix_acl *clone;
- mode_t mode;
if (S_ISDIR(inode->i_mode)) {
ret = ocfs2_set_acl(handle, inode, di_bh,
@@ -313,7 +380,7 @@ int ocfs2_init_acl(handle_t *handle,
mode = inode->i_mode;
ret = posix_acl_create_masq(clone, &mode);
if (ret >= 0) {
- inode->i_mode = mode;
+ ret = ocfs2_acl_set_mode(inode, di_bh, handle, mode);
if (ret > 0) {
ret = ocfs2_set_acl(handle, inode,
di_bh, ACL_TYPE_ACCESS,
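A standalone sketch of the shape of ocfs2_acl_set_mode(): create the transaction handle only when the caller did not supply one, and commit only what the helper itself started. The handle type and functions below are illustrative stand-ins, not ocfs2 calls:

#include <stdio.h>
#include <stdlib.h>

struct handle { int id; };

static struct handle *start_trans(void) { return malloc(sizeof(struct handle)); }
static void commit_trans(struct handle *h) { free(h); }

static int set_mode(struct handle *h, unsigned int new_mode)
{
	int own_handle = 0;

	if (h == NULL) {		/* caller passed no transaction */
		h = start_trans();
		if (!h)
			return -12;	/* -ENOMEM */
		own_handle = 1;
	}

	printf("mode %o set inside handle %p\n", new_mode, (void *)h);

	if (own_handle)			/* commit only what we started */
		commit_trans(h);
	return 0;
}

int main(void)
{
	struct handle *h = start_trans();

	set_mode(NULL, 0644);		/* helper creates and commits its own */
	set_mode(h, 0755);		/* caller-owned handle is left open */
	commit_trans(h);
	return 0;
}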
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index a659606dcb9..9289b4357d2 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -1875,7 +1875,6 @@ int dlm_assert_master_handler(struct o2net_msg *msg, u32 len, void *data,
ok:
spin_unlock(&res->spinlock);
}
- spin_unlock(&dlm->spinlock);
// mlog(0, "woo! got an assert_master from node %u!\n",
// assert->node_idx);
@@ -1926,7 +1925,6 @@ ok:
/* master is known, detach if not already detached.
* ensures that only one assert_master call will happen
* on this mle. */
- spin_lock(&dlm->spinlock);
spin_lock(&dlm->master_lock);
rr = atomic_read(&mle->mle_refs.refcount);
@@ -1959,7 +1957,6 @@ ok:
__dlm_put_mle(mle);
}
spin_unlock(&dlm->master_lock);
- spin_unlock(&dlm->spinlock);
} else if (res) {
if (res->owner != assert->node_idx) {
mlog(0, "assert_master from %u, but current "
@@ -1967,6 +1964,7 @@ ok:
res->owner, namelen, name);
}
}
+ spin_unlock(&dlm->spinlock);
done:
ret = 0;
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 278a223aae1..ab207901d32 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -891,6 +891,21 @@ static int ocfs2_query_inode_wipe(struct inode *inode,
/* Do some basic inode verification... */
di = (struct ocfs2_dinode *) di_bh->b_data;
if (!(di->i_flags & cpu_to_le32(OCFS2_ORPHANED_FL))) {
+ /*
+ * Inodes in the orphan dir must have ORPHANED_FL. The only
+ * inodes that come back out of the orphan dir are reflink
+ * targets. A reflink target may be moved out of the orphan
+ * dir between the time we scan the directory and the time we
+ * process it. This would lead to HAS_REFCOUNT_FL being set but
+ * ORPHANED_FL not.
+ */
+ if (di->i_dyn_features & cpu_to_le16(OCFS2_HAS_REFCOUNT_FL)) {
+ mlog(0, "Reflinked inode %llu is no longer orphaned. "
+ "it shouldn't be deleted\n",
+ (unsigned long long)oi->ip_blkno);
+ goto bail;
+ }
+
/* for lack of a better error? */
status = -EEXIST;
mlog(ML_ERROR,
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index ca992d91f51..c983715d8d8 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -872,8 +872,10 @@ static int ocfs2_sync_local_to_main(struct ocfs2_super *osb,
(unsigned long long)la_start_blk,
(unsigned long long)blkno);
- status = ocfs2_free_clusters(handle, main_bm_inode,
- main_bm_bh, blkno, count);
+ status = ocfs2_release_clusters(handle,
+ main_bm_inode,
+ main_bm_bh, blkno,
+ count);
if (status < 0) {
mlog_errno(status);
goto bail;
@@ -984,8 +986,7 @@ static int ocfs2_local_alloc_reserve_for_window(struct ocfs2_super *osb,
}
retry_enospc:
- (*ac)->ac_bits_wanted = osb->local_alloc_bits;
-
+ (*ac)->ac_bits_wanted = osb->local_alloc_default_bits;
status = ocfs2_reserve_cluster_bitmap_bits(osb, *ac);
if (status == -ENOSPC) {
if (ocfs2_recalc_la_window(osb, OCFS2_LA_EVENT_ENOSPC) ==
@@ -1061,6 +1062,7 @@ retry_enospc:
OCFS2_LA_DISABLED)
goto bail;
+ ac->ac_bits_wanted = osb->local_alloc_default_bits;
status = ocfs2_claim_clusters(osb, handle, ac,
osb->local_alloc_bits,
&cluster_off,
diff --git a/fs/ocfs2/locks.c b/fs/ocfs2/locks.c
index 544ac624517..b5cb3ede940 100644
--- a/fs/ocfs2/locks.c
+++ b/fs/ocfs2/locks.c
@@ -133,7 +133,7 @@ int ocfs2_lock(struct file *file, int cmd, struct file_lock *fl)
if (!(fl->fl_flags & FL_POSIX))
return -ENOLCK;
- if (__mandatory_lock(inode))
+ if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK)
return -ENOLCK;
return ocfs2_plock(osb->cconn, OCFS2_I(inode)->ip_blkno, file, cmd, fl);
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index d9cd4e373a5..b1eb50ae409 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -84,7 +84,7 @@ static int ocfs2_prepare_orphan_dir(struct ocfs2_super *osb,
static int ocfs2_orphan_add(struct ocfs2_super *osb,
handle_t *handle,
struct inode *inode,
- struct ocfs2_dinode *fe,
+ struct buffer_head *fe_bh,
char *name,
struct ocfs2_dir_lookup_result *lookup,
struct inode *orphan_dir_inode);
@@ -879,7 +879,7 @@ static int ocfs2_unlink(struct inode *dir,
fe = (struct ocfs2_dinode *) fe_bh->b_data;
if (inode_is_unlinkable(inode)) {
- status = ocfs2_orphan_add(osb, handle, inode, fe, orphan_name,
+ status = ocfs2_orphan_add(osb, handle, inode, fe_bh, orphan_name,
&orphan_insert, orphan_dir);
if (status < 0) {
mlog_errno(status);
@@ -1300,7 +1300,7 @@ static int ocfs2_rename(struct inode *old_dir,
if (S_ISDIR(new_inode->i_mode) ||
(ocfs2_read_links_count(newfe) == 1)) {
status = ocfs2_orphan_add(osb, handle, new_inode,
- newfe, orphan_name,
+ newfe_bh, orphan_name,
&orphan_insert, orphan_dir);
if (status < 0) {
mlog_errno(status);
@@ -1911,7 +1911,7 @@ leave:
static int ocfs2_orphan_add(struct ocfs2_super *osb,
handle_t *handle,
struct inode *inode,
- struct ocfs2_dinode *fe,
+ struct buffer_head *fe_bh,
char *name,
struct ocfs2_dir_lookup_result *lookup,
struct inode *orphan_dir_inode)
@@ -1919,6 +1919,7 @@ static int ocfs2_orphan_add(struct ocfs2_super *osb,
struct buffer_head *orphan_dir_bh = NULL;
int status = 0;
struct ocfs2_dinode *orphan_fe;
+ struct ocfs2_dinode *fe = (struct ocfs2_dinode *) fe_bh->b_data;
mlog_entry("(inode->i_ino = %lu)\n", inode->i_ino);
@@ -1959,6 +1960,21 @@ static int ocfs2_orphan_add(struct ocfs2_super *osb,
goto leave;
}
+ /*
+ * We're going to journal the change of i_flags and i_orphaned_slot.
+ * It's safe anyway, though some callers may duplicate the journaling.
+ * Journaling within the func just makes the logic look more
+ * straightforward.
+ */
+ status = ocfs2_journal_access_di(handle,
+ INODE_CACHE(inode),
+ fe_bh,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (status < 0) {
+ mlog_errno(status);
+ goto leave;
+ }
+
le32_add_cpu(&fe->i_flags, OCFS2_ORPHANED_FL);
/* Record which orphan dir our inode now resides
@@ -1966,6 +1982,8 @@ static int ocfs2_orphan_add(struct ocfs2_super *osb,
* dir to lock. */
fe->i_orphaned_slot = cpu_to_le16(osb->slot_num);
+ ocfs2_journal_dirty(handle, fe_bh);
+
mlog(0, "Inode %llu orphaned in slot %d\n",
(unsigned long long)OCFS2_I(inode)->ip_blkno, osb->slot_num);
@@ -2123,7 +2141,7 @@ int ocfs2_create_inode_in_orphan(struct inode *dir,
}
di = (struct ocfs2_dinode *)new_di_bh->b_data;
- status = ocfs2_orphan_add(osb, handle, inode, di, orphan_name,
+ status = ocfs2_orphan_add(osb, handle, inode, new_di_bh, orphan_name,
&orphan_insert, orphan_dir);
if (status < 0) {
mlog_errno(status);
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 1238b491db9..adf5e2ebc2c 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -763,8 +763,18 @@ static inline unsigned int ocfs2_megabytes_to_clusters(struct super_block *sb,
return megs << (20 - OCFS2_SB(sb)->s_clustersize_bits);
}
-#define ocfs2_set_bit ext2_set_bit
-#define ocfs2_clear_bit ext2_clear_bit
+static inline void _ocfs2_set_bit(unsigned int bit, unsigned long *bitmap)
+{
+ ext2_set_bit(bit, bitmap);
+}
+#define ocfs2_set_bit(bit, addr) _ocfs2_set_bit((bit), (unsigned long *)(addr))
+
+static inline void _ocfs2_clear_bit(unsigned int bit, unsigned long *bitmap)
+{
+ ext2_clear_bit(bit, bitmap);
+}
+#define ocfs2_clear_bit(bit, addr) _ocfs2_clear_bit((bit), (unsigned long *)(addr))
+
#define ocfs2_test_bit ext2_test_bit
#define ocfs2_find_next_zero_bit ext2_find_next_zero_bit
#define ocfs2_find_next_bit ext2_find_next_bit
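A small userspace sketch of why the bare macro became a typed inline plus a casting macro: the (unsigned long *) cast now happens in exactly one place, and every other caller stays type-checked. set_bit_le() and my_set_bit() are hypothetical names, not the ext2/ocfs2 helpers:

#include <stdio.h>

static inline void set_bit_le(unsigned int bit, unsigned long *bitmap)
{
	bitmap[bit / (8 * sizeof(long))] |= 1UL << (bit % (8 * sizeof(long)));
}

static inline void _my_set_bit(unsigned int bit, unsigned long *bitmap)
{
	set_bit_le(bit, bitmap);	/* real code would call ext2_set_bit() */
}
#define my_set_bit(bit, addr) _my_set_bit((bit), (unsigned long *)(addr))

int main(void)
{
	/* an on-disk style bitmap that arrives as raw bytes */
	unsigned char raw[16] __attribute__((aligned(8))) = { 0 };

	my_set_bit(3, raw);		/* the cast lives in the macro, not here */
	printf("raw[0] = 0x%02x\n", raw[0]);
	return 0;
}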
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 9e96921dffd..29405f2ff61 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -4075,6 +4075,7 @@ static int ocfs2_complete_reflink(struct inode *s_inode,
OCFS2_I(t_inode)->ip_dyn_features = OCFS2_I(s_inode)->ip_dyn_features;
spin_unlock(&OCFS2_I(t_inode)->ip_lock);
i_size_write(t_inode, size);
+ t_inode->i_blocks = s_inode->i_blocks;
di->i_xattr_inline_size = s_di->i_xattr_inline_size;
di->i_clusters = s_di->i_clusters;
diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
index c3c60bc3e07..19ba00f2854 100644
--- a/fs/ocfs2/suballoc.c
+++ b/fs/ocfs2/suballoc.c
@@ -95,13 +95,6 @@ static inline int ocfs2_block_group_set_bits(handle_t *handle,
struct buffer_head *group_bh,
unsigned int bit_off,
unsigned int num_bits);
-static inline int ocfs2_block_group_clear_bits(handle_t *handle,
- struct inode *alloc_inode,
- struct ocfs2_group_desc *bg,
- struct buffer_head *group_bh,
- unsigned int bit_off,
- unsigned int num_bits);
-
static int ocfs2_relink_block_group(handle_t *handle,
struct inode *alloc_inode,
struct buffer_head *fe_bh,
@@ -152,7 +145,7 @@ static u32 ocfs2_bits_per_group(struct ocfs2_chain_list *cl)
#define do_error(fmt, ...) \
do{ \
- if (clean_error) \
+ if (resize) \
mlog(ML_ERROR, fmt "\n", ##__VA_ARGS__); \
else \
ocfs2_error(sb, fmt, ##__VA_ARGS__); \
@@ -160,7 +153,7 @@ static u32 ocfs2_bits_per_group(struct ocfs2_chain_list *cl)
static int ocfs2_validate_gd_self(struct super_block *sb,
struct buffer_head *bh,
- int clean_error)
+ int resize)
{
struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
@@ -211,7 +204,7 @@ static int ocfs2_validate_gd_self(struct super_block *sb,
static int ocfs2_validate_gd_parent(struct super_block *sb,
struct ocfs2_dinode *di,
struct buffer_head *bh,
- int clean_error)
+ int resize)
{
unsigned int max_bits;
struct ocfs2_group_desc *gd = (struct ocfs2_group_desc *)bh->b_data;
@@ -233,8 +226,11 @@ static int ocfs2_validate_gd_parent(struct super_block *sb,
return -EINVAL;
}
- if (le16_to_cpu(gd->bg_chain) >=
- le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) {
+ /* In resize, we may meet the case bg_chain == cl_next_free_rec. */
+ if ((le16_to_cpu(gd->bg_chain) >
+ le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) ||
+ ((le16_to_cpu(gd->bg_chain) ==
+ le16_to_cpu(di->id2.i_chain.cl_next_free_rec)) && !resize)) {
do_error("Group descriptor #%llu has bad chain %u",
(unsigned long long)bh->b_blocknr,
le16_to_cpu(gd->bg_chain));
@@ -1975,18 +1971,18 @@ int ocfs2_claim_clusters(struct ocfs2_super *osb,
bits_wanted, cluster_start, num_clusters);
}
-static inline int ocfs2_block_group_clear_bits(handle_t *handle,
- struct inode *alloc_inode,
- struct ocfs2_group_desc *bg,
- struct buffer_head *group_bh,
- unsigned int bit_off,
- unsigned int num_bits)
+static int ocfs2_block_group_clear_bits(handle_t *handle,
+ struct inode *alloc_inode,
+ struct ocfs2_group_desc *bg,
+ struct buffer_head *group_bh,
+ unsigned int bit_off,
+ unsigned int num_bits,
+ void (*undo_fn)(unsigned int bit,
+ unsigned long *bmap))
{
int status;
unsigned int tmp;
- int journal_type = OCFS2_JOURNAL_ACCESS_WRITE;
struct ocfs2_group_desc *undo_bg = NULL;
- int cluster_bitmap = 0;
mlog_entry_void();
@@ -1996,20 +1992,18 @@ static inline int ocfs2_block_group_clear_bits(handle_t *handle,
mlog(0, "off = %u, num = %u\n", bit_off, num_bits);
- if (ocfs2_is_cluster_bitmap(alloc_inode))
- journal_type = OCFS2_JOURNAL_ACCESS_UNDO;
-
+ BUG_ON(undo_fn && !ocfs2_is_cluster_bitmap(alloc_inode));
status = ocfs2_journal_access_gd(handle, INODE_CACHE(alloc_inode),
- group_bh, journal_type);
+ group_bh,
+ undo_fn ?
+ OCFS2_JOURNAL_ACCESS_UNDO :
+ OCFS2_JOURNAL_ACCESS_WRITE);
if (status < 0) {
mlog_errno(status);
goto bail;
}
- if (ocfs2_is_cluster_bitmap(alloc_inode))
- cluster_bitmap = 1;
-
- if (cluster_bitmap) {
+ if (undo_fn) {
jbd_lock_bh_state(group_bh);
undo_bg = (struct ocfs2_group_desc *)
bh2jh(group_bh)->b_committed_data;
@@ -2020,13 +2014,13 @@ static inline int ocfs2_block_group_clear_bits(handle_t *handle,
while(tmp--) {
ocfs2_clear_bit((bit_off + tmp),
(unsigned long *) bg->bg_bitmap);
- if (cluster_bitmap)
- ocfs2_set_bit(bit_off + tmp,
- (unsigned long *) undo_bg->bg_bitmap);
+ if (undo_fn)
+ undo_fn(bit_off + tmp,
+ (unsigned long *) undo_bg->bg_bitmap);
}
le16_add_cpu(&bg->bg_free_bits_count, num_bits);
- if (cluster_bitmap)
+ if (undo_fn)
jbd_unlock_bh_state(group_bh);
status = ocfs2_journal_dirty(handle, group_bh);
@@ -2039,12 +2033,14 @@ bail:
/*
* expects the suballoc inode to already be locked.
*/
-int ocfs2_free_suballoc_bits(handle_t *handle,
- struct inode *alloc_inode,
- struct buffer_head *alloc_bh,
- unsigned int start_bit,
- u64 bg_blkno,
- unsigned int count)
+static int _ocfs2_free_suballoc_bits(handle_t *handle,
+ struct inode *alloc_inode,
+ struct buffer_head *alloc_bh,
+ unsigned int start_bit,
+ u64 bg_blkno,
+ unsigned int count,
+ void (*undo_fn)(unsigned int bit,
+ unsigned long *bitmap))
{
int status = 0;
u32 tmp_used;
@@ -2079,7 +2075,7 @@ int ocfs2_free_suballoc_bits(handle_t *handle,
status = ocfs2_block_group_clear_bits(handle, alloc_inode,
group, group_bh,
- start_bit, count);
+ start_bit, count, undo_fn);
if (status < 0) {
mlog_errno(status);
goto bail;
@@ -2110,6 +2106,17 @@ bail:
return status;
}
+int ocfs2_free_suballoc_bits(handle_t *handle,
+ struct inode *alloc_inode,
+ struct buffer_head *alloc_bh,
+ unsigned int start_bit,
+ u64 bg_blkno,
+ unsigned int count)
+{
+ return _ocfs2_free_suballoc_bits(handle, alloc_inode, alloc_bh,
+ start_bit, bg_blkno, count, NULL);
+}
+
int ocfs2_free_dinode(handle_t *handle,
struct inode *inode_alloc_inode,
struct buffer_head *inode_alloc_bh,
@@ -2123,11 +2130,13 @@ int ocfs2_free_dinode(handle_t *handle,
inode_alloc_bh, bit, bg_blkno, 1);
}
-int ocfs2_free_clusters(handle_t *handle,
- struct inode *bitmap_inode,
- struct buffer_head *bitmap_bh,
- u64 start_blk,
- unsigned int num_clusters)
+static int _ocfs2_free_clusters(handle_t *handle,
+ struct inode *bitmap_inode,
+ struct buffer_head *bitmap_bh,
+ u64 start_blk,
+ unsigned int num_clusters,
+ void (*undo_fn)(unsigned int bit,
+ unsigned long *bitmap))
{
int status;
u16 bg_start_bit;
@@ -2154,9 +2163,9 @@ int ocfs2_free_clusters(handle_t *handle,
mlog(0, "bg_blkno = %llu, bg_start_bit = %u\n",
(unsigned long long)bg_blkno, bg_start_bit);
- status = ocfs2_free_suballoc_bits(handle, bitmap_inode, bitmap_bh,
- bg_start_bit, bg_blkno,
- num_clusters);
+ status = _ocfs2_free_suballoc_bits(handle, bitmap_inode, bitmap_bh,
+ bg_start_bit, bg_blkno,
+ num_clusters, undo_fn);
if (status < 0) {
mlog_errno(status);
goto out;
@@ -2170,6 +2179,32 @@ out:
return status;
}
+int ocfs2_free_clusters(handle_t *handle,
+ struct inode *bitmap_inode,
+ struct buffer_head *bitmap_bh,
+ u64 start_blk,
+ unsigned int num_clusters)
+{
+ return _ocfs2_free_clusters(handle, bitmap_inode, bitmap_bh,
+ start_blk, num_clusters,
+ _ocfs2_set_bit);
+}
+
+/*
+ * Give never-used clusters back to the global bitmap. We don't need
+ * to protect these bits in the undo buffer.
+ */
+int ocfs2_release_clusters(handle_t *handle,
+ struct inode *bitmap_inode,
+ struct buffer_head *bitmap_bh,
+ u64 start_blk,
+ unsigned int num_clusters)
+{
+ return _ocfs2_free_clusters(handle, bitmap_inode, bitmap_bh,
+ start_blk, num_clusters,
+ _ocfs2_clear_bit);
+}
+
static inline void ocfs2_debug_bg(struct ocfs2_group_desc *bg)
{
printk("Block Group:\n");
diff --git a/fs/ocfs2/suballoc.h b/fs/ocfs2/suballoc.h
index fa60723c43e..e0f46df357e 100644
--- a/fs/ocfs2/suballoc.h
+++ b/fs/ocfs2/suballoc.h
@@ -127,6 +127,11 @@ int ocfs2_free_clusters(handle_t *handle,
struct buffer_head *bitmap_bh,
u64 start_blk,
unsigned int num_clusters);
+int ocfs2_release_clusters(handle_t *handle,
+ struct inode *bitmap_inode,
+ struct buffer_head *bitmap_bh,
+ u64 start_blk,
+ unsigned int num_clusters);
static inline u64 ocfs2_which_suballoc_group(u64 block, unsigned int bit)
{
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index d1b0d386f6d..3e7773089b9 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -1622,7 +1622,7 @@ static void ocfs2_xa_block_wipe_namevalue(struct ocfs2_xa_loc *loc)
/* Now tell xh->xh_entries about it */
for (i = 0; i < count; i++) {
offset = le16_to_cpu(xh->xh_entries[i].xe_name_offset);
- if (offset < namevalue_offset)
+ if (offset <= namevalue_offset)
le16_add_cpu(&xh->xh_entries[i].xe_name_offset,
namevalue_size);
}
@@ -6528,13 +6528,11 @@ static int ocfs2_create_empty_xattr_block(struct inode *inode,
int indexed)
{
int ret;
- struct ocfs2_alloc_context *meta_ac;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
- struct ocfs2_xattr_set_ctxt ctxt = {
- .meta_ac = meta_ac,
- };
+ struct ocfs2_xattr_set_ctxt ctxt;
- ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &meta_ac);
+ memset(&ctxt, 0, sizeof(ctxt));
+ ret = ocfs2_reserve_new_metadata_blocks(osb, 1, &ctxt.meta_ac);
if (ret < 0) {
mlog_errno(ret);
return ret;
@@ -6556,7 +6554,7 @@ static int ocfs2_create_empty_xattr_block(struct inode *inode,
ocfs2_commit_trans(osb, ctxt.handle);
out:
- ocfs2_free_alloc_context(meta_ac);
+ ocfs2_free_alloc_context(ctxt.meta_ac);
return ret;
}
diff --git a/fs/proc/base.c b/fs/proc/base.c
index a7310841c83..b1f6e62773d 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -442,12 +442,13 @@ static const struct file_operations proc_lstats_operations = {
unsigned long badness(struct task_struct *p, unsigned long uptime);
static int proc_oom_score(struct task_struct *task, char *buffer)
{
- unsigned long points;
+ unsigned long points = 0;
struct timespec uptime;
do_posix_clock_monotonic_gettime(&uptime);
read_lock(&tasklist_lock);
- points = badness(task->group_leader, uptime.tv_sec);
+ if (pid_alive(task))
+ points = badness(task, uptime.tv_sec);
read_unlock(&tasklist_lock);
return sprintf(buffer, "%lu\n", points);
}
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 183f8ff5f40..096273984c3 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -406,6 +406,7 @@ static int show_smap(struct seq_file *m, void *v)
memset(&mss, 0, sizeof mss);
mss.vma = vma;
+ /* mmap_sem is held in m_start */
if (vma->vm_mm && !is_vm_hugetlb_page(vma))
walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
@@ -552,7 +553,8 @@ const struct file_operations proc_clear_refs_operations = {
};
struct pagemapread {
- u64 __user *out, *end;
+ int pos, len;
+ u64 *buffer;
};
#define PM_ENTRY_BYTES sizeof(u64)
@@ -575,10 +577,8 @@ struct pagemapread {
static int add_to_pagemap(unsigned long addr, u64 pfn,
struct pagemapread *pm)
{
- if (put_user(pfn, pm->out))
- return -EFAULT;
- pm->out++;
- if (pm->out >= pm->end)
+ pm->buffer[pm->pos++] = pfn;
+ if (pm->pos >= pm->len)
return PM_END_OF_BUFFER;
return 0;
}
@@ -720,21 +720,20 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long addr,
* determine which areas of memory are actually mapped and llseek to
* skip over unmapped regions.
*/
+#define PAGEMAP_WALK_SIZE (PMD_SIZE)
static ssize_t pagemap_read(struct file *file, char __user *buf,
size_t count, loff_t *ppos)
{
struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
- struct page **pages, *page;
- unsigned long uaddr, uend;
struct mm_struct *mm;
struct pagemapread pm;
- int pagecount;
int ret = -ESRCH;
struct mm_walk pagemap_walk = {};
unsigned long src;
unsigned long svpfn;
unsigned long start_vaddr;
unsigned long end_vaddr;
+ int copied = 0;
if (!task)
goto out;
@@ -757,35 +756,12 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
if (!mm)
goto out_task;
-
- uaddr = (unsigned long)buf & PAGE_MASK;
- uend = (unsigned long)(buf + count);
- pagecount = (PAGE_ALIGN(uend) - uaddr) / PAGE_SIZE;
- ret = 0;
- if (pagecount == 0)
- goto out_mm;
- pages = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
+ pm.len = PM_ENTRY_BYTES * (PAGEMAP_WALK_SIZE >> PAGE_SHIFT);
+ pm.buffer = kmalloc(pm.len, GFP_TEMPORARY);
ret = -ENOMEM;
- if (!pages)
+ if (!pm.buffer)
goto out_mm;
- down_read(&current->mm->mmap_sem);
- ret = get_user_pages(current, current->mm, uaddr, pagecount,
- 1, 0, pages, NULL);
- up_read(&current->mm->mmap_sem);
-
- if (ret < 0)
- goto out_free;
-
- if (ret != pagecount) {
- pagecount = ret;
- ret = -EFAULT;
- goto out_pages;
- }
-
- pm.out = (u64 __user *)buf;
- pm.end = (u64 __user *)(buf + count);
-
pagemap_walk.pmd_entry = pagemap_pte_range;
pagemap_walk.pte_hole = pagemap_pte_hole;
pagemap_walk.hugetlb_entry = pagemap_hugetlb_range;
@@ -807,23 +783,36 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
* user buffer is tracked in "pm", and the walk
* will stop when we hit the end of the buffer.
*/
- ret = walk_page_range(start_vaddr, end_vaddr, &pagemap_walk);
- if (ret == PM_END_OF_BUFFER)
- ret = 0;
- /* don't need mmap_sem for these, but this looks cleaner */
- *ppos += (char __user *)pm.out - buf;
- if (!ret)
- ret = (char __user *)pm.out - buf;
-
-out_pages:
- for (; pagecount; pagecount--) {
- page = pages[pagecount-1];
- if (!PageReserved(page))
- SetPageDirty(page);
- page_cache_release(page);
+ ret = 0;
+ while (count && (start_vaddr < end_vaddr)) {
+ int len;
+ unsigned long end;
+
+ pm.pos = 0;
+ end = start_vaddr + PAGEMAP_WALK_SIZE;
+ /* overflow ? */
+ if (end < start_vaddr || end > end_vaddr)
+ end = end_vaddr;
+ down_read(&mm->mmap_sem);
+ ret = walk_page_range(start_vaddr, end, &pagemap_walk);
+ up_read(&mm->mmap_sem);
+ start_vaddr = end;
+
+ len = min(count, PM_ENTRY_BYTES * pm.pos);
+ if (copy_to_user(buf, pm.buffer, len) < 0) {
+ ret = -EFAULT;
+ goto out_free;
+ }
+ copied += len;
+ buf += len;
+ count -= len;
}
+ *ppos += copied;
+ if (!ret || ret == PM_END_OF_BUFFER)
+ ret = copied;
+
out_free:
- kfree(pages);
+ kfree(pm.buffer);
out_mm:
mmput(mm);
out_task:
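A userspace sketch of the chunked structure pagemap_read() now uses: walk a bounded window into a small fixed buffer, flush it, then advance, instead of pinning the caller's whole destination up front. WALK_SIZE and the printf flush are stand-ins for PAGEMAP_WALK_SIZE and copy_to_user():

#include <stdio.h>
#include <stdlib.h>

#define WALK_SIZE 8	/* entries per window (stand-in for PMD_SIZE worth) */

int main(void)
{
	unsigned long start = 0, end = 29, copied = 0;
	unsigned long *buffer = malloc(WALK_SIZE * sizeof(*buffer));

	if (!buffer)
		return 1;

	while (start < end) {
		unsigned long chunk_end = start + WALK_SIZE;
		int pos = 0;

		if (chunk_end > end)		/* clamp the final window */
			chunk_end = end;
		while (start < chunk_end)	/* "walk" one window */
			buffer[pos++] = start++;

		/* stand-in for copy_to_user(): flush this window */
		copied += pos;
		printf("flushed %d entries, %lu total\n", pos, copied);
	}
	free(buffer);
	return 0;
}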
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 04bf5d791bd..ab190511bc1 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -1618,10 +1618,8 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
save_mount_options(s, data);
sbi = kzalloc(sizeof(struct reiserfs_sb_info), GFP_KERNEL);
- if (!sbi) {
- errval = -ENOMEM;
- goto error_alloc;
- }
+ if (!sbi)
+ return -ENOMEM;
s->s_fs_info = sbi;
/* Set default values for options: non-aggressive tails, RO on errors */
REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
@@ -1878,12 +1876,12 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
return (0);
error:
- reiserfs_write_unlock(s);
-error_alloc:
if (jinit_done) { /* kill the commit thread, free journal ram */
journal_release_error(NULL, s);
}
+ reiserfs_write_unlock(s);
+
reiserfs_free_bitmap_cache(s);
if (SB_BUFFER_WITH_SB(s))
brelse(SB_BUFFER_WITH_SB(s));
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 4a3c4e44102..de2f82efb15 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -1545,39 +1545,7 @@ static __inline__ void drm_core_dropmap(struct drm_local_map *map)
{
}
-
-static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
-{
- if (size != 0 && nmemb > ULONG_MAX / size)
- return NULL;
-
- if (size * nmemb <= PAGE_SIZE)
- return kcalloc(nmemb, size, GFP_KERNEL);
-
- return __vmalloc(size * nmemb,
- GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
-}
-
-/* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */
-static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
-{
- if (size != 0 && nmemb > ULONG_MAX / size)
- return NULL;
-
- if (size * nmemb <= PAGE_SIZE)
- return kmalloc(nmemb * size, GFP_KERNEL);
-
- return __vmalloc(size * nmemb,
- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
-}
-
-static __inline void drm_free_large(void *ptr)
-{
- if (!is_vmalloc_addr(ptr))
- return kfree(ptr);
-
- vfree(ptr);
-}
+#include "drm_mem_util.h"
/*@}*/
#endif /* __KERNEL__ */
diff --git a/include/drm/drm_mem_util.h b/include/drm/drm_mem_util.h
new file mode 100644
index 00000000000..6bd325fedc8
--- /dev/null
+++ b/include/drm/drm_mem_util.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright © 2008 Intel Corporation
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Jesse Barnes <jbarnes@virtuousgeek.org>
+ *
+ */
+#ifndef _DRM_MEM_UTIL_H_
+#define _DRM_MEM_UTIL_H_
+
+#include <linux/vmalloc.h>
+
+static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
+{
+ if (size != 0 && nmemb > ULONG_MAX / size)
+ return NULL;
+
+ if (size * nmemb <= PAGE_SIZE)
+ return kcalloc(nmemb, size, GFP_KERNEL);
+
+ return __vmalloc(size * nmemb,
+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
+}
+
+/* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */
+static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
+{
+ if (size != 0 && nmemb > ULONG_MAX / size)
+ return NULL;
+
+ if (size * nmemb <= PAGE_SIZE)
+ return kmalloc(nmemb * size, GFP_KERNEL);
+
+ return __vmalloc(size * nmemb,
+ GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
+}
+
+static __inline void drm_free_large(void *ptr)
+{
+ if (!is_vmalloc_addr(ptr))
+ return kfree(ptr);
+
+ vfree(ptr);
+}
+
+#endif
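A minimal userspace illustration of the overflow guard drm_calloc_large() relies on: reject an nmemb * size product that would wrap before choosing an allocator. calloc_large() here is a stand-in built on plain calloc():

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

static void *calloc_large(size_t nmemb, size_t size)
{
	if (size != 0 && nmemb > ULONG_MAX / size)
		return NULL;		/* product would overflow */
	return calloc(nmemb, size);
}

int main(void)
{
	void *ok = calloc_large(4, 16);
	void *bad = calloc_large((size_t)-1, 16);	/* wraps: rejected */

	printf("ok=%p bad=%p\n", ok, bad);
	free(ok);
	return 0;
}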
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 676104b7818..04a6ebc27b9 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -410,6 +410,7 @@
{0x1002, 0x9712, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
+ {0x1002, 0x9715, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
{0, 0, 0}
#define r128_PCI_IDS \
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index e3f1b4a4b60..e929c27ede2 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -115,7 +115,6 @@ struct ttm_backend {
struct ttm_backend_func *func;
};
-#define TTM_PAGE_FLAG_VMALLOC (1 << 0)
#define TTM_PAGE_FLAG_USER (1 << 1)
#define TTM_PAGE_FLAG_USER_DIRTY (1 << 2)
#define TTM_PAGE_FLAG_WRITE (1 << 3)
diff --git a/include/linux/amba/bus.h b/include/linux/amba/bus.h
index 6816be6c3f7..8b103860783 100644
--- a/include/linux/amba/bus.h
+++ b/include/linux/amba/bus.h
@@ -14,6 +14,9 @@
#ifndef ASMARM_AMBA_H
#define ASMARM_AMBA_H
+#include <linux/device.h>
+#include <linux/resource.h>
+
#define AMBA_NR_IRQS 2
struct amba_device {
diff --git a/include/linux/amba/pl061.h b/include/linux/amba/pl061.h
index b4fbd986260..5ddd9ad4b19 100644
--- a/include/linux/amba/pl061.h
+++ b/include/linux/amba/pl061.h
@@ -1,3 +1,5 @@
+#include <linux/types.h>
+
/* platform data for the PL061 GPIO driver */
struct pl061_platform_data {
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 0cf725bdd2a..fc53492b6ad 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -73,6 +73,7 @@ enum clock_event_nofitiers {
* @list: list head for the management code
* @mode: operating mode assigned by the management code
* @next_event: local storage for the next event in oneshot mode
+ * @retries: number of forced programming retries
*/
struct clock_event_device {
const char *name;
@@ -93,6 +94,7 @@ struct clock_event_device {
struct list_head list;
enum clock_event_mode mode;
ktime_t next_event;
+ unsigned long retries;
};
/*
diff --git a/include/linux/ext3_fs.h b/include/linux/ext3_fs.h
index cac84b00666..5f494b46509 100644
--- a/include/linux/ext3_fs.h
+++ b/include/linux/ext3_fs.h
@@ -565,17 +565,17 @@ enum {
static inline int ext3_test_inode_state(struct inode *inode, int bit)
{
- return test_bit(bit, &EXT3_I(inode)->i_state);
+ return test_bit(bit, &EXT3_I(inode)->i_state_flags);
}
static inline void ext3_set_inode_state(struct inode *inode, int bit)
{
- set_bit(bit, &EXT3_I(inode)->i_state);
+ set_bit(bit, &EXT3_I(inode)->i_state_flags);
}
static inline void ext3_clear_inode_state(struct inode *inode, int bit)
{
- clear_bit(bit, &EXT3_I(inode)->i_state);
+ clear_bit(bit, &EXT3_I(inode)->i_state_flags);
}
#else
/* Assume that user mode programs are passing in an ext3fs superblock, not
diff --git a/include/linux/ext3_fs_i.h b/include/linux/ext3_fs_i.h
index 7679acdb519..f42c098aed8 100644
--- a/include/linux/ext3_fs_i.h
+++ b/include/linux/ext3_fs_i.h
@@ -87,7 +87,7 @@ struct ext3_inode_info {
* near to their parent directory's inode.
*/
__u32 i_block_group;
- unsigned long i_state; /* Dynamic state flags for ext3 */
+ unsigned long i_state_flags; /* Dynamic state flags for ext3 */
/* block reservation info */
struct ext3_block_alloc_info *i_block_alloc_info;
diff --git a/include/linux/freezer.h b/include/linux/freezer.h
index 5a361f85cfe..da7e52b099f 100644
--- a/include/linux/freezer.h
+++ b/include/linux/freezer.h
@@ -64,9 +64,12 @@ extern bool freeze_task(struct task_struct *p, bool sig_only);
extern void cancel_freezing(struct task_struct *p);
#ifdef CONFIG_CGROUP_FREEZER
-extern int cgroup_frozen(struct task_struct *task);
+extern int cgroup_freezing_or_frozen(struct task_struct *task);
#else /* !CONFIG_CGROUP_FREEZER */
-static inline int cgroup_frozen(struct task_struct *task) { return 0; }
+static inline int cgroup_freezing_or_frozen(struct task_struct *task)
+{
+ return 0;
+}
#endif /* !CONFIG_CGROUP_FREEZER */
/*
diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
index 7be0c6fbe88..c57db27ac86 100644
--- a/include/linux/fscache-cache.h
+++ b/include/linux/fscache-cache.h
@@ -105,7 +105,7 @@ struct fscache_operation {
/* operation releaser */
fscache_operation_release_t release;
-#ifdef CONFIG_SLOW_WORK_PROC
+#ifdef CONFIG_SLOW_WORK_DEBUG
const char *name; /* operation name */
const char *state; /* operation state */
#define fscache_set_op_name(OP, N) do { (OP)->name = (N); } while(0)
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 71ab79da7e7..26fad187d66 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -112,12 +112,14 @@ struct resource_list {
extern struct resource ioport_resource;
extern struct resource iomem_resource;
+extern struct resource *request_resource_conflict(struct resource *root, struct resource *new);
extern int request_resource(struct resource *root, struct resource *new);
extern int release_resource(struct resource *new);
void release_child_resources(struct resource *new);
extern void reserve_region_with_split(struct resource *root,
resource_size_t start, resource_size_t end,
const char *name);
+extern struct resource *insert_resource_conflict(struct resource *parent, struct resource *new);
extern int insert_resource(struct resource *parent, struct resource *new);
extern void insert_resource_expand_to_fit(struct resource *root, struct resource *new);
extern int allocate_resource(struct resource *root, struct resource *new,
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index d654873aa25..1f7e300094c 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -59,6 +59,7 @@
enum nf_ip6_hook_priorities {
NF_IP6_PRI_FIRST = INT_MIN,
NF_IP6_PRI_CONNTRACK_DEFRAG = -400,
+ NF_IP6_PRI_RAW = -300,
NF_IP6_PRI_SELINUX_FIRST = -225,
NF_IP6_PRI_CONNTRACK = -200,
NF_IP6_PRI_MANGLE = -150,
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 95477038a72..c8e37544040 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -842,13 +842,6 @@ extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];
extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);
-static inline void
-perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
-{
- if (atomic_read(&perf_swevent_enabled[event_id]))
- __perf_sw_event(event_id, nr, nmi, regs, addr);
-}
-
extern void
perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip);
@@ -887,6 +880,20 @@ static inline void perf_fetch_caller_regs(struct pt_regs *regs, int skip)
return perf_arch_fetch_caller_regs(regs, ip, skip);
}
+static inline void
+perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
+{
+ if (atomic_read(&perf_swevent_enabled[event_id])) {
+ struct pt_regs hot_regs;
+
+ if (!regs) {
+ perf_fetch_caller_regs(&hot_regs, 1);
+ regs = &hot_regs;
+ }
+ __perf_sw_event(event_id, nr, nmi, regs, addr);
+ }
+}
+
extern void __perf_event_mmap(struct vm_area_struct *vma);
static inline void perf_event_mmap(struct vm_area_struct *vma)
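A standalone sketch of the shape of the new perf_sw_event() inline: when the caller passes no register snapshot, capture one on the spot and use that instead. struct regs and grab_current_state() are hypothetical stand-ins for pt_regs and perf_fetch_caller_regs():

#include <stdio.h>

struct regs { unsigned long ip; };

/* hypothetical stand-in for perf_fetch_caller_regs() */
static void grab_current_state(struct regs *r)
{
	r->ip = 0x1234;		/* pretend we sampled the calling frame */
}

static void emit_event(unsigned int id, struct regs *regs)
{
	struct regs hot_regs;

	if (!regs) {		/* caller supplied no snapshot */
		grab_current_state(&hot_regs);
		regs = &hot_regs;
	}
	printf("event %u at ip=%#lx\n", id, regs->ip);
}

int main(void)
{
	emit_event(3, NULL);	/* still works without caller-provided regs */
	return 0;
}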
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 3024050c82a..872a98e13d6 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -123,22 +123,11 @@ static inline int rcu_read_lock_held(void)
return lock_is_held(&rcu_lock_map);
}
-/**
- * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section?
- *
- * If CONFIG_PROVE_LOCKING is selected and enabled, returns nonzero iff in
- * an RCU-bh read-side critical section. In absence of CONFIG_PROVE_LOCKING,
- * this assumes we are in an RCU-bh read-side critical section unless it can
- * prove otherwise.
- *
- * Check rcu_scheduler_active to prevent false positives during boot.
+/*
+ * rcu_read_lock_bh_held() is defined out of line to avoid #include-file
+ * hell.
*/
-static inline int rcu_read_lock_bh_held(void)
-{
- if (!debug_lockdep_rcu_enabled())
- return 1;
- return lock_is_held(&rcu_bh_lock_map);
-}
+extern int rcu_read_lock_bh_held(void);
/**
* rcu_read_lock_sched_held - might we be in RCU-sched read-side critical section?
@@ -160,7 +149,7 @@ static inline int rcu_read_lock_sched_held(void)
return 1;
if (debug_locks)
lockdep_opinion = lock_is_held(&rcu_sched_lock_map);
- return lockdep_opinion || preempt_count() != 0;
+ return lockdep_opinion || preempt_count() != 0 || irqs_disabled();
}
#else /* #ifdef CONFIG_PREEMPT */
static inline int rcu_read_lock_sched_held(void)
@@ -191,7 +180,7 @@ static inline int rcu_read_lock_bh_held(void)
#ifdef CONFIG_PREEMPT
static inline int rcu_read_lock_sched_held(void)
{
- return !rcu_scheduler_active || preempt_count() != 0;
+ return !rcu_scheduler_active || preempt_count() != 0 || irqs_disabled();
}
#else /* #ifdef CONFIG_PREEMPT */
static inline int rcu_read_lock_sched_held(void)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 03f816a9b65..124f90cd5a3 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -190,9 +190,6 @@ struct skb_shared_info {
atomic_t dataref;
unsigned short nr_frags;
unsigned short gso_size;
-#ifdef CONFIG_HAS_DMA
- dma_addr_t dma_head;
-#endif
/* Warning: this field is not always filled in (UFO)! */
unsigned short gso_segs;
unsigned short gso_type;
@@ -201,9 +198,6 @@ struct skb_shared_info {
struct sk_buff *frag_list;
struct skb_shared_hwtstamps hwtstamps;
skb_frag_t frags[MAX_SKB_FRAGS];
-#ifdef CONFIG_HAS_DMA
- dma_addr_t dma_maps[MAX_SKB_FRAGS];
-#endif
/* Intermediate layers must ensure that destructor_arg
* remains valid until skb destructor */
void * destructor_arg;
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 7b3aae2052a..354cc5617f8 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -255,6 +255,7 @@ struct ucred {
#define MSG_ERRQUEUE 0x2000 /* Fetch message from error queue */
#define MSG_NOSIGNAL 0x4000 /* Do not generate SIGPIPE */
#define MSG_MORE 0x8000 /* Sender will send more */
+#define MSG_WAITFORONE 0x10000 /* recvmmsg(): block until 1+ packets avail */
#define MSG_EOF MSG_FIN
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index f59604ed0ec..78b4bd3be49 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -49,7 +49,7 @@ struct tracepoint {
void **it_func; \
\
rcu_read_lock_sched_notrace(); \
- it_func = rcu_dereference((tp)->funcs); \
+ it_func = rcu_dereference_sched((tp)->funcs); \
if (it_func) { \
do { \
((void(*)(proto))(*it_func))(args); \
diff --git a/include/pcmcia/ss.h b/include/pcmcia/ss.h
index 32896a77391..2e488b60bc7 100644
--- a/include/pcmcia/ss.h
+++ b/include/pcmcia/ss.h
@@ -277,12 +277,6 @@ extern struct pccard_resource_ops pccard_nonstatic_ops;
#endif
-/* socket drivers are expected to use these callbacks in their .drv struct */
-extern int pcmcia_socket_dev_suspend(struct device *dev);
-extern void pcmcia_socket_dev_early_resume(struct device *dev);
-extern void pcmcia_socket_dev_late_resume(struct device *dev);
-extern int pcmcia_socket_dev_resume(struct device *dev);
-
/* socket drivers use this callback in their IRQ handler */
extern void pcmcia_parse_events(struct pcmcia_socket *socket,
unsigned int events);
diff --git a/kernel/cgroup_freezer.c b/kernel/cgroup_freezer.c
index 59e9ef6aab4..eb3f34d5741 100644
--- a/kernel/cgroup_freezer.c
+++ b/kernel/cgroup_freezer.c
@@ -47,17 +47,20 @@ static inline struct freezer *task_freezer(struct task_struct *task)
struct freezer, css);
}
-int cgroup_frozen(struct task_struct *task)
+int cgroup_freezing_or_frozen(struct task_struct *task)
{
struct freezer *freezer;
enum freezer_state state;
task_lock(task);
freezer = task_freezer(task);
- state = freezer->state;
+ if (!freezer->css.cgroup->parent)
+ state = CGROUP_THAWED; /* root cgroup can't be frozen */
+ else
+ state = freezer->state;
task_unlock(task);
- return state == CGROUP_FROZEN;
+ return (state == CGROUP_FREEZING) || (state == CGROUP_FROZEN);
}
/*
diff --git a/kernel/cred.c b/kernel/cred.c
index 1ed8ca18790..1b1129d0cce 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -364,7 +364,7 @@ struct cred *prepare_usermodehelper_creds(void)
new = kmem_cache_alloc(cred_jar, GFP_ATOMIC);
if (!new)
- return NULL;
+ goto free_tgcred;
kdebug("prepare_usermodehelper_creds() alloc %p", new);
@@ -397,6 +397,10 @@ struct cred *prepare_usermodehelper_creds(void)
error:
put_cred(new);
+free_tgcred:
+#ifdef CONFIG_KEYS
+ kfree(tgcred);
+#endif
return NULL;
}
diff --git a/kernel/early_res.c b/kernel/early_res.c
index 3cb2c661bb7..31aa9332ef3 100644
--- a/kernel/early_res.c
+++ b/kernel/early_res.c
@@ -333,6 +333,12 @@ void __init free_early_partial(u64 start, u64 end)
struct early_res *r;
int i;
+ if (start == end)
+ return;
+
+ if (WARN_ONCE(start > end, " wrong range [%#llx, %#llx]\n", start, end))
+ return;
+
try_next:
i = find_overlapped_early(start, end);
if (i >= max_early_res)
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index 42ec11b2af8..b7091d5ca2f 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -359,6 +359,23 @@ static inline void mask_ack_irq(struct irq_desc *desc, int irq)
if (desc->chip->ack)
desc->chip->ack(irq);
}
+ desc->status |= IRQ_MASKED;
+}
+
+static inline void mask_irq(struct irq_desc *desc, int irq)
+{
+ if (desc->chip->mask) {
+ desc->chip->mask(irq);
+ desc->status |= IRQ_MASKED;
+ }
+}
+
+static inline void unmask_irq(struct irq_desc *desc, int irq)
+{
+ if (desc->chip->unmask) {
+ desc->chip->unmask(irq);
+ desc->status &= ~IRQ_MASKED;
+ }
}
/*
@@ -484,10 +501,8 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc)
raw_spin_lock(&desc->lock);
desc->status &= ~IRQ_INPROGRESS;
- if (unlikely(desc->status & IRQ_ONESHOT))
- desc->status |= IRQ_MASKED;
- else if (!(desc->status & IRQ_DISABLED) && desc->chip->unmask)
- desc->chip->unmask(irq);
+ if (!(desc->status & (IRQ_DISABLED | IRQ_ONESHOT)))
+ unmask_irq(desc, irq);
out_unlock:
raw_spin_unlock(&desc->lock);
}
@@ -524,8 +539,7 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc)
action = desc->action;
if (unlikely(!action || (desc->status & IRQ_DISABLED))) {
desc->status |= IRQ_PENDING;
- if (desc->chip->mask)
- desc->chip->mask(irq);
+ mask_irq(desc, irq);
goto out;
}
@@ -593,7 +607,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
irqreturn_t action_ret;
if (unlikely(!action)) {
- desc->chip->mask(irq);
+ mask_irq(desc, irq);
goto out_unlock;
}
@@ -605,8 +619,7 @@ handle_edge_irq(unsigned int irq, struct irq_desc *desc)
if (unlikely((desc->status &
(IRQ_PENDING | IRQ_MASKED | IRQ_DISABLED)) ==
(IRQ_PENDING | IRQ_MASKED))) {
- desc->chip->unmask(irq);
- desc->status &= ~IRQ_MASKED;
+ unmask_irq(desc, irq);
}
desc->status &= ~IRQ_PENDING;
@@ -716,7 +729,7 @@ set_irq_chip_and_handler_name(unsigned int irq, struct irq_chip *chip,
__set_irq_handler(irq, handle, 0, name);
}
-void __init set_irq_noprobe(unsigned int irq)
+void set_irq_noprobe(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
unsigned long flags;
@@ -731,7 +744,7 @@ void __init set_irq_noprobe(unsigned int irq)
raw_spin_unlock_irqrestore(&desc->lock, flags);
}
-void __init set_irq_probe(unsigned int irq)
+void set_irq_probe(unsigned int irq)
{
struct irq_desc *desc = irq_to_desc(irq);
unsigned long flags;
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index eb6078ca60c..398fda155f6 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -382,6 +382,7 @@ int can_request_irq(unsigned int irq, unsigned long irqflags)
{
struct irq_desc *desc = irq_to_desc(irq);
struct irqaction *action;
+ unsigned long flags;
if (!desc)
return 0;
@@ -389,11 +390,14 @@ int can_request_irq(unsigned int irq, unsigned long irqflags)
if (desc->status & IRQ_NOREQUEST)
return 0;
+ raw_spin_lock_irqsave(&desc->lock, flags);
action = desc->action;
if (action)
if (irqflags & action->flags & IRQF_SHARED)
action = NULL;
+ raw_spin_unlock_irqrestore(&desc->lock, flags);
+
return !action;
}
@@ -483,8 +487,26 @@ static int irq_wait_for_interrupt(struct irqaction *action)
*/
static void irq_finalize_oneshot(unsigned int irq, struct irq_desc *desc)
{
+again:
chip_bus_lock(irq, desc);
raw_spin_lock_irq(&desc->lock);
+
+ /*
+ * Implausible though it may be, we need to protect ourselves against
+ * the following scenario:
+ *
+ * The thread can finish before the hard interrupt handler on the
+ * other CPU does. If we unmask the irq line then the interrupt can
+ * come in again, mask the line and leave due to IRQ_INPROGRESS, and
+ * the irq line stays masked forever.
+ */
+ if (unlikely(desc->status & IRQ_INPROGRESS)) {
+ raw_spin_unlock_irq(&desc->lock);
+ chip_bus_sync_unlock(irq, desc);
+ cpu_relax();
+ goto again;
+ }
+
if (!(desc->status & IRQ_DISABLED) && (desc->status & IRQ_MASKED)) {
desc->status &= ~IRQ_MASKED;
desc->chip->unmask(irq);
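A userspace sketch, built on pthreads, of the retry structure added to irq_finalize_oneshot(): if the hard handler is still marked in progress, drop the lock, yield, and try again before unmasking. All names are stand-ins for the irq_desc fields:

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int in_progress = 1;	/* stand-in for IRQ_INPROGRESS */
static int masked = 1;		/* stand-in for IRQ_MASKED */

static void finalize_oneshot(void)
{
again:
	pthread_mutex_lock(&lock);
	if (in_progress) {	/* hard handler still running elsewhere */
		pthread_mutex_unlock(&lock);
		sched_yield();	/* cpu_relax() stand-in */
		goto again;
	}
	masked = 0;		/* now it is safe to unmask */
	pthread_mutex_unlock(&lock);
}

static void *hard_handler(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	in_progress = 0;	/* the handler eventually finishes */
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, hard_handler, NULL);
	finalize_oneshot();
	pthread_join(t, NULL);
	printf("masked = %d\n", masked);
	return 0;
}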
diff --git a/kernel/kgdb.c b/kernel/kgdb.c
index 761fdd2b303..11f3515ca83 100644
--- a/kernel/kgdb.c
+++ b/kernel/kgdb.c
@@ -69,9 +69,16 @@ struct kgdb_state {
struct pt_regs *linux_regs;
};
+/* Exception state values */
+#define DCPU_WANT_MASTER 0x1 /* Waiting to become a master kgdb cpu */
+#define DCPU_NEXT_MASTER 0x2 /* Transition from one master cpu to another */
+#define DCPU_IS_SLAVE 0x4 /* Slave cpu enter exception */
+#define DCPU_SSTEP 0x8 /* CPU is single stepping */
+
static struct debuggerinfo_struct {
void *debuggerinfo;
struct task_struct *task;
+ int exception_state;
} kgdb_info[NR_CPUS];
/**
@@ -391,27 +398,22 @@ int kgdb_mem2hex(char *mem, char *buf, int count)
/*
* Copy the binary array pointed to by buf into mem. Fix $, #, and
- * 0x7d escaped with 0x7d. Return a pointer to the character after
- * the last byte written.
+ * 0x7d escaped with 0x7d. Return -EFAULT on failure or 0 on success.
+ * The input buf is overwitten with the result to write to mem.
*/
static int kgdb_ebin2mem(char *buf, char *mem, int count)
{
- int err = 0;
- char c;
+ int size = 0;
+ char *c = buf;
while (count-- > 0) {
- c = *buf++;
- if (c == 0x7d)
- c = *buf++ ^ 0x20;
-
- err = probe_kernel_write(mem, &c, 1);
- if (err)
- break;
-
- mem++;
+ c[size] = *buf++;
+ if (c[size] == 0x7d)
+ c[size] = *buf++ ^ 0x20;
+ size++;
}
- return err;
+ return probe_kernel_write(mem, c, size);
}
/*
@@ -563,49 +565,6 @@ static struct task_struct *getthread(struct pt_regs *regs, int tid)
}
/*
- * CPU debug state control:
- */
-
-#ifdef CONFIG_SMP
-static void kgdb_wait(struct pt_regs *regs)
-{
- unsigned long flags;
- int cpu;
-
- local_irq_save(flags);
- cpu = raw_smp_processor_id();
- kgdb_info[cpu].debuggerinfo = regs;
- kgdb_info[cpu].task = current;
- /*
- * Make sure the above info reaches the primary CPU before
- * our cpu_in_kgdb[] flag setting does:
- */
- smp_wmb();
- atomic_set(&cpu_in_kgdb[cpu], 1);
-
- /* Disable any cpu specific hw breakpoints */
- kgdb_disable_hw_debug(regs);
-
- /* Wait till primary CPU is done with debugging */
- while (atomic_read(&passive_cpu_wait[cpu]))
- cpu_relax();
-
- kgdb_info[cpu].debuggerinfo = NULL;
- kgdb_info[cpu].task = NULL;
-
- /* fix up hardware debug registers on local cpu */
- if (arch_kgdb_ops.correct_hw_break)
- arch_kgdb_ops.correct_hw_break();
-
- /* Signal the primary CPU that we are done: */
- atomic_set(&cpu_in_kgdb[cpu], 0);
- touch_softlockup_watchdog_sync();
- clocksource_touch_watchdog();
- local_irq_restore(flags);
-}
-#endif
-
-/*
* Some architectures need cache flushes when we set/clear a
* breakpoint:
*/
@@ -1400,34 +1359,13 @@ static int kgdb_reenter_check(struct kgdb_state *ks)
return 1;
}
-/*
- * kgdb_handle_exception() - main entry point from a kernel exception
- *
- * Locking hierarchy:
- * interface locks, if any (begin_session)
- * kgdb lock (kgdb_active)
- */
-int
-kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
+static int kgdb_cpu_enter(struct kgdb_state *ks, struct pt_regs *regs)
{
- struct kgdb_state kgdb_var;
- struct kgdb_state *ks = &kgdb_var;
unsigned long flags;
int sstep_tries = 100;
int error = 0;
int i, cpu;
-
- ks->cpu = raw_smp_processor_id();
- ks->ex_vector = evector;
- ks->signo = signo;
- ks->ex_vector = evector;
- ks->err_code = ecode;
- ks->kgdb_usethreadid = 0;
- ks->linux_regs = regs;
-
- if (kgdb_reenter_check(ks))
- return 0; /* Ouch, double exception ! */
-
+ int trace_on = 0;
acquirelock:
/*
* Interrupts will be restored by the 'trap return' code, except when
@@ -1435,13 +1373,43 @@ acquirelock:
*/
local_irq_save(flags);
- cpu = raw_smp_processor_id();
+ cpu = ks->cpu;
+ kgdb_info[cpu].debuggerinfo = regs;
+ kgdb_info[cpu].task = current;
+ /*
+ * Make sure the above info reaches the primary CPU before
+ * our cpu_in_kgdb[] flag setting does:
+ */
+ atomic_inc(&cpu_in_kgdb[cpu]);
/*
- * Acquire the kgdb_active lock:
+ * The CPU will loop if it is a slave or requests to become a kgdb
+ * master cpu and acquire the kgdb_active lock:
*/
- while (atomic_cmpxchg(&kgdb_active, -1, cpu) != -1)
+ while (1) {
+ if (kgdb_info[cpu].exception_state & DCPU_WANT_MASTER) {
+ if (atomic_cmpxchg(&kgdb_active, -1, cpu) == cpu)
+ break;
+ } else if (kgdb_info[cpu].exception_state & DCPU_IS_SLAVE) {
+ if (!atomic_read(&passive_cpu_wait[cpu]))
+ goto return_normal;
+ } else {
+return_normal:
+ /* Return to normal operation by executing any
+ * hw breakpoint fixup.
+ */
+ if (arch_kgdb_ops.correct_hw_break)
+ arch_kgdb_ops.correct_hw_break();
+ if (trace_on)
+ tracing_on();
+ atomic_dec(&cpu_in_kgdb[cpu]);
+ touch_softlockup_watchdog_sync();
+ clocksource_touch_watchdog();
+ local_irq_restore(flags);
+ return 0;
+ }
cpu_relax();
+ }
/*
* For single stepping, try to only enter on the processor
@@ -1475,9 +1443,6 @@ acquirelock:
if (kgdb_io_ops->pre_exception)
kgdb_io_ops->pre_exception();
- kgdb_info[ks->cpu].debuggerinfo = ks->linux_regs;
- kgdb_info[ks->cpu].task = current;
-
kgdb_disable_hw_debug(ks->linux_regs);
/*
@@ -1486,15 +1451,9 @@ acquirelock:
*/
if (!kgdb_single_step) {
for (i = 0; i < NR_CPUS; i++)
- atomic_set(&passive_cpu_wait[i], 1);
+ atomic_inc(&passive_cpu_wait[i]);
}
- /*
- * spin_lock code is good enough as a barrier so we don't
- * need one here:
- */
- atomic_set(&cpu_in_kgdb[ks->cpu], 1);
-
#ifdef CONFIG_SMP
/* Signal the other CPUs to enter kgdb_wait() */
if ((!kgdb_single_step) && kgdb_do_roundup)
@@ -1518,6 +1477,9 @@ acquirelock:
kgdb_single_step = 0;
kgdb_contthread = current;
exception_level = 0;
+ trace_on = tracing_is_on();
+ if (trace_on)
+ tracing_off();
/* Talk to debugger with gdbserial protocol */
error = gdb_serial_stub(ks);
@@ -1526,13 +1488,11 @@ acquirelock:
if (kgdb_io_ops->post_exception)
kgdb_io_ops->post_exception();
- kgdb_info[ks->cpu].debuggerinfo = NULL;
- kgdb_info[ks->cpu].task = NULL;
- atomic_set(&cpu_in_kgdb[ks->cpu], 0);
+ atomic_dec(&cpu_in_kgdb[ks->cpu]);
if (!kgdb_single_step) {
for (i = NR_CPUS-1; i >= 0; i--)
- atomic_set(&passive_cpu_wait[i], 0);
+ atomic_dec(&passive_cpu_wait[i]);
/*
* Wait till all the CPUs have quit
* from the debugger.
@@ -1551,6 +1511,8 @@ kgdb_restore:
else
kgdb_sstep_pid = 0;
}
+ if (trace_on)
+ tracing_on();
/* Free kgdb_active */
atomic_set(&kgdb_active, -1);
touch_softlockup_watchdog_sync();
@@ -1560,13 +1522,52 @@ kgdb_restore:
return error;
}
+/*
+ * kgdb_handle_exception() - main entry point from a kernel exception
+ *
+ * Locking hierarchy:
+ * interface locks, if any (begin_session)
+ * kgdb lock (kgdb_active)
+ */
+int
+kgdb_handle_exception(int evector, int signo, int ecode, struct pt_regs *regs)
+{
+ struct kgdb_state kgdb_var;
+ struct kgdb_state *ks = &kgdb_var;
+ int ret;
+
+ ks->cpu = raw_smp_processor_id();
+ ks->ex_vector = evector;
+ ks->signo = signo;
+ ks->ex_vector = evector;
+ ks->err_code = ecode;
+ ks->kgdb_usethreadid = 0;
+ ks->linux_regs = regs;
+
+ if (kgdb_reenter_check(ks))
+ return 0; /* Ouch, double exception ! */
+ kgdb_info[ks->cpu].exception_state |= DCPU_WANT_MASTER;
+ ret = kgdb_cpu_enter(ks, regs);
+ kgdb_info[ks->cpu].exception_state &= ~DCPU_WANT_MASTER;
+ return ret;
+}
+
int kgdb_nmicallback(int cpu, void *regs)
{
#ifdef CONFIG_SMP
+ struct kgdb_state kgdb_var;
+ struct kgdb_state *ks = &kgdb_var;
+
+ memset(ks, 0, sizeof(struct kgdb_state));
+ ks->cpu = cpu;
+ ks->linux_regs = regs;
+
if (!atomic_read(&cpu_in_kgdb[cpu]) &&
- atomic_read(&kgdb_active) != cpu &&
- atomic_read(&cpu_in_kgdb[atomic_read(&kgdb_active)])) {
- kgdb_wait((struct pt_regs *)regs);
+ atomic_read(&kgdb_active) != -1 &&
+ atomic_read(&kgdb_active) != cpu) {
+ kgdb_info[cpu].exception_state |= DCPU_IS_SLAVE;
+ kgdb_cpu_enter(ks, regs);
+ kgdb_info[cpu].exception_state &= ~DCPU_IS_SLAVE;
return 0;
}
#endif
@@ -1742,11 +1743,11 @@ EXPORT_SYMBOL_GPL(kgdb_unregister_io_module);
*/
void kgdb_breakpoint(void)
{
- atomic_set(&kgdb_setting_breakpoint, 1);
+ atomic_inc(&kgdb_setting_breakpoint);
wmb(); /* Sync point before breakpoint */
arch_kgdb_breakpoint();
wmb(); /* Sync point after breakpoint */
- atomic_set(&kgdb_setting_breakpoint, 0);
+ atomic_dec(&kgdb_setting_breakpoint);
}
EXPORT_SYMBOL_GPL(kgdb_breakpoint);
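A small sketch, using plain C11 atomics, of why kgdb_setting_breakpoint became a counter: with atomic_set(1)/atomic_set(0) two overlapping users clear each other's state, while inc/dec keeps the flag raised until the last user leaves:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int setting_breakpoint;

static void breakpoint_enter(void) { atomic_fetch_add(&setting_breakpoint, 1); }
static void breakpoint_leave(void) { atomic_fetch_sub(&setting_breakpoint, 1); }

int main(void)
{
	breakpoint_enter();		/* first user */
	breakpoint_enter();		/* overlapping second user */
	breakpoint_leave();		/* first user done ... */
	printf("flag still raised: %d\n", atomic_load(&setting_breakpoint) != 0);
	breakpoint_leave();
	printf("flag still raised: %d\n", atomic_load(&setting_breakpoint) != 0);
	return 0;
}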
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 574ee58a304..681af806d76 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1164,11 +1164,9 @@ void perf_event_task_sched_out(struct task_struct *task,
struct perf_event_context *ctx = task->perf_event_ctxp;
struct perf_event_context *next_ctx;
struct perf_event_context *parent;
- struct pt_regs *regs;
int do_switch = 1;
- regs = task_pt_regs(task);
- perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, regs, 0);
+ perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);
if (likely(!ctx || !cpuctx->task_ctx))
return;
@@ -2786,12 +2784,11 @@ __weak struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
return NULL;
}
-#ifdef CONFIG_EVENT_TRACING
__weak
void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
{
}
-#endif
+
/*
* Output
@@ -3378,15 +3375,23 @@ static void perf_event_task_output(struct perf_event *event,
struct perf_task_event *task_event)
{
struct perf_output_handle handle;
- int size;
struct task_struct *task = task_event->task;
- int ret;
+ unsigned long flags;
+ int size, ret;
+
+ /*
+ * If this CPU attempts to acquire an rq lock held by a CPU spinning
+ * in perf_output_lock() from interrupt context, it's game over.
+ */
+ local_irq_save(flags);
size = task_event->event_id.header.size;
ret = perf_output_begin(&handle, event, size, 0, 0);
- if (ret)
+ if (ret) {
+ local_irq_restore(flags);
return;
+ }
task_event->event_id.pid = perf_event_pid(event, task);
task_event->event_id.ppid = perf_event_pid(event, current);
@@ -3397,6 +3402,7 @@ static void perf_event_task_output(struct perf_event *event,
perf_output_put(&handle, task_event->event_id);
perf_output_end(&handle);
+ local_irq_restore(flags);
}
static int perf_event_task_match(struct perf_event *event)
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 1a22dfd42df..bc7704b3a44 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -1061,9 +1061,9 @@ static void check_thread_timers(struct task_struct *tsk,
}
}
-static void stop_process_timers(struct task_struct *tsk)
+static void stop_process_timers(struct signal_struct *sig)
{
- struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
+ struct thread_group_cputimer *cputimer = &sig->cputimer;
unsigned long flags;
if (!cputimer->running)
@@ -1072,6 +1072,10 @@ static void stop_process_timers(struct task_struct *tsk)
spin_lock_irqsave(&cputimer->lock, flags);
cputimer->running = 0;
spin_unlock_irqrestore(&cputimer->lock, flags);
+
+ sig->cputime_expires.prof_exp = cputime_zero;
+ sig->cputime_expires.virt_exp = cputime_zero;
+ sig->cputime_expires.sched_exp = 0;
}
static u32 onecputick;
@@ -1133,7 +1137,7 @@ static void check_process_timers(struct task_struct *tsk,
list_empty(&timers[CPUCLOCK_VIRT]) &&
cputime_eq(sig->it[CPUCLOCK_VIRT].expires, cputime_zero) &&
list_empty(&timers[CPUCLOCK_SCHED])) {
- stop_process_timers(tsk);
+ stop_process_timers(sig);
return;
}
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 5ade1bdcf36..71ae29052ab 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -88,12 +88,11 @@ static int try_to_freeze_tasks(bool sig_only)
printk(KERN_ERR "Freezing of tasks failed after %d.%02d seconds "
"(%d tasks refusing to freeze):\n",
elapsed_csecs / 100, elapsed_csecs % 100, todo);
- show_state();
read_lock(&tasklist_lock);
do_each_thread(g, p) {
task_lock(p);
if (freezing(p) && !freezer_should_skip(p))
- printk(KERN_ERR " %s\n", p->comm);
+ sched_show_task(p);
cancel_freezing(p);
task_unlock(p);
} while_each_thread(g, p);
@@ -145,7 +144,7 @@ static void thaw_tasks(bool nosig_only)
if (nosig_only && should_send_signal(p))
continue;
- if (cgroup_frozen(p))
+ if (cgroup_freezing_or_frozen(p))
continue;
thaw_process(p);
diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
index f1125c1a632..63fe2543398 100644
--- a/kernel/rcupdate.c
+++ b/kernel/rcupdate.c
@@ -45,6 +45,7 @@
#include <linux/mutex.h>
#include <linux/module.h>
#include <linux/kernel_stat.h>
+#include <linux/hardirq.h>
#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key rcu_lock_key;
@@ -66,6 +67,28 @@ EXPORT_SYMBOL_GPL(rcu_sched_lock_map);
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+
+/**
+ * rcu_read_lock_bh_held - might we be in RCU-bh read-side critical section?
+ *
+ * Check for bottom half being disabled, which covers both the
+ * CONFIG_PROVE_RCU and !CONFIG_PROVE_RCU cases. Note that if someone uses
+ * rcu_read_lock_bh(), but then later enables BH, lockdep (if enabled)
+ * will show the situation.
+ *
+ * Check debug_lockdep_rcu_enabled() to prevent false positives during boot.
+ */
+int rcu_read_lock_bh_held(void)
+{
+ if (!debug_lockdep_rcu_enabled())
+ return 1;
+ return in_softirq();
+}
+EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
+
+#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+
/*
* This function is invoked towards the end of the scheduler's initialization
* process. Before this is called, the idle task might contain
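
The new rcu_read_lock_bh_held() above is meant as a lockdep-style predicate for code that dereferences RCU-bh protected pointers. A minimal sketch of a hypothetical caller, assuming the lockdep-checked rcu_dereference_check() API available in this tree; the data structure and names below are illustrative, not part of the patch:

struct my_entry {
        int val;
};

/* RCU-bh protected pointer, published elsewhere with rcu_assign_pointer(). */
static struct my_entry *my_list_head;

static int peek_first_value(void)
{
        struct my_entry *e;
        int val = 0;

        rcu_read_lock_bh();
        /* Under CONFIG_PROVE_RCU this complains if BH is not really disabled. */
        e = rcu_dereference_check(my_list_head, rcu_read_lock_bh_held());
        if (e)
                val = e->val;
        rcu_read_unlock_bh();
        return val;
}
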
diff --git a/kernel/resource.c b/kernel/resource.c
index 2d5be5d9bf5..9c358e26353 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -219,19 +219,34 @@ void release_child_resources(struct resource *r)
}
/**
- * request_resource - request and reserve an I/O or memory resource
+ * request_resource_conflict - request and reserve an I/O or memory resource
* @root: root resource descriptor
* @new: resource descriptor desired by caller
*
- * Returns 0 for success, negative error code on error.
+ * Returns NULL for success, conflict resource on error.
*/
-int request_resource(struct resource *root, struct resource *new)
+struct resource *request_resource_conflict(struct resource *root, struct resource *new)
{
struct resource *conflict;
write_lock(&resource_lock);
conflict = __request_resource(root, new);
write_unlock(&resource_lock);
+ return conflict;
+}
+
+/**
+ * request_resource - request and reserve an I/O or memory resource
+ * @root: root resource descriptor
+ * @new: resource descriptor desired by caller
+ *
+ * Returns 0 for success, negative error code on error.
+ */
+int request_resource(struct resource *root, struct resource *new)
+{
+ struct resource *conflict;
+
+ conflict = request_resource_conflict(root, new);
return conflict ? -EBUSY : 0;
}
@@ -474,25 +489,40 @@ static struct resource * __insert_resource(struct resource *parent, struct resou
}
/**
- * insert_resource - Inserts a resource in the resource tree
+ * insert_resource_conflict - Inserts a resource in the resource tree
* @parent: parent of the new resource
* @new: new resource to insert
*
- * Returns 0 on success, -EBUSY if the resource can't be inserted.
+ * Returns NULL on success, conflict resource if the resource can't be inserted.
*
- * This function is equivalent to request_resource when no conflict
+ * This function is equivalent to request_resource_conflict when no conflict
* happens. If a conflict happens, and the conflicting resources
* entirely fit within the range of the new resource, then the new
* resource is inserted and the conflicting resources become children of
* the new resource.
*/
-int insert_resource(struct resource *parent, struct resource *new)
+struct resource *insert_resource_conflict(struct resource *parent, struct resource *new)
{
struct resource *conflict;
write_lock(&resource_lock);
conflict = __insert_resource(parent, new);
write_unlock(&resource_lock);
+ return conflict;
+}
+
+/**
+ * insert_resource - Inserts a resource in the resource tree
+ * @parent: parent of the new resource
+ * @new: new resource to insert
+ *
+ * Returns 0 on success, -EBUSY if the resource can't be inserted.
+ */
+int insert_resource(struct resource *parent, struct resource *new)
+{
+ struct resource *conflict;
+
+ conflict = insert_resource_conflict(parent, new);
return conflict ? -EBUSY : 0;
}
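
The *_conflict variants above let a caller see which resource it collided with instead of only getting -EBUSY back. A hedged sketch of a hypothetical caller; the window name, logging and %pR formatting are illustrative only:

static int claim_device_window(struct resource *win)
{
        struct resource *conflict;

        conflict = request_resource_conflict(&iomem_resource, win);
        if (conflict) {
                printk(KERN_WARNING "window %pR conflicts with %s %pR\n",
                       win, conflict->name ? conflict->name : "<unnamed>",
                       conflict);
                return -EBUSY;
        }
        return 0;
}
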
diff --git a/kernel/sched.c b/kernel/sched.c
index 9ab3cd7858d..528a10592c1 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2650,7 +2650,7 @@ void wake_up_new_task(struct task_struct *p, unsigned long clone_flags)
{
unsigned long flags;
struct rq *rq;
- int cpu = get_cpu();
+ int cpu __maybe_unused = get_cpu();
#ifdef CONFIG_SMP
/*
@@ -4902,7 +4902,9 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
int ret;
cpumask_var_t mask;
- if (len < cpumask_size())
+ if (len < nr_cpu_ids)
+ return -EINVAL;
+ if (len & (sizeof(unsigned long)-1))
return -EINVAL;
if (!alloc_cpumask_var(&mask, GFP_KERNEL))
@@ -4910,10 +4912,12 @@ SYSCALL_DEFINE3(sched_getaffinity, pid_t, pid, unsigned int, len,
ret = sched_getaffinity(pid, mask);
if (ret == 0) {
- if (copy_to_user(user_mask_ptr, mask, cpumask_size()))
+ size_t retlen = min_t(size_t, len, cpumask_size());
+
+ if (copy_to_user(user_mask_ptr, mask, retlen))
ret = -EFAULT;
else
- ret = cpumask_size();
+ ret = retlen;
}
free_cpumask_var(mask);
@@ -5383,7 +5387,7 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
get_task_struct(mt);
task_rq_unlock(rq, &flags);
- wake_up_process(rq->migration_thread);
+ wake_up_process(mt);
put_task_struct(mt);
wait_for_completion(&req.done);
tlb_migrate_finish(p->mm);
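
The sched_getaffinity() hunk above tightens validation of the user buffer (it must be large enough and a multiple of sizeof(unsigned long)) and returns the number of bytes actually copied rather than always cpumask_size(). A hedged userspace sketch of calling the raw syscall under those rules; the 128-word buffer size is an arbitrary assumption:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
        /* Whole unsigned longs, sized generously for the system's CPU count. */
        unsigned long mask[128];
        long copied = syscall(SYS_sched_getaffinity, 0, sizeof(mask), mask);

        if (copied < 0) {
                perror("sched_getaffinity");
                return 1;
        }
        printf("kernel copied %ld bytes of the affinity mask\n", copied);
        return 0;
}
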
diff --git a/kernel/sched_debug.c b/kernel/sched_debug.c
index 67f95aada4b..9b49db14403 100644
--- a/kernel/sched_debug.c
+++ b/kernel/sched_debug.c
@@ -518,8 +518,4 @@ void proc_sched_set_task(struct task_struct *p)
p->se.nr_wakeups_idle = 0;
p->sched_info.bkl_count = 0;
#endif
- p->se.sum_exec_runtime = 0;
- p->se.prev_sum_exec_runtime = 0;
- p->nvcsw = 0;
- p->nivcsw = 0;
}
diff --git a/kernel/slow-work.c b/kernel/slow-work.c
index 7494bbf5a27..7d3f4fa9ef4 100644
--- a/kernel/slow-work.c
+++ b/kernel/slow-work.c
@@ -637,7 +637,7 @@ int delayed_slow_work_enqueue(struct delayed_slow_work *dwork,
goto cancelled;
/* the timer holds a reference whilst it is pending */
- ret = work->ops->get_ref(work);
+ ret = slow_work_get_ref(work);
if (ret < 0)
goto cant_get_ref;
diff --git a/kernel/slow-work.h b/kernel/slow-work.h
index 321f3c59d73..a29ebd1ef41 100644
--- a/kernel/slow-work.h
+++ b/kernel/slow-work.h
@@ -43,28 +43,28 @@ extern void slow_work_new_thread_desc(struct slow_work *, struct seq_file *);
*/
static inline void slow_work_set_thread_pid(int id, pid_t pid)
{
-#ifdef CONFIG_SLOW_WORK_PROC
+#ifdef CONFIG_SLOW_WORK_DEBUG
slow_work_pids[id] = pid;
#endif
}
static inline void slow_work_mark_time(struct slow_work *work)
{
-#ifdef CONFIG_SLOW_WORK_PROC
+#ifdef CONFIG_SLOW_WORK_DEBUG
work->mark = CURRENT_TIME;
#endif
}
static inline void slow_work_begin_exec(int id, struct slow_work *work)
{
-#ifdef CONFIG_SLOW_WORK_PROC
+#ifdef CONFIG_SLOW_WORK_DEBUG
slow_work_execs[id] = work;
#endif
}
static inline void slow_work_end_exec(int id, struct slow_work *work)
{
-#ifdef CONFIG_SLOW_WORK_PROC
+#ifdef CONFIG_SLOW_WORK_DEBUG
write_lock(&slow_work_execs_lock);
slow_work_execs[id] = NULL;
write_unlock(&slow_work_execs_lock);
diff --git a/kernel/softlockup.c b/kernel/softlockup.c
index 0d4c7898ab8..4b493f67dcb 100644
--- a/kernel/softlockup.c
+++ b/kernel/softlockup.c
@@ -155,11 +155,11 @@ void softlockup_tick(void)
* Wake up the high-prio watchdog task twice per
* threshold timespan.
*/
- if (now > touch_ts + softlockup_thresh/2)
+ if (time_after(now - softlockup_thresh/2, touch_ts))
wake_up_process(per_cpu(softlockup_watchdog, this_cpu));
/* Warn about unreasonable delays: */
- if (now <= (touch_ts + softlockup_thresh))
+ if (time_before_eq(now - softlockup_thresh, touch_ts))
return;
per_cpu(softlockup_print_ts, this_cpu) = touch_ts;
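
The softlockup change above replaces open-coded timestamp comparisons with the overflow-safe time_after()/time_before_eq() helpers. A small illustration of why the signed-difference form stays correct when the counter is near the top of its range; the values are made up:

#include <linux/kernel.h>
#include <linux/jiffies.h>

static void wraparound_example(void)
{
        unsigned long before_wrap = ULONG_MAX - 5;  /* sampled just before the counter wraps */
        unsigned long after_wrap = 3;               /* sampled later, after the wrap */
        int plain = after_wrap > before_wrap;              /* 0: direct compare says "not later" */
        int safe = time_after(after_wrap, before_wrap);    /* 1: signed difference says "later" */

        pr_info("plain=%d wrap-safe=%d\n", plain, safe);
}
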
diff --git a/kernel/time/tick-oneshot.c b/kernel/time/tick-oneshot.c
index 0a8a213016f..aada0e52680 100644
--- a/kernel/time/tick-oneshot.c
+++ b/kernel/time/tick-oneshot.c
@@ -22,6 +22,29 @@
#include "tick-internal.h"
+/* Limit min_delta to a jiffie */
+#define MIN_DELTA_LIMIT (NSEC_PER_SEC / HZ)
+
+static int tick_increase_min_delta(struct clock_event_device *dev)
+{
+ /* Nothing to do if we already reached the limit */
+ if (dev->min_delta_ns >= MIN_DELTA_LIMIT)
+ return -ETIME;
+
+ if (dev->min_delta_ns < 5000)
+ dev->min_delta_ns = 5000;
+ else
+ dev->min_delta_ns += dev->min_delta_ns >> 1;
+
+ if (dev->min_delta_ns > MIN_DELTA_LIMIT)
+ dev->min_delta_ns = MIN_DELTA_LIMIT;
+
+ printk(KERN_WARNING "CE: %s increased min_delta_ns to %llu nsec\n",
+ dev->name ? dev->name : "?",
+ (unsigned long long) dev->min_delta_ns);
+ return 0;
+}
+
/**
* tick_program_event internal worker function
*/
@@ -37,23 +60,28 @@ int tick_dev_program_event(struct clock_event_device *dev, ktime_t expires,
if (!ret || !force)
return ret;
+ dev->retries++;
/*
- * We tried 2 times to program the device with the given
- * min_delta_ns. If that's not working then we double it
+ * We tried 3 times to program the device with the given
+ * min_delta_ns. If that's not working then we increase it
* and emit a warning.
*/
if (++i > 2) {
/* Increase the min. delta and try again */
- if (!dev->min_delta_ns)
- dev->min_delta_ns = 5000;
- else
- dev->min_delta_ns += dev->min_delta_ns >> 1;
-
- printk(KERN_WARNING
- "CE: %s increasing min_delta_ns to %llu nsec\n",
- dev->name ? dev->name : "?",
- (unsigned long long) dev->min_delta_ns << 1);
-
+ if (tick_increase_min_delta(dev)) {
+ /*
+ * Get out of the loop if min_delta_ns
+ * hit the limit already. That's
+ * better than staying here forever.
+ *
+ * We clear next_event so we have a
+ * chance that the box survives.
+ */
+ printk(KERN_WARNING
+ "CE: Reprogramming failure. Giving up\n");
+ dev->next_event.tv64 = KTIME_MAX;
+ return -ETIME;
+ }
i = 0;
}
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 16736379a9c..39f6177fafa 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -818,7 +818,8 @@ void update_wall_time(void)
shift = min(shift, maxshift);
while (offset >= timekeeper.cycle_interval) {
offset = logarithmic_accumulation(offset, shift);
- shift--;
+ if (offset < timekeeper.cycle_interval << shift)
+ shift--;
}
/* correct the clock when NTP error is too big */
diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
index bdfb8dd1050..1a4a7dd7877 100644
--- a/kernel/time/timer_list.c
+++ b/kernel/time/timer_list.c
@@ -228,6 +228,7 @@ print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu)
SEQ_printf(m, " event_handler: ");
print_name_offset(m, dev->event_handler);
SEQ_printf(m, "\n");
+ SEQ_printf(m, " retries: %lu\n", dev->retries);
}
static void timer_list_show_tickdevices(struct seq_file *m)
@@ -257,7 +258,7 @@ static int timer_list_show(struct seq_file *m, void *v)
u64 now = ktime_to_ns(ktime_get());
int cpu;
- SEQ_printf(m, "Timer List Version: v0.5\n");
+ SEQ_printf(m, "Timer List Version: v0.6\n");
SEQ_printf(m, "HRTIMER_MAX_CLOCK_BASES: %d\n", HRTIMER_MAX_CLOCK_BASES);
SEQ_printf(m, "now at %Ld nsecs\n", (unsigned long long)now);
diff --git a/kernel/timer.c b/kernel/timer.c
index c61a7949387..fc965eae0e8 100644
--- a/kernel/timer.c
+++ b/kernel/timer.c
@@ -880,6 +880,7 @@ int try_to_del_timer_sync(struct timer_list *timer)
if (base->running_timer == timer)
goto out;
+ timer_stats_timer_clear_start_info(timer);
ret = 0;
if (timer_pending(timer)) {
detach_timer(timer, 1);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 05a9f83b881..9a0f9bf6a37 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -207,6 +207,14 @@ EXPORT_SYMBOL_GPL(tracing_is_on);
#define RB_MAX_SMALL_DATA (RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
#define RB_EVNT_MIN_SIZE 8U /* two 32bit words */
+#if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+# define RB_FORCE_8BYTE_ALIGNMENT 0
+# define RB_ARCH_ALIGNMENT RB_ALIGNMENT
+#else
+# define RB_FORCE_8BYTE_ALIGNMENT 1
+# define RB_ARCH_ALIGNMENT 8U
+#endif
+
/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ... RINGBUF_TYPE_DATA_TYPE_LEN_MAX
@@ -1201,18 +1209,19 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned nr_pages)
for (i = 0; i < nr_pages; i++) {
if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
- return;
+ goto out;
p = cpu_buffer->pages->next;
bpage = list_entry(p, struct buffer_page, list);
list_del_init(&bpage->list);
free_buffer_page(bpage);
}
if (RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages)))
- return;
+ goto out;
rb_reset_cpu(cpu_buffer);
rb_check_pages(cpu_buffer);
+out:
spin_unlock_irq(&cpu_buffer->reader_lock);
}
@@ -1229,7 +1238,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
for (i = 0; i < nr_pages; i++) {
if (RB_WARN_ON(cpu_buffer, list_empty(pages)))
- return;
+ goto out;
p = pages->next;
bpage = list_entry(p, struct buffer_page, list);
list_del_init(&bpage->list);
@@ -1238,6 +1247,7 @@ rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer,
rb_reset_cpu(cpu_buffer);
rb_check_pages(cpu_buffer);
+out:
spin_unlock_irq(&cpu_buffer->reader_lock);
}
@@ -1547,7 +1557,7 @@ rb_update_event(struct ring_buffer_event *event,
case 0:
length -= RB_EVNT_HDR_SIZE;
- if (length > RB_MAX_SMALL_DATA)
+ if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
event->array[0] = length;
else
event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
@@ -1722,11 +1732,11 @@ static unsigned rb_calculate_event_length(unsigned length)
if (!length)
length = 1;
- if (length > RB_MAX_SMALL_DATA)
+ if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
length += sizeof(event.array[0]);
length += RB_EVNT_HDR_SIZE;
- length = ALIGN(length, RB_ALIGNMENT);
+ length = ALIGN(length, RB_ARCH_ALIGNMENT);
return length;
}
diff --git a/kernel/trace/trace_clock.c b/kernel/trace/trace_clock.c
index 6fbfb8f417b..9d589d8dcd1 100644
--- a/kernel/trace/trace_clock.c
+++ b/kernel/trace/trace_clock.c
@@ -84,7 +84,7 @@ u64 notrace trace_clock_global(void)
int this_cpu;
u64 now;
- raw_local_irq_save(flags);
+ local_irq_save(flags);
this_cpu = raw_smp_processor_id();
now = cpu_clock(this_cpu);
@@ -110,7 +110,7 @@ u64 notrace trace_clock_global(void)
arch_spin_unlock(&trace_clock_struct.lock);
out:
- raw_local_irq_restore(flags);
+ local_irq_restore(flags);
return now;
}
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index 81f691eb3a3..0565bb42566 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -17,7 +17,12 @@ EXPORT_SYMBOL_GPL(perf_arch_fetch_caller_regs);
static char *perf_trace_buf;
static char *perf_trace_buf_nmi;
-typedef typeof(char [PERF_MAX_TRACE_SIZE]) perf_trace_t ;
+/*
+ * Force it to be aligned to unsigned long to avoid misaligned access
+ * surprises
+ */
+typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
+ perf_trace_t;
/* Count the events in use (per event id, not per instance) */
static int total_ref_count;
@@ -130,6 +135,8 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
char *trace_buf, *raw_data;
int pc, cpu;
+ BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));
+
pc = preempt_count();
/* Protect the per cpu buffer, begin the rcu read side */
@@ -152,7 +159,7 @@ __kprobes void *perf_trace_buf_prepare(int size, unsigned short type,
raw_data = per_cpu_ptr(trace_buf, cpu);
/* zero the dead bytes from align to not leak stack to user */
- *(u64 *)(&raw_data[size - sizeof(u64)]) = 0ULL;
+ memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
entry = (struct trace_entry *)raw_data;
tracing_generic_entry_update(entry, *irq_flags, pc);
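
Both trace_event_perf.c changes above are about alignment: the per-cpu trace buffer is only guaranteed unsigned-long alignment, so the old 8-byte store through a cast pointer could fault on 32-bit architectures without efficient unaligned access. A hedged, userspace-flavoured sketch of the safer byte-wise pattern; the function and parameter names are illustrative:

#include <stdint.h>
#include <string.h>

/* Zero the padding added when a record size is rounded up to 8 bytes. */
static void zero_tail_padding(char *record, size_t aligned_size)
{
        /*
         * Old pattern: *(uint64_t *)(record + aligned_size - 8) = 0;
         * that is a potentially misaligned 8-byte store. memset() works
         * byte-wise, so it is safe for any alignment of 'record'.
         */
        memset(record + aligned_size - sizeof(uint64_t), 0, sizeof(uint64_t));
}
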
diff --git a/mm/bootmem.c b/mm/bootmem.c
index d7c791ef003..9b134460b01 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -180,19 +180,12 @@ static void __init __free_pages_memory(unsigned long start, unsigned long end)
end_aligned = end & ~(BITS_PER_LONG - 1);
if (end_aligned <= start_aligned) {
-#if 1
- printk(KERN_DEBUG " %lx - %lx\n", start, end);
-#endif
for (i = start; i < end; i++)
__free_pages_bootmem(pfn_to_page(i), 0);
return;
}
-#if 1
- printk(KERN_DEBUG " %lx %lx - %lx %lx\n",
- start, start_aligned, end_aligned, end);
-#endif
for (i = start; i < start_aligned; i++)
__free_pages_bootmem(pfn_to_page(i), 0);
@@ -428,9 +421,6 @@ void __init free_bootmem_node(pg_data_t *pgdat, unsigned long physaddr,
{
#ifdef CONFIG_NO_BOOTMEM
free_early(physaddr, physaddr + size);
-#if 0
- printk(KERN_DEBUG "free %lx %lx\n", physaddr, size);
-#endif
#else
unsigned long start, end;
@@ -456,9 +446,6 @@ void __init free_bootmem(unsigned long addr, unsigned long size)
{
#ifdef CONFIG_NO_BOOTMEM
free_early(addr, addr + size);
-#if 0
- printk(KERN_DEBUG "free %lx %lx\n", addr, size);
-#endif
#else
unsigned long start, end;
diff --git a/mm/nommu.c b/mm/nommu.c
index e4b8f4d28a3..63fa17d121f 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -146,7 +146,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
for (i = 0; i < nr_pages; i++) {
- vma = find_extend_vma(mm, start);
+ vma = find_vma(mm, start);
if (!vma)
goto finish_or_fault;
@@ -162,7 +162,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
}
if (vmas)
vmas[i] = vma;
- start += PAGE_SIZE;
+ start = (start + PAGE_SIZE) & PAGE_MASK;
}
return i;
@@ -764,7 +764,7 @@ EXPORT_SYMBOL(find_vma);
*/
struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
{
- return find_vma(mm, addr & PAGE_MASK);
+ return find_vma(mm, addr);
}
/*
diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 453512266ea..db783d7af5a 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -378,6 +378,8 @@ static void vlan_transfer_features(struct net_device *dev,
#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
vlandev->fcoe_ddp_xid = dev->fcoe_ddp_xid;
#endif
+ vlandev->real_num_tx_queues = dev->real_num_tx_queues;
+ BUG_ON(vlandev->real_num_tx_queues > vlandev->num_tx_queues);
if (old_features != vlandev->features)
netdev_features_change(vlandev);
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index 9e83272fc5b..2fd057c81bb 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -361,6 +361,14 @@ static netdev_tx_t vlan_dev_hwaccel_hard_start_xmit(struct sk_buff *skb,
return ret;
}
+static u16 vlan_dev_select_queue(struct net_device *dev, struct sk_buff *skb)
+{
+ struct net_device *rdev = vlan_dev_info(dev)->real_dev;
+ const struct net_device_ops *ops = rdev->netdev_ops;
+
+ return ops->ndo_select_queue(rdev, skb);
+}
+
static int vlan_dev_change_mtu(struct net_device *dev, int new_mtu)
{
/* TODO: gotta make sure the underlying layer can handle it,
@@ -688,7 +696,8 @@ static const struct header_ops vlan_header_ops = {
.parse = eth_header_parse,
};
-static const struct net_device_ops vlan_netdev_ops, vlan_netdev_accel_ops;
+static const struct net_device_ops vlan_netdev_ops, vlan_netdev_accel_ops,
+ vlan_netdev_ops_sq, vlan_netdev_accel_ops_sq;
static int vlan_dev_init(struct net_device *dev)
{
@@ -722,11 +731,17 @@ static int vlan_dev_init(struct net_device *dev)
if (real_dev->features & NETIF_F_HW_VLAN_TX) {
dev->header_ops = real_dev->header_ops;
dev->hard_header_len = real_dev->hard_header_len;
- dev->netdev_ops = &vlan_netdev_accel_ops;
+ if (real_dev->netdev_ops->ndo_select_queue)
+ dev->netdev_ops = &vlan_netdev_accel_ops_sq;
+ else
+ dev->netdev_ops = &vlan_netdev_accel_ops;
} else {
dev->header_ops = &vlan_header_ops;
dev->hard_header_len = real_dev->hard_header_len + VLAN_HLEN;
- dev->netdev_ops = &vlan_netdev_ops;
+ if (real_dev->netdev_ops->ndo_select_queue)
+ dev->netdev_ops = &vlan_netdev_ops_sq;
+ else
+ dev->netdev_ops = &vlan_netdev_ops;
}
if (is_vlan_dev(real_dev))
@@ -865,6 +880,56 @@ static const struct net_device_ops vlan_netdev_accel_ops = {
#endif
};
+static const struct net_device_ops vlan_netdev_ops_sq = {
+ .ndo_select_queue = vlan_dev_select_queue,
+ .ndo_change_mtu = vlan_dev_change_mtu,
+ .ndo_init = vlan_dev_init,
+ .ndo_uninit = vlan_dev_uninit,
+ .ndo_open = vlan_dev_open,
+ .ndo_stop = vlan_dev_stop,
+ .ndo_start_xmit = vlan_dev_hard_start_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = vlan_dev_set_mac_address,
+ .ndo_set_rx_mode = vlan_dev_set_rx_mode,
+ .ndo_set_multicast_list = vlan_dev_set_rx_mode,
+ .ndo_change_rx_flags = vlan_dev_change_rx_flags,
+ .ndo_do_ioctl = vlan_dev_ioctl,
+ .ndo_neigh_setup = vlan_dev_neigh_setup,
+ .ndo_get_stats = vlan_dev_get_stats,
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+ .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
+ .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
+ .ndo_fcoe_enable = vlan_dev_fcoe_enable,
+ .ndo_fcoe_disable = vlan_dev_fcoe_disable,
+ .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
+#endif
+};
+
+static const struct net_device_ops vlan_netdev_accel_ops_sq = {
+ .ndo_select_queue = vlan_dev_select_queue,
+ .ndo_change_mtu = vlan_dev_change_mtu,
+ .ndo_init = vlan_dev_init,
+ .ndo_uninit = vlan_dev_uninit,
+ .ndo_open = vlan_dev_open,
+ .ndo_stop = vlan_dev_stop,
+ .ndo_start_xmit = vlan_dev_hwaccel_hard_start_xmit,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_set_mac_address = vlan_dev_set_mac_address,
+ .ndo_set_rx_mode = vlan_dev_set_rx_mode,
+ .ndo_set_multicast_list = vlan_dev_set_rx_mode,
+ .ndo_change_rx_flags = vlan_dev_change_rx_flags,
+ .ndo_do_ioctl = vlan_dev_ioctl,
+ .ndo_neigh_setup = vlan_dev_neigh_setup,
+ .ndo_get_stats = vlan_dev_get_stats,
+#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE)
+ .ndo_fcoe_ddp_setup = vlan_dev_fcoe_ddp_setup,
+ .ndo_fcoe_ddp_done = vlan_dev_fcoe_ddp_done,
+ .ndo_fcoe_enable = vlan_dev_fcoe_enable,
+ .ndo_fcoe_disable = vlan_dev_fcoe_disable,
+ .ndo_fcoe_get_wwn = vlan_dev_fcoe_get_wwn,
+#endif
+};
+
void vlan_setup(struct net_device *dev)
{
ether_setup(dev);
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index d4ec38fa64e..6f9206b36dc 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -614,7 +614,7 @@ void netpoll_print_options(struct netpoll *np)
np->name, np->local_port);
printk(KERN_INFO "%s: local IP %pI4\n",
np->name, &np->local_ip);
- printk(KERN_INFO "%s: interface %s\n",
+ printk(KERN_INFO "%s: interface '%s'\n",
np->name, np->dev_name);
printk(KERN_INFO "%s: remote port %d\n",
np->name, np->remote_port);
@@ -661,6 +661,9 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
if ((delim = strchr(cur, '@')) == NULL)
goto parse_failed;
*delim = 0;
+ if (*cur == ' ' || *cur == '\t')
+ printk(KERN_INFO "%s: warning: whitespace"
+ "is not allowed\n", np->name);
np->remote_port = simple_strtol(cur, NULL, 10);
cur = delim;
}
@@ -708,7 +711,7 @@ int netpoll_parse_options(struct netpoll *np, char *opt)
return 0;
parse_failed:
- printk(KERN_INFO "%s: couldn't parse config at %s!\n",
+ printk(KERN_INFO "%s: couldn't parse config at '%s'!\n",
np->name, cur);
return -1;
}
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index 51ca946e339..3feb2b39030 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1194,7 +1194,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
if (idx < s_idx)
goto cont;
- if (idx > s_idx)
+ if (h > s_h || idx > s_idx)
s_ip_idx = 0;
in_dev = __in_dev_get_rcu(dev);
if (!in_dev)
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index 0b9d03c54dc..d0a6092a67b 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1616,17 +1616,20 @@ ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm)
int ct;
struct rtnexthop *nhp;
struct net *net = mfc_net(c);
- struct net_device *dev = net->ipv4.vif_table[c->mfc_parent].dev;
u8 *b = skb_tail_pointer(skb);
struct rtattr *mp_head;
- if (dev)
- RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex);
+ /* If cache is unresolved, don't try to parse IIF and OIF */
+ if (c->mfc_parent > MAXVIFS)
+ return -ENOENT;
+
+ if (VIF_EXISTS(net, c->mfc_parent))
+ RTA_PUT(skb, RTA_IIF, 4, &net->ipv4.vif_table[c->mfc_parent].dev->ifindex);
mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
- if (c->mfc_un.res.ttls[ct] < 255) {
+ if (VIF_EXISTS(net, ct) && c->mfc_un.res.ttls[ct] < 255) {
if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
goto rtattr_failure;
nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 54fd68c14c8..d413b57be9b 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1097,7 +1097,7 @@ static int slow_chain_length(const struct rtable *head)
}
static int rt_intern_hash(unsigned hash, struct rtable *rt,
- struct rtable **rp, struct sk_buff *skb)
+ struct rtable **rp, struct sk_buff *skb, int ifindex)
{
struct rtable *rth, **rthp;
unsigned long now;
@@ -1212,11 +1212,16 @@ restart:
slow_chain_length(rt_hash_table[hash].chain) > rt_chain_length_max) {
struct net *net = dev_net(rt->u.dst.dev);
int num = ++net->ipv4.current_rt_cache_rebuild_count;
- if (!rt_caching(dev_net(rt->u.dst.dev))) {
+ if (!rt_caching(net)) {
printk(KERN_WARNING "%s: %d rebuilds is over limit, route caching disabled\n",
rt->u.dst.dev->name, num);
}
- rt_emergency_hash_rebuild(dev_net(rt->u.dst.dev));
+ rt_emergency_hash_rebuild(net);
+ spin_unlock_bh(rt_hash_lock_addr(hash));
+
+ hash = rt_hash(rt->fl.fl4_dst, rt->fl.fl4_src,
+ ifindex, rt_genid(net));
+ goto restart;
}
}
@@ -1477,7 +1482,7 @@ void ip_rt_redirect(__be32 old_gw, __be32 daddr, __be32 new_gw,
&netevent);
rt_del(hash, rth);
- if (!rt_intern_hash(hash, rt, &rt, NULL))
+ if (!rt_intern_hash(hash, rt, &rt, NULL, rt->fl.oif))
ip_rt_put(rt);
goto do_next;
}
@@ -1931,7 +1936,7 @@ static int ip_route_input_mc(struct sk_buff *skb, __be32 daddr, __be32 saddr,
in_dev_put(in_dev);
hash = rt_hash(daddr, saddr, dev->ifindex, rt_genid(dev_net(dev)));
- return rt_intern_hash(hash, rth, NULL, skb);
+ return rt_intern_hash(hash, rth, NULL, skb, dev->ifindex);
e_nobufs:
in_dev_put(in_dev);
@@ -2098,7 +2103,7 @@ static int ip_mkroute_input(struct sk_buff *skb,
/* put it into the cache */
hash = rt_hash(daddr, saddr, fl->iif,
rt_genid(dev_net(rth->u.dst.dev)));
- return rt_intern_hash(hash, rth, NULL, skb);
+ return rt_intern_hash(hash, rth, NULL, skb, fl->iif);
}
/*
@@ -2255,7 +2260,7 @@ local_input:
}
rth->rt_type = res.type;
hash = rt_hash(daddr, saddr, fl.iif, rt_genid(net));
- err = rt_intern_hash(hash, rth, NULL, skb);
+ err = rt_intern_hash(hash, rth, NULL, skb, fl.iif);
goto done;
no_route:
@@ -2502,7 +2507,7 @@ static int ip_mkroute_output(struct rtable **rp,
if (err == 0) {
hash = rt_hash(oldflp->fl4_dst, oldflp->fl4_src, oldflp->oif,
rt_genid(dev_net(dev_out)));
- err = rt_intern_hash(hash, rth, rp, NULL);
+ err = rt_intern_hash(hash, rth, rp, NULL, oldflp->oif);
}
return err;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 3381b4317c2..7e567ae5eaa 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -3610,7 +3610,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
hlist_for_each_entry_rcu(dev, node, head, index_hlist) {
if (idx < s_idx)
goto cont;
- if (idx > s_idx)
+ if (h > s_h || idx > s_idx)
s_ip_idx = 0;
ip_idx = 0;
if ((idev = __in6_dev_get(dev)) == NULL)
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 23e4ac0cc30..27acfb58650 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1695,17 +1695,20 @@ ip6mr_fill_mroute(struct sk_buff *skb, struct mfc6_cache *c, struct rtmsg *rtm)
int ct;
struct rtnexthop *nhp;
struct net *net = mfc6_net(c);
- struct net_device *dev = net->ipv6.vif6_table[c->mf6c_parent].dev;
u8 *b = skb_tail_pointer(skb);
struct rtattr *mp_head;
- if (dev)
- RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex);
+ /* If cache is unresolved, don't try to parse IIF and OIF */
+ if (c->mf6c_parent > MAXMIFS)
+ return -ENOENT;
+
+ if (MIF_EXISTS(net, c->mf6c_parent))
+ RTA_PUT(skb, RTA_IIF, 4, &net->ipv6.vif6_table[c->mf6c_parent].dev->ifindex);
mp_head = (struct rtattr *)skb_put(skb, RTA_LENGTH(0));
for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
- if (c->mfc_un.res.ttls[ct] < 255) {
+ if (MIF_EXISTS(net, ct) && c->mfc_un.res.ttls[ct] < 255) {
if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
goto rtattr_failure;
nhp = (struct rtnexthop *)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
diff --git a/net/ipv6/netfilter/ip6table_raw.c b/net/ipv6/netfilter/ip6table_raw.c
index aef31a29de9..b9cf7cd6192 100644
--- a/net/ipv6/netfilter/ip6table_raw.c
+++ b/net/ipv6/netfilter/ip6table_raw.c
@@ -13,7 +13,7 @@ static const struct xt_table packet_raw = {
.valid_hooks = RAW_VALID_HOOKS,
.me = THIS_MODULE,
.af = NFPROTO_IPV6,
- .priority = NF_IP6_PRI_FIRST,
+ .priority = NF_IP6_PRI_RAW,
};
/* The work comes in here from netfilter.c. */
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 7fcb0e5d121..0d7713c5c20 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -890,12 +890,17 @@ static struct dst_entry *ip6_negative_advice(struct dst_entry *dst)
struct rt6_info *rt = (struct rt6_info *) dst;
if (rt) {
- if (rt->rt6i_flags & RTF_CACHE)
- ip6_del_rt(rt);
- else
+ if (rt->rt6i_flags & RTF_CACHE) {
+ if (rt6_check_expired(rt)) {
+ ip6_del_rt(rt);
+ dst = NULL;
+ }
+ } else {
dst_release(dst);
+ dst = NULL;
+ }
}
- return NULL;
+ return dst;
}
static void ip6_link_failure(struct sk_buff *skb)
diff --git a/net/key/af_key.c b/net/key/af_key.c
index 36870788264..344145f23c3 100644
--- a/net/key/af_key.c
+++ b/net/key/af_key.c
@@ -2129,10 +2129,9 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c
int err;
out_skb = pfkey_xfrm_policy2msg_prep(xp);
- if (IS_ERR(out_skb)) {
- err = PTR_ERR(out_skb);
- goto out;
- }
+ if (IS_ERR(out_skb))
+ return PTR_ERR(out_skb);
+
err = pfkey_xfrm_policy2msg(out_skb, xp, dir);
if (err < 0)
return err;
@@ -2148,7 +2147,6 @@ static int key_notify_policy(struct xfrm_policy *xp, int dir, struct km_event *c
out_hdr->sadb_msg_seq = c->seq;
out_hdr->sadb_msg_pid = c->pid;
pfkey_broadcast(out_skb, GFP_ATOMIC, BROADCAST_ALL, NULL, xp_net(xp));
-out:
return 0;
}
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 9e9c4896394..215a64835de 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -493,6 +493,7 @@ static void hashlimit_ipv6_mask(__be32 *i, unsigned int p)
case 64 ... 95:
i[2] = maskl(i[2], p - 64);
i[3] = 0;
+ break;
case 96 ... 127:
i[3] = maskl(i[3], p - 96);
break;
@@ -879,7 +880,8 @@ static void dl_seq_stop(struct seq_file *s, void *v)
struct xt_hashlimit_htable *htable = s->private;
unsigned int *bucket = (unsigned int *)v;
- kfree(bucket);
+ if (!IS_ERR(bucket))
+ kfree(bucket);
spin_unlock_bh(&htable->lock);
}
diff --git a/net/netfilter/xt_recent.c b/net/netfilter/xt_recent.c
index 7073dbb8100..971d172afec 100644
--- a/net/netfilter/xt_recent.c
+++ b/net/netfilter/xt_recent.c
@@ -267,7 +267,7 @@ recent_mt(const struct sk_buff *skb, const struct xt_match_param *par)
for (i = 0; i < e->nstamps; i++) {
if (info->seconds && time_after(time, e->stamps[i]))
continue;
- if (info->hit_count && ++hits >= info->hit_count) {
+ if (!info->hit_count || ++hits >= info->hit_count) {
ret = !ret;
break;
}
diff --git a/net/sched/Kconfig b/net/sched/Kconfig
index 21f9c7678aa..2f691fb180d 100644
--- a/net/sched/Kconfig
+++ b/net/sched/Kconfig
@@ -328,13 +328,16 @@ config NET_CLS_FLOW
module will be called cls_flow.
config NET_CLS_CGROUP
- bool "Control Group Classifier"
+ tristate "Control Group Classifier"
select NET_CLS
depends on CGROUPS
---help---
Say Y here if you want to classify packets based on the control
cgroup of their process.
+ To compile this code as a module, choose M here: the
+ module will be called cls_cgroup.
+
config NET_EMATCH
bool "Extended Matches"
select NET_CLS
diff --git a/net/sched/cls_cgroup.c b/net/sched/cls_cgroup.c
index e4877ca6727..7f27d2c15e0 100644
--- a/net/sched/cls_cgroup.c
+++ b/net/sched/cls_cgroup.c
@@ -24,6 +24,25 @@ struct cgroup_cls_state
u32 classid;
};
+static struct cgroup_subsys_state *cgrp_create(struct cgroup_subsys *ss,
+ struct cgroup *cgrp);
+static void cgrp_destroy(struct cgroup_subsys *ss, struct cgroup *cgrp);
+static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp);
+
+struct cgroup_subsys net_cls_subsys = {
+ .name = "net_cls",
+ .create = cgrp_create,
+ .destroy = cgrp_destroy,
+ .populate = cgrp_populate,
+#ifdef CONFIG_NET_CLS_CGROUP
+ .subsys_id = net_cls_subsys_id,
+#else
+#define net_cls_subsys_id net_cls_subsys.subsys_id
+#endif
+ .module = THIS_MODULE,
+};
+
+
static inline struct cgroup_cls_state *cgrp_cls_state(struct cgroup *cgrp)
{
return container_of(cgroup_subsys_state(cgrp, net_cls_subsys_id),
@@ -79,14 +98,6 @@ static int cgrp_populate(struct cgroup_subsys *ss, struct cgroup *cgrp)
return cgroup_add_files(cgrp, ss, ss_files, ARRAY_SIZE(ss_files));
}
-struct cgroup_subsys net_cls_subsys = {
- .name = "net_cls",
- .create = cgrp_create,
- .destroy = cgrp_destroy,
- .populate = cgrp_populate,
- .subsys_id = net_cls_subsys_id,
-};
-
struct cls_cgroup_head
{
u32 handle;
@@ -277,12 +288,19 @@ static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
static int __init init_cgroup_cls(void)
{
- return register_tcf_proto_ops(&cls_cgroup_ops);
+ int ret = register_tcf_proto_ops(&cls_cgroup_ops);
+ if (ret)
+ return ret;
+ ret = cgroup_load_subsys(&net_cls_subsys);
+ if (ret)
+ unregister_tcf_proto_ops(&cls_cgroup_ops);
+ return ret;
}
static void __exit exit_cgroup_cls(void)
{
unregister_tcf_proto_ops(&cls_cgroup_ops);
+ cgroup_unload_subsys(&net_cls_subsys);
}
module_init(init_cgroup_cls);
diff --git a/net/socket.c b/net/socket.c
index 769c386bd42..f55ffe9f8c8 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -2135,6 +2135,10 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen,
break;
++datagrams;
+ /* MSG_WAITFORONE turns on MSG_DONTWAIT after one packet */
+ if (flags & MSG_WAITFORONE)
+ flags |= MSG_DONTWAIT;
+
if (timeout) {
ktime_get_ts(timeout);
*timeout = timespec_sub(end_time, *timeout);
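
MSG_WAITFORONE above makes recvmmsg() block only until the first datagram arrives, then sets MSG_DONTWAIT for the remainder of the batch. A hedged userspace sketch, assuming a libc new enough to expose recvmmsg() and MSG_WAITFORONE (otherwise the raw syscall would be needed); buffer sizes are arbitrary:

#define _GNU_SOURCE
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>

#define VLEN 8

/* Wait for one datagram, then drain whatever else is already queued. */
static int drain_batch(int sock)
{
        struct mmsghdr msgs[VLEN];
        struct iovec iov[VLEN];
        static char bufs[VLEN][1500];
        int i;

        memset(msgs, 0, sizeof(msgs));
        for (i = 0; i < VLEN; i++) {
                iov[i].iov_base = bufs[i];
                iov[i].iov_len = sizeof(bufs[i]);
                msgs[i].msg_hdr.msg_iov = &iov[i];
                msgs[i].msg_hdr.msg_iovlen = 1;
        }

        /* Returns the number of messages received, >= 1 on success. */
        return recvmmsg(sock, msgs, VLEN, MSG_WAITFORONE, NULL);
}
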
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index b546ac2660f..a2ff86189d2 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -148,6 +148,9 @@ static void pcm_debug_name(struct snd_pcm_substream *substream,
#define xrun_debug(substream, mask) \
((substream)->pstr->xrun_debug & (mask))
+#else
+#define xrun_debug(substream, mask) 0
+#endif
#define dump_stack_on_xrun(substream) do { \
if (xrun_debug(substream, XRUN_DEBUG_STACK)) \
@@ -169,6 +172,7 @@ static void xrun(struct snd_pcm_substream *substream)
}
}
+#ifdef CONFIG_SND_PCM_XRUN_DEBUG
#define hw_ptr_error(substream, fmt, args...) \
do { \
if (xrun_debug(substream, XRUN_DEBUG_BASIC)) { \
@@ -255,8 +259,6 @@ static void xrun_log_show(struct snd_pcm_substream *substream)
#else /* ! CONFIG_SND_PCM_XRUN_DEBUG */
-#define xrun_debug(substream, mask) 0
-#define xrun(substream) do { } while (0)
#define hw_ptr_error(substream, fmt, args...) do { } while (0)
#define xrun_log(substream, pos) do { } while (0)
#define xrun_log_show(substream) do { } while (0)
diff --git a/sound/pci/ac97/ac97_patch.c b/sound/pci/ac97/ac97_patch.c
index 1caf5e3c1f6..e68c98ef404 100644
--- a/sound/pci/ac97/ac97_patch.c
+++ b/sound/pci/ac97/ac97_patch.c
@@ -1852,12 +1852,14 @@ static unsigned int ad1981_jacks_blacklist[] = {
0x10140523, /* Thinkpad R40 */
0x10140534, /* Thinkpad X31 */
0x10140537, /* Thinkpad T41p */
+ 0x1014053e, /* Thinkpad R40e */
0x10140554, /* Thinkpad T42p/R50p */
0x10140567, /* Thinkpad T43p 2668-G7U */
0x10140581, /* Thinkpad X41-2527 */
0x10280160, /* Dell Dimension 2400 */
0x104380b0, /* Asus A7V8X-MX */
0x11790241, /* Toshiba Satellite A-15 S127 */
+ 0x1179ff10, /* Toshiba P500 */
0x144dc01a, /* Samsung NP-X20C004/SEG */
0 /* end */
};
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 8b2915631cc..4bb90675f70 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2269,6 +2269,7 @@ static struct snd_pci_quirk position_fix_list[] __devinitdata = {
SND_PCI_QUIRK(0x103c, 0x306d, "HP dv3", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1106, 0x3288, "ASUS M2V-MX SE", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1043, 0x813d, "ASUS P5AD2", POS_FIX_LPIB),
+ SND_PCI_QUIRK(0x1458, 0xa022, "ga-ma770-ud3", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1462, 0x1002, "MSI Wind U115", POS_FIX_LPIB),
SND_PCI_QUIRK(0x1565, 0x820f, "Biostar Microtech", POS_FIX_LPIB),
SND_PCI_QUIRK(0x8086, 0xd601, "eMachines T5212", POS_FIX_LPIB),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 053d53d8c8b..9a23444e9e7 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -10043,8 +10043,11 @@ static void alc882_auto_set_output_and_unmute(struct hda_codec *codec,
alc_set_pin_output(codec, nid, pin_type);
if (spec->multiout.dac_nids[dac_idx] == 0x25)
idx = 4;
- else
+ else {
+ if (dac_idx >= spec->multiout.num_dacs)
+ return;
idx = spec->multiout.dac_nids[dac_idx] - 2;
+ }
snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CONNECT_SEL, idx);
}
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 8a8f52db7e3..bc0f670a833 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -200,7 +200,7 @@ endif
CFLAGS = -ggdb3 -Wall -Wextra -std=gnu99 -Werror $(CFLAGS_OPTIMIZE) -D_FORTIFY_SOURCE=2 $(EXTRA_WARNINGS) $(EXTRA_CFLAGS)
EXTLIBS = -lpthread -lrt -lelf -lm
-ALL_CFLAGS = $(CFLAGS)
+ALL_CFLAGS = $(CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64
ALL_LDFLAGS = $(LDFLAGS)
STRIP ?= strip
@@ -492,19 +492,19 @@ ifeq ($(uname_S),Darwin)
PTHREAD_LIBS =
endif
-ifeq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
-ifneq ($(shell sh -c "(echo '\#include <gnu/libc-version.h>'; echo 'int main(void) { const char * version = gnu_get_libc_version(); return (long)version; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
+ifeq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
+ifneq ($(shell sh -c "(echo '\#include <gnu/libc-version.h>'; echo 'int main(void) { const char * version = gnu_get_libc_version(); return (long)version; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
msg := $(error No gnu/libc-version.h found, please install glibc-dev[el]/glibc-static);
endif
- ifneq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ_MMAP, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
+ ifneq ($(shell sh -c "(echo '\#include <libelf.h>'; echo 'int main(void) { Elf * elf = elf_begin(0, ELF_C_READ_MMAP, 0); return (long)elf; }') | $(CC) -x c - $(ALL_CFLAGS) -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
BASIC_CFLAGS += -DLIBELF_NO_MMAP
endif
else
msg := $(error No libelf.h/libelf found, please install libelf-dev/elfutils-libelf-devel and glibc-dev[el]);
endif
-ifneq ($(shell sh -c "(echo '\#include <dwarf.h>'; echo '\#include <libdw.h>'; echo 'int main(void) { Dwarf *dbg; dbg = dwarf_begin(0, DWARF_C_READ); return (long)dbg; }') | $(CC) -x c - $(ALL_CFLAGS) -D_LARGEFILE64_SOURCE -D_FILE_OFFSET_BITS=64 -I/usr/include/elfutils -ldw -lelf -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
+ifneq ($(shell sh -c "(echo '\#include <dwarf.h>'; echo '\#include <libdw.h>'; echo 'int main(void) { Dwarf *dbg; dbg = dwarf_begin(0, DWARF_C_READ); return (long)dbg; }') | $(CC) -x c - $(ALL_CFLAGS) -I/usr/include/elfutils -ldw -lelf -o $(BITBUCKET) $(ALL_LDFLAGS) $(EXTLIBS) "$(QUIET_STDERR)" && echo y"), y)
msg := $(warning No libdw.h found or old libdw.h found, disables dwarf support. Please install elfutils-devel/elfutils-dev);
BASIC_CFLAGS += -DNO_DWARF_SUPPORT
else
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index c30a3359234..152d6c9b1fa 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -47,7 +47,6 @@
#include "util/probe-event.h"
#define MAX_PATH_LEN 256
-#define MAX_PROBES 128
/* Session management structure */
static struct {
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 0b719e3dde0..1f529321607 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -455,7 +455,7 @@ static void print_sym_table(void)
struct sym_entry *syme, *n;
struct rb_root tmp = RB_ROOT;
struct rb_node *nd;
- int sym_width = 0, dso_width = 0, max_dso_width;
+ int sym_width = 0, dso_width = 0, dso_short_width = 0;
const int win_width = winsize.ws_col - 1;
samples = userspace_samples = 0;
@@ -545,15 +545,20 @@ static void print_sym_table(void)
if (syme->map->dso->long_name_len > dso_width)
dso_width = syme->map->dso->long_name_len;
+ if (syme->map->dso->short_name_len > dso_short_width)
+ dso_short_width = syme->map->dso->short_name_len;
+
if (syme->name_len > sym_width)
sym_width = syme->name_len;
}
printed = 0;
- max_dso_width = winsize.ws_col - sym_width - 29;
- if (dso_width > max_dso_width)
- dso_width = max_dso_width;
+ if (sym_width + dso_width > winsize.ws_col - 29) {
+ dso_width = dso_short_width;
+ if (sym_width + dso_width > winsize.ws_col - 29)
+ sym_width = winsize.ws_col - dso_width - 29;
+ }
putchar('\n');
if (nr_counters == 1)
printf(" samples pcnt");
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 53181dbfe4a..7c004b6ef24 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -242,7 +242,7 @@ void parse_perf_probe_event(const char *str, struct probe_point *pp,
/* Parse probe point */
parse_perf_probe_probepoint(argv[0], pp);
- if (pp->file || pp->line)
+ if (pp->file || pp->line || pp->lazy_line)
*need_dwarf = true;
/* Copy arguments and ensure return probe has no C argument */
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 1e6c65ebbd8..c171a243d05 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -333,8 +333,8 @@ static void show_location(Dwarf_Op *op, struct probe_finder *pf)
die("%u exceeds max register number.", regn);
if (deref)
- ret = snprintf(pf->buf, pf->len, " %s=+%ju(%s)",
- pf->var, (uintmax_t)offs, regs);
+ ret = snprintf(pf->buf, pf->len, " %s=%+jd(%s)",
+ pf->var, (intmax_t)offs, regs);
else
ret = snprintf(pf->buf, pf->len, " %s=%s", pf->var, regs);
DIE_IF(ret < 0);
@@ -352,8 +352,7 @@ static void show_variable(Dwarf_Die *vr_die, struct probe_finder *pf)
if (dwarf_attr(vr_die, DW_AT_location, &attr) == NULL)
goto error;
/* TODO: handle more than 1 exprs */
- ret = dwarf_getlocation_addr(&attr, (pf->addr - pf->cu_base),
- &expr, &nexpr, 1);
+ ret = dwarf_getlocation_addr(&attr, pf->addr, &expr, &nexpr, 1);
if (ret <= 0 || nexpr == 0)
goto error;
@@ -437,8 +436,7 @@ static void show_probe_point(Dwarf_Die *sp_die, struct probe_finder *pf)
/* Get the frame base attribute/ops */
dwarf_attr(sp_die, DW_AT_frame_base, &fb_attr);
- ret = dwarf_getlocation_addr(&fb_attr, (pf->addr - pf->cu_base),
- &pf->fb_ops, &nops, 1);
+ ret = dwarf_getlocation_addr(&fb_attr, pf->addr, &pf->fb_ops, &nops, 1);
if (ret <= 0 || nops == 0)
pf->fb_ops = NULL;
@@ -455,6 +453,9 @@ static void show_probe_point(Dwarf_Die *sp_die, struct probe_finder *pf)
/* *pf->fb_ops will be cached in libdw. Don't free it. */
pf->fb_ops = NULL;
+ if (pp->found == MAX_PROBES)
+ die("Too many( > %d) probe point found.\n", MAX_PROBES);
+
pp->probes[pp->found] = strdup(tmp);
pp->found++;
}
@@ -641,7 +642,6 @@ static void find_probe_point_by_func(struct probe_finder *pf)
int find_probe_point(int fd, struct probe_point *pp)
{
struct probe_finder pf = {.pp = pp};
- int ret;
Dwarf_Off off, noff;
size_t cuhl;
Dwarf_Die *diep;
@@ -668,10 +668,6 @@ int find_probe_point(int fd, struct probe_point *pp)
pf.fname = NULL;
if (!pp->file || pf.fname) {
- /* Save CU base address (for frame_base) */
- ret = dwarf_lowpc(&pf.cu_die, &pf.cu_base);
- if (ret != 0)
- pf.cu_base = 0;
if (pp->function)
find_probe_point_by_func(&pf);
else if (pp->lazy_line)
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index d1a651793ba..21f7354397b 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -71,7 +71,6 @@ struct probe_finder {
/* For variable searching */
Dwarf_Op *fb_ops; /* Frame base attribute */
- Dwarf_Addr cu_base; /* Current CU base address */
const char *var; /* Current variable name */
char *buf; /* Current output buffer */
int len; /* Length of output buffer */
diff --git a/tools/perf/util/scripting-engines/trace-event-python.c b/tools/perf/util/scripting-engines/trace-event-python.c
index 33a414bbba3..6a72f14c598 100644
--- a/tools/perf/util/scripting-engines/trace-event-python.c
+++ b/tools/perf/util/scripting-engines/trace-event-python.c
@@ -208,7 +208,7 @@ static void python_process_event(int cpu, void *data,
int size __unused,
unsigned long long nsecs, char *comm)
{
- PyObject *handler, *retval, *context, *t;
+ PyObject *handler, *retval, *context, *t, *obj;
static char handler_name[256];
struct format_field *field;
unsigned long long val;
@@ -256,16 +256,23 @@ static void python_process_event(int cpu, void *data,
offset &= 0xffff;
} else
offset = field->offset;
- PyTuple_SetItem(t, n++,
- PyString_FromString((char *)data + offset));
+ obj = PyString_FromString((char *)data + offset);
} else { /* FIELD_IS_NUMERIC */
val = read_size(data + field->offset, field->size);
if (field->flags & FIELD_IS_SIGNED) {
- PyTuple_SetItem(t, n++, PyInt_FromLong(val));
+ if ((long long)val >= LONG_MIN &&
+ (long long)val <= LONG_MAX)
+ obj = PyInt_FromLong(val);
+ else
+ obj = PyLong_FromLongLong(val);
} else {
- PyTuple_SetItem(t, n++, PyInt_FromLong(val));
+ if (val <= LONG_MAX)
+ obj = PyInt_FromLong(val);
+ else
+ obj = PyLong_FromUnsignedLongLong(val);
}
}
+ PyTuple_SetItem(t, n++, obj);
}
if (_PyTuple_Resize(&t, n) == -1)
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index 323c0aea0a9..c458c4a371d 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -163,9 +163,17 @@ void dso__set_long_name(struct dso *self, char *name)
self->long_name_len = strlen(name);
}
+static void dso__set_short_name(struct dso *self, const char *name)
+{
+ if (name == NULL)
+ return;
+ self->short_name = name;
+ self->short_name_len = strlen(name);
+}
+
static void dso__set_basename(struct dso *self)
{
- self->short_name = basename(self->long_name);
+ dso__set_short_name(self, basename(self->long_name));
}
struct dso *dso__new(const char *name)
@@ -176,7 +184,7 @@ struct dso *dso__new(const char *name)
int i;
strcpy(self->name, name);
dso__set_long_name(self, self->name);
- self->short_name = self->name;
+ dso__set_short_name(self, self->name);
for (i = 0; i < MAP__NR_TYPES; ++i)
self->symbols[i] = self->symbol_names[i] = RB_ROOT;
self->slen_calculated = 0;
@@ -897,7 +905,6 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name,
struct kmap *kmap = self->kernel ? map__kmap(map) : NULL;
struct map *curr_map = map;
struct dso *curr_dso = self;
- size_t dso_name_len = strlen(self->short_name);
Elf_Data *symstrs, *secstrs;
uint32_t nr_syms;
int err = -1;
@@ -987,7 +994,8 @@ static int dso__load_sym(struct dso *self, struct map *map, const char *name,
char dso_name[PATH_MAX];
if (strcmp(section_name,
- curr_dso->short_name + dso_name_len) == 0)
+ (curr_dso->short_name +
+ self->short_name_len)) == 0)
goto new_symbol;
if (strcmp(section_name, ".text") == 0) {
@@ -1782,7 +1790,7 @@ struct dso *dso__new_kernel(const char *name)
struct dso *self = dso__new(name ?: "[kernel.kallsyms]");
if (self != NULL) {
- self->short_name = "[kernel]";
+ dso__set_short_name(self, "[kernel]");
self->kernel = 1;
}
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 280dadd32a0..f30a3742891 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -110,9 +110,10 @@ struct dso {
u8 sorted_by_name;
u8 loaded;
u8 build_id[BUILD_ID_SIZE];
- u16 long_name_len;
const char *short_name;
char *long_name;
+ u16 long_name_len;
+ u16 short_name_len;
char name[0];
};