Merge branch 'pending-dma-coherent' into devel
author Russell King <rmk+kernel@arm.linux.org.uk>
Fri, 4 Dec 2009 15:00:00 +0000 (15:00 +0000)
committer Russell King <rmk+kernel@arm.linux.org.uk>
Fri, 4 Dec 2009 15:00:00 +0000 (15:00 +0000)
312 files changed:
.gitignore
Documentation/fb/framebuffer.txt
Documentation/filesystems/caching/fscache.txt
Documentation/filesystems/caching/netfs-api.txt
Documentation/filesystems/ocfs2.txt
Documentation/slow-work.txt
MAINTAINERS
Makefile
arch/arm/include/asm/cacheflush.h
arch/arm/include/asm/kmap_types.h
arch/arm/include/asm/memory.h
arch/arm/include/asm/swab.h
arch/arm/kernel/signal.c
arch/arm/mach-bcmring/include/mach/io.h
arch/arm/mach-omap2/board-zoom2.c
arch/arm/mach-omap2/clock34xx.c
arch/arm/mach-omap2/clock34xx.h
arch/arm/mach-omap2/gpmc.c
arch/arm/mach-pxa/cpufreq-pxa2xx.c
arch/arm/mach-pxa/cpufreq-pxa3xx.c
arch/arm/mach-pxa/spitz.c
arch/arm/mm/Kconfig
arch/arm/mm/cache-l2x0.c
arch/arm/mm/copypage-v6.c
arch/arm/mm/fault-armv.c
arch/arm/mm/flush.c
arch/arm/mm/mm.h
arch/arm/mm/mmu.c
arch/arm/plat-omap/gpio.c
arch/arm/tools/mach-types
arch/blackfin/kernel/bfin_dma_5xx.c
arch/blackfin/kernel/cplb-mpu/cplbinit.c
arch/blackfin/kernel/process.c
arch/blackfin/kernel/ptrace.c
arch/blackfin/mach-bf518/include/mach/anomaly.h
arch/blackfin/mach-bf527/include/mach/anomaly.h
arch/blackfin/mach-bf533/include/mach/anomaly.h
arch/blackfin/mach-bf537/include/mach/anomaly.h
arch/blackfin/mach-bf538/include/mach/anomaly.h
arch/blackfin/mach-bf548/include/mach/anomaly.h
arch/blackfin/mach-bf561/atomic.S
arch/blackfin/mach-bf561/include/mach/anomaly.h
arch/blackfin/mach-common/arch_checks.c
arch/blackfin/mach-common/smp.c
arch/parisc/kernel/unwind.c
arch/parisc/kernel/vmlinux.lds.S
arch/powerpc/include/asm/kmap_types.h
arch/sh/kernel/cpu/irq/imask.c
arch/sh/kernel/cpu/irq/intc-sh5.c
arch/sparc/mm/init_64.h
arch/x86/kernel/acpi/processor.c
arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
arch/x86/kernel/cpu/cpufreq/longhaul.c
arch/x86/kernel/cpu/cpufreq/powernow-k8.c
arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
crypto/async_tx/Kconfig
crypto/async_tx/async_pq.c
crypto/async_tx/async_xor.c
crypto/gcm.c
drivers/acpi/acpica/acpredef.h
drivers/acpi/blacklist.c
drivers/acpi/sleep.c
drivers/ata/sata_fsl.c
drivers/base/power/runtime.c
drivers/block/cciss.c
drivers/char/agp/intel-agp.c
drivers/char/keyboard.c
drivers/char/tty_port.c
drivers/char/vt_ioctl.c
drivers/cpufreq/cpufreq.c
drivers/cpufreq/cpufreq_conservative.c
drivers/cpufreq/cpufreq_ondemand.c
drivers/crypto/padlock-aes.c
drivers/dma/Kconfig
drivers/dma/dmaengine.c
drivers/dma/ioat/dca.c
drivers/dma/ioat/dma.h
drivers/dma/ioat/dma_v2.c
drivers/dma/ioat/dma_v3.c
drivers/dma/ioat/hw.h
drivers/dma/ioat/registers.h
drivers/dma/shdma.c
drivers/firewire/ohci.c
drivers/gpu/drm/Kconfig
drivers/gpu/drm/drm_edid.c
drivers/gpu/drm/drm_fb_helper.c
drivers/gpu/drm/drm_gem.c
drivers/gpu/drm/drm_mm.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_irq.c
drivers/gpu/drm/i915/i915_suspend.c
drivers/gpu/drm/i915/intel_crt.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/radeon/atom.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_agp.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/rv515.c
drivers/i2c/busses/i2c-pnx.c
drivers/i2c/chips/tsl2550.c
drivers/i2c/i2c-core.c
drivers/ide/ide-ioctls.c
drivers/ieee802154/fakehard.c
drivers/isdn/hardware/mISDN/hfcmulti.c
drivers/isdn/i4l/isdn_ppp.c
drivers/media/common/ir-functions.c
drivers/media/dvb/dvb-usb/cxusb.c
drivers/media/dvb/siano/Kconfig
drivers/media/radio/radio-gemtek-pci.c
drivers/media/video/davinci/vpif_display.c
drivers/media/video/em28xx/em28xx-cards.c
drivers/media/video/mx1_camera.c
drivers/media/video/mx3_camera.c
drivers/media/video/sh_mobile_ceu_camera.c
drivers/media/video/soc_camera.c
drivers/media/video/videobuf-dma-contig.c
drivers/misc/eeprom/at24.c
drivers/mmc/host/mmci.c
drivers/net/Kconfig
drivers/net/arm/ep93xx_eth.c
drivers/net/au1000_eth.c
drivers/net/b44.c
drivers/net/can/Kconfig
drivers/net/can/dev.c
drivers/net/can/sja1000/Kconfig [new file with mode: 0644]
drivers/net/can/usb/Kconfig [new file with mode: 0644]
drivers/net/can/usb/Makefile
drivers/net/cxgb3/sge.c
drivers/net/davinci_emac.c
drivers/net/e100.c
drivers/net/e1000e/e1000.h
drivers/net/e1000e/ethtool.c
drivers/net/e1000e/ich8lan.c
drivers/net/e1000e/netdev.c
drivers/net/e1000e/phy.c
drivers/net/forcedeth.c
drivers/net/ibm_newemac/emac.h
drivers/net/ixgbe/ixgbe_main.c
drivers/net/ks8851_mll.c
drivers/net/macvlan.c
drivers/net/netxen/netxen_nic.h
drivers/net/netxen/netxen_nic_hdr.h
drivers/net/netxen/netxen_nic_hw.c
drivers/net/netxen/netxen_nic_init.c
drivers/net/netxen/netxen_nic_main.c
drivers/net/phy/mdio-gpio.c
drivers/net/ppp_generic.c
drivers/net/r6040.c
drivers/net/r8169.c
drivers/net/s2io.c
drivers/net/smc91x.c
drivers/net/smsc911x.c
drivers/net/smsc9420.c
drivers/net/stmmac/stmmac_main.c
drivers/net/stmmac/stmmac_timer.c
drivers/net/stmmac/stmmac_timer.h
drivers/net/sungem.c
drivers/net/usb/hso.c
drivers/net/veth.c
drivers/net/wan/cosa.c
drivers/net/wireless/ath/ath5k/base.c
drivers/net/wireless/ath/ath5k/led.c
drivers/net/wireless/ath/ath9k/main.c
drivers/net/wireless/b43/main.c
drivers/net/wireless/ipw2x00/ipw2100.c
drivers/net/wireless/ipw2x00/ipw2200.c
drivers/net/wireless/ipw2x00/libipw.h
drivers/net/wireless/ipw2x00/libipw_module.c
drivers/net/wireless/iwlwifi/iwl-1000.c
drivers/net/wireless/iwlwifi/iwl-6000.c
drivers/net/wireless/iwlwifi/iwl-agn-rs.c
drivers/net/wireless/iwlwifi/iwl-agn.c
drivers/net/wireless/iwlwifi/iwl-core.h
drivers/net/wireless/iwlwifi/iwl-tx.c
drivers/net/wireless/libertas/ethtool.c
drivers/net/wireless/p54/p54usb.c
drivers/net/wireless/rtl818x/rtl8187_rfkill.c
drivers/pci/dmar.c
drivers/platform/x86/acerhdf.c
drivers/platform/x86/thinkpad_acpi.c
drivers/scsi/bfa/bfad_fwimg.c
drivers/scsi/bfa/bfad_im.c
drivers/scsi/gdth.c
drivers/scsi/hosts.c
drivers/scsi/ipr.c
drivers/scsi/ipr.h
drivers/scsi/libsas/sas_expander.c
drivers/scsi/pmcraid.c
drivers/scsi/scsi_scan.c
drivers/scsi/scsi_sysfs.c
drivers/scsi/scsi_transport_fc.c
drivers/scsi/sd_dif.c
drivers/serial/suncore.c
drivers/serial/suncore.h
drivers/serial/sunhv.c
drivers/serial/sunsab.c
drivers/serial/sunsu.c
drivers/serial/sunzilog.c
drivers/spi/spi_stmp.c
drivers/ssb/scan.c
drivers/staging/go7007/s2250-board.c
drivers/staging/go7007/s2250-loader.h [new file with mode: 0644]
drivers/staging/octeon/ethernet-mdio.c
drivers/staging/octeon/ethernet-spi.c
drivers/staging/octeon/ethernet.c
drivers/uio/uio_pdrv_genirq.c
drivers/usb/class/cdc-acm.c
drivers/usb/host/ohci-hcd.c
drivers/usb/host/ohci-pci.c
drivers/usb/host/ohci-q.c
drivers/usb/host/ohci.h
drivers/usb/host/xhci-mem.c
drivers/usb/host/xhci-ring.c
drivers/usb/mon/mon_bin.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/option.c
drivers/video/da8xx-fb.c
drivers/watchdog/pnx4008_wdt.c
fs/9p/cache.c
fs/afs/file.c
fs/cachefiles/interface.c
fs/cachefiles/namei.c
fs/cachefiles/rdwr.c
fs/cifs/CHANGES
fs/cifs/cifsfs.c
fs/cifs/dir.c
fs/fcntl.c
fs/fscache/Kconfig
fs/fscache/Makefile
fs/fscache/cache.c
fs/fscache/cookie.c
fs/fscache/internal.h
fs/fscache/main.c
fs/fscache/object-list.c [new file with mode: 0644]
fs/fscache/object.c
fs/fscache/operation.c
fs/fscache/page.c
fs/fscache/proc.c
fs/fscache/stats.c
fs/fuse/dir.c
fs/gfs2/main.c
fs/gfs2/recovery.c
fs/jffs2/read.c
fs/nfs/fscache.c
fs/nfs/nfs4proc.c
fs/ocfs2/file.c
fs/ocfs2/ocfs2.h
fs/ocfs2/refcounttree.c
fs/ocfs2/super.c
fs/ocfs2/uptodate.c
fs/proc/array.c
include/asm-generic/fcntl.h
include/linux/fscache-cache.h
include/linux/fscache.h
include/linux/i2c-pnx.h
include/linux/isdn_ppp.h
include/linux/slow-work.h
include/linux/suspend.h
include/linux/vt.h
include/net/mac80211.h
include/net/sctp/structs.h
include/scsi/scsi_device.h
include/scsi/scsi_host.h
init/Kconfig
kernel/Makefile
kernel/slow-work-proc.c [new file with mode: 0644]
kernel/slow-work.c
kernel/slow-work.h [new file with mode: 0644]
kernel/workqueue.c
lib/radix-tree.c
lib/string.c
mm/Kconfig
mm/backing-dev.c
mm/memory_hotplug.c
net/8021q/vlan.c
net/bluetooth/hci_conn.c
net/bluetooth/l2cap.c
net/core/dev.c
net/core/pktgen.c
net/core/skbuff.c
net/ipv4/ip_fragment.c
net/ipv4/ipmr.c
net/ipv4/tcp.c
net/mac80211/agg-rx.c
net/mac80211/agg-tx.c
net/mac80211/ht.c
net/mac80211/ieee80211_i.h
net/mac80211/util.c
net/netfilter/nf_log.c
net/netfilter/xt_limit.c
net/netfilter/xt_osf.c
net/rfkill/core.c
net/sctp/associola.c
net/sctp/outqueue.c
net/sctp/sm_sideeffect.c
net/sctp/sm_statefuns.c
net/sctp/socket.c
net/sctp/transport.c
net/sunrpc/addr.c
scripts/kconfig/Makefile
scripts/kconfig/streamline_config.pl
security/integrity/ima/ima_iint.c
sound/arm/aaci.c
sound/pci/hda/patch_realtek.c
sound/soc/codecs/tlv320aic23.c
sound/soc/omap/omap3evm.c
sound/soc/omap/omap3pandora.c
sound/soc/soc-dapm.c
sound/usb/usbmixer.c

index b93fb7eff94286c7a15d475fa581dac32a89b0db..946c7ec5c922ff5bb02726144178f56f57c3378d 100644 (file)
@@ -25,6 +25,7 @@
 *.elf
 *.bin
 *.gz
+*.bz2
 *.lzma
 *.patch
 *.gcno
index b3e3a035683993edc8391cefd11ea3ef6be9eb08..fe79e3c8847dc334f479050bb0bc6af5711f868a 100644 (file)
@@ -312,10 +312,8 @@ and to the following documentation:
 8. Mailing list
 ---------------
 
-There are several frame buffer device related mailing lists at SourceForge:
-  - linux-fbdev-announce@lists.sourceforge.net, for announcements,
-  - linux-fbdev-user@lists.sourceforge.net, for generic user support,
-  - linux-fbdev-devel@lists.sourceforge.net, for project developers.
+There is a frame buffer device related mailing list at kernel.org:
+linux-fbdev@vger.kernel.org.
 
 Point your web browser to http://sourceforge.net/projects/linux-fbdev/ for
 subscription information and archive browsing.
index 9e94b9491d89c7bc246865d1b4580d8533fa7549..a91e2e2095b09fcebca6f456fe7e1fe36263006d 100644 (file)
@@ -235,6 +235,7 @@ proc files.
                neg=N   Number of negative lookups made
                pos=N   Number of positive lookups made
                crt=N   Number of objects created by lookup
+               tmo=N   Number of lookups timed out and requeued
        Updates n=N     Number of update cookie requests seen
                nul=N   Number of upd reqs given a NULL parent
                run=N   Number of upd reqs granted CPU time
@@ -250,8 +251,10 @@ proc files.
                ok=N    Number of successful alloc reqs
                wt=N    Number of alloc reqs that waited on lookup completion
                nbf=N   Number of alloc reqs rejected -ENOBUFS
+               int=N   Number of alloc reqs aborted -ERESTARTSYS
                ops=N   Number of alloc reqs submitted
                owt=N   Number of alloc reqs waited for CPU time
+               abt=N   Number of alloc reqs aborted due to object death
        Retrvls n=N     Number of retrieval (read) requests seen
                ok=N    Number of successful retr reqs
                wt=N    Number of retr reqs that waited on lookup completion
@@ -261,6 +264,7 @@ proc files.
                oom=N   Number of retr reqs failed -ENOMEM
                ops=N   Number of retr reqs submitted
                owt=N   Number of retr reqs waited for CPU time
+               abt=N   Number of retr reqs aborted due to object death
        Stores  n=N     Number of storage (write) requests seen
                ok=N    Number of successful store reqs
                agn=N   Number of store reqs on a page already pending storage
@@ -268,12 +272,37 @@ proc files.
                oom=N   Number of store reqs failed -ENOMEM
                ops=N   Number of store reqs submitted
                run=N   Number of store reqs granted CPU time
+               pgs=N   Number of pages given store req processing time
+               rxd=N   Number of store reqs deleted from tracking tree
+               olm=N   Number of store reqs over store limit
+       VmScan  nos=N   Number of release reqs against pages with no pending store
+               gon=N   Number of release reqs against pages stored by the time the lock was granted
+               bsy=N   Number of release reqs ignored due to in-progress store
+               can=N   Number of page stores cancelled due to release req
        Ops     pend=N  Number of times async ops added to pending queues
                run=N   Number of times async ops given CPU time
                enq=N   Number of times async ops queued for processing
+               can=N   Number of async ops cancelled
+               rej=N   Number of async ops rejected due to object lookup/create failure
                dfr=N   Number of async ops queued for deferred release
                rel=N   Number of async ops released
                gc=N    Number of deferred-release async ops garbage collected
+       CacheOp alo=N   Number of in-progress alloc_object() cache ops
+               luo=N   Number of in-progress lookup_object() cache ops
+               luc=N   Number of in-progress lookup_complete() cache ops
+               gro=N   Number of in-progress grab_object() cache ops
+               upo=N   Number of in-progress update_object() cache ops
+               dro=N   Number of in-progress drop_object() cache ops
+               pto=N   Number of in-progress put_object() cache ops
+               syn=N   Number of in-progress sync_cache() cache ops
+               atc=N   Number of in-progress attr_changed() cache ops
+               rap=N   Number of in-progress read_or_alloc_page() cache ops
+               ras=N   Number of in-progress read_or_alloc_pages() cache ops
+               alp=N   Number of in-progress allocate_page() cache ops
+               als=N   Number of in-progress allocate_pages() cache ops
+               wrp=N   Number of in-progress write_page() cache ops
+               ucp=N   Number of in-progress uncache_page() cache ops
+               dsp=N   Number of in-progress dissociate_pages() cache ops
 
 
  (*) /proc/fs/fscache/histogram
@@ -299,6 +328,87 @@ proc files.
      jiffy range covered, and the SECS field the equivalent number of seconds.
 
 
+===========
+OBJECT LIST
+===========
+
+If CONFIG_FSCACHE_OBJECT_LIST is enabled, the FS-Cache facility will maintain a
+list of all the objects currently allocated and allow them to be viewed
+through:
+
+       /proc/fs/fscache/objects
+
+This will look something like:
+
+       [root@andromeda ~]# head /proc/fs/fscache/objects
+       OBJECT   PARENT   STAT CHLDN OPS OOP IPR EX READS EM EV F S | NETFS_COOKIE_DEF TY FL NETFS_DATA       OBJECT_KEY, AUX_DATA
+       ======== ======== ==== ===== === === === == ===== == == = = | ================ == == ================ ================
+          17e4b        2 ACTV     0   0   0   0  0     0 7b  4 0 8 | NFS.fh           DT  0 ffff88001dd82820 010006017edcf8bbc93b43298fdfbe71e50b57b13a172c0117f38472, e567634700000000000000000000000063f2404a000000000000000000000000c9030000000000000000000063f2404a
+          1693a        2 ACTV     0   0   0   0  0     0 7b  4 0 8 | NFS.fh           DT  0 ffff88002db23380 010006017edcf8bbc93b43298fdfbe71e50b57b1e0162c01a2df0ea6, 420ebc4a000000000000000000000000420ebc4a0000000000000000000000000e1801000000000000000000420ebc4a
+
+where the first set of columns before the '|' describe the object:
+
+       COLUMN  DESCRIPTION
+       ======= ===============================================================
+       OBJECT  Object debugging ID (appears as OBJ%x in some debug messages)
+       PARENT  Debugging ID of parent object
+       STAT    Object state
+       CHLDN   Number of child objects of this object
+       OPS     Number of outstanding operations on this object
+       OOP     Number of outstanding child object management operations
+       IPR
+       EX      Number of outstanding exclusive operations
+       READS   Number of outstanding read operations
+       EM      Object's event mask
+       EV      Events raised on this object
+       F       Object flags
+       S       Object slow-work work item flags
+
+and the second set of columns describe the object's cookie, if present:
+
+       COLUMN          DESCRIPTION
+       =============== =======================================================
+       NETFS_COOKIE_DEF Name of netfs cookie definition
+       TY              Cookie type (IX - index, DT - data, hex - special)
+       FL              Cookie flags
+       NETFS_DATA      Netfs private data stored in the cookie
+       OBJECT_KEY      Object key      } 1 column, with separating comma
+       AUX_DATA        Object aux data } presence may be configured
+
+The data shown may be filtered by attaching a key to an appropriate keyring
+before viewing the file.  Something like:
+
+               keyctl add user fscache:objlist <restrictions> @s
+
+where <restrictions> are a selection of the following letters:
+
+       K       Show hexdump of object key (don't show if not given)
+       A       Show hexdump of object aux data (don't show if not given)
+
+and the following paired letters:
+
+       C       Show objects that have a cookie
+       c       Show objects that don't have a cookie
+       B       Show objects that are busy
+       b       Show objects that aren't busy
+       W       Show objects that have pending writes
+       w       Show objects that don't have pending writes
+       R       Show objects that have outstanding reads
+       r       Show objects that don't have outstanding reads
+       S       Show objects that have slow work queued
+       s       Show objects that don't have slow work queued
+
+If neither side of a letter pair is given, then both are implied.  For example:
+
+       keyctl add user fscache:objlist KB @s
+
+shows objects that are busy, and lists their object keys, but does not dump
+their auxiliary data.  It also implies "CcWwRrSs", but as 'B' is given, 'b' is
+not implied.
+
+By default all objects and all fields will be shown.
+
+
 =========
 DEBUGGING
 =========
index 2666b1ed5e9e6515cdda71c878f846438167a0b1..1902c57b72ef7e103a07856dd520aef772c1e684 100644 (file)
@@ -641,7 +641,7 @@ data file must be retired (see the relinquish cookie function below).
 
 Furthermore, note that this does not cancel the asynchronous read or write
 operation started by the read/alloc and write functions, so the page
-invalidation and release functions must use:
+invalidation functions must use:
 
        bool fscache_check_page_write(struct fscache_cookie *cookie,
                                      struct page *page);
@@ -654,6 +654,25 @@ to see if a page is being written to the cache, and:
 to wait for it to finish if it is.
 
 
+When releasepage() is being implemented, a special FS-Cache function exists to
+manage the heuristics of coping with vmscan trying to eject pages, which may
+conflict with the cache trying to write pages to the cache (which may itself
+need to allocate memory):
+
+       bool fscache_maybe_release_page(struct fscache_cookie *cookie,
+                                       struct page *page,
+                                       gfp_t gfp);
+
+This takes the netfs cookie, and the page and gfp arguments as supplied to
+releasepage().  It will return false if the page cannot yet be released for
+some reason; if it returns true, the page has been uncached and can now be
+released.
+
+To make a page available for release, this function may wait for an outstanding
+storage request to complete, or it may attempt to cancel the storage request -
+in which case the page will not be stored in the cache this time.
+
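+For illustration only, a netfs's releasepage() might wrap this function
+something like the following sketch (my_get_cookie() is a hypothetical helper
+that retrieves the netfs's cookie for the page's inode):
+
+	static int my_releasepage(struct page *page, gfp_t gfp)
+	{
+		struct fscache_cookie *cookie = my_get_cookie(page);
+
+		/* false means the cache still needs the page: refuse release */
+		if (!fscache_maybe_release_page(cookie, page, gfp))
+			return 0;
+		return 1;
+	}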
+
 ==========================
 INDEX AND DATA FILE UPDATE
 ==========================
index c2a0871280a02eeaa6c027300207881b4a8825cf..c58b9f5ba002a4bdfb8b0ef2aefb30d08160bf53 100644 (file)
@@ -20,15 +20,16 @@ Lots of code taken from ext3 and other projects.
 Authors in alphabetical order:
 Joel Becker   <joel.becker@oracle.com>
 Zach Brown    <zach.brown@oracle.com>
-Mark Fasheh   <mark.fasheh@oracle.com>
+Mark Fasheh   <mfasheh@suse.com>
 Kurt Hackel   <kurt.hackel@oracle.com>
+Tao Ma        <tao.ma@oracle.com>
 Sunil Mushran <sunil.mushran@oracle.com>
 Manish Singh  <manish.singh@oracle.com>
+Tiger Yang    <tiger.yang@oracle.com>
 
 Caveats
 =======
 Features which OCFS2 does not support yet:
-       - quotas
        - Directory change notification (F_NOTIFY)
        - Distributed Caching (F_SETLEASE/F_GETLEASE/break_lease)
 
@@ -70,7 +71,6 @@ commit=nrsec  (*)     Ocfs2 can be told to sync all its data and metadata
                        performance.
 localalloc=8(*)                Allows custom localalloc size in MB. If the value is too
                        large, the fs will silently revert it to the default.
-                       Localalloc is not enabled for local mounts.
 localflocks            This disables cluster aware flock.
 inode64                        Indicates that Ocfs2 is allowed to create inodes at
                        any location in the filesystem, including those which
index ebc50f808ea4b6774992ff340a4de9c5492c8135..52bc31433723402ff6531552c9a118acf83f0da3 100644 (file)
@@ -41,6 +41,13 @@ expand files, provided the time taken to do so isn't too long.
 Operations of both types may sleep during execution, thus tying up the thread
 loaned to it.
 
+A further class of work item is available, based on the slow work item class:
+
+ (*) Delayed slow work items.
+
+These are slow work items that have a timer to defer queueing of the item for
+a while.
+
 
 THREAD-TO-CLASS ALLOCATION
 --------------------------
@@ -64,9 +71,11 @@ USING SLOW WORK ITEMS
 Firstly, a module or subsystem wanting to make use of slow work items must
 register its interest:
 
-        int ret = slow_work_register_user();
+        int ret = slow_work_register_user(struct module *module);
 
-This will return 0 if successful, or a -ve error upon failure.
+This will return 0 if successful, or a -ve error upon failure.  The module
+pointer should be the module interested in using this facility (almost
+certainly THIS_MODULE).
 
 
 Slow work items may then be set up by:
@@ -91,6 +100,10 @@ Slow work items may then be set up by:
 
        slow_work_init(&myitem, &myitem_ops);
 
+     or:
+
+       delayed_slow_work_init(&myitem, &myitem_ops);
+
      or:
 
        vslow_work_init(&myitem, &myitem_ops);
@@ -102,15 +115,92 @@ A suitably set up work item can then be enqueued for processing:
        int ret = slow_work_enqueue(&myitem);
 
 This will return a -ve error if the thread pool is unable to gain a reference
-on the item, 0 otherwise.
+on the item, 0 otherwise, or (for delayed work):
+
+       int ret = delayed_slow_work_enqueue(&myitem, my_jiffy_delay);
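+
+For illustration, a complete minimal user might look something like the
+following sketch (the names are hypothetical; only the mandatory ->execute()
+operation is given, since the other operations are optional - see below):
+
+	static void my_execute(struct slow_work *work)
+	{
+		/* do the slow operation; sleeping is permitted here */
+	}
+
+	static const struct slow_work_ops my_ops = {
+		.execute	= my_execute,
+	};
+
+	ret = slow_work_register_user(THIS_MODULE);
+	if (ret == 0) {
+		slow_work_init(&myitem, &my_ops);
+		ret = slow_work_enqueue(&myitem);
+	}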
 
 
 The items are reference counted, so there ought to be no need for a flush
-operation.  When all a module's slow work items have been processed, and the
+operation.  But as the reference counting is optional, means to cancel
+existing work items are also included:
+
+       cancel_slow_work(&myitem);
+       cancel_delayed_slow_work(&myitem);
+
+can be used to cancel pending work.  These cancel functions wait for any
+existing work to have been executed (or prevent its execution, depending
+on timing).
+
+
+When all a module's slow work items have been processed, and the
 module has no further interest in the facility, it should unregister its
 interest:
 
-       slow_work_unregister_user();
+       slow_work_unregister_user(struct module *module);
+
+The module pointer is used to wait for all outstanding work items for that
+module before completing the unregistration.  This prevents the put_ref() code
+from being taken away before it completes.  module should almost certainly be
+THIS_MODULE.
+
+
+================
+HELPER FUNCTIONS
+================
+
+The slow-work facility provides a function by which it can be determined
+whether or not an item is queued for later execution:
+
+       bool queued = slow_work_is_queued(struct slow_work *work);
+
+If it returns false, then the item is not on the queue (it may be executing
+with a requeue pending).  This can be used to work out whether an item on which
+another depends is on the queue, thus allowing a dependent item to be queued
+after it.
+
+If the above shows an item on which another depends not to be queued, then the
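+For example (a sketch; 'dep' stands for a hypothetical item that 'myitem'
+depends on):
+
+	if (slow_work_is_queued(&dep))
+		slow_work_enqueue(&myitem);
+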
+owner of the dependent item might need to wait.  However, to avoid locking up
+the threads unnecessarily by sleeping in them, it can make sense under some
+circumstances to return the work item to the queue, thus deferring it until
+some other items have had a chance to make use of the yielded thread.
+
+To yield a thread and defer an item, the work function should simply enqueue
+the work item again and return.  However, this doesn't work if there's nothing
+actually on the queue, as the thread just vacated will jump straight back into
+the item's work function, thus busy waiting on a CPU.
+
+Instead, the item should use the thread to wait for the dependency to go away,
+but rather than using schedule() or schedule_timeout() to sleep, it should use
+the following function:
+
+       bool requeue = slow_work_sleep_till_thread_needed(
+                       struct slow_work *work,
+                       signed long *_timeout);
+
+This will add a second wait and then sleep, such that it will be woken up if
+either something appears on the queue that could usefully make use of the
+thread - and behind which this item can be queued, or if the event the caller
+set up to wait for happens.  True will be returned if something else appeared
+on the queue and this work function should perhaps return, or false if
+something else woke it up.  The timeout is as for schedule_timeout().
+
+For example:
+
+       wq = bit_waitqueue(&my_flags, MY_BIT);
+       init_wait(&wait);
+       requeue = false;
+       do {
+               prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
+               if (!test_bit(MY_BIT, &my_flags))
+                       break;
+               requeue = slow_work_sleep_till_thread_needed(&my_work,
+                                                            &timeout);
+       } while (timeout > 0 && !requeue);
+       finish_wait(wq, &wait);
+	if (!test_bit(MY_BIT, &my_flags))
+               goto do_my_thing;
+       if (requeue)
+               return; // to slow_work
 
 
 ===============
@@ -118,7 +208,8 @@ ITEM OPERATIONS
 ===============
 
 Each work item requires a table of operations of type struct slow_work_ops.
-All members are required:
+Only ->execute() is required; the getting and putting of a reference and the
+describing of an item are all optional.
 
  (*) Get a reference on an item:
 
@@ -148,6 +239,16 @@ All members are required:
      This should perform the work required of the item.  It may sleep, it may
      perform disk I/O and it may wait for locks.
 
+ (*) View an item through /proc:
+
+       void (*desc)(struct slow_work *work, struct seq_file *m);
+
+     If supplied, this should print to 'm' a small string describing the work
+     the item is to do.  This should be no more than about 40 characters, and
+     shouldn't include a newline character.
+
+     See the 'Viewing executing and queued items' section below.
+
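+     For example, a trivial desc operation might be (a sketch; the names are
+     hypothetical):
+
+	static void my_desc(struct slow_work *work, struct seq_file *m)
+	{
+		seq_puts(m, "MYMOD: example item");
+	}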
 
 ==================
 POOL CONFIGURATION
@@ -172,3 +273,50 @@ The slow-work thread pool has a number of configurables:
      is bounded to between 1 and one fewer than the number of active threads.
      This ensures there is always at least one thread that can process very
      slow work items, and always at least one thread that won't.
+
+
+==================================
+VIEWING EXECUTING AND QUEUED ITEMS
+==================================
+
+If CONFIG_SLOW_WORK_PROC is enabled, a proc file is made available:
+
+       /proc/slow_work_rq
+
+through which the list of work items being executed and the queues of items to
+be executed may be viewed.  The owner of a work item is given the chance to
+add some information of its own.
+
+The contents look something like the following:
+
+    THR PID   ITEM ADDR        FL MARK  DESC
+    === ===== ================ == ===== ==========
+      0  3005 ffff880023f52348  a 952ms FSC: OBJ17d3: LOOK
+      1  3006 ffff880024e33668  2 160ms FSC: OBJ17e5 OP60d3b: Write1/Store fl=2
+      2  3165 ffff8800296dd180  a 424ms FSC: OBJ17e4: LOOK
+      3  4089 ffff8800262c8d78  a 212ms FSC: OBJ17ea: CRTN
+      4  4090 ffff88002792bed8  2 388ms FSC: OBJ17e8 OP60d36: Write1/Store fl=2
+      5  4092 ffff88002a0ef308  2 388ms FSC: OBJ17e7 OP60d2e: Write1/Store fl=2
+      6  4094 ffff88002abaf4b8  2 132ms FSC: OBJ17e2 OP60d4e: Write1/Store fl=2
+      7  4095 ffff88002bb188e0  a 388ms FSC: OBJ17e9: CRTN
+    vsq     - ffff880023d99668  1 308ms FSC: OBJ17e0 OP60f91: Write1/EnQ fl=2
+    vsq     - ffff8800295d1740  1 212ms FSC: OBJ16be OP4d4b6: Write1/EnQ fl=2
+    vsq     - ffff880025ba3308  1 160ms FSC: OBJ179a OP58dec: Write1/EnQ fl=2
+    vsq     - ffff880024ec83e0  1 160ms FSC: OBJ17ae OP599f2: Write1/EnQ fl=2
+    vsq     - ffff880026618e00  1 160ms FSC: OBJ17e6 OP60d33: Write1/EnQ fl=2
+    vsq     - ffff880025a2a4b8  1 132ms FSC: OBJ16a2 OP4d583: Write1/EnQ fl=2
+    vsq     - ffff880023cbe6d8  9 212ms FSC: OBJ17eb: LOOK
+    vsq     - ffff880024d37590  9 212ms FSC: OBJ17ec: LOOK
+    vsq     - ffff880027746cb0  9 212ms FSC: OBJ17ed: LOOK
+    vsq     - ffff880024d37ae8  9 212ms FSC: OBJ17ee: LOOK
+    vsq     - ffff880024d37cb0  9 212ms FSC: OBJ17ef: LOOK
+    vsq     - ffff880025036550  9 212ms FSC: OBJ17f0: LOOK
+    vsq     - ffff8800250368e0  9 212ms FSC: OBJ17f1: LOOK
+    vsq     - ffff880025036aa8  9 212ms FSC: OBJ17f2: LOOK
+
+In the 'THR' column, executing items show the thread they're occupying and
+queued items indicate which queue they're on.  'PID' shows the process ID of
+a slow-work thread that's executing something.  'FL' shows the work item flags.
+'MARK' indicates how long since an item was queued or began executing.  Lastly,
+the 'DESC' column permits the owner of an item to give some information.
+
index 81d68d5b7eea0c172c6fce0cd5bd4abcd32ac75a..7d4b5cc07ad86f74cfcada1540b1f9132508cbaf 100644 (file)
@@ -512,10 +512,32 @@ W:        http://www.arm.linux.org.uk/
 S:     Maintained
 F:     arch/arm/
 
+ARM PRIMECELL AACI PL041 DRIVER
+M:     Russell King <linux@arm.linux.org.uk>
+S:     Maintained
+F:     sound/arm/aaci.*
+
+ARM PRIMECELL CLCD PL110 DRIVER
+M:     Russell King <linux@arm.linux.org.uk>
+S:     Maintained
+F:     drivers/video/amba-clcd.*
+
+ARM PRIMECELL KMI PL050 DRIVER
+M:     Russell King <linux@arm.linux.org.uk>
+S:     Maintained
+F:     drivers/input/serio/ambakmi.*
+F:     include/linux/amba/kmi.h
+
 ARM PRIMECELL MMCI PL180/1 DRIVER
 S:     Orphan
 F:     drivers/mmc/host/mmci.*
 
+ARM PRIMECELL BUS SUPPORT
+M:     Russell King <linux@arm.linux.org.uk>
+S:     Maintained
+F:     drivers/amba/
+F:     include/linux/amba/bus.h
+
 ARM/ADI ROADRUNNER MACHINE SUPPORT
 M:     Lennert Buytenhek <kernel@wantstofly.org>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -749,6 +771,14 @@ ARM/NEC MOBILEPRO 900/c MACHINE SUPPORT
 M:     Michael Petchkovsky <mkpetch@internode.on.net>
 S:     Maintained
 
+ARM/NOMADIK ARCHITECTURE
+M:     Alessandro Rubini <rubini@unipv.it>
+M:     STEricsson <STEricsson_nomadik_linux@list.st.com>
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:     Maintained
+F:     arch/arm/mach-nomadik/
+F:     arch/arm/plat-nomadik/
+
 ARM/OPENMOKO NEO FREERUNNER (GTA02) MACHINE SUPPORT
 M:     Nelson Castillo <arhuaco@freaks-unidos.net>
 L:     openmoko-kernel@lists.openmoko.org (subscribers-only)
@@ -1027,7 +1057,7 @@ F:        drivers/serial/atmel_serial.c
 
 ATMEL LCDFB DRIVER
 M:     Nicolas Ferre <nicolas.ferre@atmel.com>
-L:     linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:     linux-fbdev@vger.kernel.org
 S:     Maintained
 F:     drivers/video/atmel_lcdfb.c
 F:     include/video/atmel_lcdc.h
@@ -2113,7 +2143,7 @@ F:        drivers/net/wan/dlci.c
 F:     drivers/net/wan/sdla.c
 
 FRAMEBUFFER LAYER
-L:     linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:     linux-fbdev@vger.kernel.org
 W:     http://linux-fbdev.sourceforge.net/
 S:     Orphan
 F:     Documentation/fb/
@@ -2136,7 +2166,7 @@ F:        drivers/i2c/busses/i2c-cpm.c
 
 FREESCALE IMX / MXC FRAMEBUFFER DRIVER
 M:     Sascha Hauer <kernel@pengutronix.de>
-L:     linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:     linux-fbdev@vger.kernel.org
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/plat-mxc/include/mach/imxfb.h
@@ -2312,6 +2342,13 @@ T:       git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
 S:     Maintained
 F:     drivers/media/video/gspca/finepix.c
 
+GSPCA GL860 SUBDRIVER
+M:     Olivier Lorin <o.lorin@laposte.net>
+L:     linux-media@vger.kernel.org
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/mchehab/linux-2.6.git
+S:     Maintained
+F:     drivers/media/video/gspca/gl860/
+
 GSPCA M5602 SUBDRIVER
 M:     Erik Andren <erik.andren@gmail.com>
 L:     linux-media@vger.kernel.org
@@ -2533,8 +2570,7 @@ S:        Maintained
 F:     Documentation/i2c/
 F:     drivers/i2c/
 F:     include/linux/i2c.h
-F:     include/linux/i2c-dev.h
-F:     include/linux/i2c-id.h
+F:     include/linux/i2c-*.h
 
 I2C-TINY-USB DRIVER
 M:     Till Harbaum <till@harbaum.org>
@@ -2635,7 +2671,7 @@ S:        Supported
 F:     security/integrity/ima/
 
 IMS TWINTURBO FRAMEBUFFER DRIVER
-L:     linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:     linux-fbdev@vger.kernel.org
 S:     Orphan
 F:     drivers/video/imsttfb.c
 
@@ -2670,14 +2706,14 @@ F:      drivers/input/
 
 INTEL FRAMEBUFFER DRIVER (excluding 810 and 815)
 M:     Sylvain Meyer <sylvain.meyer@worldonline.fr>
-L:     linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:     linux-fbdev@vger.kernel.org
 S:     Maintained
 F:     Documentation/fb/intelfb.txt
 F:     drivers/video/intelfb/
 
 INTEL 810/815 FRAMEBUFFER DRIVER
 M:     Antonino Daplas <adaplas@gmail.com>
-L:     linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:     linux-fbdev@vger.kernel.org
 S:     Maintained
 F:     drivers/video/i810/
 
@@ -3084,9 +3120,13 @@ F:       kernel/kgdb.c
 
 KMEMCHECK
 M:     Vegard Nossum <vegardno@ifi.uio.no>
-P      Pekka Enberg
-M:     penberg@cs.helsinki.fi
+M:     Pekka Enberg <penberg@cs.helsinki.fi>
 S:     Maintained
+F:     Documentation/kmemcheck.txt
+F:     arch/x86/include/asm/kmemcheck.h
+F:     arch/x86/mm/kmemcheck/
+F:     include/linux/kmemcheck.h
+F:     mm/kmemcheck.c
 
 KMEMLEAK
 M:     Catalin Marinas <catalin.marinas@arm.com>
@@ -3387,7 +3427,7 @@ S:        Supported
 
 MATROX FRAMEBUFFER DRIVER
 M:     Petr Vandrovec <vandrove@vc.cvut.cz>
-L:     linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:     linux-fbdev@vger.kernel.org
 S:     Maintained
 F:     drivers/video/matrox/matroxfb_*
 F:     include/linux/matroxfb.h
@@ -3774,7 +3814,7 @@ F:        fs/ntfs/
 
 NVIDIA (rivafb and nvidiafb) FRAMEBUFFER DRIVER
 M:     Antonino Daplas <adaplas@gmail.com>
-L:     linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:     linux-fbdev@vger.kernel.org
 S:     Maintained
 F:     drivers/video/riva/
 F:     drivers/video/nvidia/
@@ -3809,7 +3849,7 @@ F:        sound/soc/omap/
 
 OMAP FRAMEBUFFER SUPPORT
 M:     Imre Deak <imre.deak@nokia.com>
-L:     linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:     linux-fbdev@vger.kernel.org
 L:     linux-omap@vger.kernel.org
 S:     Maintained
 F:     drivers/video/omap/
@@ -4315,19 +4355,21 @@ F:      include/linux/qnxtypes.h
 
 RADEON FRAMEBUFFER DISPLAY DRIVER
 M:     Benjamin Herrenschmidt <benh@kernel.crashing.org>
-L:     linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:     linux-fbdev@vger.kernel.org
 S:     Maintained
 F:     drivers/video/aty/radeon*
 F:     include/linux/radeonfb.h
 
 RAGE128 FRAMEBUFFER DISPLAY DRIVER
 M:     Paul Mackerras <paulus@samba.org>
-L:     linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:     linux-fbdev@vger.kernel.org
 S:     Maintained
 F:     drivers/video/aty/aty128fb.c
 
 RALINK RT2X00 WIRELESS LAN DRIVER
 P:     rt2x00 project
+M:     Ivo van Doorn <IvDoorn@gmail.com>
+M:     Gertjan van Wingerde <gwingerde@gmail.com>
 L:     linux-wireless@vger.kernel.org
 L:     users@rt2x00.serialmonkey.com (moderated for non-subscribers)
 W:     http://rt2x00.serialmonkey.com/
@@ -4415,7 +4457,7 @@ RFKILL
 M:     Johannes Berg <johannes@sipsolutions.net>
 L:     linux-wireless@vger.kernel.org
 S:     Maintained
-F      Documentation/rfkill.txt
+F:     Documentation/rfkill.txt
 F:     net/rfkill/
 
 RISCOM8 DRIVER
@@ -4459,7 +4501,7 @@ F:        drivers/net/wireless/rtl818x/rtl8187*
 
 S3 SAVAGE FRAMEBUFFER DRIVER
 M:     Antonino Daplas <adaplas@gmail.com>
-L:     linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:     linux-fbdev@vger.kernel.org
 S:     Maintained
 F:     drivers/video/savage/
 
@@ -5622,7 +5664,7 @@ S:        Maintained
 
 UVESAFB DRIVER
 M:     Michal Januszewski <spock@gentoo.org>
-L:     linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:     linux-fbdev@vger.kernel.org
 W:     http://dev.gentoo.org/~spock/projects/uvesafb/
 S:     Maintained
 F:     Documentation/fb/uvesafb.txt
@@ -5655,7 +5697,7 @@ F:        drivers/mmc/host/via-sdmmc.c
 VIA UNICHROME(PRO)/CHROME9 FRAMEBUFFER DRIVER
 M:     Joseph Chan <JosephChan@via.com.tw>
 M:     Scott Fang <ScottFang@viatech.com.cn>
-L:     linux-fbdev-devel@lists.sourceforge.net (moderated for non-subscribers)
+L:     linux-fbdev@vger.kernel.org
 S:     Maintained
 F:     drivers/video/via/
 
index aa3e13a7e353a655f00085eeb1103444fdee0952..ad8260102f642fed6d4bf13560eecaeb73db065d 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 2
 PATCHLEVEL = 6
 SUBLEVEL = 32
-EXTRAVERSION = -rc7
+EXTRAVERSION = -rc8
 NAME = Man-Eating Seals of Antiquity
 
 # *DOCUMENTATION*
index 3d0cdd21b882d1d39cade901827b17509c63e06e..9fd6d3ab68c098120abab6b3f377a36ed4b0b6d5 100644 (file)
@@ -331,15 +331,15 @@ static inline void outer_flush_range(unsigned long start, unsigned long end)
  * Convert calls to our calling convention.
  */
 #define flush_cache_all()              __cpuc_flush_kern_all()
-#ifndef CONFIG_CPU_CACHE_VIPT
-static inline void flush_cache_mm(struct mm_struct *mm)
+
+static inline void vivt_flush_cache_mm(struct mm_struct *mm)
 {
        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
                __cpuc_flush_user_all();
 }
 
 static inline void
-flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
                __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
@@ -347,7 +347,7 @@ flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long
 }
 
 static inline void
-flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
+vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
        if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
                unsigned long addr = user_addr & PAGE_MASK;
@@ -356,7 +356,7 @@ flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned l
 }
 
 static inline void
-flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
+vivt_flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
                         unsigned long uaddr, void *kaddr,
                         unsigned long len, int write)
 {
@@ -365,6 +365,16 @@ flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
                __cpuc_coherent_kern_range(addr, addr + len);
        }
 }
+
+#ifndef CONFIG_CPU_CACHE_VIPT
+#define flush_cache_mm(mm) \
+               vivt_flush_cache_mm(mm)
+#define flush_cache_range(vma,start,end) \
+               vivt_flush_cache_range(vma,start,end)
+#define flush_cache_page(vma,addr,pfn) \
+               vivt_flush_cache_page(vma,addr,pfn)
+#define flush_ptrace_access(vma,page,ua,ka,len,write) \
+               vivt_flush_ptrace_access(vma,page,ua,ka,len,write)
 #else
 extern void flush_cache_mm(struct mm_struct *mm);
 extern void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
@@ -410,8 +420,6 @@ extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
  */
 extern void flush_dcache_page(struct page *);
 
-extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
-
 static inline void __flush_icache_all(void)
 {
 #ifdef CONFIG_ARM_ERRATA_411920
index d16ec97ec9a9948fbb7de727164a006bfe6eeae7..c019949a5189dc725a937006eb8445c18d0ad2ef 100644 (file)
@@ -22,4 +22,10 @@ enum km_type {
        KM_TYPE_NR
 };
 
+#ifdef CONFIG_DEBUG_HIGHMEM
+#define KM_NMI         (-1)
+#define KM_NMI_PTE     (-1)
+#define KM_IRQ_PTE     (-1)
+#endif
+
 #endif
index cefedf062138dc579b2eb21ec154bd73afe1f02a..bc2ff8b281335d8d021586318f6fd7f7b63aaffe 100644 (file)
  * private definitions which should NOT be used outside memory.h
  * files.  Use virt_to_phys/phys_to_virt/__pa/__va instead.
  */
+#ifndef __virt_to_phys
 #define __virt_to_phys(x)      ((x) - PAGE_OFFSET + PHYS_OFFSET)
 #define __phys_to_virt(x)      ((x) - PHYS_OFFSET + PAGE_OFFSET)
+#endif
 
 /*
  * Convert a physical address to a Page Frame Number and back
index ca2bf2f6d6ea93c29d312525b89aceb9d30134f3..9997ad20eff11d04906882b0a8da3c7c47ac4f76 100644 (file)
 #  define __SWAB_64_THRU_32__
 #endif
 
+#if defined(__KERNEL__) && __LINUX_ARM_ARCH__ >= 6
+
+static inline __attribute_const__ __u16 __arch_swab16(__u16 x)
+{
+       __asm__ ("rev16 %0, %1" : "=r" (x) : "r" (x));
+       return x;
+}
+#define __arch_swab16 __arch_swab16
+
+static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
+{
+       __asm__ ("rev %0, %1" : "=r" (x) : "r" (x));
+       return x;
+}
+#define __arch_swab32 __arch_swab32
+
+#else
+
 static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
 {
        __u32 t;
@@ -48,3 +66,4 @@ static inline __attribute_const__ __u32 __arch_swab32(__u32 x)
 
 #endif
 
+#endif
index 2a573d4fea24a7ab12873282ad5c805a4c0e8de1..e7714f367eb83aa0a4b0224e5129ec94c87c3391 100644 (file)
@@ -662,8 +662,12 @@ static void do_signal(struct pt_regs *regs, int syscall)
                                regs->ARM_sp -= 4;
                                usp = (u32 __user *)regs->ARM_sp;
 
-                               put_user(regs->ARM_pc, usp);
-                               regs->ARM_pc = KERN_RESTART_CODE;
+                               if (put_user(regs->ARM_pc, usp) == 0) {
+                                       regs->ARM_pc = KERN_RESTART_CODE;
+                               } else {
+                                       regs->ARM_sp += 4;
+                                       force_sigsegv(0, current);
+                               }
 #endif
                        }
                }
index 4db0eff90357fa7f3b100fb4014e6687f8a1d3e4..dae5e9b166ead0ac7d12545cf835c11a0ca6bcec 100644 (file)
 
 #define IO_SPACE_LIMIT 0xffffffff
 
-#define __io(a)         ((void __iomem *)HW_IO_PHYS_TO_VIRT(a))
-
-/* Do not enable mem_pci for a big endian arm architecture or unexpected byteswaps will */
-/* happen in readw/writew etc. */
-
-#define readb(c)        __raw_readb(c)
-#define readw(c)        __raw_readw(c)
-#define readl(c)        __raw_readl(c)
-#define readb_relaxed(addr) readb(addr)
-#define readw_relaxed(addr) readw(addr)
-#define readl_relaxed(addr) readl(addr)
-
-#define readsb(p, d, l)   __raw_readsb(p, d, l)
-#define readsw(p, d, l)   __raw_readsw(p, d, l)
-#define readsl(p, d, l)   __raw_readsl(p, d, l)
-
-#define writeb(v, c)     __raw_writeb(v, c)
-#define writew(v, c)     __raw_writew(v, c)
-#define writel(v, c)     __raw_writel(v, c)
-
-#define writesb(p, d, l)  __raw_writesb(p, d, l)
-#define writesw(p, d, l)  __raw_writesw(p, d, l)
-#define writesl(p, d, l)  __raw_writesl(p, d, l)
-
-#define memset_io(c, v, l)    _memset_io((c), (v), (l))
-#define memcpy_fromio(a, c, l)    _memcpy_fromio((a), (c), (l))
-#define memcpy_toio(c, a, l)  _memcpy_toio((c), (a), (l))
-
-#define eth_io_copy_and_sum(s, c, l, b) eth_copy_and_sum((s), (c), (l), (b))
+/*
+ * We don't actually have real ISA nor PCI buses, but there are so many
+ * drivers out there that might just work if we fake them...
+ */
+#define __io(a)                __typesafe_io(a)
+#define __mem_pci(a)   (a)
 
 #endif
index ea00486a5e5314b5e5255a7c3aa9d4a9af306960..51e0b3ba5f3a3c0110e2a70868ebd6970a01738e 100644 (file)
 /* Zoom2 has Qwerty keyboard*/
 static int board_keymap[] = {
        KEY(0, 0, KEY_E),
-       KEY(1, 0, KEY_R),
-       KEY(2, 0, KEY_T),
-       KEY(3, 0, KEY_HOME),
-       KEY(6, 0, KEY_I),
-       KEY(7, 0, KEY_LEFTSHIFT),
-       KEY(0, 1, KEY_D),
+       KEY(0, 1, KEY_R),
+       KEY(0, 2, KEY_T),
+       KEY(0, 3, KEY_HOME),
+       KEY(0, 6, KEY_I),
+       KEY(0, 7, KEY_LEFTSHIFT),
+       KEY(1, 0, KEY_D),
        KEY(1, 1, KEY_F),
-       KEY(2, 1, KEY_G),
-       KEY(3, 1, KEY_SEND),
-       KEY(6, 1, KEY_K),
-       KEY(7, 1, KEY_ENTER),
-       KEY(0, 2, KEY_X),
-       KEY(1, 2, KEY_C),
+       KEY(1, 2, KEY_G),
+       KEY(1, 3, KEY_SEND),
+       KEY(1, 6, KEY_K),
+       KEY(1, 7, KEY_ENTER),
+       KEY(2, 0, KEY_X),
+       KEY(2, 1, KEY_C),
        KEY(2, 2, KEY_V),
-       KEY(3, 2, KEY_END),
-       KEY(6, 2, KEY_DOT),
-       KEY(7, 2, KEY_CAPSLOCK),
-       KEY(0, 3, KEY_Z),
-       KEY(1, 3, KEY_KPPLUS),
-       KEY(2, 3, KEY_B),
+       KEY(2, 3, KEY_END),
+       KEY(2, 6, KEY_DOT),
+       KEY(2, 7, KEY_CAPSLOCK),
+       KEY(3, 0, KEY_Z),
+       KEY(3, 1, KEY_KPPLUS),
+       KEY(3, 2, KEY_B),
        KEY(3, 3, KEY_F1),
-       KEY(6, 3, KEY_O),
-       KEY(7, 3, KEY_SPACE),
-       KEY(0, 4, KEY_W),
-       KEY(1, 4, KEY_Y),
-       KEY(2, 4, KEY_U),
-       KEY(3, 4, KEY_F2),
+       KEY(3, 6, KEY_O),
+       KEY(3, 7, KEY_SPACE),
+       KEY(4, 0, KEY_W),
+       KEY(4, 1, KEY_Y),
+       KEY(4, 2, KEY_U),
+       KEY(4, 3, KEY_F2),
        KEY(4, 4, KEY_VOLUMEUP),
-       KEY(6, 4, KEY_L),
-       KEY(7, 4, KEY_LEFT),
-       KEY(0, 5, KEY_S),
-       KEY(1, 5, KEY_H),
-       KEY(2, 5, KEY_J),
-       KEY(3, 5, KEY_F3),
+       KEY(4, 6, KEY_L),
+       KEY(4, 7, KEY_LEFT),
+       KEY(5, 0, KEY_S),
+       KEY(5, 1, KEY_H),
+       KEY(5, 2, KEY_J),
+       KEY(5, 3, KEY_F3),
        KEY(5, 5, KEY_VOLUMEDOWN),
-       KEY(6, 5, KEY_M),
-       KEY(4, 5, KEY_ENTER),
-       KEY(7, 5, KEY_RIGHT),
-       KEY(0, 6, KEY_Q),
-       KEY(1, 6, KEY_A),
-       KEY(2, 6, KEY_N),
-       KEY(3, 6, KEY_BACKSPACE),
+       KEY(5, 6, KEY_M),
+       KEY(5, 7, KEY_ENTER),
+       KEY(6, 0, KEY_Q),
+       KEY(6, 1, KEY_A),
+       KEY(6, 2, KEY_N),
+       KEY(6, 3, KEY_BACKSPACE),
        KEY(6, 6, KEY_P),
-       KEY(7, 6, KEY_UP),
        KEY(6, 7, KEY_SELECT),
-       KEY(7, 7, KEY_DOWN),
-       KEY(0, 7, KEY_PROG1),   /*MACRO 1 <User defined> */
-       KEY(1, 7, KEY_PROG2),   /*MACRO 2 <User defined> */
-       KEY(2, 7, KEY_PROG3),   /*MACRO 3 <User defined> */
-       KEY(3, 7, KEY_PROG4),   /*MACRO 4 <User defined> */
-       0
+       KEY(7, 0, KEY_PROG1),   /*MACRO 1 <User defined> */
+       KEY(7, 1, KEY_PROG2),   /*MACRO 2 <User defined> */
+       KEY(7, 2, KEY_PROG3),   /*MACRO 3 <User defined> */
+       KEY(7, 3, KEY_PROG4),   /*MACRO 4 <User defined> */
+       KEY(7, 5, KEY_RIGHT),
+       KEY(7, 6, KEY_UP),
+       KEY(7, 7, KEY_DOWN)
 };
 
 static struct matrix_keymap_data board_map_data = {
index 489556eecbd1189a7a61638deec0f33d3aff59ad..7c5c00df3c70c36bb2a96b39ef015688e3599cb8 100644 (file)
@@ -473,7 +473,7 @@ static u16 _omap3_dpll_compute_freqsel(struct clk *clk, u8 n)
        unsigned long fint;
        u16 f = 0;
 
-       fint = clk->dpll_data->clk_ref->rate / (n + 1);
+       fint = clk->dpll_data->clk_ref->rate / n;
 
        pr_debug("clock: fint is %lu\n", fint);
 
index c8119781e00aff7f0be0e7965dbd14878edcb55a..9565c05bebd259d050afc597e7578294403470c7 100644 (file)
@@ -489,9 +489,9 @@ static struct clk core_ck = {
 static struct clk dpll3_m2x2_ck = {
        .name           = "dpll3_m2x2_ck",
        .ops            = &clkops_null,
-       .parent         = &dpll3_x2_ck,
+       .parent         = &dpll3_m2_ck,
        .clkdm_name     = "dpll3_clkdm",
-       .recalc         = &followparent_recalc,
+       .recalc         = &omap3_clkoutx2_recalc,
 };
 
 /* The PWRDN bit is apparently only available on 3430ES2 and above */
index f8657568b1baff51eeb6b950de4b35b6088d47c7..f3c992e29651c1c0520a28eb551717537ea82a93 100644 (file)
@@ -378,7 +378,7 @@ EXPORT_SYMBOL(gpmc_cs_request);
 void gpmc_cs_free(int cs)
 {
        spin_lock(&gpmc_mem_lock);
-       if (cs >= GPMC_CS_NUM || !gpmc_cs_reserved(cs)) {
+       if (cs >= GPMC_CS_NUM || cs < 0 || !gpmc_cs_reserved(cs)) {
                printk(KERN_ERR "Trying to free non-reserved GPMC CS%d\n", cs);
                BUG();
                spin_unlock(&gpmc_mem_lock);
index 983cc8c2008190db5e686f65a69f2a47bd2fd5cc..9e4d9816726ad5d75c0657f9a32d0b36c0ec3aa7 100644 (file)
@@ -447,6 +447,7 @@ static __init int pxa_cpufreq_init(struct cpufreq_policy *policy)
                pxa27x_freq_table[i].frequency = freq;
                pxa27x_freq_table[i].index = i;
        }
+       pxa27x_freq_table[i].index = i;
        pxa27x_freq_table[i].frequency = CPUFREQ_TABLE_END;
 
        /*
index 67f34a8d8e60ee58c7f9257fd4812d9dd827dc34..149cdd9aee4d51d23977d769d1a3d041897f12ee 100644 (file)
@@ -102,7 +102,7 @@ static int setup_freqs_table(struct cpufreq_policy *policy,
                table[i].index = i;
                table[i].frequency = freqs[i].cpufreq_mhz * 1000;
        }
-       table[num].frequency = i;
+       table[num].index = i;
        table[num].frequency = CPUFREQ_TABLE_END;
 
        pxa3xx_freqs = freqs;
index 3da45d05174398d0b2767f5efc0ab8efa0d6691d..d98023f55503881c3d7e177b1112686f0ef8b5c0 100644 (file)
@@ -802,10 +802,12 @@ static void __init spitz_init(void)
 {
        spitz_ficp_platform_data.gpio_pwdown = SPITZ_GPIO_IR_ON;
 
+#ifdef CONFIG_MACH_BORZOI
        if (machine_is_borzoi()) {
                sharpsl_nand_platform_data.badblock_pattern = &sharpsl_akita_bbt;
                sharpsl_nand_platform_data.ecc_layout = &akita_oobinfo;
        }
+#endif
 
        platform_scoop_config = &spitz_pcmcia_config;
 
index 9264d814cd7a9db1f5a5d0901fc62e81fa921aa0..7b7d4c36c11cab2be54f2d9902daa71b89258905 100644 (file)
@@ -774,5 +774,5 @@ config CACHE_XSC3L2
 
 config ARM_L1_CACHE_SHIFT
        int
-       default 6 if ARCH_OMAP3
+       default 6 if ARCH_OMAP3 || ARCH_S5PC1XX
        default 5
index b480f1d3591f9b2263a74d12e1aa4fafdd1c464e..747f9a9021bb9d97de9e22d13b04bb5c38ecc15b 100644 (file)
@@ -99,18 +99,25 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
 
        l2x0_base = base;
 
-       /* disable L2X0 */
-       writel(0, l2x0_base + L2X0_CTRL);
+       /*
+        * Check if l2x0 controller is already enabled.
+        * If you are booting from non-secure mode
+        * accessing the below registers will fault.
+        */
+       if (!(readl(l2x0_base + L2X0_CTRL) & 1)) {
 
-       aux = readl(l2x0_base + L2X0_AUX_CTRL);
-       aux &= aux_mask;
-       aux |= aux_val;
-       writel(aux, l2x0_base + L2X0_AUX_CTRL);
+               /* l2x0 controller is disabled */
 
-       l2x0_inv_all();
+               aux = readl(l2x0_base + L2X0_AUX_CTRL);
+               aux &= aux_mask;
+               aux |= aux_val;
+               writel(aux, l2x0_base + L2X0_AUX_CTRL);
 
-       /* enable L2X0 */
-       writel(1, l2x0_base + L2X0_CTRL);
+               l2x0_inv_all();
+
+               /* enable L2X0 */
+               writel(1, l2x0_base + L2X0_CTRL);
+       }
 
        outer_cache.inv_range = l2x0_inv_range;
        outer_cache.clean_range = l2x0_clean_range;
index 4127a7bddfe5cef91f4a340cfe4225e6200330a6..841f355319bfee86725ef6348d07d96b9b4f446e 100644 (file)
@@ -41,6 +41,14 @@ static void v6_copy_user_highpage_nonaliasing(struct page *to,
        kfrom = kmap_atomic(from, KM_USER0);
        kto = kmap_atomic(to, KM_USER1);
        copy_page(kto, kfrom);
+#ifdef CONFIG_HIGHMEM
+       /*
+        * kmap_atomic() doesn't set the page virtual address, and
+        * kunmap_atomic() takes care of cache flushing already.
+        */
+       if (page_address(to) != NULL)
+#endif
+               __cpuc_flush_dcache_page(kto);
        kunmap_atomic(kto, KM_USER1);
        kunmap_atomic(kfrom, KM_USER0);
 }
index d0d17b6a370304749d4f0f04c63662716e1c65e2..729602291958871bfc749c7a1af708620f97cfd9 100644 (file)
@@ -23,6 +23,8 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 
+#include "mm.h"
+
 static unsigned long shared_pte_mask = L_PTE_MT_BUFFERABLE;
 
 /*
@@ -151,7 +153,14 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t pte)
        if (!pfn_valid(pfn))
                return;
 
+       /*
+        * The zero page is never written to, so never has any dirty
+        * cache lines, and therefore never needs to be flushed.
+        */
        page = pfn_to_page(pfn);
+       if (page == ZERO_PAGE(0))
+               return;
+
        mapping = page_mapping(page);
 #ifndef CONFIG_SMP
        if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
index 7f294f307c835ed45d82c434e51954ba272481fc..329594e760cdb09e868b1e1ad42a30dfda78f429 100644 (file)
@@ -35,14 +35,12 @@ static void flush_pfn_alias(unsigned long pfn, unsigned long vaddr)
            :
            : "r" (to), "r" (to + PAGE_SIZE - L1_CACHE_BYTES), "r" (zero)
            : "cc");
-       __flush_icache_all();
 }
 
 void flush_cache_mm(struct mm_struct *mm)
 {
        if (cache_is_vivt()) {
-               if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
-                       __cpuc_flush_user_all();
+               vivt_flush_cache_mm(mm);
                return;
        }
 
@@ -52,16 +50,13 @@ void flush_cache_mm(struct mm_struct *mm)
                    :
                    : "r" (0)
                    : "cc");
-               __flush_icache_all();
        }
 }
 
 void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
 {
        if (cache_is_vivt()) {
-               if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
-                       __cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
-                                               vma->vm_flags);
+               vivt_flush_cache_range(vma, start, end);
                return;
        }
 
@@ -71,22 +66,26 @@ void flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
                    :
                    : "r" (0)
                    : "cc");
-               __flush_icache_all();
        }
+
+       if (vma->vm_flags & VM_EXEC)
+               __flush_icache_all();
 }
 
 void flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
 {
        if (cache_is_vivt()) {
-               if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-                       unsigned long addr = user_addr & PAGE_MASK;
-                       __cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
-               }
+               vivt_flush_cache_page(vma, user_addr, pfn);
                return;
        }
 
-       if (cache_is_vipt_aliasing())
+       if (cache_is_vipt_aliasing()) {
                flush_pfn_alias(pfn, user_addr);
+               __flush_icache_all();
+       }
+
+       if (vma->vm_flags & VM_EXEC && icache_is_vivt_asid_tagged())
+               __flush_icache_all();
 }
 
 void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
@@ -94,15 +93,13 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
                         unsigned long len, int write)
 {
        if (cache_is_vivt()) {
-               if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
-                       unsigned long addr = (unsigned long)kaddr;
-                       __cpuc_coherent_kern_range(addr, addr + len);
-               }
+               vivt_flush_ptrace_access(vma, page, uaddr, kaddr, len, write);
                return;
        }
 
        if (cache_is_vipt_aliasing()) {
                flush_pfn_alias(page_to_pfn(page), uaddr);
+               __flush_icache_all();
                return;
        }
 
@@ -120,6 +117,8 @@ void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
 
 void __flush_dcache_page(struct address_space *mapping, struct page *page)
 {
+       void *addr = page_address(page);
+
        /*
         * Writeback any data associated with the kernel mapping of this
         * page.  This ensures that data in the physical page is mutually
@@ -130,9 +129,9 @@ void __flush_dcache_page(struct address_space *mapping, struct page *page)
         * kmap_atomic() doesn't set the page virtual address, and
         * kunmap_atomic() takes care of cache flushing already.
         */
-       if (page_address(page))
+       if (addr)
 #endif
-               __cpuc_flush_dcache_page(page_address(page));
+               __cpuc_flush_dcache_page(addr);
 
        /*
         * If this is a page cache page, and we have an aliasing VIPT cache,
@@ -196,7 +195,16 @@ static void __flush_dcache_aliases(struct address_space *mapping, struct page *p
  */
 void flush_dcache_page(struct page *page)
 {
-       struct address_space *mapping = page_mapping(page);
+       struct address_space *mapping;
+
+       /*
+        * The zero page is never written to, so never has any dirty
+        * cache lines, and therefore never needs to be flushed.
+        */
+       if (page == ZERO_PAGE(0))
+               return;
+
+       mapping = page_mapping(page);
 
 #ifndef CONFIG_SMP
        if (!PageHighMem(page) && mapping && !mapping_mapped(mapping))
@@ -242,6 +250,7 @@ void __flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned l
                 * userspace address only.
                 */
                flush_pfn_alias(pfn, vmaddr);
+               __flush_icache_all();
        }
 
        /*
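
The theme of these flush.c hunks: flush_pfn_alias() no longer invalidates the whole I-cache as a side effect, and the open-coded VIVT branches move into factored-out vivt_flush_cache_*() helpers. Callers now invalidate the I-cache only when stale instructions are actually possible, i.e. for executable mappings; the recurring shape (taken from the hunks above) is:

	if (vma->vm_flags & VM_EXEC)
		__flush_icache_all();

Where flush_pfn_alias() used to provide the invalidate implicitly (the ptrace and anon-page alias flushes), an explicit __flush_icache_all() after the call preserves the old behaviour.
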
index c4f6f05198e041831817747a69ad0003fa7c783e..a888363398f8528fcb6bc1bb7aeca4d0245464a9 100644 (file)
@@ -24,6 +24,8 @@ struct mem_type {
 
 const struct mem_type *get_mem_type(unsigned int type);
 
+extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
+
 #endif
 
 struct map_desc;
index ea67be0223ace255de0d96af02009fc5274085b0..2427cdcd9098141b919a578bb116414ffef423ef 100644 (file)
@@ -1036,7 +1036,7 @@ void __init paging_init(struct machine_desc *mdesc)
         */
        zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
        empty_zero_page = virt_to_page(zero_page);
-       flush_dcache_page(empty_zero_page);
+       __flush_dcache_page(NULL, empty_zero_page);
 }
 
 /*
index 71ebd7fcfea158afa5d0f6eb14b4d9202f79be5a..7c345b757df12d17014416116ce07a39d663c634 100644 (file)
@@ -373,7 +373,7 @@ static inline int gpio_valid(int gpio)
 
 static int check_gpio(int gpio)
 {
-       if (unlikely(gpio_valid(gpio)) < 0) {
+       if (unlikely(gpio_valid(gpio) < 0)) {
                printk(KERN_ERR "omap-gpio: invalid GPIO %d\n", gpio);
                dump_stack();
                return -1;
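
The misplaced parenthesis made this check dead code: unlikely(x) expands to __builtin_expect(!!(x), 0), whose value is the normalized truth of x (0 or 1), and neither is ever less than 0. The fix moves the comparison inside the hint:

	if (unlikely(gpio_valid(gpio)) < 0)     /* before: (0 or 1) < 0, never true */
	if (unlikely(gpio_valid(gpio) < 0))     /* after: the real range check, hinted cold */
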
index 94be7bb6cb9a9f345721053ea491b6353b431191..07b976da617418d32b6c0a7d2d84a4ffeb7a5ace 100644 (file)
@@ -12,7 +12,7 @@
 #
 #   http://www.arm.linux.org.uk/developer/machines/?action=new
 #
-# Last update: Fri Sep 18 21:42:00 2009
+# Last update: Wed Nov 25 22:14:58 2009
 #
 # machine_is_xxx       CONFIG_xxxx             MACH_TYPE_xxx           number
 #
@@ -928,7 +928,7 @@ palmt5                      MACH_PALMT5             PALMT5                  917
 palmtc                 MACH_PALMTC             PALMTC                  918
 omap_apollon           MACH_OMAP_APOLLON       OMAP_APOLLON            919
 mxc30030evb            MACH_MXC30030EVB        MXC30030EVB             920
-rea_2d                 MACH_REA_2D             REA_2D                  921
+rea_cpu2               MACH_REA_2D             REA_2D                  921
 eti3e524               MACH_TI3E524            TI3E524                 922
 ateb9200               MACH_ATEB9200           ATEB9200                923
 auckland               MACH_AUCKLAND           AUCKLAND                924
@@ -2421,3 +2421,118 @@ liberty                 MACH_LIBERTY            LIBERTY                 2434
 mh355                  MACH_MH355              MH355                   2435
 pc7802                 MACH_PC7802             PC7802                  2436
 gnet_sgc               MACH_GNET_SGC           GNET_SGC                2437
+einstein15             MACH_EINSTEIN15         EINSTEIN15              2438
+cmpd                   MACH_CMPD               CMPD                    2439
+davinci_hase1          MACH_DAVINCI_HASE1      DAVINCI_HASE1           2440
+lgeincitephone         MACH_LGEINCITEPHONE     LGEINCITEPHONE          2441
+ea313x                 MACH_EA313X             EA313X                  2442
+fwbd_39064             MACH_FWBD_39064         FWBD_39064              2443
+fwbd_390128            MACH_FWBD_390128        FWBD_390128             2444
+pelco_moe              MACH_PELCO_MOE          PELCO_MOE               2445
+minimix27              MACH_MINIMIX27          MINIMIX27               2446
+omap3_thunder          MACH_OMAP3_THUNDER      OMAP3_THUNDER           2447
+passionc               MACH_PASSIONC           PASSIONC                2448
+mx27amata              MACH_MX27AMATA          MX27AMATA               2449
+bgat1                  MACH_BGAT1              BGAT1                   2450
+buzz                   MACH_BUZZ               BUZZ                    2451
+mb9g20                 MACH_MB9G20             MB9G20                  2452
+yushan                 MACH_YUSHAN             YUSHAN                  2453
+lizard                 MACH_LIZARD             LIZARD                  2454
+omap3polycom           MACH_OMAP3POLYCOM       OMAP3POLYCOM            2455
+smdkv210               MACH_SMDKV210           SMDKV210                2456
+bravo                  MACH_BRAVO              BRAVO                   2457
+siogentoo1             MACH_SIOGENTOO1         SIOGENTOO1              2458
+siogentoo2             MACH_SIOGENTOO2         SIOGENTOO2              2459
+sm3k                   MACH_SM3K               SM3K                    2460
+acer_tempo_f900                MACH_ACER_TEMPO_F900    ACER_TEMPO_F900         2461
+sst61vc010_dev         MACH_SST61VC010_DEV     SST61VC010_DEV          2462
+glittertind            MACH_GLITTERTIND        GLITTERTIND             2463
+omap_zoom3             MACH_OMAP_ZOOM3         OMAP_ZOOM3              2464
+omap_3630sdp           MACH_OMAP_3630SDP       OMAP_3630SDP            2465
+cybook2440             MACH_CYBOOK2440         CYBOOK2440              2466
+torino_s               MACH_TORINO_S           TORINO_S                2467
+havana                 MACH_HAVANA             HAVANA                  2468
+beaumont_11            MACH_BEAUMONT_11        BEAUMONT_11             2469
+vanguard               MACH_VANGUARD           VANGUARD                2470
+s5pc110_draco          MACH_S5PC110_DRACO      S5PC110_DRACO           2471
+cartesio_two           MACH_CARTESIO_TWO       CARTESIO_TWO            2472
+aster                  MACH_ASTER              ASTER                   2473
+voguesv210             MACH_VOGUESV210         VOGUESV210              2474
+acm500x                        MACH_ACM500X            ACM500X                 2475
+km9260                 MACH_KM9260             KM9260                  2476
+nideflexg1             MACH_NIDEFLEXG1         NIDEFLEXG1              2477
+ctera_plug_io          MACH_CTERA_PLUG_IO      CTERA_PLUG_IO           2478
+smartq7                        MACH_SMARTQ7            SMARTQ7                 2479
+at91sam9g10ek2         MACH_AT91SAM9G10EK2     AT91SAM9G10EK2          2480
+asusp527               MACH_ASUSP527           ASUSP527                2481
+at91sam9g20mpm2                MACH_AT91SAM9G20MPM2    AT91SAM9G20MPM2         2482
+topasa900              MACH_TOPASA900          TOPASA900               2483
+electrum_100           MACH_ELECTRUM_100       ELECTRUM_100            2484
+mx51grb                        MACH_MX51GRB            MX51GRB                 2485
+xea300                 MACH_XEA300             XEA300                  2486
+htcstartrek            MACH_HTCSTARTREK        HTCSTARTREK             2487
+lima                   MACH_LIMA               LIMA                    2488
+csb740                 MACH_CSB740             CSB740                  2489
+usb_s8815              MACH_USB_S8815          USB_S8815               2490
+watson_efm_plugin      MACH_WATSON_EFM_PLUGIN  WATSON_EFM_PLUGIN       2491
+milkyway               MACH_MILKYWAY           MILKYWAY                2492
+g4evm                  MACH_G4EVM              G4EVM                   2493
+picomod6               MACH_PICOMOD6           PICOMOD6                2494
+omapl138_hawkboard     MACH_OMAPL138_HAWKBOARD OMAPL138_HAWKBOARD      2495
+ip6000                 MACH_IP6000             IP6000                  2496
+ip6010                 MACH_IP6010             IP6010                  2497
+utm400                 MACH_UTM400             UTM400                  2498
+omap3_zybex            MACH_OMAP3_ZYBEX        OMAP3_ZYBEX             2499
+wireless_space         MACH_WIRELESS_SPACE     WIRELESS_SPACE          2500
+sx560                  MACH_SX560              SX560                   2501
+ts41x                  MACH_TS41X              TS41X                   2502
+elphel10373            MACH_ELPHEL10373        ELPHEL10373             2503
+rhobot                 MACH_RHOBOT             RHOBOT                  2504
+mx51_refresh           MACH_MX51_REFRESH       MX51_REFRESH            2505
+ls9260                 MACH_LS9260             LS9260                  2506
+shank                  MACH_SHANK              SHANK                   2507
+qsd8x50_st1            MACH_QSD8X50_ST1        QSD8X50_ST1             2508
+at91sam9m10ekes                MACH_AT91SAM9M10EKES    AT91SAM9M10EKES         2509
+hiram                  MACH_HIRAM              HIRAM                   2510
+phy3250                        MACH_PHY3250            PHY3250                 2511
+ea3250                 MACH_EA3250             EA3250                  2512
+fdi3250                        MACH_FDI3250            FDI3250                 2513
+whitestone             MACH_WHITESTONE         WHITESTONE              2514
+at91sam9263nit         MACH_AT91SAM9263NIT     AT91SAM9263NIT          2515
+ccmx51                 MACH_CCMX51             CCMX51                  2516
+ccmx51js               MACH_CCMX51JS           CCMX51JS                2517
+ccwmx51                        MACH_CCWMX51            CCWMX51                 2518
+ccwmx51js              MACH_CCWMX51JS          CCWMX51JS               2519
+mini6410               MACH_MINI6410           MINI6410                2520
+tiny6410               MACH_TINY6410           TINY6410                2521
+nano6410               MACH_NANO6410           NANO6410                2522
+at572d940hfnldb                MACH_AT572D940HFNLDB    AT572D940HFNLDB         2523
+htcleo                 MACH_HTCLEO             HTCLEO                  2524
+avp13                  MACH_AVP13              AVP13                   2525
+xxsvideod              MACH_XXSVIDEOD          XXSVIDEOD               2526
+vpnext                 MACH_VPNEXT             VPNEXT                  2527
+swarco_itc3            MACH_SWARCO_ITC3        SWARCO_ITC3             2528
+tx51                   MACH_TX51               TX51                    2529
+dolby_cat1021          MACH_DOLBY_CAT1021      DOLBY_CAT1021           2530
+mx28evk                        MACH_MX28EVK            MX28EVK                 2531
+phoenix260             MACH_PHOENIX260         PHOENIX260              2532
+uvaca_stork            MACH_UVACA_STORK        UVACA_STORK             2533
+smartq5                        MACH_SMARTQ5            SMARTQ5                 2534
+all3078                        MACH_ALL3078            ALL3078                 2535
+ctera_2bay_ds          MACH_CTERA_2BAY_DS      CTERA_2BAY_DS           2536
+siogentoo3             MACH_SIOGENTOO3         SIOGENTOO3              2537
+epb5000                        MACH_EPB5000            EPB5000                 2538
+hy9263                 MACH_HY9263             HY9263                  2539
+acer_tempo_m900                MACH_ACER_TEMPO_M900    ACER_TEMPO_M900         2540
+acer_tempo_dx650       MACH_ACER_TEMPO_DX900   ACER_TEMPO_DX900        2541
+acer_tempo_x960                MACH_ACER_TEMPO_X960    ACER_TEMPO_X960         2542
+acer_eten_v900         MACH_ACER_ETEN_V900     ACER_ETEN_V900          2543
+acer_eten_x900         MACH_ACER_ETEN_X900     ACER_ETEN_X900          2544
+bonnell                        MACH_BONNELL            BONNELL                 2545
+oht_mx27               MACH_OHT_MX27           OHT_MX27                2546
+htcquartz              MACH_HTCQUARTZ          HTCQUARTZ               2547
+davinci_dm6467tevm     MACH_DAVINCI_DM6467TEVM DAVINCI_DM6467TEVM      2548
+c3ax03                 MACH_C3AX03             C3AX03                  2549
+mxt_td60               MACH_MXT_TD60           MXT_TD60                2550
+esyx                   MACH_ESYX               ESYX                    2551
+bulldog                        MACH_BULLDOG            BULLDOG                 2553
index 1f170216d2f9b35f475db4a7fa37d9c2064a9fe3..3946aff4f4148b6133f31429fbbe9facb40b35c0 100644 (file)
@@ -225,8 +225,13 @@ int blackfin_dma_suspend(void)
 void blackfin_dma_resume(void)
 {
        int i;
-       for (i = 0; i < MAX_DMA_SUSPEND_CHANNELS; ++i)
-               dma_ch[i].regs->peripheral_map = dma_ch[i].saved_peripheral_map;
+
+       for (i = 0; i < MAX_DMA_CHANNELS; ++i) {
+               dma_ch[i].regs->cfg = 0;
+
+               if (i < MAX_DMA_SUSPEND_CHANNELS)
+                       dma_ch[i].regs->peripheral_map = dma_ch[i].saved_peripheral_map;
+       }
 }
 #endif
 
index f7b9cdce823977a58e944999f6cbb35b1d0e0bbd..b52c1f8c4bc0e9451ab2def134dfc7e095e45185 100644 (file)
@@ -38,7 +38,7 @@ void __init generate_cplb_tables_cpu(unsigned int cpu)
 
 #ifdef CONFIG_BFIN_EXTMEM_DCACHEABLE
        d_cache = CPLB_L1_CHBL;
-#ifdef CONFIG_BFIN_EXTMEM_WRITETROUGH
+#ifdef CONFIG_BFIN_EXTMEM_WRITETHROUGH
        d_cache |= CPLB_L1_AOW | CPLB_WT;
 #endif
 #endif
index 430ae39456e8e1b5cbe9c2c5c6396ff921699722..5cc7e2e9e4156f202bd9bc10f7c58cfa72e0bdbf 100644 (file)
@@ -151,7 +151,7 @@ void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_
        regs->pc = new_ip;
        if (current->mm)
                regs->p5 = current->mm->start_data;
-#ifdef CONFIG_SMP
+#ifndef CONFIG_SMP
        task_thread_info(current)->l1_task_info.stack_start =
                (void *)current->mm->context.stack_start;
        task_thread_info(current)->l1_task_info.lowest_sp = (void *)new_sp;
index 0982b5d5af100ab70f3aee3efbd780eb234175b2..56b0ba12175f2d6922068c4284fa9a0487a75acd 100644 (file)
@@ -315,7 +315,7 @@ long arch_ptrace(struct task_struct *child, long request, long addr, long data)
                        case BFIN_MEM_ACCESS_CORE:
                        case BFIN_MEM_ACCESS_CORE_ONLY:
                                copied = access_process_vm(child, addr, &data,
-                                                          to_copy, 0);
+                                                          to_copy, 1);
                                if (copied)
                                        break;
 
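The final argument of access_process_vm() selects the direction: 0 reads from the traced task, 1 writes into it. Judging by the fix, this is a poke (write) path, so the old call quietly read the child's memory instead of writing it:

	/* access_process_vm(task, addr, buf, len, write) */
	copied = access_process_vm(child, addr, &data, to_copy, 1);
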
index e9c65390edd1852bdf4b691542e34e3687a29a95..2829dd0400f18b405953e2ff61e2996d3c4c7115 100644 (file)
@@ -1,9 +1,13 @@
 /*
- * File: include/asm-blackfin/mach-bf518/anomaly.h
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ * DO NOT EDIT THIS FILE
+ * This file is under version control at
+ *   svn://sources.blackfin.uclinux.org/toolchain/trunk/proc-defs/header-frags/
+ * and can be replaced with that version at any time
+ * DO NOT EDIT THIS FILE
  *
- * Copyright (C) 2004-2009 Analog Devices Inc.
- * Licensed under the GPL-2 or later.
+ * Copyright 2004-2009 Analog Devices Inc.
+ * Licensed under the ADI BSD license.
+ *   https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
  */
 
 /* This file should be up to date with:
 #define ANOMALY_05000461 (1)
 /* Synchronization Problem at Startup May Cause SPORT Transmit Channels to Misalign */
 #define ANOMALY_05000462 (1)
+/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
+#define ANOMALY_05000473 (1)
+/* TESTSET Instruction Cannot Be Interrupted */
+#define ANOMALY_05000477 (1)
 
 /* Anomalies that don't exist on this proc */
 #define ANOMALY_05000099 (0)
 #define ANOMALY_05000450 (0)
 #define ANOMALY_05000465 (0)
 #define ANOMALY_05000467 (0)
+#define ANOMALY_05000474 (0)
+#define ANOMALY_05000475 (0)
 
 #endif
index 3f9052687fa8065c871d00e268c34c45f85a7f31..02040df8ec80be9ef3b08e1525b76aa22507a717 100644 (file)
@@ -1,14 +1,18 @@
 /*
- * File: include/asm-blackfin/mach-bf527/anomaly.h
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ * DO NOT EDIT THIS FILE
+ * This file is under version control at
+ *   svn://sources.blackfin.uclinux.org/toolchain/trunk/proc-defs/header-frags/
+ * and can be replaced with that version at any time
+ * DO NOT EDIT THIS FILE
  *
- * Copyright (C) 2004-2009 Analog Devices Inc.
- * Licensed under the GPL-2 or later.
+ * Copyright 2004-2009 Analog Devices Inc.
+ * Licensed under the ADI BSD license.
+ *   https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
  */
 
 /* This file should be up to date with:
  *  - Revision D, 08/14/2009; ADSP-BF526 Blackfin Processor Anomaly List
- *  - Revision F, 03/03/2009; ADSP-BF527 Blackfin Processor Anomaly List
+ *  - Revision G, 08/25/2009; ADSP-BF527 Blackfin Processor Anomaly List
  */
 
 #ifndef _MACH_ANOMALY_H_
 #define ANOMALY_05000467 (1)
 /* PLL Latches Incorrect Settings During Reset */
 #define ANOMALY_05000469 (1)
+/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
+#define ANOMALY_05000473 (1)
+/* TESTSET Instruction Cannot Be Interrupted */
+#define ANOMALY_05000477 (1)
 
 /* Anomalies that don't exist on this proc */
 #define ANOMALY_05000099 (0)
 #define ANOMALY_05000412 (0)
 #define ANOMALY_05000447 (0)
 #define ANOMALY_05000448 (0)
+#define ANOMALY_05000474 (0)
+#define ANOMALY_05000475 (0)
 
 #endif
index cd83db2fb1a17fdc99e3cba7a7423005430afc80..9b3f7a27714d3d8964619e61626d366d6eef7a05 100644 (file)
@@ -1,9 +1,13 @@
 /*
- * File: include/asm-blackfin/mach-bf533/anomaly.h
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ * DO NOT EDIT THIS FILE
+ * This file is under version control at
+ *   svn://sources.blackfin.uclinux.org/toolchain/trunk/proc-defs/header-frags/
+ * and can be replaced with that version at any time
+ * DO NOT EDIT THIS FILE
  *
- * Copyright (C) 2004-2009 Analog Devices Inc.
- * Licensed under the GPL-2 or later.
+ * Copyright 2004-2009 Analog Devices Inc.
+ * Licensed under the ADI BSD license.
+ *   https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
  */
 
 /* This file should be up to date with:
 #define ANOMALY_05000443 (1)
 /* False Hardware Error when RETI Points to Invalid Memory */
 #define ANOMALY_05000461 (1)
+/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
+#define ANOMALY_05000473 (1)
+/* TESTSET Instruction Cannot Be Interrupted */
+#define ANOMALY_05000477 (1)
 
 /* These anomalies have been "phased" out of analog.com anomaly sheets and are
  * here to show running on older silicon just isn't feasible.
 #define ANOMALY_05000450 (0)
 #define ANOMALY_05000465 (0)
 #define ANOMALY_05000467 (0)
+#define ANOMALY_05000474 (0)
+#define ANOMALY_05000475 (0)
 
 #endif
index f091ad2d8ea8dd23001bd598027e34aeb97b3598..d2c427bc6656e8fa0c6be3234c644d1fa8288aa6 100644 (file)
@@ -1,9 +1,13 @@
 /*
- * File: include/asm-blackfin/mach-bf537/anomaly.h
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ * DO NOT EDIT THIS FILE
+ * This file is under version control at
+ *   svn://sources.blackfin.uclinux.org/toolchain/trunk/proc-defs/header-frags/
+ * and can be replaced with that version at any time
+ * DO NOT EDIT THIS FILE
  *
- * Copyright (C) 2004-2009 Analog Devices Inc.
- * Licensed under the GPL-2 or later.
+ * Copyright 2004-2009 Analog Devices Inc.
+ * Licensed under the ADI BSD license.
+ *   https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
  */
 
 /* This file should be up to date with:
 #define ANOMALY_05000443 (1)
 /* False Hardware Error when RETI Points to Invalid Memory */
 #define ANOMALY_05000461 (1)
+/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
+#define ANOMALY_05000473 (1)
+/* TESTSET Instruction Cannot Be Interrupted */
+#define ANOMALY_05000477 (1)
 
 /* Anomalies that don't exist on this proc */
 #define ANOMALY_05000099 (0)
 #define ANOMALY_05000450 (0)
 #define ANOMALY_05000465 (0)
 #define ANOMALY_05000467 (0)
+#define ANOMALY_05000474 (0)
+#define ANOMALY_05000475 (0)
 
 #endif
index 26b76083e14c084fb2cd48622573ffeb70566b29..d882b7e6f59bcf443e00270ef01cd7788b821aa1 100644 (file)
@@ -1,9 +1,13 @@
 /*
- * File: include/asm-blackfin/mach-bf538/anomaly.h
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ * DO NOT EDIT THIS FILE
+ * This file is under version control at
+ *   svn://sources.blackfin.uclinux.org/toolchain/trunk/proc-defs/header-frags/
+ * and can be replaced with that version at any time
+ * DO NOT EDIT THIS FILE
  *
- * Copyright (C) 2004-2009 Analog Devices Inc.
- * Licensed under the GPL-2 or later.
+ * Copyright 2004-2009 Analog Devices Inc.
+ * Licensed under the ADI BSD license.
+ *   https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
  */
 
 /* This file should be up to date with:
 #define ANOMALY_05000443 (1)
 /* False Hardware Error when RETI Points to Invalid Memory */
 #define ANOMALY_05000461 (1)
+/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
+#define ANOMALY_05000473 (1)
+/* TESTSET Instruction Cannot Be Interrupted */
+#define ANOMALY_05000477 (1)
 
 /* Anomalies that don't exist on this proc */
 #define ANOMALY_05000099 (0)
 #define ANOMALY_05000450 (0)
 #define ANOMALY_05000465 (0)
 #define ANOMALY_05000467 (0)
+#define ANOMALY_05000474 (0)
+#define ANOMALY_05000475 (0)
 
 #endif
index 52b116ae522a57fc153515055ed7389ac784d18f..7d08c7524498a6f0e4645ba106925c7f3cf292eb 100644 (file)
@@ -1,9 +1,13 @@
 /*
- * File: include/asm-blackfin/mach-bf548/anomaly.h
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ * DO NOT EDIT THIS FILE
+ * This file is under version control at
+ *   svn://sources.blackfin.uclinux.org/toolchain/trunk/proc-defs/header-frags/
+ * and can be replaced with that version at any time
+ * DO NOT EDIT THIS FILE
  *
- * Copyright (C) 2004-2009 Analog Devices Inc.
- * Licensed under the GPL-2 or later.
+ * Copyright 2004-2009 Analog Devices Inc.
+ * Licensed under the ADI BSD license.
+ *   https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
  */
 
 /* This file should be up to date with:
@@ -24,6 +28,8 @@
 #define ANOMALY_05000119 (1)
 /* Rx.H Cannot Be Used to Access 16-bit System MMR Registers */
 #define ANOMALY_05000122 (1)
+/* Data Corruption with Cached External Memory and Non-Cached On-Chip L2 Memory */
+#define ANOMALY_05000220 (1)
 /* False Hardware Error from an Access in the Shadow of a Conditional Branch */
 #define ANOMALY_05000245 (1)
 /* Sensitivity To Noise with Slow Input Edge Rates on External SPORT TX and RX Clocks */
 #define ANOMALY_05000466 (1)
 /* Possible RX data corruption when control & data EP FIFOs are accessed via the core */
 #define ANOMALY_05000467 (1)
+/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
+#define ANOMALY_05000473 (1)
+/* Access to DDR-SDRAM causes system hang under certain PLL/VR settings */
+#define ANOMALY_05000474 (1)
+/* Core Hang With L2/L3 Configured in Writeback Cache Mode */
+#define ANOMALY_05000475 (1)
+/* TESTSET Instruction Cannot Be Interrupted */
+#define ANOMALY_05000477 (1)
 
 /* Anomalies that don't exist on this proc */
 #define ANOMALY_05000099 (0)
 #define ANOMALY_05000198 (0)
 #define ANOMALY_05000202 (0)
 #define ANOMALY_05000215 (0)
-#define ANOMALY_05000220 (0)
 #define ANOMALY_05000227 (0)
 #define ANOMALY_05000230 (0)
 #define ANOMALY_05000231 (0)
index 0261a5e751b35189d79f5684ff227f6552145ff3..f99f174b129f694609710faf9fe653f01f76b503 100644 (file)
        \reg\().h = _corelock;
 .endm
 
+.macro safe_testset addr:req, scratch:req
+#if ANOMALY_05000477
+       cli \scratch;
+       testset (\addr);
+       sti \scratch;
+#else
+       testset (\addr);
+#endif
+.endm
+
 /*
  * r0 = address of atomic data to flush and invalidate (32bit).
  *
@@ -33,7 +43,7 @@ ENTRY(_get_core_lock)
        cli r0;
        coreslot_loadaddr p0;
 .Lretry_corelock:
-       testset (p0);
+       safe_testset p0, r2;
        if cc jump .Ldone_corelock;
        SSYNC(r2);
        jump .Lretry_corelock
@@ -56,7 +66,7 @@ ENTRY(_get_core_lock_noflush)
        cli r0;
        coreslot_loadaddr p0;
 .Lretry_corelock_noflush:
-       testset (p0);
+       safe_testset p0, r2;
        if cc jump .Ldone_corelock_noflush;
        SSYNC(r2);
        jump .Lretry_corelock_noflush
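
safe_testset is the workaround for anomaly 05000477 ("TESTSET Instruction Cannot Be Interrupted") introduced above: on affected parts it brackets the instruction with cli/sti so no interrupt can land mid-TESTSET. In C terms it is the usual save/restore pattern:

	local_irq_save(flags);          /* cli scratch */
	/* the instruction that must not be interrupted */
	local_irq_restore(flags);       /* sti scratch */
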
index 70da495c96652e1c407d8ecb49e5607b389cec6a..5ddc981e9937b2d11d24a4efdb17217e24b4f18a 100644 (file)
@@ -1,9 +1,13 @@
 /*
- * File: include/asm-blackfin/mach-bf561/anomaly.h
- * Bugs: Enter bugs at http://blackfin.uclinux.org/
+ * DO NOT EDIT THIS FILE
+ * This file is under version control at
+ *   svn://sources.blackfin.uclinux.org/toolchain/trunk/proc-defs/header-frags/
+ * and can be replaced with that version at any time
+ * DO NOT EDIT THIS FILE
  *
- * Copyright (C) 2004-2009 Analog Devices Inc.
- * Licensed under the GPL-2 or later.
+ * Copyright 2004-2009 Analog Devices Inc.
+ * Licensed under the ADI BSD license.
+ *   https://docs.blackfin.uclinux.org/doku.php?id=adi_bsd
  */
 
 /* This file should be up to date with:
 /* Disabling Peripherals with DMA Running May Cause DMA System Instability */
 #define ANOMALY_05000278 (__SILICON_REVISION__ < 5)
 /* False Hardware Error Exception when ISR Context Is Not Restored */
-#define ANOMALY_05000281 (__SILICON_REVISION__ < 5)
+/* Temporary workaround for bug 5423 until this issue is confirmed by the
+ * official anomaly document. It looks like 05000281 still exists on bf561
+ * v0.5.
+ */
+#define ANOMALY_05000281 (__SILICON_REVISION__ <= 5)
 /* System MMR Write Is Stalled Indefinitely when Killed in a Particular Stage */
 #define ANOMALY_05000283 (1)
 /* Reads Will Receive Incorrect Data under Certain Conditions */
 #define ANOMALY_05000443 (1)
 /* False Hardware Error when RETI Points to Invalid Memory */
 #define ANOMALY_05000461 (1)
+/* Interrupted 32-Bit SPORT Data Register Access Results In Underflow */
+#define ANOMALY_05000473 (1)
+/* Core Hang With L2/L3 Configured in Writeback Cache Mode */
+#define ANOMALY_05000475 (__SILICON_REVISION__ < 4)
+/* TESTSET Instruction Cannot Be Interrupted */
+#define ANOMALY_05000477 (1)
 
 /* Anomalies that don't exist on this proc */
 #define ANOMALY_05000119 (0)
 #define ANOMALY_05000450 (0)
 #define ANOMALY_05000465 (0)
 #define ANOMALY_05000467 (0)
+#define ANOMALY_05000474 (0)
 
 #endif
index 9dbafcdcf4791e3ba7eb8522cf1b6de911c05b0a..f2ca211a76a09a4497b0b18714cac2df2e0fb8a5 100644 (file)
@@ -57,3 +57,8 @@
         (!defined(CONFIG_BFIN_EXTMEM_DCACHEABLE) && defined(CONFIG_BFIN_L2_WRITEBACK)))
 # error You are exposing Anomaly 220 in this config, either config L2 as Write Through, or make External Memory WB.
 #endif
+
+#if ANOMALY_05000475 && \
+       (defined(CONFIG_BFIN_EXTMEM_WRITEBACK) || defined(CONFIG_BFIN_L2_WRITEBACK))
+# error "Anomaly 475 does not allow you to use Write Back cache with L2 or External Memory"
+#endif
index d98585f3237d5114f9d054e386c64898a13ce08a..d92b168c83281cdd89326875e80ca75afe890c80 100644 (file)
@@ -276,10 +276,9 @@ void smp_send_reschedule(int cpu)
        if (cpu_is_offline(cpu))
                return;
 
-       msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
+       msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
        if (!msg)
                return;
-       memset(msg, 0, sizeof(msg));
        INIT_LIST_HEAD(&msg->list);
        msg->type = BFIN_IPI_RESCHEDULE;
 
@@ -305,10 +304,9 @@ void smp_send_stop(void)
        if (cpus_empty(callmap))
                return;
 
-       msg = kmalloc(sizeof(*msg), GFP_ATOMIC);
+       msg = kzalloc(sizeof(*msg), GFP_ATOMIC);
        if (!msg)
                return;
-       memset(msg, 0, sizeof(msg));
        INIT_LIST_HEAD(&msg->list);
        msg->type = BFIN_IPI_CPU_STOP;
 
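Both hunks fix the same classic sizeof bug along the way: memset(msg, 0, sizeof(msg)) zeroed only pointer-sized bytes (4 here), not the message, leaving most of the structure uninitialized. kzalloc() zeroes the full allocation:

	memset(msg, 0, sizeof(msg));                /* only sizeof(void *) bytes */
	msg = kzalloc(sizeof(*msg), GFP_ATOMIC);    /* the whole *msg, zeroed    */
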
index 69dad5a850a8392f5f520dd164f4dda9a4d5445a..a36799e85693d7b30d7a9acf1f2703709ad77bcb 100644 (file)
@@ -28,7 +28,7 @@
 #define dbg(x...)
 #endif
 
-#define KERNEL_START (KERNEL_BINARY_TEXT_START - 0x1000)
+#define KERNEL_START (KERNEL_BINARY_TEXT_START)
 
 extern struct unwind_table_entry __start___unwind[];
 extern struct unwind_table_entry __stop___unwind[];
index fda4baa059b5002a99f64fdb832cf134b20756ee..9dab4a4e09f7af86d5dbe6a5a8e129393d619a42 100644 (file)
@@ -78,9 +78,6 @@ SECTIONS
         */
        . = ALIGN(PAGE_SIZE);
        data_start = .;
-       EXCEPTION_TABLE(16)
-
-       NOTES
 
        /* unwind info */
        .PARISC.unwind : {
@@ -89,6 +86,9 @@ SECTIONS
                __stop___unwind = .;
        }
 
+       EXCEPTION_TABLE(16)
+       NOTES
+
        /* Data */
        RW_DATA_SECTION(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
 
index b6bac6f61c16d60465b17da5cc77ba81e059ec5e..916369575c976991d449dfcd2b4c196c4bd8e7f1 100644 (file)
@@ -29,5 +29,16 @@ enum km_type {
        KM_TYPE_NR
 };
 
+/*
+ * This is a temporary build fix that (so they say on lkml....) should no longer
+ * be required after 2.6.33, because of changes planned to the kmap code.
+ * Let's try to remove this cruft then.
+ */
+#ifdef CONFIG_DEBUG_HIGHMEM
+#define KM_NMI         (-1)
+#define KM_NMI_PTE     (-1)
+#define KM_IRQ_PTE     (-1)
+#endif
+
 #endif /* __KERNEL__ */
 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
index 6b5d191eec3a98079ce6605a84f71472f89a309e..a351ed84eec5932c3e9f6ea0d7bcfff2c95c2f35 100644 (file)
@@ -68,7 +68,7 @@ static void unmask_imask_irq(unsigned int irq)
 }
 
 static struct irq_chip imask_irq_chip = {
-       .typename       = "SR.IMASK",
+       .name           = "SR.IMASK",
        .mask           = mask_imask_irq,
        .unmask         = unmask_imask_irq,
        .mask_ack       = mask_imask_irq,
index 6c092f1f55579e7d67641f4504bad9f71f5ee9e2..06e7e2959b542e9b862566d4f078e18953ff964b 100644 (file)
@@ -85,7 +85,7 @@ static void mask_and_ack_intc(unsigned int);
 static void end_intc_irq(unsigned int irq);
 
 static struct irq_chip intc_irq_type = {
-       .typename = "INTC",
+       .name = "INTC",
        .startup = startup_intc_irq,
        .shutdown = shutdown_intc_irq,
        .enable = enable_intc_irq,
index c2f772dbd556ffaa5a75430500fbedb9cde3d3fd..77d1b313e3441e9b616dffeed881b7cf5bfcc541 100644 (file)
@@ -45,7 +45,7 @@ extern void free_initmem(void);
 #define VMEMMAP_ALIGN(x)       (((x)+VMEMMAP_CHUNK-1UL)&VMEMMAP_CHUNK_MASK)
 
 #define VMEMMAP_SIZE   ((((1UL << MAX_PHYSADDR_BITS) >> PAGE_SHIFT) * \
-                         sizeof(struct page *)) >> VMEMMAP_CHUNK_SHIFT)
+                         sizeof(struct page)) >> VMEMMAP_CHUNK_SHIFT)
 extern unsigned long vmemmap_table[VMEMMAP_SIZE];
 #endif
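
vmemmap_table has to cover the byte size of the struct page array, so the multiplier must be sizeof(struct page), not sizeof(struct page *); the pointer size (8 bytes on sparc64) undersized the table several times over:

	/* entries = (npages * sizeof(struct page)) >> VMEMMAP_CHUNK_SHIFT */
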
 
index d296f4a195c916d8d454c144396ecc1f71af91d2..d85d1b2432baec47b65d81191353c0ab6b490aec 100644 (file)
@@ -79,7 +79,8 @@ void arch_acpi_processor_init_pdc(struct acpi_processor *pr)
        struct cpuinfo_x86 *c = &cpu_data(pr->id);
 
        pr->pdc = NULL;
-       if (c->x86_vendor == X86_VENDOR_INTEL)
+       if (c->x86_vendor == X86_VENDOR_INTEL ||
+           c->x86_vendor == X86_VENDOR_CENTAUR)
                init_intel_pdc(pr, c);
 
        return;
index 7d5c3b0ea8dad3a69eaf24d9b99d7eb3a2bacfd2..8b581d3905cb47214af854582e24a379fac444f3 100644 (file)
@@ -526,15 +526,21 @@ static const struct dmi_system_id sw_any_bug_dmi_table[] = {
 
 static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c)
 {
-       /* http://www.intel.com/Assets/PDF/specupdate/314554.pdf
+       /* Intel Xeon Processor 7100 Series Specification Update
+        * http://www.intel.com/Assets/PDF/specupdate/314554.pdf
         * AL30: A Machine Check Exception (MCE) Occurring during an
         * Enhanced Intel SpeedStep Technology Ratio Change May Cause
-        * Both Processor Cores to Lock Up when HT is enabled*/
+        * Both Processor Cores to Lock Up*/
        if (c->x86_vendor == X86_VENDOR_INTEL) {
                if ((c->x86 == 15) &&
                    (c->x86_model == 6) &&
-                   (c->x86_mask == 8) && smt_capable())
+                   (c->x86_mask == 8)) {
+                       printk(KERN_INFO "acpi-cpufreq: Intel(R) "
+                           "Xeon(R) 7100 Errata AL30, processors may "
+                           "lock up on frequency changes: disabling "
+                           "acpi-cpufreq.\n");
                        return -ENODEV;
+                   }
                }
        return 0;
 }
@@ -549,13 +555,18 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
        unsigned int result = 0;
        struct cpuinfo_x86 *c = &cpu_data(policy->cpu);
        struct acpi_processor_performance *perf;
+#ifdef CONFIG_SMP
+       static int blacklisted;
+#endif
 
        dprintk("acpi_cpufreq_cpu_init\n");
 
 #ifdef CONFIG_SMP
-       result = acpi_cpufreq_blacklist(c);
-       if (result)
-               return result;
+       if (blacklisted)
+               return blacklisted;
+       blacklisted = acpi_cpufreq_blacklist(c);
+       if (blacklisted)
+               return blacklisted;
 #endif
 
        data = kzalloc(sizeof(struct acpi_cpufreq_data), GFP_KERNEL);
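
Caching the verdict in a static means the first CPU that trips the blacklist logs the erratum once (via the new, more explicit printk), and every later CPU immediately gets the same -ENODEV back without re-running the check or re-logging:

	static int blacklisted;         /* 0 until a CPU trips the blacklist */
	if (blacklisted)
		return blacklisted;
	blacklisted = acpi_cpufreq_blacklist(c);
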
index ce2ed3e4aad96a200fe0280dea4d66a4b2fb9268..cabd2fa3fc931e0e20f6f368b6378705d040e602 100644 (file)
@@ -813,7 +813,7 @@ static int __init longhaul_cpu_init(struct cpufreq_policy *policy)
                        memcpy(eblcr, samuel2_eblcr, sizeof(samuel2_eblcr));
                        break;
                case 1 ... 15:
-                       longhaul_version = TYPE_LONGHAUL_V1;
+                       longhaul_version = TYPE_LONGHAUL_V2;
                        if (c->x86_mask < 8) {
                                cpu_model = CPU_SAMUEL2;
                                cpuname = "C3 'Samuel 2' [C5B]";
index 6394aa5c7985b17ea0c18a9d9ae6859cd7b66585..3f12dabeab525d2de56321c6dc1a1a6817d8db7a 100644 (file)
@@ -1022,7 +1022,7 @@ static int get_transition_latency(struct powernow_k8_data *data)
                 * set it to 1 to avoid problems in the future.
                 * For all others it's a BIOS bug.
                 */
-               if (!boot_cpu_data.x86 == 0x11)
+               if (boot_cpu_data.x86 != 0x11)
                        printk(KERN_ERR FW_WARN PFX "Invalid zero transition "
                                "latency\n");
                max_latency = 1;
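
Another precedence fix: ! binds tighter than ==, so the old condition parsed as (!boot_cpu_data.x86) == 0x11, which is always false (the left side is 0 or 1), and the firmware warning could never print:

	if (!boot_cpu_data.x86 == 0x11)         /* (0 or 1) == 0x11: never */
	if (boot_cpu_data.x86 != 0x11)          /* intended family test    */
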
index 6911e91fb4f67dc5344342f83e273b39a74106bf..3ae5a7a3a500dc2ddf392ba730736ade952057d7 100644 (file)
@@ -232,28 +232,23 @@ static unsigned int speedstep_detect_chipset(void)
        return 0;
 }
 
-struct get_freq_data {
-       unsigned int speed;
-       unsigned int processor;
-};
-
-static void get_freq_data(void *_data)
+static void get_freq_data(void *_speed)
 {
-       struct get_freq_data *data = _data;
+       unsigned int *speed = _speed;
 
-       data->speed = speedstep_get_frequency(data->processor);
+       *speed = speedstep_get_frequency(speedstep_processor);
 }
 
 static unsigned int speedstep_get(unsigned int cpu)
 {
-       struct get_freq_data data = { .processor = cpu };
+       unsigned int speed;
 
        /* You're supposed to ensure CPU is online. */
-       if (smp_call_function_single(cpu, get_freq_data, &data, 1) != 0)
+       if (smp_call_function_single(cpu, get_freq_data, &speed, 1) != 0)
                BUG();
 
-       dprintk("detected %u kHz as current frequency\n", data.speed);
-       return data.speed;
+       dprintk("detected %u kHz as current frequency\n", speed);
+       return speed;
 }
 
 /**
index e5aeb2b79e6f956998b8bd0b2bc3bc882ab5067c..e28e276ac611d9d9bd25011b1cefc200456d6242 100644 (file)
@@ -23,3 +23,8 @@ config ASYNC_RAID6_RECOV
        select ASYNC_CORE
        select ASYNC_PQ
 
+config ASYNC_TX_DISABLE_PQ_VAL_DMA
+       bool
+
+config ASYNC_TX_DISABLE_XOR_VAL_DMA
+       bool
index 6b5cc4fba59f0a52e9b20689ed029cbb65b6209a..ec87f53d50595f0c545df398932799d5d2054c0d 100644 (file)
@@ -240,6 +240,16 @@ async_gen_syndrome(struct page **blocks, unsigned int offset, int disks,
 }
 EXPORT_SYMBOL_GPL(async_gen_syndrome);
 
+static inline struct dma_chan *
+pq_val_chan(struct async_submit_ctl *submit, struct page **blocks, int disks, size_t len)
+{
+       #ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
+       return NULL;
+       #endif
+       return async_tx_find_channel(submit, DMA_PQ_VAL, NULL, 0,  blocks,
+                                    disks, len);
+}
+
 /**
  * async_syndrome_val - asynchronously validate a raid6 syndrome
  * @blocks: source blocks from idx 0..disks-3, P @ disks-2 and Q @ disks-1
@@ -260,9 +270,7 @@ async_syndrome_val(struct page **blocks, unsigned int offset, int disks,
                   size_t len, enum sum_check_flags *pqres, struct page *spare,
                   struct async_submit_ctl *submit)
 {
-       struct dma_chan *chan = async_tx_find_channel(submit, DMA_PQ_VAL,
-                                                     NULL, 0,  blocks, disks,
-                                                     len);
+       struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
        struct dma_device *device = chan ? chan->device : NULL;
        struct dma_async_tx_descriptor *tx;
        unsigned char coefs[disks-2];
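
The new hidden Kconfig bools let a platform or DMA driver opt out of offloaded validation: when the symbol is selected, pq_val_chan() (and its XOR twin below) compiles down to return NULL, and a NULL channel already means "no engine" to the async_tx core, so the call sites silently fall back to the synchronous CPU path:

	struct dma_chan *chan = pq_val_chan(submit, blocks, disks, len);
	struct dma_device *device = chan ? chan->device : NULL;  /* NULL => sync fallback */
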
index 79182dcb91b797760579482176cb652c495263a8..079ae8ca590bf869fdeb475a1bf4bea7a563831e 100644 (file)
@@ -234,6 +234,17 @@ static int page_is_zero(struct page *p, unsigned int offset, size_t len)
                memcmp(a, a + 4, len - 4) == 0);
 }
 
+static inline struct dma_chan *
+xor_val_chan(struct async_submit_ctl *submit, struct page *dest,
+                struct page **src_list, int src_cnt, size_t len)
+{
+       #ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
+       return NULL;
+       #endif
+       return async_tx_find_channel(submit, DMA_XOR_VAL, &dest, 1, src_list,
+                                    src_cnt, len);
+}
+
 /**
  * async_xor_val - attempt a xor parity check with a dma engine.
  * @dest: destination page used if the xor is performed synchronously
@@ -255,9 +266,7 @@ async_xor_val(struct page *dest, struct page **src_list, unsigned int offset,
              int src_cnt, size_t len, enum sum_check_flags *result,
              struct async_submit_ctl *submit)
 {
-       struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR_VAL,
-                                                     &dest, 1, src_list,
-                                                     src_cnt, len);
+       struct dma_chan *chan = xor_val_chan(submit, dest, src_list, src_cnt, len);
        struct dma_device *device = chan ? chan->device : NULL;
        struct dma_async_tx_descriptor *tx = NULL;
        dma_addr_t *dma_src = NULL;
index 5fc3292483efb19cdbf45b18e877015229fb30db..c6547130624ce73cd4e41088f260f40b391dd940 100644 (file)
@@ -40,7 +40,7 @@ struct crypto_rfc4106_ctx {
 struct crypto_gcm_ghash_ctx {
        unsigned int cryptlen;
        struct scatterlist *src;
-       crypto_completion_t complete;
+       void (*complete)(struct aead_request *req, int err);
 };
 
 struct crypto_gcm_req_priv_ctx {
@@ -267,23 +267,26 @@ static int gcm_hash_final(struct aead_request *req,
        return crypto_ahash_final(ahreq);
 }
 
-static void gcm_hash_final_done(struct crypto_async_request *areq,
-                               int err)
+static void __gcm_hash_final_done(struct aead_request *req, int err)
 {
-       struct aead_request *req = areq->data;
        struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
        struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
 
        if (!err)
                crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);
 
-       gctx->complete(areq, err);
+       gctx->complete(req, err);
 }
 
-static void gcm_hash_len_done(struct crypto_async_request *areq,
-                             int err)
+static void gcm_hash_final_done(struct crypto_async_request *areq, int err)
 {
        struct aead_request *req = areq->data;
+
+       __gcm_hash_final_done(req, err);
+}
+
+static void __gcm_hash_len_done(struct aead_request *req, int err)
+{
        struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
 
        if (!err) {
@@ -292,13 +295,18 @@ static void gcm_hash_len_done(struct crypto_async_request *areq,
                        return;
        }
 
-       gcm_hash_final_done(areq, err);
+       __gcm_hash_final_done(req, err);
 }
 
-static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq,
-                                      int err)
+static void gcm_hash_len_done(struct crypto_async_request *areq, int err)
 {
        struct aead_request *req = areq->data;
+
+       __gcm_hash_len_done(req, err);
+}
+
+static void __gcm_hash_crypt_remain_done(struct aead_request *req, int err)
+{
        struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
 
        if (!err) {
@@ -307,13 +315,19 @@ static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq,
                        return;
        }
 
-       gcm_hash_len_done(areq, err);
+       __gcm_hash_len_done(req, err);
 }
 
-static void gcm_hash_crypt_done(struct crypto_async_request *areq,
-                               int err)
+static void gcm_hash_crypt_remain_done(struct crypto_async_request *areq,
+                                      int err)
 {
        struct aead_request *req = areq->data;
+
+       __gcm_hash_crypt_remain_done(req, err);
+}
+
+static void __gcm_hash_crypt_done(struct aead_request *req, int err)
+{
        struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
        struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
        unsigned int remain;
@@ -327,13 +341,18 @@ static void gcm_hash_crypt_done(struct crypto_async_request *areq,
                        return;
        }
 
-       gcm_hash_crypt_remain_done(areq, err);
+       __gcm_hash_crypt_remain_done(req, err);
 }
 
-static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq,
-                                          int err)
+static void gcm_hash_crypt_done(struct crypto_async_request *areq, int err)
 {
        struct aead_request *req = areq->data;
+
+       __gcm_hash_crypt_done(req, err);
+}
+
+static void __gcm_hash_assoc_remain_done(struct aead_request *req, int err)
+{
        struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
        struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
        crypto_completion_t complete;
@@ -350,15 +369,21 @@ static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq,
        }
 
        if (remain)
-               gcm_hash_crypt_done(areq, err);
+               __gcm_hash_crypt_done(req, err);
        else
-               gcm_hash_crypt_remain_done(areq, err);
+               __gcm_hash_crypt_remain_done(req, err);
 }
 
-static void gcm_hash_assoc_done(struct crypto_async_request *areq,
-                               int err)
+static void gcm_hash_assoc_remain_done(struct crypto_async_request *areq,
+                                      int err)
 {
        struct aead_request *req = areq->data;
+
+       __gcm_hash_assoc_remain_done(req, err);
+}
+
+static void __gcm_hash_assoc_done(struct aead_request *req, int err)
+{
        struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
        unsigned int remain;
 
@@ -371,13 +396,18 @@ static void gcm_hash_assoc_done(struct crypto_async_request *areq,
                        return;
        }
 
-       gcm_hash_assoc_remain_done(areq, err);
+       __gcm_hash_assoc_remain_done(req, err);
 }
 
-static void gcm_hash_init_done(struct crypto_async_request *areq,
-                              int err)
+static void gcm_hash_assoc_done(struct crypto_async_request *areq, int err)
 {
        struct aead_request *req = areq->data;
+
+       __gcm_hash_assoc_done(req, err);
+}
+
+static void __gcm_hash_init_done(struct aead_request *req, int err)
+{
        struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
        crypto_completion_t complete;
        unsigned int remain = 0;
@@ -393,9 +423,16 @@ static void gcm_hash_init_done(struct crypto_async_request *areq,
        }
 
        if (remain)
-               gcm_hash_assoc_done(areq, err);
+               __gcm_hash_assoc_done(req, err);
        else
-               gcm_hash_assoc_remain_done(areq, err);
+               __gcm_hash_assoc_remain_done(req, err);
+}
+
+static void gcm_hash_init_done(struct crypto_async_request *areq, int err)
+{
+       struct aead_request *req = areq->data;
+
+       __gcm_hash_init_done(req, err);
 }
 
 static int gcm_hash(struct aead_request *req,
@@ -457,10 +494,8 @@ static void gcm_enc_copy_hash(struct aead_request *req,
                                 crypto_aead_authsize(aead), 1);
 }
 
-static void gcm_enc_hash_done(struct crypto_async_request *areq,
-                                    int err)
+static void gcm_enc_hash_done(struct aead_request *req, int err)
 {
-       struct aead_request *req = areq->data;
        struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
 
        if (!err)
@@ -469,8 +504,7 @@ static void gcm_enc_hash_done(struct crypto_async_request *areq,
        aead_request_complete(req, err);
 }
 
-static void gcm_encrypt_done(struct crypto_async_request *areq,
-                                    int err)
+static void gcm_encrypt_done(struct crypto_async_request *areq, int err)
 {
        struct aead_request *req = areq->data;
        struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
@@ -479,9 +513,13 @@ static void gcm_encrypt_done(struct crypto_async_request *areq,
                err = gcm_hash(req, pctx);
                if (err == -EINPROGRESS || err == -EBUSY)
                        return;
+               else if (!err) {
+                       crypto_xor(pctx->auth_tag, pctx->iauth_tag, 16);
+                       gcm_enc_copy_hash(req, pctx);
+               }
        }
 
-       gcm_enc_hash_done(areq, err);
+       aead_request_complete(req, err);
 }
 
 static int crypto_gcm_encrypt(struct aead_request *req)
@@ -538,9 +576,8 @@ static void gcm_decrypt_done(struct crypto_async_request *areq, int err)
        aead_request_complete(req, err);
 }
 
-static void gcm_dec_hash_done(struct crypto_async_request *areq, int err)
+static void gcm_dec_hash_done(struct aead_request *req, int err)
 {
-       struct aead_request *req = areq->data;
        struct crypto_gcm_req_priv_ctx *pctx = crypto_gcm_reqctx(req);
        struct ablkcipher_request *abreq = &pctx->u.abreq;
        struct crypto_gcm_ghash_ctx *gctx = &pctx->ghash_ctx;
@@ -552,9 +589,11 @@ static void gcm_dec_hash_done(struct crypto_async_request *areq, int err)
                err = crypto_ablkcipher_decrypt(abreq);
                if (err == -EINPROGRESS || err == -EBUSY)
                        return;
+               else if (!err)
+                       err = crypto_gcm_verify(req, pctx);
        }
 
-       gcm_decrypt_done(areq, err);
+       aead_request_complete(req, err);
 }
 
 static int crypto_gcm_decrypt(struct aead_request *req)
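
The shape of the gcm.c refactor: each completion handler used to take a crypto_async_request and dig the aead_request out of areq->data, even when one stage chained directly into the next. Every stage is now split into a __stage_done(req, err) body working on the aead_request, plus a thin wrapper that unwraps exactly once at the crypto-API boundary; internal chaining calls the __ bodies directly. Schematically (stage is a placeholder for init/assoc/crypt/len/final):

	static void __gcm_hash_stage_done(struct aead_request *req, int err)
	{
		/* the real work; may tail into the next __gcm_hash_*_done(req, err) */
	}

	static void gcm_hash_stage_done(struct crypto_async_request *areq, int err)
	{
		__gcm_hash_stage_done(areq->data, err);   /* unwrap once */
	}
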
index cd80d1dd195093fc726bc9253a8f1423dc79d4a2..57bdaf6ffab1b43e1f91dfb35c04bdc8422db7fe 100644 (file)
@@ -203,8 +203,9 @@ static const union acpi_predefined_info predefined_names[] =
        {{"_BCT", 1, ACPI_RTYPE_INTEGER}},
        {{"_BDN", 0, ACPI_RTYPE_INTEGER}},
        {{"_BFS", 1, 0}},
-       {{"_BIF", 0, ACPI_RTYPE_PACKAGE}}, /* Fixed-length (9 Int),(4 Str) */
-                         {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 9, ACPI_RTYPE_STRING}, 4,0}},
+       {{"_BIF", 0, ACPI_RTYPE_PACKAGE} }, /* Fixed-length (9 Int),(4 Str/Buf) */
+                         {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 9,
+                            ACPI_RTYPE_STRING | ACPI_RTYPE_BUFFER}, 4, 0} },
 
        {{"_BIX", 0, ACPI_RTYPE_PACKAGE}},      /* Fixed-length (16 Int),(4 Str) */
        {{{ACPI_PTYPE1_FIXED, ACPI_RTYPE_INTEGER, 16, ACPI_RTYPE_STRING}, 4,
index e56b2a7b53db2dcc4735287edaeb10a7c54a3d56..23e5a0519af552d27ddcb9f8c14aa38062b434ea 100644 (file)
@@ -224,6 +224,7 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
         * _OSI(Linux) helps sound
         * DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad R61"),
         * DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T61"),
+        * T400, T500
         * _OSI(Linux) has Linux specific hooks
         * DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X61"),
         * _OSI(Linux) is a NOP:
@@ -254,6 +255,22 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
                     DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad X61"),
                },
        },
+       {
+       .callback = dmi_enable_osi_linux,
+       .ident = "Lenovo ThinkPad T400",
+       .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                    DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T400"),
+               },
+       },
+       {
+       .callback = dmi_enable_osi_linux,
+       .ident = "Lenovo ThinkPad T500",
+       .matches = {
+                    DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+                    DMI_MATCH(DMI_PRODUCT_VERSION, "ThinkPad T500"),
+               },
+       },
        {}
 };
 
index 4cc1b8116e76acd7b8edfbfca294d3e4f64bbc64..5f2c379ab7bfba1903158f5c3f94cf37631a6c44 100644 (file)
@@ -430,6 +430,14 @@ static struct dmi_system_id __initdata acpisleep_dmi_table[] = {
        },
        {
        .callback = init_set_sci_en_on_resume,
+       .ident = "Hewlett-Packard Compaq Presario C700 Notebook PC",
+       .matches = {
+               DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
+               DMI_MATCH(DMI_PRODUCT_NAME, "Compaq Presario C700 Notebook PC"),
+               },
+       },
+       {
+       .callback = init_set_sci_en_on_resume,
        .ident = "Hewlett-Packard Compaq Presario CQ40 Notebook PC",
        .matches = {
                DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
index d344db42a00251cb4b1206672ad6c453927d7463..172b57e6543fca8a9d0750ae37ad3d7d258762f8 100644 (file)
@@ -707,34 +707,17 @@ static unsigned int sata_fsl_dev_classify(struct ata_port *ap)
        return ata_dev_classify(&tf);
 }
 
-static int sata_fsl_prereset(struct ata_link *link, unsigned long deadline)
-{
-       /* FIXME: Never skip softreset, sata_fsl_softreset() is
-        * combination of soft and hard resets.  sata_fsl_softreset()
-        * needs to be splitted into soft and hard resets.
-        */
-       return 0;
-}
-
-static int sata_fsl_softreset(struct ata_link *link, unsigned int *class,
+static int sata_fsl_hardreset(struct ata_link *link, unsigned int *class,
                                        unsigned long deadline)
 {
        struct ata_port *ap = link->ap;
-       struct sata_fsl_port_priv *pp = ap->private_data;
        struct sata_fsl_host_priv *host_priv = ap->host->private_data;
        void __iomem *hcr_base = host_priv->hcr_base;
-       int pmp = sata_srst_pmp(link);
        u32 temp;
-       struct ata_taskfile tf;
-       u8 *cfis;
-       u32 Serror;
        int i = 0;
        unsigned long start_jiffies;
 
-       DPRINTK("in xx_softreset\n");
-
-       if (pmp != SATA_PMP_CTRL_PORT)
-               goto issue_srst;
+       DPRINTK("in xx_hardreset\n");
 
 try_offline_again:
        /*
@@ -749,7 +732,7 @@ try_offline_again:
 
        if (temp & ONLINE) {
                ata_port_printk(ap, KERN_ERR,
-                               "Softreset failed, not off-lined %d\n", i);
+                               "Hardreset failed, not off-lined %d\n", i);
 
                /*
                 * Try to offline the controller at least twice
@@ -761,7 +744,7 @@ try_offline_again:
                        goto try_offline_again;
        }
 
-       DPRINTK("softreset, controller off-lined\n");
+       DPRINTK("hardreset, controller off-lined\n");
        VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
        VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
 
@@ -786,11 +769,11 @@ try_offline_again:
 
        if (!(temp & ONLINE)) {
                ata_port_printk(ap, KERN_ERR,
-                               "Softreset failed, not on-lined\n");
+                               "Hardreset failed, not on-lined\n");
                goto err;
        }
 
-       DPRINTK("softreset, controller off-lined & on-lined\n");
+       DPRINTK("hardreset, controller off-lined & on-lined\n");
        VPRINTK("HStatus = 0x%x\n", ioread32(hcr_base + HSTATUS));
        VPRINTK("HControl = 0x%x\n", ioread32(hcr_base + HCONTROL));
 
@@ -806,7 +789,7 @@ try_offline_again:
                                "No Device OR PHYRDY change,Hstatus = 0x%x\n",
                                ioread32(hcr_base + HSTATUS));
                *class = ATA_DEV_NONE;
-               goto out;
+               return 0;
        }
 
        /*
@@ -819,11 +802,44 @@ try_offline_again:
        if ((temp & 0xFF) != 0x18) {
                ata_port_printk(ap, KERN_WARNING, "No Signature Update\n");
                *class = ATA_DEV_NONE;
-               goto out;
+               goto do_followup_srst;
        } else {
                ata_port_printk(ap, KERN_INFO,
                                "Signature Update detected @ %d msecs\n",
                                jiffies_to_msecs(jiffies - start_jiffies));
+               *class = sata_fsl_dev_classify(ap);
+               return 0;
+       }
+
+do_followup_srst:
+       /*
+        * request libATA to perform follow-up softreset
+        */
+       return -EAGAIN;
+
+err:
+       return -EIO;
+}
+
+static int sata_fsl_softreset(struct ata_link *link, unsigned int *class,
+                                       unsigned long deadline)
+{
+       struct ata_port *ap = link->ap;
+       struct sata_fsl_port_priv *pp = ap->private_data;
+       struct sata_fsl_host_priv *host_priv = ap->host->private_data;
+       void __iomem *hcr_base = host_priv->hcr_base;
+       int pmp = sata_srst_pmp(link);
+       u32 temp;
+       struct ata_taskfile tf;
+       u8 *cfis;
+       u32 Serror;
+
+       DPRINTK("in xx_softreset\n");
+
+       if (ata_link_offline(link)) {
+               DPRINTK("PHY reports no device\n");
+               *class = ATA_DEV_NONE;
+               return 0;
        }
 
        /*
@@ -834,7 +850,6 @@ try_offline_again:
         * reached here, we can send a command to the target device
         */
 
-issue_srst:
        DPRINTK("Sending SRST/device reset\n");
 
        ata_tf_init(link->device, &tf);
@@ -860,6 +875,8 @@ issue_srst:
                ioread32(CA + hcr_base), ioread32(CC + hcr_base));
 
        iowrite32(0xFFFF, CC + hcr_base);
+       if (pmp != SATA_PMP_CTRL_PORT)
+               iowrite32(pmp, CQPMP + hcr_base);
        iowrite32(1, CQ + hcr_base);
 
        temp = ata_wait_register(CQ + hcr_base, 0x1, 0x1, 1, 5000);
@@ -926,7 +943,6 @@ issue_srst:
                VPRINTK("cereg = 0x%x\n", ioread32(hcr_base + CE));
        }
 
-out:
        return 0;
 
 err:
@@ -988,18 +1004,6 @@ static void sata_fsl_error_intr(struct ata_port *ap)
                ehi->err_mask |= AC_ERR_ATA_BUS;
                ehi->action |= ATA_EH_SOFTRESET;
 
-               /*
-                * Ignore serror in case of fatal errors as we always want
-                * to do a soft-reset of the FSL SATA controller. Analyzing
-                * serror may cause libata to schedule a hard-reset action,
-                * and hard-reset currently does not do controller
-                * offline/online, causing command timeouts and leads to an
-                * un-recoverable state, hence make libATA ignore
-                * autopsy in case of fatal errors.
-                */
-
-               ehi->flags |= ATA_EHI_NO_AUTOPSY;
-
                freeze = 1;
        }
 
@@ -1267,8 +1271,8 @@ static struct ata_port_operations sata_fsl_ops = {
 
        .freeze = sata_fsl_freeze,
        .thaw = sata_fsl_thaw,
-       .prereset = sata_fsl_prereset,
        .softreset = sata_fsl_softreset,
+       .hardreset = sata_fsl_hardreset,
        .pmp_softreset = sata_fsl_softreset,
        .error_handler = sata_fsl_error_handler,
        .post_internal_cmd = sata_fsl_post_internal_cmd,
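
The combined reset is split along libata's model: sata_fsl_hardreset() now owns the controller offline/online sequence and PHY/signature handling, while sata_fsl_softreset() keeps only the SRST FIS part (and gains the PMP field write for port-multiplier links). When hardreset cannot classify the device it returns -EAGAIN, which libata's error handler treats as a request for a follow-up softreset, so the old always-softreset prereset stub can go:

	.softreset = sata_fsl_softreset,
	.hardreset = sata_fsl_hardreset,   /* -EAGAIN => EH issues a follow-up SRST */
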
index a770498a74ece95daa7f2eade9434e0e7518e095..846d89e3d12294d423af8a38ff57373d0b6f4505 100644 (file)
@@ -328,11 +328,11 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
                 * necessary.
                 */
                parent = dev->parent;
-               spin_unlock_irq(&dev->power.lock);
+               spin_unlock(&dev->power.lock);
 
                pm_runtime_get_noresume(parent);
 
-               spin_lock_irq(&parent->power.lock);
+               spin_lock(&parent->power.lock);
                /*
                 * We can resume if the parent's run-time PM is disabled or it
                 * is set to ignore children.
@@ -343,9 +343,9 @@ int __pm_runtime_resume(struct device *dev, bool from_wq)
                        if (parent->power.runtime_status != RPM_ACTIVE)
                                retval = -EBUSY;
                }
-               spin_unlock_irq(&parent->power.lock);
+               spin_unlock(&parent->power.lock);
 
-               spin_lock_irq(&dev->power.lock);
+               spin_lock(&dev->power.lock);
                if (retval)
                        goto out;
                goto repeat;
@@ -777,7 +777,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
        }
 
        if (parent) {
-               spin_lock_irq(&parent->power.lock);
+               spin_lock(&parent->power.lock);
 
                /*
                 * It is invalid to put an active child under a parent that is
@@ -793,7 +793,7 @@ int __pm_runtime_set_status(struct device *dev, unsigned int status)
                                atomic_inc(&parent->power.child_count);
                }
 
-               spin_unlock_irq(&parent->power.lock);
+               spin_unlock(&parent->power.lock);
 
                if (error)
                        goto out;
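
The spin_lock_irq -> spin_lock conversions above are safe because these paths already run under dev->power.lock with interrupts disabled; taking the parent's lock with the _irq variant would be redundant, and the paired spin_unlock_irq would re-enable interrupts too early. A hedged kernel-style sketch of the intended nesting (not the actual function body):

#include <linux/device.h>
#include <linux/spinlock.h>

/* Sketch only: the caller is assumed to have entered with
 * spin_lock_irq(&dev->power.lock) held. */
static void borrow_parent_lock(struct device *dev, struct device *parent)
{
        spin_unlock(&dev->power.lock);          /* interrupts stay disabled */

        spin_lock(&parent->power.lock);         /* plain lock is sufficient */
        /* ... examine parent->power.runtime_status here ... */
        spin_unlock(&parent->power.lock);

        spin_lock(&dev->power.lock);            /* reacquire; irqs still off */
}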
index 6399e5090df4d59175063ae1b158f7b77e8a41e3..92b126394fa1f1f9605bd5227242fbd9d01c691b 100644 (file)
@@ -482,7 +482,7 @@ static ssize_t host_store_rescan(struct device *dev,
 
        return count;
 }
-DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
+static DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
 
 static ssize_t dev_show_unique_id(struct device *dev,
                                 struct device_attribute *attr,
@@ -512,7 +512,7 @@ static ssize_t dev_show_unique_id(struct device *dev,
                                sn[8], sn[9], sn[10], sn[11],
                                sn[12], sn[13], sn[14], sn[15]);
 }
-DEVICE_ATTR(unique_id, S_IRUGO, dev_show_unique_id, NULL);
+static DEVICE_ATTR(unique_id, S_IRUGO, dev_show_unique_id, NULL);
 
 static ssize_t dev_show_vendor(struct device *dev,
                               struct device_attribute *attr,
@@ -536,7 +536,7 @@ static ssize_t dev_show_vendor(struct device *dev,
        else
                return snprintf(buf, sizeof(vendor) + 1, "%s\n", drv->vendor);
 }
-DEVICE_ATTR(vendor, S_IRUGO, dev_show_vendor, NULL);
+static DEVICE_ATTR(vendor, S_IRUGO, dev_show_vendor, NULL);
 
 static ssize_t dev_show_model(struct device *dev,
                              struct device_attribute *attr,
@@ -560,7 +560,7 @@ static ssize_t dev_show_model(struct device *dev,
        else
                return snprintf(buf, sizeof(model) + 1, "%s\n", drv->model);
 }
-DEVICE_ATTR(model, S_IRUGO, dev_show_model, NULL);
+static DEVICE_ATTR(model, S_IRUGO, dev_show_model, NULL);
 
 static ssize_t dev_show_rev(struct device *dev,
                            struct device_attribute *attr,
@@ -584,7 +584,7 @@ static ssize_t dev_show_rev(struct device *dev,
        else
                return snprintf(buf, sizeof(rev) + 1, "%s\n", drv->rev);
 }
-DEVICE_ATTR(rev, S_IRUGO, dev_show_rev, NULL);
+static DEVICE_ATTR(rev, S_IRUGO, dev_show_rev, NULL);
 
 static ssize_t cciss_show_lunid(struct device *dev,
                                struct device_attribute *attr, char *buf)
@@ -609,7 +609,7 @@ static ssize_t cciss_show_lunid(struct device *dev,
                lunid[0], lunid[1], lunid[2], lunid[3],
                lunid[4], lunid[5], lunid[6], lunid[7]);
 }
-DEVICE_ATTR(lunid, S_IRUGO, cciss_show_lunid, NULL);
+static DEVICE_ATTR(lunid, S_IRUGO, cciss_show_lunid, NULL);
 
 static ssize_t cciss_show_raid_level(struct device *dev,
                                     struct device_attribute *attr, char *buf)
@@ -632,7 +632,7 @@ static ssize_t cciss_show_raid_level(struct device *dev,
        return snprintf(buf, strlen(raid_label[raid]) + 7, "RAID %s\n",
                        raid_label[raid]);
 }
-DEVICE_ATTR(raid_level, S_IRUGO, cciss_show_raid_level, NULL);
+static DEVICE_ATTR(raid_level, S_IRUGO, cciss_show_raid_level, NULL);
 
 static ssize_t cciss_show_usage_count(struct device *dev,
                                      struct device_attribute *attr, char *buf)
@@ -651,7 +651,7 @@ static ssize_t cciss_show_usage_count(struct device *dev,
        spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
        return snprintf(buf, 20, "%d\n", count);
 }
-DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL);
+static DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL);
 
 static struct attribute *cciss_host_attrs[] = {
        &dev_attr_rescan.attr,
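
For context, DEVICE_ATTR(name, ...) defines a struct device_attribute called dev_attr_<name>; adding static keeps those definitions file-local (they previously leaked into the kernel's global namespace) while tables such as cciss_host_attrs[] can still take their addresses. A rough sketch of the expansion, using a hypothetical attribute and store handler:

#include <linux/device.h>

/* Hypothetical store handler, for illustration only. */
static ssize_t demo_store(struct device *dev, struct device_attribute *attr,
                          const char *buf, size_t count)
{
        return count;
}

/* `static DEVICE_ATTR(demo, S_IWUSR, NULL, demo_store);` expands to
 * approximately this file-local definition: */
static struct device_attribute dev_attr_demo =
        __ATTR(demo, S_IWUSR, NULL, demo_store);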
index 4068467ce7b93bcd8e82e1174603b089b868c6b9..3cb56a049e249eed3eac9abb7d0e0c012fae067c 100644 (file)
@@ -62,6 +62,7 @@
 #define PCI_DEVICE_ID_INTEL_IGDNG_D_IG     0x0042
 #define PCI_DEVICE_ID_INTEL_IGDNG_M_HB     0x0044
 #define PCI_DEVICE_ID_INTEL_IGDNG_MA_HB            0x0062
+#define PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB    0x006a
 #define PCI_DEVICE_ID_INTEL_IGDNG_M_IG     0x0046
 
 /* cover 915 and 945 variants */
@@ -96,7 +97,8 @@
                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_B43_HB || \
                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_D_HB || \
                agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_M_HB || \
-               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MA_HB)
+               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MA_HB || \
+               agp_bridge->dev->device == PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB)
 
 extern int agp_memory_reserved;
 
@@ -1161,12 +1163,6 @@ static int intel_i915_configure(void)
 
        intel_i9xx_setup_flush();
 
-#ifdef USE_PCI_DMA_API 
-       if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36)))
-               dev_err(&intel_private.pcidev->dev,
-                       "set gfx device dma mask 36bit failed!\n");
-#endif
-
        return 0;
 }
 
@@ -1364,6 +1360,7 @@ static void intel_i965_get_gtt_range(int *gtt_offset, int *gtt_size)
        case PCI_DEVICE_ID_INTEL_IGDNG_D_HB:
        case PCI_DEVICE_ID_INTEL_IGDNG_M_HB:
        case PCI_DEVICE_ID_INTEL_IGDNG_MA_HB:
+       case PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB:
                *gtt_offset = *gtt_size = MB(2);
                break;
        default:
@@ -2365,6 +2362,8 @@ static const struct intel_driver_description {
            "IGDNG/M", NULL, &intel_i965_driver },
        { PCI_DEVICE_ID_INTEL_IGDNG_MA_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
            "IGDNG/MA", NULL, &intel_i965_driver },
+       { PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB, PCI_DEVICE_ID_INTEL_IGDNG_M_IG, 0,
+           "IGDNG/MC2", NULL, &intel_i965_driver },
        { 0, 0, 0, NULL, NULL, NULL }
 };
 
@@ -2456,6 +2455,11 @@ static int __devinit agp_intel_probe(struct pci_dev *pdev,
                                &bridge->mode);
        }
 
+       if (bridge->driver->mask_memory == intel_i965_mask_memory)
+               if (pci_set_dma_mask(intel_private.pcidev, DMA_BIT_MASK(36)))
+                       dev_err(&intel_private.pcidev->dev,
+                               "set gfx device dma mask 36bit failed!\n");
+
        pci_set_drvdata(pdev, bridge);
        return agp_add_bridge(bridge);
 }
@@ -2561,6 +2565,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
        ID(PCI_DEVICE_ID_INTEL_IGDNG_D_HB),
        ID(PCI_DEVICE_ID_INTEL_IGDNG_M_HB),
        ID(PCI_DEVICE_ID_INTEL_IGDNG_MA_HB),
+       ID(PCI_DEVICE_ID_INTEL_IGDNG_MC2_HB),
        { }
 };
 
index 737be953cc589c5a75750ad47cb89726598ef3a9..950837cf9e9c3881718d161e4185c6fc1194b9cb 100644 (file)
@@ -1249,7 +1249,7 @@ static void kbd_keycode(unsigned int keycode, int down, int hw_raw)
 
        if (keycode >= NR_KEYS)
                if (keycode >= KEY_BRL_DOT1 && keycode <= KEY_BRL_DOT8)
-                       keysym = K(KT_BRL, keycode - KEY_BRL_DOT1 + 1);
+                       keysym = U(K(KT_BRL, keycode - KEY_BRL_DOT1 + 1));
                else
                        return;
        else
index a4bbb28f10be4473a0550e51466b14ee81a35b67..2e8552dc5edaef207604f2b3b51a26eff898e3ca 100644 (file)
@@ -221,6 +221,9 @@ int tty_port_block_til_ready(struct tty_port *port,
           the port has just hung up or is in another error state */
        if ((filp->f_flags & O_NONBLOCK) ||
                        (tty->flags & (1 << TTY_IO_ERROR))) {
+               /* Indicate we are open */
+               if (tty->termios->c_cflag & CBAUD)
+                       tty_port_raise_dtr_rts(port);
                port->flags |= ASYNC_NORMAL_ACTIVE;
                return 0;
        }
index ed86d3bf249a1e001a076e27985216cf94714d0e..6aa10284104aeb6e4bc3a22833ea347e67c5d976 100644 (file)
@@ -103,8 +103,8 @@ void vt_event_post(unsigned int event, unsigned int old, unsigned int new)
                ve->event.event = event;
                /* kernel view is consoles 0..n-1, user space view is
                   console 1..n with 0 meaning current, so we must bias */
-               ve->event.old = old + 1;
-               ve->event.new = new + 1;
+               ve->event.oldev = old + 1;
+               ve->event.newev = new + 1;
                wake = 1;
                ve->done = 1;
        }
@@ -186,7 +186,7 @@ int vt_waitactive(int n)
                vt_event_wait(&vw);
                if (vw.done == 0)
                        return -EINTR;
-       } while (vw.event.new != n);
+       } while (vw.event.newev != n);
        return 0;
 }
 
index 3938c7817095d0747a44045b51cff3c680e6e78a..ff57c40e9b8b94905ca090e79698390a66ad2583 100644 (file)
@@ -41,7 +41,7 @@ static struct cpufreq_driver *cpufreq_driver;
 static DEFINE_PER_CPU(struct cpufreq_policy *, cpufreq_cpu_data);
 #ifdef CONFIG_HOTPLUG_CPU
 /* This one keeps track of the previously set governor of a removed CPU */
-static DEFINE_PER_CPU(struct cpufreq_governor *, cpufreq_cpu_governor);
+static DEFINE_PER_CPU(char[CPUFREQ_NAME_LEN], cpufreq_cpu_governor);
 #endif
 static DEFINE_SPINLOCK(cpufreq_driver_lock);
 
@@ -774,10 +774,12 @@ int cpufreq_add_dev_policy(unsigned int cpu, struct cpufreq_policy *policy,
 #ifdef CONFIG_SMP
        unsigned long flags;
        unsigned int j;
-
 #ifdef CONFIG_HOTPLUG_CPU
-       if (per_cpu(cpufreq_cpu_governor, cpu)) {
-               policy->governor = per_cpu(cpufreq_cpu_governor, cpu);
+       struct cpufreq_governor *gov;
+
+       gov = __find_governor(per_cpu(cpufreq_cpu_governor, cpu));
+       if (gov) {
+               policy->governor = gov;
                dprintk("Restoring governor %s for cpu %d\n",
                       policy->governor->name, cpu);
        }
@@ -949,10 +951,13 @@ err_out_kobj_put:
 static int cpufreq_add_dev(struct sys_device *sys_dev)
 {
        unsigned int cpu = sys_dev->id;
-       int ret = 0;
+       int ret = 0, found = 0;
        struct cpufreq_policy *policy;
        unsigned long flags;
        unsigned int j;
+#ifdef CONFIG_HOTPLUG_CPU
+       int sibling;
+#endif
 
        if (cpu_is_offline(cpu))
                return 0;
@@ -999,7 +1004,19 @@ static int cpufreq_add_dev(struct sys_device *sys_dev)
        INIT_WORK(&policy->update, handle_update);
 
        /* Set governor before ->init, so that driver could check it */
-       policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
+#ifdef CONFIG_HOTPLUG_CPU
+       for_each_online_cpu(sibling) {
+               struct cpufreq_policy *cp = per_cpu(cpufreq_cpu_data, sibling);
+               if (cp && cp->governor &&
+                   (cpumask_test_cpu(cpu, cp->related_cpus))) {
+                       policy->governor = cp->governor;
+                       found = 1;
+                       break;
+               }
+       }
+#endif
+       if (!found)
+               policy->governor = CPUFREQ_DEFAULT_GOVERNOR;
        /* call driver. From then on the cpufreq must be able
         * to accept all calls to ->verify and ->setpolicy for this CPU
         */
@@ -1111,7 +1128,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
 #ifdef CONFIG_SMP
 
 #ifdef CONFIG_HOTPLUG_CPU
-       per_cpu(cpufreq_cpu_governor, cpu) = data->governor;
+       strncpy(per_cpu(cpufreq_cpu_governor, cpu), data->governor->name,
+                       CPUFREQ_NAME_LEN);
 #endif
 
        /* if we have other CPUs still registered, we need to unlink them,
@@ -1135,7 +1153,8 @@ static int __cpufreq_remove_dev(struct sys_device *sys_dev)
                                continue;
                        dprintk("removing link for cpu %u\n", j);
 #ifdef CONFIG_HOTPLUG_CPU
-                       per_cpu(cpufreq_cpu_governor, j) = data->governor;
+                       strncpy(per_cpu(cpufreq_cpu_governor, j),
+                               data->governor->name, CPUFREQ_NAME_LEN);
 #endif
                        cpu_sys_dev = get_cpu_sysdev(j);
                        sysfs_remove_link(&cpu_sys_dev->kobj, "cpufreq");
@@ -1606,9 +1625,22 @@ EXPORT_SYMBOL_GPL(cpufreq_register_governor);
 
 void cpufreq_unregister_governor(struct cpufreq_governor *governor)
 {
+#ifdef CONFIG_HOTPLUG_CPU
+       int cpu;
+#endif
+
        if (!governor)
                return;
 
+#ifdef CONFIG_HOTPLUG_CPU
+       for_each_present_cpu(cpu) {
+               if (cpu_online(cpu))
+                       continue;
+               if (!strcmp(per_cpu(cpufreq_cpu_governor, cpu), governor->name))
+                       strcpy(per_cpu(cpufreq_cpu_governor, cpu), "\0");
+       }
+#endif
+
        mutex_lock(&cpufreq_governor_mutex);
        list_del(&governor->governor_list);
        mutex_unlock(&cpufreq_governor_mutex);
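
Storing the departed CPU's governor by name rather than by pointer means the governor module can be unloaded in the meantime; restore then goes through a lookup that simply fails if the governor is gone, and unregistration clears any stale names. A small runnable sketch of the name-table pattern (toy types, not the cpufreq structures):

#include <stdio.h>
#include <string.h>

#define NAME_LEN 16
#define NR_CPUS  4

/* Toy stand-ins for the registered-governor list and __find_governor(). */
static const char *registered[] = { "ondemand", "performance" };
static char saved_governor[NR_CPUS][NAME_LEN];

static const char *find_governor(const char *name)
{
        for (size_t i = 0; i < sizeof(registered) / sizeof(registered[0]); i++)
                if (!strcmp(registered[i], name))
                        return registered[i];
        return NULL;    /* governor module was unloaded in the meantime */
}

int main(void)
{
        /* CPU goes offline: remember the governor by name only. */
        strncpy(saved_governor[2], "ondemand", NAME_LEN - 1);

        /* CPU comes back: restore only if still registered. */
        const char *gov = find_governor(saved_governor[2]);
        printf("restored governor: %s\n", gov ? gov : "(default)");
        return 0;
}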
index bc33ddc9c97cddd9e29eedc1a2737dee7e5cf5f7..c7b081b839ffe1fb37df117fb2f0395467383ec4 100644 (file)
@@ -116,9 +116,9 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
 
        idle_time = cputime64_sub(cur_wall_time, busy_time);
        if (wall)
-               *wall = cur_wall_time;
+               *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
 
-       return idle_time;
+       return (cputime64_t)jiffies_to_usecs(idle_time);
 }
 
 static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
index 071699de50eef68a56e586224ccbe1d07e319011..4b34ade2332baaa50bb1ca1af9c45e1a9893d321 100644 (file)
@@ -133,9 +133,9 @@ static inline cputime64_t get_cpu_idle_time_jiffy(unsigned int cpu,
 
        idle_time = cputime64_sub(cur_wall_time, busy_time);
        if (wall)
-               *wall = cur_wall_time;
+               *wall = (cputime64_t)jiffies_to_usecs(cur_wall_time);
 
-       return idle_time;
+       return (cputime64_t)jiffies_to_usecs(idle_time);
 }
 
 static inline cputime64_t get_cpu_idle_time(unsigned int cpu, cputime64_t *wall)
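
Both governors previously returned idle and wall time in raw jiffies; converting through jiffies_to_usecs() yields stable microsecond units regardless of the configured HZ. A runnable sketch of the conversion, assuming for simplicity an HZ that divides evenly into a second (the kernel version also handles the uneven cases):

#include <stdio.h>

#define HZ 250  /* assumed tick rate for this demo */

/* Simplified jiffies_to_usecs(); valid only when HZ divides 1000000. */
static unsigned long long jiffies_to_usecs(unsigned long long j)
{
        return j * (1000000ULL / HZ);
}

int main(void)
{
        unsigned long long idle_jiffies = 500;

        printf("%llu jiffies = %llu usecs\n",
               idle_jiffies, jiffies_to_usecs(idle_jiffies));
        return 0;
}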
index a9952b1236b07ee407cd84990ab53d02622318d4..84c51e17726966c196ac53ce05671e972f65b98a 100644 (file)
@@ -236,7 +236,7 @@ static inline void ecb_crypt(const u8 *in, u8 *out, u32 *key,
        /* Padlock in ECB mode fetches at least ecb_fetch_bytes of data.
         * We could avoid some copying here but it's probably not worth it.
         */
-       if (unlikely(((unsigned long)in & PAGE_SIZE) + ecb_fetch_bytes > PAGE_SIZE)) {
+       if (unlikely(((unsigned long)in & ~PAGE_MASK) + ecb_fetch_bytes > PAGE_SIZE)) {
                ecb_crypt_copy(in, out, key, cword, count);
                return;
        }
@@ -248,7 +248,7 @@ static inline u8 *cbc_crypt(const u8 *in, u8 *out, u32 *key,
                            u8 *iv, struct cword *cword, int count)
 {
        /* Padlock in CBC mode fetches at least cbc_fetch_bytes of data. */
-       if (unlikely(((unsigned long)in & PAGE_SIZE) + cbc_fetch_bytes > PAGE_SIZE))
+       if (unlikely(((unsigned long)in & ~PAGE_MASK) + cbc_fetch_bytes > PAGE_SIZE))
                return cbc_crypt_copy(in, out, key, iv, cword, count);
 
        return rep_xcrypt_cbc(in, out, key, iv, cword, count);
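
The bug being fixed here is subtle: `(unsigned long)in & PAGE_SIZE` tests a single bit of the address, so the prefetch-overrun check fired (or failed to fire) essentially at random, while `& ~PAGE_MASK` extracts the actual byte offset within the page. A runnable demonstration of the difference:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
        unsigned long in = 0x12345abc;  /* arbitrary buffer address */

        /* Buggy form: tests only bit 12 of the address. */
        printf("in & PAGE_SIZE  = %#lx\n", in & PAGE_SIZE);

        /* Fixed form: the byte offset of `in` within its page. */
        printf("in & ~PAGE_MASK = %#lx\n", in & ~PAGE_MASK);
        return 0;
}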
index 5903a88351bfdf5b844a45c616aa68419318a3fb..b401dadad4a87af4e567186ce8f7153f7b62a3a4 100644 (file)
@@ -26,6 +26,8 @@ config INTEL_IOATDMA
        select DMA_ENGINE
        select DCA
        select ASYNC_TX_DISABLE_CHANNEL_SWITCH
+       select ASYNC_TX_DISABLE_PQ_VAL_DMA
+       select ASYNC_TX_DISABLE_XOR_VAL_DMA
        help
          Enable support for the Intel(R) I/OAT DMA engine present
          in recent Intel Xeon chipsets.
index bd0b248de2cfabc28f1fecd63e5d64ed61236844..8f99354082ceaa169f7ac081594bc83b0c003478 100644 (file)
@@ -632,11 +632,21 @@ static bool device_has_all_tx_types(struct dma_device *device)
        #if defined(CONFIG_ASYNC_XOR) || defined(CONFIG_ASYNC_XOR_MODULE)
        if (!dma_has_cap(DMA_XOR, device->cap_mask))
                return false;
+
+       #ifndef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
+       if (!dma_has_cap(DMA_XOR_VAL, device->cap_mask))
+               return false;
+       #endif
        #endif
 
        #if defined(CONFIG_ASYNC_PQ) || defined(CONFIG_ASYNC_PQ_MODULE)
        if (!dma_has_cap(DMA_PQ, device->cap_mask))
                return false;
+
+       #ifndef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
+       if (!dma_has_cap(DMA_PQ_VAL, device->cap_mask))
+               return false;
+       #endif
        #endif
 
        return true;
index 69d02615c4d69d77276d760cc9b728c5986bc262..abd9038e06b1351ab7706c81fcbd96e8dbf2eebd 100644 (file)
@@ -98,17 +98,17 @@ static int dca_enabled_in_bios(struct pci_dev *pdev)
        cpuid_level_9 = cpuid_eax(9);
        res = test_bit(0, &cpuid_level_9);
        if (!res)
-               dev_err(&pdev->dev, "DCA is disabled in BIOS\n");
+               dev_dbg(&pdev->dev, "DCA is disabled in BIOS\n");
 
        return res;
 }
 
-static int system_has_dca_enabled(struct pci_dev *pdev)
+int system_has_dca_enabled(struct pci_dev *pdev)
 {
        if (boot_cpu_has(X86_FEATURE_DCA))
                return dca_enabled_in_bios(pdev);
 
-       dev_err(&pdev->dev, "boot cpu doesn't have X86_FEATURE_DCA\n");
+       dev_dbg(&pdev->dev, "boot cpu doesn't have X86_FEATURE_DCA\n");
        return 0;
 }
 
index c14fdfeb7f33f2c6c1d9a540c65f3ee21ebe9840..45edde99648070e8c9a0ca14f214ead551cbfc84 100644 (file)
@@ -297,9 +297,7 @@ static inline bool is_ioat_suspended(unsigned long status)
 /* channel was fatally programmed */
 static inline bool is_ioat_bug(unsigned long err)
 {
-       return !!(err & (IOAT_CHANERR_SRC_ADDR_ERR|IOAT_CHANERR_DEST_ADDR_ERR|
-                        IOAT_CHANERR_NEXT_ADDR_ERR|IOAT_CHANERR_CONTROL_ERR|
-                        IOAT_CHANERR_LENGTH_ERR));
+       return !!err;
 }
 
 static inline void ioat_unmap(struct pci_dev *pdev, dma_addr_t addr, size_t len,
index 96ffab7d37a70e82e882160dde2649487dffd742..8f1f7f05deaadaac6cd428cbef09e35e161ededc 100644 (file)
@@ -279,6 +279,8 @@ void ioat2_timer_event(unsigned long data)
                        u32 chanerr;
 
                        chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+                       dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
+                               __func__, chanerr);
                        BUG_ON(is_ioat_bug(chanerr));
                }
 
index 35d1e33afd5b9c3bf7a185d3f9df5b37747dacf6..42f6f10fb0cc249b2b07854f00e556f8335029d7 100644 (file)
@@ -378,6 +378,8 @@ static void ioat3_timer_event(unsigned long data)
                        u32 chanerr;
 
                        chanerr = readl(chan->reg_base + IOAT_CHANERR_OFFSET);
+                       dev_err(to_dev(chan), "%s: Channel halted (%x)\n",
+                               __func__, chanerr);
                        BUG_ON(is_ioat_bug(chanerr));
                }
 
@@ -569,7 +571,7 @@ __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result,
        dump_desc_dbg(ioat, compl_desc);
 
        /* we leave the channel locked to ensure in order submission */
-       return &desc->txd;
+       return &compl_desc->txd;
 }
 
 static struct dma_async_tx_descriptor *
@@ -728,7 +730,7 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result,
        dump_desc_dbg(ioat, compl_desc);
 
        /* we leave the channel locked to ensure in order submission */
-       return &desc->txd;
+       return &compl_desc->txd;
 }
 
 static struct dma_async_tx_descriptor *
@@ -736,10 +738,16 @@ ioat3_prep_pq(struct dma_chan *chan, dma_addr_t *dst, dma_addr_t *src,
              unsigned int src_cnt, const unsigned char *scf, size_t len,
              unsigned long flags)
 {
+       /* specify valid address for disabled result */
+       if (flags & DMA_PREP_PQ_DISABLE_P)
+               dst[0] = dst[1];
+       if (flags & DMA_PREP_PQ_DISABLE_Q)
+               dst[1] = dst[0];
+
        /* handle the single source multiply case from the raid6
         * recovery path
         */
-       if (unlikely((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1)) {
+       if ((flags & DMA_PREP_PQ_DISABLE_P) && src_cnt == 1) {
                dma_addr_t single_source[2];
                unsigned char single_source_coef[2];
 
@@ -761,6 +769,12 @@ ioat3_prep_pq_val(struct dma_chan *chan, dma_addr_t *pq, dma_addr_t *src,
                  unsigned int src_cnt, const unsigned char *scf, size_t len,
                  enum sum_check_flags *pqres, unsigned long flags)
 {
+       /* specify valid address for disabled result */
+       if (flags & DMA_PREP_PQ_DISABLE_P)
+               pq[0] = pq[1];
+       if (flags & DMA_PREP_PQ_DISABLE_Q)
+               pq[1] = pq[0];
+
        /* the cleanup routine only sets bits on validate failure, it
         * does not clear bits on validate success... so clear it here
         */
@@ -778,9 +792,9 @@ ioat3_prep_pqxor(struct dma_chan *chan, dma_addr_t dst, dma_addr_t *src,
        dma_addr_t pq[2];
 
        memset(scf, 0, src_cnt);
-       flags |= DMA_PREP_PQ_DISABLE_Q;
        pq[0] = dst;
-       pq[1] = ~0;
+       flags |= DMA_PREP_PQ_DISABLE_Q;
+       pq[1] = dst; /* specify valid address for disabled result */
 
        return __ioat3_prep_pq_lock(chan, NULL, pq, src, src_cnt, scf, len,
                                    flags);
@@ -800,9 +814,9 @@ ioat3_prep_pqxor_val(struct dma_chan *chan, dma_addr_t *src,
        *result = 0;
 
        memset(scf, 0, src_cnt);
-       flags |= DMA_PREP_PQ_DISABLE_Q;
        pq[0] = src[0];
-       pq[1] = ~0;
+       flags |= DMA_PREP_PQ_DISABLE_Q;
+       pq[1] = pq[0]; /* specify valid address for disabled result */
 
        return __ioat3_prep_pq_lock(chan, result, pq, &src[1], src_cnt - 1, scf,
                                    len, flags);
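
Each prep routine now aliases a disabled P or Q destination to the other, valid address; the assumption, per the comments added above, is that the engine still needs a writable address for a result it is told to suppress, so a sentinel like ~0 is unsafe. A self-contained sketch of the fixup, with illustrative flag values (the real ones live in the dmaengine headers):

#include <stdint.h>

typedef uint64_t dma_addr_t;

#define DMA_PREP_PQ_DISABLE_P (1 << 0)  /* illustrative values only */
#define DMA_PREP_PQ_DISABLE_Q (1 << 1)

/* Point a disabled result at the other, valid destination so the
 * engine never targets a bogus address such as ~0. */
static void fixup_pq_dst(dma_addr_t *dst, unsigned long flags)
{
        if (flags & DMA_PREP_PQ_DISABLE_P)
                dst[0] = dst[1];
        if (flags & DMA_PREP_PQ_DISABLE_Q)
                dst[1] = dst[0];
}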
@@ -1117,6 +1131,7 @@ static int __devinit ioat3_dma_self_test(struct ioatdma_device *device)
 int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
 {
        struct pci_dev *pdev = device->pdev;
+       int dca_en = system_has_dca_enabled(pdev);
        struct dma_device *dma;
        struct dma_chan *c;
        struct ioat_chan_common *chan;
@@ -1137,6 +1152,11 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
        dma->device_prep_dma_interrupt = ioat3_prep_interrupt_lock;
 
        cap = readl(device->reg_base + IOAT_DMA_CAP_OFFSET);
+
+       /* dca is incompatible with raid operations */
+       if (dca_en && (cap & (IOAT_CAP_XOR|IOAT_CAP_PQ)))
+               cap &= ~(IOAT_CAP_XOR|IOAT_CAP_PQ);
+
        if (cap & IOAT_CAP_XOR) {
                is_raid_device = true;
                dma->max_xor = 8;
@@ -1186,6 +1206,16 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca)
                device->timer_fn = ioat2_timer_event;
        }
 
+       #ifdef CONFIG_ASYNC_TX_DISABLE_PQ_VAL_DMA
+       dma_cap_clear(DMA_PQ_VAL, dma->cap_mask);
+       dma->device_prep_dma_pq_val = NULL;
+       #endif
+
+       #ifdef CONFIG_ASYNC_TX_DISABLE_XOR_VAL_DMA
+       dma_cap_clear(DMA_XOR_VAL, dma->cap_mask);
+       dma->device_prep_dma_xor_val = NULL;
+       #endif
+
        /* -= IOAT ver.3 workarounds =- */
        /* Write CHANERRMSK_INT with 3E07h to mask out the errors
         * that can cause stability issues for IOAT ver.3
index 99afb12bd4093fe862006111ebe1b8a560abe26f..60e675455b6aa13d10aef2fe50b753fbe0a5942d 100644 (file)
@@ -39,6 +39,8 @@
 #define IOAT_VER_3_0            0x30    /* Version 3.0 */
 #define IOAT_VER_3_2            0x32    /* Version 3.2 */
 
+int system_has_dca_enabled(struct pci_dev *pdev);
+
 struct ioat_dma_descriptor {
        uint32_t        size;
        union {
index 63038e18ab03266fe20a6891a91e938bad7dc79d..f015ec1967004ed103149c5e93b8ad2d670eae80 100644 (file)
@@ -92,9 +92,7 @@
 #define IOAT_CHANCTRL_ERR_COMPLETION_EN                0x0004
 #define IOAT_CHANCTRL_INT_REARM                        0x0001
 #define IOAT_CHANCTRL_RUN                      (IOAT_CHANCTRL_INT_REARM |\
-                                                IOAT_CHANCTRL_ERR_COMPLETION_EN |\
-                                                IOAT_CHANCTRL_ANY_ERR_ABORT_EN |\
-                                                IOAT_CHANCTRL_ERR_INT_EN)
+                                                IOAT_CHANCTRL_ANY_ERR_ABORT_EN)
 
 #define IOAT_DMA_COMP_OFFSET                   0x02    /* 16-bit DMA channel compatibility */
 #define IOAT_DMA_COMP_V1                       0x0001  /* Compatibility with DMA version 1 */
index b3b065c4e5c1f4eb0990c5e81286a8191e81edee..034ecf0ace03751b5fb20b282328e8a4d70005fe 100644 (file)
@@ -640,17 +640,16 @@ static int __init sh_dmae_probe(struct platform_device *pdev)
 #endif
        struct sh_dmae_device *shdev;
 
+       /* get platform data */
+       if (!pdev->dev.platform_data)
+               return -ENODEV;
+
        shdev = kzalloc(sizeof(struct sh_dmae_device), GFP_KERNEL);
        if (!shdev) {
                dev_err(&pdev->dev, "Not enough memory\n");
-               err = -ENOMEM;
-               goto shdev_err;
+               return -ENOMEM;
        }
 
-       /* get platform data */
-       if (!pdev->dev.platform_data)
-               goto shdev_err;
-
        /* platform data */
        memcpy(&shdev->pdata, pdev->dev.platform_data,
                        sizeof(struct sh_dmae_pdata));
@@ -722,7 +721,6 @@ eirq_err:
 rst_err:
        kfree(shdev);
 
-shdev_err:
        return err;
 }
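
The reordering above checks the cheap precondition (platform data present) before kzalloc(), so the early exit needs no cleanup and the shdev_err label can go away. A runnable toy version of that probe() pattern:

#include <stdio.h>
#include <stdlib.h>

/* Validate inexpensive preconditions before allocating, so failure
 * paths need no teardown. */
static int demo_probe(const void *platform_data)
{
        void *priv;

        if (!platform_data)
                return -1;              /* nothing allocated yet */

        priv = malloc(64);
        if (!priv)
                return -2;

        /* ... device setup would go here ... */
        free(priv);
        return 0;
}

int main(void)
{
        printf("no pdata: %d, with pdata: %d\n",
               demo_probe(NULL), demo_probe("pdata"));
        return 0;
}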
 
index 5d524254499ed154b6585c328e6b5a88413792db..94260aa76aa3a88c87a2d3f805ed1abee38d7d44 100644 (file)
@@ -275,7 +275,7 @@ static void log_irqs(u32 evt)
            !(evt & OHCI1394_busReset))
                return;
 
-       fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
+       fw_notify("IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
            evt & OHCI1394_selfIDComplete       ? " selfID"             : "",
            evt & OHCI1394_RQPkt                ? " AR_req"             : "",
            evt & OHCI1394_RSPkt                ? " AR_resp"            : "",
@@ -286,6 +286,7 @@ static void log_irqs(u32 evt)
            evt & OHCI1394_postedWriteErr       ? " postedWriteErr"     : "",
            evt & OHCI1394_cycleTooLong         ? " cycleTooLong"       : "",
            evt & OHCI1394_cycle64Seconds       ? " cycle64Seconds"     : "",
+           evt & OHCI1394_cycleInconsistent    ? " cycleInconsistent"  : "",
            evt & OHCI1394_regAccessFail        ? " regAccessFail"      : "",
            evt & OHCI1394_busReset             ? " busReset"           : "",
            evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
@@ -293,6 +294,7 @@ static void log_irqs(u32 evt)
                    OHCI1394_respTxComplete | OHCI1394_isochRx |
                    OHCI1394_isochTx | OHCI1394_postedWriteErr |
                    OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
+                   OHCI1394_cycleInconsistent |
                    OHCI1394_regAccessFail | OHCI1394_busReset)
                                                ? " ?"                  : "");
 }
@@ -1439,6 +1441,17 @@ static irqreturn_t irq_handler(int irq, void *data)
                          OHCI1394_LinkControl_cycleMaster);
        }
 
+       if (unlikely(event & OHCI1394_cycleInconsistent)) {
+               /*
+                * We need to clear this event bit in order to make
+                * cycleMatch isochronous I/O work.  In theory we should
+                * stop active cycleMatch iso contexts now and restart
+                * them at least two cycles later.  (FIXME?)
+                */
+               if (printk_ratelimit())
+                       fw_notify("isochronous cycle inconsistent\n");
+       }
+
        if (event & OHCI1394_cycle64Seconds) {
                cycle_time = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
                if ((cycle_time & 0x80000000) == 0)
@@ -1528,6 +1541,7 @@ static int ohci_enable(struct fw_card *card, u32 *config_rom, size_t length)
                  OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
                  OHCI1394_isochRx | OHCI1394_isochTx |
                  OHCI1394_postedWriteErr | OHCI1394_cycleTooLong |
+                 OHCI1394_cycleInconsistent |
                  OHCI1394_cycle64Seconds | OHCI1394_regAccessFail |
                  OHCI1394_masterIntEnable);
        if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
@@ -1890,15 +1904,30 @@ static int handle_it_packet(struct context *context,
 {
        struct iso_context *ctx =
                container_of(context, struct iso_context, context);
+       int i;
+       struct descriptor *pd;
 
-       if (last->transfer_status == 0)
-               /* This descriptor isn't done yet, stop iteration. */
+       for (pd = d; pd <= last; pd++)
+               if (pd->transfer_status)
+                       break;
+       if (pd > last)
+               /* Descriptor(s) not done yet, stop iteration */
                return 0;
 
-       if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS)
+       i = ctx->header_length;
+       if (i + 4 < PAGE_SIZE) {
+               /* Present this value as big-endian to match the receive code */
+               *(__be32 *)(ctx->header + i) = cpu_to_be32(
+                               ((u32)le16_to_cpu(pd->transfer_status) << 16) |
+                               le16_to_cpu(pd->res_count));
+               ctx->header_length += 4;
+       }
+       if (le16_to_cpu(last->control) & DESCRIPTOR_IRQ_ALWAYS) {
                ctx->base.callback(&ctx->base, le16_to_cpu(last->res_count),
-                                  0, NULL, ctx->base.callback_data);
-
+                                  ctx->header_length, ctx->header,
+                                  ctx->base.callback_data);
+               ctx->header_length = 0;
+       }
        return 1;
 }
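
The new IT-completion path hands transfer_status and res_count to the callback as a single big-endian quadlet, matching what the receive path already reports. A runnable sketch of the packing, using htonl() in place of the kernel's cpu_to_be32():

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
        uint16_t transfer_status = 0x0011;      /* sample values */
        uint16_t res_count = 0x0004;

        /* Equivalent of cpu_to_be32((status << 16) | res_count). */
        uint32_t quadlet = htonl(((uint32_t)transfer_status << 16) | res_count);

        printf("header quadlet (logical value): %08x\n", ntohl(quadlet));
        return 0;
}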
 
index f831ea15929169af0f825343ec366974e9738932..96eddd17e050c8aed4fa202360abebb310f7b05c 100644 (file)
@@ -92,6 +92,7 @@ config DRM_I830
 config DRM_I915
        tristate "i915 driver"
        depends on AGP_INTEL
+       select SHMEM
        select DRM_KMS_HELPER
        select FB_CFB_FILLRECT
        select FB_CFB_COPYAREA
index cea665d86dd387e3515b0ed1a1fb310d1ac15075..b54ba63d506e0350abbf53acd50420cbeb79f7c3 100644 (file)
@@ -662,6 +662,12 @@ static struct drm_display_mode *drm_mode_detailed(struct drm_device *dev,
                return NULL;
        }
 
+       /* Some EDIDs have bogus h/vtotal values */
+       if (mode->hsync_end > mode->htotal)
+               mode->htotal = mode->hsync_end + 1;
+       if (mode->vsync_end > mode->vtotal)
+               mode->vtotal = mode->vsync_end + 1;
+
        drm_mode_set_name(mode);
 
        if (pt->misc & DRM_EDID_PT_INTERLACED)
index dc8e374a0b55e6852103cfdd56d0e84dcd0232d3..65ef011fa8ba093297d9e94983db2d0236d2a842 100644 (file)
@@ -599,7 +599,7 @@ int drm_fb_helper_check_var(struct fb_var_screeninfo *var,
        struct drm_framebuffer *fb = fb_helper->fb;
        int depth;
 
-       if (var->pixclock == -1 || !var->pixclock)
+       if (var->pixclock != 0)
                return -EINVAL;
 
        /* Need to resize the fb object !!! */
@@ -691,7 +691,7 @@ int drm_fb_helper_set_par(struct fb_info *info)
        int ret;
        int i;
 
-       if (var->pixclock != -1) {
+       if (var->pixclock != 0) {
                DRM_ERROR("PIXEL CLCOK SET\n");
                return -EINVAL;
        }
@@ -904,7 +904,7 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev,
        fb_helper->fb = fb;
 
        if (new_fb) {
-               info->var.pixclock = -1;
+               info->var.pixclock = 0;
                if (register_framebuffer(info) < 0)
                        return -EINVAL;
        } else {
index 80391995bdec05f07e809e33651466379c7823b3..e9dbb481c469f4a0071256349bd7a5e7a1239dcd 100644 (file)
@@ -552,7 +552,7 @@ int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
        vma->vm_flags |= VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
        vma->vm_ops = obj->dev->driver->gem_vm_ops;
        vma->vm_private_data = map->handle;
-       vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
+       vma->vm_page_prot = pgprot_writecombine(vm_get_page_prot(vma->vm_flags));
 
        /* Take a ref for this mapping of the object, so that the fault
         * handler can dereference the mmap offset's pointer to the object.
index c861d80fd779c68d6b4ce4af2c3b5191db8e3bbf..97dc5a4f0de42604463ac99a7a161c33b2d3550f 100644 (file)
@@ -103,6 +103,11 @@ static struct drm_mm_node *drm_mm_kmalloc(struct drm_mm *mm, int atomic)
        return child;
 }
 
+/* drm_mm_pre_get() - pre-allocate a drm_mm_node structure
+ * drm_mm:     memory manager struct we are pre-allocating for
+ *
+ * Returns 0 on success or -ENOMEM if allocation fails.
+ */
 int drm_mm_pre_get(struct drm_mm *mm)
 {
        struct drm_mm_node *node;
@@ -253,12 +258,14 @@ void drm_mm_put_block(struct drm_mm_node *cur)
                                prev_node->size += next_node->size;
                                list_del(&next_node->ml_entry);
                                list_del(&next_node->fl_entry);
+                               spin_lock(&mm->unused_lock);
                                if (mm->num_unused < MM_UNUSED_TARGET) {
                                        list_add(&next_node->fl_entry,
                                                 &mm->unused_nodes);
                                        ++mm->num_unused;
                                } else
                                        kfree(next_node);
+                               spin_unlock(&mm->unused_lock);
                        } else {
                                next_node->size += cur->size;
                                next_node->start = cur->start;
@@ -271,11 +278,13 @@ void drm_mm_put_block(struct drm_mm_node *cur)
                list_add(&cur->fl_entry, &mm->fl_entry);
        } else {
                list_del(&cur->ml_entry);
+               spin_lock(&mm->unused_lock);
                if (mm->num_unused < MM_UNUSED_TARGET) {
                        list_add(&cur->fl_entry, &mm->unused_nodes);
                        ++mm->num_unused;
                } else
                        kfree(cur);
+               spin_unlock(&mm->unused_lock);
        }
 }
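
The per-manager unused_nodes cache can be refilled from concurrent frees, so every producer must take unused_lock around the bounded push-or-free decision shown above. A hedged kernel-style sketch of that pattern (field names as in drm_mm, body simplified):

#include <drm/drm_mm.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#define MM_UNUSED_TARGET 4      /* as in drm_mm.c */

/* Simplified: cache a freed node while the pool is not full, else free it. */
static void cache_or_free(struct drm_mm *mm, struct drm_mm_node *node)
{
        spin_lock(&mm->unused_lock);
        if (mm->num_unused < MM_UNUSED_TARGET) {
                list_add(&node->fl_entry, &mm->unused_nodes);
                ++mm->num_unused;
        } else {
                kfree(node);
        }
        spin_unlock(&mm->unused_lock);
}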
 
index f8ce9a3a420de39c39c2f517f0c067cfda7669a5..26bf0552b3cb572cf3c03b7c63e3a338573eddad 100644 (file)
@@ -267,10 +267,10 @@ static void i915_dump_pages(struct seq_file *m, struct page **pages, int page_co
        uint32_t *mem;
 
        for (page = 0; page < page_count; page++) {
-               mem = kmap(pages[page]);
+               mem = kmap_atomic(pages[page], KM_USER0);
                for (i = 0; i < PAGE_SIZE; i += 4)
                        seq_printf(m, "%08x :  %08x\n", i, mem[i / 4]);
-               kunmap(pages[page]);
+               kunmap_atomic(pages[page], KM_USER0);
        }
 }
 
index 57204e298975b4b9516f5be83734e442806bb76c..a725f6591192e2216798fc4dcf04108125795b3e 100644 (file)
@@ -296,6 +296,7 @@ typedef struct drm_i915_private {
        u32 saveVBLANK_A;
        u32 saveVSYNC_A;
        u32 saveBCLRPAT_A;
+       u32 saveTRANSACONF;
        u32 saveTRANS_HTOTAL_A;
        u32 saveTRANS_HBLANK_A;
        u32 saveTRANS_HSYNC_A;
@@ -326,6 +327,7 @@ typedef struct drm_i915_private {
        u32 saveVBLANK_B;
        u32 saveVSYNC_B;
        u32 saveBCLRPAT_B;
+       u32 saveTRANSBCONF;
        u32 saveTRANS_HTOTAL_B;
        u32 saveTRANS_HBLANK_B;
        u32 saveTRANS_HSYNC_B;
@@ -414,6 +416,16 @@ typedef struct drm_i915_private {
        u32 savePFB_WIN_SZ;
        u32 savePFA_WIN_POS;
        u32 savePFB_WIN_POS;
+       u32 savePCH_DREF_CONTROL;
+       u32 saveDISP_ARB_CTL;
+       u32 savePIPEA_DATA_M1;
+       u32 savePIPEA_DATA_N1;
+       u32 savePIPEA_LINK_M1;
+       u32 savePIPEA_LINK_N1;
+       u32 savePIPEB_DATA_M1;
+       u32 savePIPEB_DATA_N1;
+       u32 savePIPEB_LINK_M1;
+       u32 savePIPEB_LINK_N1;
 
        struct {
                struct drm_mm gtt_space;
index c3ceffa46ea0e2dfdfc600e7440f1750376eb1f9..aa7fd82aa6eb343afa3fdb4b7088104f311f00d9 100644 (file)
@@ -254,10 +254,15 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev)
 {
        drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
        int ret = IRQ_NONE;
-       u32 de_iir, gt_iir;
+       u32 de_iir, gt_iir, de_ier;
        u32 new_de_iir, new_gt_iir;
        struct drm_i915_master_private *master_priv;
 
+       /* disable master interrupt before clearing iir  */
+       de_ier = I915_READ(DEIER);
+       I915_WRITE(DEIER, de_ier & ~DE_MASTER_IRQ_CONTROL);
+       (void)I915_READ(DEIER);
+
        de_iir = I915_READ(DEIIR);
        gt_iir = I915_READ(GTIIR);
 
@@ -290,6 +295,9 @@ irqreturn_t igdng_irq_handler(struct drm_device *dev)
                gt_iir = new_gt_iir;
        }
 
+       I915_WRITE(DEIER, de_ier);
+       (void)I915_READ(DEIER);
+
        return ret;
 }
 
index 992d5617e79829d4a7586d81f4e4579a0de8c251..6eec8171a44e55f6000ec5545b8f4733c4cbe86a 100644 (file)
@@ -239,6 +239,11 @@ static void i915_save_modeset_reg(struct drm_device *dev)
        if (drm_core_check_feature(dev, DRIVER_MODESET))
                return;
 
+       if (IS_IGDNG(dev)) {
+               dev_priv->savePCH_DREF_CONTROL = I915_READ(PCH_DREF_CONTROL);
+               dev_priv->saveDISP_ARB_CTL = I915_READ(DISP_ARB_CTL);
+       }
+
        /* Pipe & plane A info */
        dev_priv->savePIPEACONF = I915_READ(PIPEACONF);
        dev_priv->savePIPEASRC = I915_READ(PIPEASRC);
@@ -263,6 +268,11 @@ static void i915_save_modeset_reg(struct drm_device *dev)
                dev_priv->saveBCLRPAT_A = I915_READ(BCLRPAT_A);
 
        if (IS_IGDNG(dev)) {
+               dev_priv->savePIPEA_DATA_M1 = I915_READ(PIPEA_DATA_M1);
+               dev_priv->savePIPEA_DATA_N1 = I915_READ(PIPEA_DATA_N1);
+               dev_priv->savePIPEA_LINK_M1 = I915_READ(PIPEA_LINK_M1);
+               dev_priv->savePIPEA_LINK_N1 = I915_READ(PIPEA_LINK_N1);
+
                dev_priv->saveFDI_TXA_CTL = I915_READ(FDI_TXA_CTL);
                dev_priv->saveFDI_RXA_CTL = I915_READ(FDI_RXA_CTL);
 
@@ -270,6 +280,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
                dev_priv->savePFA_WIN_SZ = I915_READ(PFA_WIN_SZ);
                dev_priv->savePFA_WIN_POS = I915_READ(PFA_WIN_POS);
 
+               dev_priv->saveTRANSACONF = I915_READ(TRANSACONF);
                dev_priv->saveTRANS_HTOTAL_A = I915_READ(TRANS_HTOTAL_A);
                dev_priv->saveTRANS_HBLANK_A = I915_READ(TRANS_HBLANK_A);
                dev_priv->saveTRANS_HSYNC_A = I915_READ(TRANS_HSYNC_A);
@@ -314,6 +325,11 @@ static void i915_save_modeset_reg(struct drm_device *dev)
                dev_priv->saveBCLRPAT_B = I915_READ(BCLRPAT_B);
 
        if (IS_IGDNG(dev)) {
+               dev_priv->savePIPEB_DATA_M1 = I915_READ(PIPEB_DATA_M1);
+               dev_priv->savePIPEB_DATA_N1 = I915_READ(PIPEB_DATA_N1);
+               dev_priv->savePIPEB_LINK_M1 = I915_READ(PIPEB_LINK_M1);
+               dev_priv->savePIPEB_LINK_N1 = I915_READ(PIPEB_LINK_N1);
+
                dev_priv->saveFDI_TXB_CTL = I915_READ(FDI_TXB_CTL);
                dev_priv->saveFDI_RXB_CTL = I915_READ(FDI_RXB_CTL);
 
@@ -321,6 +337,7 @@ static void i915_save_modeset_reg(struct drm_device *dev)
                dev_priv->savePFB_WIN_SZ = I915_READ(PFB_WIN_SZ);
                dev_priv->savePFB_WIN_POS = I915_READ(PFB_WIN_POS);
 
+               dev_priv->saveTRANSBCONF = I915_READ(TRANSBCONF);
                dev_priv->saveTRANS_HTOTAL_B = I915_READ(TRANS_HTOTAL_B);
                dev_priv->saveTRANS_HBLANK_B = I915_READ(TRANS_HBLANK_B);
                dev_priv->saveTRANS_HSYNC_B = I915_READ(TRANS_HSYNC_B);
@@ -368,6 +385,11 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
                fpb1_reg = FPB1;
        }
 
+       if (IS_IGDNG(dev)) {
+               I915_WRITE(PCH_DREF_CONTROL, dev_priv->savePCH_DREF_CONTROL);
+               I915_WRITE(DISP_ARB_CTL, dev_priv->saveDISP_ARB_CTL);
+       }
+
        /* Pipe & plane A info */
        /* Prime the clock */
        if (dev_priv->saveDPLL_A & DPLL_VCO_ENABLE) {
@@ -395,6 +417,11 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
                I915_WRITE(BCLRPAT_A, dev_priv->saveBCLRPAT_A);
 
        if (IS_IGDNG(dev)) {
+               I915_WRITE(PIPEA_DATA_M1, dev_priv->savePIPEA_DATA_M1);
+               I915_WRITE(PIPEA_DATA_N1, dev_priv->savePIPEA_DATA_N1);
+               I915_WRITE(PIPEA_LINK_M1, dev_priv->savePIPEA_LINK_M1);
+               I915_WRITE(PIPEA_LINK_N1, dev_priv->savePIPEA_LINK_N1);
+
                I915_WRITE(FDI_RXA_CTL, dev_priv->saveFDI_RXA_CTL);
                I915_WRITE(FDI_TXA_CTL, dev_priv->saveFDI_TXA_CTL);
 
@@ -402,6 +429,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
                I915_WRITE(PFA_WIN_SZ, dev_priv->savePFA_WIN_SZ);
                I915_WRITE(PFA_WIN_POS, dev_priv->savePFA_WIN_POS);
 
+               I915_WRITE(TRANSACONF, dev_priv->saveTRANSACONF);
                I915_WRITE(TRANS_HTOTAL_A, dev_priv->saveTRANS_HTOTAL_A);
                I915_WRITE(TRANS_HBLANK_A, dev_priv->saveTRANS_HBLANK_A);
                I915_WRITE(TRANS_HSYNC_A, dev_priv->saveTRANS_HSYNC_A);
@@ -439,7 +467,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
        /* Actually enable it */
        I915_WRITE(dpll_b_reg, dev_priv->saveDPLL_B);
        DRM_UDELAY(150);
-       if (IS_I965G(dev))
+       if (IS_I965G(dev) && !IS_IGDNG(dev))
                I915_WRITE(DPLL_B_MD, dev_priv->saveDPLL_B_MD);
        DRM_UDELAY(150);
 
@@ -454,6 +482,11 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
                I915_WRITE(BCLRPAT_B, dev_priv->saveBCLRPAT_B);
 
        if (IS_IGDNG(dev)) {
+               I915_WRITE(PIPEB_DATA_M1, dev_priv->savePIPEB_DATA_M1);
+               I915_WRITE(PIPEB_DATA_N1, dev_priv->savePIPEB_DATA_N1);
+               I915_WRITE(PIPEB_LINK_M1, dev_priv->savePIPEB_LINK_M1);
+               I915_WRITE(PIPEB_LINK_N1, dev_priv->savePIPEB_LINK_N1);
+
                I915_WRITE(FDI_RXB_CTL, dev_priv->saveFDI_RXB_CTL);
                I915_WRITE(FDI_TXB_CTL, dev_priv->saveFDI_TXB_CTL);
 
@@ -461,6 +494,7 @@ static void i915_restore_modeset_reg(struct drm_device *dev)
                I915_WRITE(PFB_WIN_SZ, dev_priv->savePFB_WIN_SZ);
                I915_WRITE(PFB_WIN_POS, dev_priv->savePFB_WIN_POS);
 
+               I915_WRITE(TRANSBCONF, dev_priv->saveTRANSBCONF);
                I915_WRITE(TRANS_HTOTAL_B, dev_priv->saveTRANS_HTOTAL_B);
                I915_WRITE(TRANS_HBLANK_B, dev_priv->saveTRANS_HBLANK_B);
                I915_WRITE(TRANS_HSYNC_B, dev_priv->saveTRANS_HSYNC_B);
index 212e22740fc123e4a569a1e84e7445ae8a9de135..e5051446c48e12ff14f5d504fbc3bdef72c8b2b8 100644 (file)
@@ -262,8 +262,8 @@ static bool intel_crt_detect_hotplug(struct drm_connector *connector)
                } while (time_after(timeout, jiffies));
        }
 
-       if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) ==
-           CRT_HOTPLUG_MONITOR_COLOR)
+       if ((I915_READ(PORT_HOTPLUG_STAT) & CRT_HOTPLUG_MONITOR_MASK) !=
+           CRT_HOTPLUG_MONITOR_NONE)
                return true;
 
        return false;
index 3ba6546b7c7f6875938ac8bc9b7345a4ca3ac200..099f420de57a2350d1da373cd7059b8305776e0a 100644 (file)
@@ -863,10 +863,8 @@ intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
        struct drm_device *dev = crtc->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        intel_clock_t clock;
-       int max_n;
-       bool found;
        int err_most = 47;
-       found = false;
+       int err_min = 10000;
 
        /* eDP has only 2 clock choices, no n/m/p setting */
        if (HAS_eDP)
@@ -890,10 +888,9 @@ intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
        }
 
        memset(best_clock, 0, sizeof(*best_clock));
-       max_n = limit->n.max;
        for (clock.p1 = limit->p1.max; clock.p1 >= limit->p1.min; clock.p1--) {
                /* based on hardware requirement, prefer smaller n for precision */
-               for (clock.n = limit->n.min; clock.n <= max_n; clock.n++) {
+               for (clock.n = limit->n.min; clock.n <= limit->n.max; clock.n++) {
                        /* based on hardware requirement, prefer larger m1,m2 */
                        for (clock.m1 = limit->m1.max;
                             clock.m1 >= limit->m1.min; clock.m1--) {
@@ -907,18 +904,18 @@ intel_igdng_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc,
                                        this_err = abs((10000 - (target*10000/clock.dot)));
                                        if (this_err < err_most) {
                                                *best_clock = clock;
-                                               err_most = this_err;
-                                               max_n = clock.n;
-                                               found = true;
                                                /* found on first matching */
                                                goto out;
+                                       } else if (this_err < err_min) {
+                                               *best_clock = clock;
+                                               err_min = this_err;
                                        }
                                }
                        }
                }
        }
 out:
-       return found;
+       return true;
 }
 
 /* DisplayPort has only two frequencies, 162MHz and 270MHz */
index 663ab6de0b582602d0b62cc8e088cbf6f99c7fae..c33451aec1bd647a601c7f256fd805598fb42d01 100644 (file)
@@ -77,14 +77,32 @@ static void intel_hdmi_dpms(struct drm_encoder *encoder, int mode)
        struct intel_hdmi_priv *hdmi_priv = intel_output->dev_priv;
        u32 temp;
 
-       if (mode != DRM_MODE_DPMS_ON) {
-               temp = I915_READ(hdmi_priv->sdvox_reg);
+       temp = I915_READ(hdmi_priv->sdvox_reg);
+
+       /* HW workaround: the enable bit must be toggled off and on for 12bpc,
+        * but we do it unconditionally since it proved more stable in testing.
+        */
+       if (IS_IGDNG(dev)) {
                I915_WRITE(hdmi_priv->sdvox_reg, temp & ~SDVO_ENABLE);
+               POSTING_READ(hdmi_priv->sdvox_reg);
+       }
+
+       if (mode != DRM_MODE_DPMS_ON) {
+               temp &= ~SDVO_ENABLE;
        } else {
-               temp = I915_READ(hdmi_priv->sdvox_reg);
-               I915_WRITE(hdmi_priv->sdvox_reg, temp | SDVO_ENABLE);
+               temp |= SDVO_ENABLE;
        }
+
+       I915_WRITE(hdmi_priv->sdvox_reg, temp);
        POSTING_READ(hdmi_priv->sdvox_reg);
+
+       /* HW workaround: write the register twice, as the first write may
+        * otherwise be masked.
+        */
+       if (IS_IGDNG(dev)) {
+               I915_WRITE(hdmi_priv->sdvox_reg, temp);
+               POSTING_READ(hdmi_priv->sdvox_reg);
+       }
 }
 
 static void intel_hdmi_save(struct drm_connector *connector)
index 901befe03da278e7a6879c9cab1498a315eb0248..d67c42555ab9b826b4f18a7a91fa30e02b2522ae 100644 (file)
@@ -107,6 +107,7 @@ static uint32_t atom_iio_execute(struct atom_context *ctx, int base,
                        base += 3;
                        break;
                case ATOM_IIO_WRITE:
+                       (void)ctx->card->reg_read(ctx->card, CU16(base + 1));
                        ctx->card->reg_write(ctx->card, CU16(base + 1), temp);
                        base += 3;
                        break;
index 757f5cd37744cc40163338a121c7ae2b4abc6467..224506a2f7b1ae6d53d5b68347a4a7889a262a2c 100644 (file)
@@ -519,6 +519,7 @@ typedef int (*radeon_packet3_check_t)(struct radeon_cs_parser *p,
  * AGP
  */
 int radeon_agp_init(struct radeon_device *rdev);
+void radeon_agp_resume(struct radeon_device *rdev);
 void radeon_agp_fini(struct radeon_device *rdev);
 
 
index 23ea9955ac596f28423fb28781e76a975e7c3995..54bf49a6d676b6a53c1e301ec5db855d2cf7f4fb 100644 (file)
@@ -237,6 +237,18 @@ int radeon_agp_init(struct radeon_device *rdev)
 #endif
 }
 
+void radeon_agp_resume(struct radeon_device *rdev)
+{
+#if __OS_HAS_AGP
+       int r;
+       if (rdev->flags & RADEON_IS_AGP) {
+               r = radeon_agp_init(rdev);
+               if (r)
+                       dev_warn(rdev->dev, "radeon AGP reinit failed\n");
+       }
+#endif
+}
+
 void radeon_agp_fini(struct radeon_device *rdev)
 {
 #if __OS_HAS_AGP
index fce4c4087fda1cce504228b5a218c0a9cfd714da..29763ceae3af9de8dc2067ff38e0ac63d59fe75e 100644 (file)
@@ -566,8 +566,9 @@ static enum drm_connector_status radeon_vga_detect(struct drm_connector *connect
                radeon_i2c_do_lock(radeon_connector, 0);
 
                if (!radeon_connector->edid) {
-                       DRM_ERROR("DDC responded but not EDID found for %s\n",
-                                 drm_get_connector_name(connector));
+                       DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
+                                       drm_get_connector_name(connector));
+                       ret = connector_status_connected;
                } else {
                        radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
 
@@ -720,8 +721,8 @@ static enum drm_connector_status radeon_dvi_detect(struct drm_connector *connect
                radeon_i2c_do_lock(radeon_connector, 0);
 
                if (!radeon_connector->edid) {
-                       DRM_ERROR("DDC responded but not EDID found for %s\n",
-                                 drm_get_connector_name(connector));
+                       DRM_ERROR("%s: probed a monitor but no|invalid EDID\n",
+                                       drm_get_connector_name(connector));
                } else {
                        radeon_connector->use_digital = !!(radeon_connector->edid->input & DRM_EDID_INPUT_DIGITAL);
 
@@ -1149,6 +1150,13 @@ radeon_add_legacy_connector(struct drm_device *dev,
                        if (ret)
                                goto failed;
                        radeon_connector->dac_load_detect = true;
+                       /* The RS400, RC410 and RS480 chipsets report many
+                        * false positives on load detect. We haven't yet
+                        * found a way to make load detect reliable on these
+                        * chipsets, so just disable it for TV.
+                        */
+                       if (rdev->family == CHIP_RS400 || rdev->family == CHIP_RS480)
+                               radeon_connector->dac_load_detect = false;
                        drm_connector_attach_property(&radeon_connector->base,
                                                      rdev->mode_info.load_detect_property,
                                                      1);
index e3f9edfa40fe8a5d63b64eee70e91345c4a1bd7b..41bb76fbe734f282dadbfee6d70ba1e046e6ecee 100644 (file)
@@ -688,6 +688,8 @@ int radeon_resume_kms(struct drm_device *dev)
                return -1;
        }
        pci_set_master(dev->pdev);
+       /* resume AGP if in use */
+       radeon_agp_resume(rdev);
        radeon_resume(rdev);
        radeon_restore_bios_scratch_regs(rdev);
        fb_set_suspend(rdev->fbdev_info, 0);
index 7935f793bf629b8599644d242ddfbfcaf3a04cb6..ba68c9fe90a1b7db910e8f6d551a192e42ef264e 100644 (file)
@@ -137,8 +137,6 @@ int rv515_mc_wait_for_idle(struct radeon_device *rdev)
 
 void rv515_vga_render_disable(struct radeon_device *rdev)
 {
-       WREG32(R_000330_D1VGA_CONTROL, 0);
-       WREG32(R_000338_D2VGA_CONTROL, 0);
        WREG32(R_000300_VGA_RENDER_CONTROL,
                RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
 }
@@ -382,7 +380,6 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
        save->d2crtc_control = RREG32(R_006880_D2CRTC_CONTROL);
 
        /* Stop all video */
-       WREG32(R_000330_D1VGA_CONTROL, 0);
        WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
        WREG32(R_000300_VGA_RENDER_CONTROL, 0);
        WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1);
@@ -391,6 +388,8 @@ void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
        WREG32(R_006880_D2CRTC_CONTROL, 0);
        WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0);
        WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
+       WREG32(R_000330_D1VGA_CONTROL, 0);
+       WREG32(R_000338_D2VGA_CONTROL, 0);
 }
 
 void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
@@ -404,14 +403,14 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
        WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control);
        mdelay(1);
        /* Restore video state */
+       WREG32(R_000330_D1VGA_CONTROL, save->d1vga_control);
+       WREG32(R_000338_D2VGA_CONTROL, save->d2vga_control);
        WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1);
        WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1);
        WREG32(R_006080_D1CRTC_CONTROL, save->d1crtc_control);
        WREG32(R_006880_D2CRTC_CONTROL, save->d2crtc_control);
        WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0);
        WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
-       WREG32(R_000330_D1VGA_CONTROL, save->d1vga_control);
-       WREG32(R_000338_D2VGA_CONTROL, save->d2vga_control);
        WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
 }
 
index 6ff6c20f1e78d261d6543b4e633c53aa0f80325c..fbab6846ae645a0a1324ecf3f23c939aae78289a 100644 (file)
@@ -19,7 +19,9 @@
 #include <linux/completion.h>
 #include <linux/platform_device.h>
 #include <linux/i2c-pnx.h>
+#include <linux/io.h>
 #include <mach/hardware.h>
+#include <mach/i2c.h>
 #include <asm/irq.h>
 #include <asm/uaccess.h>
 
@@ -54,6 +56,9 @@ static inline void i2c_pnx_arm_timer(struct i2c_adapter *adap)
        struct timer_list *timer = &data->mif.timer;
        int expires = I2C_PNX_TIMEOUT / (1000 / HZ);
 
+       if (expires <= 1)
+               expires = 2;
+
        del_timer_sync(timer);
 
        dev_dbg(&adap->dev, "Timer armed at %lu plus %u jiffies.\n",
@@ -645,7 +650,7 @@ static int __devinit i2c_pnx_probe(struct platform_device *pdev)
        return 0;
 
 out_irq:
-       free_irq(alg_data->irq, alg_data);
+       free_irq(alg_data->irq, i2c_pnx->adapter);
 out_clock:
        i2c_pnx->set_clock_stop(pdev);
 out_unmap:
@@ -664,7 +669,7 @@ static int __devexit i2c_pnx_remove(struct platform_device *pdev)
        struct i2c_adapter *adap = i2c_pnx->adapter;
        struct i2c_pnx_algo_data *alg_data = adap->algo_data;
 
-       free_irq(alg_data->irq, alg_data);
+       free_irq(alg_data->irq, i2c_pnx->adapter);
        i2c_del_adapter(adap);
        i2c_pnx->set_clock_stop(pdev);
        iounmap((void *)alg_data->ioaddr);
index aa96bd2d27ead9452d08da3bb62065468f1822f2..a0702f36a72fd065651e78c80ab4076eff77d4b8 100644 (file)
@@ -257,6 +257,7 @@ static DEVICE_ATTR(operating_mode, S_IWUSR | S_IRUGO,
 
 static ssize_t __tsl2550_show_lux(struct i2c_client *client, char *buf)
 {
+       struct tsl2550_data *data = i2c_get_clientdata(client);
        u8 ch0, ch1;
        int ret;
 
@@ -274,6 +275,8 @@ static ssize_t __tsl2550_show_lux(struct i2c_client *client, char *buf)
        ret = tsl2550_calculate_lux(ch0, ch1);
        if (ret < 0)
                return ret;
+       if (data->operating_mode == 1)
+               ret *= 5;
 
        return sprintf(buf, "%d\n", ret);
 }
index 8d80fceca6a4aaf8ff9ff1a24851687e684f579e..296504355142f2f7bb5f3338a30fe79d37f36b8f 100644 (file)
@@ -762,6 +762,7 @@ int i2c_del_adapter(struct i2c_adapter *adap)
 {
        int res = 0;
        struct i2c_adapter *found;
+       struct i2c_client *client, *next;
 
        /* First make sure that this adapter was ever added */
        mutex_lock(&core_lock);
@@ -781,6 +782,16 @@ int i2c_del_adapter(struct i2c_adapter *adap)
        if (res)
                return res;
 
+       /* Remove devices instantiated from sysfs */
+       list_for_each_entry_safe(client, next, &userspace_devices, detected) {
+               if (client->adapter == adap) {
+                       dev_dbg(&adap->dev, "Removing %s at 0x%x\n",
+                               client->name, client->addr);
+                       list_del(&client->detected);
+                       i2c_unregister_device(client);
+               }
+       }
+
        /* Detach any active clients. This can't fail, thus we do not
           check the returned value. */
        res = device_for_each_child(&adap->dev, NULL, __unregister_client);
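
The sysfs-cleanup loop added above has to use the _safe list iterator
because it unlinks entries while walking the list. A userspace sketch of
the same remove-while-iterating pattern on a hand-rolled singly linked
list (struct client and its fields are illustrative stand-ins, not the
kernel's types):

#include <stdlib.h>

struct client {
        int adapter_id;
        struct client *next;
};

/* Unlink and free every client bound to 'adap'. Walking through a
 * pointer-to-pointer keeps the traversal valid as nodes disappear,
 * which is the job list_for_each_entry_safe does in the kernel. */
static void remove_clients_of(struct client **head, int adap)
{
        struct client **pp = head;

        while (*pp) {
                struct client *c = *pp;

                if (c->adapter_id == adap) {
                        *pp = c->next;  /* unlink first */
                        free(c);        /* then release */
                } else {
                        pp = &c->next;
                }
        }
}

int main(void)
{
        struct client *head = NULL;

        for (int i = 0; i < 4; i++) {
                struct client *c = malloc(sizeof(*c));

                c->adapter_id = i % 2;  /* two adapters: 0 and 1 */
                c->next = head;
                head = c;
        }
        remove_clients_of(&head, 1);    /* drop adapter 1's clients */
        remove_clients_of(&head, 0);    /* and the rest */
        return 0;
}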
index d3440b5010a5830fc936383b4a65ffd66b4163d6..6e7ae2b6cfc64c018131349e4474f04506b6aa36 100644 (file)
@@ -162,7 +162,7 @@ static int ide_cmd_ioctl(ide_drive_t *drive, unsigned long arg)
        if (tf->command == ATA_CMD_SET_FEATURES &&
            tf->feature == SETFEATURES_XFER &&
            tf->nsect >= XFER_SW_DMA_0) {
-               xfer_rate = ide_find_dma_mode(drive, XFER_UDMA_6);
+               xfer_rate = ide_find_dma_mode(drive, tf->nsect);
                if (xfer_rate != tf->nsect) {
                        err = -EINVAL;
                        goto abort;
index 96a2959ce877e10905acfcc84cb1897e874d7143..7c544f7c74c4f07ad735ce35b92a58b28a426d37 100644 (file)
@@ -260,15 +260,12 @@ static int ieee802154_fake_close(struct net_device *dev)
 static netdev_tx_t ieee802154_fake_xmit(struct sk_buff *skb,
                                              struct net_device *dev)
 {
-       skb->iif = dev->ifindex;
-       skb->dev = dev;
        dev->stats.tx_packets++;
        dev->stats.tx_bytes += skb->len;
 
-       dev->trans_start = jiffies;
-
        /* FIXME: do hardware work here ... */
 
+       dev_kfree_skb(skb);
        return NETDEV_TX_OK;
 }
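
The dev_kfree_skb() added above closes a leak: a driver returning
NETDEV_TX_OK has accepted ownership of the skb and must free it once it
is done, even when no real hardware consumes it. A userspace sketch of
that ownership contract (struct buf and fake_xmit are hypothetical
stand-ins, not kernel types):

#include <stdlib.h>

struct buf {
        unsigned char *data;
        size_t len;
};

/* Returning 0 means "consumed": this function now owns the buffer and
 * frees it, mirroring what NETDEV_TX_OK promises the network stack. */
static int fake_xmit(struct buf *b)
{
        /* ... hardware work would go here ... */
        free(b->data);
        free(b);
        return 0;
}

int main(void)
{
        struct buf *b = malloc(sizeof(*b));

        b->len = 64;
        b->data = malloc(b->len);
        return fake_xmit(b);    /* caller must not touch b afterwards */
}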
 
index faed794cf75afcf50b8312395b5a01e7b5174a8d..a6624ad252c54a2f535e8e710b214d904ba5b731 100644 (file)
@@ -5481,7 +5481,7 @@ HFCmulti_init(void)
                if (err) {
                        printk(KERN_ERR "error registering embedded driver: "
                                "%x\n", err);
-                       return -err;
+                       return err;
                }
                HFC_cnt++;
                printk(KERN_INFO "%d devices registered\n", HFC_cnt);
index 2d14b64202a39d7b8745c97e0f2372b56e318aca..642d5aaf53cece9819e60c79b799c22cfdf71b1d 100644 (file)
@@ -1535,10 +1535,8 @@ static int isdn_ppp_mp_bundle_array_init(void)
        int sz = ISDN_MAX_CHANNELS*sizeof(ippp_bundle);
        if( (isdn_ppp_bundle_arr = kzalloc(sz, GFP_KERNEL)) == NULL )
                return -ENOMEM;
-       for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
+       for( i = 0; i < ISDN_MAX_CHANNELS; i++ )
                spin_lock_init(&isdn_ppp_bundle_arr[i].lock);
-               skb_queue_head_init(&isdn_ppp_bundle_arr[i].frags);
-       }
        return 0;
 }
 
@@ -1571,7 +1569,7 @@ static int isdn_ppp_mp_init( isdn_net_local * lp, ippp_bundle * add_to )
                if ((lp->netdev->pb = isdn_ppp_mp_bundle_alloc()) == NULL)
                        return -ENOMEM;
                lp->next = lp->last = lp;       /* nobody else in a queue */
-               skb_queue_head_init(&lp->netdev->pb->frags);
+               lp->netdev->pb->frags = NULL;
                lp->netdev->pb->frames = 0;
                lp->netdev->pb->seq = UINT_MAX;
        }
@@ -1583,29 +1581,28 @@ static int isdn_ppp_mp_init( isdn_net_local * lp, ippp_bundle * add_to )
 
 static u32 isdn_ppp_mp_get_seq( int short_seq, 
                                        struct sk_buff * skb, u32 last_seq );
-static void isdn_ppp_mp_discard(ippp_bundle *mp, struct sk_buff *from,
-                               struct sk_buff *to);
-static void isdn_ppp_mp_reassembly(isdn_net_dev *net_dev, isdn_net_local *lp,
-                                  struct sk_buff *from, struct sk_buff *to,
-                                  u32 lastseq);
-static void isdn_ppp_mp_free_skb(ippp_bundle *mp, struct sk_buff *skb);
+static struct sk_buff * isdn_ppp_mp_discard( ippp_bundle * mp,
+                       struct sk_buff * from, struct sk_buff * to );
+static void isdn_ppp_mp_reassembly( isdn_net_dev * net_dev, isdn_net_local * lp,
+                               struct sk_buff * from, struct sk_buff * to );
+static void isdn_ppp_mp_free_skb( ippp_bundle * mp, struct sk_buff * skb );
 static void isdn_ppp_mp_print_recv_pkt( int slot, struct sk_buff * skb );
 
 static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp, 
-                               struct sk_buff *skb)
+                                                       struct sk_buff *skb)
 {
-       struct sk_buff *newfrag, *frag, *start, *nextf;
-       u32 newseq, minseq, thisseq;
-       isdn_mppp_stats *stats;
        struct ippp_struct *is;
+       isdn_net_local * lpq;
+       ippp_bundle * mp;
+       isdn_mppp_stats * stats;
+       struct sk_buff * newfrag, * frag, * start, *nextf;
+       u32 newseq, minseq, thisseq;
        unsigned long flags;
-       isdn_net_local *lpq;
-       ippp_bundle *mp;
        int slot;
 
        spin_lock_irqsave(&net_dev->pb->lock, flags);
-       mp = net_dev->pb;
-       stats = &mp->stats;
+       mp = net_dev->pb;
+       stats = &mp->stats;
        slot = lp->ppp_slot;
        if (slot < 0 || slot >= ISDN_MAX_CHANNELS) {
                printk(KERN_ERR "%s: lp->ppp_slot(%d)\n",
@@ -1616,19 +1613,20 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
                return;
        }
        is = ippp_table[slot];
-       if (++mp->frames > stats->max_queue_len)
+       if( ++mp->frames > stats->max_queue_len )
                stats->max_queue_len = mp->frames;
-
+       
        if (is->debug & 0x8)
                isdn_ppp_mp_print_recv_pkt(lp->ppp_slot, skb);
 
-       newseq = isdn_ppp_mp_get_seq(is->mpppcfg & SC_IN_SHORT_SEQ,
-                                    skb, is->last_link_seqno);
+       newseq = isdn_ppp_mp_get_seq(is->mpppcfg & SC_IN_SHORT_SEQ, 
+                                               skb, is->last_link_seqno);
+
 
        /* if this packet seq # is less than last already processed one,
         * toss it right away, but check for sequence start case first 
         */
-       if (mp->seq > MP_LONGSEQ_MAX && (newseq & MP_LONGSEQ_MAXBIT)) {
+       if( mp->seq > MP_LONGSEQ_MAX && (newseq & MP_LONGSEQ_MAXBIT) ) {
                mp->seq = newseq;       /* the first packet: required for
                                         * rfc1990 non-compliant clients --
                                         * prevents constant packet toss */
@@ -1638,7 +1636,7 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
                spin_unlock_irqrestore(&mp->lock, flags);
                return;
        }
-
+       
        /* find the minimum received sequence number over all links */
        is->last_link_seqno = minseq = newseq;
        for (lpq = net_dev->queue;;) {
@@ -1659,31 +1657,22 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
                                         * packets */
        newfrag = skb;
 
-       /* Insert new fragment into the proper sequence slot.  */
-       skb_queue_walk(&mp->frags, frag) {
-               if (MP_SEQ(frag) == newseq) {
-                       isdn_ppp_mp_free_skb(mp, newfrag);
-                       newfrag = NULL;
-                       break;
-               }
-               if (MP_LT(newseq, MP_SEQ(frag))) {
-                       __skb_queue_before(&mp->frags, frag, newfrag);
-                       newfrag = NULL;
-                       break;
-               }
-       }
-       if (newfrag)
-               __skb_queue_tail(&mp->frags, newfrag);
+       /* if this new fragment is before the first one, then enqueue it now. */
+       if ((frag = mp->frags) == NULL || MP_LT(newseq, MP_SEQ(frag))) {
+               newfrag->next = frag;
+               mp->frags = frag = newfrag;
+               newfrag = NULL;
+       }
 
-       frag = skb_peek(&mp->frags);
-       start = ((MP_FLAGS(frag) & MP_BEGIN_FRAG) &&
-                (MP_SEQ(frag) == mp->seq)) ? frag : NULL;
-       if (!start)
-               goto check_overflow;
+       start = MP_FLAGS(frag) & MP_BEGIN_FRAG &&
+                               MP_SEQ(frag) == mp->seq ? frag : NULL;
 
-       /* main fragment traversing loop
+       /* 
+        * main fragment traversing loop
         *
         * try to accomplish several tasks:
+        * - insert new fragment into the proper sequence slot (once that's done
+        *   newfrag will be set to NULL)
         * - reassemble any complete fragment sequence (non-null 'start'
         *   indicates there is a contiguous sequence present)
         * - discard any incomplete sequences that are below minseq -- due
@@ -1692,46 +1681,71 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
         *   come to complete such sequence and it should be discarded
         *
         * loop completes when we accomplished the following tasks:
+        * - new fragment is inserted in the proper sequence ('newfrag' is 
+        *   set to NULL)
         * - we hit a gap in the sequence, so no reassembly/processing is 
         *   possible ('start' would be set to NULL)
         *
         * algorithm for this code is derived from code in the book
         * 'PPP Design And Debugging' by James Carlson (Addison-Wesley)
         */
-       skb_queue_walk_safe(&mp->frags, frag, nextf) {
-               thisseq = MP_SEQ(frag);
-
-               /* check for misplaced start */
-               if (start != frag && (MP_FLAGS(frag) & MP_BEGIN_FRAG)) {
-                       printk(KERN_WARNING"isdn_mppp(seq %d): new "
-                              "BEGIN flag with no prior END", thisseq);
-                       stats->seqerrs++;
-                       stats->frame_drops++;
-                       isdn_ppp_mp_discard(mp, start, frag);
-                       start = frag;
-               } else if (MP_LE(thisseq, minseq)) {            
-                       if (MP_FLAGS(frag) & MP_BEGIN_FRAG)
+       while (start != NULL || newfrag != NULL) {
+
+               thisseq = MP_SEQ(frag);
+               nextf = frag->next;
+
+               /* drop any duplicate fragments */
+               if (newfrag != NULL && thisseq == newseq) {
+                       isdn_ppp_mp_free_skb(mp, newfrag);
+                       newfrag = NULL;
+               }
+
+               /* insert new fragment before next element if possible. */
+               if (newfrag != NULL && (nextf == NULL || 
+                                               MP_LT(newseq, MP_SEQ(nextf)))) {
+                       newfrag->next = nextf;
+                       frag->next = nextf = newfrag;
+                       newfrag = NULL;
+               }
+
+               if (start != NULL) {
+                       /* check for misplaced start */
+                       if (start != frag && (MP_FLAGS(frag) & MP_BEGIN_FRAG)) {
+                               printk(KERN_WARNING"isdn_mppp(seq %d): new "
+                                     "BEGIN flag with no prior END", thisseq);
+                               stats->seqerrs++;
+                               stats->frame_drops++;
+                               start = isdn_ppp_mp_discard(mp, start,frag);
+                               nextf = frag->next;
+                       }
+               } else if (MP_LE(thisseq, minseq)) {            
+                       if (MP_FLAGS(frag) & MP_BEGIN_FRAG)
                                start = frag;
-                       else {
+                       else {
                                if (MP_FLAGS(frag) & MP_END_FRAG)
-                                       stats->frame_drops++;
-                               __skb_unlink(skb, &mp->frags);
+                                       stats->frame_drops++;
+                               if( mp->frags == frag )
+                                       mp->frags = nextf;      
                                isdn_ppp_mp_free_skb(mp, frag);
+                               frag = nextf;
                                continue;
-                       }
+                       }
                }
-
-               /* if we have end fragment, then we have full reassembly
-                * sequence -- reassemble and process packet now
+               
+               /* if start is non-null and we have end fragment, then
+                * we have full reassembly sequence -- reassemble 
+                * and process packet now
                 */
-               if (MP_FLAGS(frag) & MP_END_FRAG) {
-                       minseq = mp->seq = (thisseq+1) & MP_LONGSEQ_MASK;
-                       /* Reassemble the packet then dispatch it */
-                       isdn_ppp_mp_reassembly(net_dev, lp, start, frag, thisseq);
+               if (start != NULL && (MP_FLAGS(frag) & MP_END_FRAG)) {
+                       minseq = mp->seq = (thisseq+1) & MP_LONGSEQ_MASK;
+                       /* Reassemble the packet then dispatch it */
+                       isdn_ppp_mp_reassembly(net_dev, lp, start, nextf);
+      
+                       start = NULL;
+                       frag = NULL;
 
-                       start = NULL;
-                       frag = NULL;
-               }
+                       mp->frags = nextf;
+               }
 
                /* check if need to update start pointer: if we just
                 * reassembled the packet and sequence is contiguous
@@ -1742,25 +1756,26 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
                 * below low watermark and set start to the next frag or
                 * clear start ptr.
                 */ 
-               if (nextf != (struct sk_buff *)&mp->frags && 
+               if (nextf != NULL && 
                    ((thisseq+1) & MP_LONGSEQ_MASK) == MP_SEQ(nextf)) {
-                       /* if we just reassembled and the next one is here, 
-                        * then start another reassembly.
-                        */
-                       if (frag == NULL) {
+                       /* if we just reassembled and the next one is here, 
+                        * then start another reassembly. */
+
+                       if (frag == NULL) {
                                if (MP_FLAGS(nextf) & MP_BEGIN_FRAG)
-                                       start = nextf;
-                               else {
-                                       printk(KERN_WARNING"isdn_mppp(seq %d):"
-                                              " END flag with no following "
-                                              "BEGIN", thisseq);
+                                       start = nextf;
+                               else
+                               {
+                                       printk(KERN_WARNING"isdn_mppp(seq %d):"
+                                               " END flag with no following "
+                                               "BEGIN", thisseq);
                                        stats->seqerrs++;
                                }
                        }
-               } else {
-                       if (nextf != (struct sk_buff *)&mp->frags &&
-                           frag != NULL &&
-                           MP_LT(thisseq, minseq)) {
+
+               } else {
+                       if ( nextf != NULL && frag != NULL &&
+                                               MP_LT(thisseq, minseq)) {
                                /* we've got a break in the sequence
                                 * and we are not at the end yet
                                 * and we did not just reassemble
@@ -1769,39 +1784,41 @@ static void isdn_ppp_mp_receive(isdn_net_dev * net_dev, isdn_net_local * lp,
                                 * discard all the frames below low watermark 
                                 * and start over */
                                stats->frame_drops++;
-                               isdn_ppp_mp_discard(mp, start, nextf);
+                               mp->frags = isdn_ppp_mp_discard(mp,start,nextf);
                        }
                        /* break in the sequence, no reassembly */
-                       start = NULL;
-               }
-               if (!start)
-                       break;
-       }
-
-check_overflow:
+                       start = NULL;
+               }
+                               
+               frag = nextf;
+       }       /* while -- main loop */
+       
+       if (mp->frags == NULL)
+               mp->frags = frag;
+               
        /* rather straightforward way to deal with (not very) possible
-        * queue overflow
-        */
+        * queue overflow */
        if (mp->frames > MP_MAX_QUEUE_LEN) {
                stats->overflows++;
-               skb_queue_walk_safe(&mp->frags, frag, nextf) {
-                       if (mp->frames <= MP_MAX_QUEUE_LEN)
-                               break;
-                       __skb_unlink(frag, &mp->frags);
-                       isdn_ppp_mp_free_skb(mp, frag);
+               while (mp->frames > MP_MAX_QUEUE_LEN) {
+                       frag = mp->frags->next;
+                       isdn_ppp_mp_free_skb(mp, mp->frags);
+                       mp->frags = frag;
                }
        }
        spin_unlock_irqrestore(&mp->lock, flags);
 }
 
-static void isdn_ppp_mp_cleanup(isdn_net_local *lp)
+static void isdn_ppp_mp_cleanup( isdn_net_local * lp )
 {
-       struct sk_buff *skb, *tmp;
-
-       skb_queue_walk_safe(&lp->netdev->pb->frags, skb, tmp) {
-               __skb_unlink(skb, &lp->netdev->pb->frags);
-               isdn_ppp_mp_free_skb(lp->netdev->pb, skb);
-       }
+       struct sk_buff * frag = lp->netdev->pb->frags;
+       struct sk_buff * nextfrag;
+       while( frag ) {
+               nextfrag = frag->next;
+               isdn_ppp_mp_free_skb(lp->netdev->pb, frag);
+               frag = nextfrag;
+       }
+       lp->netdev->pb->frags = NULL;
 }
 
 static u32 isdn_ppp_mp_get_seq( int short_seq, 
@@ -1838,115 +1855,72 @@ static u32 isdn_ppp_mp_get_seq( int short_seq,
        return seq;
 }
 
-static void isdn_ppp_mp_discard(ippp_bundle *mp, struct sk_buff *from,
-                               struct sk_buff *to)
+struct sk_buff * isdn_ppp_mp_discard( ippp_bundle * mp,
+                       struct sk_buff * from, struct sk_buff * to )
 {
-       if (from) {
-               struct sk_buff *skb, *tmp;
-               int freeing = 0;
-
-               skb_queue_walk_safe(&mp->frags, skb, tmp) {
-                       if (skb == to)
-                               break;
-                       if (skb == from)
-                               freeing = 1;
-                       if (!freeing)
-                               continue;
-                       __skb_unlink(skb, &mp->frags);
-                       isdn_ppp_mp_free_skb(mp, skb);
+       if( from )
+               while (from != to) {
+                       struct sk_buff * next = from->next;
+                       isdn_ppp_mp_free_skb(mp, from);
+                       from = next;
                }
-       }
-}
-
-static unsigned int calc_tot_len(struct sk_buff_head *queue,
-                                struct sk_buff *from, struct sk_buff *to)
-{
-       unsigned int tot_len = 0;
-       struct sk_buff *skb;
-       int found_start = 0;
-
-       skb_queue_walk(queue, skb) {
-               if (skb == from)
-                       found_start = 1;
-               if (!found_start)
-                       continue;
-               tot_len += skb->len - MP_HEADER_LEN;
-               if (skb == to)
-                       break;
-       }
-       return tot_len;
+       return from;
 }
 
-/* Reassemble packet using fragments in the reassembly queue from
- * 'from' until 'to', inclusive.
- */
-static void isdn_ppp_mp_reassembly(isdn_net_dev *net_dev, isdn_net_local *lp,
-                                  struct sk_buff *from, struct sk_buff *to,
-                                  u32 lastseq)
+void isdn_ppp_mp_reassembly( isdn_net_dev * net_dev, isdn_net_local * lp,
+                               struct sk_buff * from, struct sk_buff * to )
 {
-       ippp_bundle *mp = net_dev->pb;
-       unsigned int tot_len;
-       struct sk_buff *skb;
+       ippp_bundle * mp = net_dev->pb;
        int proto;
+       struct sk_buff * skb;
+       unsigned int tot_len;
 
        if (lp->ppp_slot < 0 || lp->ppp_slot >= ISDN_MAX_CHANNELS) {
                printk(KERN_ERR "%s: lp->ppp_slot(%d) out of range\n",
                        __func__, lp->ppp_slot);
                return;
        }
-
-       tot_len = calc_tot_len(&mp->frags, from, to);
-
-       if (MP_FLAGS(from) == (MP_BEGIN_FRAG | MP_END_FRAG)) {
-               if (ippp_table[lp->ppp_slot]->debug & 0x40)
+       if( MP_FLAGS(from) == (MP_BEGIN_FRAG | MP_END_FRAG) ) {
+               if( ippp_table[lp->ppp_slot]->debug & 0x40 )
                        printk(KERN_DEBUG "isdn_mppp: reassembly: frame %d, "
-                              "len %d\n", MP_SEQ(from), from->len);
+                                       "len %d\n", MP_SEQ(from), from->len );
                skb = from;
                skb_pull(skb, MP_HEADER_LEN);
-               __skb_unlink(skb, &mp->frags);
                mp->frames--;   
        } else {
-               struct sk_buff *walk, *tmp;
-               int found_start = 0;
+               struct sk_buff * frag;
+               int n;
 
-               if (ippp_table[lp->ppp_slot]->debug & 0x40)
-                       printk(KERN_DEBUG"isdn_mppp: reassembling frames %d "
-                              "to %d, len %d\n", MP_SEQ(from), lastseq,
-                              tot_len);
+               for(tot_len=n=0, frag=from; frag != to; frag=frag->next, n++)
+                       tot_len += frag->len - MP_HEADER_LEN;
 
-               skb = dev_alloc_skb(tot_len);
-               if (!skb)
+               if( ippp_table[lp->ppp_slot]->debug & 0x40 )
+                       printk(KERN_DEBUG"isdn_mppp: reassembling frames %d "
+                               "to %d, len %d\n", MP_SEQ(from), 
+                               (MP_SEQ(from)+n-1) & MP_LONGSEQ_MASK, tot_len );
+               if( (skb = dev_alloc_skb(tot_len)) == NULL ) {
                        printk(KERN_ERR "isdn_mppp: cannot allocate sk buff "
-                              "of size %d\n", tot_len);
-
-               found_start = 0;
-               skb_queue_walk_safe(&mp->frags, walk, tmp) {
-                       if (walk == from)
-                               found_start = 1;
-                       if (!found_start)
-                               continue;
+                                       "of size %d\n", tot_len);
+                       isdn_ppp_mp_discard(mp, from, to);
+                       return;
+               }
 
-                       if (skb) {
-                               unsigned int len = walk->len - MP_HEADER_LEN;
-                               skb_copy_from_linear_data_offset(walk, MP_HEADER_LEN,
-                                                                skb_put(skb, len),
-                                                                len);
-                       }
-                       __skb_unlink(walk, &mp->frags);
-                       isdn_ppp_mp_free_skb(mp, walk);
+               while( from != to ) {
+                       unsigned int len = from->len - MP_HEADER_LEN;
 
-                       if (walk == to)
-                               break;
+                       skb_copy_from_linear_data_offset(from, MP_HEADER_LEN,
+                                                        skb_put(skb,len),
+                                                        len);
+                       frag = from->next;
+                       isdn_ppp_mp_free_skb(mp, from);
+                       from = frag; 
                }
        }
-       if (!skb)
-               return;
-
        proto = isdn_ppp_strip_proto(skb);
        isdn_ppp_push_higher(net_dev, lp, skb, proto);
 }
 
-static void isdn_ppp_mp_free_skb(ippp_bundle *mp, struct sk_buff *skb)
+static void isdn_ppp_mp_free_skb(ippp_bundle * mp, struct sk_buff * skb)
 {
        dev_kfree_skb(skb);
        mp->frames--;
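
The reverted receive path above keeps fragments on a hand-rolled,
sequence-ordered singly linked list rather than an sk_buff_head. A
userspace sketch of the two insert cases it handles, dropping duplicates
and inserting in order (plain '<' stands in for the wraparound-aware
MP_LT/MP_SEQ comparisons, and the types are simplified):

#include <stdio.h>
#include <stdlib.h>

struct frag {
        unsigned int seq;
        struct frag *next;
};

/* Insert 'nf' so the list stays ordered by seq; free it if a fragment
 * with the same sequence number is already queued (a duplicate). */
static void insert_frag(struct frag **head, struct frag *nf)
{
        struct frag **pp = head;

        while (*pp && (*pp)->seq < nf->seq)
                pp = &(*pp)->next;
        if (*pp && (*pp)->seq == nf->seq) {
                free(nf);               /* duplicate: drop it */
                return;
        }
        nf->next = *pp;
        *pp = nf;
}

int main(void)
{
        unsigned int seqs[] = { 3, 1, 2, 2, 5 };
        struct frag *head = NULL;

        for (size_t i = 0; i < sizeof(seqs) / sizeof(seqs[0]); i++) {
                struct frag *f = malloc(sizeof(*f));

                f->seq = seqs[i];
                insert_frag(&head, f);
        }
        while (head) {                  /* prints 1 2 3 5, then frees */
                struct frag *n = head->next;

                printf("%u ", head->seq);
                free(head);
                head = n;
        }
        printf("\n");
        return 0;
}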
index 655474b29e21ca23347cf15d7dc707a644a0011e..abd4791acb0ec0a426283d1174b4e088a1b497fc 100644 (file)
@@ -64,7 +64,7 @@ void ir_input_init(struct input_dev *dev, struct ir_input_state *ir,
 
        ir->ir_type = ir_type;
 
-       memset(ir->ir_codes, sizeof(ir->ir_codes), 0);
+       memset(ir->ir_codes, 0, sizeof(ir->ir_codes));
 
        /*
         * FIXME: This is a temporary workaround to use the new IR tables
index f65591fb7cec36d720d9717b8503c513cbcd3082..2a53dd096eef86b678fff1939d58f9b387ea3fce 100644 (file)
@@ -663,6 +663,14 @@ static struct zl10353_config cxusb_zl10353_xc3028_config = {
        .parallel_ts = 1,
 };
 
+static struct zl10353_config cxusb_zl10353_xc3028_config_no_i2c_gate = {
+       .demod_address = 0x0f,
+       .if2 = 45600,
+       .no_tuner = 1,
+       .parallel_ts = 1,
+       .disable_i2c_gate_ctrl = 1,
+};
+
 static struct mt352_config cxusb_mt352_xc3028_config = {
        .demod_address = 0x0f,
        .if2 = 4560,
@@ -894,7 +902,7 @@ static int cxusb_dualdig4_frontend_attach(struct dvb_usb_adapter *adap)
        cxusb_bluebird_gpio_pulse(adap->dev, 0x02, 1);
 
        if ((adap->fe = dvb_attach(zl10353_attach,
-                                  &cxusb_zl10353_xc3028_config,
+                                  &cxusb_zl10353_xc3028_config_no_i2c_gate,
                                   &adap->dev->i2c_adap)) == NULL)
                return -EIO;
 
index 8c1aed77ea30b9af7c281b13d079d30923fe7350..85a222c4eaa0606fb7f64da99ba43ce99b3516eb 100644 (file)
@@ -4,7 +4,7 @@
 
 config SMS_SIANO_MDTV
        tristate "Siano SMS1xxx based MDTV receiver"
-       depends on DVB_CORE && INPUT
+       depends on DVB_CORE && INPUT && HAS_DMA
        ---help---
          Choose Y or M here if you have MDTV receiver with a Siano chipset.
 
index c3f579de6e7171b1c7dce048b82ce526b0ba9c59..c6cf1166186824b1c062350617c14c7edef0ed01 100644 (file)
@@ -181,12 +181,10 @@ static void gemtek_pci_mute(struct gemtek_pci *card)
 
 static void gemtek_pci_unmute(struct gemtek_pci *card)
 {
-       mutex_lock(&card->lock);
        if (card->mute) {
                gemtek_pci_setfrequency(card, card->current_frequency);
                card->mute = false;
        }
-       mutex_unlock(&card->lock);
 }
 
 static int gemtek_pci_getsignal(struct gemtek_pci *card)
index c015da813dda1fa159a824ca91d6cf2a5477322e..d14cfb200ed0f83674ee106f1e9c3200cbf81697 100644 (file)
@@ -1426,7 +1426,6 @@ static __init int vpif_probe(struct platform_device *pdev)
        struct vpif_display_config *config;
        int i, j = 0, k, q, m, err = 0;
        struct i2c_adapter *i2c_adap;
-       struct vpif_config *config;
        struct common_obj *common;
        struct channel_obj *ch;
        struct video_device *vfd;
index bdb249bd9d5d272ae534dc7ed036ece6efe858e6..c0fd5c6feeac987a8d7cfabeb975dbc9e7ad0663 100644 (file)
@@ -1584,8 +1584,8 @@ struct em28xx_board em28xx_boards[] = {
        [EM2870_BOARD_REDDO_DVB_C_USB_BOX] = {
                .name          = "Reddo DVB-C USB TV Box",
                .tuner_type    = TUNER_ABSENT,
+               .tuner_gpio    = reddo_dvb_c_usb_box,
                .has_dvb       = 1,
-               .dvb_gpio      = reddo_dvb_c_usb_box,
        },
 };
 const unsigned int em28xx_bcount = ARRAY_SIZE(em28xx_boards);
index 5f37952c75cf77962fce4bf4f3e665afcf8cf8fe..72802291e81238f5f1797cd7fd6c2336255803d1 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/moduleparam.h>
 #include <linux/mutex.h>
 #include <linux/platform_device.h>
+#include <linux/sched.h>
 #include <linux/time.h>
 #include <linux/version.h>
 #include <linux/videodev2.h>
index dff2e5e2d8c6bc7d879577750c9831f859445dc9..7db82bdf6f31bc9421308b21872fb2da39813da0 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/clk.h>
 #include <linux/vmalloc.h>
 #include <linux/interrupt.h>
+#include <linux/sched.h>
 
 #include <media/v4l2-common.h>
 #include <media/v4l2-dev.h>
index 2f78b4f263f5861008bedb906809155b95ef3c46..9c8b7c7b89ee6de7a6c5199636feb2d49194947a 100644 (file)
@@ -31,6 +31,7 @@
 #include <linux/platform_device.h>
 #include <linux/videodev2.h>
 #include <linux/pm_runtime.h>
+#include <linux/sched.h>
 
 #include <media/v4l2-common.h>
 #include <media/v4l2-dev.h>
@@ -1723,11 +1724,12 @@ static int __devinit sh_mobile_ceu_probe(struct platform_device *pdev)
 
        err = soc_camera_host_register(&pcdev->ici);
        if (err)
-               goto exit_free_irq;
+               goto exit_free_clk;
 
        return 0;
 
-exit_free_irq:
+exit_free_clk:
+       pm_runtime_disable(&pdev->dev);
        free_irq(pcdev->irq, pcdev);
 exit_release_mem:
        if (platform_get_resource(pdev, IORESOURCE_MEM, 1))
@@ -1747,6 +1749,7 @@ static int __devexit sh_mobile_ceu_remove(struct platform_device *pdev)
                                        struct sh_mobile_ceu_dev, ici);
 
        soc_camera_host_unregister(soc_host);
+       pm_runtime_disable(&pdev->dev);
        free_irq(pcdev->irq, pcdev);
        if (platform_get_resource(pdev, IORESOURCE_MEM, 1))
                dma_release_declared_memory(&pdev->dev);
index 36e617bd13c73778d6c945ef78f95491bf305f08..95fdeb23c2c1455f2930c4645369d266f62bc74e 100644 (file)
@@ -1097,6 +1097,13 @@ static int default_s_crop(struct soc_camera_device *icd, struct v4l2_crop *a)
        return v4l2_subdev_call(sd, video, s_crop, a);
 }
 
+static void soc_camera_device_init(struct device *dev, void *pdata)
+{
+       dev->platform_data      = pdata;
+       dev->bus                = &soc_camera_bus_type;
+       dev->release            = dummy_release;
+}
+
 int soc_camera_host_register(struct soc_camera_host *ici)
 {
        struct soc_camera_host *ix;
@@ -1158,6 +1165,7 @@ void soc_camera_host_unregister(struct soc_camera_host *ici)
 
        list_for_each_entry(icd, &devices, list) {
                if (icd->iface == ici->nr) {
+                       void *pdata = icd->dev.platform_data;
                        /* The bus->remove will be called */
                        device_unregister(&icd->dev);
                        /*
@@ -1169,6 +1177,7 @@ void soc_camera_host_unregister(struct soc_camera_host *ici)
                         * device private data.
                         */
                        memset(&icd->dev, 0, sizeof(icd->dev));
+                       soc_camera_device_init(&icd->dev, pdata);
                }
        }
 
@@ -1200,10 +1209,7 @@ static int soc_camera_device_register(struct soc_camera_device *icd)
                 * man, stay reasonable... */
                return -ENOMEM;
 
-       icd->devnum = num;
-       icd->dev.bus = &soc_camera_bus_type;
-
-       icd->dev.release        = dummy_release;
+       icd->devnum             = num;
        icd->use_count          = 0;
        icd->host_priv          = NULL;
        mutex_init(&icd->video_lock);
@@ -1311,12 +1317,13 @@ static int __devinit soc_camera_pdrv_probe(struct platform_device *pdev)
        icd->iface = icl->bus_id;
        icd->pdev = &pdev->dev;
        platform_set_drvdata(pdev, icd);
-       icd->dev.platform_data = icl;
 
        ret = soc_camera_device_register(icd);
        if (ret < 0)
                goto escdevreg;
 
+       soc_camera_device_init(&icd->dev, icl);
+
        icd->user_width         = DEFAULT_WIDTH;
        icd->user_height        = DEFAULT_HEIGHT;
 
index 635ffc7b03910582b215b3399f27f4eef912b181..c3065c4bcba9fb6019fb664de98f075423348994 100644 (file)
@@ -19,6 +19,7 @@
 #include <linux/mm.h>
 #include <linux/pagemap.h>
 #include <linux/dma-mapping.h>
+#include <linux/sched.h>
 #include <media/videobuf-dma-contig.h>
 
 struct videobuf_dma_contig_memory {
index db39f4a52f5311b56f73bf1a54d7c41808cb1174..2cb2736d65aa41bf959c62f6475d0b5ee57d8f15 100644 (file)
@@ -158,6 +158,7 @@ static ssize_t at24_eeprom_read(struct at24_data *at24, char *buf,
        struct i2c_msg msg[2];
        u8 msgbuf[2];
        struct i2c_client *client;
+       unsigned long timeout, read_time;
        int status, i;
 
        memset(msg, 0, sizeof(msg));
@@ -183,47 +184,60 @@ static ssize_t at24_eeprom_read(struct at24_data *at24, char *buf,
        if (count > io_limit)
                count = io_limit;
 
-       /* Smaller eeproms can work given some SMBus extension calls */
        if (at24->use_smbus) {
+               /* Smaller eeproms can work given some SMBus extension calls */
                if (count > I2C_SMBUS_BLOCK_MAX)
                        count = I2C_SMBUS_BLOCK_MAX;
-               status = i2c_smbus_read_i2c_block_data(client, offset,
-                               count, buf);
-               dev_dbg(&client->dev, "smbus read %zu@%d --> %d\n",
-                               count, offset, status);
-               return (status < 0) ? -EIO : status;
+       } else {
+               /*
+                * When we have a better choice than SMBus calls, use a
+                * combined I2C message. Write address; then read up to
+                * io_limit data bytes. Note that read page rollover helps us
+                * here (unlike writes). msgbuf is u8 and will cast to our
+                * needs.
+                */
+               i = 0;
+               if (at24->chip.flags & AT24_FLAG_ADDR16)
+                       msgbuf[i++] = offset >> 8;
+               msgbuf[i++] = offset;
+
+               msg[0].addr = client->addr;
+               msg[0].buf = msgbuf;
+               msg[0].len = i;
+
+               msg[1].addr = client->addr;
+               msg[1].flags = I2C_M_RD;
+               msg[1].buf = buf;
+               msg[1].len = count;
        }
 
        /*
-        * When we have a better choice than SMBus calls, use a combined
-        * I2C message. Write address; then read up to io_limit data bytes.
-        * Note that read page rollover helps us here (unlike writes).
-        * msgbuf is u8 and will cast to our needs.
+        * Reads fail if the previous write didn't complete yet. We may
+        * loop a few times until this one succeeds, waiting at least
+        * long enough for one entire page write to work.
         */
-       i = 0;
-       if (at24->chip.flags & AT24_FLAG_ADDR16)
-               msgbuf[i++] = offset >> 8;
-       msgbuf[i++] = offset;
-
-       msg[0].addr = client->addr;
-       msg[0].buf = msgbuf;
-       msg[0].len = i;
+       timeout = jiffies + msecs_to_jiffies(write_timeout);
+       do {
+               read_time = jiffies;
+               if (at24->use_smbus) {
+                       status = i2c_smbus_read_i2c_block_data(client, offset,
+                                       count, buf);
+               } else {
+                       status = i2c_transfer(client->adapter, msg, 2);
+                       if (status == 2)
+                               status = count;
+               }
+               dev_dbg(&client->dev, "read %zu@%d --> %d (%ld)\n",
+                               count, offset, status, jiffies);
 
-       msg[1].addr = client->addr;
-       msg[1].flags = I2C_M_RD;
-       msg[1].buf = buf;
-       msg[1].len = count;
+               if (status == count)
+                       return count;
 
-       status = i2c_transfer(client->adapter, msg, 2);
-       dev_dbg(&client->dev, "i2c read %zu@%d --> %d\n",
-                       count, offset, status);
+               /* REVISIT: at HZ=100, this is sloooow */
+               msleep(1);
+       } while (time_before(read_time, timeout));
 
-       if (status == 2)
-               return count;
-       else if (status >= 0)
-               return -EIO;
-       else
-               return status;
+       return -ETIMEDOUT;
 }
 
 static ssize_t at24_read(struct at24_data *at24,
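
The rewritten read path above folds the SMBus and raw-I2C variants into
one retry loop: sample the attempt time before each try, so a transfer
that starts just inside the deadline still counts, and only report a
timeout after the window has passed. A userspace sketch of that loop
shape (try_read() and all timings are hypothetical):

#include <stdio.h>
#include <time.h>
#include <unistd.h>

/* Hypothetical transfer that fails until the device's internal write
 * cycle finishes; a stand-in for the SMBus/I2C calls in the driver. */
static int try_read(int *attempts_left)
{
        return --(*attempts_left) > 0 ? -1 : 0;
}

static int read_with_timeout(int timeout_ms)
{
        struct timespec start, now;
        int attempts = 5;

        clock_gettime(CLOCK_MONOTONIC, &start);
        for (;;) {
                /* sample before trying, like read_time = jiffies */
                clock_gettime(CLOCK_MONOTONIC, &now);
                long elapsed_ms = (now.tv_sec - start.tv_sec) * 1000 +
                                  (now.tv_nsec - start.tv_nsec) / 1000000;

                if (try_read(&attempts) == 0)
                        return 0;               /* success */
                if (elapsed_ms >= timeout_ms)
                        return -1;              /* -ETIMEDOUT analogue */
                usleep(1000);                   /* msleep(1) analogue */
        }
}

int main(void)
{
        printf("read %s\n", read_with_timeout(25) == 0 ? "ok" : "timed out");
        return 0;
}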
index 705a5894a6bbae304a76b37f649ee0be474b89c4..90d168ad03b6ca60574f45c03175b845bb2033b3 100644 (file)
@@ -56,7 +56,7 @@ static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
                                clk = 255;
                        host->cclk = host->mclk / (2 * (clk + 1));
                }
-               if (host->hw_designer == 0x80)
+               if (host->hw_designer == AMBA_VENDOR_ST)
                        clk |= MCI_FCEN; /* Bug fix in ST IP block */
                clk |= MCI_CLK_ENABLE;
                /* This hasn't proven to be worthwhile */
index e19ca4bb75102448879208df00d90aece5b8503b..b2f71f79baaf1f5981aa539f8d96dbb2af627c50 100644 (file)
@@ -975,7 +975,7 @@ config ENC28J60_WRITEVERIFY
 
 config ETHOC
        tristate "OpenCores 10/100 Mbps Ethernet MAC support"
-       depends on NET_ETHERNET && HAS_IOMEM
+       depends on NET_ETHERNET && HAS_IOMEM && HAS_DMA
        select MII
        select PHYLIB
        select CRC32
index 2be49c817995d45e7802fee8eeb61b0e99cf7ad8..b25467ac895c3e8f232506030665c398fe80f73e 100644 (file)
@@ -628,15 +628,6 @@ static int ep93xx_open(struct net_device *dev)
        if (ep93xx_alloc_buffers(ep))
                return -ENOMEM;
 
-       if (is_zero_ether_addr(dev->dev_addr)) {
-               random_ether_addr(dev->dev_addr);
-               printk(KERN_INFO "%s: generated random MAC address "
-                       "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x.\n", dev->name,
-                       dev->dev_addr[0], dev->dev_addr[1],
-                       dev->dev_addr[2], dev->dev_addr[3],
-                       dev->dev_addr[4], dev->dev_addr[5]);
-       }
-
        napi_enable(&ep->napi);
 
        if (ep93xx_start_hw(dev)) {
@@ -877,6 +868,9 @@ static int ep93xx_eth_probe(struct platform_device *pdev)
        ep->mii.mdio_write = ep93xx_mdio_write;
        ep->mdc_divisor = 40;   /* Max HCLK 100 MHz, min MDIO clk 2.5 MHz.  */
 
+       if (is_zero_ether_addr(dev->dev_addr))
+               random_ether_addr(dev->dev_addr);
+
        err = register_netdev(dev);
        if (err) {
                dev_err(&pdev->dev, "Failed to register netdev\n");
index ce6f1ac25df83e6ea3b26298f707cad6a9e488a3..3f4b4300f5332c163c8f520636ff8caf58dd3e90 100644 (file)
@@ -1088,7 +1088,14 @@ static struct net_device * au1000_probe(int port_num)
                return NULL;
        }
 
-       if ((err = register_netdev(dev)) != 0) {
+       dev->base_addr = base;
+       dev->irq = irq;
+       dev->netdev_ops = &au1000_netdev_ops;
+       SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
+       dev->watchdog_timeo = ETH_TX_TIMEOUT;
+
+       err = register_netdev(dev);
+       if (err != 0) {
                printk(KERN_ERR "%s: Cannot register net device, error %d\n",
                                DRV_NAME, err);
                free_netdev(dev);
@@ -1209,12 +1216,6 @@ static struct net_device * au1000_probe(int port_num)
                aup->tx_db_inuse[i] = pDB;
        }
 
-       dev->base_addr = base;
-       dev->irq = irq;
-       dev->netdev_ops = &au1000_netdev_ops;
-       SET_ETHTOOL_OPS(dev, &au1000_ethtool_ops);
-       dev->watchdog_timeo = ETH_TX_TIMEOUT;
-
        /*
         * The boot code uses the ethernet controller, so reset it to start
         * fresh.  au1000_init() expects that the device is in reset state.
index e046943ef29dc6c9243bb84ad0c5ab5fcdde19bf..2a9132343b66135fb2487c30a20ba6ce0db786ec 100644 (file)
@@ -912,9 +912,6 @@ static irqreturn_t b44_interrupt(int irq, void *dev_id)
                        bp->istat = istat;
                        __b44_disable_ints(bp);
                        __napi_schedule(&bp->napi);
-               } else {
-                       printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
-                              dev->name);
                }
 
 irq_ack:
index df32c109b7acee953f48eef55ddd7b6100cc1e40..772f6d2489ce4047ad60c6ae6d5cd23b9adfe11b 100644 (file)
@@ -35,66 +35,16 @@ config CAN_CALC_BITTIMING
          arguments "tq", "prop_seg", "phase_seg1", "phase_seg2" and "sjw".
          If unsure, say Y.
 
-config CAN_SJA1000
-       depends on CAN_DEV && HAS_IOMEM
-       tristate "Philips SJA1000"
-       ---help---
-         Driver for the SJA1000 CAN controllers from Philips or NXP
-
-config CAN_SJA1000_ISA
-       depends on CAN_SJA1000 && ISA
-       tristate "ISA Bus based legacy SJA1000 driver"
-       ---help---
-         This driver adds legacy support for SJA1000 chips connected to
-         the ISA bus using I/O port, memory mapped or indirect access.
-
-config CAN_SJA1000_PLATFORM
-       depends on CAN_SJA1000
-       tristate "Generic Platform Bus based SJA1000 driver"
-       ---help---
-         This driver adds support for the SJA1000 chips connected to
-         the "platform bus" (Linux abstraction for directly to the
-         processor attached devices).  Which can be found on various
-         boards from Phytec (http://www.phytec.de) like the PCM027,
-         PCM038.
-
-config CAN_SJA1000_OF_PLATFORM
-       depends on CAN_SJA1000 && PPC_OF
-       tristate "Generic OF Platform Bus based SJA1000 driver"
-       ---help---
-         This driver adds support for the SJA1000 chips connected to
-         the OpenFirmware "platform bus" found on embedded systems with
-         OpenFirmware bindings, e.g. if you have a PowerPC based system
-         you may want to enable this option.
-
-config CAN_EMS_PCI
-       tristate "EMS CPC-PCI, CPC-PCIe and CPC-104P Card"
-       depends on PCI && CAN_SJA1000
-       ---help---
-         This driver is for the one, two or four channel CPC-PCI,
-         CPC-PCIe and CPC-104P cards from EMS Dr. Thomas Wuensche
-         (http://www.ems-wuensche.de).
-
-config CAN_EMS_USB
-       tristate "EMS CPC-USB/ARM7 CAN/USB interface"
-       depends on USB && CAN_DEV
-       ---help---
-         This driver is for the one channel CPC-USB/ARM7 CAN/USB interface
-         from from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de).
-
-config CAN_KVASER_PCI
-       tristate "Kvaser PCIcanx and Kvaser PCIcan PCI Cards"
-       depends on PCI && CAN_SJA1000
-       ---help---
-         This driver is for the the PCIcanx and PCIcan cards (1, 2 or
-         4 channel) from Kvaser (http://www.kvaser.com).
-
 config CAN_AT91
        tristate "Atmel AT91 onchip CAN controller"
-       depends on CAN && CAN_DEV && ARCH_AT91SAM9263
+       depends on CAN_DEV && ARCH_AT91SAM9263
        ---help---
          This is a driver for the SoC CAN controller in Atmel's AT91SAM9263.
 
+source "drivers/net/can/sja1000/Kconfig"
+
+source "drivers/net/can/usb/Kconfig"
+
 config CAN_DEBUG_DEVICES
        bool "CAN devices debugging messages"
        depends on CAN
index 564e31c9fee449ba1e56a61a8347429eb1942a59..2868fe842a41cce6d52dcc6dde863c66012c7750 100644 (file)
@@ -629,6 +629,11 @@ nla_put_failure:
        return -EMSGSIZE;
 }
 
+static size_t can_get_xstats_size(const struct net_device *dev)
+{
+       return sizeof(struct can_device_stats);
+}
+
 static int can_fill_xstats(struct sk_buff *skb, const struct net_device *dev)
 {
        struct can_priv *priv = netdev_priv(dev);
@@ -657,6 +662,7 @@ static struct rtnl_link_ops can_link_ops __read_mostly = {
        .changelink     = can_changelink,
        .get_size       = can_get_size,
        .fill_info      = can_fill_info,
+       .get_xstats_size = can_get_xstats_size,
        .fill_xstats    = can_fill_xstats,
 };
 
diff --git a/drivers/net/can/sja1000/Kconfig b/drivers/net/can/sja1000/Kconfig
new file mode 100644 (file)
index 0000000..4c67492
--- /dev/null
@@ -0,0 +1,47 @@
+menuconfig CAN_SJA1000
+       tristate "Philips/NXP SJA1000 devices"
+       depends on CAN_DEV && HAS_IOMEM
+
+if CAN_SJA1000
+
+config CAN_SJA1000_ISA
+       tristate "ISA Bus based legacy SJA1000 driver"
+       depends on ISA
+       ---help---
+         This driver adds legacy support for SJA1000 chips connected to
+         the ISA bus using I/O port, memory mapped or indirect access.
+
+config CAN_SJA1000_PLATFORM
+       tristate "Generic Platform Bus based SJA1000 driver"
+       ---help---
+         This driver adds support for the SJA1000 chips connected to
+         the "platform bus" (Linux abstraction for directly to the
+         processor attached devices).  Which can be found on various
+         boards from Phytec (http://www.phytec.de) like the PCM027,
+         PCM038.
+
+config CAN_SJA1000_OF_PLATFORM
+       tristate "Generic OF Platform Bus based SJA1000 driver"
+       depends on PPC_OF
+       ---help---
+         This driver adds support for the SJA1000 chips connected to
+         the OpenFirmware "platform bus" found on embedded systems with
+         OpenFirmware bindings, e.g. if you have a PowerPC based system
+         you may want to enable this option.
+
+config CAN_EMS_PCI
+       tristate "EMS CPC-PCI, CPC-PCIe and CPC-104P Card"
+       depends on PCI
+       ---help---
+         This driver is for the one, two or four channel CPC-PCI,
+         CPC-PCIe and CPC-104P cards from EMS Dr. Thomas Wuensche
+         (http://www.ems-wuensche.de).
+
+config CAN_KVASER_PCI
+       tristate "Kvaser PCIcanx and Kvaser PCIcan PCI Cards"
+       depends on PCI
+       ---help---
+         This driver is for the PCIcanx and PCIcan cards (1, 2 or
+         4 channel) from Kvaser (http://www.kvaser.com).
+
+endif
diff --git a/drivers/net/can/usb/Kconfig b/drivers/net/can/usb/Kconfig
new file mode 100644 (file)
index 0000000..bbc78e0
--- /dev/null
@@ -0,0 +1,10 @@
+menu "CAN USB interfaces"
+       depends on USB && CAN_DEV
+
+config CAN_EMS_USB
+       tristate "EMS CPC-USB/ARM7 CAN/USB interface"
+       ---help---
+         This driver is for the one channel CPC-USB/ARM7 CAN/USB interface
+         from EMS Dr. Thomas Wuensche (http://www.ems-wuensche.de).
+
+endmenu
index c3f75ba701b1dbfa7f6bb718237aefc07eabc3a3..0afd51d4c7a5632965855b650120bac72f34def4 100644 (file)
@@ -3,3 +3,5 @@
 #
 
 obj-$(CONFIG_CAN_EMS_USB) += ems_usb.o
+
+ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
index f86612857a73b2bb470aecb92dfe94b5c50a1cd7..6366061712f447c198b31e5c5307ede58de51c06 100644 (file)
@@ -879,7 +879,7 @@ recycle:
        pci_dma_sync_single_for_cpu(adap->pdev, dma_addr, len,
                                    PCI_DMA_FROMDEVICE);
        (*sd->pg_chunk.p_cnt)--;
-       if (!*sd->pg_chunk.p_cnt)
+       if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
                pci_unmap_page(adap->pdev,
                               sd->pg_chunk.mapping,
                               fl->alloc_size,
@@ -2088,7 +2088,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
                                    PCI_DMA_FROMDEVICE);
 
        (*sd->pg_chunk.p_cnt)--;
-       if (!*sd->pg_chunk.p_cnt)
+       if (!*sd->pg_chunk.p_cnt && sd->pg_chunk.page != fl->pg_chunk.page)
                pci_unmap_page(adap->pdev,
                               sd->pg_chunk.mapping,
                               fl->alloc_size,
index 3179521aee90152ea16a886f023eba620247ea45..e3478314c0029a9a1a24d89b8ba22812f97752c0 100644 (file)
@@ -164,16 +164,14 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
 # define EMAC_MBP_MCASTCHAN(ch)                ((ch) & 0x7)
 
 /* EMAC mac_control register */
-#define EMAC_MACCONTROL_TXPTYPE                (0x200)
-#define EMAC_MACCONTROL_TXPACEEN       (0x40)
-#define EMAC_MACCONTROL_MIIEN          (0x20)
-#define EMAC_MACCONTROL_GIGABITEN      (0x80)
-#define EMAC_MACCONTROL_GIGABITEN_SHIFT (7)
-#define EMAC_MACCONTROL_FULLDUPLEXEN   (0x1)
+#define EMAC_MACCONTROL_TXPTYPE                BIT(9)
+#define EMAC_MACCONTROL_TXPACEEN       BIT(6)
+#define EMAC_MACCONTROL_GMIIEN         BIT(5)
+#define EMAC_MACCONTROL_GIGABITEN      BIT(7)
+#define EMAC_MACCONTROL_FULLDUPLEXEN   BIT(0)
 #define EMAC_MACCONTROL_RMIISPEED_MASK BIT(15)
 
 /* GIGABIT MODE related bits */
-#define EMAC_DM646X_MACCONTORL_GMIIEN  BIT(5)
 #define EMAC_DM646X_MACCONTORL_GIG     BIT(7)
 #define EMAC_DM646X_MACCONTORL_GIGFORCE        BIT(17)
 
@@ -192,10 +190,10 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
 #define EMAC_RX_BUFFER_OFFSET_MASK     (0xFFFF)
 
 /* MAC_IN_VECTOR (0x180) register bit fields */
-#define EMAC_DM644X_MAC_IN_VECTOR_HOST_INT           (0x20000)
-#define EMAC_DM644X_MAC_IN_VECTOR_STATPEND_INT       (0x10000)
-#define EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC         (0x0100)
-#define EMAC_DM644X_MAC_IN_VECTOR_TX_INT_VEC         (0x01)
+#define EMAC_DM644X_MAC_IN_VECTOR_HOST_INT     BIT(17)
+#define EMAC_DM644X_MAC_IN_VECTOR_STATPEND_INT BIT(16)
+#define EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC   BIT(8)
+#define EMAC_DM644X_MAC_IN_VECTOR_TX_INT_VEC   BIT(0)
 
 /** NOTE:: For DM646x the IN_VECTOR has changed */
 #define EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC   BIT(EMAC_DEF_RX_CH)
@@ -203,7 +201,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
 #define EMAC_DM646X_MAC_IN_VECTOR_HOST_INT     BIT(26)
 #define EMAC_DM646X_MAC_IN_VECTOR_STATPEND_INT BIT(27)
 
-
 /* CPPI bit positions */
 #define EMAC_CPPI_SOP_BIT              BIT(31)
 #define EMAC_CPPI_EOP_BIT              BIT(30)
@@ -750,8 +747,7 @@ static void emac_update_phystatus(struct emac_priv *priv)
 
        if (priv->speed == SPEED_1000 && (priv->version == EMAC_VERSION_2)) {
                mac_control = emac_read(EMAC_MACCONTROL);
-               mac_control |= (EMAC_DM646X_MACCONTORL_GMIIEN |
-                               EMAC_DM646X_MACCONTORL_GIG |
+               mac_control |= (EMAC_DM646X_MACCONTORL_GIG |
                                EMAC_DM646X_MACCONTORL_GIGFORCE);
        } else {
                /* Clear the GIG bit and GIGFORCE bit */
@@ -2108,7 +2104,7 @@ static int emac_hw_enable(struct emac_priv *priv)
 
        /* Enable MII */
        val = emac_read(EMAC_MACCONTROL);
-       val |= (EMAC_MACCONTROL_MIIEN);
+       val |= (EMAC_MACCONTROL_GMIIEN);
        emac_write(EMAC_MACCONTROL, val);
 
        /* Enable NAPI and interrupts */
@@ -2140,9 +2136,6 @@ static int emac_poll(struct napi_struct *napi, int budget)
        u32 status = 0;
        u32 num_pkts = 0;
 
-       if (!netif_running(ndev))
-               return 0;
-
        /* Check interrupt vectors and call packet processing */
        status = emac_read(EMAC_MACINVECTOR);
 
index 3c29a20b751e831fd4d338ec316535cc7e43c093..d269a68ce3545d485a506708bb0474af3ad2bdaf 100644 (file)
 #include <linux/init.h>
 #include <linux/pci.h>
 #include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
 #include <linux/netdevice.h>
 #include <linux/etherdevice.h>
 #include <linux/mii.h>
@@ -602,6 +603,7 @@ struct nic {
        struct mem *mem;
        dma_addr_t dma_addr;
 
+       struct pci_pool *cbs_pool;
        dma_addr_t cbs_dma_addr;
        u8 adaptive_ifs;
        u8 tx_threshold;
@@ -1793,9 +1795,7 @@ static void e100_clean_cbs(struct nic *nic)
                        nic->cb_to_clean = nic->cb_to_clean->next;
                        nic->cbs_avail++;
                }
-               pci_free_consistent(nic->pdev,
-                       sizeof(struct cb) * nic->params.cbs.count,
-                       nic->cbs, nic->cbs_dma_addr);
+               pci_pool_free(nic->cbs_pool, nic->cbs, nic->cbs_dma_addr);
                nic->cbs = NULL;
                nic->cbs_avail = 0;
        }
@@ -1813,8 +1813,8 @@ static int e100_alloc_cbs(struct nic *nic)
        nic->cb_to_use = nic->cb_to_send = nic->cb_to_clean = NULL;
        nic->cbs_avail = 0;
 
-       nic->cbs = pci_alloc_consistent(nic->pdev,
-               sizeof(struct cb) * count, &nic->cbs_dma_addr);
+       nic->cbs = pci_pool_alloc(nic->cbs_pool, GFP_KERNEL,
+                                 &nic->cbs_dma_addr);
        if (!nic->cbs)
                return -ENOMEM;
 
@@ -2841,7 +2841,11 @@ static int __devinit e100_probe(struct pci_dev *pdev,
                DPRINTK(PROBE, ERR, "Cannot register net device, aborting.\n");
                goto err_out_free;
        }
-
+       nic->cbs_pool = pci_pool_create(netdev->name,
+                          nic->pdev,
+                          nic->params.cbs.count * sizeof(struct cb),
+                          sizeof(u32),
+                          0);
        DPRINTK(PROBE, INFO, "addr 0x%llx, irq %d, MAC addr %pM\n",
                (unsigned long long)pci_resource_start(pdev, use_io ? 1 : 0),
                pdev->irq, netdev->dev_addr);
@@ -2871,6 +2875,7 @@ static void __devexit e100_remove(struct pci_dev *pdev)
                unregister_netdev(netdev);
                e100_free(nic);
                pci_iounmap(pdev, nic->csr);
+               pci_pool_destroy(nic->cbs_pool);
                free_netdev(netdev);
                pci_release_regions(pdev);
                pci_disable_device(pdev);
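
The e100 change above replaces one large coherent allocation with a DMA
pool of fixed-size blocks, one per command block. A userspace sketch of
the underlying free-list idea (alignment and DMA mapping are omitted;
this is an analogy, not the kernel's dma-pool implementation):

#include <stdlib.h>

struct pool {
        size_t block_size;
        void *free_list;        /* next pointers threaded through blocks */
};

static struct pool *pool_create(size_t block_size)
{
        struct pool *p = malloc(sizeof(*p));

        if (p) {
                /* a free block must be able to hold the next pointer */
                p->block_size = block_size < sizeof(void *) ?
                                sizeof(void *) : block_size;
                p->free_list = NULL;
        }
        return p;
}

static void *pool_alloc(struct pool *p)
{
        void *b = p->free_list;

        if (b)
                p->free_list = *(void **)b;     /* pop a recycled block */
        else
                b = malloc(p->block_size);      /* grow on demand */
        return b;
}

static void pool_free(struct pool *p, void *b)
{
        *(void **)b = p->free_list;             /* push back for reuse */
        p->free_list = b;
}

int main(void)
{
        struct pool *p = pool_create(64);
        void *a, *b, *c;

        if (!p)
                return 1;
        a = pool_alloc(p);
        b = pool_alloc(p);
        pool_free(p, a);
        c = pool_alloc(p);      /* reuses a's block from the free list */
        pool_free(p, b);
        pool_free(p, c);
        /* a real pool would also release its blocks on destroy */
        return (a && b && c) ? 0 : 1;
}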
index 189dfa2d6c76b80ad4702bfb281a541ed500b783..3e187b0e4203e5aa574435a200c57d30468ab397 100644 (file)
@@ -141,6 +141,8 @@ struct e1000_info;
 #define HV_TNCRS_UPPER         PHY_REG(778, 29) /* Transmit with no CRS */
 #define HV_TNCRS_LOWER         PHY_REG(778, 30)
 
+#define E1000_FCRTV_PCH     0x05F40 /* PCH Flow Control Refresh Timer Value */
+
 /* BM PHY Copper Specific Status */
 #define BM_CS_STATUS                      17
 #define BM_CS_STATUS_LINK_UP              0x0400
index 1bf4d2a5d34f8b7421839033c7dd4cedb748bc1c..e82638ecae88dbc079adf98089b7c42a0626c4a2 100644 (file)
@@ -327,10 +327,18 @@ static int e1000_set_pauseparam(struct net_device *netdev,
 
                hw->fc.current_mode = hw->fc.requested_mode;
 
-               retval = ((hw->phy.media_type == e1000_media_type_fiber) ?
-                         hw->mac.ops.setup_link(hw) : e1000e_force_mac_fc(hw));
+               if (hw->phy.media_type == e1000_media_type_fiber) {
+                       retval = hw->mac.ops.setup_link(hw);
+                       /* implicit goto out */
+               } else {
+                       retval = e1000e_force_mac_fc(hw);
+                       if (retval)
+                               goto out;
+                       e1000e_set_fc_watermarks(hw);
+               }
        }
 
+out:
        clear_bit(__E1000_RESETTING, &adapter->state);
        return retval;
 }
index 51ddb04ab19588adcb16e460f7beecd1261b8e69..eff3f478365556bf00a5ed996a398bf051190105 100644 (file)
@@ -1118,7 +1118,8 @@ static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
                        oem_reg |= HV_OEM_BITS_LPLU;
        }
        /* Restart auto-neg to activate the bits */
-       oem_reg |= HV_OEM_BITS_RESTART_AN;
+       if (!e1000_check_reset_block(hw))
+               oem_reg |= HV_OEM_BITS_RESTART_AN;
        ret_val = hw->phy.ops.write_phy_reg_locked(hw, HV_OEM_BITS, oem_reg);
 
 out:
@@ -3558,6 +3559,7 @@ struct e1000_info e1000_pch_info = {
                                  | FLAG_HAS_AMT
                                  | FLAG_HAS_FLASH
                                  | FLAG_HAS_JUMBO_FRAMES
+                                 | FLAG_DISABLE_FC_PAUSE_TIME /* errata */
                                  | FLAG_APME_IN_WUC,
        .pba                    = 26,
        .max_hw_frame_size      = 4096,
index 0687c6aa4e46e406c0dd67d33169b6574be75f7f..fad8f9ea00435589a383afb004b55e7a8a05727f 100644 (file)
@@ -2769,25 +2769,38 @@ void e1000e_reset(struct e1000_adapter *adapter)
        /*
         * flow control settings
         *
-        * The high water mark must be low enough to fit two full frame
+        * The high water mark must be low enough to fit one full frame
         * (or the size used for early receive) above it in the Rx FIFO.
         * Set it to the lower of:
         * - 90% of the Rx FIFO size, and
         * - the full Rx FIFO size minus the early receive size (for parts
         *   with ERT support assuming ERT set to E1000_ERT_2048), or
-        * - the full Rx FIFO size minus two full frames
+        * - the full Rx FIFO size minus one full frame
         */
-       if ((adapter->flags & FLAG_HAS_ERT) &&
-           (adapter->netdev->mtu > ETH_DATA_LEN))
-               hwm = min(((pba << 10) * 9 / 10),
-                         ((pba << 10) - (E1000_ERT_2048 << 3)));
-       else
-               hwm = min(((pba << 10) * 9 / 10),
-                         ((pba << 10) - (2 * adapter->max_frame_size)));
+       if (hw->mac.type == e1000_pchlan) {
+               /*
+                * Workaround PCH LOM adapter hangs with certain network
+                * loads.  If hangs persist, try disabling Tx flow control.
+                */
+               if (adapter->netdev->mtu > ETH_DATA_LEN) {
+                       fc->high_water = 0x3500;
+                       fc->low_water  = 0x1500;
+               } else {
+                       fc->high_water = 0x5000;
+                       fc->low_water  = 0x3000;
+               }
+       } else {
+               if ((adapter->flags & FLAG_HAS_ERT) &&
+                   (adapter->netdev->mtu > ETH_DATA_LEN))
+                       hwm = min(((pba << 10) * 9 / 10),
+                                 ((pba << 10) - (E1000_ERT_2048 << 3)));
+               else
+                       hwm = min(((pba << 10) * 9 / 10),
+                                 ((pba << 10) - adapter->max_frame_size));
 
-       fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
-       fc->low_water = (fc->high_water - (2 * adapter->max_frame_size));
-       fc->low_water &= E1000_FCRTL_RTL; /* 8-byte granularity */
+               fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
+               fc->low_water = fc->high_water - 8;
+       }
 
        if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
                fc->pause_time = 0xFFFF;
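
The non-PCH branch above can be checked by hand: with a hypothetical pba of 38 (KB) and a 1522-byte max frame, fifo = 38912 bytes, 90% of that is 35020, fifo minus one frame is 37390, so the smaller value wins and the 8-byte mask trims it to 35016. A minimal sketch of the same computation (the 0xFFF8 mask stands in for E1000_FCRTH_RTH):

	/* Sketch of the non-PCH high-water-mark selection; pba and max_frame
	 * are caller-supplied, 0xFFF8 stands in for E1000_FCRTH_RTH. */
	static u32 example_hwm(u32 pba, u32 max_frame)
	{
		u32 fifo = pba << 10;			/* Rx FIFO size in bytes */
		u32 ninety = fifo * 9 / 10;		/* 90% of the FIFO */
		u32 one_frame = fifo - max_frame;	/* room for one full frame */
		u32 hwm = ninety < one_frame ? ninety : one_frame;

		return hwm & 0xFFF8;			/* FCRTH has 8-byte granularity */
	}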
@@ -2813,6 +2826,10 @@ void e1000e_reset(struct e1000_adapter *adapter)
        if (mac->ops.init_hw(hw))
                e_err("Hardware Error\n");
 
+       /* additional part of the flow-control workaround above */
+       if (hw->mac.type == e1000_pchlan)
+               ew32(FCRTV_PCH, 0x1000);
+
        e1000_update_mng_vlan(adapter);
 
        /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
@@ -3610,7 +3627,7 @@ static void e1000_watchdog_task(struct work_struct *work)
                        case SPEED_100:
                                txb2b = 0;
                                netdev->tx_queue_len = 100;
-                               /* maybe add some timeout factor ? */
+                               adapter->tx_timeout_factor = 10;
                                break;
                        }
 
@@ -4288,8 +4305,10 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
 
        while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
                msleep(1);
-       /* e1000e_down has a dependency on max_frame_size */
+       /* e1000e_down -> e1000e_reset dependent on max_frame_size & mtu */
        adapter->max_frame_size = max_frame;
+       e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
+       netdev->mtu = new_mtu;
        if (netif_running(netdev))
                e1000e_down(adapter);
 
@@ -4319,9 +4338,6 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
                adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN
                                         + ETH_FCS_LEN;
 
-       e_info("changing MTU from %d to %d\n", netdev->mtu, new_mtu);
-       netdev->mtu = new_mtu;
-
        if (netif_running(netdev))
                e1000e_up(adapter);
        else
index 03175b3a2c9e25c28923439db0699ea312bca376..85f955f7041716855e313cf0d23e817a078d31bd 100644 (file)
@@ -71,7 +71,6 @@ static const u16 e1000_igp_2_cable_length_table[] =
 #define I82577_CFG_ASSERT_CRS_ON_TX       (1 << 15)
 #define I82577_CFG_ENABLE_DOWNSHIFT       (3 << 10) /* auto downshift 100/10 */
 #define I82577_CTRL_REG                   23
-#define I82577_CTRL_DOWNSHIFT_MASK        (7 << 10)
 
 /* 82577 specific PHY registers */
 #define I82577_PHY_CTRL_2            18
@@ -660,15 +659,6 @@ s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
        phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
 
        ret_val = phy->ops.write_phy_reg(hw, I82577_CFG_REG, phy_data);
-       if (ret_val)
-               goto out;
-
-       /* Set number of link attempts before downshift */
-       ret_val = phy->ops.read_phy_reg(hw, I82577_CTRL_REG, &phy_data);
-       if (ret_val)
-               goto out;
-       phy_data &= ~I82577_CTRL_DOWNSHIFT_MASK;
-       ret_val = phy->ops.write_phy_reg(hw, I82577_CTRL_REG, phy_data);
 
 out:
        return ret_val;
@@ -2658,19 +2648,18 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
                page = 0;
 
        if (reg > MAX_PHY_MULTI_PAGE_REG) {
-               if ((hw->phy.type != e1000_phy_82578) ||
-                   ((reg != I82578_ADDR_REG) &&
-                    (reg != I82578_ADDR_REG + 1))) {
-                       u32 phy_addr = hw->phy.addr;
+               u32 phy_addr = hw->phy.addr;
 
-                       hw->phy.addr = 1;
+               hw->phy.addr = 1;
 
-                       /* Page is shifted left, PHY expects (page x 32) */
-                       ret_val = e1000e_write_phy_reg_mdic(hw,
-                                                    IGP01E1000_PHY_PAGE_SELECT,
-                                                    (page << IGP_PAGE_SHIFT));
-                       hw->phy.addr = phy_addr;
-               }
+               /* Page is shifted left, PHY expects (page x 32) */
+               ret_val = e1000e_write_phy_reg_mdic(hw,
+                                            IGP01E1000_PHY_PAGE_SELECT,
+                                            (page << IGP_PAGE_SHIFT));
+               hw->phy.addr = phy_addr;
+
+               if (ret_val)
+                       goto out;
        }
 
        ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
@@ -2678,7 +2667,7 @@ static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
 out:
        /* Revert to MDIO fast mode, if applicable */
        if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
-               ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
+               ret_val |= e1000_set_mdio_slow_mode_hv(hw, false);
 
        if (!locked)
                hw->phy.ops.release_phy(hw);
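
Two things change in the hunk above: the 82578 address-register special case around the page select is dropped, and a failed page select now skips the register access entirely; the ret_val |= on the slow-mode restore also keeps a successful cleanup from masking the primary error. A condensed sketch of the resulting read path, reusing the hunk's names with error handling trimmed:

	/* Condensed sketch of the paged MDIO read above (names from the hunk). */
	static s32 example_read_paged(struct e1000_hw *hw, u32 page, u32 reg, u16 *data)
	{
		u32 saved = hw->phy.addr;
		s32 ret;

		hw->phy.addr = 1;	/* the page-select register answers at PHY address 1 */
		ret = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT,
						page << IGP_PAGE_SHIFT); /* PHY wants page * 32 */
		hw->phy.addr = saved;
		if (ret)
			return ret;	/* never touch the register after a failed page select */

		return e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg, data);
	}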
@@ -2784,19 +2773,18 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
        }
 
        if (reg > MAX_PHY_MULTI_PAGE_REG) {
-               if ((hw->phy.type != e1000_phy_82578) ||
-                   ((reg != I82578_ADDR_REG) &&
-                    (reg != I82578_ADDR_REG + 1))) {
-                       u32 phy_addr = hw->phy.addr;
+               u32 phy_addr = hw->phy.addr;
 
-                       hw->phy.addr = 1;
+               hw->phy.addr = 1;
 
-                       /* Page is shifted left, PHY expects (page x 32) */
-                       ret_val = e1000e_write_phy_reg_mdic(hw,
-                                                    IGP01E1000_PHY_PAGE_SELECT,
-                                                    (page << IGP_PAGE_SHIFT));
-                       hw->phy.addr = phy_addr;
-               }
+               /* Page is shifted left, PHY expects (page x 32) */
+               ret_val = e1000e_write_phy_reg_mdic(hw,
+                                            IGP01E1000_PHY_PAGE_SELECT,
+                                            (page << IGP_PAGE_SHIFT));
+               hw->phy.addr = phy_addr;
+
+               if (ret_val)
+                       goto out;
        }
 
        ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
@@ -2805,7 +2793,7 @@ static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
 out:
        /* Revert to MDIO fast mode, if applicable */
        if ((hw->phy.type == e1000_phy_82577) && in_slow_mode)
-               ret_val = e1000_set_mdio_slow_mode_hv(hw, false);
+               ret_val |= e1000_set_mdio_slow_mode_hv(hw, false);
 
        if (!locked)
                hw->phy.ops.release_phy(hw);
index e1da4666f20426ecf4e5e9848fae64975d2bf463..3116601dbfea859984fa480edd0ecd4db5893e4e 100644 (file)
@@ -5821,10 +5821,7 @@ static int __devinit nv_probe(struct pci_dev *pci_dev, const struct pci_device_i
                        dev->dev_addr);
                dev_printk(KERN_ERR, &pci_dev->dev,
                        "Please complain to your hardware vendor. Switching to a random MAC.\n");
-               dev->dev_addr[0] = 0x00;
-               dev->dev_addr[1] = 0x00;
-               dev->dev_addr[2] = 0x6c;
-               get_random_bytes(&dev->dev_addr[3], 3);
+               random_ether_addr(dev->dev_addr);
        }
 
        dprintk(KERN_DEBUG "%s: MAC Address %pM\n",
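
random_ether_addr() is the stock replacement for the hand-rolled 00:00:6c prefix: it randomizes all six bytes, then forces the result to be a unicast, locally administered address. From memory, the helper in linux/etherdevice.h amounts to the following (see the header for the authoritative definition):

	/* Roughly the body of random_ether_addr(addr). */
	u8 addr[ETH_ALEN];

	get_random_bytes(addr, ETH_ALEN);
	addr[0] &= 0xfe;	/* clear the multicast bit: keep it unicast */
	addr[0] |= 0x02;	/* set the locally administered bit */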
index d34adf99fc6a45c87d5294f05b675251131b52dd..8a61b597a169e78a3a6035e274cc919bfa13b301 100644 (file)
@@ -263,8 +263,8 @@ struct emac_regs {
 
 
 /* EMACx_TRTR */
-#define EMAC_TRTR_SHIFT_EMAC4          27
-#define EMAC_TRTR_SHIFT                        24
+#define EMAC_TRTR_SHIFT_EMAC4          24
+#define EMAC_TRTR_SHIFT                27
 
 /* EMAC specific TX descriptor control fields (write access) */
 #define EMAC_TX_CTRL_GFCS              0x0200
index 5bd9e6bf6f2f0a8cbcc87c94c7613c94bb93a002..a456578b85786da64b32c75d98fafbe169b12893 100644 (file)
@@ -240,11 +240,11 @@ static void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
 static inline bool ixgbe_tx_is_paused(struct ixgbe_adapter *adapter,
                                       struct ixgbe_ring *tx_ring)
 {
-       int tc;
        u32 txoff = IXGBE_TFCS_TXOFF;
 
 #ifdef CONFIG_IXGBE_DCB
        if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+               int tc;
                int reg_idx = tx_ring->reg_idx;
                int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
 
@@ -5994,6 +5994,7 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
        } else {
                pci_set_master(pdev);
                pci_restore_state(pdev);
+               pci_save_state(pdev);
 
                pci_wake_from_d3(pdev, false);
 
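The pci_save_state() added after pci_restore_state() here (and again in the s2io hunk further down) matters because, on kernels of this era, a restore consumes the saved copy of config space; without an immediate re-save, a second recovery pass would restore nothing. The idiom, reduced to its essentials:

	/* Restore-then-resave idiom for repeatable error recovery (sketch). */
	pci_set_master(pdev);
	pci_restore_state(pdev);	/* writes the saved config space back, consuming it */
	pci_save_state(pdev);		/* re-save so the *next* reset can restore again */
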
index 0be14d702bebcbb53aae6d5001ecd3ffa2a310e7..c146304d8d6ca6398b22a3bef00e10416719fe79 100644 (file)
@@ -568,6 +568,16 @@ static inline void ks_outblk(struct ks_net *ks, u16 *wptr, u32 len)
                iowrite16(*wptr++, ks->hw_addr);
 }
 
+static void ks_disable_int(struct ks_net *ks)
+{
+       ks_wrreg16(ks, KS_IER, 0x0000);
+}  /* ks_disable_int */
+
+static void ks_enable_int(struct ks_net *ks)
+{
+       ks_wrreg16(ks, KS_IER, ks->rc_ier);
+}  /* ks_enable_int */
+
 /**
  * ks_tx_fifo_space - return the available hardware buffer size.
  * @ks: The chip information
@@ -681,6 +691,47 @@ static void ks_soft_reset(struct ks_net *ks, unsigned op)
 }
 
 
+void ks_enable_qmu(struct ks_net *ks)
+{
+       u16 w;
+
+       w = ks_rdreg16(ks, KS_TXCR);
+       /* Enables QMU Transmit (TXCR). */
+       ks_wrreg16(ks, KS_TXCR, w | TXCR_TXE);
+
+       /*
+        * RX Frame Count Threshold Enable and Auto-Dequeue RXQ Frame
+        * Enable
+        */
+
+       w = ks_rdreg16(ks, KS_RXQCR);
+       ks_wrreg16(ks, KS_RXQCR, w | RXQCR_RXFCTE);
+
+       /* Enables QMU Receive (RXCR1). */
+       w = ks_rdreg16(ks, KS_RXCR1);
+       ks_wrreg16(ks, KS_RXCR1, w | RXCR1_RXE);
+       ks->enabled = true;
+}  /* ks_enable_qmu */
+
+static void ks_disable_qmu(struct ks_net *ks)
+{
+       u16     w;
+
+       w = ks_rdreg16(ks, KS_TXCR);
+
+       /* Disables QMU Transmit (TXCR). */
+       w  &= ~TXCR_TXE;
+       ks_wrreg16(ks, KS_TXCR, w);
+
+       /* Disables QMU Receive (RXCR1). */
+       w = ks_rdreg16(ks, KS_RXCR1);
+       w &= ~RXCR1_RXE;
+       ks_wrreg16(ks, KS_RXCR1, w);
+
+       ks->enabled = false;
+
+}  /* ks_disable_qmu */
+
 /**
  * ks_read_qmu - read 1 pkt data from the QMU.
  * @ks: The chip information
@@ -752,7 +803,7 @@ static void ks_rcv(struct ks_net *ks, struct net_device *netdev)
                        (frame_hdr->len < RX_BUF_SIZE) && frame_hdr->len)) {
                        skb_reserve(skb, 2);
                        /* read data block including CRC 4 bytes */
-                       ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len + 4);
+                       ks_read_qmu(ks, (u16 *)skb->data, frame_hdr->len);
                        skb_put(skb, frame_hdr->len);
                        skb->dev = netdev;
                        skb->protocol = eth_type_trans(skb, netdev);
@@ -861,7 +912,7 @@ static int ks_net_open(struct net_device *netdev)
                ks_dbg(ks, "%s - entry\n", __func__);
 
        /* reset the HW */
-       err = request_irq(ks->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, ks);
+       err = request_irq(ks->irq, ks_irq, KS_INT_FLAGS, DRV_NAME, netdev);
 
        if (err) {
                printk(KERN_ERR "Failed to request IRQ: %d: %d\n",
@@ -869,6 +920,15 @@ static int ks_net_open(struct net_device *netdev)
                return err;
        }
 
+       /* wake up powermode to normal mode */
+       ks_set_powermode(ks, PMECR_PM_NORMAL);
+       mdelay(1);      /* wait for normal mode to take effect */
+
+       ks_wrreg16(ks, KS_ISR, 0xffff);
+       ks_enable_int(ks);
+       ks_enable_qmu(ks);
+       netif_start_queue(ks->netdev);
+
        if (netif_msg_ifup(ks))
                ks_dbg(ks, "network device %s up\n", netdev->name);
 
@@ -892,19 +952,14 @@ static int ks_net_stop(struct net_device *netdev)
 
        netif_stop_queue(netdev);
 
-       kfree(ks->frame_head_info);
-
        mutex_lock(&ks->lock);
 
        /* turn off the IRQs and ack any outstanding */
        ks_wrreg16(ks, KS_IER, 0x0000);
        ks_wrreg16(ks, KS_ISR, 0xffff);
 
-       /* shutdown RX process */
-       ks_wrreg16(ks, KS_RXCR1, 0x0000);
-
-       /* shutdown TX process */
-       ks_wrreg16(ks, KS_TXCR, 0x0000);
+       /* shutdown RX/TX QMU */
+       ks_disable_qmu(ks);
 
        /* set powermode to soft power down to save power */
        ks_set_powermode(ks, PMECR_PM_SOFTDOWN);
@@ -929,17 +984,8 @@ static int ks_net_stop(struct net_device *netdev)
  */
 static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
 {
-       unsigned fid = ks->fid;
-
-       fid = ks->fid;
-       ks->fid = (ks->fid + 1) & TXFR_TXFID_MASK;
-
-       /* reduce the tx interrupt occurrances. */
-       if (!fid)
-               fid |= TXFR_TXIC;       /* irq on completion */
-
        /* start header at txb[0] to align txw entries */
-       ks->txh.txw[0] = cpu_to_le16(fid);
+       ks->txh.txw[0] = 0;
        ks->txh.txw[1] = cpu_to_le16(len);
 
        /* 1. set pseudo-DMA mode */
@@ -957,16 +1003,6 @@ static void ks_write_qmu(struct ks_net *ks, u8 *pdata, u16 len)
                ;
 }
 
-static void ks_disable_int(struct ks_net *ks)
-{
-       ks_wrreg16(ks, KS_IER, 0x0000);
-}  /* ks_disable_int */
-
-static void ks_enable_int(struct ks_net *ks)
-{
-       ks_wrreg16(ks, KS_IER, ks->rc_ier);
-}  /* ks_enable_int */
-
 /**
  * ks_start_xmit - transmit packet
  * @skb                : The buffer to transmit
@@ -1410,25 +1446,6 @@ static int ks_read_selftest(struct ks_net *ks)
        return ret;
 }
 
-static void ks_disable(struct ks_net *ks)
-{
-       u16     w;
-
-       w = ks_rdreg16(ks, KS_TXCR);
-
-       /* Disables QMU Transmit (TXCR). */
-       w  &= ~TXCR_TXE;
-       ks_wrreg16(ks, KS_TXCR, w);
-
-       /* Disables QMU Receive (RXCR1). */
-       w = ks_rdreg16(ks, KS_RXCR1);
-       w &= ~RXCR1_RXE ;
-       ks_wrreg16(ks, KS_RXCR1, w);
-
-       ks->enabled = false;
-
-}  /* ks_disable */
-
 static void ks_setup(struct ks_net *ks)
 {
        u16     w;
@@ -1463,7 +1480,7 @@ static void ks_setup(struct ks_net *ks)
        w = TXCR_TXFCE | TXCR_TXPE | TXCR_TXCRC | TXCR_TCGIP;
        ks_wrreg16(ks, KS_TXCR, w);
 
-       w = RXCR1_RXFCE | RXCR1_RXBE | RXCR1_RXUE;
+       w = RXCR1_RXFCE | RXCR1_RXBE | RXCR1_RXUE | RXCR1_RXME | RXCR1_RXIPFCC;
 
        if (ks->promiscuous)         /* bPromiscuous */
                w |= (RXCR1_RXAE | RXCR1_RXINVF);
@@ -1486,28 +1503,6 @@ static void ks_setup_int(struct ks_net *ks)
        ks->rc_ier = (IRQ_LCI | IRQ_TXI | IRQ_RXI);
 }  /* ks_setup_int */
 
-void ks_enable(struct ks_net *ks)
-{
-       u16 w;
-
-       w = ks_rdreg16(ks, KS_TXCR);
-       /* Enables QMU Transmit (TXCR). */
-       ks_wrreg16(ks, KS_TXCR, w | TXCR_TXE);
-
-       /*
-        * RX Frame Count Threshold Enable and Auto-Dequeue RXQ Frame
-        * Enable
-        */
-
-       w = ks_rdreg16(ks, KS_RXQCR);
-       ks_wrreg16(ks, KS_RXQCR, w | RXQCR_RXFCTE);
-
-       /* Enables QMU Receive (RXCR1). */
-       w = ks_rdreg16(ks, KS_RXCR1);
-       ks_wrreg16(ks, KS_RXCR1, w | RXCR1_RXE);
-       ks->enabled = true;
-}  /* ks_enable */
-
 static int ks_hw_init(struct ks_net *ks)
 {
 #define        MHEADER_SIZE    (sizeof(struct type_frame_head) * MAX_RECV_FRAMES)
@@ -1612,11 +1607,9 @@ static int __devinit ks8851_probe(struct platform_device *pdev)
 
        ks_soft_reset(ks, GRR_GSR);
        ks_hw_init(ks);
-       ks_disable(ks);
+       ks_disable_qmu(ks);
        ks_setup(ks);
        ks_setup_int(ks);
-       ks_enable_int(ks);
-       ks_enable(ks);
        memcpy(netdev->dev_addr, ks->mac_addr, 6);
 
        data = ks_rdreg16(ks, KS_OBCR);
@@ -1658,6 +1651,7 @@ static int __devexit ks8851_remove(struct platform_device *pdev)
        struct ks_net *ks = netdev_priv(netdev);
        struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
 
+       kfree(ks->frame_head_info);
        unregister_netdev(netdev);
        iounmap(ks->hw_addr);
        free_netdev(netdev);
index 3aabfd9dd2121d7eaab6a7f30e2113e4bc0d22c4..2490aa39804ce3e6e6d74727cc0fa082034526a3 100644 (file)
@@ -360,6 +360,7 @@ static int macvlan_init(struct net_device *dev)
        dev->state              = (dev->state & ~MACVLAN_STATE_MASK) |
                                  (lowerdev->state & MACVLAN_STATE_MASK);
        dev->features           = lowerdev->features & MACVLAN_FEATURES;
+       dev->gso_max_size       = lowerdev->gso_max_size;
        dev->iflink             = lowerdev->ifindex;
        dev->hard_header_len    = lowerdev->hard_header_len;
 
@@ -596,6 +597,7 @@ static int macvlan_device_event(struct notifier_block *unused,
        case NETDEV_FEAT_CHANGE:
                list_for_each_entry(vlan, &port->vlans, list) {
                        vlan->dev->features = dev->features & MACVLAN_FEATURES;
+                       vlan->dev->gso_max_size = dev->gso_max_size;
                        netdev_features_change(vlan->dev);
                }
                break;
index 7384f59df61572a6cb3d170998b116ca1aae5aa7..e1237b8028724bce9a936cdeb1d3a7704175969e 100644 (file)
@@ -1163,6 +1163,8 @@ struct netxen_adapter {
        u32 int_vec_bit;
        u32 heartbit;
 
+       u8 mac_addr[ETH_ALEN];
+
        struct netxen_adapter_stats stats;
 
        struct netxen_recv_context recv_ctx;
index 1c46da63212586647c7d3696a117f9de1821541f..17bb3818d84e63a3faedb7aecec0073502911b6f 100644 (file)
@@ -545,6 +545,8 @@ enum {
 #define        NETXEN_NIU_TEST_MUX_CTL         (NETXEN_CRB_NIU + 0x00094)
 #define        NETXEN_NIU_XG_PAUSE_CTL         (NETXEN_CRB_NIU + 0x00098)
 #define        NETXEN_NIU_XG_PAUSE_LEVEL       (NETXEN_CRB_NIU + 0x000dc)
+#define        NETXEN_NIU_FRAME_COUNT_SELECT   (NETXEN_CRB_NIU + 0x000ac)
+#define        NETXEN_NIU_FRAME_COUNT          (NETXEN_CRB_NIU + 0x000b0)
 #define        NETXEN_NIU_XG_SEL               (NETXEN_CRB_NIU + 0x00128)
 #define NETXEN_NIU_GB_PAUSE_CTL                (NETXEN_CRB_NIU + 0x0030c)
 
index 3185a98b0917ead8c51d4b3fea2f13da3abe6f66..52a3798d8d947a9992762dab2987df158b6252bd 100644 (file)
@@ -383,24 +383,51 @@ int netxen_niu_disable_xg_port(struct netxen_adapter *adapter)
 
 int netxen_p2_nic_set_promisc(struct netxen_adapter *adapter, u32 mode)
 {
-       __u32 reg;
+       u32 mac_cfg;
+       u32 cnt = 0;
+       __u32 reg = 0x0200;
        u32 port = adapter->physical_port;
+       u16 board_type = adapter->ahw.board_type;
 
        if (port > NETXEN_NIU_MAX_XG_PORTS)
                return -EINVAL;
 
-       reg = NXRD32(adapter, NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port));
-       if (mode == NETXEN_NIU_PROMISC_MODE)
-               reg = (reg | 0x2000UL);
-       else
-               reg = (reg & ~0x2000UL);
+       mac_cfg = NXRD32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port));
+       mac_cfg &= ~0x4;
+       NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg);
 
-       if (mode == NETXEN_NIU_ALLMULTI_MODE)
-               reg = (reg | 0x1000UL);
-       else
-               reg = (reg & ~0x1000UL);
+       if ((board_type == NETXEN_BRDTYPE_P2_SB31_10G_IMEZ) ||
+                       (board_type == NETXEN_BRDTYPE_P2_SB31_10G_HMEZ))
+               reg = (0x20 << port);
+
+       NXWR32(adapter, NETXEN_NIU_FRAME_COUNT_SELECT, reg);
+
+       mdelay(10);
+
+       while (NXRD32(adapter, NETXEN_NIU_FRAME_COUNT) && ++cnt < 20)
+               mdelay(10);
+
+       if (cnt < 20) {
+
+               reg = NXRD32(adapter,
+                       NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port));
+
+               if (mode == NETXEN_NIU_PROMISC_MODE)
+                       reg = (reg | 0x2000UL);
+               else
+                       reg = (reg & ~0x2000UL);
+
+               if (mode == NETXEN_NIU_ALLMULTI_MODE)
+                       reg = (reg | 0x1000UL);
+               else
+                       reg = (reg & ~0x1000UL);
+
+               NXWR32(adapter,
+                       NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port), reg);
+       }
 
-       NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_1 + (0x10000 * port), reg);
+       mac_cfg |= 0x4;
+       NXWR32(adapter, NETXEN_NIU_XGE_CONFIG_0 + (0x10000 * port), mac_cfg);
 
        return 0;
 }
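
The reworked promiscuous-mode path quiesces the XGE MAC before rewriting its filter bits: Rx is disabled via CONFIG_0, the per-port frame counter is selected, and the code polls up to roughly 200 ms for in-flight frames to drain; only then are the 0x2000 (promisc) and 0x1000 (allmulti) bits in CONFIG_1 touched, and Rx is re-enabled either way. Restated with hypothetical helper names:

	/* Quiesce/update/resume shape of the hunk above; helper names hypothetical. */
	static void example_set_promisc(struct netxen_adapter *adapter, u32 port, u32 mode)
	{
		u32 cnt = 0;

		mac_rx_disable(adapter, port);		/* CONFIG_0 &= ~0x4 */
		frame_counter_select(adapter, port);	/* NETXEN_NIU_FRAME_COUNT_SELECT */
		mdelay(10);
		while (frame_count(adapter) && ++cnt < 20)	/* NETXEN_NIU_FRAME_COUNT */
			mdelay(10);			/* drain in-flight frames */
		if (cnt < 20)
			update_filter_bits(adapter, mode);	/* CONFIG_1 0x2000 / 0x1000 */
		mac_rx_enable(adapter, port);		/* CONFIG_0 |= 0x4 */
	}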
@@ -436,7 +463,7 @@ netxen_nic_enable_mcast_filter(struct netxen_adapter *adapter)
 {
        u32     val = 0;
        u16 port = adapter->physical_port;
-       u8 *addr = adapter->netdev->dev_addr;
+       u8 *addr = adapter->mac_addr;
 
        if (adapter->mc_enabled)
                return 0;
@@ -465,7 +492,7 @@ netxen_nic_disable_mcast_filter(struct netxen_adapter *adapter)
 {
        u32     val = 0;
        u16 port = adapter->physical_port;
-       u8 *addr = adapter->netdev->dev_addr;
+       u8 *addr = adapter->mac_addr;
 
        if (!adapter->mc_enabled)
                return 0;
@@ -660,7 +687,7 @@ void netxen_p3_nic_set_multi(struct net_device *netdev)
 
        list_splice_tail_init(&adapter->mac_list, &del_list);
 
-       nx_p3_nic_add_mac(adapter, netdev->dev_addr, &del_list);
+       nx_p3_nic_add_mac(adapter, adapter->mac_addr, &del_list);
        nx_p3_nic_add_mac(adapter, bcast_addr, &del_list);
 
        if (netdev->flags & IFF_PROMISC) {
index e40b914d6faf7def7bedbb9f309195afc2a61e59..8a0904368e0838027e7a23621918203ebdd02d5e 100644 (file)
@@ -544,6 +544,8 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
                                continue;
                        if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
                                continue;
+                       if ((off & 0x0ff00000) == NETXEN_CRB_DDR_NET)
+                               continue;
                        if (off == (NETXEN_CRB_PEG_NET_1 + 0x18))
                                buf[i].data = 0x1020;
                        /* skip the function enable register */
index 0b4a56a8c8d5fc2d9d9d1f8619e88e2031577a55..3bf78dbfbf0f5d63e3ed92251b32f16cae911da5 100644 (file)
@@ -437,6 +437,7 @@ netxen_read_mac_addr(struct netxen_adapter *adapter)
                netdev->dev_addr[i] = *(p + 5 - i);
 
        memcpy(netdev->perm_addr, netdev->dev_addr, netdev->addr_len);
+       memcpy(adapter->mac_addr, netdev->dev_addr, netdev->addr_len);
 
        /* set station address */
 
@@ -459,6 +460,7 @@ int netxen_nic_set_mac(struct net_device *netdev, void *p)
                netxen_napi_disable(adapter);
        }
 
+       memcpy(adapter->mac_addr, addr->sa_data, netdev->addr_len);
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        adapter->macaddr_set(adapter, addr->sa_data);
 
@@ -956,7 +958,7 @@ netxen_nic_up(struct netxen_adapter *adapter, struct net_device *netdev)
                return err;
        }
        if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
-               adapter->macaddr_set(adapter, netdev->dev_addr);
+               adapter->macaddr_set(adapter, adapter->mac_addr);
 
        adapter->set_multi(netdev);
        adapter->set_mtu(adapter, netdev->mtu);
index 8659d341e7696fcfe8df1b55318db5957181d013..35897134a5dd34c210e72229136e50c7a44a0040 100644 (file)
@@ -139,7 +139,7 @@ out:
        return NULL;
 }
 
-static void __devinit mdio_gpio_bus_deinit(struct device *dev)
+static void mdio_gpio_bus_deinit(struct device *dev)
 {
        struct mii_bus *bus = dev_get_drvdata(dev);
        struct mdio_gpio_info *bitbang = bus->priv;
index 9bf2a6be90319b2cd9249c3acfbaf2a78c56d79f..965adb6174c33a4b65223fabd7372f3da4781b35 100644 (file)
@@ -1944,8 +1944,15 @@ ppp_receive_mp_frame(struct ppp *ppp, struct sk_buff *skb, struct channel *pch)
        }
 
        /* Pull completed packets off the queue and receive them. */
-       while ((skb = ppp_mp_reconstruct(ppp)))
-               ppp_receive_nonmp_frame(ppp, skb);
+       while ((skb = ppp_mp_reconstruct(ppp))) {
+               if (pskb_may_pull(skb, 2))
+                       ppp_receive_nonmp_frame(ppp, skb);
+               else {
+                       ++ppp->dev->stats.rx_length_errors;
+                       kfree_skb(skb);
+                       ppp_receive_error(ppp);
+               }
+       }
 
        return;
 
index 7dfcb58b0eb42d807f401b69b37499b059c8ef08..8b14c6eda7c3379e67eda5fac94960e0e538b46b 100644 (file)
@@ -1085,7 +1085,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
        int bar = 0;
        u16 *adrp;
 
-       printk(KERN_INFO "%s\n", version);
+       printk("%s\n", version);
 
        err = pci_enable_device(pdev);
        if (err)
index fa49356784883049d4487f95bf0b0fa1db8b3cd7..0fe2fc90f207ebdf74d01620b50781fb2d556882 100644 (file)
@@ -3235,6 +3235,10 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
        flush_scheduled_work();
 
        unregister_netdev(dev);
+
+       /* restore original MAC address */
+       rtl_rar_set(tp, dev->perm_addr);
+
        rtl_disable_msi(pdev, tp);
        rtl8169_release_board(pdev, dev, tp->mmio_addr);
        pci_set_drvdata(pdev, NULL);
@@ -3243,9 +3247,9 @@ static void __devexit rtl8169_remove_one(struct pci_dev *pdev)
 static void rtl8169_set_rxbufsize(struct rtl8169_private *tp,
                                  struct net_device *dev)
 {
-       unsigned int mtu = dev->mtu;
+       unsigned int max_frame = dev->mtu + VLAN_ETH_HLEN + ETH_FCS_LEN;
 
-       tp->rx_buf_sz = (mtu > RX_BUF_SIZE) ? mtu + ETH_HLEN + 8 : RX_BUF_SIZE;
+       tp->rx_buf_sz = (max_frame > RX_BUF_SIZE) ? max_frame : RX_BUF_SIZE;
 }
 
 static int rtl8169_open(struct net_device *dev)
@@ -4881,6 +4885,9 @@ static void rtl_shutdown(struct pci_dev *pdev)
 
        rtl8169_net_suspend(dev);
 
+       /* restore original MAC address */
+       rtl_rar_set(tp, dev->perm_addr);
+
        spin_lock_irq(&tp->lock);
 
        rtl8169_asic_down(ioaddr);
index ddccf5fa56b63b998d2344e6e67cf6e7fd4977e4..0dd7839322bc6044d8d21738978955dc1a752cf6 100644 (file)
@@ -3494,6 +3494,7 @@ static void s2io_reset(struct s2io_nic *sp)
 
                /* Restore the PCI state saved during initialization. */
                pci_restore_state(sp->pdev);
+               pci_save_state(sp->pdev);
                pci_read_config_word(sp->pdev, 0x2, &val16);
                if (check_pci_device_id(val16) != (u16)PCI_ANY_ID)
                        break;
index 05c91ee6921e8a2082ed605db93f3ddb92581b27..f12206bdbb75db7e0ea4d051f1130d2960c66767 100644 (file)
@@ -2283,7 +2283,7 @@ static int __devinit smc_drv_probe(struct platform_device *pdev)
 
        ndev->irq = ires->start;
 
-       if (ires->flags & IRQF_TRIGGER_MASK)
+       if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK)
                irq_flags = ires->flags & IRQF_TRIGGER_MASK;
 
        ret = smc_request_attrib(pdev, ndev);
index ccdd196f5297952f993291819e1d65b5526402b0..f9cdcbcb77d4989efe930f1014a9c32128081216 100644 (file)
@@ -986,7 +986,7 @@ static int smsc911x_poll(struct napi_struct *napi, int budget)
        struct net_device *dev = pdata->dev;
        int npackets = 0;
 
-       while (likely(netif_running(dev)) && (npackets < budget)) {
+       while (npackets < budget) {
                unsigned int pktlength;
                unsigned int pktwords;
                struct sk_buff *skb;
index b4909a2dec66bfb4e9835cf0223654cafcebc347..0f7909276237260593e03263dfbd450884794923 100644 (file)
@@ -252,6 +252,9 @@ static int smsc9420_ethtool_get_settings(struct net_device *dev,
 {
        struct smsc9420_pdata *pd = netdev_priv(dev);
 
+       if (!pd->phy_dev)
+               return -ENODEV;
+
        cmd->maxtxpkt = 1;
        cmd->maxrxpkt = 1;
        return phy_ethtool_gset(pd->phy_dev, cmd);
@@ -262,6 +265,9 @@ static int smsc9420_ethtool_set_settings(struct net_device *dev,
 {
        struct smsc9420_pdata *pd = netdev_priv(dev);
 
+       if (!pd->phy_dev)
+               return -ENODEV;
+
        return phy_ethtool_sset(pd->phy_dev, cmd);
 }
 
@@ -290,6 +296,10 @@ static void smsc9420_ethtool_set_msglevel(struct net_device *netdev, u32 data)
 static int smsc9420_ethtool_nway_reset(struct net_device *netdev)
 {
        struct smsc9420_pdata *pd = netdev_priv(netdev);
+
+       if (!pd->phy_dev)
+               return -ENODEV;
+
        return phy_start_aneg(pd->phy_dev);
 }
 
@@ -312,6 +322,10 @@ smsc9420_ethtool_getregs(struct net_device *dev, struct ethtool_regs *regs,
        for (i = 0; i < 0x100; i += (sizeof(u32)))
                data[j++] = smsc9420_reg_read(pd, i);
 
+       /* cannot read PHY registers if the net device is down */
+       if (!phy_dev)
+               return;
+
        for (i = 0; i <= 31; i++)
                data[j++] = smsc9420_mii_read(phy_dev->bus, phy_dev->addr, i);
 }
index c2f14dc9ba289df6839fad9e9e7eec98c395cf21..9542995ba6674a0ce309001ca7208ab218027214 100644 (file)
@@ -416,13 +416,8 @@ static void init_dma_desc_rings(struct net_device *dev)
        unsigned int txsize = priv->dma_tx_size;
        unsigned int rxsize = priv->dma_rx_size;
        unsigned int bfsize = priv->dma_buf_sz;
-       int buff2_needed = 0;
-       int dis_ic = 0;
+       int buff2_needed = 0, dis_ic = 0;
 
-#ifdef CONFIG_STMMAC_TIMER
-       /* Using Timers disable interrupts on completion for the reception */
-       dis_ic = 1;
-#endif
        /* Set the Buffer size according to the MTU;
         * indeed, in case of jumbo we need to bump-up the buffer sizes.
         */
@@ -437,6 +432,11 @@ static void init_dma_desc_rings(struct net_device *dev)
        else
                bfsize = DMA_BUFFER_SIZE;
 
+#ifdef CONFIG_STMMAC_TIMER
+       /* Disable interrupts on completion for the reception if timer is on */
+       if (likely(priv->tm->enable))
+               dis_ic = 1;
+#endif
        /* If the MTU exceeds 8k so use the second buffer in the chain */
        if (bfsize >= BUF_SIZE_8KiB)
                buff2_needed = 1;
@@ -809,20 +809,22 @@ static void stmmac_tx(struct stmmac_priv *priv)
 
 static inline void stmmac_enable_irq(struct stmmac_priv *priv)
 {
-#ifndef CONFIG_STMMAC_TIMER
-       writel(DMA_INTR_DEFAULT_MASK, priv->dev->base_addr + DMA_INTR_ENA);
-#else
-       priv->tm->timer_start(tmrate);
+#ifdef CONFIG_STMMAC_TIMER
+       if (likely(priv->tm->enable))
+               priv->tm->timer_start(tmrate);
+       else
 #endif
+       writel(DMA_INTR_DEFAULT_MASK, priv->dev->base_addr + DMA_INTR_ENA);
 }
 
 static inline void stmmac_disable_irq(struct stmmac_priv *priv)
 {
-#ifndef CONFIG_STMMAC_TIMER
-       writel(0, priv->dev->base_addr + DMA_INTR_ENA);
-#else
-       priv->tm->timer_stop();
+#ifdef CONFIG_STMMAC_TIMER
+       if (likely(priv->tm->enable))
+               priv->tm->timer_stop();
+       else
 #endif
+       writel(0, priv->dev->base_addr + DMA_INTR_ENA);
 }
 
 static int stmmac_has_work(struct stmmac_priv *priv)
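
The rewritten enable/disable helpers above lean on a slightly unusual construct: an if/else whose else arm sits directly before #endif, so the writel() serves as the else branch when the timer build is enabled and as the unconditional body when it is not. What the preprocessor leaves behind, sketched for the enable case:

	/* CONFIG_STMMAC_TIMER=y and the timer attached: */
	if (likely(priv->tm->enable))
		priv->tm->timer_start(tmrate);
	else
		writel(DMA_INTR_DEFAULT_MASK, priv->dev->base_addr + DMA_INTR_ENA);

	/* CONFIG_STMMAC_TIMER unset: only the fallback line survives. */
	writel(DMA_INTR_DEFAULT_MASK, priv->dev->base_addr + DMA_INTR_ENA);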
@@ -1031,22 +1033,23 @@ static int stmmac_open(struct net_device *dev)
        }
 
 #ifdef CONFIG_STMMAC_TIMER
-       priv->tm = kmalloc(sizeof(struct stmmac_timer *), GFP_KERNEL);
+       priv->tm = kzalloc(sizeof(struct stmmac_timer), GFP_KERNEL);
        if (unlikely(priv->tm == NULL)) {
                pr_err("%s: ERROR: timer memory alloc failed \n", __func__);
                return -ENOMEM;
        }
        priv->tm->freq = tmrate;
 
-       /* Test if the HW timer can be actually used.
-        * In case of failure continue with no timer. */
+       /* Test if the external timer can actually be used.
+        * In case of failure continue without the timer. */
        if (unlikely((stmmac_open_ext_timer(dev, priv->tm)) < 0)) {
-               pr_warning("stmmaceth: cannot attach the HW timer\n");
+               pr_warning("stmmaceth: cannot attach the external timer.\n");
                tmrate = 0;
                priv->tm->freq = 0;
                priv->tm->timer_start = stmmac_no_timer_started;
                priv->tm->timer_stop = stmmac_no_timer_stopped;
-       }
+       } else
+               priv->tm->enable = 1;
 #endif
 
        /* Create and initialize the TX/RX descriptors chains. */
@@ -1322,9 +1325,11 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
 
        /* Interrupt on completion only for the latest segment */
        priv->mac_type->ops->close_tx_desc(desc);
+
 #ifdef CONFIG_STMMAC_TIMER
-       /* Clean IC while using timers */
-       priv->mac_type->ops->clear_tx_ic(desc);
+       /* Clean IC while using timer */
+       if (likely(priv->tm->enable))
+               priv->mac_type->ops->clear_tx_ic(desc);
 #endif
        /* To avoid a race condition */
        priv->mac_type->ops->set_tx_owner(first);
@@ -2028,7 +2033,8 @@ static int stmmac_suspend(struct platform_device *pdev, pm_message_t state)
 
 #ifdef CONFIG_STMMAC_TIMER
                priv->tm->timer_stop();
-               dis_ic = 1;
+               if (likely(priv->tm->enable))
+                       dis_ic = 1;
 #endif
                napi_disable(&priv->napi);
 
index b838c6582077b0a345e4858fa1f509fb24cb0681..679f61ffb1f868254f75381e652112ae97d8b83b 100644 (file)
@@ -63,7 +63,7 @@ int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
 
        stmmac_rtc = rtc_class_open(CONFIG_RTC_HCTOSYS_DEVICE);
        if (stmmac_rtc == NULL) {
-               pr_error("open rtc device failed\n");
+               pr_err("open rtc device failed\n");
                return -ENODEV;
        }
 
@@ -71,7 +71,7 @@ int stmmac_open_ext_timer(struct net_device *dev, struct stmmac_timer *tm)
 
        /* Periodic mode is not supported */
        if ((rtc_irq_set_freq(stmmac_rtc, &stmmac_task, tm->freq) < 0)) {
-               pr_error("set periodic failed\n");
+               pr_err("set periodic failed\n");
                rtc_irq_unregister(stmmac_rtc, &stmmac_task);
                rtc_class_close(stmmac_rtc);
                return -1;
index f795cae33725913697d654e26e8a839bb6e8a483..6863590d184bcc9c335da8bfb51562eafafc7557 100644 (file)
@@ -26,6 +26,7 @@ struct stmmac_timer {
        void (*timer_start) (unsigned int new_freq);
        void (*timer_stop) (void);
        unsigned int freq;
+       unsigned int enable;
 };
 
 /* Open the HW timer device and return 0 in case of success */
index 7019a0d1a82bd48dd9e15f2e5b213be928423f9e..61640b99b7055d07836c43a08bc311f40b548dc8 100644 (file)
@@ -2063,7 +2063,15 @@ static int gem_check_invariants(struct gem *gp)
                mif_cfg &= ~MIF_CFG_PSELECT;
                writel(mif_cfg, gp->regs + MIF_CFG);
        } else {
-               gp->phy_type = phy_serialink;
+#ifdef CONFIG_SPARC
+               const char *p;
+
+               p = of_get_property(gp->of_node, "shared-pins", NULL);
+               if (p && !strcmp(p, "serdes"))
+                       gp->phy_type = phy_serdes;
+               else
+#endif
+                       gp->phy_type = phy_serialink;
        }
        if (gp->phy_type == phy_mii_mdio1 ||
            gp->phy_type == phy_mii_mdio0) {
index fa4e58196c214f3eeea5afb5893625eed58e61ce..43bc3fcc0d8523e837a07dcd40f3b3ca738fe218 100644 (file)
@@ -378,7 +378,7 @@ static void dbg_dump(int line_count, const char *func_name, unsigned char *buf,
 }
 
 #define DUMP(buf_, len_)       \
-       dbg_dump(__LINE__, __func__, buf_, len_)
+       dbg_dump(__LINE__, __func__, (unsigned char *)buf_, len_)
 
 #define DUMP1(buf_, len_)                      \
        do {                                    \
@@ -1363,7 +1363,7 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
        /* reset the rts and dtr */
        /* do the actual close */
        serial->open_count--;
-       kref_put(&serial->parent->ref, hso_serial_ref_free);
+
        if (serial->open_count <= 0) {
                serial->open_count = 0;
                spin_lock_irq(&serial->serial_lock);
@@ -1383,6 +1383,8 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
                usb_autopm_put_interface(serial->parent->interface);
 
        mutex_unlock(&serial->parent->mutex);
+
+       kref_put(&serial->parent->ref, hso_serial_ref_free);
 }
 
 /* close the requested serial port */
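
Moving the kref_put() is the whole point of the hso hunk above: dropping the last reference may free serial->parent, yet the old placement still dereferenced the parent afterwards (the autopm put and the mutex_unlock on parent->mutex). The safe ordering, reduced to a pattern (parent_release is a hypothetical destructor):

	/* Use, unlock, then drop the reference that may free the object. */
	mutex_lock(&parent->mutex);
	/* ... all accesses to *parent happen here ... */
	mutex_unlock(&parent->mutex);		/* last touch of parent's memory */
	kref_put(&parent->ref, parent_release);	/* may free parent immediately */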
@@ -1527,7 +1529,7 @@ static void tiocmget_intr_callback(struct urb *urb)
                dev_warn(&usb->dev,
                         "hso received invalid serial state notification\n");
                DUMP(serial_state_notification,
-                    sizeof(hso_serial_state_notifation))
+                    sizeof(struct hso_serial_state_notification));
        } else {
 
                UART_state_bitmap = le16_to_cpu(serial_state_notification->
index ade5b344f75d73fb5852a0a9b30322576d1ce868..52af5017c46b42be7051a93b58c1f52da5909fb5 100644 (file)
@@ -210,32 +210,29 @@ rx_drop:
 static struct net_device_stats *veth_get_stats(struct net_device *dev)
 {
        struct veth_priv *priv;
-       struct net_device_stats *dev_stats;
        int cpu;
-       struct veth_net_stats *stats;
+       struct veth_net_stats *stats, total = {0};
 
        priv = netdev_priv(dev);
-       dev_stats = &dev->stats;
-
-       dev_stats->rx_packets = 0;
-       dev_stats->tx_packets = 0;
-       dev_stats->rx_bytes = 0;
-       dev_stats->tx_bytes = 0;
-       dev_stats->tx_dropped = 0;
-       dev_stats->rx_dropped = 0;
 
-       for_each_online_cpu(cpu) {
+       for_each_possible_cpu(cpu) {
                stats = per_cpu_ptr(priv->stats, cpu);
 
-               dev_stats->rx_packets += stats->rx_packets;
-               dev_stats->tx_packets += stats->tx_packets;
-               dev_stats->rx_bytes += stats->rx_bytes;
-               dev_stats->tx_bytes += stats->tx_bytes;
-               dev_stats->tx_dropped += stats->tx_dropped;
-               dev_stats->rx_dropped += stats->rx_dropped;
+               total.rx_packets += stats->rx_packets;
+               total.tx_packets += stats->tx_packets;
+               total.rx_bytes   += stats->rx_bytes;
+               total.tx_bytes   += stats->tx_bytes;
+               total.tx_dropped += stats->tx_dropped;
+               total.rx_dropped += stats->rx_dropped;
        }
-
-       return dev_stats;
+       dev->stats.rx_packets = total.rx_packets;
+       dev->stats.tx_packets = total.tx_packets;
+       dev->stats.rx_bytes   = total.rx_bytes;
+       dev->stats.tx_bytes   = total.tx_bytes;
+       dev->stats.tx_dropped = total.tx_dropped;
+       dev->stats.rx_dropped = total.rx_dropped;
+
+       return &dev->stats;
 }
 
 static int veth_open(struct net_device *dev)
index e2c33c06190bb1fa0975b634b765de70d22ad71f..8e25ca7080c7bb0a73ae3b3eb63ed79721d58d64 100644 (file)
@@ -907,6 +907,7 @@ static ssize_t cosa_write(struct file *file,
                        current->state = TASK_RUNNING;
                        chan->tx_status = 1;
                        spin_unlock_irqrestore(&cosa->lock, flags);
+                       up(&chan->wsem);
                        return -ERESTARTSYS;
                }
        }
index 9c6ab5378f6e1a5ab693b88cb67bfd447e294581..95a8e232b58f772a74ea0c7d1d5ab96e11142e2e 100644 (file)
@@ -1125,7 +1125,6 @@ ath5k_mode_setup(struct ath5k_softc *sc)
        /* configure operational mode */
        ath5k_hw_set_opmode(ah);
 
-       ath5k_hw_set_mcast_filter(ah, 0, 0);
        ATH5K_DBG(sc, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
 }
 
index b767c3b67b24b97aad051e2a42301b8d6725bade..b548c8eaaae18f9ab206fc114891a67319fa7f7b 100644 (file)
@@ -63,12 +63,16 @@ static const struct pci_device_id ath5k_led_devices[] = {
        { ATH_SDEVICE(PCI_VENDOR_ID_AMBIT, 0x0422), ATH_LED(1, 1) },
        /* E-machines E510 (tuliom@gmail.com) */
        { ATH_SDEVICE(PCI_VENDOR_ID_AMBIT, 0x0428), ATH_LED(3, 0) },
+       /* BenQ Joybook R55v (nowymarluk@wp.pl) */
+       { ATH_SDEVICE(PCI_VENDOR_ID_QMI, 0x0100), ATH_LED(1, 0) },
        /* Acer Extensa 5620z (nekoreeve@gmail.com) */
        { ATH_SDEVICE(PCI_VENDOR_ID_QMI, 0x0105), ATH_LED(3, 0) },
        /* Fukato Datacask Jupiter 1014a (mrb74@gmx.at) */
        { ATH_SDEVICE(PCI_VENDOR_ID_AZWAVE, 0x1026), ATH_LED(3, 0) },
        /* IBM ThinkPad AR5BXB6 (legovini@spiro.fisica.unipd.it) */
        { ATH_SDEVICE(PCI_VENDOR_ID_IBM, 0x058a), ATH_LED(1, 0) },
+       /* HP Compaq CQ60-206US (ddreggors@jumptv.com) */
+       { ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137a), ATH_LED(3, 1) },
        /* HP Compaq C700 (nitrousnrg@gmail.com) */
        { ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 1) },
        /* IBM-specific AR5212 (all others) */
index 52bed89063d4e6a0941541683dc116e034ac22fb..43d2be9867fc08e288c5dc3eab5949e0cf1fa96c 100644 (file)
@@ -1555,6 +1555,8 @@ void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
                BIT(NL80211_IFTYPE_ADHOC) |
                BIT(NL80211_IFTYPE_MESH_POINT);
 
+       hw->wiphy->ps_default = false;
+
        hw->queues = 4;
        hw->max_rates = 4;
        hw->channel_change_time = 5000;
index 86f35827f0085a19d01d94624c4f2aee3603df3f..098dda1a67c13af65abb94e3c5bb0bd2a7c4a816 100644 (file)
@@ -4521,9 +4521,8 @@ static int b43_op_beacon_set_tim(struct ieee80211_hw *hw,
 {
        struct b43_wl *wl = hw_to_b43_wl(hw);
 
-       mutex_lock(&wl->mutex);
+       /* FIXME: add locking */
        b43_update_templates(wl);
-       mutex_unlock(&wl->mutex);
 
        return 0;
 }
index 240cff1e6979a4e77cb54c11ccc074dadf3ff028..6e2fc0cb6f8a83386252923c9c5cc0d57c190d4b 100644 (file)
@@ -6029,7 +6029,7 @@ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev,
        struct ipw2100_priv *priv;
        struct net_device *dev;
 
-       dev = alloc_ieee80211(sizeof(struct ipw2100_priv), 0);
+       dev = alloc_ieee80211(sizeof(struct ipw2100_priv));
        if (!dev)
                return NULL;
        priv = libipw_priv(dev);
@@ -6342,7 +6342,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
                sysfs_remove_group(&pci_dev->dev.kobj,
                                   &ipw2100_attribute_group);
 
-               free_ieee80211(dev, 0);
+               free_ieee80211(dev);
                pci_set_drvdata(pci_dev, NULL);
        }
 
@@ -6400,7 +6400,7 @@ static void __devexit ipw2100_pci_remove_one(struct pci_dev *pci_dev)
                if (dev->base_addr)
                        iounmap((void __iomem *)dev->base_addr);
 
-               free_ieee80211(dev, 0);
+               free_ieee80211(dev);
        }
 
        pci_release_regions(pci_dev);
index 827824d45de9bb1da433ad66be0dc225376270ef..a6ca536e44f81658e15cc361f9e51232a517801a 100644 (file)
@@ -104,25 +104,6 @@ static int antenna = CFG_SYS_ANTENNA_BOTH;
 static int rtap_iface = 0;     /* def: 0 -- do not create rtap interface */
 #endif
 
-static struct ieee80211_rate ipw2200_rates[] = {
-       { .bitrate = 10 },
-       { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
-       { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
-       { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
-       { .bitrate = 60 },
-       { .bitrate = 90 },
-       { .bitrate = 120 },
-       { .bitrate = 180 },
-       { .bitrate = 240 },
-       { .bitrate = 360 },
-       { .bitrate = 480 },
-       { .bitrate = 540 }
-};
-
-#define ipw2200_a_rates                (ipw2200_rates + 4)
-#define ipw2200_num_a_rates    8
-#define ipw2200_bg_rates       (ipw2200_rates + 0)
-#define ipw2200_num_bg_rates   12
 
 #ifdef CONFIG_IPW2200_QOS
 static int qos_enable = 0;
@@ -8674,6 +8655,24 @@ static int ipw_sw_reset(struct ipw_priv *priv, int option)
  *
  */
 
+static int ipw_wx_get_name(struct net_device *dev,
+                          struct iw_request_info *info,
+                          union iwreq_data *wrqu, char *extra)
+{
+       struct ipw_priv *priv = libipw_priv(dev);
+       mutex_lock(&priv->mutex);
+       if (priv->status & STATUS_RF_KILL_MASK)
+               strcpy(wrqu->name, "radio off");
+       else if (!(priv->status & STATUS_ASSOCIATED))
+               strcpy(wrqu->name, "unassociated");
+       else
+               snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11%c",
+                        ipw_modes[priv->assoc_request.ieee_mode]);
+       IPW_DEBUG_WX("Name: %s\n", wrqu->name);
+       mutex_unlock(&priv->mutex);
+       return 0;
+}
+
 static int ipw_set_channel(struct ipw_priv *priv, u8 channel)
 {
        if (channel == 0) {
@@ -9973,7 +9972,7 @@ static int ipw_wx_sw_reset(struct net_device *dev,
 /* Rebase the WE IOCTLs to zero for the handler array */
 #define IW_IOCTL(x) [(x)-SIOCSIWCOMMIT]
 static iw_handler ipw_wx_handlers[] = {
-       IW_IOCTL(SIOCGIWNAME) = (iw_handler) cfg80211_wext_giwname,
+       IW_IOCTL(SIOCGIWNAME) = ipw_wx_get_name,
        IW_IOCTL(SIOCSIWFREQ) = ipw_wx_set_freq,
        IW_IOCTL(SIOCGIWFREQ) = ipw_wx_get_freq,
        IW_IOCTL(SIOCSIWMODE) = ipw_wx_set_mode,
@@ -11417,100 +11416,16 @@ static void ipw_bg_down(struct work_struct *work)
 /* Called by register_netdev() */
 static int ipw_net_init(struct net_device *dev)
 {
-       int i, rc = 0;
        struct ipw_priv *priv = libipw_priv(dev);
-       const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
-       struct wireless_dev *wdev = &priv->ieee->wdev;
        mutex_lock(&priv->mutex);
 
        if (ipw_up(priv)) {
-               rc = -EIO;
-               goto out;
-       }
-
-       memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);
-
-       /* fill-out priv->ieee->bg_band */
-       if (geo->bg_channels) {
-               struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;
-
-               bg_band->band = IEEE80211_BAND_2GHZ;
-               bg_band->n_channels = geo->bg_channels;
-               bg_band->channels =
-                       kzalloc(geo->bg_channels *
-                               sizeof(struct ieee80211_channel), GFP_KERNEL);
-               /* translate geo->bg to bg_band.channels */
-               for (i = 0; i < geo->bg_channels; i++) {
-                       bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
-                       bg_band->channels[i].center_freq = geo->bg[i].freq;
-                       bg_band->channels[i].hw_value = geo->bg[i].channel;
-                       bg_band->channels[i].max_power = geo->bg[i].max_power;
-                       if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY)
-                               bg_band->channels[i].flags |=
-                                       IEEE80211_CHAN_PASSIVE_SCAN;
-                       if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS)
-                               bg_band->channels[i].flags |=
-                                       IEEE80211_CHAN_NO_IBSS;
-                       if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)
-                               bg_band->channels[i].flags |=
-                                       IEEE80211_CHAN_RADAR;
-                       /* No equivalent for LIBIPW_CH_80211H_RULES,
-                          LIBIPW_CH_UNIFORM_SPREADING, or
-                          LIBIPW_CH_B_ONLY... */
-               }
-               /* point at bitrate info */
-               bg_band->bitrates = ipw2200_bg_rates;
-               bg_band->n_bitrates = ipw2200_num_bg_rates;
-
-               wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band;
-       }
-
-       /* fill-out priv->ieee->a_band */
-       if (geo->a_channels) {
-               struct ieee80211_supported_band *a_band = &priv->ieee->a_band;
-
-               a_band->band = IEEE80211_BAND_5GHZ;
-               a_band->n_channels = geo->a_channels;
-               a_band->channels =
-                       kzalloc(geo->a_channels *
-                               sizeof(struct ieee80211_channel), GFP_KERNEL);
-               /* translate geo->bg to a_band.channels */
-               for (i = 0; i < geo->a_channels; i++) {
-                       a_band->channels[i].band = IEEE80211_BAND_2GHZ;
-                       a_band->channels[i].center_freq = geo->a[i].freq;
-                       a_band->channels[i].hw_value = geo->a[i].channel;
-                       a_band->channels[i].max_power = geo->a[i].max_power;
-                       if (geo->a[i].flags & LIBIPW_CH_PASSIVE_ONLY)
-                               a_band->channels[i].flags |=
-                                       IEEE80211_CHAN_PASSIVE_SCAN;
-                       if (geo->a[i].flags & LIBIPW_CH_NO_IBSS)
-                               a_band->channels[i].flags |=
-                                       IEEE80211_CHAN_NO_IBSS;
-                       if (geo->a[i].flags & LIBIPW_CH_RADAR_DETECT)
-                               a_band->channels[i].flags |=
-                                       IEEE80211_CHAN_RADAR;
-                       /* No equivalent for LIBIPW_CH_80211H_RULES,
-                          LIBIPW_CH_UNIFORM_SPREADING, or
-                          LIBIPW_CH_B_ONLY... */
-               }
-               /* point at bitrate info */
-               a_band->bitrates = ipw2200_a_rates;
-               a_band->n_bitrates = ipw2200_num_a_rates;
-
-               wdev->wiphy->bands[IEEE80211_BAND_5GHZ] = a_band;
-       }
-
-       set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev);
-
-       /* With that information in place, we can now register the wiphy... */
-       if (wiphy_register(wdev->wiphy)) {
-               rc = -EIO;
-               goto out;
+               mutex_unlock(&priv->mutex);
+               return -EIO;
        }
 
-out:
        mutex_unlock(&priv->mutex);
-       return rc;
+       return 0;
 }
 
 /* PCI driver stuff */
@@ -11641,7 +11556,7 @@ static int ipw_prom_alloc(struct ipw_priv *priv)
        if (priv->prom_net_dev)
                return -EPERM;
 
-       priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv), 1);
+       priv->prom_net_dev = alloc_ieee80211(sizeof(struct ipw_prom_priv));
        if (priv->prom_net_dev == NULL)
                return -ENOMEM;
 
@@ -11660,7 +11575,7 @@ static int ipw_prom_alloc(struct ipw_priv *priv)
 
        rc = register_netdev(priv->prom_net_dev);
        if (rc) {
-               free_ieee80211(priv->prom_net_dev, 1);
+               free_ieee80211(priv->prom_net_dev);
                priv->prom_net_dev = NULL;
                return rc;
        }
@@ -11674,7 +11589,7 @@ static void ipw_prom_free(struct ipw_priv *priv)
                return;
 
        unregister_netdev(priv->prom_net_dev);
-       free_ieee80211(priv->prom_net_dev, 1);
+       free_ieee80211(priv->prom_net_dev);
 
        priv->prom_net_dev = NULL;
 }
@@ -11702,7 +11617,7 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
        struct ipw_priv *priv;
        int i;
 
-       net_dev = alloc_ieee80211(sizeof(struct ipw_priv), 0);
+       net_dev = alloc_ieee80211(sizeof(struct ipw_priv));
        if (net_dev == NULL) {
                err = -ENOMEM;
                goto out;
@@ -11850,7 +11765,7 @@ static int __devinit ipw_pci_probe(struct pci_dev *pdev,
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
       out_free_ieee80211:
-       free_ieee80211(priv->net_dev, 0);
+       free_ieee80211(priv->net_dev);
       out:
        return err;
 }
@@ -11917,7 +11832,7 @@ static void __devexit ipw_pci_remove(struct pci_dev *pdev)
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
-       free_ieee80211(priv->net_dev, 0);
+       free_ieee80211(priv->net_dev);
        free_firmware();
 }
 
index bf45391172f3a3a6b739826e8e7816176ba29d0d..1e334ff6bd52263a0dac8447b479aad012c7395f 100644 (file)
@@ -31,7 +31,6 @@
 #include <linux/ieee80211.h>
 
 #include <net/lib80211.h>
-#include <net/cfg80211.h>
 
 #define LIBIPW_VERSION "git-1.1.13"
 
@@ -784,15 +783,12 @@ struct libipw_geo {
 
 struct libipw_device {
        struct net_device *dev;
-       struct wireless_dev wdev;
        struct libipw_security sec;
 
        /* Bookkeeping structures */
        struct libipw_stats ieee_stats;
 
        struct libipw_geo geo;
-       struct ieee80211_supported_band bg_band;
-       struct ieee80211_supported_band a_band;
 
        /* Probe / Beacon management */
        struct list_head network_free_list;
@@ -1018,8 +1014,8 @@ static inline int libipw_is_cck_rate(u8 rate)
 }
 
 /* ieee80211.c */
-extern void free_ieee80211(struct net_device *dev, int monitor);
-extern struct net_device *alloc_ieee80211(int sizeof_priv, int monitor);
+extern void free_ieee80211(struct net_device *dev);
+extern struct net_device *alloc_ieee80211(int sizeof_priv);
 extern int libipw_change_mtu(struct net_device *dev, int new_mtu);
 
 extern void libipw_networks_age(struct libipw_device *ieee,
index a0e9f6aed7daf8568ff27b0d484d17fc9df5da03..eb2b60834c1746e9e8c50046eef6759d7380f21e 100644 (file)
@@ -62,9 +62,6 @@ MODULE_DESCRIPTION(DRV_DESCRIPTION);
 MODULE_AUTHOR(DRV_COPYRIGHT);
 MODULE_LICENSE("GPL");
 
-struct cfg80211_ops libipw_config_ops = { };
-void *libipw_wiphy_privid = &libipw_wiphy_privid;
-
 static int libipw_networks_allocate(struct libipw_device *ieee)
 {
        if (ieee->networks)
@@ -143,7 +140,7 @@ int libipw_change_mtu(struct net_device *dev, int new_mtu)
 }
 EXPORT_SYMBOL(libipw_change_mtu);
 
-struct net_device *alloc_ieee80211(int sizeof_priv, int monitor)
+struct net_device *alloc_ieee80211(int sizeof_priv)
 {
        struct libipw_device *ieee;
        struct net_device *dev;
@@ -160,31 +157,10 @@ struct net_device *alloc_ieee80211(int sizeof_priv, int monitor)
 
        ieee->dev = dev;
 
-       if (!monitor) {
-               ieee->wdev.wiphy = wiphy_new(&libipw_config_ops, 0);
-               if (!ieee->wdev.wiphy) {
-                       LIBIPW_ERROR("Unable to allocate wiphy.\n");
-                       goto failed_free_netdev;
-               }
-
-               ieee->dev->ieee80211_ptr = &ieee->wdev;
-               ieee->wdev.iftype = NL80211_IFTYPE_STATION;
-
-               /* Fill-out wiphy structure bits we know...  Not enough info
-                  here to call set_wiphy_dev or set MAC address or channel info
-                  -- have to do that in ->ndo_init... */
-               ieee->wdev.wiphy->privid = libipw_wiphy_privid;
-
-               ieee->wdev.wiphy->max_scan_ssids = 1;
-               ieee->wdev.wiphy->max_scan_ie_len = 0;
-               ieee->wdev.wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION)
-                                               | BIT(NL80211_IFTYPE_ADHOC);
-       }
-
        err = libipw_networks_allocate(ieee);
        if (err) {
                LIBIPW_ERROR("Unable to allocate beacon storage: %d\n", err);
-               goto failed_free_wiphy;
+               goto failed_free_netdev;
        }
        libipw_networks_initialize(ieee);
 
@@ -217,31 +193,19 @@ struct net_device *alloc_ieee80211(int sizeof_priv, int monitor)
 
        return dev;
 
-failed_free_wiphy:
-       if (!monitor)
-               wiphy_free(ieee->wdev.wiphy);
 failed_free_netdev:
        free_netdev(dev);
 failed:
        return NULL;
 }
 
-void free_ieee80211(struct net_device *dev, int monitor)
+void free_ieee80211(struct net_device *dev)
 {
        struct libipw_device *ieee = netdev_priv(dev);
 
        lib80211_crypt_info_free(&ieee->crypt_info);
 
        libipw_networks_free(ieee);
-
-       /* free cfg80211 resources */
-       if (!monitor) {
-               wiphy_unregister(ieee->wdev.wiphy);
-               kfree(ieee->a_band.channels);
-               kfree(ieee->bg_band.channels);
-               wiphy_free(ieee->wdev.wiphy);
-       }
-
        free_netdev(dev);
 }
 
index 2716b91ba9fa753b879c80e0de57ae430356a575..950267ab556a96a5abfd225866a9267955900a28 100644 (file)
@@ -161,5 +161,6 @@ struct iwl_cfg iwl1000_bgn_cfg = {
        .max_ll_items = OTP_MAX_LL_ITEMS_1000,
        .shadow_ram_support = false,
        .ht_greenfield_support = true,
+       .use_rts_for_ht = true, /* use rts/cts protection */
 };
 
index c295b8ee922896ab16be8812bef15b0641b7cf2f..1473452ba22fc21a18c171f39a20e858d97aab3c 100644 (file)
@@ -175,6 +175,7 @@ struct iwl_cfg iwl6000h_2agn_cfg = {
        .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
        .shadow_ram_support = true,
        .ht_greenfield_support = true,
+       .use_rts_for_ht = true, /* use rts/cts protection */
 };
 
 /*
@@ -198,6 +199,7 @@ struct iwl_cfg iwl6000i_2agn_cfg = {
        .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
        .shadow_ram_support = true,
        .ht_greenfield_support = true,
+       .use_rts_for_ht = true, /* use rts/cts protection */
 };
 
 struct iwl_cfg iwl6050_2agn_cfg = {
@@ -218,6 +220,7 @@ struct iwl_cfg iwl6050_2agn_cfg = {
        .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
        .shadow_ram_support = true,
        .ht_greenfield_support = true,
+       .use_rts_for_ht = true, /* use rts/cts protection */
 };
 
 struct iwl_cfg iwl6000_3agn_cfg = {
@@ -238,6 +241,7 @@ struct iwl_cfg iwl6000_3agn_cfg = {
        .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
        .shadow_ram_support = true,
        .ht_greenfield_support = true,
+       .use_rts_for_ht = true, /* use rts/cts protection */
 };
 
 struct iwl_cfg iwl6050_3agn_cfg = {
@@ -258,6 +262,7 @@ struct iwl_cfg iwl6050_3agn_cfg = {
        .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
        .shadow_ram_support = true,
        .ht_greenfield_support = true,
+       .use_rts_for_ht = true, /* use rts/cts protection */
 };
 
 MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
index 346dc06fa7b77f79f5cdd986653e19b8d547bf56..81726ee32858e131e45f9bb57ba3be62de9d4247 100644 (file)
@@ -418,6 +418,15 @@ static void rs_tl_turn_on_agg(struct iwl_priv *priv, u8 tid,
        else if (tid == IWL_AGG_ALL_TID)
                for (tid = 0; tid < TID_MAX_LOAD_COUNT; tid++)
                        rs_tl_turn_on_agg_for_tid(priv, lq_data, tid, sta);
+       if (priv->cfg->use_rts_for_ht) {
+               /*
+                * switch to RTS/CTS if it is the preferred protection method
+                * for HT traffic
+                */
+               IWL_DEBUG_HT(priv, "use RTS/CTS protection for HT\n");
+               priv->staging_rxon.flags &= ~RXON_FLG_SELF_CTS_EN;
+               iwlcore_commit_rxon(priv);
+       }
 }
 
 static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
index eaafae091f5bd48bfc3eec8d323e2a210864c54a..921dc4a26fe2eef00455865f996cf91d67d17bd3 100644 (file)
@@ -116,9 +116,6 @@ int iwl_commit_rxon(struct iwl_priv *priv)
 
        /* always get timestamp with Rx frame */
        priv->staging_rxon.flags |= RXON_FLG_TSF2HOST_MSK;
-       /* allow CTS-to-self if possible. this is relevant only for
-        * 5000, but will not damage 4965 */
-       priv->staging_rxon.flags |= RXON_FLG_SELF_CTS_EN;
 
        ret = iwl_check_rxon_cmd(priv);
        if (ret) {
@@ -218,6 +215,13 @@ int iwl_commit_rxon(struct iwl_priv *priv)
                                        "Could not send WEP static key.\n");
                }
 
+               /*
+                * Allow CTS-to-self if possible for the new association.
+                * This is relevant only for the 5000 series and up,
+                * but will not damage 4965.
+                */
+               priv->staging_rxon.flags |= RXON_FLG_SELF_CTS_EN;
+
                /* Apply the new configuration
                 * RXON assoc doesn't clear the station table in uCode,
                 */
index e50103a956b1052015ddcd072e557e4f694c0540..7754538c2194f3068019295270f1c6e4527a1b64 100644 (file)
@@ -213,6 +213,7 @@ struct iwl_mod_params {
  * @pa_type: used by 6000 series only to identify the type of Power Amplifier
  * @max_ll_items: max number of OTP blocks
  * @shadow_ram_support: shadow support for OTP memory
+ * @use_rts_for_ht: use rts/cts protection for HT traffic
  *
  * We enable the driver to be backward compatible wrt API version. The
  * driver specifies which APIs it supports (with @ucode_api_max being the
@@ -255,6 +256,7 @@ struct iwl_cfg {
        const bool shadow_ram_support;
        const bool ht_greenfield_support;
        const bool broken_powersave;
+       bool use_rts_for_ht;
 };
 
 /***************************
index fb9bcfa6d9471e057bac4a2d7e98237c451c03e0..b7e196e3c8d37e1671171e2cacecf86a99306dcc 100644 (file)
@@ -1277,8 +1277,16 @@ int iwl_tx_agg_stop(struct iwl_priv *priv , const u8 *ra, u16 tid)
                return -ENXIO;
        }
 
+       if (priv->stations[sta_id].tid[tid].agg.state ==
+                               IWL_EMPTYING_HW_QUEUE_ADDBA) {
+               IWL_DEBUG_HT(priv, "AGG stop before setup done\n");
+               ieee80211_stop_tx_ba_cb_irqsafe(priv->hw, ra, tid);
+               priv->stations[sta_id].tid[tid].agg.state = IWL_AGG_OFF;
+               return 0;
+       }
+
        if (priv->stations[sta_id].tid[tid].agg.state != IWL_AGG_ON)
-               IWL_WARN(priv, "Stopping AGG while state not IWL_AGG_ON\n");
+               IWL_WARN(priv, "Stopping AGG while state not ON or starting\n");
 
        tid_data = &priv->stations[sta_id].tid[tid];
        ssn = (tid_data->seq_number & IEEE80211_SCTL_SEQ) >> 4;
index 039b555e4d7630336f4ea9750a796566f9255cdf..53d56ab83c03cb54411885b5f26e67e943030b46 100644 (file)
@@ -169,16 +169,19 @@ static int lbs_ethtool_set_wol(struct net_device *dev,
        struct lbs_private *priv = dev->ml_priv;
        uint32_t criteria = 0;
 
-       if (priv->wol_criteria == 0xffffffff && wol->wolopts)
-               return -EOPNOTSUPP;
-
        if (wol->wolopts & ~(WAKE_UCAST|WAKE_MCAST|WAKE_BCAST|WAKE_PHY))
                return -EOPNOTSUPP;
 
-       if (wol->wolopts & WAKE_UCAST) criteria |= EHS_WAKE_ON_UNICAST_DATA;
-       if (wol->wolopts & WAKE_MCAST) criteria |= EHS_WAKE_ON_MULTICAST_DATA;
-       if (wol->wolopts & WAKE_BCAST) criteria |= EHS_WAKE_ON_BROADCAST_DATA;
-       if (wol->wolopts & WAKE_PHY)   criteria |= EHS_WAKE_ON_MAC_EVENT;
+       if (wol->wolopts & WAKE_UCAST)
+               criteria |= EHS_WAKE_ON_UNICAST_DATA;
+       if (wol->wolopts & WAKE_MCAST)
+               criteria |= EHS_WAKE_ON_MULTICAST_DATA;
+       if (wol->wolopts & WAKE_BCAST)
+               criteria |= EHS_WAKE_ON_BROADCAST_DATA;
+       if (wol->wolopts & WAKE_PHY)
+               criteria |= EHS_WAKE_ON_MAC_EVENT;
+       if (wol->wolopts == 0)
+               criteria |= EHS_REMOVE_WAKEUP;
 
        return lbs_host_sleep_cfg(priv, criteria, (struct wol_config *)NULL);
 }
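
The rewritten WoL handler above maps each WAKE_* bit to its firmware wakeup criterion and, when userspace clears every bit, now sends an explicit EHS_REMOVE_WAKEUP so the firmware actually forgets its old criteria instead of receiving an empty mask. A minimal user-space sketch of that mapping (the constants here are illustrative stand-ins, not the driver's values):

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the WAKE_* / EHS_* constants above. */
#define WAKE_UCAST                 0x1u
#define EHS_WAKE_ON_UNICAST_DATA   0x01u
#define EHS_REMOVE_WAKEUP          0x80u

static uint32_t map_wol(uint32_t wolopts)
{
        uint32_t criteria = 0;

        if (wolopts & WAKE_UCAST)
                criteria |= EHS_WAKE_ON_UNICAST_DATA;
        if (wolopts == 0)       /* nothing requested: clear old rules */
                criteria |= EHS_REMOVE_WAKEUP;
        return criteria;
}

int main(void)
{
        printf("%#x\n", (unsigned)map_wol(WAKE_UCAST)); /* 0x1  */
        printf("%#x\n", (unsigned)map_wol(0));          /* 0x80 */
        return 0;
}
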
index 17e199546eebca34e7089e93382739005e7241fc..92af9b96bb7a2bb51f3adba3316db4ca86bf1beb 100644 (file)
@@ -426,12 +426,16 @@ static const char p54u_romboot_3887[] = "~~~~";
 static int p54u_firmware_reset_3887(struct ieee80211_hw *dev)
 {
        struct p54u_priv *priv = dev->priv;
-       u8 buf[4];
+       u8 *buf;
        int ret;
 
-       memcpy(&buf, p54u_romboot_3887, sizeof(buf));
+       buf = kmalloc(4, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+       memcpy(buf, p54u_romboot_3887, 4);
        ret = p54u_bulk_msg(priv, P54U_PIPE_DATA,
-                           buf, sizeof(buf));
+                           buf, 4);
+       kfree(buf);
        if (ret)
                dev_err(&priv->udev->dev, "(p54usb) unable to jump to "
                        "boot ROM (%d)!\n", ret);
index 9fab13e4004e805aff19fa9796ee08956a5bafcd..cad8037ab2af3a4d136194dcdf3e322d65495ad6 100644 (file)
@@ -18,6 +18,7 @@
 #include <net/mac80211.h>
 
 #include "rtl8187.h"
+#include "rtl8187_rfkill.h"
 
 static bool rtl8187_is_radio_enabled(struct rtl8187_priv *priv)
 {
index e5f8fc164fd3329218d6d15a43ba76072c0079b3..b952ebc7a78b67776d985f006fb33633be43b80b 100644 (file)
@@ -609,6 +609,9 @@ int __init check_zero_address(void)
                                     dmi_get_system_info(DMI_BIOS_VENDOR),
                                     dmi_get_system_info(DMI_BIOS_VERSION),
                                     dmi_get_system_info(DMI_PRODUCT_VERSION));
+#ifdef CONFIG_DMAR
+                               dmar_disabled = 1;
+#endif
                                return 0;
                        }
                        break;
index 0a8f735f6c4a9fd4f79b79ef1c341a6b247be73b..ab64522aaa6433df3e1fcb3c2d92c5633d2c72ad 100644 (file)
@@ -52,7 +52,7 @@
  */
 #undef START_IN_KERNEL_MODE
 
-#define DRV_VER "0.5.17"
+#define DRV_VER "0.5.18"
 
 /*
  * According to the Atom N270 datasheet,
@@ -61,7 +61,7 @@
  * measured by the on-die thermal monitor are within 0 <= Tj <= 90. So,
  * assume 89°C is critical temperature.
  */
-#define ACERHDF_TEMP_CRIT 89
+#define ACERHDF_TEMP_CRIT 89000
 #define ACERHDF_FAN_OFF 0
 #define ACERHDF_FAN_AUTO 1
 
@@ -69,7 +69,7 @@
  * No matter what value the user puts into the fanon variable, turn on the fan
  * at 80 degree Celsius to prevent hardware damage
  */
-#define ACERHDF_MAX_FANON 80
+#define ACERHDF_MAX_FANON 80000
 
 /*
  * Maximum interval between two temperature checks is 15 seconds, as the die
@@ -85,8 +85,8 @@ static int kernelmode;
 #endif
 
 static unsigned int interval = 10;
-static unsigned int fanon = 63;
-static unsigned int fanoff = 58;
+static unsigned int fanon = 63000;
+static unsigned int fanoff = 58000;
 static unsigned int verbose;
 static unsigned int fanstate = ACERHDF_FAN_AUTO;
 static char force_bios[16];
@@ -171,7 +171,7 @@ static int acerhdf_get_temp(int *temp)
        if (ec_read(bios_cfg->tempreg, &read_temp))
                return -EINVAL;
 
-       *temp = read_temp;
+       *temp = read_temp * 1000;
 
        return 0;
 }
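
The acerhdf changes above are a unit fix: the thermal core's sysfs ABI speaks millidegrees Celsius, while the EC register reports whole degrees, so a raw reading of 89 compared against millidegree thresholds would never register as hot. Every threshold is therefore scaled by 1000 and the reading multiplied to match. A trivial sketch of the conversion:

#include <stdio.h>

/* EC register reports whole degrees; sysfs wants millidegrees. */
static int ec_to_millicelsius(int read_temp)
{
        return read_temp * 1000;
}

int main(void)
{
        printf("%d\n", ec_to_millicelsius(89)); /* 89000 */
        return 0;
}
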
index d93108d148fc14cebff5ca14202ff161bb3551b0..a848c7e20aeb1895faee46832b3d34811ae92d92 100644 (file)
@@ -1680,36 +1680,48 @@ static void tpacpi_remove_driver_attributes(struct device_driver *drv)
                          | (__bv1) << 8 | (__bv2) }
 
 #define TPV_Q_X(__v, __bid1, __bid2, __bv1, __bv2,     \
-               __eid1, __eid2, __ev1, __ev2)           \
+               __eid, __ev1, __ev2)                    \
        { .vendor       = (__v),                        \
          .bios         = TPID(__bid1, __bid2),         \
-         .ec           = TPID(__eid1, __eid2),         \
+         .ec           = __eid,                        \
          .quirks       = (__ev1) << 24 | (__ev2) << 16 \
                          | (__bv1) << 8 | (__bv2) }
 
 #define TPV_QI0(__id1, __id2, __bv1, __bv2) \
        TPV_Q(PCI_VENDOR_ID_IBM, __id1, __id2, __bv1, __bv2)
 
+/* Outdated IBM BIOSes often lack the EC id string */
 #define TPV_QI1(__id1, __id2, __bv1, __bv2, __ev1, __ev2) \
        TPV_Q_X(PCI_VENDOR_ID_IBM, __id1, __id2,        \
-               __bv1, __bv2, __id1, __id2, __ev1, __ev2)
+               __bv1, __bv2, TPID(__id1, __id2),       \
+               __ev1, __ev2),                          \
+       TPV_Q_X(PCI_VENDOR_ID_IBM, __id1, __id2,        \
+               __bv1, __bv2, TPACPI_MATCH_UNKNOWN,     \
+               __ev1, __ev2)
 
+/* Outdated IBM BIOSes often lack the EC id string */
 #define TPV_QI2(__bid1, __bid2, __bv1, __bv2,          \
                __eid1, __eid2, __ev1, __ev2)           \
        TPV_Q_X(PCI_VENDOR_ID_IBM, __bid1, __bid2,      \
-               __bv1, __bv2, __eid1, __eid2, __ev1, __ev2)
+               __bv1, __bv2, TPID(__eid1, __eid2),     \
+               __ev1, __ev2),                          \
+       TPV_Q_X(PCI_VENDOR_ID_IBM, __bid1, __bid2,      \
+               __bv1, __bv2, TPACPI_MATCH_UNKNOWN,     \
+               __ev1, __ev2)
 
 #define TPV_QL0(__id1, __id2, __bv1, __bv2) \
        TPV_Q(PCI_VENDOR_ID_LENOVO, __id1, __id2, __bv1, __bv2)
 
 #define TPV_QL1(__id1, __id2, __bv1, __bv2, __ev1, __ev2) \
        TPV_Q_X(PCI_VENDOR_ID_LENOVO, __id1, __id2,     \
-               __bv1, __bv2, __id1, __id2, __ev1, __ev2)
+               __bv1, __bv2, TPID(__id1, __id2),       \
+               __ev1, __ev2)
 
 #define TPV_QL2(__bid1, __bid2, __bv1, __bv2,          \
                __eid1, __eid2, __ev1, __ev2)           \
        TPV_Q_X(PCI_VENDOR_ID_LENOVO, __bid1, __bid2,   \
-               __bv1, __bv2, __eid1, __eid2, __ev1, __ev2)
+               __bv1, __bv2, TPID(__eid1, __eid2),     \
+               __ev1, __ev2)
 
 static const struct tpacpi_quirk tpacpi_bios_version_qtable[] __initconst = {
        /*  Numeric models ------------------ */
@@ -6313,7 +6325,7 @@ static int brightness_write(char *buf)
         * Doing it this way makes the syscall restartable in case of EINTR
         */
        rc = brightness_set(level);
-       return (rc == -EINTR)? ERESTARTSYS : rc;
+       return (rc == -EINTR)? -ERESTARTSYS : rc;
 }
 
 static struct ibm_struct brightness_driver_data = {
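
The one-character fix above restores the kernel convention that errors are returned as negative errno values; a bare ERESTARTSYS is a positive number that callers testing rc < 0 would treat as success. The same class of sign fix recurs later in this merge (-ETIMEDOUT in the stmp SPI driver, -ENODEV in the framebuffer probe). A small sketch of the convention, using 512 for ERESTARTSYS since user space does not define it:

#include <errno.h>
#include <stdio.h>

#define RESTARTSYS 512  /* kernel's ERESTARTSYS; not in user space */

/* Kernel-style call: 0 on success, negative errno on failure. */
static int do_set(int fail)
{
        return fail ? -EINTR : 0;
}

static int write_value(int fail)
{
        int rc = do_set(fail);

        /* Returning a bare, positive ERESTARTSYS here would look
         * like success to any caller that only checks rc < 0. */
        return (rc == -EINTR) ? -RESTARTSYS : rc;
}

int main(void)
{
        printf("%d\n", write_value(1));         /* -512 */
        return 0;
}
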
index b2f6949bc8d33996c0cc294b2f4d98367f2257ec..bd34b0db2d6b3bdd29f1616687d94299ebc33a68 100644 (file)
@@ -41,6 +41,8 @@ u32 *bfi_image_cb;
 
 #define        BFAD_FW_FILE_CT "ctfw.bin"
 #define        BFAD_FW_FILE_CB "cbfw.bin"
+MODULE_FIRMWARE(BFAD_FW_FILE_CT);
+MODULE_FIRMWARE(BFAD_FW_FILE_CB);
 
 u32 *
 bfad_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
index 158c99243c089f9fa104872d1eb2a8024e3cb6ba..55d012a9a6681d74203c09944840f32127411004 100644 (file)
@@ -948,7 +948,7 @@ bfad_os_fc_host_init(struct bfad_im_port_s *im_port)
        if (bfad_supported_fc4s & (BFA_PORT_ROLE_FCP_IM | BFA_PORT_ROLE_FCP_TM))
                /* For FCP type 0x08 */
                fc_host_supported_fc4s(host)[2] = 1;
-       if (bfad_supported_fc4s | BFA_PORT_ROLE_FCP_IPFC)
+       if (bfad_supported_fc4s & BFA_PORT_ROLE_FCP_IPFC)
                /* For LLC/SNAP type 0x05 */
                fc_host_supported_fc4s(host)[3] = 0x20;
        /* For fibre channel services type 0x20 */
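
The bfad hunk fixes a classic bitwise slip: bfad_supported_fc4s | BFA_PORT_ROLE_FCP_IPFC is always non-zero, so the IPFC FC4 type was advertised unconditionally; & actually tests the bit. A two-line demonstration:

#include <stdio.h>

#define ROLE_IPFC 0x04u

int main(void)
{
        unsigned roles = 0x01;  /* IPFC bit not set */

        printf("%d\n", !!(roles | ROLE_IPFC)); /* 1: always true, the bug */
        printf("%d\n", !!(roles & ROLE_IPFC)); /* 0: real bit test        */
        return 0;
}
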
index 185e6bc4dd40076519dea524d9687fa8cc0f51f5..9e8fce0f0c1b3f1022b5658a2ad503fcc810abf6 100644 (file)
@@ -2900,7 +2900,7 @@ static int gdth_read_event(gdth_ha_str *ha, int handle, gdth_evt_str *estr)
         eindex = handle;
     estr->event_source = 0;
 
-    if (eindex >= MAX_EVENTS) {
+    if (eindex < 0 || eindex >= MAX_EVENTS) {
         spin_unlock_irqrestore(&ha->smp_lock, flags);
         return eindex;
     }
index 5fd2da494d087da334b65b9bd88b2945191184ff..c968cc31cd862514ae432eee7573d6ade5277863 100644 (file)
@@ -164,8 +164,8 @@ void scsi_remove_host(struct Scsi_Host *shost)
                        return;
                }
        spin_unlock_irqrestore(shost->host_lock, flags);
-       mutex_unlock(&shost->scan_mutex);
        scsi_forget_host(shost);
+       mutex_unlock(&shost->scan_mutex);
        scsi_proc_host_rm(shost);
 
        spin_lock_irqsave(shost->host_lock, flags);
index 5f045505a1f4a48eb5f5a1d17ff61508f7cadf7e..76d294fc78461c13bdf6aae03dc6c2fc2273a0cf 100644 (file)
@@ -4188,6 +4188,25 @@ static irqreturn_t ipr_handle_other_interrupt(struct ipr_ioa_cfg *ioa_cfg,
        return rc;
 }
 
+/**
+ * ipr_isr_eh - Interrupt service routine error handler
+ * @ioa_cfg:   ioa config struct
+ * @msg:       message to log
+ *
+ * Return value:
+ *     none
+ **/
+static void ipr_isr_eh(struct ipr_ioa_cfg *ioa_cfg, char *msg)
+{
+       ioa_cfg->errors_logged++;
+       dev_err(&ioa_cfg->pdev->dev, "%s\n", msg);
+
+       if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
+               ioa_cfg->sdt_state = GET_DUMP;
+
+       ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+}
+
 /**
  * ipr_isr - Interrupt service routine
  * @irq:       irq number
@@ -4203,6 +4222,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
        volatile u32 int_reg, int_mask_reg;
        u32 ioasc;
        u16 cmd_index;
+       int num_hrrq = 0;
        struct ipr_cmnd *ipr_cmd;
        irqreturn_t rc = IRQ_NONE;
 
@@ -4233,13 +4253,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
                                     IPR_HRRQ_REQ_RESP_HANDLE_MASK) >> IPR_HRRQ_REQ_RESP_HANDLE_SHIFT;
 
                        if (unlikely(cmd_index >= IPR_NUM_CMD_BLKS)) {
-                               ioa_cfg->errors_logged++;
-                               dev_err(&ioa_cfg->pdev->dev, "Invalid response handle from IOA\n");
-
-                               if (WAIT_FOR_DUMP == ioa_cfg->sdt_state)
-                                       ioa_cfg->sdt_state = GET_DUMP;
-
-                               ipr_initiate_ioa_reset(ioa_cfg, IPR_SHUTDOWN_NONE);
+                               ipr_isr_eh(ioa_cfg, "Invalid response handle from IOA");
                                spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
                                return IRQ_HANDLED;
                        }
@@ -4266,8 +4280,18 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 
                if (ipr_cmd != NULL) {
                        /* Clear the PCI interrupt */
-                       writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
-                       int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
+                       do {
+                               writel(IPR_PCII_HRRQ_UPDATED, ioa_cfg->regs.clr_interrupt_reg);
+                               int_reg = readl(ioa_cfg->regs.sense_interrupt_reg) & ~int_mask_reg;
+                       } while (int_reg & IPR_PCII_HRRQ_UPDATED &&
+                                       num_hrrq++ < IPR_MAX_HRRQ_RETRIES);
+
+                       if (int_reg & IPR_PCII_HRRQ_UPDATED) {
+                               ipr_isr_eh(ioa_cfg, "Error clearing HRRQ");
+                               spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+                               return IRQ_HANDLED;
+                       }
+
                } else
                        break;
        }
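
The ipr changes factor the ISR's error handling into ipr_isr_eh() and, more importantly, turn the single write-to-clear of the HRRQ interrupt into a bounded retry loop: write, re-read, and give up after IPR_MAX_HRRQ_RETRIES attempts with an adapter reset rather than returning with the interrupt still asserted. A sketch of the bounded write-then-verify idiom, with plain variables standing in for the MMIO accessors:

#include <stdio.h>

#define MAX_RETRIES 3
#define FLAG_UPDATED 0x1u

/* Plain variables standing in for writel()/readl() on the chip. */
static unsigned hw_reg = FLAG_UPDATED;

static void write_clear(void)   { hw_reg &= ~FLAG_UPDATED; }
static unsigned read_back(void) { return hw_reg; }

int main(void)
{
        int tries = 0;
        unsigned reg;

        /* Write-to-clear, re-read, and bound the attempts instead
         * of assuming a single write always sticks. */
        do {
                write_clear();
                reg = read_back();
        } while ((reg & FLAG_UPDATED) && tries++ < MAX_RETRIES);

        if (reg & FLAG_UPDATED)
                fprintf(stderr, "stuck interrupt, reset adapter\n");
        return 0;
}
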
index 163245a1c3e5a666b0d97cabbf5813eccb4a6dcb..19bbcf39f0c9fb759af60b670782e3c48dc12e01 100644 (file)
 #define IPR_IOA_MAX_SECTORS                            32767
 #define IPR_VSET_MAX_SECTORS                           512
 #define IPR_MAX_CDB_LEN                                        16
+#define IPR_MAX_HRRQ_RETRIES                           3
 
 #define IPR_DEFAULT_BUS_WIDTH                          16
 #define IPR_80MBs_SCSI_RATE            ((80 * 10) / (IPR_DEFAULT_BUS_WIDTH / 8))
index b3381959acce19ee167c183ccd7b2b9354203c1b..33cf988c8c8a2cd25548ce5be0f67d97ee58e997 100644 (file)
@@ -960,7 +960,6 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
 
                        }
                }
-               res = 0;
        }
 
        return res;
index f7c70e2a8224b442b827f255728a784ceaf0b0c5..0a97bc9074bb2676429a2d777d5597e2ec8645c3 100644 (file)
@@ -1071,7 +1071,7 @@ static struct pmcraid_cmd *pmcraid_init_hcam
 
        ioarcb->data_transfer_length = cpu_to_le32(rcb_size);
 
-       ioadl[0].flags |= cpu_to_le32(IOADL_FLAGS_READ_LAST);
+       ioadl[0].flags |= IOADL_FLAGS_READ_LAST;
        ioadl[0].data_len = cpu_to_le32(rcb_size);
        ioadl[0].address = cpu_to_le32(dma);
 
@@ -2251,7 +2251,7 @@ static void pmcraid_request_sense(struct pmcraid_cmd *cmd)
 
        ioadl->address = cpu_to_le64(cmd->sense_buffer_dma);
        ioadl->data_len = cpu_to_le32(SCSI_SENSE_BUFFERSIZE);
-       ioadl->flags = cpu_to_le32(IOADL_FLAGS_LAST_DESC);
+       ioadl->flags = IOADL_FLAGS_LAST_DESC;
 
        /* request sense might be called as part of error response processing
         * which runs in tasklets context. It is possible that mid-layer might
@@ -3017,7 +3017,7 @@ static int pmcraid_build_ioadl(
                ioadl[i].flags = 0;
        }
        /* setup last descriptor */
-       ioadl[i - 1].flags = cpu_to_le32(IOADL_FLAGS_LAST_DESC);
+       ioadl[i - 1].flags = IOADL_FLAGS_LAST_DESC;
 
        return 0;
 }
@@ -3387,7 +3387,7 @@ static int pmcraid_build_passthrough_ioadls(
        }
 
        /* setup the last descriptor */
-       ioadl[i - 1].flags = cpu_to_le32(IOADL_FLAGS_LAST_DESC);
+       ioadl[i - 1].flags = IOADL_FLAGS_LAST_DESC;
 
        return 0;
 }
@@ -5314,7 +5314,7 @@ static void pmcraid_querycfg(struct pmcraid_cmd *cmd)
                cpu_to_le32(sizeof(struct pmcraid_config_table));
 
        ioadl = &(ioarcb->add_data.u.ioadl[0]);
-       ioadl->flags = cpu_to_le32(IOADL_FLAGS_LAST_DESC);
+       ioadl->flags = IOADL_FLAGS_LAST_DESC;
        ioadl->address = cpu_to_le64(pinstance->cfg_table_bus_addr);
        ioadl->data_len = cpu_to_le32(sizeof(struct pmcraid_config_table));
 
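The pmcraid hunks drop cpu_to_le32() around values assigned to the descriptor's flags field, which appears to be a single byte here: on a big-endian host the swapped 32-bit value carries the flag in its most significant byte, and the assignment to a u8 truncates it to zero. A demonstration of the truncation, with swap32 emulating what cpu_to_le32 does on big-endian:

#include <stdint.h>
#include <stdio.h>

/* What cpu_to_le32() amounts to on a big-endian host. */
static uint32_t swap32(uint32_t v)
{
        return (v >> 24) | ((v >> 8) & 0xff00u) |
               ((v << 8) & 0xff0000u) | (v << 24);
}

int main(void)
{
        uint32_t last_desc = 0x80;      /* illustrative flag value */
        uint8_t flags;

        flags = (uint8_t)last_desc;             /* correct: 0x80 */
        printf("%#x\n", flags);

        flags = (uint8_t)swap32(last_desc);     /* truncated: 0 */
        printf("%#x\n", flags);
        return 0;
}
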
index 0547a7f44d4277768f6c4fc4b407fedd72df39d0..47291bcff0d52c35b84c089b892a84da46f52990 100644 (file)
@@ -952,16 +952,6 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
        return SCSI_SCAN_LUN_PRESENT;
 }
 
-static inline void scsi_destroy_sdev(struct scsi_device *sdev)
-{
-       scsi_device_set_state(sdev, SDEV_DEL);
-       if (sdev->host->hostt->slave_destroy)
-               sdev->host->hostt->slave_destroy(sdev);
-       transport_destroy_device(&sdev->sdev_gendev);
-       put_device(&sdev->sdev_dev);
-       put_device(&sdev->sdev_gendev);
-}
-
 #ifdef CONFIG_SCSI_LOGGING
 /** 
  * scsi_inq_str - print INQUIRY data from min to max index, strip trailing whitespace
@@ -1139,7 +1129,7 @@ static int scsi_probe_and_add_lun(struct scsi_target *starget,
                        }
                }
        } else
-               scsi_destroy_sdev(sdev);
+               __scsi_remove_device(sdev);
  out:
        return res;
 }
@@ -1500,7 +1490,7 @@ static int scsi_report_lun_scan(struct scsi_target *starget, int bflags,
                /*
                 * the sdev we used didn't appear in the report luns scan
                 */
-               scsi_destroy_sdev(sdev);
+               __scsi_remove_device(sdev);
        return ret;
 }
 
@@ -1710,7 +1700,7 @@ static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
        shost_for_each_device(sdev, shost) {
                if (!scsi_host_scan_allowed(shost) ||
                    scsi_sysfs_add_sdev(sdev) != 0)
-                       scsi_destroy_sdev(sdev);
+                       __scsi_remove_device(sdev);
        }
 }
 
@@ -1943,7 +1933,7 @@ void scsi_free_host_dev(struct scsi_device *sdev)
 {
        BUG_ON(sdev->id != sdev->host->this_id);
 
-       scsi_destroy_sdev(sdev);
+       __scsi_remove_device(sdev);
 }
 EXPORT_SYMBOL(scsi_free_host_dev);
 
index 5c7eb63a19d13732514e0bd3f50ade6ab380c4d5..392d8db33905cbe31f1e2f96644552d9b692eb27 100644 (file)
@@ -854,82 +854,73 @@ int scsi_sysfs_add_sdev(struct scsi_device *sdev)
        transport_configure_device(&starget->dev);
        error = device_add(&sdev->sdev_gendev);
        if (error) {
-               put_device(sdev->sdev_gendev.parent);
                printk(KERN_INFO "error 1\n");
-               return error;
+               goto out_remove;
        }
        error = device_add(&sdev->sdev_dev);
        if (error) {
                printk(KERN_INFO "error 2\n");
-               goto clean_device;
+               device_del(&sdev->sdev_gendev);
+               goto out_remove;
        }
+       transport_add_device(&sdev->sdev_gendev);
+       sdev->is_visible = 1;
 
        /* create queue files, which may be writable, depending on the host */
        if (sdev->host->hostt->change_queue_depth)
                error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_depth_rw);
        else
                error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_depth);
-       if (error) {
-               __scsi_remove_device(sdev);
-               goto out;
-       }
+       if (error)
+               goto out_remove;
+
        if (sdev->host->hostt->change_queue_type)
                error = device_create_file(&sdev->sdev_gendev, &sdev_attr_queue_type_rw);
        else
                error = device_create_file(&sdev->sdev_gendev, &dev_attr_queue_type);
-       if (error) {
-               __scsi_remove_device(sdev);
-               goto out;
-       }
+       if (error)
+               goto out_remove;
 
        error = bsg_register_queue(rq, &sdev->sdev_gendev, NULL, NULL);
 
        if (error)
+               /* we're treating error on bsg register as non-fatal,
+                * so pretend nothing went wrong */
                sdev_printk(KERN_INFO, sdev,
                            "Failed to register bsg queue, errno=%d\n", error);
 
-       /* we're treating error on bsg register as non-fatal, so pretend
-        * nothing went wrong */
-       error = 0;
-
        /* add additional host specific attributes */
        if (sdev->host->hostt->sdev_attrs) {
                for (i = 0; sdev->host->hostt->sdev_attrs[i]; i++) {
                        error = device_create_file(&sdev->sdev_gendev,
                                        sdev->host->hostt->sdev_attrs[i]);
-                       if (error) {
-                               __scsi_remove_device(sdev);
-                               goto out;
-                       }
+                       if (error)
+                               goto out_remove;
                }
        }
 
-       transport_add_device(&sdev->sdev_gendev);
- out:
-       return error;
-
- clean_device:
-       scsi_device_set_state(sdev, SDEV_CANCEL);
-
-       device_del(&sdev->sdev_gendev);
-       transport_destroy_device(&sdev->sdev_gendev);
-       put_device(&sdev->sdev_dev);
-       put_device(&sdev->sdev_gendev);
+       return 0;
 
+ out_remove:
+       __scsi_remove_device(sdev);
        return error;
+
 }
 
 void __scsi_remove_device(struct scsi_device *sdev)
 {
        struct device *dev = &sdev->sdev_gendev;
 
-       if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0)
-               return;
+       if (sdev->is_visible) {
+               if (scsi_device_set_state(sdev, SDEV_CANCEL) != 0)
+                       return;
 
-       bsg_unregister_queue(sdev->request_queue);
-       device_unregister(&sdev->sdev_dev);
-       transport_remove_device(dev);
-       device_del(dev);
+               bsg_unregister_queue(sdev->request_queue);
+               device_unregister(&sdev->sdev_dev);
+               transport_remove_device(dev);
+               device_del(dev);
+       } else
+               put_device(&sdev->sdev_dev);
        scsi_device_set_state(sdev, SDEV_DEL);
        if (sdev->host->hostt->slave_destroy)
                sdev->host->hostt->slave_destroy(sdev);
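
The scsi_sysfs_add_sdev() rework above replaces several hand-rolled failure paths with a single goto out_remove that funnels into __scsi_remove_device(), and the new is_visible flag lets that one teardown routine know whether the sysfs devices were ever added. A sketch of the unified-unwind idiom (step and teardown are hypothetical placeholders):

#include <stdio.h>

/* Hypothetical setup steps; 'fail' selects which one breaks. */
static int step(int id, int fail)
{
        return (fail == id) ? -1 : 0;
}

static void teardown(void)
{
        /* Mirrors __scsi_remove_device(): safe for partial state. */
        puts("teardown");
}

static int setup(int fail)
{
        int err;

        err = step(1, fail);
        if (err)
                goto out_remove;
        err = step(2, fail);
        if (err)
                goto out_remove;
        err = step(3, fail);
        if (err)
                goto out_remove;
        return 0;

out_remove:
        /* One unwind path instead of a copy per failure site. */
        teardown();
        return err;
}

int main(void)
{
        return setup(2) ? 1 : 0;
}
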
index a67fed10598acfc3dd00234d3f3222470d18a9f9..c6f70dae9b2eecdb5a602bc94520d3da032b346d 100644 (file)
@@ -3656,6 +3656,7 @@ fc_bsg_host_dispatch(struct request_queue *q, struct Scsi_Host *shost,
 fail_host_msg:
        /* return the errno failure code as the only status */
        BUG_ON(job->reply_len < sizeof(uint32_t));
+       job->reply->reply_payload_rcv_len = 0;
        job->reply->result = ret;
        job->reply_len = sizeof(uint32_t);
        fc_bsg_jobdone(job);
@@ -3741,6 +3742,7 @@ check_bidi:
 fail_rport_msg:
        /* return the errno failure code as the only status */
        BUG_ON(job->reply_len < sizeof(uint32_t));
+       job->reply->reply_payload_rcv_len = 0;
        job->reply->result = ret;
        job->reply_len = sizeof(uint32_t);
        fc_bsg_jobdone(job);
@@ -3797,6 +3799,7 @@ fc_bsg_request_handler(struct request_queue *q, struct Scsi_Host *shost,
                /* check if we have the msgcode value at least */
                if (job->request_len < sizeof(uint32_t)) {
                        BUG_ON(job->reply_len < sizeof(uint32_t));
+                       job->reply->reply_payload_rcv_len = 0;
                        job->reply->result = -ENOMSG;
                        job->reply_len = sizeof(uint32_t);
                        fc_bsg_jobdone(job);
index 88da977457102e78729781f89a5a8f82f5e2f80a..84be62149c6c92f9de34c1708828398dfba586d9 100644 (file)
@@ -418,7 +418,7 @@ error:
                  __func__, virt, phys, be32_to_cpu(sdt->ref_tag),
                  be16_to_cpu(sdt->app_tag));
 
-       return -EIO;
+       return -EILSEQ;
 }
 
 /*
index a2d4a19550ab9be4c5fbf99abf93c7e68e248ee1..ed7d958b0a01fa96a2f4a746189090cdce45bc6d 100644 (file)
@@ -53,20 +53,21 @@ void sunserial_unregister_minors(struct uart_driver *drv, int count)
 EXPORT_SYMBOL(sunserial_unregister_minors);
 
 int sunserial_console_match(struct console *con, struct device_node *dp,
-                           struct uart_driver *drv, int line)
+                           struct uart_driver *drv, int line, bool ignore_line)
 {
-       int off;
-
        if (!con || of_console_device != dp)
                return 0;
 
-       off = 0;
-       if (of_console_options &&
-           *of_console_options == 'b')
-               off = 1;
+       if (!ignore_line) {
+               int off = 0;
 
-       if ((line & 1) != off)
-               return 0;
+               if (of_console_options &&
+                   *of_console_options == 'b')
+                       off = 1;
+
+               if ((line & 1) != off)
+                       return 0;
+       }
 
        con->index = line;
        drv->cons = con;
@@ -76,23 +77,24 @@ int sunserial_console_match(struct console *con, struct device_node *dp,
 }
 EXPORT_SYMBOL(sunserial_console_match);
 
-void
-sunserial_console_termios(struct console *con)
+void sunserial_console_termios(struct console *con, struct device_node *uart_dp)
 {
-       struct device_node *dp;
-       const char *od, *mode, *s;
+       const char *mode, *s;
        char mode_prop[] = "ttyX-mode";
        int baud, bits, stop, cflag;
        char parity;
 
-       dp = of_find_node_by_path("/options");
-       od = of_get_property(dp, "output-device", NULL);
-       if (!strcmp(od, "rsc")) {
-               mode = of_get_property(of_console_device,
+       if (!strcmp(uart_dp->name, "rsc") ||
+           !strcmp(uart_dp->name, "rsc-console") ||
+           !strcmp(uart_dp->name, "rsc-control")) {
+               mode = of_get_property(uart_dp,
                                       "ssp-console-modes", NULL);
                if (!mode)
                        mode = "115200,8,n,1,-";
+       } else if (!strcmp(uart_dp->name, "lom-console")) {
+               mode = "9600,8,n,1,-";
        } else {
+               struct device_node *dp;
                char c;
 
                c = 'a';
@@ -101,6 +103,7 @@ sunserial_console_termios(struct console *con)
 
                mode_prop[3] = c;
 
+               dp = of_find_node_by_path("/options");
                mode = of_get_property(dp, mode_prop, NULL);
                if (!mode)
                        mode = "9600,8,n,1,-";
index 042668aa602e756f00d8c011dbcc4307c04709ef..db2057936c31ab6b2f981223e7e86e482364cd6e 100644 (file)
@@ -26,7 +26,8 @@ extern int sunserial_register_minors(struct uart_driver *, int);
 extern void sunserial_unregister_minors(struct uart_driver *, int);
 
 extern int sunserial_console_match(struct console *, struct device_node *,
-                                  struct uart_driver *, int);
-extern void sunserial_console_termios(struct console *);
+                                  struct uart_driver *, int, bool);
+extern void sunserial_console_termios(struct console *,
+                                     struct device_node *);
 
 #endif /* !(_SERIAL_SUN_H) */
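
The sunserial changes below and above thread a new ignore_line flag through console matching: ports normally pair as even/odd lines checked against the firmware's 'a'/'b' output-device option, but consoles such as rsc-console and lom-console carry no such parity and must match regardless. A reduced sketch of the predicate (names illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Reduced form of the matching rule: even/odd line parity is
 * checked unless the caller asked for it to be ignored. */
static bool line_matches(int line, bool option_b, bool ignore_line)
{
        if (!ignore_line) {
                int off = option_b ? 1 : 0;

                if ((line & 1) != off)
                        return false;
        }
        return true;
}

int main(void)
{
        printf("%d\n", line_matches(1, false, false));  /* 0 */
        printf("%d\n", line_matches(1, false, true));   /* 1 */
        return 0;
}
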
index d548652dee5004f6608c3804ad33c07a072f6055..d14cca7fb88d2510f6c8fb2232160eff83c3efa4 100644 (file)
@@ -566,7 +566,7 @@ static int __devinit hv_probe(struct of_device *op, const struct of_device_id *m
                goto out_free_con_read_page;
 
        sunserial_console_match(&sunhv_console, op->node,
-                               &sunhv_reg, port->line);
+                               &sunhv_reg, port->line, false);
 
        err = uart_add_one_port(&sunhv_reg, port);
        if (err)
index d1ad34128635d215c83348bd57ee72bf88894f96..d514e28d07557b7516de0ecd7478d1cb9401f619 100644 (file)
@@ -883,7 +883,7 @@ static int sunsab_console_setup(struct console *con, char *options)
        printk("Console: ttyS%d (SAB82532)\n",
               (sunsab_reg.minor - 64) + con->index);
 
-       sunserial_console_termios(con);
+       sunserial_console_termios(con, to_of_device(up->port.dev)->node);
 
        switch (con->cflag & CBAUD) {
        case B150: baud = 150; break;
@@ -1027,10 +1027,12 @@ static int __devinit sab_probe(struct of_device *op, const struct of_device_id *
                goto out1;
 
        sunserial_console_match(SUNSAB_CONSOLE(), op->node,
-                               &sunsab_reg, up[0].port.line);
+                               &sunsab_reg, up[0].port.line,
+                               false);
 
        sunserial_console_match(SUNSAB_CONSOLE(), op->node,
-                               &sunsab_reg, up[1].port.line);
+                               &sunsab_reg, up[1].port.line,
+                               false);
 
        err = uart_add_one_port(&sunsab_reg, &up[0].port);
        if (err)
@@ -1116,7 +1118,6 @@ static int __init sunsab_init(void)
                if (!sunsab_ports)
                        return -ENOMEM;
 
-               sunsab_reg.cons = SUNSAB_CONSOLE();
                err = sunserial_register_minors(&sunsab_reg, num_channels);
                if (err) {
                        kfree(sunsab_ports);
index 68d262b15749584354defe8c5bf7a886383dad76..170d3d68c8f04d38fb67f8d3b84c9c977f57f7a1 100644 (file)
@@ -1329,11 +1329,9 @@ static void sunsu_console_write(struct console *co, const char *s,
  */
 static int __init sunsu_console_setup(struct console *co, char *options)
 {
+       static struct ktermios dummy;
+       struct ktermios termios;
        struct uart_port *port;
-       int baud = 9600;
-       int bits = 8;
-       int parity = 'n';
-       int flow = 'n';
 
        printk("Console: ttyS%d (SU)\n",
               (sunsu_reg.minor - 64) + co->index);
@@ -1352,10 +1350,15 @@ static int __init sunsu_console_setup(struct console *co, char *options)
         */
        spin_lock_init(&port->lock);
 
-       if (options)
-               uart_parse_options(options, &baud, &parity, &bits, &flow);
+       /* Get firmware console settings.  */
+       sunserial_console_termios(co, to_of_device(port->dev)->node);
 
-       return uart_set_options(port, co, baud, parity, bits, flow);
+       memset(&termios, 0, sizeof(struct ktermios));
+       termios.c_cflag = co->cflag;
+       port->mctrl |= TIOCM_DTR;
+       port->ops->set_termios(port, &termios, &dummy);
+
+       return 0;
 }
 
 static struct console sunsu_console = {
@@ -1409,6 +1412,7 @@ static int __devinit su_probe(struct of_device *op, const struct of_device_id *m
        struct uart_sunsu_port *up;
        struct resource *rp;
        enum su_type type;
+       bool ignore_line;
        int err;
 
        type = su_get_type(dp);
@@ -1467,8 +1471,14 @@ static int __devinit su_probe(struct of_device *op, const struct of_device_id *m
 
        up->port.ops = &sunsu_pops;
 
+       ignore_line = false;
+       if (!strcmp(dp->name, "rsc-console") ||
+           !strcmp(dp->name, "lom-console"))
+               ignore_line = true;
+
        sunserial_console_match(SUNSU_CONSOLE(), dp,
-                               &sunsu_reg, up->port.line);
+                               &sunsu_reg, up->port.line,
+                               ignore_line);
        err = uart_add_one_port(&sunsu_reg, &up->port);
        if (err)
                goto out_unmap;
@@ -1517,6 +1527,10 @@ static const struct of_device_id su_match[] = {
                .name = "serial",
                .compatible = "su",
        },
+       {
+               .type = "serial",
+               .compatible = "su",
+       },
        {},
 };
 MODULE_DEVICE_TABLE(of, su_match);
@@ -1548,6 +1562,12 @@ static int __init sunsu_init(void)
                                num_uart++;
                }
        }
+       for_each_node_by_type(dp, "serial") {
+               if (of_device_is_compatible(dp, "su")) {
+                       if (su_get_type(dp) == SU_PORT_PORT)
+                               num_uart++;
+               }
+       }
 
        if (num_uart) {
                err = sunserial_register_minors(&sunsu_reg, num_uart);
index ef693ae22e7fc98e307eaf6a16a4d4ce7207fd72..2c7a66af4f5289047395cca207dedbd3a311cac2 100644 (file)
@@ -1180,7 +1180,7 @@ static int __init sunzilog_console_setup(struct console *con, char *options)
               (sunzilog_reg.minor - 64) + con->index, con->index);
 
        /* Get firmware console settings.  */
-       sunserial_console_termios(con);
+       sunserial_console_termios(con, to_of_device(up->port.dev)->node);
 
        /* Firmware console speed is limited to 150-->38400 baud so
         * this hackish cflag thing is OK.
@@ -1416,7 +1416,8 @@ static int __devinit zs_probe(struct of_device *op, const struct of_device_id *m
 
        if (!keyboard_mouse) {
                if (sunserial_console_match(SUNZILOG_CONSOLE(), op->node,
-                                           &sunzilog_reg, up[0].port.line))
+                                           &sunzilog_reg, up[0].port.line,
+                                           false))
                        up->flags |= SUNZILOG_FLAG_IS_CONS;
                err = uart_add_one_port(&sunzilog_reg, &up[0].port);
                if (err) {
@@ -1425,7 +1426,8 @@ static int __devinit zs_probe(struct of_device *op, const struct of_device_id *m
                        return err;
                }
                if (sunserial_console_match(SUNZILOG_CONSOLE(), op->node,
-                                           &sunzilog_reg, up[1].port.line))
+                                           &sunzilog_reg, up[1].port.line,
+                                           false))
                        up->flags |= SUNZILOG_FLAG_IS_CONS;
                err = uart_add_one_port(&sunzilog_reg, &up[1].port);
                if (err) {
index d871dc23909ca856bb11b467ae09cb5f60fc7f14..2552bb36400564f58a66285647a7faa951090be8 100644 (file)
@@ -242,7 +242,7 @@ static int stmp_spi_txrx_dma(struct stmp_spi *ss, int cs,
        wait_for_completion(&ss->done);
 
        if (!busy_wait(readl(ss->regs + HW_SSP_CTRL0) & BM_SSP_CTRL0_RUN))
-               status = ETIMEDOUT;
+               status = -ETIMEDOUT;
 
        if (!dma_buf)
                dma_unmap_single(ss->master_dev, spi_buf_dma, len, dir);
index b74212d698c78b8d39c541ec44d2857cb9ad0e19..e8b89e8ac9bd96ed1279d0602f4aeee8fc050b84 100644 (file)
@@ -162,6 +162,8 @@ static u8 chipid_to_nrcores(u16 chipid)
 static u32 scan_read32(struct ssb_bus *bus, u8 current_coreidx,
                       u16 offset)
 {
+       u32 lo, hi;
+
        switch (bus->bustype) {
        case SSB_BUSTYPE_SSB:
                offset += current_coreidx * SSB_CORE_SIZE;
@@ -174,7 +176,9 @@ static u32 scan_read32(struct ssb_bus *bus, u8 current_coreidx,
                        offset -= 0x800;
                } else
                        ssb_pcmcia_switch_segment(bus, 0);
-               break;
+               lo = readw(bus->mmio + offset);
+               hi = readw(bus->mmio + offset + 2);
+               return lo | (hi << 16);
        case SSB_BUSTYPE_SDIO:
                offset += current_coreidx * SSB_CORE_SIZE;
                return ssb_sdio_scan_read32(bus, offset);
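
On the PCMCIA bus the SSB core can only be accessed 16 bits at a time, so the PCMCIA case above now assembles the 32-bit scan value from two halfword reads, low half first, instead of falling through. The combination step in isolation:

#include <stdint.h>
#include <stdio.h>

/* Build one 32-bit value from two 16-bit bus reads, low half first. */
static uint32_t read32_from_halves(uint16_t lo, uint16_t hi)
{
        return (uint32_t)lo | ((uint32_t)hi << 16);
}

int main(void)
{
        printf("%#x\n", (unsigned)read32_from_halves(0x5678, 0x1234));
        return 0;       /* prints 0x12345678 */
}
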
index 8c85a9c3665a4a37d01fdebd478e827f65239ff8..f4a6541c3e60ffcdffa1166a353e45929b4d25d7 100644 (file)
@@ -261,7 +261,7 @@ static int read_reg_fp(struct i2c_client *client, u16 addr, u16 *val)
 
        memset(buf, 0xcd, 6);
        usb = go->hpi_context;
-       if (down_interruptible(&usb->i2c_lock) != 0) {
+       if (mutex_lock_interruptible(&usb->i2c_lock) != 0) {
                printk(KERN_INFO "i2c lock failed\n");
                kfree(buf);
                return -EINTR;
@@ -270,7 +270,7 @@ static int read_reg_fp(struct i2c_client *client, u16 addr, u16 *val)
                kfree(buf);
                return -EFAULT;
        }
-       up(&usb->i2c_lock);
+       mutex_unlock(&usb->i2c_lock);
 
        *val = (buf[0] << 8) | buf[1];
        kfree(buf);
diff --git a/drivers/staging/go7007/s2250-loader.h b/drivers/staging/go7007/s2250-loader.h
new file mode 100644 (file)
index 0000000..b7c301a
--- /dev/null
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2005-2006 Micronas USA Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License (Version 2) as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
+ */
+
+#ifndef _S2250_LOADER_H_
+#define _S2250_LOADER_H_
+
+extern int s2250loader_init(void);
+extern void s2250loader_cleanup(void);
+
+#endif
index 42230e62a2227b9bf1297ff575a2c618b98902de..31a58e50892417b57f58b28374e6a320eaff5c43 100644 (file)
@@ -170,7 +170,7 @@ static u32 cvm_oct_get_link(struct net_device *dev)
        return ret;
 }
 
-struct const ethtool_ops cvm_oct_ethtool_ops = {
+const struct ethtool_ops cvm_oct_ethtool_ops = {
        .get_drvinfo = cvm_oct_get_drvinfo,
        .get_settings = cvm_oct_get_settings,
        .set_settings = cvm_oct_set_settings,
index 66190b0cb68f355c6f482bba828cdcc6c4228017..00dc0f4bad19933b0dd2d48ce9c1bc61faf3f173 100644 (file)
@@ -317,6 +317,6 @@ void cvm_oct_spi_uninit(struct net_device *dev)
                        cvmx_write_csr(CVMX_SPXX_INT_MSK(interface), 0);
                        cvmx_write_csr(CVMX_STXX_INT_MSK(interface), 0);
                }
-               free_irq(8 + 46, &number_spi_ports);
+               free_irq(OCTEON_IRQ_RML, &number_spi_ports);
        }
 }
index b8479517dce28b9fa76af52c532c8af556cb54e5..492c5029992de65c9b35e854fd141d9fd8c5969a 100644 (file)
@@ -111,6 +111,16 @@ MODULE_PARM_DESC(disable_core_queueing, "\n"
        "\tallows packets to be sent without lock contention in the packet\n"
        "\tscheduler resulting in some cases in improved throughput.\n");
 
+
+/*
+ * The offset from mac_addr_base that should be used for the next port
+ * that is configured.  By convention, if any mgmt ports exist on the
+ * chip, they get the first mac addresses, The ports controlled by
+ * this driver are numbered sequencially following any mgmt addresses
+ * that may exist.
+ */
+static unsigned int cvm_oct_mac_addr_offset;
+
 /**
  * Periodic timer to check auto negotiation
  */
@@ -474,16 +484,30 @@ static int cvm_oct_common_set_mac_address(struct net_device *dev, void *addr)
  */
 int cvm_oct_common_init(struct net_device *dev)
 {
-       static int count;
-       char mac[8] = { 0x00, 0x00,
-               octeon_bootinfo->mac_addr_base[0],
-               octeon_bootinfo->mac_addr_base[1],
-               octeon_bootinfo->mac_addr_base[2],
-               octeon_bootinfo->mac_addr_base[3],
-               octeon_bootinfo->mac_addr_base[4],
-               octeon_bootinfo->mac_addr_base[5] + count
-       };
        struct octeon_ethernet *priv = netdev_priv(dev);
+       struct sockaddr sa;
+       u64 mac = ((u64)(octeon_bootinfo->mac_addr_base[0] & 0xff) << 40) |
+               ((u64)(octeon_bootinfo->mac_addr_base[1] & 0xff) << 32) |
+               ((u64)(octeon_bootinfo->mac_addr_base[2] & 0xff) << 24) |
+               ((u64)(octeon_bootinfo->mac_addr_base[3] & 0xff) << 16) |
+               ((u64)(octeon_bootinfo->mac_addr_base[4] & 0xff) << 8) |
+               (u64)(octeon_bootinfo->mac_addr_base[5] & 0xff);
+
+       mac += cvm_oct_mac_addr_offset;
+       sa.sa_data[0] = (mac >> 40) & 0xff;
+       sa.sa_data[1] = (mac >> 32) & 0xff;
+       sa.sa_data[2] = (mac >> 24) & 0xff;
+       sa.sa_data[3] = (mac >> 16) & 0xff;
+       sa.sa_data[4] = (mac >> 8) & 0xff;
+       sa.sa_data[5] = mac & 0xff;
+
+       if (cvm_oct_mac_addr_offset >= octeon_bootinfo->mac_addr_count)
+               printk(KERN_DEBUG "%s: Using MAC outside of the assigned range:"
+                       " %02x:%02x:%02x:%02x:%02x:%02x\n", dev->name,
+                       sa.sa_data[0] & 0xff, sa.sa_data[1] & 0xff,
+                       sa.sa_data[2] & 0xff, sa.sa_data[3] & 0xff,
+                       sa.sa_data[4] & 0xff, sa.sa_data[5] & 0xff);
+       cvm_oct_mac_addr_offset++;
 
        /*
         * Force the interface to use the POW send if always_use_pow
@@ -496,14 +520,12 @@ int cvm_oct_common_init(struct net_device *dev)
        if (priv->queue != -1 && USE_HW_TCPUDP_CHECKSUM)
                dev->features |= NETIF_F_IP_CSUM;
 
-       count++;
-
        /* We do our own locking, Linux doesn't need to */
        dev->features |= NETIF_F_LLTX;
        SET_ETHTOOL_OPS(dev, &cvm_oct_ethtool_ops);
 
        cvm_oct_mdio_setup_device(dev);
-       dev->netdev_ops->ndo_set_mac_address(dev, mac);
+       dev->netdev_ops->ndo_set_mac_address(dev, &sa);
        dev->netdev_ops->ndo_change_mtu(dev, dev->mtu);
 
        /*
@@ -620,6 +642,13 @@ static int __init cvm_oct_init_module(void)
 
        pr_notice("cavium-ethernet %s\n", OCTEON_ETHERNET_VERSION);
 
+       if (OCTEON_IS_MODEL(OCTEON_CN52XX))
+               cvm_oct_mac_addr_offset = 2; /* First two are the mgmt ports. */
+       else if (OCTEON_IS_MODEL(OCTEON_CN56XX))
+               cvm_oct_mac_addr_offset = 1; /* First one is the mgmt port. */
+       else
+               cvm_oct_mac_addr_offset = 0;
+
        cvm_oct_proc_initialize();
        cvm_oct_rx_initialize();
        cvm_oct_configure_common_hw();
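
The octeon ethernet rework stops deriving MAC addresses with a per-call counter added to the last octet (which could not carry into the higher octets) and instead packs the base address into a u64, adds a global offset that skips any management ports (two on CN52XX, one on CN56XX), and unpacks the result. A self-contained sketch of the pack/add/unpack arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Pack a 6-byte MAC into a u64, add a port offset, unpack it.
 * Integer addition carries across octets, unlike bumping byte 5. */
static void mac_add(const uint8_t base[6], unsigned offset, uint8_t out[6])
{
        uint64_t mac = 0;
        int i;

        for (i = 0; i < 6; i++)
                mac = (mac << 8) | base[i];
        mac += offset;
        for (i = 5; i >= 0; i--) {
                out[i] = mac & 0xff;
                mac >>= 8;
        }
}

int main(void)
{
        uint8_t base[6] = { 0x00, 0x01, 0x02, 0x03, 0x04, 0xff };
        uint8_t out[6];

        mac_add(base, 2, out);
        printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
               out[0], out[1], out[2], out[3], out[4], out[5]);
        return 0;       /* 00:01:02:03:05:01 */
}
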
index 02347c57357d20511e3e0e25f34572ab053078c0..aa53db9f2e88309d739809adf10928fd0a6d1a57 100644 (file)
@@ -178,6 +178,7 @@ static int uio_pdrv_genirq_probe(struct platform_device *pdev)
        return 0;
  bad1:
        kfree(priv);
+       pm_runtime_disable(&pdev->dev);
  bad0:
        return ret;
 }
index e3861b21e776600fb721699c8ede4e62dc51f8fc..e4eca7810bcf1bed6821086e4bab6130525e3c17 100644 (file)
@@ -609,9 +609,9 @@ static int acm_tty_open(struct tty_struct *tty, struct file *filp)
 
        acm->throttle = 0;
 
-       tasklet_schedule(&acm->urb_task);
        set_bit(ASYNCB_INITIALIZED, &acm->port.flags);
        rv = tty_port_block_til_ready(&acm->port, tty, filp);
+       tasklet_schedule(&acm->urb_task);
 done:
        mutex_unlock(&acm->mutex);
 err_out:
@@ -686,15 +686,21 @@ static void acm_tty_close(struct tty_struct *tty, struct file *filp)
 
        /* Perform the closing process and see if we need to do the hardware
           shutdown */
-       if (!acm || tty_port_close_start(&acm->port, tty, filp) == 0)
+       if (!acm)
+               return;
+       if (tty_port_close_start(&acm->port, tty, filp) == 0) {
+               mutex_lock(&open_mutex);
+               if (!acm->dev) {
+                       tty_port_tty_set(&acm->port, NULL);
+                       acm_tty_unregister(acm);
+                       tty->driver_data = NULL;
+               }
+               mutex_unlock(&open_mutex);
                return;
+       }
        acm_port_down(acm, 0);
        tty_port_close_end(&acm->port, tty);
-       mutex_lock(&open_mutex);
        tty_port_tty_set(&acm->port, NULL);
-       if (!acm->dev)
-               acm_tty_unregister(acm);
-       mutex_unlock(&open_mutex);
 }
 
 static int acm_tty_write(struct tty_struct *tty,
index 78bb7710f36d9ef641f7857b3f688b5c2c9640af..24eb74781919d58ed4454a4e7c1dd7999e04ade1 100644 (file)
@@ -87,6 +87,7 @@ static int ohci_restart (struct ohci_hcd *ohci);
 #ifdef CONFIG_PCI
 static void quirk_amd_pll(int state);
 static void amd_iso_dev_put(void);
+static void sb800_prefetch(struct ohci_hcd *ohci, int on);
 #else
 static inline void quirk_amd_pll(int state)
 {
@@ -96,6 +97,10 @@ static inline void amd_iso_dev_put(void)
 {
        return;
 }
+static inline void sb800_prefetch(struct ohci_hcd *ohci, int on)
+{
+       return;
+}
 #endif
 
 
index d2ba04dd785e4942d69ad28f4b2a3a727d3efd54..b8a1148f248e4faeb404fbcedf76e6c36d0e6ea2 100644 (file)
@@ -177,6 +177,13 @@ static int ohci_quirk_amd700(struct usb_hcd *hcd)
                return 0;
 
        pci_read_config_byte(amd_smbus_dev, PCI_REVISION_ID, &rev);
+
+       /* SB800 needs pre-fetch fix */
+       if ((rev >= 0x40) && (rev <= 0x4f)) {
+               ohci->flags |= OHCI_QUIRK_AMD_PREFETCH;
+               ohci_dbg(ohci, "enabled AMD prefetch quirk\n");
+       }
+
        if ((rev > 0x3b) || (rev < 0x30)) {
                pci_dev_put(amd_smbus_dev);
                amd_smbus_dev = NULL;
@@ -262,6 +269,19 @@ static void amd_iso_dev_put(void)
 
 }
 
+static void sb800_prefetch(struct ohci_hcd *ohci, int on)
+{
+       struct pci_dev *pdev;
+       u16 misc;
+
+       pdev = to_pci_dev(ohci_to_hcd(ohci)->self.controller);
+       pci_read_config_word(pdev, 0x50, &misc);
+       if (on == 0)
+               pci_write_config_word(pdev, 0x50, misc & 0xfcff);
+       else
+               pci_write_config_word(pdev, 0x50, misc | 0x0300);
+}
+
 /* List of quirks for OHCI */
 static const struct pci_device_id ohci_pci_quirks[] = {
        {
index 16fecb8ecc39dde0360153ac4f1637c036fd0ab5..35288bcae0dbc10ccc04fe380e36ba8026f1220a 100644 (file)
@@ -49,9 +49,12 @@ __acquires(ohci->lock)
        switch (usb_pipetype (urb->pipe)) {
        case PIPE_ISOCHRONOUS:
                ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs--;
-               if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0
-                               && quirk_amdiso(ohci))
-                       quirk_amd_pll(1);
+               if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0) {
+                       if (quirk_amdiso(ohci))
+                               quirk_amd_pll(1);
+                       if (quirk_amdprefetch(ohci))
+                               sb800_prefetch(ohci, 0);
+               }
                break;
        case PIPE_INTERRUPT:
                ohci_to_hcd(ohci)->self.bandwidth_int_reqs--;
@@ -680,9 +683,12 @@ static void td_submit_urb (
                                data + urb->iso_frame_desc [cnt].offset,
                                urb->iso_frame_desc [cnt].length, urb, cnt);
                }
-               if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0
-                               && quirk_amdiso(ohci))
-                       quirk_amd_pll(0);
+               if (ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs == 0) {
+                       if (quirk_amdiso(ohci))
+                               quirk_amd_pll(0);
+                       if (quirk_amdprefetch(ohci))
+                               sb800_prefetch(ohci, 1);
+               }
                periodic = ohci_to_hcd(ohci)->self.bandwidth_isoc_reqs++ == 0
                        && ohci_to_hcd(ohci)->self.bandwidth_int_reqs == 0;
                break;
index 222011f6172cbccd23b199e85319730f72f7bb52..5bf15fed0d9fcd4f4654bb51c7ae38ea82cf905c 100644 (file)
@@ -402,6 +402,7 @@ struct ohci_hcd {
 #define        OHCI_QUIRK_FRAME_NO     0x80                    /* no big endian frame_no shift */
 #define        OHCI_QUIRK_HUB_POWER    0x100                   /* distrust firmware power/oc setup */
 #define        OHCI_QUIRK_AMD_ISO      0x200                   /* ISO transfers*/
+#define        OHCI_QUIRK_AMD_PREFETCH 0x400                   /* pre-fetch for ISO transfer */
        // there are also chip quirks/bugs in init logic
 
        struct work_struct      nec_work;       /* Worker for NEC quirk */
@@ -433,6 +434,10 @@ static inline int quirk_amdiso(struct ohci_hcd *ohci)
 {
        return ohci->flags & OHCI_QUIRK_AMD_ISO;
 }
+static inline int quirk_amdprefetch(struct ohci_hcd *ohci)
+{
+       return ohci->flags & OHCI_QUIRK_AMD_PREFETCH;
+}
 #else
 static inline int quirk_nec(struct ohci_hcd *ohci)
 {
@@ -446,6 +451,10 @@ static inline int quirk_amdiso(struct ohci_hcd *ohci)
 {
        return 0;
 }
+static inline int quirk_amdprefetch(struct ohci_hcd *ohci)
+{
+       return 0;
+}
 #endif
 
 /* convert between an hcd pointer and the corresponding ohci_hcd */
index 1db4fea8c1704e62cdc33ddfeacc353f70273a09..b8fd270a8b0d5f2630ed87dbcf6d24f8a566c86e 100644 (file)
@@ -802,9 +802,11 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
        int i;
 
        /* Free the Event Ring Segment Table and the actual Event Ring */
-       xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
-       xhci_write_64(xhci, 0, &xhci->ir_set->erst_base);
-       xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue);
+       if (xhci->ir_set) {
+               xhci_writel(xhci, 0, &xhci->ir_set->erst_size);
+               xhci_write_64(xhci, 0, &xhci->ir_set->erst_base);
+               xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue);
+       }
        size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
        if (xhci->erst.entries)
                pci_free_consistent(pdev, size,
@@ -841,9 +843,9 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
                                xhci->dcbaa, xhci->dcbaa->dma);
        xhci->dcbaa = NULL;
 
+       scratchpad_free(xhci);
        xhci->page_size = 0;
        xhci->page_shift = 0;
-       scratchpad_free(xhci);
 }
 
 int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags)
index 173c39c7648912a1e63ef53a38881c4091094a07..821b7b4709de6531b28afb78c428ad4f5379ff81 100644 (file)
@@ -864,9 +864,11 @@ static struct xhci_segment *trb_in_td(
        cur_seg = start_seg;
 
        do {
+               if (start_dma == 0)
+                       return 0;
                /* We may get an event for a Link TRB in the middle of a TD */
                end_seg_dma = xhci_trb_virt_to_dma(cur_seg,
-                               &start_seg->trbs[TRBS_PER_SEGMENT - 1]);
+                               &cur_seg->trbs[TRBS_PER_SEGMENT - 1]);
                /* If the end TRB isn't in this segment, this is set to 0 */
                end_trb_dma = xhci_trb_virt_to_dma(cur_seg, end_trb);
 
@@ -893,8 +895,9 @@ static struct xhci_segment *trb_in_td(
                }
                cur_seg = cur_seg->next;
                start_dma = xhci_trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
-       } while (1);
+       } while (cur_seg != start_seg);
 
+       return 0;
 }
 
 /*
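
The trb_in_td() fixes above close two holes: the end-of-segment lookup now uses cur_seg instead of start_seg, and the walk over the ring's circular segment list terminates once it wraps back to the starting segment (or hits a zero DMA address) instead of spinning forever on a malformed ring. The wrap-detection pattern in miniature:

#include <stddef.h>
#include <stdio.h>

struct seg {
        int id;
        struct seg *next;
};

/* Walk a circular list at most once: stop on wrap to 'start'. */
static struct seg *find(struct seg *start, int id)
{
        struct seg *cur = start;

        do {
                if (cur->id == id)
                        return cur;
                cur = cur->next;
        } while (cur != start);

        return NULL;    /* not found; never loops forever */
}

int main(void)
{
        struct seg c = { 3, NULL }, b = { 2, &c }, a = { 1, &b };

        c.next = &a;    /* close the ring */
        printf("%p\n", (void *)find(&a, 9));    /* (nil): terminated */
        return 0;
}
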
index 9ed3e741bee160c7f2d71906810146f95f1da59a..10f3205798e880ce4f1351735666bac903e34398 100644 (file)
@@ -348,12 +348,12 @@ static unsigned int mon_buff_area_alloc_contiguous(struct mon_reader_bin *rp,
 
 /*
  * Return a few (kilo-)bytes to the head of the buffer.
- * This is used if a DMA fetch fails.
+ * This is used if a data fetch fails.
  */
 static void mon_buff_area_shrink(struct mon_reader_bin *rp, unsigned int size)
 {
 
-       size = (size + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
+       /* size &= ~(PKT_ALIGN-1);  -- we're called with aligned size */
        rp->b_cnt -= size;
        if (rp->b_in < size)
                rp->b_in += rp->b_size;
@@ -433,6 +433,7 @@ static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
        unsigned int urb_length;
        unsigned int offset;
        unsigned int length;
+       unsigned int delta;
        unsigned int ndesc, lendesc;
        unsigned char dir;
        struct mon_bin_hdr *ep;
@@ -537,8 +538,10 @@ static void mon_bin_event(struct mon_reader_bin *rp, struct urb *urb,
        if (length != 0) {
                ep->flag_data = mon_bin_get_data(rp, offset, urb, length);
                if (ep->flag_data != 0) {       /* Yes, it's 0x00, not '0' */
-                       ep->len_cap = 0;
-                       mon_buff_area_shrink(rp, length);
+                       delta = (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
+                       ep->len_cap -= length;
+                       delta -= (ep->len_cap + PKT_ALIGN-1) & ~(PKT_ALIGN-1);
+                       mon_buff_area_shrink(rp, delta);
                }
        } else {
                ep->flag_data = data_tag;
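
The usbmon change above keeps the event header when the data copy fails and returns only the data area to the buffer. Because the buffer is carved out in PKT_ALIGN-sized chunks, the amount to return is the difference between the aligned old capture length and the aligned header-only length, exactly the delta computed in the hunk. A worked example (PKT_ALIGN taken as 64 for illustration):

#include <stdio.h>

#define PKT_ALIGN 64    /* illustrative chunk size */

static unsigned align_up(unsigned v)
{
        return (v + PKT_ALIGN - 1) & ~(PKT_ALIGN - 1u);
}

int main(void)
{
        unsigned len_cap = 200; /* descriptors + data captured */
        unsigned length = 130;  /* the data part that failed to copy */
        unsigned delta;

        delta = align_up(len_cap);      /* 256: space currently held */
        len_cap -= length;              /* 70: what we still keep    */
        delta -= align_up(len_cap);     /* 256 - 128 = 128 returned  */

        printf("shrink by %u\n", delta);
        return 0;
}
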
index 698252a4dc5d9b59878a26b64a9a5caf00c8efef..bd254ec97d14be6d9b9e4db7e64bfbbca1f7d14c 100644 (file)
@@ -50,6 +50,8 @@ static int cp210x_tiocmset_port(struct usb_serial_port *port, struct file *,
 static void cp210x_break_ctl(struct tty_struct *, int);
 static int cp210x_startup(struct usb_serial *);
 static void cp210x_disconnect(struct usb_serial *);
+static void cp210x_dtr_rts(struct usb_serial_port *p, int on);
+static int cp210x_carrier_raised(struct usb_serial_port *p);
 
 static int debug;
 
@@ -143,6 +145,8 @@ static struct usb_serial_driver cp210x_device = {
        .tiocmset               = cp210x_tiocmset,
        .attach                 = cp210x_startup,
        .disconnect             = cp210x_disconnect,
+       .dtr_rts                = cp210x_dtr_rts,
+       .carrier_raised         = cp210x_carrier_raised
 };
 
 /* Config request types */
@@ -746,6 +750,14 @@ static int cp210x_tiocmset_port(struct usb_serial_port *port, struct file *file,
        return cp210x_set_config(port, CP210X_SET_MHS, &control, 2);
 }
 
+static void cp210x_dtr_rts(struct usb_serial_port *p, int on)
+{
+       if (on)
+               cp210x_tiocmset_port(p, NULL,  TIOCM_DTR|TIOCM_RTS, 0);
+       else
+               cp210x_tiocmset_port(p, NULL,  0, TIOCM_DTR|TIOCM_RTS);
+}
+
 static int cp210x_tiocmget (struct tty_struct *tty, struct file *file)
 {
        struct usb_serial_port *port = tty->driver_data;
@@ -768,6 +780,15 @@ static int cp210x_tiocmget (struct tty_struct *tty, struct file *file)
        return result;
 }
 
+static int cp210x_carrier_raised(struct usb_serial_port *p)
+{
+       unsigned int control;
+       cp210x_get_config(p, CP210X_GET_MDMSTS, &control, 1);
+       if (control & CONTROL_DCD)
+               return 1;
+       return 0;
+}
+
 static void cp210x_break_ctl (struct tty_struct *tty, int break_state)
 {
        struct usb_serial_port *port = tty->driver_data;
index cd44c68954df55e65dff18ae6f89c007d698b938..319aaf9725b37f99678381637fa605d29bfe3346 100644 (file)
@@ -308,6 +308,7 @@ static int  option_resume(struct usb_serial *serial);
 
 #define DLINK_VENDOR_ID                                0x1186
 #define DLINK_PRODUCT_DWM_652                  0x3e04
+#define DLINK_PRODUCT_DWM_652_U5               0xce16
 
 #define QISDA_VENDOR_ID                                0x1da5
 #define QISDA_PRODUCT_H21_4512                 0x4512
@@ -586,6 +587,7 @@ static struct usb_device_id option_ids[] = {
        { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) },
        { USB_DEVICE(BENQ_VENDOR_ID, BENQ_PRODUCT_H10) },
        { USB_DEVICE(DLINK_VENDOR_ID, DLINK_PRODUCT_DWM_652) },
+       { USB_DEVICE(ALINK_VENDOR_ID, DLINK_PRODUCT_DWM_652_U5) }, /* Yes, ALINK_VENDOR_ID */
        { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4512) },
        { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H21_4523) },
        { USB_DEVICE(QISDA_VENDOR_ID, QISDA_PRODUCT_H20_4515) },
index d065894ce38fbf952a46d2da87da67b34374289a..035d56835b75477118c0bfc430d6a4d7c1d71d62 100644 (file)
@@ -704,7 +704,7 @@ static int __init fb_probe(struct platform_device *device)
 
        if (i == ARRAY_SIZE(known_lcd_panels)) {
                dev_err(&device->dev, "GLCD: No valid panel found\n");
-               ret = ENODEV;
+               ret = -ENODEV;
                goto err_clk_disable;
        } else
                dev_info(&device->dev, "GLCD: Found %s panel\n",
index f24d04132eda85252615a11d2907dbb8c5631362..4d227b15200118468be1a355f37ba8e1cc28adf0 100644 (file)
@@ -317,7 +317,7 @@ static int __devexit pnx4008_wdt_remove(struct platform_device *pdev)
 
 static struct platform_driver platform_wdt_driver = {
        .driver = {
-               .name = "watchdog",
+               .name = "pnx4008-watchdog",
                .owner  = THIS_MODULE,
        },
        .probe = pnx4008_wdt_probe,
@@ -352,4 +352,4 @@ MODULE_PARM_DESC(nowayout,
 
 MODULE_LICENSE("GPL");
 MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
-MODULE_ALIAS("platform:watchdog");
+MODULE_ALIAS("platform:pnx4008-watchdog");
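
Renaming the driver from the generic "watchdog" avoids clashing with other platform watchdog drivers, but the platform bus binds strictly by name, so any board code must register its device under the new name. A hypothetical board-file fragment (not part of this patch):

#include <linux/platform_device.h>

/* Hypothetical example: the device name must match the driver's
 * .name field exactly for the platform bus to bind them. */
static struct platform_device pnx4008_wdt_device = {
	.name	= "pnx4008-watchdog",
	.id	= -1,
};

static int __init board_init(void)
{
	return platform_device_register(&pnx4008_wdt_device);
}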
index 51c94e26a346f1de790e4f3cac3e039d097c5494..bcc5357a906987147269ba9c020ba6ed642a9ec9 100644 (file)
@@ -343,18 +343,7 @@ int __v9fs_fscache_release_page(struct page *page, gfp_t gfp)
 
        BUG_ON(!vcookie->fscache);
 
-       if (PageFsCache(page)) {
-               if (fscache_check_page_write(vcookie->fscache, page)) {
-                       if (!(gfp & __GFP_WAIT))
-                               return 0;
-                       fscache_wait_on_page_write(vcookie->fscache, page);
-               }
-
-               fscache_uncache_page(vcookie->fscache, page);
-               ClearPageFsCache(page);
-       }
-
-       return 1;
+       return fscache_maybe_release_page(vcookie->fscache, page, gfp);
 }
 
 void __v9fs_fscache_invalidate_page(struct page *page)
@@ -368,7 +357,6 @@ void __v9fs_fscache_invalidate_page(struct page *page)
                fscache_wait_on_page_write(vcookie->fscache, page);
                BUG_ON(!PageLocked(page));
                fscache_uncache_page(vcookie->fscache, page);
-               ClearPageFsCache(page);
        }
 }
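
This hunk and the AFS hunk below collapse the same open-coded pattern into fscache_maybe_release_page(), which returns false (page not releasable) when the page is still being written to the cache and the caller's gfp flags forbid waiting. A plausible shape for the helper, inferred from the call sites; the real definition lives in the fscache headers added elsewhere in this series:

/* Inferred sketch, not copied from the patch: the helper names below
 * are assumptions based on fscache conventions. Release is refused
 * only when the page is busy and the caller may not sleep. */
static inline bool fscache_maybe_release_page(struct fscache_cookie *cookie,
					      struct page *page, gfp_t gfp)
{
	if (fscache_cookie_valid(cookie) && PageFsCache(page))
		return __fscache_maybe_release_page(cookie, page, gfp);
	return true;
}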
 
index 681c2a7b013fc3c06bfa2d0d75d05804eafb8ffb..39b301662f22449e0896e77bf9a964e68bd9a9f0 100644 (file)
@@ -315,7 +315,6 @@ static void afs_invalidatepage(struct page *page, unsigned long offset)
                        struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
                        fscache_wait_on_page_write(vnode->cache, page);
                        fscache_uncache_page(vnode->cache, page);
-                       ClearPageFsCache(page);
                }
 #endif
 
@@ -349,17 +348,9 @@ static int afs_releasepage(struct page *page, gfp_t gfp_flags)
        /* deny if page is being written to the cache and the caller hasn't
         * elected to wait */
 #ifdef CONFIG_AFS_FSCACHE
-       if (PageFsCache(page)) {
-               if (fscache_check_page_write(vnode->cache, page)) {
-                       if (!(gfp_flags & __GFP_WAIT)) {
-                               _leave(" = F [cache busy]");
-                               return 0;
-                       }
-                       fscache_wait_on_page_write(vnode->cache, page);
-               }
-
-               fscache_uncache_page(vnode->cache, page);
-               ClearPageFsCache(page);
+       if (!fscache_maybe_release_page(vnode->cache, page, gfp_flags)) {
+               _leave(" = F [cache busy]");
+               return 0;
        }
 #endif
 
index 431accd475a73aba67ad8d24a80b31da2c94db24..27089311fbea467b4ca6ec7d6522ae0525231c6f 100644 (file)
@@ -114,8 +114,9 @@ nomem_lookup_data:
 
 /*
  * attempt to look up the nominated node in this cache
+ * - return -ETIMEDOUT to be scheduled again
  */
-static void cachefiles_lookup_object(struct fscache_object *_object)
+static int cachefiles_lookup_object(struct fscache_object *_object)
 {
        struct cachefiles_lookup_data *lookup_data;
        struct cachefiles_object *parent, *object;
@@ -145,13 +146,15 @@ static void cachefiles_lookup_object(struct fscache_object *_object)
            object->fscache.cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX)
                cachefiles_attr_changed(&object->fscache);
 
-       if (ret < 0) {
-               printk(KERN_WARNING "CacheFiles: Lookup failed error %d\n",
-                      ret);
+       if (ret < 0 && ret != -ETIMEDOUT) {
+               if (ret != -ENOBUFS)
+                       printk(KERN_WARNING
+                              "CacheFiles: Lookup failed error %d\n", ret);
                fscache_object_lookup_error(&object->fscache);
        }
 
        _leave(" [%d]", ret);
+       return ret;
 }
 
 /*
@@ -331,6 +334,7 @@ static void cachefiles_put_object(struct fscache_object *_object)
                }
 
                cache = object->fscache.cache;
+               fscache_object_destroy(&object->fscache);
                kmem_cache_free(cachefiles_object_jar, object);
                fscache_object_destroyed(cache);
        }
@@ -403,12 +407,26 @@ static int cachefiles_attr_changed(struct fscache_object *_object)
        if (oi_size == ni_size)
                return 0;
 
-       newattrs.ia_size = ni_size;
-       newattrs.ia_valid = ATTR_SIZE;
-
        cachefiles_begin_secure(cache, &saved_cred);
        mutex_lock(&object->backer->d_inode->i_mutex);
+
+       /* if there's an extension to a partial page at the end of the backing
+        * file, we need to discard the partial page so that we pick up new
+        * data after it */
+       if (oi_size & ~PAGE_MASK && ni_size > oi_size) {
+               _debug("discard tail %llx", oi_size);
+               newattrs.ia_valid = ATTR_SIZE;
+               newattrs.ia_size = oi_size & PAGE_MASK;
+               ret = notify_change(object->backer, &newattrs);
+               if (ret < 0)
+                       goto truncate_failed;
+       }
+
+       newattrs.ia_valid = ATTR_SIZE;
+       newattrs.ia_size = ni_size;
        ret = notify_change(object->backer, &newattrs);
+
+truncate_failed:
        mutex_unlock(&object->backer->d_inode->i_mutex);
        cachefiles_end_secure(cache, saved_cred);
 
index 4ce818ae39ea8d6ae10f9777b089f75b018f201f..14ac4806e2913f8c37e76d95dc1d0274f4bbd027 100644 (file)
 #include <linux/security.h>
 #include "internal.h"
 
-static int cachefiles_wait_bit(void *flags)
+#define CACHEFILES_KEYBUF_SIZE 512
+
+/*
+ * dump debugging info about an object
+ */
+static noinline
+void __cachefiles_printk_object(struct cachefiles_object *object,
+                               const char *prefix,
+                               u8 *keybuf)
 {
-       schedule();
-       return 0;
+       struct fscache_cookie *cookie;
+       unsigned keylen, loop;
+
+       printk(KERN_ERR "%sobject: OBJ%x\n",
+              prefix, object->fscache.debug_id);
+       printk(KERN_ERR "%sobjstate=%s fl=%lx swfl=%lx ev=%lx[%lx]\n",
+              prefix, fscache_object_states[object->fscache.state],
+              object->fscache.flags, object->fscache.work.flags,
+              object->fscache.events,
+              object->fscache.event_mask & FSCACHE_OBJECT_EVENTS_MASK);
+       printk(KERN_ERR "%sops=%u inp=%u exc=%u\n",
+              prefix, object->fscache.n_ops, object->fscache.n_in_progress,
+              object->fscache.n_exclusive);
+       printk(KERN_ERR "%sparent=%p\n",
+              prefix, object->fscache.parent);
+
+       spin_lock(&object->fscache.lock);
+       cookie = object->fscache.cookie;
+       if (cookie) {
+               printk(KERN_ERR "%scookie=%p [pr=%p nd=%p fl=%lx]\n",
+                      prefix,
+                      object->fscache.cookie,
+                      object->fscache.cookie->parent,
+                      object->fscache.cookie->netfs_data,
+                      object->fscache.cookie->flags);
+               if (keybuf)
+                       keylen = cookie->def->get_key(cookie->netfs_data, keybuf,
+                                                     CACHEFILES_KEYBUF_SIZE);
+               else
+                       keylen = 0;
+       } else {
+               printk(KERN_ERR "%scookie=NULL\n", prefix);
+               keylen = 0;
+       }
+       spin_unlock(&object->fscache.lock);
+
+       if (keylen) {
+               printk(KERN_ERR "%skey=[%u] '", prefix, keylen);
+               for (loop = 0; loop < keylen; loop++)
+                       printk("%02x", keybuf[loop]);
+               printk("'\n");
+       }
+}
+
+/*
+ * dump debugging info about a pair of objects
+ */
+static noinline void cachefiles_printk_object(struct cachefiles_object *object,
+                                             struct cachefiles_object *xobject)
+{
+       u8 *keybuf;
+
+       keybuf = kmalloc(CACHEFILES_KEYBUF_SIZE, GFP_NOIO);
+       if (object)
+               __cachefiles_printk_object(object, "", keybuf);
+       if (xobject)
+               __cachefiles_printk_object(xobject, "x", keybuf);
+       kfree(keybuf);
 }
 
 /*
  * record the fact that an object is now active
  */
-static void cachefiles_mark_object_active(struct cachefiles_cache *cache,
-                                         struct cachefiles_object *object)
+static int cachefiles_mark_object_active(struct cachefiles_cache *cache,
+                                        struct cachefiles_object *object)
 {
        struct cachefiles_object *xobject;
        struct rb_node **_p, *_parent = NULL;
@@ -42,8 +106,11 @@ static void cachefiles_mark_object_active(struct cachefiles_cache *cache,
 try_again:
        write_lock(&cache->active_lock);
 
-       if (test_and_set_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags))
+       if (test_and_set_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags)) {
+               printk(KERN_ERR "CacheFiles: Error: Object already active\n");
+               cachefiles_printk_object(object, NULL);
                BUG();
+       }
 
        dentry = object->dentry;
        _p = &cache->active_nodes.rb_node;
@@ -66,8 +133,8 @@ try_again:
        rb_insert_color(&object->active_node, &cache->active_nodes);
 
        write_unlock(&cache->active_lock);
-       _leave("");
-       return;
+       _leave(" = 0");
+       return 0;
 
        /* an old object from a previous incarnation is hogging the slot - we
         * need to wait for it to be destroyed */
@@ -76,44 +143,70 @@ wait_for_old_object:
                printk(KERN_ERR "\n");
                printk(KERN_ERR "CacheFiles: Error:"
                       " Unexpected object collision\n");
-               printk(KERN_ERR "xobject: OBJ%x\n",
-                      xobject->fscache.debug_id);
-               printk(KERN_ERR "xobjstate=%s\n",
-                      fscache_object_states[xobject->fscache.state]);
-               printk(KERN_ERR "xobjflags=%lx\n", xobject->fscache.flags);
-               printk(KERN_ERR "xobjevent=%lx [%lx]\n",
-                      xobject->fscache.events, xobject->fscache.event_mask);
-               printk(KERN_ERR "xops=%u inp=%u exc=%u\n",
-                      xobject->fscache.n_ops, xobject->fscache.n_in_progress,
-                      xobject->fscache.n_exclusive);
-               printk(KERN_ERR "xcookie=%p [pr=%p nd=%p fl=%lx]\n",
-                      xobject->fscache.cookie,
-                      xobject->fscache.cookie->parent,
-                      xobject->fscache.cookie->netfs_data,
-                      xobject->fscache.cookie->flags);
-               printk(KERN_ERR "xparent=%p\n",
-                      xobject->fscache.parent);
-               printk(KERN_ERR "object: OBJ%x\n",
-                      object->fscache.debug_id);
-               printk(KERN_ERR "cookie=%p [pr=%p nd=%p fl=%lx]\n",
-                      object->fscache.cookie,
-                      object->fscache.cookie->parent,
-                      object->fscache.cookie->netfs_data,
-                      object->fscache.cookie->flags);
-               printk(KERN_ERR "parent=%p\n",
-                      object->fscache.parent);
+               cachefiles_printk_object(object, xobject);
                BUG();
        }
        atomic_inc(&xobject->usage);
        write_unlock(&cache->active_lock);
 
-       _debug(">>> wait");
-       wait_on_bit(&xobject->flags, CACHEFILES_OBJECT_ACTIVE,
-                   cachefiles_wait_bit, TASK_UNINTERRUPTIBLE);
-       _debug("<<< waited");
+       if (test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
+               wait_queue_head_t *wq;
+
+               signed long timeout = 60 * HZ;
+               wait_queue_t wait;
+               bool requeue;
+
+               /* if the object we're waiting for is queued for processing,
+                * then just put ourselves on the queue behind it */
+               if (slow_work_is_queued(&xobject->fscache.work)) {
+                       _debug("queue OBJ%x behind OBJ%x immediately",
+                              object->fscache.debug_id,
+                              xobject->fscache.debug_id);
+                       goto requeue;
+               }
+
+               /* otherwise we sleep until either the object we're waiting for
+                * is done, or the slow-work facility wants the thread back to
+                * do other work */
+               wq = bit_waitqueue(&xobject->flags, CACHEFILES_OBJECT_ACTIVE);
+               init_wait(&wait);
+               requeue = false;
+               do {
+                       prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
+                       if (!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags))
+                               break;
+                       requeue = slow_work_sleep_till_thread_needed(
+                               &object->fscache.work, &timeout);
+               } while (timeout > 0 && !requeue);
+               finish_wait(wq, &wait);
+
+               if (requeue &&
+                   test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags)) {
+                       _debug("queue OBJ%x behind OBJ%x after wait",
+                              object->fscache.debug_id,
+                              xobject->fscache.debug_id);
+                       goto requeue;
+               }
+
+               if (timeout <= 0) {
+                       printk(KERN_ERR "\n");
+                       printk(KERN_ERR "CacheFiles: Error: Overlong"
+                              " wait for old active object to go away\n");
+                       cachefiles_printk_object(object, xobject);
+                       goto requeue;
+               }
+       }
+
+       ASSERT(!test_bit(CACHEFILES_OBJECT_ACTIVE, &xobject->flags));
 
        cache->cache.ops->put_object(&xobject->fscache);
        goto try_again;
+
+requeue:
+       clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
+       cache->cache.ops->put_object(&xobject->fscache);
+       _leave(" = -ETIMEDOUT");
+       return -ETIMEDOUT;
 }
 
 /*
@@ -254,7 +347,7 @@ int cachefiles_delete_object(struct cachefiles_cache *cache,
 
        dir = dget_parent(object->dentry);
 
-       mutex_lock(&dir->d_inode->i_mutex);
+       mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
        ret = cachefiles_bury_object(cache, dir, object->dentry);
 
        dput(dir);
@@ -307,7 +400,7 @@ lookup_again:
        /* search the current directory for the element name */
        _debug("lookup '%s'", name);
 
-       mutex_lock(&dir->d_inode->i_mutex);
+       mutex_lock_nested(&dir->d_inode->i_mutex, I_MUTEX_PARENT);
 
        start = jiffies;
        next = lookup_one_len(name, dir, nlen);
@@ -418,12 +511,15 @@ lookup_again:
        }
 
        /* note that we're now using this object */
-       cachefiles_mark_object_active(cache, object);
+       ret = cachefiles_mark_object_active(cache, object);
 
        mutex_unlock(&dir->d_inode->i_mutex);
        dput(dir);
        dir = NULL;
 
+       if (ret == -ETIMEDOUT)
+               goto mark_active_timed_out;
+
        _debug("=== OBTAINED_OBJECT ===");
 
        if (object->new) {
@@ -467,6 +563,10 @@ create_error:
                cachefiles_io_error(cache, "Create/mkdir failed");
        goto error;
 
+mark_active_timed_out:
+       _debug("mark active timed out");
+       goto release_dentry;
+
 check_error:
        _debug("check error %d", ret);
        write_lock(&cache->active_lock);
@@ -474,7 +574,7 @@ check_error:
        clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
        wake_up_bit(&object->flags, CACHEFILES_OBJECT_ACTIVE);
        write_unlock(&cache->active_lock);
-
+release_dentry:
        dput(object->dentry);
        object->dentry = NULL;
        goto error_out;
@@ -495,9 +595,6 @@ error:
 error_out2:
        dput(dir);
 error_out:
-       if (ret == -ENOSPC)
-               ret = -ENOBUFS;
-
        _leave(" = error %d", -ret);
        return ret;
 }
index a69787e7dd964b2c57c016b6d6aa87b987285ace..1d8332563863ce5d858971a68f4a7463c72385c2 100644 (file)
@@ -40,8 +40,10 @@ static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
 
        _debug("--- monitor %p %lx ---", page, page->flags);
 
-       if (!PageUptodate(page) && !PageError(page))
-               dump_stack();
+       if (!PageUptodate(page) && !PageError(page)) {
+               /* unlocked, not uptodate and not erroneous? */
+               _debug("page probably truncated");
+       }
 
        /* remove from the waitqueue */
        list_del(&wait->task_list);
@@ -60,6 +62,84 @@ static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
        return 0;
 }
 
+/*
+ * handle a probably truncated page
+ * - check to see if the page is still relevant and reissue the read if
+ *   possible
+ * - return -EIO on error, -ENODATA if the page is gone, -EINPROGRESS if we
+ *   must wait again and 0 if successful
+ */
+static int cachefiles_read_reissue(struct cachefiles_object *object,
+                                  struct cachefiles_one_read *monitor)
+{
+       struct address_space *bmapping = object->backer->d_inode->i_mapping;
+       struct page *backpage = monitor->back_page, *backpage2;
+       int ret;
+
+       kenter("{ino=%lx},{%lx,%lx}",
+              object->backer->d_inode->i_ino,
+              backpage->index, backpage->flags);
+
+       /* skip if the page was truncated away completely */
+       if (backpage->mapping != bmapping) {
+               kleave(" = -ENODATA [mapping]");
+               return -ENODATA;
+       }
+
+       backpage2 = find_get_page(bmapping, backpage->index);
+       if (!backpage2) {
+               kleave(" = -ENODATA [gone]");
+               return -ENODATA;
+       }
+
+       if (backpage != backpage2) {
+               put_page(backpage2);
+               kleave(" = -ENODATA [different]");
+               return -ENODATA;
+       }
+
+       /* the page is still there and we already have a ref on it, so we don't
+        * need a second */
+       put_page(backpage2);
+
+       INIT_LIST_HEAD(&monitor->op_link);
+       add_page_wait_queue(backpage, &monitor->monitor);
+
+       if (trylock_page(backpage)) {
+               ret = -EIO;
+               if (PageError(backpage))
+                       goto unlock_discard;
+               ret = 0;
+               if (PageUptodate(backpage))
+                       goto unlock_discard;
+
+               kdebug("reissue read");
+               ret = bmapping->a_ops->readpage(NULL, backpage);
+               if (ret < 0)
+                       goto unlock_discard;
+       }
+
+       /* but the page may have been read before the monitor was installed,
+        * in which case the monitor would miss the event - so we have to
+        * ensure that we do get one in such a case */
+       if (trylock_page(backpage)) {
+               _debug("jumpstart %p {%lx}", backpage, backpage->flags);
+               unlock_page(backpage);
+       }
+
+       /* it'll reappear on the todo list */
+       kleave(" = -EINPROGRESS");
+       return -EINPROGRESS;
+
+unlock_discard:
+       unlock_page(backpage);
+       spin_lock_irq(&object->work_lock);
+       list_del(&monitor->op_link);
+       spin_unlock_irq(&object->work_lock);
+       kleave(" = %d", ret);
+       return ret;
+}
+
 /*
  * copy data from backing pages to netfs pages to complete a read operation
  * - driven by FS-Cache's thread pool
@@ -92,20 +172,26 @@ static void cachefiles_read_copier(struct fscache_operation *_op)
 
                _debug("- copy {%lu}", monitor->back_page->index);
 
-               error = -EIO;
+       recheck:
                if (PageUptodate(monitor->back_page)) {
                        copy_highpage(monitor->netfs_page, monitor->back_page);
 
                        pagevec_add(&pagevec, monitor->netfs_page);
                        fscache_mark_pages_cached(monitor->op, &pagevec);
                        error = 0;
-               }
-
-               if (error)
+               } else if (!PageError(monitor->back_page)) {
+                       /* the page has probably been truncated */
+                       error = cachefiles_read_reissue(object, monitor);
+                       if (error == -EINPROGRESS)
+                               goto next;
+                       goto recheck;
+               } else {
                        cachefiles_io_error_obj(
                                object,
                                "Readpage failed on backing file %lx",
                                (unsigned long) monitor->back_page->flags);
+                       error = -EIO;
+               }
 
                page_cache_release(monitor->back_page);
 
@@ -114,6 +200,7 @@ static void cachefiles_read_copier(struct fscache_operation *_op)
                fscache_put_retrieval(op);
                kfree(monitor);
 
+       next:
                /* let the thread pool have some air occasionally */
                max--;
                if (max < 0 || need_resched()) {
@@ -333,7 +420,8 @@ int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
 
        shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
 
-       op->op.flags = FSCACHE_OP_FAST;
+       op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
+       op->op.flags |= FSCACHE_OP_FAST;
        op->op.processor = cachefiles_read_copier;
 
        pagevec_init(&pagevec, 0);
@@ -639,7 +727,8 @@ int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
 
        pagevec_init(&pagevec, 0);
 
-       op->op.flags = FSCACHE_OP_FAST;
+       op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
+       op->op.flags |= FSCACHE_OP_FAST;
        op->op.processor = cachefiles_read_copier;
 
        INIT_LIST_HEAD(&backpages);
@@ -801,7 +890,8 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
        struct cachefiles_cache *cache;
        mm_segment_t old_fs;
        struct file *file;
-       loff_t pos;
+       loff_t pos, eof;
+       size_t len;
        void *data;
        int ret;
 
@@ -835,15 +925,29 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
                ret = -EIO;
                if (file->f_op->write) {
                        pos = (loff_t) page->index << PAGE_SHIFT;
+
+                       /* we mustn't write more data than we have, so we have
+                        * to beware of a partial page at EOF */
+                       eof = object->fscache.store_limit_l;
+                       len = PAGE_SIZE;
+                       if (eof & ~PAGE_MASK) {
+                               ASSERTCMP(pos, <, eof);
+                               if (eof - pos < PAGE_SIZE) {
+                                       _debug("cut short %llx to %llx",
+                                              pos, eof);
+                                       len = eof - pos;
+                                       ASSERTCMP(pos + len, ==, eof);
+                               }
+                       }
+
                        data = kmap(page);
                        old_fs = get_fs();
                        set_fs(KERNEL_DS);
                        ret = file->f_op->write(
-                               file, (const void __user *) data, PAGE_SIZE,
-                               &pos);
+                               file, (const void __user *) data, len, &pos);
                        set_fs(old_fs);
                        kunmap(page);
-                       if (ret != PAGE_SIZE)
+                       if (ret != len)
                                ret = -EIO;
                }
                fput(file);
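
store_limit_l marks the cache object's effective EOF; if it falls inside the page being written, the write length is clamped so that no bytes beyond the netfs's data end up in the backing file. A userspace sketch of the clamp (PAGE_SIZE value assumed):

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

int main(void)
{
	unsigned long pos = 2 * PAGE_SIZE;	/* page index 2 */
	unsigned long eof = 10000;		/* store limit, mid-page */
	unsigned long len = PAGE_SIZE;

	if ((eof & ~PAGE_MASK) && eof - pos < PAGE_SIZE)
		len = eof - pos;
	printf("write %lu bytes at offset %lu\n", len, pos);	/* 1808 */
	return 0;
}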
index 145540a316ab4497be3c60814d05137aca4f0568..094ea65afc85354f029a2c3be38d01b684441156 100644 (file)
@@ -1,3 +1,12 @@
+Version 1.61
+------------
+Fix append problem to Samba servers (files opened with O_APPEND could
+have duplicated data). Fix oops in cifs_lookup. Work around problem
+mounting to OS/400 Netserve. Fix oops in cifs_get_tcp_session.
+Disable use of server inode numbers when the server only
+partially supports them (e.g. for one server, querying inode numbers on
+FindFirst fails but QPathInfo queries work).
+
 Version 1.60
 -------------
 Fix memory leak in reconnect.  Fix oops in DFS mount error path.
index 9a5e4f5f312272af2f06d7243a43c37fe37f3210..29f1da761bbf10fc948f16e0c77a0d75dea22125 100644 (file)
@@ -1037,7 +1037,7 @@ init_cifs(void)
        if (rc)
                goto out_unregister_key_type;
 #endif
-       rc = slow_work_register_user();
+       rc = slow_work_register_user(THIS_MODULE);
        if (rc)
                goto out_unregister_resolver_key;
 
index 627a60a6c1b11078c33c37503afcd8061f4ff428..1f42f772865a54f1f45b516b090533a4a4715bf4 100644 (file)
@@ -214,8 +214,6 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
                posix_flags |= SMB_O_EXCL;
        if (oflags & O_TRUNC)
                posix_flags |= SMB_O_TRUNC;
-       if (oflags & O_APPEND)
-               posix_flags |= SMB_O_APPEND;
        if (oflags & O_SYNC)
                posix_flags |= SMB_O_SYNC;
        if (oflags & O_DIRECTORY)
@@ -643,9 +641,9 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
         * O_EXCL: optimize away the lookup, but don't hash the dentry. Let
         * the VFS handle the create.
         */
-       if (nd->flags & LOOKUP_EXCL) {
+       if (nd && (nd->flags & LOOKUP_EXCL)) {
                d_instantiate(direntry, NULL);
-               return 0;
+               return NULL;
        }
 
        /* can not grab the rename sem here since it would
@@ -675,7 +673,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
         * reduction in network traffic in the other paths.
         */
        if (pTcon->unix_ext) {
-               if (!(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY)) &&
+               if (nd && !(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY)) &&
                     (nd->flags & LOOKUP_OPEN) && !pTcon->broken_posix_open &&
                     (nd->intent.open.flags & O_CREAT)) {
                        rc = cifs_posix_open(full_path, &newInode, nd->path.mnt,
index fc089f2f7f56ccbbd5662eb63e2ec9832d768004..2cf93ec40a677f1e4569f5dbd37ca979e8fc31ef 100644 (file)
@@ -284,7 +284,7 @@ static int f_setown_ex(struct file *filp, unsigned long arg)
                type = PIDTYPE_PID;
                break;
 
-       case F_OWNER_GID:
+       case F_OWNER_PGRP:
                type = PIDTYPE_PGID;
                break;
 
@@ -321,7 +321,7 @@ static int f_getown_ex(struct file *filp, unsigned long arg)
                break;
 
        case PIDTYPE_PGID:
-               owner.type = F_OWNER_GID;
+               owner.type = F_OWNER_PGRP;
                break;
 
        default:
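
The old F_OWNER_GID name was misleading: the value selects process-group ownership, not group-ID ownership. Userspace directing SIGIO to a process group would do something like the following (the fd choice is illustrative, and a glibc new enough to expose F_SETOWN_EX is assumed):

/* Userspace sketch: direct SIGIO/SIGURG to the caller's process group
 * using the renamed F_OWNER_PGRP constant. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	struct f_owner_ex owner = {
		.type = F_OWNER_PGRP,
		.pid  = getpgrp(),
	};

	if (fcntl(STDIN_FILENO, F_SETOWN_EX, &owner) == -1)
		perror("F_SETOWN_EX");
	return 0;
}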
index 9bbb8ce7bea02bad5cf1c8aab1fe1093ac92e855..864dac20a242a4e2b56de366646dfbe37d3112a5 100644 (file)
@@ -54,3 +54,10 @@ config FSCACHE_DEBUG
          enabled by setting bits in /sys/module/fscache/parameters/debug.
 
          See Documentation/filesystems/caching/fscache.txt for more information.
+
+config FSCACHE_OBJECT_LIST
+       bool "Maintain global object list for debugging purposes"
+       depends on FSCACHE && PROC_FS
+       help
+         Maintain a global list of active fscache objects that can be
+         retrieved through /proc/fs/fscache/objects for debugging purposes.
index 91571b95aacc38029f006f02bebcfaec671f5dc7..6d561531cb36c661355fa060e0b6f60a67509e15 100644 (file)
@@ -15,5 +15,6 @@ fscache-y := \
 fscache-$(CONFIG_PROC_FS) += proc.o
 fscache-$(CONFIG_FSCACHE_STATS) += stats.o
 fscache-$(CONFIG_FSCACHE_HISTOGRAM) += histogram.o
+fscache-$(CONFIG_FSCACHE_OBJECT_LIST) += object-list.o
 
 obj-$(CONFIG_FSCACHE) := fscache.o
index e21985bbb1fb00eedb38b6d5e7f829952c3eb066..6a3c48abd677f52eb2bd10a5bb89108cd5d89d0a 100644 (file)
@@ -263,6 +263,7 @@ int fscache_add_cache(struct fscache_cache *cache,
        spin_lock(&cache->object_list_lock);
        list_add_tail(&ifsdef->cache_link, &cache->object_list);
        spin_unlock(&cache->object_list_lock);
+       fscache_objlist_add(ifsdef);
 
        /* add the cache's netfs definition index object to the top level index
         * cookie as a known backing object */
@@ -380,11 +381,15 @@ void fscache_withdraw_cache(struct fscache_cache *cache)
 
        /* make sure all pages pinned by operations on behalf of the netfs are
         * written to disk */
+       fscache_stat(&fscache_n_cop_sync_cache);
        cache->ops->sync_cache(cache);
+       fscache_stat_d(&fscache_n_cop_sync_cache);
 
        /* dissociate all the netfs pages backed by this cache from the block
         * mappings in the cache */
+       fscache_stat(&fscache_n_cop_dissociate_pages);
        cache->ops->dissociate_pages(cache);
+       fscache_stat_d(&fscache_n_cop_dissociate_pages);
 
        /* we now have to destroy all the active objects pertaining to this
         * cache - which we do by passing them off to thread pool to be
index 72fd18f6c71f49fa79434e78e36aa5dc4598b60d..990535071a8aeadf4a67e646b6fdcad86403d63b 100644 (file)
@@ -36,6 +36,7 @@ void fscache_cookie_init_once(void *_cookie)
 
        memset(cookie, 0, sizeof(*cookie));
        spin_lock_init(&cookie->lock);
+       spin_lock_init(&cookie->stores_lock);
        INIT_HLIST_HEAD(&cookie->backing_objects);
 }
 
@@ -102,7 +103,9 @@ struct fscache_cookie *__fscache_acquire_cookie(
        cookie->netfs_data      = netfs_data;
        cookie->flags           = 0;
 
-       INIT_RADIX_TREE(&cookie->stores, GFP_NOFS);
+       /* radix tree insertion won't use the preallocation pool unless it's
+        * told it may not wait */
+       INIT_RADIX_TREE(&cookie->stores, GFP_NOFS & ~__GFP_WAIT);
 
        switch (cookie->def->type) {
        case FSCACHE_COOKIE_TYPE_INDEX:
@@ -249,7 +252,9 @@ static int fscache_alloc_object(struct fscache_cache *cache,
 
        /* ask the cache to allocate an object (we may end up with duplicate
         * objects at this stage, but we sort that out later) */
+       fscache_stat(&fscache_n_cop_alloc_object);
        object = cache->ops->alloc_object(cache, cookie);
+       fscache_stat_d(&fscache_n_cop_alloc_object);
        if (IS_ERR(object)) {
                fscache_stat(&fscache_n_object_no_alloc);
                ret = PTR_ERR(object);
@@ -270,8 +275,11 @@ static int fscache_alloc_object(struct fscache_cache *cache,
        /* only attach if we managed to allocate all we needed, otherwise
         * discard the object we just allocated and instead use the one
         * attached to the cookie */
-       if (fscache_attach_object(cookie, object) < 0)
+       if (fscache_attach_object(cookie, object) < 0) {
+               fscache_stat(&fscache_n_cop_put_object);
                cache->ops->put_object(object);
+               fscache_stat_d(&fscache_n_cop_put_object);
+       }
 
        _leave(" = 0");
        return 0;
@@ -287,7 +295,9 @@ object_already_extant:
        return 0;
 
 error_put:
+       fscache_stat(&fscache_n_cop_put_object);
        cache->ops->put_object(object);
+       fscache_stat_d(&fscache_n_cop_put_object);
 error:
        _leave(" = %d", ret);
        return ret;
@@ -349,6 +359,8 @@ static int fscache_attach_object(struct fscache_cookie *cookie,
        object->cookie = cookie;
        atomic_inc(&cookie->usage);
        hlist_add_head(&object->cookie_link, &cookie->backing_objects);
+
+       fscache_objlist_add(object);
        ret = 0;
 
 cant_attach_object:
@@ -403,6 +415,8 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
        unsigned long event;
 
        fscache_stat(&fscache_n_relinquishes);
+       if (retire)
+               fscache_stat(&fscache_n_relinquishes_retire);
 
        if (!cookie) {
                fscache_stat(&fscache_n_relinquishes_null);
@@ -428,12 +442,8 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
 
        event = retire ? FSCACHE_OBJECT_EV_RETIRE : FSCACHE_OBJECT_EV_RELEASE;
 
-       /* detach pointers back to the netfs */
        spin_lock(&cookie->lock);
 
-       cookie->netfs_data      = NULL;
-       cookie->def             = NULL;
-
        /* break links with all the active objects */
        while (!hlist_empty(&cookie->backing_objects)) {
                object = hlist_entry(cookie->backing_objects.first,
@@ -456,6 +466,10 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
                        BUG();
        }
 
+       /* detach pointers back to the netfs */
+       cookie->netfs_data      = NULL;
+       cookie->def             = NULL;
+
        spin_unlock(&cookie->lock);
 
        if (cookie->parent) {
index 1c341304621fc7b132b0f2fffa91ab283f758779..edd7434ab6e5eb5ff3a8b3b9f38b127a3e2753cf 100644 (file)
@@ -17,6 +17,7 @@
  * - cache->object_list_lock
  * - object->lock
  * - object->parent->lock
+ * - cookie->stores_lock
  * - fscache_thread_lock
  *
  */
@@ -88,10 +89,23 @@ extern int fscache_wait_bit_interruptible(void *);
 /*
  * object.c
  */
+extern const char fscache_object_states_short[FSCACHE_OBJECT__NSTATES][5];
+
 extern void fscache_withdrawing_object(struct fscache_cache *,
                                       struct fscache_object *);
 extern void fscache_enqueue_object(struct fscache_object *);
 
+/*
+ * object-list.c
+ */
+#ifdef CONFIG_FSCACHE_OBJECT_LIST
+extern const struct file_operations fscache_objlist_fops;
+
+extern void fscache_objlist_add(struct fscache_object *);
+#else
+#define fscache_objlist_add(object) do {} while(0)
+#endif
+
 /*
  * operation.c
  */
@@ -99,6 +113,7 @@ extern int fscache_submit_exclusive_op(struct fscache_object *,
                                       struct fscache_operation *);
 extern int fscache_submit_op(struct fscache_object *,
                             struct fscache_operation *);
+extern int fscache_cancel_op(struct fscache_operation *);
 extern void fscache_abort_object(struct fscache_object *);
 extern void fscache_start_operations(struct fscache_object *);
 extern void fscache_operation_gc(struct work_struct *);
@@ -127,6 +142,8 @@ extern atomic_t fscache_n_op_enqueue;
 extern atomic_t fscache_n_op_deferred_release;
 extern atomic_t fscache_n_op_release;
 extern atomic_t fscache_n_op_gc;
+extern atomic_t fscache_n_op_cancelled;
+extern atomic_t fscache_n_op_rejected;
 
 extern atomic_t fscache_n_attr_changed;
 extern atomic_t fscache_n_attr_changed_ok;
@@ -138,6 +155,8 @@ extern atomic_t fscache_n_allocs;
 extern atomic_t fscache_n_allocs_ok;
 extern atomic_t fscache_n_allocs_wait;
 extern atomic_t fscache_n_allocs_nobufs;
+extern atomic_t fscache_n_allocs_intr;
+extern atomic_t fscache_n_allocs_object_dead;
 extern atomic_t fscache_n_alloc_ops;
 extern atomic_t fscache_n_alloc_op_waits;
 
@@ -148,6 +167,7 @@ extern atomic_t fscache_n_retrievals_nodata;
 extern atomic_t fscache_n_retrievals_nobufs;
 extern atomic_t fscache_n_retrievals_intr;
 extern atomic_t fscache_n_retrievals_nomem;
+extern atomic_t fscache_n_retrievals_object_dead;
 extern atomic_t fscache_n_retrieval_ops;
 extern atomic_t fscache_n_retrieval_op_waits;
 
@@ -158,6 +178,14 @@ extern atomic_t fscache_n_stores_nobufs;
 extern atomic_t fscache_n_stores_oom;
 extern atomic_t fscache_n_store_ops;
 extern atomic_t fscache_n_store_calls;
+extern atomic_t fscache_n_store_pages;
+extern atomic_t fscache_n_store_radix_deletes;
+extern atomic_t fscache_n_store_pages_over_limit;
+
+extern atomic_t fscache_n_store_vmscan_not_storing;
+extern atomic_t fscache_n_store_vmscan_gone;
+extern atomic_t fscache_n_store_vmscan_busy;
+extern atomic_t fscache_n_store_vmscan_cancelled;
 
 extern atomic_t fscache_n_marks;
 extern atomic_t fscache_n_uncaches;
@@ -176,6 +204,7 @@ extern atomic_t fscache_n_updates_run;
 extern atomic_t fscache_n_relinquishes;
 extern atomic_t fscache_n_relinquishes_null;
 extern atomic_t fscache_n_relinquishes_waitcrt;
+extern atomic_t fscache_n_relinquishes_retire;
 
 extern atomic_t fscache_n_cookie_index;
 extern atomic_t fscache_n_cookie_data;
@@ -186,6 +215,7 @@ extern atomic_t fscache_n_object_no_alloc;
 extern atomic_t fscache_n_object_lookups;
 extern atomic_t fscache_n_object_lookups_negative;
 extern atomic_t fscache_n_object_lookups_positive;
+extern atomic_t fscache_n_object_lookups_timed_out;
 extern atomic_t fscache_n_object_created;
 extern atomic_t fscache_n_object_avail;
 extern atomic_t fscache_n_object_dead;
@@ -195,15 +225,41 @@ extern atomic_t fscache_n_checkaux_okay;
 extern atomic_t fscache_n_checkaux_update;
 extern atomic_t fscache_n_checkaux_obsolete;
 
+extern atomic_t fscache_n_cop_alloc_object;
+extern atomic_t fscache_n_cop_lookup_object;
+extern atomic_t fscache_n_cop_lookup_complete;
+extern atomic_t fscache_n_cop_grab_object;
+extern atomic_t fscache_n_cop_update_object;
+extern atomic_t fscache_n_cop_drop_object;
+extern atomic_t fscache_n_cop_put_object;
+extern atomic_t fscache_n_cop_sync_cache;
+extern atomic_t fscache_n_cop_attr_changed;
+extern atomic_t fscache_n_cop_read_or_alloc_page;
+extern atomic_t fscache_n_cop_read_or_alloc_pages;
+extern atomic_t fscache_n_cop_allocate_page;
+extern atomic_t fscache_n_cop_allocate_pages;
+extern atomic_t fscache_n_cop_write_page;
+extern atomic_t fscache_n_cop_uncache_page;
+extern atomic_t fscache_n_cop_dissociate_pages;
+
 static inline void fscache_stat(atomic_t *stat)
 {
        atomic_inc(stat);
 }
 
+static inline void fscache_stat_d(atomic_t *stat)
+{
+       atomic_dec(stat);
+}
+
+#define __fscache_stat(stat) (stat)
+
 extern const struct file_operations fscache_stats_fops;
 #else
 
+#define __fscache_stat(stat) (NULL)
 #define fscache_stat(stat) do {} while (0)
+#define fscache_stat_d(stat) do {} while (0)
 #endif
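
fscache_stat() before a cache-backend call and fscache_stat_d() after it make each fscache_n_cop_* counter a gauge of operations currently in flight rather than a cumulative total. A userspace analogue of the bracketing pattern:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int n_cop_alloc_object;

static void alloc_object(void)
{
	/* ... cache backend work would happen here ... */
}

int main(void)
{
	atomic_fetch_add(&n_cop_alloc_object, 1);	/* fscache_stat() */
	alloc_object();
	atomic_fetch_sub(&n_cop_alloc_object, 1);	/* fscache_stat_d() */
	printf("in flight now: %d\n", atomic_load(&n_cop_alloc_object));
	return 0;
}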
 
 /*
index 4de41b5974991f05b6a1fc4f07fe40ac746770d8..add6bdb53f04400779ca899370d07bd2480e108a 100644 (file)
@@ -48,7 +48,7 @@ static int __init fscache_init(void)
 {
        int ret;
 
-       ret = slow_work_register_user();
+       ret = slow_work_register_user(THIS_MODULE);
        if (ret < 0)
                goto error_slow_work;
 
@@ -80,7 +80,7 @@ error_kobj:
 error_cookie_jar:
        fscache_proc_cleanup();
 error_proc:
-       slow_work_unregister_user();
+       slow_work_unregister_user(THIS_MODULE);
 error_slow_work:
        return ret;
 }
@@ -97,7 +97,7 @@ static void __exit fscache_exit(void)
        kobject_put(fscache_root);
        kmem_cache_destroy(fscache_cookie_jar);
        fscache_proc_cleanup();
-       slow_work_unregister_user();
+       slow_work_unregister_user(THIS_MODULE);
        printk(KERN_NOTICE "FS-Cache: Unloaded\n");
 }
 
diff --git a/fs/fscache/object-list.c b/fs/fscache/object-list.c
new file mode 100644 (file)
index 0000000..e590242
--- /dev/null
@@ -0,0 +1,432 @@
+/* Global fscache object list maintainer and viewer
+ *
+ * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#define FSCACHE_DEBUG_LEVEL COOKIE
+#include <linux/module.h>
+#include <linux/seq_file.h>
+#include <linux/key.h>
+#include <keys/user-type.h>
+#include "internal.h"
+
+static struct rb_root fscache_object_list;
+static DEFINE_RWLOCK(fscache_object_list_lock);
+
+struct fscache_objlist_data {
+       unsigned long   config;         /* display configuration */
+#define FSCACHE_OBJLIST_CONFIG_KEY     0x00000001      /* show object keys */
+#define FSCACHE_OBJLIST_CONFIG_AUX     0x00000002      /* show object auxdata */
+#define FSCACHE_OBJLIST_CONFIG_COOKIE  0x00000004      /* show objects with cookies */
+#define FSCACHE_OBJLIST_CONFIG_NOCOOKIE        0x00000008      /* show objects without cookies */
+#define FSCACHE_OBJLIST_CONFIG_BUSY    0x00000010      /* show busy objects */
+#define FSCACHE_OBJLIST_CONFIG_IDLE    0x00000020      /* show idle objects */
+#define FSCACHE_OBJLIST_CONFIG_PENDWR  0x00000040      /* show objects with pending writes */
+#define FSCACHE_OBJLIST_CONFIG_NOPENDWR        0x00000080      /* show objects without pending writes */
+#define FSCACHE_OBJLIST_CONFIG_READS   0x00000100      /* show objects with active reads */
+#define FSCACHE_OBJLIST_CONFIG_NOREADS 0x00000200      /* show objects without active reads */
+#define FSCACHE_OBJLIST_CONFIG_EVENTS  0x00000400      /* show objects with events */
+#define FSCACHE_OBJLIST_CONFIG_NOEVENTS        0x00000800      /* show objects without events */
+#define FSCACHE_OBJLIST_CONFIG_WORK    0x00001000      /* show objects with slow work */
+#define FSCACHE_OBJLIST_CONFIG_NOWORK  0x00002000      /* show objects without slow work */
+
+       u8              buf[512];       /* key and aux data buffer */
+};
+
+/*
+ * Add an object to the object list
+ * - we use the address of the fscache_object structure as the key into the
+ *   tree
+ */
+void fscache_objlist_add(struct fscache_object *obj)
+{
+       struct fscache_object *xobj;
+       struct rb_node **p = &fscache_object_list.rb_node, *parent = NULL;
+
+       write_lock(&fscache_object_list_lock);
+
+       while (*p) {
+               parent = *p;
+               xobj = rb_entry(parent, struct fscache_object, objlist_link);
+
+               if (obj < xobj)
+                       p = &(*p)->rb_left;
+               else if (obj > xobj)
+                       p = &(*p)->rb_right;
+               else
+                       BUG();
+       }
+
+       rb_link_node(&obj->objlist_link, parent, p);
+       rb_insert_color(&obj->objlist_link, &fscache_object_list);
+
+       write_unlock(&fscache_object_list_lock);
+}
+
+/**
+ * fscache_object_destroy - Note that a cache object is about to be destroyed
+ * @object: The object to be destroyed
+ *
+ * Note the imminent destruction and deallocation of a cache object record.
+ */
+void fscache_object_destroy(struct fscache_object *obj)
+{
+       write_lock(&fscache_object_list_lock);
+
+       BUG_ON(RB_EMPTY_ROOT(&fscache_object_list));
+       rb_erase(&obj->objlist_link, &fscache_object_list);
+
+       write_unlock(&fscache_object_list_lock);
+}
+EXPORT_SYMBOL(fscache_object_destroy);
+
+/*
+ * find the object in the tree on or after the specified index
+ */
+static struct fscache_object *fscache_objlist_lookup(loff_t *_pos)
+{
+       struct fscache_object *pobj, *obj, *minobj = NULL;
+       struct rb_node *p;
+       unsigned long pos;
+
+       if (*_pos >= (unsigned long) ERR_PTR(-ENOENT))
+               return NULL;
+       pos = *_pos;
+
+       /* banners (can't represent line 0 by pos 0 as that would involve
+        * returning a NULL pointer) */
+       if (pos == 0)
+               return (struct fscache_object *) ++(*_pos);
+       if (pos < 3)
+               return (struct fscache_object *)pos;
+
+       pobj = (struct fscache_object *)pos;
+       p = fscache_object_list.rb_node;
+       while (p) {
+               obj = rb_entry(p, struct fscache_object, objlist_link);
+               if (pobj < obj) {
+                       if (!minobj || minobj > obj)
+                               minobj = obj;
+                       p = p->rb_left;
+               } else if (pobj > obj) {
+                       p = p->rb_right;
+               } else {
+                       minobj = obj;
+                       break;
+               }
+               obj = NULL;
+       }
+
+       if (!minobj)
+               *_pos = (unsigned long) ERR_PTR(-ENOENT);
+       else if (minobj != obj)
+               *_pos = (unsigned long) minobj;
+       return minobj;
+}
+
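
fscache_objlist_lookup() reuses the seq_file position as a pointer: positions 1 and 2 select the banner lines, real object addresses follow, and ERR_PTR(-ENOENT) marks end-of-list because error values live at the very top of the address space. A userspace sketch of the error-pointer encoding (constants mirror the kernel's):

#include <stdio.h>

#define MAX_ERRNO	4095
#define ERR_PTR(err)	((void *)(long)(err))
#define IS_ERR_VALUE(x)	((unsigned long)(x) >= (unsigned long)-MAX_ERRNO)

int main(void)
{
	unsigned long pos = (unsigned long)ERR_PTR(-2);	/* -ENOENT */

	printf("sentinel %#lx is %san error value\n",
	       pos, IS_ERR_VALUE(pos) ? "" : "not ");
	return 0;
}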
+/*
+ * set up the iterator to start reading from the first line
+ */
+static void *fscache_objlist_start(struct seq_file *m, loff_t *_pos)
+       __acquires(&fscache_object_list_lock)
+{
+       read_lock(&fscache_object_list_lock);
+       return fscache_objlist_lookup(_pos);
+}
+
+/*
+ * move to the next line
+ */
+static void *fscache_objlist_next(struct seq_file *m, void *v, loff_t *_pos)
+{
+       (*_pos)++;
+       return fscache_objlist_lookup(_pos);
+}
+
+/*
+ * clean up after reading
+ */
+static void fscache_objlist_stop(struct seq_file *m, void *v)
+       __releases(&fscache_object_list_lock)
+{
+       read_unlock(&fscache_object_list_lock);
+}
+
+/*
+ * display an object
+ */
+static int fscache_objlist_show(struct seq_file *m, void *v)
+{
+       struct fscache_objlist_data *data = m->private;
+       struct fscache_object *obj = v;
+       unsigned long config = data->config;
+       uint16_t keylen, auxlen;
+       char _type[3], *type;
+       bool no_cookie;
+       u8 *buf = data->buf, *p;
+
+       if ((unsigned long) v == 1) {
+               seq_puts(m, "OBJECT   PARENT   STAT CHLDN OPS OOP IPR EX READS"
+                        " EM EV F S"
+                        " | NETFS_COOKIE_DEF TY FL NETFS_DATA");
+               if (config & (FSCACHE_OBJLIST_CONFIG_KEY |
+                             FSCACHE_OBJLIST_CONFIG_AUX))
+                       seq_puts(m, "       ");
+               if (config & FSCACHE_OBJLIST_CONFIG_KEY)
+                       seq_puts(m, "OBJECT_KEY");
+               if ((config & (FSCACHE_OBJLIST_CONFIG_KEY |
+                              FSCACHE_OBJLIST_CONFIG_AUX)) ==
+                   (FSCACHE_OBJLIST_CONFIG_KEY | FSCACHE_OBJLIST_CONFIG_AUX))
+                       seq_puts(m, ", ");
+               if (config & FSCACHE_OBJLIST_CONFIG_AUX)
+                       seq_puts(m, "AUX_DATA");
+               seq_puts(m, "\n");
+               return 0;
+       }
+
+       if ((unsigned long) v == 2) {
+               seq_puts(m, "======== ======== ==== ===== === === === == ====="
+                        " == == = ="
+                        " | ================ == == ================");
+               if (config & (FSCACHE_OBJLIST_CONFIG_KEY |
+                             FSCACHE_OBJLIST_CONFIG_AUX))
+                       seq_puts(m, " ================");
+               seq_puts(m, "\n");
+               return 0;
+       }
+
+       /* filter out any unwanted objects */
+#define FILTER(criterion, _yes, _no)                                   \
+       do {                                                            \
+               unsigned long yes = FSCACHE_OBJLIST_CONFIG_##_yes;      \
+               unsigned long no = FSCACHE_OBJLIST_CONFIG_##_no;        \
+               if (criterion) {                                        \
+                       if (!(config & yes))                            \
+                               return 0;                               \
+               } else {                                                \
+                       if (!(config & no))                             \
+                               return 0;                               \
+               }                                                       \
+       } while(0)
+
+       if (~config) {
+               FILTER(obj->cookie,
+                      COOKIE, NOCOOKIE);
+               FILTER(obj->state != FSCACHE_OBJECT_ACTIVE ||
+                      obj->n_ops != 0 ||
+                      obj->n_obj_ops != 0 ||
+                      obj->flags ||
+                      !list_empty(&obj->dependents),
+                      BUSY, IDLE);
+               FILTER(test_bit(FSCACHE_OBJECT_PENDING_WRITE, &obj->flags),
+                      PENDWR, NOPENDWR);
+               FILTER(atomic_read(&obj->n_reads),
+                      READS, NOREADS);
+               FILTER(obj->events & obj->event_mask,
+                      EVENTS, NOEVENTS);
+               FILTER(obj->work.flags & ~(1UL << SLOW_WORK_VERY_SLOW),
+                      WORK, NOWORK);
+       }
+
+       seq_printf(m,
+                  "%8x %8x %s %5u %3u %3u %3u %2u %5u %2lx %2lx %1lx %1lx | ",
+                  obj->debug_id,
+                  obj->parent ? obj->parent->debug_id : -1,
+                  fscache_object_states_short[obj->state],
+                  obj->n_children,
+                  obj->n_ops,
+                  obj->n_obj_ops,
+                  obj->n_in_progress,
+                  obj->n_exclusive,
+                  atomic_read(&obj->n_reads),
+                  obj->event_mask & FSCACHE_OBJECT_EVENTS_MASK,
+                  obj->events,
+                  obj->flags,
+                  obj->work.flags);
+
+       no_cookie = true;
+       keylen = auxlen = 0;
+       if (obj->cookie) {
+               spin_lock(&obj->lock);
+               if (obj->cookie) {
+                       switch (obj->cookie->def->type) {
+                       case 0:
+                               type = "IX";
+                               break;
+                       case 1:
+                               type = "DT";
+                               break;
+                       default:
+                               sprintf(_type, "%02u",
+                                       obj->cookie->def->type);
+                               type = _type;
+                               break;
+                       }
+
+                       seq_printf(m, "%-16s %s %2lx %16p",
+                                  obj->cookie->def->name,
+                                  type,
+                                  obj->cookie->flags,
+                                  obj->cookie->netfs_data);
+
+                       if (obj->cookie->def->get_key &&
+                           config & FSCACHE_OBJLIST_CONFIG_KEY)
+                               keylen = obj->cookie->def->get_key(
+                                       obj->cookie->netfs_data,
+                                       buf, 400);
+
+                       if (obj->cookie->def->get_aux &&
+                           config & FSCACHE_OBJLIST_CONFIG_AUX)
+                               auxlen = obj->cookie->def->get_aux(
+                                       obj->cookie->netfs_data,
+                                       buf + keylen, 512 - keylen);
+
+                       no_cookie = false;
+               }
+               spin_unlock(&obj->lock);
+
+               if (!no_cookie && (keylen > 0 || auxlen > 0)) {
+                       seq_printf(m, " ");
+                       for (p = buf; keylen > 0; keylen--)
+                               seq_printf(m, "%02x", *p++);
+                       if (auxlen > 0) {
+                               if (config & FSCACHE_OBJLIST_CONFIG_KEY)
+                                       seq_printf(m, ", ");
+                               for (; auxlen > 0; auxlen--)
+                                       seq_printf(m, "%02x", *p++);
+                       }
+               }
+       }
+
+       if (no_cookie)
+               seq_printf(m, "<no_cookie>\n");
+       else
+               seq_printf(m, "\n");
+       return 0;
+}
+
+static const struct seq_operations fscache_objlist_ops = {
+       .start          = fscache_objlist_start,
+       .stop           = fscache_objlist_stop,
+       .next           = fscache_objlist_next,
+       .show           = fscache_objlist_show,
+};
+
+/*
+ * get the configuration for filtering the list
+ */
+static void fscache_objlist_config(struct fscache_objlist_data *data)
+{
+#ifdef CONFIG_KEYS
+       struct user_key_payload *confkey;
+       unsigned long config;
+       struct key *key;
+       const char *buf;
+       int len;
+
+       key = request_key(&key_type_user, "fscache:objlist", NULL);
+       if (IS_ERR(key))
+               goto no_config;
+
+       config = 0;
+       rcu_read_lock();
+
+       confkey = key->payload.data;
+       buf = confkey->data;
+
+       for (len = confkey->datalen - 1; len >= 0; len--) {
+               switch (buf[len]) {
+               case 'K': config |= FSCACHE_OBJLIST_CONFIG_KEY;         break;
+               case 'A': config |= FSCACHE_OBJLIST_CONFIG_AUX;         break;
+               case 'C': config |= FSCACHE_OBJLIST_CONFIG_COOKIE;      break;
+               case 'c': config |= FSCACHE_OBJLIST_CONFIG_NOCOOKIE;    break;
+               case 'B': config |= FSCACHE_OBJLIST_CONFIG_BUSY;        break;
+               case 'b': config |= FSCACHE_OBJLIST_CONFIG_IDLE;        break;
+               case 'W': config |= FSCACHE_OBJLIST_CONFIG_PENDWR;      break;
+               case 'w': config |= FSCACHE_OBJLIST_CONFIG_NOPENDWR;    break;
+               case 'R': config |= FSCACHE_OBJLIST_CONFIG_READS;       break;
+               case 'r': config |= FSCACHE_OBJLIST_CONFIG_NOREADS;     break;
+               case 'S': config |= FSCACHE_OBJLIST_CONFIG_WORK;        break;
+               case 's': config |= FSCACHE_OBJLIST_CONFIG_NOWORK;      break;
+               }
+       }
+
+       rcu_read_unlock();
+       key_put(key);
+
+       if (!(config & (FSCACHE_OBJLIST_CONFIG_COOKIE | FSCACHE_OBJLIST_CONFIG_NOCOOKIE)))
+           config   |= FSCACHE_OBJLIST_CONFIG_COOKIE | FSCACHE_OBJLIST_CONFIG_NOCOOKIE;
+       if (!(config & (FSCACHE_OBJLIST_CONFIG_BUSY | FSCACHE_OBJLIST_CONFIG_IDLE)))
+           config   |= FSCACHE_OBJLIST_CONFIG_BUSY | FSCACHE_OBJLIST_CONFIG_IDLE;
+       if (!(config & (FSCACHE_OBJLIST_CONFIG_PENDWR | FSCACHE_OBJLIST_CONFIG_NOPENDWR)))
+           config   |= FSCACHE_OBJLIST_CONFIG_PENDWR | FSCACHE_OBJLIST_CONFIG_NOPENDWR;
+       if (!(config & (FSCACHE_OBJLIST_CONFIG_READS | FSCACHE_OBJLIST_CONFIG_NOREADS)))
+           config   |= FSCACHE_OBJLIST_CONFIG_READS | FSCACHE_OBJLIST_CONFIG_NOREADS;
+       if (!(config & (FSCACHE_OBJLIST_CONFIG_EVENTS | FSCACHE_OBJLIST_CONFIG_NOEVENTS)))
+           config   |= FSCACHE_OBJLIST_CONFIG_EVENTS | FSCACHE_OBJLIST_CONFIG_NOEVENTS;
+       if (!(config & (FSCACHE_OBJLIST_CONFIG_WORK | FSCACHE_OBJLIST_CONFIG_NOWORK)))
+           config   |= FSCACHE_OBJLIST_CONFIG_WORK | FSCACHE_OBJLIST_CONFIG_NOWORK;
+
+       data->config = config;
+       return;
+
+no_config:
+#endif
+       data->config = ULONG_MAX;
+}
+
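
The filter is configured out of band: the reader adds a user-type key named "fscache:objlist" to a keyring it controls before opening the proc file. A userspace sketch using libkeyutils (link with -lkeyutils; the payload "KB" would request object keys plus busy objects, per the switch above):

#include <keyutils.h>
#include <stdio.h>

int main(void)
{
	key_serial_t key;

	key = add_key("user", "fscache:objlist", "KB", 2,
		      KEY_SPEC_SESSION_KEYRING);
	if (key < 0)
		perror("add_key");
	else
		printf("config key %d installed\n", key);
	return 0;
}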
+/*
+ * open "/proc/fs/fscache/objects" to provide a list of active objects
+ * - can be configured by a user-defined key added to the caller's keyrings
+ */
+static int fscache_objlist_open(struct inode *inode, struct file *file)
+{
+       struct fscache_objlist_data *data;
+       struct seq_file *m;
+       int ret;
+
+       ret = seq_open(file, &fscache_objlist_ops);
+       if (ret < 0)
+               return ret;
+
+       m = file->private_data;
+
+       /* buffer for key extraction */
+       data = kmalloc(sizeof(struct fscache_objlist_data), GFP_KERNEL);
+       if (!data) {
+               seq_release(inode, file);
+               return -ENOMEM;
+       }
+
+       /* get the configuration key */
+       fscache_objlist_config(data);
+
+       m->private = data;
+       return 0;
+}
+
+/*
+ * clean up on close
+ */
+static int fscache_objlist_release(struct inode *inode, struct file *file)
+{
+       struct seq_file *m = file->private_data;
+
+       kfree(m->private);
+       m->private = NULL;
+       return seq_release(inode, file);
+}
+
+const struct file_operations fscache_objlist_fops = {
+       .owner          = THIS_MODULE,
+       .open           = fscache_objlist_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = fscache_objlist_release,
+};
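
The object list above can be filtered per reader: fscache_objlist_config() fetches a user-type key called "fscache:objlist" from the opening process's keyrings and treats its payload as a string of single-letter switches ('K' to include object keys, 'B'/'b' to select busy/idle objects, and so on, with each unset axis defaulting to both halves). A minimal userspace sketch of installing such a key before reading the file, assuming libkeyutils is available (the "KB" payload is just one plausible combination):

	/* build with -lkeyutils; error handling kept to a minimum */
	#include <keyutils.h>
	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		/* "KB": show object keys, busy objects only */
		key_serial_t key = add_key("user", "fscache:objlist",
					   "KB", 2, KEY_SPEC_SESSION_KEYRING);
		if (key < 0) {
			perror("add_key");
			return EXIT_FAILURE;
		}
		/* the kernel rereads the key on each open() of the file */
		return system("cat /proc/fs/fscache/objects");
	}
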
index 392a41b1b79d417324f4cc41dd94e670d6272400..e513ac599c8e9b587c8646e33485be20d2ce1c4e 100644 (file)
 
 #define FSCACHE_DEBUG_LEVEL COOKIE
 #include <linux/module.h>
+#include <linux/seq_file.h>
 #include "internal.h"
 
-const char *fscache_object_states[] = {
+const char *fscache_object_states[FSCACHE_OBJECT__NSTATES] = {
        [FSCACHE_OBJECT_INIT]           = "OBJECT_INIT",
        [FSCACHE_OBJECT_LOOKING_UP]     = "OBJECT_LOOKING_UP",
        [FSCACHE_OBJECT_CREATING]       = "OBJECT_CREATING",
@@ -33,9 +34,28 @@ const char *fscache_object_states[] = {
 };
 EXPORT_SYMBOL(fscache_object_states);
 
+const char fscache_object_states_short[FSCACHE_OBJECT__NSTATES][5] = {
+       [FSCACHE_OBJECT_INIT]           = "INIT",
+       [FSCACHE_OBJECT_LOOKING_UP]     = "LOOK",
+       [FSCACHE_OBJECT_CREATING]       = "CRTN",
+       [FSCACHE_OBJECT_AVAILABLE]      = "AVBL",
+       [FSCACHE_OBJECT_ACTIVE]         = "ACTV",
+       [FSCACHE_OBJECT_UPDATING]       = "UPDT",
+       [FSCACHE_OBJECT_DYING]          = "DYNG",
+       [FSCACHE_OBJECT_LC_DYING]       = "LCDY",
+       [FSCACHE_OBJECT_ABORT_INIT]     = "ABTI",
+       [FSCACHE_OBJECT_RELEASING]      = "RELS",
+       [FSCACHE_OBJECT_RECYCLING]      = "RCYC",
+       [FSCACHE_OBJECT_WITHDRAWING]    = "WTHD",
+       [FSCACHE_OBJECT_DEAD]           = "DEAD",
+};
+
 static void fscache_object_slow_work_put_ref(struct slow_work *);
 static int  fscache_object_slow_work_get_ref(struct slow_work *);
 static void fscache_object_slow_work_execute(struct slow_work *);
+#ifdef CONFIG_SLOW_WORK_PROC
+static void fscache_object_slow_work_desc(struct slow_work *, struct seq_file *);
+#endif
 static void fscache_initialise_object(struct fscache_object *);
 static void fscache_lookup_object(struct fscache_object *);
 static void fscache_object_available(struct fscache_object *);
@@ -45,9 +65,13 @@ static void fscache_enqueue_dependents(struct fscache_object *);
 static void fscache_dequeue_object(struct fscache_object *);
 
 const struct slow_work_ops fscache_object_slow_work_ops = {
+       .owner          = THIS_MODULE,
        .get_ref        = fscache_object_slow_work_get_ref,
        .put_ref        = fscache_object_slow_work_put_ref,
        .execute        = fscache_object_slow_work_execute,
+#ifdef CONFIG_SLOW_WORK_PROC
+       .desc           = fscache_object_slow_work_desc,
+#endif
 };
 EXPORT_SYMBOL(fscache_object_slow_work_ops);
 
@@ -81,6 +105,7 @@ static inline void fscache_done_parent_op(struct fscache_object *object)
 static void fscache_object_state_machine(struct fscache_object *object)
 {
        enum fscache_object_state new_state;
+       struct fscache_cookie *cookie;
 
        ASSERT(object != NULL);
 
@@ -120,20 +145,31 @@ static void fscache_object_state_machine(struct fscache_object *object)
        case FSCACHE_OBJECT_UPDATING:
                clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
                fscache_stat(&fscache_n_updates_run);
+               fscache_stat(&fscache_n_cop_update_object);
                object->cache->ops->update_object(object);
+               fscache_stat_d(&fscache_n_cop_update_object);
                goto active_transit;
 
                /* handle an object dying during lookup or creation */
        case FSCACHE_OBJECT_LC_DYING:
                object->event_mask &= ~(1 << FSCACHE_OBJECT_EV_UPDATE);
+               fscache_stat(&fscache_n_cop_lookup_complete);
                object->cache->ops->lookup_complete(object);
+               fscache_stat_d(&fscache_n_cop_lookup_complete);
 
                spin_lock(&object->lock);
                object->state = FSCACHE_OBJECT_DYING;
-               if (test_and_clear_bit(FSCACHE_COOKIE_CREATING,
-                                      &object->cookie->flags))
-                       wake_up_bit(&object->cookie->flags,
-                                   FSCACHE_COOKIE_CREATING);
+               cookie = object->cookie;
+               if (cookie) {
+                       if (test_and_clear_bit(FSCACHE_COOKIE_LOOKING_UP,
+                                              &cookie->flags))
+                               wake_up_bit(&cookie->flags,
+                                           FSCACHE_COOKIE_LOOKING_UP);
+                       if (test_and_clear_bit(FSCACHE_COOKIE_CREATING,
+                                              &cookie->flags))
+                               wake_up_bit(&cookie->flags,
+                                           FSCACHE_COOKIE_CREATING);
+               }
                spin_unlock(&object->lock);
 
                fscache_done_parent_op(object);
@@ -165,6 +201,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
                }
                spin_unlock(&object->lock);
                fscache_enqueue_dependents(object);
+               fscache_start_operations(object);
                goto terminal_transit;
 
                /* handle an abort during initialisation */
@@ -316,14 +353,29 @@ static void fscache_object_slow_work_execute(struct slow_work *work)
 
        _enter("{OBJ%x}", object->debug_id);
 
-       clear_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
-
        start = jiffies;
        fscache_object_state_machine(object);
        fscache_hist(fscache_objs_histogram, start);
        if (object->events & object->event_mask)
                fscache_enqueue_object(object);
+       clear_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
+}
+
+/*
+ * describe an object for slow-work debugging
+ */
+#ifdef CONFIG_SLOW_WORK_PROC
+static void fscache_object_slow_work_desc(struct slow_work *work,
+                                         struct seq_file *m)
+{
+       struct fscache_object *object =
+               container_of(work, struct fscache_object, work);
+
+       seq_printf(m, "FSC: OBJ%x: %s",
+                  object->debug_id,
+                  fscache_object_states_short[object->state]);
 }
+#endif
 
 /*
  * initialise an object
@@ -376,7 +428,9 @@ static void fscache_initialise_object(struct fscache_object *object)
                         * binding on to us, so we need to make sure we don't
                         * add ourselves to the list multiple times */
                        if (list_empty(&object->dep_link)) {
+                               fscache_stat(&fscache_n_cop_grab_object);
                                object->cache->ops->grab_object(object);
+                               fscache_stat_d(&fscache_n_cop_grab_object);
                                list_add(&object->dep_link,
                                         &parent->dependents);
 
@@ -414,6 +468,7 @@ static void fscache_lookup_object(struct fscache_object *object)
 {
        struct fscache_cookie *cookie = object->cookie;
        struct fscache_object *parent;
+       int ret;
 
        _enter("");
 
@@ -438,11 +493,20 @@ static void fscache_lookup_object(struct fscache_object *object)
               object->cache->tag->name);
 
        fscache_stat(&fscache_n_object_lookups);
-       object->cache->ops->lookup_object(object);
+       fscache_stat(&fscache_n_cop_lookup_object);
+       ret = object->cache->ops->lookup_object(object);
+       fscache_stat_d(&fscache_n_cop_lookup_object);
 
        if (test_bit(FSCACHE_OBJECT_EV_ERROR, &object->events))
                set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
 
+       if (ret == -ETIMEDOUT) {
+               /* probably stuck behind another object, so move this one to
+                * the back of the queue */
+               fscache_stat(&fscache_n_object_lookups_timed_out);
+               set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
+       }
+
        _leave("");
 }
 
@@ -546,7 +610,8 @@ static void fscache_object_available(struct fscache_object *object)
 
        spin_lock(&object->lock);
 
-       if (test_and_clear_bit(FSCACHE_COOKIE_CREATING, &object->cookie->flags))
+       if (object->cookie &&
+           test_and_clear_bit(FSCACHE_COOKIE_CREATING, &object->cookie->flags))
                wake_up_bit(&object->cookie->flags, FSCACHE_COOKIE_CREATING);
 
        fscache_done_parent_op(object);
@@ -562,7 +627,9 @@ static void fscache_object_available(struct fscache_object *object)
        }
        spin_unlock(&object->lock);
 
+       fscache_stat(&fscache_n_cop_lookup_complete);
        object->cache->ops->lookup_complete(object);
+       fscache_stat_d(&fscache_n_cop_lookup_complete);
        fscache_enqueue_dependents(object);
 
        fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
@@ -581,11 +648,16 @@ static void fscache_drop_object(struct fscache_object *object)
 
        _enter("{OBJ%x,%d}", object->debug_id, object->n_children);
 
+       ASSERTCMP(object->cookie, ==, NULL);
+       ASSERT(hlist_unhashed(&object->cookie_link));
+
        spin_lock(&cache->object_list_lock);
        list_del_init(&object->cache_link);
        spin_unlock(&cache->object_list_lock);
 
+       fscache_stat(&fscache_n_cop_drop_object);
        cache->ops->drop_object(object);
+       fscache_stat_d(&fscache_n_cop_drop_object);
 
        if (parent) {
                _debug("release parent OBJ%x {%d}",
@@ -600,7 +672,9 @@ static void fscache_drop_object(struct fscache_object *object)
        }
 
        /* this just shifts the object release to the slow work processor */
+       fscache_stat(&fscache_n_cop_put_object);
        object->cache->ops->put_object(object);
+       fscache_stat_d(&fscache_n_cop_put_object);
 
        _leave("");
 }
@@ -690,8 +764,12 @@ static int fscache_object_slow_work_get_ref(struct slow_work *work)
 {
        struct fscache_object *object =
                container_of(work, struct fscache_object, work);
+       int ret;
 
-       return object->cache->ops->grab_object(object) ? 0 : -EAGAIN;
+       fscache_stat(&fscache_n_cop_grab_object);
+       ret = object->cache->ops->grab_object(object) ? 0 : -EAGAIN;
+       fscache_stat_d(&fscache_n_cop_grab_object);
+       return ret;
 }
 
 /*
@@ -702,7 +780,9 @@ static void fscache_object_slow_work_put_ref(struct slow_work *work)
        struct fscache_object *object =
                container_of(work, struct fscache_object, work);
 
-       return object->cache->ops->put_object(object);
+       fscache_stat(&fscache_n_cop_put_object);
+       object->cache->ops->put_object(object);
+       fscache_stat_d(&fscache_n_cop_put_object);
 }
 
 /*
@@ -739,7 +819,9 @@ static void fscache_enqueue_dependents(struct fscache_object *object)
 
                /* sort onto appropriate lists */
                fscache_enqueue_object(dep);
+               fscache_stat(&fscache_n_cop_put_object);
                dep->cache->ops->put_object(dep);
+               fscache_stat_d(&fscache_n_cop_put_object);
 
                if (!list_empty(&object->dependents))
                        cond_resched_lock(&object->lock);
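
A pattern worth noting throughout this file: every call through the cache backend's ops table is now bracketed by fscache_stat()/fscache_stat_d() on a dedicated fscache_n_cop_* counter, so the statistics file can report how many of each cache operation are in flight at any instant rather than only a running total (under CONFIG_FSCACHE_STATS these amount to an atomic_inc()/atomic_dec() pair). A sketch of the idiom, assuming fscache's internal definitions and a hypothetical operation name:

	/* sketch only: frobnicate() is not a real cache op */
	atomic_t fscache_n_cop_frobnicate;

	static void fscache_frobnicate_object(struct fscache_object *object)
	{
		fscache_stat(&fscache_n_cop_frobnicate);
		object->cache->ops->frobnicate(object);
		fscache_stat_d(&fscache_n_cop_frobnicate);
	}
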
index e7f8d53b8b6ba90af4bf057a204e03775efb12ef..313e79a14266e69e2a47aafdffefc89b29f510df 100644 (file)
@@ -13,6 +13,7 @@
 
 #define FSCACHE_DEBUG_LEVEL OPERATION
 #include <linux/module.h>
+#include <linux/seq_file.h>
 #include "internal.h"
 
 atomic_t fscache_op_debug_id;
@@ -31,32 +32,33 @@ void fscache_enqueue_operation(struct fscache_operation *op)
        _enter("{OBJ%x OP%x,%u}",
               op->object->debug_id, op->debug_id, atomic_read(&op->usage));
 
+       fscache_set_op_state(op, "EnQ");
+
+       ASSERT(list_empty(&op->pend_link));
        ASSERT(op->processor != NULL);
        ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
        ASSERTCMP(atomic_read(&op->usage), >, 0);
 
-       if (list_empty(&op->pend_link)) {
-               switch (op->flags & FSCACHE_OP_TYPE) {
-               case FSCACHE_OP_FAST:
-                       _debug("queue fast");
-                       atomic_inc(&op->usage);
-                       if (!schedule_work(&op->fast_work))
-                               fscache_put_operation(op);
-                       break;
-               case FSCACHE_OP_SLOW:
-                       _debug("queue slow");
-                       slow_work_enqueue(&op->slow_work);
-                       break;
-               case FSCACHE_OP_MYTHREAD:
-                       _debug("queue for caller's attention");
-                       break;
-               default:
-                       printk(KERN_ERR "FS-Cache: Unexpected op type %lx",
-                              op->flags);
-                       BUG();
-                       break;
-               }
-               fscache_stat(&fscache_n_op_enqueue);
+       fscache_stat(&fscache_n_op_enqueue);
+       switch (op->flags & FSCACHE_OP_TYPE) {
+       case FSCACHE_OP_FAST:
+               _debug("queue fast");
+               atomic_inc(&op->usage);
+               if (!schedule_work(&op->fast_work))
+                       fscache_put_operation(op);
+               break;
+       case FSCACHE_OP_SLOW:
+               _debug("queue slow");
+               slow_work_enqueue(&op->slow_work);
+               break;
+       case FSCACHE_OP_MYTHREAD:
+               _debug("queue for caller's attention");
+               break;
+       default:
+               printk(KERN_ERR "FS-Cache: Unexpected op type %lx\n",
+                      op->flags);
+               BUG();
+               break;
        }
 }
 EXPORT_SYMBOL(fscache_enqueue_operation);
@@ -67,6 +69,8 @@ EXPORT_SYMBOL(fscache_enqueue_operation);
 static void fscache_run_op(struct fscache_object *object,
                           struct fscache_operation *op)
 {
+       fscache_set_op_state(op, "Run");
+
        object->n_in_progress++;
        if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
                wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
@@ -87,9 +91,12 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
 
        _enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);
 
+       fscache_set_op_state(op, "SubmitX");
+
        spin_lock(&object->lock);
        ASSERTCMP(object->n_ops, >=, object->n_in_progress);
        ASSERTCMP(object->n_ops, >=, object->n_exclusive);
+       ASSERT(list_empty(&op->pend_link));
 
        ret = -ENOBUFS;
        if (fscache_object_is_active(object)) {
@@ -190,9 +197,12 @@ int fscache_submit_op(struct fscache_object *object,
 
        ASSERTCMP(atomic_read(&op->usage), >, 0);
 
+       fscache_set_op_state(op, "Submit");
+
        spin_lock(&object->lock);
        ASSERTCMP(object->n_ops, >=, object->n_in_progress);
        ASSERTCMP(object->n_ops, >=, object->n_exclusive);
+       ASSERT(list_empty(&op->pend_link));
 
        ostate = object->state;
        smp_rmb();
@@ -222,6 +232,11 @@ int fscache_submit_op(struct fscache_object *object,
                list_add_tail(&op->pend_link, &object->pending_ops);
                fscache_stat(&fscache_n_op_pend);
                ret = 0;
+       } else if (object->state == FSCACHE_OBJECT_DYING ||
+                  object->state == FSCACHE_OBJECT_LC_DYING ||
+                  object->state == FSCACHE_OBJECT_WITHDRAWING) {
+               fscache_stat(&fscache_n_op_rejected);
+               ret = -ENOBUFS;
        } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
                fscache_report_unexpected_submission(object, op, ostate);
                ASSERT(!fscache_object_is_active(object));
@@ -264,12 +279,7 @@ void fscache_start_operations(struct fscache_object *object)
                        stop = true;
                }
                list_del_init(&op->pend_link);
-               object->n_in_progress++;
-
-               if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
-                       wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
-               if (op->processor)
-                       fscache_enqueue_operation(op);
+               fscache_run_op(object, op);
 
                /* the pending queue was holding a ref on the object */
                fscache_put_operation(op);
@@ -281,6 +291,36 @@ void fscache_start_operations(struct fscache_object *object)
               object->n_in_progress, object->debug_id);
 }
 
+/*
+ * cancel an operation that's pending on an object
+ */
+int fscache_cancel_op(struct fscache_operation *op)
+{
+       struct fscache_object *object = op->object;
+       int ret;
+
+       _enter("{OBJ%x OP%x}", op->object->debug_id, op->debug_id);
+
+       spin_lock(&object->lock);
+
+       ret = -EBUSY;
+       if (!list_empty(&op->pend_link)) {
+               fscache_stat(&fscache_n_op_cancelled);
+               list_del_init(&op->pend_link);
+               object->n_ops--;
+               if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
+                       object->n_exclusive--;
+               if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
+                       wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
+               fscache_put_operation(op);
+               ret = 0;
+       }
+
+       spin_unlock(&object->lock);
+       _leave(" = %d", ret);
+       return ret;
+}
+
 /*
  * release an operation
  * - queues pending ops if this is the last in-progress op
@@ -298,6 +338,8 @@ void fscache_put_operation(struct fscache_operation *op)
        if (!atomic_dec_and_test(&op->usage))
                return;
 
+       fscache_set_op_state(op, "Put");
+
        _debug("PUT OP");
        if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
                BUG();
@@ -311,6 +353,9 @@ void fscache_put_operation(struct fscache_operation *op)
 
        object = op->object;
 
+       if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags))
+               atomic_dec(&object->n_reads);
+
        /* now... we may get called with the object spinlock held, so we
         * complete the cleanup here only if we can immediately acquire the
         * lock, and defer it otherwise */
@@ -452,8 +497,27 @@ static void fscache_op_execute(struct slow_work *work)
        _leave("");
 }
 
+/*
+ * describe an operation for slow-work debugging
+ */
+#ifdef CONFIG_SLOW_WORK_PROC
+static void fscache_op_desc(struct slow_work *work, struct seq_file *m)
+{
+       struct fscache_operation *op =
+               container_of(work, struct fscache_operation, slow_work);
+
+       seq_printf(m, "FSC: OBJ%x OP%x: %s/%s fl=%lx",
+                  op->object->debug_id, op->debug_id,
+                  op->name, op->state, op->flags);
+}
+#endif
+
 const struct slow_work_ops fscache_op_slow_work_ops = {
+       .owner          = THIS_MODULE,
        .get_ref        = fscache_op_get_ref,
        .put_ref        = fscache_op_put_ref,
        .execute        = fscache_op_execute,
+#ifdef CONFIG_SLOW_WORK_PROC
+       .desc           = fscache_op_desc,
+#endif
 };
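
fscache_cancel_op() only succeeds while the operation is still on the object's pending queue and returns -EBUSY once it has been started, which is precisely what the interruptible waiters added to the retrieval paths below depend on: on a signal they try to cancel, and if cancellation loses the race they fall back to an uninterruptible wait because the op is then guaranteed to run soon. Condensed from fscache_wait_for_retrieval_activation() later in this patch:

	if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
			fscache_wait_bit_interruptible,
			TASK_INTERRUPTIBLE) < 0) {
		if (fscache_cancel_op(op) == 0)
			return -ERESTARTSYS;	/* never ran; safe to bail */
		/* someone else dequeued it; it will run shortly */
		wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
			    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
	}
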
index 2568e0eb644f67453d2acca6e3aac584a513ea2c..c598ea4c4e7d580d9046e1cb8ccb42e1d14db6c6 100644 (file)
@@ -43,18 +43,102 @@ void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *pa
 EXPORT_SYMBOL(__fscache_wait_on_page_write);
 
 /*
- * note that a page has finished being written to the cache
+ * decide whether a page can be released, possibly by cancelling a store to it
+ * - we're allowed to sleep if __GFP_WAIT is flagged
  */
-static void fscache_end_page_write(struct fscache_cookie *cookie, struct page *page)
+bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
+                                 struct page *page,
+                                 gfp_t gfp)
 {
        struct page *xpage;
+       void *val;
+
+       _enter("%p,%p,%x", cookie, page, gfp);
+
+       rcu_read_lock();
+       val = radix_tree_lookup(&cookie->stores, page->index);
+       if (!val) {
+               rcu_read_unlock();
+               fscache_stat(&fscache_n_store_vmscan_not_storing);
+               __fscache_uncache_page(cookie, page);
+               return true;
+       }
+
+       /* see if the page is actually undergoing storage - if so we can't get
+        * rid of it till the cache has finished with it */
+       if (radix_tree_tag_get(&cookie->stores, page->index,
+                              FSCACHE_COOKIE_STORING_TAG)) {
+               rcu_read_unlock();
+               goto page_busy;
+       }
+
+       /* the page is pending storage, so we attempt to cancel the store and
+        * discard the store request so that the page can be reclaimed */
+       spin_lock(&cookie->stores_lock);
+       rcu_read_unlock();
+
+       if (radix_tree_tag_get(&cookie->stores, page->index,
+                              FSCACHE_COOKIE_STORING_TAG)) {
+               /* the page started to undergo storage whilst we were looking,
+                * so now we can only wait or return */
+               spin_unlock(&cookie->stores_lock);
+               goto page_busy;
+       }
 
-       spin_lock(&cookie->lock);
        xpage = radix_tree_delete(&cookie->stores, page->index);
-       spin_unlock(&cookie->lock);
-       ASSERT(xpage != NULL);
+       spin_unlock(&cookie->stores_lock);
+
+       if (xpage) {
+               fscache_stat(&fscache_n_store_vmscan_cancelled);
+               fscache_stat(&fscache_n_store_radix_deletes);
+               ASSERTCMP(xpage, ==, page);
+       } else {
+               fscache_stat(&fscache_n_store_vmscan_gone);
+       }
 
        wake_up_bit(&cookie->flags, 0);
+       if (xpage)
+               page_cache_release(xpage);
+       __fscache_uncache_page(cookie, page);
+       return true;
+
+page_busy:
+       /* we might want to wait here, but that could deadlock the allocator as
+        * the slow-work threads writing to the cache may all end up sleeping
+        * on memory allocation */
+       fscache_stat(&fscache_n_store_vmscan_busy);
+       return false;
+}
+EXPORT_SYMBOL(__fscache_maybe_release_page);
+
+/*
+ * note that a page has finished being written to the cache
+ */
+static void fscache_end_page_write(struct fscache_object *object,
+                                  struct page *page)
+{
+       struct fscache_cookie *cookie;
+       struct page *xpage = NULL;
+
+       spin_lock(&object->lock);
+       cookie = object->cookie;
+       if (cookie) {
+               /* delete the page from the tree if it is now no longer
+                * pending */
+               spin_lock(&cookie->stores_lock);
+               radix_tree_tag_clear(&cookie->stores, page->index,
+                                    FSCACHE_COOKIE_STORING_TAG);
+               if (!radix_tree_tag_get(&cookie->stores, page->index,
+                                       FSCACHE_COOKIE_PENDING_TAG)) {
+                       fscache_stat(&fscache_n_store_radix_deletes);
+                       xpage = radix_tree_delete(&cookie->stores, page->index);
+               }
+               spin_unlock(&cookie->stores_lock);
+               wake_up_bit(&cookie->flags, 0);
+       }
+       spin_unlock(&object->lock);
+       if (xpage)
+               page_cache_release(xpage);
 }
 
 /*
@@ -63,14 +147,21 @@ static void fscache_end_page_write(struct fscache_cookie *cookie, struct page *p
 static void fscache_attr_changed_op(struct fscache_operation *op)
 {
        struct fscache_object *object = op->object;
+       int ret;
 
        _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
 
        fscache_stat(&fscache_n_attr_changed_calls);
 
-       if (fscache_object_is_active(object) &&
-           object->cache->ops->attr_changed(object) < 0)
-               fscache_abort_object(object);
+       if (fscache_object_is_active(object)) {
+               fscache_set_op_state(op, "CallFS");
+               fscache_stat(&fscache_n_cop_attr_changed);
+               ret = object->cache->ops->attr_changed(object);
+               fscache_stat_d(&fscache_n_cop_attr_changed);
+               fscache_set_op_state(op, "Done");
+               if (ret < 0)
+                       fscache_abort_object(object);
+       }
 
        _leave("");
 }
@@ -99,6 +190,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
        fscache_operation_init(op, NULL);
        fscache_operation_init_slow(op, fscache_attr_changed_op);
        op->flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_EXCLUSIVE);
+       fscache_set_op_name(op, "Attr");
 
        spin_lock(&cookie->lock);
 
@@ -184,6 +276,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
        op->start_time  = jiffies;
        INIT_WORK(&op->op.fast_work, fscache_retrieval_work);
        INIT_LIST_HEAD(&op->to_do);
+       fscache_set_op_name(&op->op, "Retr");
        return op;
 }
 
@@ -220,6 +313,43 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
        return 0;
 }
 
+/*
+ * wait for an object to become active (or dead)
+ */
+static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
+                                                struct fscache_retrieval *op,
+                                                atomic_t *stat_op_waits,
+                                                atomic_t *stat_object_dead)
+{
+       int ret;
+
+       if (!test_bit(FSCACHE_OP_WAITING, &op->op.flags))
+               goto check_if_dead;
+
+       _debug(">>> WT");
+       fscache_stat(stat_op_waits);
+       if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
+                       fscache_wait_bit_interruptible,
+                       TASK_INTERRUPTIBLE) < 0) {
+               ret = fscache_cancel_op(&op->op);
+               if (ret == 0)
+                       return -ERESTARTSYS;
+
+               /* it's been removed from the pending queue by another party,
+                * so we should get to run shortly */
+               wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
+                           fscache_wait_bit, TASK_UNINTERRUPTIBLE);
+       }
+       _debug("<<< GO");
+
+check_if_dead:
+       if (unlikely(fscache_object_is_dead(object))) {
+               fscache_stat(stat_object_dead);
+               return -ENOBUFS;
+       }
+       return 0;
+}
+
 /*
  * read a page from the cache or allocate a block in which to store it
  * - we return:
@@ -257,6 +387,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
                _leave(" = -ENOMEM");
                return -ENOMEM;
        }
+       fscache_set_op_name(&op->op, "RetrRA1");
 
        spin_lock(&cookie->lock);
 
@@ -267,6 +398,9 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
 
        ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP);
 
+       atomic_inc(&object->n_reads);
+       set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
+
        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock;
        spin_unlock(&cookie->lock);
@@ -279,23 +413,27 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
 
        /* we wait for the operation to become active, and then process it
         * *here*, in this thread, and not in the thread pool */
-       if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) {
-               _debug(">>> WT");
-               fscache_stat(&fscache_n_retrieval_op_waits);
-               wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
-                           fscache_wait_bit, TASK_UNINTERRUPTIBLE);
-               _debug("<<< GO");
-       }
+       ret = fscache_wait_for_retrieval_activation(
+               object, op,
+               __fscache_stat(&fscache_n_retrieval_op_waits),
+               __fscache_stat(&fscache_n_retrievals_object_dead));
+       if (ret < 0)
+               goto error;
 
        /* ask the cache to honour the operation */
        if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
+               fscache_stat(&fscache_n_cop_allocate_page);
                ret = object->cache->ops->allocate_page(op, page, gfp);
+               fscache_stat_d(&fscache_n_cop_allocate_page);
                if (ret == 0)
                        ret = -ENODATA;
        } else {
+               fscache_stat(&fscache_n_cop_read_or_alloc_page);
                ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
+               fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
        }
 
+error:
        if (ret == -ENOMEM)
                fscache_stat(&fscache_n_retrievals_nomem);
        else if (ret == -ERESTARTSYS)
@@ -347,7 +485,6 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
                                  void *context,
                                  gfp_t gfp)
 {
-       fscache_pages_retrieval_func_t func;
        struct fscache_retrieval *op;
        struct fscache_object *object;
        int ret;
@@ -369,6 +506,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
        op = fscache_alloc_retrieval(mapping, end_io_func, context);
        if (!op)
                return -ENOMEM;
+       fscache_set_op_name(&op->op, "RetrRAN");
 
        spin_lock(&cookie->lock);
 
@@ -377,6 +515,9 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);
 
+       atomic_inc(&object->n_reads);
+       set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);
+
        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock;
        spin_unlock(&cookie->lock);
@@ -389,21 +530,27 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
 
        /* we wait for the operation to become active, and then process it
         * *here*, in this thread, and not in the thread pool */
-       if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) {
-               _debug(">>> WT");
-               fscache_stat(&fscache_n_retrieval_op_waits);
-               wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
-                           fscache_wait_bit, TASK_UNINTERRUPTIBLE);
-               _debug("<<< GO");
-       }
+       ret = fscache_wait_for_retrieval_activation(
+               object, op,
+               __fscache_stat(&fscache_n_retrieval_op_waits),
+               __fscache_stat(&fscache_n_retrievals_object_dead));
+       if (ret < 0)
+               goto error;
 
        /* ask the cache to honour the operation */
-       if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags))
-               func = object->cache->ops->allocate_pages;
-       else
-               func = object->cache->ops->read_or_alloc_pages;
-       ret = func(op, pages, nr_pages, gfp);
+       if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
+               fscache_stat(&fscache_n_cop_allocate_pages);
+               ret = object->cache->ops->allocate_pages(
+                       op, pages, nr_pages, gfp);
+               fscache_stat_d(&fscache_n_cop_allocate_pages);
+       } else {
+               fscache_stat(&fscache_n_cop_read_or_alloc_pages);
+               ret = object->cache->ops->read_or_alloc_pages(
+                       op, pages, nr_pages, gfp);
+               fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
+       }
 
+error:
        if (ret == -ENOMEM)
                fscache_stat(&fscache_n_retrievals_nomem);
        else if (ret == -ERESTARTSYS)
@@ -461,6 +608,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
        op = fscache_alloc_retrieval(page->mapping, NULL, NULL);
        if (!op)
                return -ENOMEM;
+       fscache_set_op_name(&op->op, "RetrAL1");
 
        spin_lock(&cookie->lock);
 
@@ -475,18 +623,22 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
 
        fscache_stat(&fscache_n_alloc_ops);
 
-       if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) {
-               _debug(">>> WT");
-               fscache_stat(&fscache_n_alloc_op_waits);
-               wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
-                           fscache_wait_bit, TASK_UNINTERRUPTIBLE);
-               _debug("<<< GO");
-       }
+       ret = fscache_wait_for_retrieval_activation(
+               object, op,
+               __fscache_stat(&fscache_n_alloc_op_waits),
+               __fscache_stat(&fscache_n_allocs_object_dead));
+       if (ret < 0)
+               goto error;
 
        /* ask the cache to honour the operation */
+       fscache_stat(&fscache_n_cop_allocate_page);
        ret = object->cache->ops->allocate_page(op, page, gfp);
+       fscache_stat_d(&fscache_n_cop_allocate_page);
 
-       if (ret < 0)
+error:
+       if (ret == -ERESTARTSYS)
+               fscache_stat(&fscache_n_allocs_intr);
+       else if (ret < 0)
                fscache_stat(&fscache_n_allocs_nobufs);
        else
                fscache_stat(&fscache_n_allocs_ok);
@@ -521,7 +673,7 @@ static void fscache_write_op(struct fscache_operation *_op)
        struct fscache_storage *op =
                container_of(_op, struct fscache_storage, op);
        struct fscache_object *object = op->op.object;
-       struct fscache_cookie *cookie = object->cookie;
+       struct fscache_cookie *cookie;
        struct page *page;
        unsigned n;
        void *results[1];
@@ -529,16 +681,19 @@ static void fscache_write_op(struct fscache_operation *_op)
 
        _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));
 
-       spin_lock(&cookie->lock);
+       fscache_set_op_state(&op->op, "GetPage");
+
        spin_lock(&object->lock);
+       cookie = object->cookie;
 
-       if (!fscache_object_is_active(object)) {
+       if (!fscache_object_is_active(object) || !cookie) {
                spin_unlock(&object->lock);
-               spin_unlock(&cookie->lock);
                _leave("");
                return;
        }
 
+       spin_lock(&cookie->stores_lock);
+
        fscache_stat(&fscache_n_store_calls);
 
        /* find a page to store */
@@ -549,23 +704,35 @@ static void fscache_write_op(struct fscache_operation *_op)
                goto superseded;
        page = results[0];
        _debug("gang %d [%lx]", n, page->index);
-       if (page->index > op->store_limit)
+       if (page->index > op->store_limit) {
+               fscache_stat(&fscache_n_store_pages_over_limit);
                goto superseded;
+       }
 
-       radix_tree_tag_clear(&cookie->stores, page->index,
-                            FSCACHE_COOKIE_PENDING_TAG);
+       if (page) {
+               radix_tree_tag_set(&cookie->stores, page->index,
+                                  FSCACHE_COOKIE_STORING_TAG);
+               radix_tree_tag_clear(&cookie->stores, page->index,
+                                    FSCACHE_COOKIE_PENDING_TAG);
+       }
 
+       spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);
-       spin_unlock(&cookie->lock);
 
        if (page) {
+               fscache_set_op_state(&op->op, "Store");
+               fscache_stat(&fscache_n_store_pages);
+               fscache_stat(&fscache_n_cop_write_page);
                ret = object->cache->ops->write_page(op, page);
-               fscache_end_page_write(cookie, page);
-               page_cache_release(page);
-               if (ret < 0)
+               fscache_stat_d(&fscache_n_cop_write_page);
+               fscache_set_op_state(&op->op, "EndWrite");
+               fscache_end_page_write(object, page);
+               if (ret < 0) {
+                       fscache_set_op_state(&op->op, "Abort");
                        fscache_abort_object(object);
-               else
+               } else {
                        fscache_enqueue_operation(&op->op);
+               }
        }
 
        _leave("");
@@ -575,9 +742,9 @@ superseded:
        /* this writer is going away and there aren't any more things to
         * write */
        _debug("cease");
+       spin_unlock(&cookie->stores_lock);
        clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
        spin_unlock(&object->lock);
-       spin_unlock(&cookie->lock);
        _leave("");
 }
 
@@ -634,6 +801,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
        fscache_operation_init(&op->op, fscache_release_write_op);
        fscache_operation_init_slow(&op->op, fscache_write_op);
        op->op.flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_WAITING);
+       fscache_set_op_name(&op->op, "Write1");
 
        ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
        if (ret < 0)
@@ -652,6 +820,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
        /* add the page to the pending-storage radix tree on the backing
         * object */
        spin_lock(&object->lock);
+       spin_lock(&cookie->stores_lock);
 
        _debug("store limit %llx", (unsigned long long) object->store_limit);
 
@@ -672,6 +841,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
        if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
                goto already_pending;
 
+       spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);
 
        op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
@@ -693,6 +863,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
 already_queued:
        fscache_stat(&fscache_n_stores_again);
 already_pending:
+       spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);
        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
@@ -702,7 +873,9 @@ already_pending:
        return 0;
 
 submit_failed:
+       spin_lock(&cookie->stores_lock);
        radix_tree_delete(&cookie->stores, page->index);
+       spin_unlock(&cookie->stores_lock);
        page_cache_release(page);
        ret = -ENOBUFS;
        goto nobufs;
@@ -763,7 +936,9 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
        if (TestClearPageFsCache(page) &&
            object->cache->ops->uncache_page) {
                /* the cache backend releases the cookie lock */
+               fscache_stat(&fscache_n_cop_uncache_page);
                object->cache->ops->uncache_page(object, page);
+               fscache_stat_d(&fscache_n_cop_uncache_page);
                goto done;
        }
 
index beeab44bc31a2f987267c898150244c7dee0e9a2..1d9e4951a5979817951eb81180f037a01ef0a026 100644 (file)
@@ -37,10 +37,20 @@ int __init fscache_proc_init(void)
                goto error_histogram;
 #endif
 
+#ifdef CONFIG_FSCACHE_OBJECT_LIST
+       if (!proc_create("fs/fscache/objects", S_IFREG | 0444, NULL,
+                        &fscache_objlist_fops))
+               goto error_objects;
+#endif
+
        _leave(" = 0");
        return 0;
 
+#ifdef CONFIG_FSCACHE_OBJECT_LIST
+error_objects:
+#endif
 #ifdef CONFIG_FSCACHE_HISTOGRAM
+       remove_proc_entry("fs/fscache/histogram", NULL);
 error_histogram:
 #endif
 #ifdef CONFIG_FSCACHE_STATS
@@ -58,6 +68,9 @@ error_dir:
  */
 void fscache_proc_cleanup(void)
 {
+#ifdef CONFIG_FSCACHE_OBJECT_LIST
+       remove_proc_entry("fs/fscache/objects", NULL);
+#endif
 #ifdef CONFIG_FSCACHE_HISTOGRAM
        remove_proc_entry("fs/fscache/histogram", NULL);
 #endif
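
Note that the unwind ladder also gains a remove_proc_entry() for the histogram file: a failure to create "objects" now falls through error_objects and removes the "histogram" entry created just before it, keeping the labels in strict reverse order of creation.
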
index 65deb99e756b023836d1b71728aa7162d72e98ee..46435f3aae689936404bf1ecd5e3c6d6094a214b 100644 (file)
@@ -25,6 +25,8 @@ atomic_t fscache_n_op_requeue;
 atomic_t fscache_n_op_deferred_release;
 atomic_t fscache_n_op_release;
 atomic_t fscache_n_op_gc;
+atomic_t fscache_n_op_cancelled;
+atomic_t fscache_n_op_rejected;
 
 atomic_t fscache_n_attr_changed;
 atomic_t fscache_n_attr_changed_ok;
@@ -36,6 +38,8 @@ atomic_t fscache_n_allocs;
 atomic_t fscache_n_allocs_ok;
 atomic_t fscache_n_allocs_wait;
 atomic_t fscache_n_allocs_nobufs;
+atomic_t fscache_n_allocs_intr;
+atomic_t fscache_n_allocs_object_dead;
 atomic_t fscache_n_alloc_ops;
 atomic_t fscache_n_alloc_op_waits;
 
@@ -46,6 +50,7 @@ atomic_t fscache_n_retrievals_nodata;
 atomic_t fscache_n_retrievals_nobufs;
 atomic_t fscache_n_retrievals_intr;
 atomic_t fscache_n_retrievals_nomem;
+atomic_t fscache_n_retrievals_object_dead;
 atomic_t fscache_n_retrieval_ops;
 atomic_t fscache_n_retrieval_op_waits;
 
@@ -56,6 +61,14 @@ atomic_t fscache_n_stores_nobufs;
 atomic_t fscache_n_stores_oom;
 atomic_t fscache_n_store_ops;
 atomic_t fscache_n_store_calls;
+atomic_t fscache_n_store_pages;
+atomic_t fscache_n_store_radix_deletes;
+atomic_t fscache_n_store_pages_over_limit;
+
+atomic_t fscache_n_store_vmscan_not_storing;
+atomic_t fscache_n_store_vmscan_gone;
+atomic_t fscache_n_store_vmscan_busy;
+atomic_t fscache_n_store_vmscan_cancelled;
 
 atomic_t fscache_n_marks;
 atomic_t fscache_n_uncaches;
@@ -74,6 +87,7 @@ atomic_t fscache_n_updates_run;
 atomic_t fscache_n_relinquishes;
 atomic_t fscache_n_relinquishes_null;
 atomic_t fscache_n_relinquishes_waitcrt;
+atomic_t fscache_n_relinquishes_retire;
 
 atomic_t fscache_n_cookie_index;
 atomic_t fscache_n_cookie_data;
@@ -84,6 +98,7 @@ atomic_t fscache_n_object_no_alloc;
 atomic_t fscache_n_object_lookups;
 atomic_t fscache_n_object_lookups_negative;
 atomic_t fscache_n_object_lookups_positive;
+atomic_t fscache_n_object_lookups_timed_out;
 atomic_t fscache_n_object_created;
 atomic_t fscache_n_object_avail;
 atomic_t fscache_n_object_dead;
@@ -93,6 +108,23 @@ atomic_t fscache_n_checkaux_okay;
 atomic_t fscache_n_checkaux_update;
 atomic_t fscache_n_checkaux_obsolete;
 
+atomic_t fscache_n_cop_alloc_object;
+atomic_t fscache_n_cop_lookup_object;
+atomic_t fscache_n_cop_lookup_complete;
+atomic_t fscache_n_cop_grab_object;
+atomic_t fscache_n_cop_update_object;
+atomic_t fscache_n_cop_drop_object;
+atomic_t fscache_n_cop_put_object;
+atomic_t fscache_n_cop_sync_cache;
+atomic_t fscache_n_cop_attr_changed;
+atomic_t fscache_n_cop_read_or_alloc_page;
+atomic_t fscache_n_cop_read_or_alloc_pages;
+atomic_t fscache_n_cop_allocate_page;
+atomic_t fscache_n_cop_allocate_pages;
+atomic_t fscache_n_cop_write_page;
+atomic_t fscache_n_cop_uncache_page;
+atomic_t fscache_n_cop_dissociate_pages;
+
 /*
  * display the general statistics
  */
@@ -129,10 +161,11 @@ static int fscache_stats_show(struct seq_file *m, void *v)
                   atomic_read(&fscache_n_acquires_nobufs),
                   atomic_read(&fscache_n_acquires_oom));
 
-       seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u\n",
+       seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
                   atomic_read(&fscache_n_object_lookups),
                   atomic_read(&fscache_n_object_lookups_negative),
                   atomic_read(&fscache_n_object_lookups_positive),
+                  atomic_read(&fscache_n_object_lookups_timed_out),
                   atomic_read(&fscache_n_object_created));
 
        seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
@@ -140,10 +173,11 @@ static int fscache_stats_show(struct seq_file *m, void *v)
                   atomic_read(&fscache_n_updates_null),
                   atomic_read(&fscache_n_updates_run));
 
-       seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u\n",
+       seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
                   atomic_read(&fscache_n_relinquishes),
                   atomic_read(&fscache_n_relinquishes_null),
-                  atomic_read(&fscache_n_relinquishes_waitcrt));
+                  atomic_read(&fscache_n_relinquishes_waitcrt),
+                  atomic_read(&fscache_n_relinquishes_retire));
 
        seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
                   atomic_read(&fscache_n_attr_changed),
@@ -152,14 +186,16 @@ static int fscache_stats_show(struct seq_file *m, void *v)
                   atomic_read(&fscache_n_attr_changed_nomem),
                   atomic_read(&fscache_n_attr_changed_calls));
 
-       seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u\n",
+       seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
                   atomic_read(&fscache_n_allocs),
                   atomic_read(&fscache_n_allocs_ok),
                   atomic_read(&fscache_n_allocs_wait),
-                  atomic_read(&fscache_n_allocs_nobufs));
-       seq_printf(m, "Allocs : ops=%u owt=%u\n",
+                  atomic_read(&fscache_n_allocs_nobufs),
+                  atomic_read(&fscache_n_allocs_intr));
+       seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
                   atomic_read(&fscache_n_alloc_ops),
-                  atomic_read(&fscache_n_alloc_op_waits));
+                  atomic_read(&fscache_n_alloc_op_waits),
+                  atomic_read(&fscache_n_allocs_object_dead));
 
        seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
                   " int=%u oom=%u\n",
@@ -170,9 +206,10 @@ static int fscache_stats_show(struct seq_file *m, void *v)
                   atomic_read(&fscache_n_retrievals_nobufs),
                   atomic_read(&fscache_n_retrievals_intr),
                   atomic_read(&fscache_n_retrievals_nomem));
-       seq_printf(m, "Retrvls: ops=%u owt=%u\n",
+       seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
                   atomic_read(&fscache_n_retrieval_ops),
-                  atomic_read(&fscache_n_retrieval_op_waits));
+                  atomic_read(&fscache_n_retrieval_op_waits),
+                  atomic_read(&fscache_n_retrievals_object_dead));
 
        seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
                   atomic_read(&fscache_n_stores),
@@ -180,18 +217,49 @@ static int fscache_stats_show(struct seq_file *m, void *v)
                   atomic_read(&fscache_n_stores_again),
                   atomic_read(&fscache_n_stores_nobufs),
                   atomic_read(&fscache_n_stores_oom));
-       seq_printf(m, "Stores : ops=%u run=%u\n",
+       seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
                   atomic_read(&fscache_n_store_ops),
-                  atomic_read(&fscache_n_store_calls));
+                  atomic_read(&fscache_n_store_calls),
+                  atomic_read(&fscache_n_store_pages),
+                  atomic_read(&fscache_n_store_radix_deletes),
+                  atomic_read(&fscache_n_store_pages_over_limit));
 
-       seq_printf(m, "Ops    : pend=%u run=%u enq=%u\n",
+       seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u\n",
+                  atomic_read(&fscache_n_store_vmscan_not_storing),
+                  atomic_read(&fscache_n_store_vmscan_gone),
+                  atomic_read(&fscache_n_store_vmscan_busy),
+                  atomic_read(&fscache_n_store_vmscan_cancelled));
+
+       seq_printf(m, "Ops    : pend=%u run=%u enq=%u can=%u rej=%u\n",
                   atomic_read(&fscache_n_op_pend),
                   atomic_read(&fscache_n_op_run),
-                  atomic_read(&fscache_n_op_enqueue));
+                  atomic_read(&fscache_n_op_enqueue),
+                  atomic_read(&fscache_n_op_cancelled),
+                  atomic_read(&fscache_n_op_rejected));
        seq_printf(m, "Ops    : dfr=%u rel=%u gc=%u\n",
                   atomic_read(&fscache_n_op_deferred_release),
                   atomic_read(&fscache_n_op_release),
                   atomic_read(&fscache_n_op_gc));
+
+       seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
+                  atomic_read(&fscache_n_cop_alloc_object),
+                  atomic_read(&fscache_n_cop_lookup_object),
+                  atomic_read(&fscache_n_cop_lookup_complete),
+                  atomic_read(&fscache_n_cop_grab_object));
+       seq_printf(m, "CacheOp: upo=%d dro=%d pto=%d atc=%d syn=%d\n",
+                  atomic_read(&fscache_n_cop_update_object),
+                  atomic_read(&fscache_n_cop_drop_object),
+                  atomic_read(&fscache_n_cop_put_object),
+                  atomic_read(&fscache_n_cop_attr_changed),
+                  atomic_read(&fscache_n_cop_sync_cache));
+       seq_printf(m, "CacheOp: rap=%d ras=%d alp=%d als=%d wrp=%d ucp=%d dsp=%d\n",
+                  atomic_read(&fscache_n_cop_read_or_alloc_page),
+                  atomic_read(&fscache_n_cop_read_or_alloc_pages),
+                  atomic_read(&fscache_n_cop_allocate_page),
+                  atomic_read(&fscache_n_cop_allocate_pages),
+                  atomic_read(&fscache_n_cop_write_page),
+                  atomic_read(&fscache_n_cop_uncache_page),
+                  atomic_read(&fscache_n_cop_dissociate_pages));
        return 0;
 }
 
index 8ada78aade58353d67b3fb161fb63fedd39f8ad7..4787ae6c5c1c5aac632c871502ca4a99823dd3bb 100644 (file)
@@ -385,6 +385,9 @@ static int fuse_create_open(struct inode *dir, struct dentry *entry, int mode,
        if (fc->no_create)
                return -ENOSYS;
 
+       if (flags & O_DIRECT)
+               return -EINVAL;
+
        forget_req = fuse_get_req(fc);
        if (IS_ERR(forget_req))
                return PTR_ERR(forget_req);
index eacd78a5d0827c3e0d7c37fb89eace335b400eca..5b31f7741a8f83c694db681076d534a7970e1a73 100644 (file)
@@ -114,7 +114,7 @@ static int __init init_gfs2_fs(void)
        if (error)
                goto fail_unregister;
 
-       error = slow_work_register_user();
+       error = slow_work_register_user(THIS_MODULE);
        if (error)
                goto fail_slow;
 
@@ -163,7 +163,7 @@ static void __exit exit_gfs2_fs(void)
        gfs2_unregister_debugfs();
        unregister_filesystem(&gfs2_fs_type);
        unregister_filesystem(&gfs2meta_fs_type);
-       slow_work_unregister_user();
+       slow_work_unregister_user(THIS_MODULE);
 
        kmem_cache_destroy(gfs2_quotad_cachep);
        kmem_cache_destroy(gfs2_rgrpd_cachep);
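
slow_work_register_user() now takes the module that owns the work items to be queued, letting the slow-work core pin that module while any of its items are queued or executing (a prerequisite for the /proc interface holding descriptions of them). Callers simply pass THIS_MODULE, paired with the matching unregister, as the GFS2 conversion above does; a minimal module skeleton under that assumption (the module name is hypothetical):

	static int __init myfs_init(void)
	{
		int error = slow_work_register_user(THIS_MODULE);
		if (error)
			return error;
		/* ... register filesystems, caches, etc. ... */
		return 0;
	}

	static void __exit myfs_exit(void)
	{
		/* ... tear down in reverse ... */
		slow_work_unregister_user(THIS_MODULE);
	}

	module_init(myfs_init);
	module_exit(myfs_exit);
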
index 59d2695509d30c0fd876175f32fe34ec96f29582..09fa31965576c6f169c1ea699e246d8916465008 100644 (file)
@@ -7,6 +7,7 @@
  * of the GNU General Public License version 2.
  */
 
+#include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/spinlock.h>
 #include <linux/completion.h>
@@ -593,6 +594,7 @@ fail:
 }
 
 struct slow_work_ops gfs2_recover_ops = {
+       .owner   = THIS_MODULE,
        .get_ref = gfs2_recover_get_ref,
        .put_ref = gfs2_recover_put_ref,
        .execute = gfs2_recover_work,
index cfe05c1966a582140986e33cf25940683abdafb5..3f39be1b0455a03be83b8cb08bfa224a6dd75d7b 100644 (file)
@@ -164,12 +164,15 @@ int jffs2_read_inode_range(struct jffs2_sb_info *c, struct jffs2_inode_info *f,
 
        /* XXX FIXME: Where a single physical node actually shows up in two
           frags, we read it twice. Don't do that. */
-       /* Now we're pointing at the first frag which overlaps our page */
+       /* Now we're pointing at the first frag which overlaps our page
+        * (or perhaps is before it, if we've been asked to read off the
+        * end of the file). */
        while(offset < end) {
                D2(printk(KERN_DEBUG "jffs2_read_inode_range: offset %d, end %d\n", offset, end));
-               if (unlikely(!frag || frag->ofs > offset)) {
+               if (unlikely(!frag || frag->ofs > offset ||
+                            frag->ofs + frag->size <= offset)) {
                        uint32_t holesize = end - offset;
-                       if (frag) {
+                       if (frag && frag->ofs > offset) {
                                D1(printk(KERN_NOTICE "Eep. Hole in ino #%u fraglist. frag->ofs = 0x%08x, offset = 0x%08x\n", f->inocache->ino, frag->ofs, offset));
                                holesize = min(holesize, frag->ofs - offset);
                        }
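
Worked example of the jffs2 fix: with a single frag at ofs 0x0, size 0x1000, a read at offset 0x1000 used to fail both halves of the old test (the frag exists and does not start beyond the offset), so the stale frag was treated as covering a page read off the end of the file. The added frag->ofs + frag->size <= offset clause (0x1000 <= 0x1000) routes that case to the hole path instead, and the strengthened frag && frag->ofs > offset guard keeps the "hole in fraglist" warning and the frag->ofs - offset clamp from firing for a frag lying wholly before the offset.
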
index 70fad69eb9593a41894102164e26db60eb13fa56..fa588006588dd3403bb9a883eedc79ddc4588655 100644 (file)
@@ -359,17 +359,13 @@ int nfs_fscache_release_page(struct page *page, gfp_t gfp)
 
        BUG_ON(!cookie);
 
-       if (fscache_check_page_write(cookie, page)) {
-               if (!(gfp & __GFP_WAIT))
-                       return 0;
-               fscache_wait_on_page_write(cookie, page);
-       }
-
        if (PageFsCache(page)) {
                dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n",
                         cookie, page, nfsi);
 
-               fscache_uncache_page(cookie, page);
+               if (!fscache_maybe_release_page(cookie, page, gfp))
+                       return 0;
+
                nfs_add_fscache_stats(page->mapping->host,
                                      NFSIOS_FSCACHE_PAGES_UNCACHED, 1);
        }
index ff37454fa783f42196b733a9723dba7567c37f72..741a562177fc3f587dcd1e1019f3cfe53c367de8 100644 (file)
@@ -2767,7 +2767,7 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
                .pages = &page,
                .pgbase = 0,
                .count = count,
-               .bitmask = NFS_SERVER(dentry->d_inode)->cache_consistency_bitmask,
+               .bitmask = NFS_SERVER(dentry->d_inode)->attr_bitmask,
        };
        struct nfs4_readdir_res res;
        struct rpc_message msg = {
index 89fc8ee1f5a5932ea40bba6537bd9a8e319f1c84..de059f49058694b7887c49f5d907deb32760bb82 100644 (file)
@@ -1712,7 +1712,8 @@ int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos,
        struct super_block *sb = inode->i_sb;
 
        if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)) ||
-           !(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL))
+           !(OCFS2_I(inode)->ip_dyn_features & OCFS2_HAS_REFCOUNT_FL) ||
+           OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
                return 0;
 
        cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
index eae4046024241131eb948a9d8539fc5158878c44..d963d863870994075b8af17c84484c09cb0b84d8 100644 (file)
 #include <linux/kref.h>
 #include <linux/mutex.h>
 #include <linux/lockdep.h>
-#ifndef CONFIG_OCFS2_COMPAT_JBD
-# include <linux/jbd2.h>
-#else
-# include <linux/jbd.h>
-# include "ocfs2_jbd_compat.h"
-#endif
+#include <linux/jbd2.h>
 
 /* For union ocfs2_dlm_lksb */
 #include "stackglue.h"
index 60287fc56bcb3a73eb53612539effc28eece9e4c..3a0df7a1b8109666b92dea616c1b45ce57a2add9 100644 (file)
@@ -3743,6 +3743,9 @@ static int ocfs2_attach_refcount_tree(struct inode *inode,
                goto out;
        }
 
+       if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
+               goto attach_xattr;
+
        ocfs2_init_dinode_extent_tree(&di_et, INODE_CACHE(inode), di_bh);
 
        size = i_size_read(inode);
@@ -3769,6 +3772,7 @@ static int ocfs2_attach_refcount_tree(struct inode *inode,
                cpos += num_clusters;
        }
 
+attach_xattr:
        if (oi->ip_dyn_features & OCFS2_HAS_XATTR_FL) {
                ret = ocfs2_xattr_attach_refcount_tree(inode, di_bh,
                                                       &ref_tree->rf_ci,
@@ -3858,6 +3862,49 @@ out:
        return ret;
 }
 
+static int ocfs2_duplicate_inline_data(struct inode *s_inode,
+                                      struct buffer_head *s_bh,
+                                      struct inode *t_inode,
+                                      struct buffer_head *t_bh)
+{
+       int ret;
+       handle_t *handle;
+       struct ocfs2_super *osb = OCFS2_SB(s_inode->i_sb);
+       struct ocfs2_dinode *s_di = (struct ocfs2_dinode *)s_bh->b_data;
+       struct ocfs2_dinode *t_di = (struct ocfs2_dinode *)t_bh->b_data;
+
+       BUG_ON(!(OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));
+
+       handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
+       if (IS_ERR(handle)) {
+               ret = PTR_ERR(handle);
+               mlog_errno(ret);
+               goto out;
+       }
+
+       ret = ocfs2_journal_access_di(handle, INODE_CACHE(t_inode), t_bh,
+                                     OCFS2_JOURNAL_ACCESS_WRITE);
+       if (ret) {
+               mlog_errno(ret);
+               goto out_commit;
+       }
+
+       t_di->id2.i_data.id_count = s_di->id2.i_data.id_count;
+       memcpy(t_di->id2.i_data.id_data, s_di->id2.i_data.id_data,
+              le16_to_cpu(s_di->id2.i_data.id_count));
+       spin_lock(&OCFS2_I(t_inode)->ip_lock);
+       OCFS2_I(t_inode)->ip_dyn_features |= OCFS2_INLINE_DATA_FL;
+       t_di->i_dyn_features = cpu_to_le16(OCFS2_I(t_inode)->ip_dyn_features);
+       spin_unlock(&OCFS2_I(t_inode)->ip_lock);
+
+       ocfs2_journal_dirty(handle, t_bh);
+
+out_commit:
+       ocfs2_commit_trans(osb, handle);
+out:
+       return ret;
+}
+
 static int ocfs2_duplicate_extent_list(struct inode *s_inode,
                                struct inode *t_inode,
                                struct buffer_head *t_bh,
@@ -3997,6 +4044,14 @@ static int ocfs2_create_reflink_node(struct inode *s_inode,
                goto out;
        }
 
+       if (OCFS2_I(s_inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
+               ret = ocfs2_duplicate_inline_data(s_inode, s_bh,
+                                                 t_inode, t_bh);
+               if (ret)
+                       mlog_errno(ret);
+               goto out;
+       }
+
        ret = ocfs2_lock_refcount_tree(osb, le64_to_cpu(di->i_refcount_loc),
                                       1, &ref_tree, &ref_root_bh);
        if (ret) {
@@ -4013,10 +4068,6 @@ static int ocfs2_create_reflink_node(struct inode *s_inode,
                goto out_unlock_refcount;
        }
 
-       ret = ocfs2_complete_reflink(s_inode, s_bh, t_inode, t_bh, preserve);
-       if (ret)
-               mlog_errno(ret);
-
 out_unlock_refcount:
        ocfs2_unlock_refcount_tree(osb, ref_tree, 1);
        brelse(ref_root_bh);
@@ -4068,9 +4119,17 @@ static int __ocfs2_reflink(struct dentry *old_dentry,
                ret = ocfs2_reflink_xattrs(inode, old_bh,
                                           new_inode, new_bh,
                                           preserve);
-               if (ret)
+               if (ret) {
                        mlog_errno(ret);
+                       goto inode_unlock;
+               }
        }
+
+       ret = ocfs2_complete_reflink(inode, old_bh,
+                                    new_inode, new_bh, preserve);
+       if (ret)
+               mlog_errno(ret);
+
 inode_unlock:
        ocfs2_inode_unlock(new_inode, 1);
        brelse(new_bh);
index c0e48aeebb1c3877862853ec1761bed05e6f9355..14f47d2bfe02eb6500666e8a63a227a2d4ee10f2 100644 (file)
@@ -773,18 +773,20 @@ static int ocfs2_sb_probe(struct super_block *sb,
                if (tmpstat < 0) {
                        status = tmpstat;
                        mlog_errno(status);
-                       goto bail;
+                       break;
                }
                di = (struct ocfs2_dinode *) (*bh)->b_data;
                memset(stats, 0, sizeof(struct ocfs2_blockcheck_stats));
                spin_lock_init(&stats->b_lock);
-               status = ocfs2_verify_volume(di, *bh, blksize, stats);
-               if (status >= 0)
-                       goto bail;
-               brelse(*bh);
-               *bh = NULL;
-               if (status != -EAGAIN)
+               tmpstat = ocfs2_verify_volume(di, *bh, blksize, stats);
+               if (tmpstat < 0) {
+                       brelse(*bh);
+                       *bh = NULL;
+               }
+               if (tmpstat != -EAGAIN) {
+                       status = tmpstat;
                        break;
+               }
        }
 
 bail:
@@ -1645,6 +1647,10 @@ static int ocfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
        buf->f_bavail = buf->f_bfree;
        buf->f_files = numbits;
        buf->f_ffree = freebits;
+       buf->f_fsid.val[0] = crc32_le(0, osb->uuid_str, OCFS2_VOL_UUID_LEN)
+                               & 0xFFFFFFFFUL;
+       buf->f_fsid.val[1] = crc32_le(0, osb->uuid_str + OCFS2_VOL_UUID_LEN,
+                               OCFS2_VOL_UUID_LEN) & 0xFFFFFFFFUL;
 
        brelse(bh);
 
index b6284f235d2ff9e54e15834014ebc8f3e059851d..c61369342a276e17a6e35fcc79df544a2bd4419c 100644 (file)
 #include <linux/highmem.h>
 #include <linux/buffer_head.h>
 #include <linux/rbtree.h>
-#ifndef CONFIG_OCFS2_COMPAT_JBD
-# include <linux/jbd2.h>
-#else
-# include <linux/jbd.h>
-#endif
 
 #define MLOG_MASK_PREFIX ML_UPTODATE
 
index 07f77a7945c320e7b9ac133e523f83b9824691e2..822c2d5065189906cd6a63bedd27875290964e04 100644 (file)
@@ -571,7 +571,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
                rsslim,
                mm ? mm->start_code : 0,
                mm ? mm->end_code : 0,
-               (permitted) ? task->stack_start : 0,
+               (permitted && mm) ? task->stack_start : 0,
                esp,
                eip,
                /* The signal information here is obsolete.
index cd2d7896e34baede0f7e3f37c1c25f91057d18db..495dc8af4044fa1cb15898dc5c1450f8ecfb19a8 100644 (file)
@@ -89,7 +89,7 @@
 
 #define F_OWNER_TID    0
 #define F_OWNER_PID    1
-#define F_OWNER_GID    2
+#define F_OWNER_PGRP   2
 
 struct f_owner_ex {
        int     type;
index 84d3532dd3eada229bb0f3642092c73675cc7a12..7be0c6fbe8808be086d884939c7c621e3f5d78cf 100644 (file)
@@ -91,6 +91,8 @@ struct fscache_operation {
 #define FSCACHE_OP_WAITING     4       /* cleared when op is woken */
 #define FSCACHE_OP_EXCLUSIVE   5       /* exclusive op, other ops must wait */
 #define FSCACHE_OP_DEAD                6       /* op is now dead */
+#define FSCACHE_OP_DEC_READ_CNT        7       /* decrement object->n_reads on destruction */
+#define FSCACHE_OP_KEEP_FLAGS  0xc0    /* flags to keep when repurposing an op */
 
        atomic_t                usage;
        unsigned                debug_id;       /* debugging ID */
@@ -102,6 +104,16 @@ struct fscache_operation {
 
        /* operation releaser */
        fscache_operation_release_t release;
+
+#ifdef CONFIG_SLOW_WORK_PROC
+       const char *name;               /* operation name */
+       const char *state;              /* operation state */
+#define fscache_set_op_name(OP, N)     do { (OP)->name  = (N); } while(0)
+#define fscache_set_op_state(OP, S)    do { (OP)->state = (S); } while(0)
+#else
+#define fscache_set_op_name(OP, N)     do { } while(0)
+#define fscache_set_op_state(OP, S)    do { } while(0)
+#endif
 };
 
 extern atomic_t fscache_op_debug_id;
@@ -125,6 +137,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
        op->debug_id = atomic_inc_return(&fscache_op_debug_id);
        op->release = release;
        INIT_LIST_HEAD(&op->pend_link);
+       fscache_set_op_state(op, "Init");
 }
 
 /**
@@ -221,8 +234,10 @@ struct fscache_cache_ops {
        struct fscache_object *(*alloc_object)(struct fscache_cache *cache,
                                               struct fscache_cookie *cookie);
 
-       /* look up the object for a cookie */
-       void (*lookup_object)(struct fscache_object *object);
+       /* look up the object for a cookie
+        * - return -ETIMEDOUT to be requeued
+        */
+       int (*lookup_object)(struct fscache_object *object);
 
        /* finished looking up */
        void (*lookup_complete)(struct fscache_object *object);
@@ -297,12 +312,14 @@ struct fscache_cookie {
        atomic_t                        usage;          /* number of users of this cookie */
        atomic_t                        n_children;     /* number of children of this cookie */
        spinlock_t                      lock;
+       spinlock_t                      stores_lock;    /* lock on page store tree */
        struct hlist_head               backing_objects; /* object(s) backing this file/index */
        const struct fscache_cookie_def *def;           /* definition */
        struct fscache_cookie           *parent;        /* parent of this entry */
        void                            *netfs_data;    /* back pointer to netfs */
        struct radix_tree_root          stores;         /* pages to be stored on this cookie */
 #define FSCACHE_COOKIE_PENDING_TAG     0               /* pages tag: pending write to cache */
+#define FSCACHE_COOKIE_STORING_TAG     1               /* pages tag: writing to cache */
 
        unsigned long                   flags;
 #define FSCACHE_COOKIE_LOOKING_UP      0       /* T if non-index cookie being looked up still */
@@ -337,6 +354,7 @@ struct fscache_object {
                FSCACHE_OBJECT_RECYCLING,       /* retiring object */
                FSCACHE_OBJECT_WITHDRAWING,     /* withdrawing object */
                FSCACHE_OBJECT_DEAD,            /* object is now dead */
+               FSCACHE_OBJECT__NSTATES
        } state;
 
        int                     debug_id;       /* debugging ID */
@@ -345,6 +363,7 @@ struct fscache_object {
        int                     n_obj_ops;      /* number of object ops outstanding on object */
        int                     n_in_progress;  /* number of ops in progress */
        int                     n_exclusive;    /* number of exclusive ops queued */
+       atomic_t                n_reads;        /* number of read ops in progress */
        spinlock_t              lock;           /* state and operations lock */
 
        unsigned long           lookup_jif;     /* time at which lookup started */
@@ -358,6 +377,7 @@ struct fscache_object {
 #define FSCACHE_OBJECT_EV_RELEASE      4       /* T if netfs requested object release */
 #define FSCACHE_OBJECT_EV_RETIRE       5       /* T if netfs requested object retirement */
 #define FSCACHE_OBJECT_EV_WITHDRAW     6       /* T if cache requested object withdrawal */
+#define FSCACHE_OBJECT_EVENTS_MASK     0x7f    /* mask of all events */
 
        unsigned long           flags;
 #define FSCACHE_OBJECT_LOCK            0       /* T if object is busy being processed */
@@ -373,7 +393,11 @@ struct fscache_object {
        struct list_head        dependents;     /* FIFO of dependent objects */
        struct list_head        dep_link;       /* link in parent's dependents list */
        struct list_head        pending_ops;    /* unstarted operations on this object */
+#ifdef CONFIG_FSCACHE_OBJECT_LIST
+       struct rb_node          objlist_link;   /* link in global object list */
+#endif
        pgoff_t                 store_limit;    /* current storage limit */
+       loff_t                  store_limit_l;  /* current storage limit */
 };
 
 extern const char *fscache_object_states[];
@@ -383,6 +407,10 @@ extern const char *fscache_object_states[];
         (obj)->state >= FSCACHE_OBJECT_AVAILABLE &&          \
         (obj)->state < FSCACHE_OBJECT_DYING)
 
+#define fscache_object_is_dead(obj)                            \
+       (test_bit(FSCACHE_IOERROR, &(obj)->cache->flags) &&     \
+        (obj)->state >= FSCACHE_OBJECT_DYING)
+
 extern const struct slow_work_ops fscache_object_slow_work_ops;
 
 /**
@@ -414,6 +442,7 @@ void fscache_object_init(struct fscache_object *object,
        object->events = object->event_mask = 0;
        object->flags = 0;
        object->store_limit = 0;
+       object->store_limit_l = 0;
        object->cache = cache;
        object->cookie = cookie;
        object->parent = NULL;
@@ -422,6 +451,12 @@ void fscache_object_init(struct fscache_object *object,
 extern void fscache_object_lookup_negative(struct fscache_object *object);
 extern void fscache_obtained_object(struct fscache_object *object);
 
+#ifdef CONFIG_FSCACHE_OBJECT_LIST
+extern void fscache_object_destroy(struct fscache_object *object);
+#else
+#define fscache_object_destroy(object) do {} while(0)
+#endif
+
 /**
  * fscache_object_destroyed - Note destruction of an object in a cache
  * @cache: The cache from which the object came
@@ -460,6 +495,7 @@ static inline void fscache_object_lookup_error(struct fscache_object *object)
 static inline
 void fscache_set_store_limit(struct fscache_object *object, loff_t i_size)
 {
+       object->store_limit_l = i_size;
        object->store_limit = i_size >> PAGE_SHIFT;
        if (i_size & ~PAGE_MASK)
                object->store_limit++;
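
With lookup_object() now returning int, a cache backend can ask to be
requeued rather than blocking the object state machine.  A hedged
backend-side sketch (the example_* type and helpers are hypothetical; it
assumes struct example_object embeds a struct fscache_object named
"fscache"):

static int example_lookup_object(struct fscache_object *object)
{
	struct example_object *xobj =
		container_of(object, struct example_object, fscache);

	if (!example_backing_ready(xobj))
		return -ETIMEDOUT;	/* ask to be requeued and retried */

	example_do_lookup(xobj);		/* find the backing file */
	fscache_obtained_object(object);	/* report a positive lookup */
	return 0;
}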
index 6d8ee466e0a00a8b3f4e98927a373869dfee9726..595ce49288b7807f0f42c40a6c2dcde9ed354d36 100644 (file)
@@ -202,6 +202,8 @@ extern int __fscache_write_page(struct fscache_cookie *, struct page *, gfp_t);
 extern void __fscache_uncache_page(struct fscache_cookie *, struct page *);
 extern bool __fscache_check_page_write(struct fscache_cookie *, struct page *);
 extern void __fscache_wait_on_page_write(struct fscache_cookie *, struct page *);
+extern bool __fscache_maybe_release_page(struct fscache_cookie *, struct page *,
+                                        gfp_t);
 
 /**
  * fscache_register_netfs - Register a filesystem as desiring caching services
@@ -615,4 +617,29 @@ void fscache_wait_on_page_write(struct fscache_cookie *cookie,
                __fscache_wait_on_page_write(cookie, page);
 }
 
+/**
+ * fscache_maybe_release_page - Consider releasing a page, cancelling a store
+ * @cookie: The cookie representing the cache object
+ * @page: The netfs page that is being cached.
+ * @gfp: The gfp flags passed to releasepage()
+ *
+ * Consider releasing a page for the vmscan algorithm, on behalf of the netfs's
+ * releasepage() call.  A storage request on the page may be cancelled if it is
+ * not currently being processed.
+ *
+ * The function returns true if the page no longer has a storage request on it,
+ * and false if a storage request is left in place.  If true is returned, the
+ * page will have been passed to fscache_uncache_page().  If false is returned
+ * the page cannot be freed yet.
+ */
+static inline
+bool fscache_maybe_release_page(struct fscache_cookie *cookie,
+                               struct page *page,
+                               gfp_t gfp)
+{
+       if (fscache_cookie_valid(cookie) && PageFsCache(page))
+               return __fscache_maybe_release_page(cookie, page, gfp);
+       return false;
+}
+
 #endif /* _LINUX_FSCACHE_H */
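
A minimal netfs-side sketch of wiring the new helper into an address_space
releasepage() method (the examplefs names are hypothetical):

static int examplefs_releasepage(struct page *page, gfp_t gfp)
{
	struct inode *inode = page->mapping->host;

	if (PageFsCache(page) &&
	    !fscache_maybe_release_page(EXAMPLEFS_I(inode)->fscache,
					page, gfp))
		return 0;	/* a store is still queued against the page */

	return 1;		/* the VM may free the page */
}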
index f13255e06406aa9eb7a8c60485599b960a976f8b..9eb07bbc6522bb4508797a7468a22ba465894e34 100644 (file)
@@ -21,7 +21,7 @@ struct i2c_pnx_mif {
        int                     mode;           /* Interface mode */
        struct completion       complete;       /* I/O completion */
        struct timer_list       timer;          /* Timeout */
-       char *                  buf;            /* Data buffer */
+       u8 *                    buf;            /* Data buffer */
        int                     len;            /* Length of data buffer */
 };
 
index 4c218ee7587ad47c83c5b7d7375181ee6c2ec9ea..8687a7dc0632378c4828b2c58664ec638227919d 100644 (file)
@@ -157,7 +157,7 @@ typedef struct {
 
 typedef struct {
   int mp_mrru;                        /* unused                             */
-  struct sk_buff_head frags;   /* fragments sl list */
+  struct sk_buff * frags;      /* fragments sl list -- use skb->next */
   long frames;                 /* number of frames in the frame list */
   unsigned int seq;            /* last processed packet seq #: any packets
                                 * with smaller seq # will be dropped
index b65c8881f07acae832e16a8dca1dae7380f1a3c4..5035a26917392ea0ae5b5f133f9442740ff0b4cb 100644 (file)
 #ifdef CONFIG_SLOW_WORK
 
 #include <linux/sysctl.h>
+#include <linux/timer.h>
 
 struct slow_work;
+#ifdef CONFIG_SLOW_WORK_PROC
+struct seq_file;
+#endif
 
 /*
  * The operations used to support slow work items
  */
 struct slow_work_ops {
+       /* owner */
+       struct module *owner;
+
        /* get a ref on a work item
         * - return 0 if successful, -ve if not
         */
@@ -34,6 +41,11 @@ struct slow_work_ops {
 
        /* execute a work item */
        void (*execute)(struct slow_work *work);
+
+#ifdef CONFIG_SLOW_WORK_PROC
+       /* describe a work item for /proc */
+       void (*desc)(struct slow_work *work, struct seq_file *m);
+#endif
 };
 
 /*
@@ -42,13 +54,24 @@ struct slow_work_ops {
  *   queued
  */
 struct slow_work {
+       struct module           *owner; /* the owning module */
        unsigned long           flags;
 #define SLOW_WORK_PENDING      0       /* item pending (further) execution */
 #define SLOW_WORK_EXECUTING    1       /* item currently executing */
 #define SLOW_WORK_ENQ_DEFERRED 2       /* item enqueue deferred */
 #define SLOW_WORK_VERY_SLOW    3       /* item is very slow */
+#define SLOW_WORK_CANCELLING   4       /* item is being cancelled, don't enqueue */
+#define SLOW_WORK_DELAYED      5       /* item is struct delayed_slow_work with active timer */
        const struct slow_work_ops *ops; /* operations table for this item */
        struct list_head        link;   /* link in queue */
+#ifdef CONFIG_SLOW_WORK_PROC
+       struct timespec         mark;   /* jiffies at which queued or exec begun */
+#endif
+};
+
+struct delayed_slow_work {
+       struct slow_work        work;
+       struct timer_list       timer;
 };
 
 /**
@@ -66,6 +89,20 @@ static inline void slow_work_init(struct slow_work *work,
        INIT_LIST_HEAD(&work->link);
 }
 
+/**
+ * delayed_slow_work_init - Initialise a delayed slow work item
+ * @dwork: The delayed work item to initialise
+ * @ops: The operations to use to handle the slow work item
+ *
+ * Initialise a delayed slow work item.
+ */
+static inline void delayed_slow_work_init(struct delayed_slow_work *dwork,
+                                         const struct slow_work_ops *ops)
+{
+       init_timer(&dwork->timer);
+       slow_work_init(&dwork->work, ops);
+}
+
 /**
  * vslow_work_init - Initialise a very slow work item
  * @work: The work item to initialise
@@ -83,9 +120,40 @@ static inline void vslow_work_init(struct slow_work *work,
        INIT_LIST_HEAD(&work->link);
 }
 
+/**
+ * slow_work_is_queued - Determine if a slow work item is on the work queue
+ * @work: The work item to test
+ *
+ * Determine if the specified slow-work item is on the work queue.  This
+ * returns true if it is actually on the queue.
+ *
+ * If the item is executing and has been marked for requeue when execution
+ * finishes, then false will be returned.
+ *
+ * Anyone wishing to wait for completion of execution can wait on the
+ * SLOW_WORK_EXECUTING bit.
+ */
+static inline bool slow_work_is_queued(struct slow_work *work)
+{
+       unsigned long flags = work->flags;
+       /* the SLOW_WORK_* constants are bit numbers, not masks */
+       return (flags & (1UL << SLOW_WORK_PENDING)) &&
+              !(flags & (1UL << SLOW_WORK_EXECUTING));
+}
+
 extern int slow_work_enqueue(struct slow_work *work);
-extern int slow_work_register_user(void);
-extern void slow_work_unregister_user(void);
+extern void slow_work_cancel(struct slow_work *work);
+extern int slow_work_register_user(struct module *owner);
+extern void slow_work_unregister_user(struct module *owner);
+
+extern int delayed_slow_work_enqueue(struct delayed_slow_work *dwork,
+                                    unsigned long delay);
+
+static inline void delayed_slow_work_cancel(struct delayed_slow_work *dwork)
+{
+       slow_work_cancel(&dwork->work);
+}
+
+extern bool slow_work_sleep_till_thread_needed(struct slow_work *work,
+                                              signed long *_timeout);
 
 #ifdef CONFIG_SYSCTL
 extern ctl_table slow_work_sysctls[];
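
A hedged usage sketch of the delayed API above; all example_* names are
hypothetical.  Note that delayed_slow_work_enqueue() calls ops->get_ref()
unconditionally, so the ops table must provide one even if it is a no-op:

static void example_execute(struct slow_work *work);

static int example_get_ref(struct slow_work *work)
{
	return 0;	/* the item is static; nothing to pin */
}

static void example_put_ref(struct slow_work *work)
{
}

static const struct slow_work_ops example_slow_ops = {
	.owner		= THIS_MODULE,
	.get_ref	= example_get_ref,
	.put_ref	= example_put_ref,
	.execute	= example_execute,
};

static struct delayed_slow_work example_dwork;

static void example_arm(void)
{
	int ret;

	delayed_slow_work_init(&example_dwork, &example_slow_ops);

	/* run example_execute() roughly ten seconds from now */
	ret = delayed_slow_work_enqueue(&example_dwork, 10 * HZ);
	if (ret < 0)	/* e.g. -ECANCELED if a cancel is racing with us */
		printk(KERN_WARNING "example: enqueue failed: %d\n", ret);
}

static void example_disarm(void)
{
	/* kills any pending timer and waits out an execution in progress */
	delayed_slow_work_cancel(&example_dwork);
}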
index cd15df6c63cdc07f51d2823ccb2a7e77b070c8ee..5e781d824e6d1680466479bdd6d726195203c55a 100644 (file)
@@ -301,6 +301,8 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
 #define pm_notifier(fn, pri)   do { (void)(fn); } while (0)
 #endif /* !CONFIG_PM_SLEEP */
 
+extern struct mutex pm_mutex;
+
 #ifndef CONFIG_HIBERNATION
 static inline void register_nosave_region(unsigned long b, unsigned long e)
 {
@@ -308,8 +310,23 @@ static inline void register_nosave_region(unsigned long b, unsigned long e)
 static inline void register_nosave_region_late(unsigned long b, unsigned long e)
 {
 }
-#endif
 
-extern struct mutex pm_mutex;
+static inline void lock_system_sleep(void) {}
+static inline void unlock_system_sleep(void) {}
+
+#else
+
+/* Let some subsystems like memory hotadd exclude hibernation */
+
+static inline void lock_system_sleep(void)
+{
+       mutex_lock(&pm_mutex);
+}
+
+static inline void unlock_system_sleep(void)
+{
+       mutex_unlock(&pm_mutex);
+}
+#endif
 
 #endif /* _LINUX_SUSPEND_H */
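
Per the comment above, the new helpers let subsystems such as memory hot-add
exclude hibernation for the duration of a critical region.  A minimal sketch
(the hot-add worker is hypothetical):

int example_mem_hotadd(u64 start, u64 size)
{
	int ret;

	lock_system_sleep();	/* takes pm_mutex when hibernation is built in */
	ret = example_do_hotadd(start, size);
	unlock_system_sleep();

	return ret;
}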
index 7afca0d72139f4993b089b5b1761c16f846f3434..7ffa11f062324de1b79bb0d3093dac8523aaee0c 100644 (file)
@@ -70,8 +70,8 @@ struct vt_event {
 #define VT_EVENT_UNBLANK       0x0004  /* Screen unblank */
 #define VT_EVENT_RESIZE                0x0008  /* Resize display */
 #define VT_MAX_EVENT           0x000F
-       unsigned int old;               /* Old console */
-       unsigned int new;               /* New console (if changing) */
+       unsigned int oldev;             /* Old console */
+       unsigned int newev;             /* New console (if changing) */
        unsigned int pad[4];            /* Padding for expansion */
 };
 
index c75b960c8ac8e0e954c400f43c4c163cc360caea..998c30fc89819f2d48140dd83ad82feb40e6f99b 100644 (file)
@@ -1283,6 +1283,12 @@ enum ieee80211_filter_flags {
  *
  * These flags are used with the ampdu_action() callback in
  * &struct ieee80211_ops to indicate which action is needed.
+ *
+ * Note that drivers MUST be able to deal with a TX aggregation
+ * session being stopped even before they OK'ed starting it by
+ * calling ieee80211_start_tx_ba_cb(_irqsafe), because the peer
+ * might receive the addBA frame and send a delBA right away!
+ *
  * @IEEE80211_AMPDU_RX_START: start Rx aggregation
  * @IEEE80211_AMPDU_RX_STOP: stop Rx aggregation
  * @IEEE80211_AMPDU_TX_START: start Tx aggregation
index 6e5f0e0c7967e9f78589ba4f5d1f56ab09c2e2aa..0a474568b003053686f98e27d4102d34148a8ade 100644 (file)
@@ -893,7 +893,6 @@ struct sctp_transport {
         */
        /* RTO         : The current retransmission timeout value.  */
        unsigned long rto;
-       unsigned long last_rto;
 
        __u32 rtt;              /* This is the most recent RTT.  */
 
@@ -1980,7 +1979,7 @@ void sctp_assoc_set_primary(struct sctp_association *,
 void sctp_assoc_del_nonprimary_peers(struct sctp_association *,
                                    struct sctp_transport *);
 int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *,
-                                    gfp_t);
+                                    sctp_scope_t, gfp_t);
 int sctp_assoc_set_bind_addr_from_cookie(struct sctp_association *,
                                         struct sctp_cookie*,
                                         gfp_t gfp);
index 9af48cbf0036782386e6cfdf403476b8b159d563..f097ae340bc19f486a7ff5a287acf1c4a8fd72e0 100644 (file)
@@ -145,6 +145,7 @@ struct scsi_device {
        unsigned retry_hwerror:1;       /* Retry HARDWARE_ERROR */
        unsigned last_sector_bug:1;     /* do not use multisector accesses on
                                           SD_LAST_BUGGY_SECTORS */
+       unsigned is_visible:1;  /* is the device visible in sysfs */
 
        DECLARE_BITMAP(supported_events, SDEV_EVT_MAXBITS); /* supported events */
        struct list_head event_list;    /* asserted events */
index 6e728b176904664abb12f132fa63aaced2a4aa00..47941fc5aba753e2de036f8fb79f06588f73c9ff 100644 (file)
@@ -797,30 +797,23 @@ static inline unsigned int scsi_host_get_prot(struct Scsi_Host *shost)
 
 static inline unsigned int scsi_host_dif_capable(struct Scsi_Host *shost, unsigned int target_type)
 {
-       switch (target_type) {
-       case 1:
-               if (shost->prot_capabilities & SHOST_DIF_TYPE1_PROTECTION)
-                       return target_type;
-       case 2:
-               if (shost->prot_capabilities & SHOST_DIF_TYPE2_PROTECTION)
-                       return target_type;
-       case 3:
-               if (shost->prot_capabilities & SHOST_DIF_TYPE3_PROTECTION)
-                       return target_type;
-       }
+       static unsigned char cap[] = { 0,
+                                      SHOST_DIF_TYPE1_PROTECTION,
+                                      SHOST_DIF_TYPE2_PROTECTION,
+                                      SHOST_DIF_TYPE3_PROTECTION };
 
-       return 0;
+       return shost->prot_capabilities & cap[target_type] ? target_type : 0;
 }
 
 static inline unsigned int scsi_host_dix_capable(struct Scsi_Host *shost, unsigned int target_type)
 {
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
-       switch (target_type) {
-       case 0: return shost->prot_capabilities & SHOST_DIX_TYPE0_PROTECTION;
-       case 1: return shost->prot_capabilities & SHOST_DIX_TYPE1_PROTECTION;
-       case 2: return shost->prot_capabilities & SHOST_DIX_TYPE2_PROTECTION;
-       case 3: return shost->prot_capabilities & SHOST_DIX_TYPE3_PROTECTION;
-       }
+       static unsigned char cap[] = { SHOST_DIX_TYPE0_PROTECTION,
+                                      SHOST_DIX_TYPE1_PROTECTION,
+                                      SHOST_DIX_TYPE2_PROTECTION,
+                                      SHOST_DIX_TYPE3_PROTECTION };
+
+       return shost->prot_capabilities & cap[target_type];
 #endif
        return 0;
 }
index 9e03ef8b311ec303b7fd1f70cc21021620100171..ab5c64801fe50ab21dc3ae7db263f5f19a99ad56 100644 (file)
@@ -1098,6 +1098,16 @@ config SLOW_WORK
 
          See Documentation/slow-work.txt.
 
+config SLOW_WORK_PROC
+       bool "Slow work debugging through /proc"
+       default n
+       depends on SLOW_WORK && PROC_FS
+       help
+         Display the contents of the slow work run queue through /proc,
+         including items currently executing.
+
+         See Documentation/slow-work.txt.
+
 endmenu                # General setup
 
 config HAVE_GENERIC_DMA_COHERENT
index b8d4cd8ac0b9d5d93e303833e70ed55d450582ee..776ffed1556d188dc5737dc8ee15a884de274474 100644 (file)
@@ -94,6 +94,7 @@ obj-$(CONFIG_X86_DS) += trace/
 obj-$(CONFIG_RING_BUFFER) += trace/
 obj-$(CONFIG_SMP) += sched_cpupri.o
 obj-$(CONFIG_SLOW_WORK) += slow-work.o
+obj-$(CONFIG_SLOW_WORK_PROC) += slow-work-proc.o
 obj-$(CONFIG_PERF_EVENTS) += perf_event.o
 
 ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
diff --git a/kernel/slow-work-proc.c b/kernel/slow-work-proc.c
new file mode 100644 (file)
index 0000000..3988032
--- /dev/null
@@ -0,0 +1,227 @@
+/* Slow work debugging
+ *
+ * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/slow-work.h>
+#include <linux/fs.h>
+#include <linux/time.h>
+#include <linux/seq_file.h>
+#include "slow-work.h"
+
+#define ITERATOR_SHIFT         (BITS_PER_LONG - 4)
+#define ITERATOR_SELECTOR      (0xfUL << ITERATOR_SHIFT)
+#define ITERATOR_COUNTER       (~ITERATOR_SELECTOR)
+
+void slow_work_new_thread_desc(struct slow_work *work, struct seq_file *m)
+{
+       seq_puts(m, "Slow-work: New thread");
+}
+
+/*
+ * Render the time mark field on a work item into a 5-char time with units plus
+ * a space
+ */
+static void slow_work_print_mark(struct seq_file *m, struct slow_work *work)
+{
+       struct timespec now, diff;
+
+       now = CURRENT_TIME;
+       diff = timespec_sub(now, work->mark);
+
+       if (diff.tv_sec < 0)
+               seq_puts(m, "  -ve ");
+       else if (diff.tv_sec == 0 && diff.tv_nsec < 1000)
+               seq_printf(m, "%3luns ", diff.tv_nsec);
+       else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000)
+               seq_printf(m, "%3luus ", diff.tv_nsec / 1000);
+       else if (diff.tv_sec == 0 && diff.tv_nsec < 1000000000)
+               seq_printf(m, "%3lums ", diff.tv_nsec / 1000000);
+       else if (diff.tv_sec <= 1)
+               seq_puts(m, "   1s ");
+       else if (diff.tv_sec < 60)
+               seq_printf(m, "%4lus ", diff.tv_sec);
+       else if (diff.tv_sec < 60 * 60)
+               seq_printf(m, "%4lum ", diff.tv_sec / 60);
+       else if (diff.tv_sec < 60 * 60 * 24)
+               seq_printf(m, "%4luh ", diff.tv_sec / 3600);
+       else
+               seq_puts(m, "exces ");
+}
+
+/*
+ * Describe a slow work item for /proc
+ */
+static int slow_work_runqueue_show(struct seq_file *m, void *v)
+{
+       struct slow_work *work;
+       struct list_head *p = v;
+       unsigned long id;
+
+       switch ((unsigned long) v) {
+       case 1:
+               seq_puts(m, "THR PID   ITEM ADDR        FL MARK  DESC\n");
+               return 0;
+       case 2:
+               seq_puts(m, "=== ===== ================ == ===== ==========\n");
+               return 0;
+
+       case 3 ... 3 + SLOW_WORK_THREAD_LIMIT - 1:
+               id = (unsigned long) v - 3;
+
+               read_lock(&slow_work_execs_lock);
+               work = slow_work_execs[id];
+               if (work) {
+                       smp_read_barrier_depends();
+
+                       seq_printf(m, "%3lu %5d %16p %2lx ",
+                                  id, slow_work_pids[id], work, work->flags);
+                       slow_work_print_mark(m, work);
+
+                       if (work->ops->desc)
+                               work->ops->desc(work, m);
+                       seq_putc(m, '\n');
+               }
+               read_unlock(&slow_work_execs_lock);
+               return 0;
+
+       default:
+               work = list_entry(p, struct slow_work, link);
+               seq_printf(m, "%3s     - %16p %2lx ",
+                          work->flags & SLOW_WORK_VERY_SLOW ? "vsq" : "sq",
+                          work, work->flags);
+               slow_work_print_mark(m, work);
+
+               if (work->ops->desc)
+                       work->ops->desc(work, m);
+               seq_putc(m, '\n');
+               return 0;
+       }
+}
+
+/*
+ * map the iterator to a work item
+ */
+static void *slow_work_runqueue_index(struct seq_file *m, loff_t *_pos)
+{
+       struct list_head *p;
+       unsigned long count, id;
+
+       switch (*_pos >> ITERATOR_SHIFT) {
+       case 0x0:
+               if (*_pos == 0)
+                       *_pos = 1;
+               if (*_pos < 3)
+                       return (void *)(unsigned long) *_pos;
+               if (*_pos < 3 + SLOW_WORK_THREAD_LIMIT)
+                       for (id = *_pos - 3;
+                            id < SLOW_WORK_THREAD_LIMIT;
+                            id++, (*_pos)++)
+                               if (slow_work_execs[id])
+                                       return (void *)(unsigned long) *_pos;
+               *_pos = 0x1UL << ITERATOR_SHIFT;
+
+       case 0x1:
+               count = *_pos & ITERATOR_COUNTER;
+               list_for_each(p, &slow_work_queue) {
+                       if (count == 0)
+                               return p;
+                       count--;
+               }
+               *_pos = 0x2UL << ITERATOR_SHIFT;
+
+       case 0x2:
+               count = *_pos & ITERATOR_COUNTER;
+               list_for_each(p, &vslow_work_queue) {
+                       if (count == 0)
+                               return p;
+                       count--;
+               }
+               *_pos = 0x3UL << ITERATOR_SHIFT;
+
+       default:
+               return NULL;
+       }
+}
+
+/*
+ * set up the iterator to start reading from the first line
+ */
+static void *slow_work_runqueue_start(struct seq_file *m, loff_t *_pos)
+{
+       spin_lock_irq(&slow_work_queue_lock);
+       return slow_work_runqueue_index(m, _pos);
+}
+
+/*
+ * move to the next line
+ */
+static void *slow_work_runqueue_next(struct seq_file *m, void *v, loff_t *_pos)
+{
+       struct list_head *p = v;
+       unsigned long selector = *_pos >> ITERATOR_SHIFT;
+
+       (*_pos)++;
+       switch (selector) {
+       case 0x0:
+               return slow_work_runqueue_index(m, _pos);
+
+       case 0x1:
+               if (*_pos >> ITERATOR_SHIFT == 0x1) {
+                       p = p->next;
+                       if (p != &slow_work_queue)
+                               return p;
+               }
+               *_pos = 0x2UL << ITERATOR_SHIFT;
+               p = &vslow_work_queue;
+
+       case 0x2:
+               if (*_pos >> ITERATOR_SHIFT == 0x2) {
+                       p = p->next;
+                       if (p != &vslow_work_queue)
+                               return p;
+               }
+               *_pos = 0x3UL << ITERATOR_SHIFT;
+
+       default:
+               return NULL;
+       }
+}
+
+/*
+ * clean up after reading
+ */
+static void slow_work_runqueue_stop(struct seq_file *m, void *v)
+{
+       spin_unlock_irq(&slow_work_queue_lock);
+}
+
+static const struct seq_operations slow_work_runqueue_ops = {
+       .start          = slow_work_runqueue_start,
+       .stop           = slow_work_runqueue_stop,
+       .next           = slow_work_runqueue_next,
+       .show           = slow_work_runqueue_show,
+};
+
+/*
+ * open "/proc/slow_work_rq" to list queue contents
+ */
+static int slow_work_runqueue_open(struct inode *inode, struct file *file)
+{
+       return seq_open(file, &slow_work_runqueue_ops);
+}
+
+const struct file_operations slow_work_runqueue_fops = {
+       .owner          = THIS_MODULE,
+       .open           = slow_work_runqueue_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = seq_release,
+};
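
For reference, reading /proc/slow_work_rq through the seq_operations above
produces output shaped roughly like this (the addresses, PID, timings and
flag values are invented for illustration; the description text comes from
slow_work_new_thread_desc() in this file, and queued items show "sq" or
"vsq" in place of a thread ID):

THR PID   ITEM ADDR        FL MARK  DESC
=== ===== ================ == ===== ==========
  0  4211 ffff88003a2e5d60  2 213ms Slow-work: New thread
 sq      - ffff88003a2e5f20  1  92us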
index 0d31135efbf4cab0b98babcaa05355eaf5130dfa..da94f3c101af77985272b72861bca34db1c853aa 100644 (file)
 #include <linux/kthread.h>
 #include <linux/freezer.h>
 #include <linux/wait.h>
-
-#define SLOW_WORK_CULL_TIMEOUT (5 * HZ)        /* cull threads 5s after running out of
-                                        * things to do */
-#define SLOW_WORK_OOM_TIMEOUT (5 * HZ) /* can't start new threads for 5s after
-                                        * OOM */
+#include <linux/proc_fs.h>
+#include "slow-work.h"
 
 static void slow_work_cull_timeout(unsigned long);
 static void slow_work_oom_timeout(unsigned long);
@@ -46,7 +43,7 @@ static unsigned vslow_work_proportion = 50; /* % of threads that may process
 
 #ifdef CONFIG_SYSCTL
 static const int slow_work_min_min_threads = 2;
-static int slow_work_max_max_threads = 255;
+static int slow_work_max_max_threads = SLOW_WORK_THREAD_LIMIT;
 static const int slow_work_min_vslow = 1;
 static const int slow_work_max_vslow = 99;
 
@@ -97,6 +94,32 @@ static DEFINE_TIMER(slow_work_cull_timer, slow_work_cull_timeout, 0, 0);
 static DEFINE_TIMER(slow_work_oom_timer, slow_work_oom_timeout, 0, 0);
 static struct slow_work slow_work_new_thread; /* new thread starter */
 
+/*
+ * slow work ID allocation (use slow_work_queue_lock)
+ */
+static DECLARE_BITMAP(slow_work_ids, SLOW_WORK_THREAD_LIMIT);
+
+/*
+ * Unregistration tracking to prevent put_ref() from disappearing during module
+ * unload
+ */
+#ifdef CONFIG_MODULES
+static struct module *slow_work_thread_processing[SLOW_WORK_THREAD_LIMIT];
+static struct module *slow_work_unreg_module;
+static struct slow_work *slow_work_unreg_work_item;
+static DECLARE_WAIT_QUEUE_HEAD(slow_work_unreg_wq);
+static DEFINE_MUTEX(slow_work_unreg_sync_lock);
+#endif
+
+/*
+ * Data for tracking currently executing items for indication through /proc
+ */
+#ifdef CONFIG_SLOW_WORK_PROC
+struct slow_work *slow_work_execs[SLOW_WORK_THREAD_LIMIT];
+pid_t slow_work_pids[SLOW_WORK_THREAD_LIMIT];
+DEFINE_RWLOCK(slow_work_execs_lock);
+#endif
+
 /*
  * The queues of work items and the lock governing access to them.  These are
  * shared between all the CPUs.  It doesn't make sense to have per-CPU queues
@@ -105,9 +128,18 @@ static struct slow_work slow_work_new_thread; /* new thread starter */
  * There are two queues of work items: one for slow work items, and one for
  * very slow work items.
  */
-static LIST_HEAD(slow_work_queue);
-static LIST_HEAD(vslow_work_queue);
-static DEFINE_SPINLOCK(slow_work_queue_lock);
+LIST_HEAD(slow_work_queue);
+LIST_HEAD(vslow_work_queue);
+DEFINE_SPINLOCK(slow_work_queue_lock);
+
+/*
+ * The following are two wait queues that get pinged when a work item is placed
+ * on an empty queue.  These allow work items that are hogging a thread (by
+ * sleeping in a way that could be deferred) to yield their thread and enqueue
+ * themselves.
+ */
+static DECLARE_WAIT_QUEUE_HEAD(slow_work_queue_waits_for_occupation);
+static DECLARE_WAIT_QUEUE_HEAD(vslow_work_queue_waits_for_occupation);
 
 /*
  * The thread controls.  A variable used to signal to the threads that they
@@ -126,6 +158,20 @@ static DECLARE_COMPLETION(slow_work_last_thread_exited);
 static int slow_work_user_count;
 static DEFINE_MUTEX(slow_work_user_lock);
 
+static inline int slow_work_get_ref(struct slow_work *work)
+{
+       if (work->ops->get_ref)
+               return work->ops->get_ref(work);
+
+       return 0;
+}
+
+static inline void slow_work_put_ref(struct slow_work *work)
+{
+       if (work->ops->put_ref)
+               work->ops->put_ref(work);
+}
+
 /*
  * Calculate the maximum number of active threads in the pool that are
  * permitted to process very slow work items.
@@ -149,8 +195,11 @@ static unsigned slow_work_calc_vsmax(void)
  * Attempt to execute stuff queued on a slow thread.  Return true if we managed
  * it, false if there was nothing to do.
  */
-static bool slow_work_execute(void)
+static noinline bool slow_work_execute(int id)
 {
+#ifdef CONFIG_MODULES
+       struct module *module;
+#endif
        struct slow_work *work = NULL;
        unsigned vsmax;
        bool very_slow;
@@ -186,6 +235,16 @@ static bool slow_work_execute(void)
        } else {
                very_slow = false; /* avoid the compiler warning */
        }
+
+#ifdef CONFIG_MODULES
+       if (work)
+               slow_work_thread_processing[id] = work->owner;
+#endif
+       if (work) {
+               slow_work_mark_time(work);
+               slow_work_begin_exec(id, work);
+       }
+
        spin_unlock_irq(&slow_work_queue_lock);
 
        if (!work)
@@ -194,12 +253,19 @@ static bool slow_work_execute(void)
        if (!test_and_clear_bit(SLOW_WORK_PENDING, &work->flags))
                BUG();
 
-       work->ops->execute(work);
+       /* don't execute if the work is in the process of being cancelled */
+       if (!test_bit(SLOW_WORK_CANCELLING, &work->flags))
+               work->ops->execute(work);
 
        if (very_slow)
                atomic_dec(&vslow_work_executing_count);
        clear_bit_unlock(SLOW_WORK_EXECUTING, &work->flags);
 
+       /* wake up anyone waiting for this work to be complete */
+       wake_up_bit(&work->flags, SLOW_WORK_EXECUTING);
+
+       slow_work_end_exec(id, work);
+
        /* if someone tried to enqueue the item whilst we were executing it,
         * then it'll be left unenqueued to avoid multiple threads trying to
         * execute it simultaneously
@@ -219,7 +285,18 @@ static bool slow_work_execute(void)
                spin_unlock_irq(&slow_work_queue_lock);
        }
 
-       work->ops->put_ref(work);
+       /* sort out the race between module unloading and put_ref() */
+       slow_work_put_ref(work);
+
+#ifdef CONFIG_MODULES
+       module = slow_work_thread_processing[id];
+       slow_work_thread_processing[id] = NULL;
+       smp_mb();
+       if (slow_work_unreg_work_item == work ||
+           slow_work_unreg_module == module)
+               wake_up_all(&slow_work_unreg_wq);
+#endif
+
        return true;
 
 auto_requeue:
@@ -227,14 +304,60 @@ auto_requeue:
         * - we transfer our ref on the item back to the appropriate queue
         * - don't wake another thread up as we're awake already
         */
+       slow_work_mark_time(work);
        if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
                list_add_tail(&work->link, &vslow_work_queue);
        else
                list_add_tail(&work->link, &slow_work_queue);
        spin_unlock_irq(&slow_work_queue_lock);
+#ifdef CONFIG_MODULES
+       slow_work_thread_processing[id] = NULL;
+#endif
        return true;
 }
 
+/**
+ * slow_work_sleep_till_thread_needed - Sleep till thread needed by other work
+ * @work: The work item under execution that wants to sleep
+ * @_timeout: Scheduler sleep timeout
+ *
+ * Allow a requeueable work item to sleep on a slow-work processor thread until
+ * that thread is needed to do some other work or the sleep is interrupted by
+ * some other event.
+ *
+ * The caller must set up a wake up event before calling this and must have set
+ * the appropriate sleep mode (such as TASK_UNINTERRUPTIBLE) and tested its own
+ * condition before calling this function as no test is made here.
+ *
+ * False is returned if there is nothing on the queue; true is returned if the
+ * work item should be requeued.
+ */
+bool slow_work_sleep_till_thread_needed(struct slow_work *work,
+                                       signed long *_timeout)
+{
+       wait_queue_head_t *wfo_wq;
+       struct list_head *queue;
+
+       DEFINE_WAIT(wait);
+
+       if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) {
+               wfo_wq = &vslow_work_queue_waits_for_occupation;
+               queue = &vslow_work_queue;
+       } else {
+               wfo_wq = &slow_work_queue_waits_for_occupation;
+               queue = &slow_work_queue;
+       }
+
+       if (!list_empty(queue))
+               return true;
+
+       add_wait_queue_exclusive(wfo_wq, &wait);
+       if (list_empty(queue))
+               *_timeout = schedule_timeout(*_timeout);
+       finish_wait(wfo_wq, &wait);
+
+       return !list_empty(queue);
+}
+EXPORT_SYMBOL(slow_work_sleep_till_thread_needed);
+
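
A hedged caller-side sketch (the wait queue and condition are hypothetical),
following the rules in the kernel-doc above: the caller primes its own
wakeup, sets the sleep state and tests its condition before sleeping:

static void example_execute(struct slow_work *work)
{
	signed long timeout = 2 * HZ;
	DEFINE_WAIT(wait);

	while (!example_condition_met()) {
		prepare_to_wait(&example_waitq, &wait, TASK_UNINTERRUPTIBLE);
		if (example_condition_met())
			break;
		if (slow_work_sleep_till_thread_needed(work, &timeout)) {
			/* another item wants the thread: requeue and bail */
			finish_wait(&example_waitq, &wait);
			slow_work_enqueue(work);
			return;
		}
		if (timeout <= 0)
			break;	/* give up waiting for the condition */
	}
	finish_wait(&example_waitq, &wait);

	/* ... the condition is met (or we timed out); do the work ... */
}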
 /**
  * slow_work_enqueue - Schedule a slow work item for processing
  * @work: The work item to queue
@@ -260,16 +383,22 @@ auto_requeue:
  * allowed to pick items to execute.  This ensures that very slow items won't
  * overly block ones that are just ordinarily slow.
  *
- * Returns 0 if successful, -EAGAIN if not.
+ * Returns 0 if successful, -EAGAIN if not (or -ECANCELED if cancelled work is
+ * attempted queued)
  */
 int slow_work_enqueue(struct slow_work *work)
 {
+       wait_queue_head_t *wfo_wq;
+       struct list_head *queue;
        unsigned long flags;
+       int ret;
+
+       if (test_bit(SLOW_WORK_CANCELLING, &work->flags))
+               return -ECANCELED;
 
        BUG_ON(slow_work_user_count <= 0);
        BUG_ON(!work);
        BUG_ON(!work->ops);
-       BUG_ON(!work->ops->get_ref);
 
        /* when honouring an enqueue request, we only promise that we will run
         * the work function in the future; we do not promise to run it once
@@ -280,8 +409,19 @@ int slow_work_enqueue(struct slow_work *work)
         * maintaining our promise
         */
        if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) {
+               if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) {
+                       wfo_wq = &vslow_work_queue_waits_for_occupation;
+                       queue = &vslow_work_queue;
+               } else {
+                       wfo_wq = &slow_work_queue_waits_for_occupation;
+                       queue = &slow_work_queue;
+               }
+
                spin_lock_irqsave(&slow_work_queue_lock, flags);
 
+               if (unlikely(test_bit(SLOW_WORK_CANCELLING, &work->flags)))
+                       goto cancelled;
+
                /* we promise that we will not attempt to execute the work
                 * function in more than one thread simultaneously
                 *
@@ -299,25 +439,221 @@ int slow_work_enqueue(struct slow_work *work)
                if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
                        set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
                } else {
-                       if (work->ops->get_ref(work) < 0)
-                               goto cant_get_ref;
-                       if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags))
-                               list_add_tail(&work->link, &vslow_work_queue);
-                       else
-                               list_add_tail(&work->link, &slow_work_queue);
+                       ret = slow_work_get_ref(work);
+                       if (ret < 0)
+                               goto failed;
+                       slow_work_mark_time(work);
+                       list_add_tail(&work->link, queue);
                        wake_up(&slow_work_thread_wq);
+
+                       /* if someone who could be requeued is sleeping on a
+                        * thread, then ask them to yield their thread */
+                       if (work->link.prev == queue)
+                               wake_up(wfo_wq);
                }
 
                spin_unlock_irqrestore(&slow_work_queue_lock, flags);
        }
        return 0;
 
-cant_get_ref:
+cancelled:
+       ret = -ECANCELED;
+failed:
        spin_unlock_irqrestore(&slow_work_queue_lock, flags);
-       return -EAGAIN;
+       return ret;
 }
 EXPORT_SYMBOL(slow_work_enqueue);
 
+static int slow_work_wait(void *word)
+{
+       schedule();
+       return 0;
+}
+
+/**
+ * slow_work_cancel - Cancel a slow work item
+ * @work: The work item to cancel
+ *
+ * This function will cancel a previously enqueued work item. If we cannot
+ * cancel the work item, it is guaranteed to have run when this function
+ * returns.
+ */
+void slow_work_cancel(struct slow_work *work)
+{
+       bool wait = true, put = false;
+
+       set_bit(SLOW_WORK_CANCELLING, &work->flags);
+       smp_mb();
+
+       /* if the work item is a delayed work item with an active timer, we
+        * need to wait for the timer to finish _before_ getting the spinlock,
+        * lest we deadlock against the timer routine
+        *
+        * the timer routine will leave DELAYED set if it notices the
+        * CANCELLING flag in time
+        */
+       if (test_bit(SLOW_WORK_DELAYED, &work->flags)) {
+               struct delayed_slow_work *dwork =
+                       container_of(work, struct delayed_slow_work, work);
+               del_timer_sync(&dwork->timer);
+       }
+
+       spin_lock_irq(&slow_work_queue_lock);
+
+       if (test_bit(SLOW_WORK_DELAYED, &work->flags)) {
+               /* the timer routine aborted or never happened, so we are left
+                * holding the timer's reference on the item and should just
+                * drop the pending flag and wait for any ongoing execution to
+                * finish */
+               struct delayed_slow_work *dwork =
+                       container_of(work, struct delayed_slow_work, work);
+
+               BUG_ON(timer_pending(&dwork->timer));
+               BUG_ON(!list_empty(&work->link));
+
+               clear_bit(SLOW_WORK_DELAYED, &work->flags);
+               put = true;
+               clear_bit(SLOW_WORK_PENDING, &work->flags);
+
+       } else if (test_bit(SLOW_WORK_PENDING, &work->flags) &&
+                  !list_empty(&work->link)) {
+               /* the link in the pending queue holds a reference on the item
+                * that we will need to release */
+               list_del_init(&work->link);
+               wait = false;
+               put = true;
+               clear_bit(SLOW_WORK_PENDING, &work->flags);
+
+       } else if (test_and_clear_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags)) {
+               /* the executor is holding our only reference on the item, so
+                * we merely need to wait for it to finish executing */
+               clear_bit(SLOW_WORK_PENDING, &work->flags);
+       }
+
+       spin_unlock_irq(&slow_work_queue_lock);
+
+       /* the EXECUTING flag is set by the executor whilst the spinlock is set
+        * and before the item is dequeued - so assuming the above doesn't
+        * actually dequeue it, simply waiting for the EXECUTING flag to be
+        * released here should be sufficient */
+       if (wait)
+               wait_on_bit(&work->flags, SLOW_WORK_EXECUTING, slow_work_wait,
+                           TASK_UNINTERRUPTIBLE);
+
+       clear_bit(SLOW_WORK_CANCELLING, &work->flags);
+       if (put)
+               slow_work_put_ref(work);
+}
+EXPORT_SYMBOL(slow_work_cancel);
+
+/*
+ * Handle expiry of the delay timer, indicating that a delayed slow work item
+ * should now be queued if not cancelled
+ */
+static void delayed_slow_work_timer(unsigned long data)
+{
+       wait_queue_head_t *wfo_wq;
+       struct list_head *queue;
+       struct slow_work *work = (struct slow_work *) data;
+       unsigned long flags;
+       bool queued = false, put = false, first = false;
+
+       if (test_bit(SLOW_WORK_VERY_SLOW, &work->flags)) {
+               wfo_wq = &vslow_work_queue_waits_for_occupation;
+               queue = &vslow_work_queue;
+       } else {
+               wfo_wq = &slow_work_queue_waits_for_occupation;
+               queue = &slow_work_queue;
+       }
+
+       spin_lock_irqsave(&slow_work_queue_lock, flags);
+       if (likely(!test_bit(SLOW_WORK_CANCELLING, &work->flags))) {
+               clear_bit(SLOW_WORK_DELAYED, &work->flags);
+
+               if (test_bit(SLOW_WORK_EXECUTING, &work->flags)) {
+                       /* we discard the reference the timer was holding in
+                        * favour of the one the executor holds */
+                       set_bit(SLOW_WORK_ENQ_DEFERRED, &work->flags);
+                       put = true;
+               } else {
+                       slow_work_mark_time(work);
+                       list_add_tail(&work->link, queue);
+                       queued = true;
+                       if (work->link.prev == queue)
+                               first = true;
+               }
+       }
+
+       spin_unlock_irqrestore(&slow_work_queue_lock, flags);
+       if (put)
+               slow_work_put_ref(work);
+       if (first)
+               wake_up(wfo_wq);
+       if (queued)
+               wake_up(&slow_work_thread_wq);
+}
+
+/**
+ * delayed_slow_work_enqueue - Schedule a delayed slow work item for processing
+ * @dwork: The delayed work item to queue
+ * @delay: When to start executing the work, in jiffies from now
+ *
+ * This is similar to slow_work_enqueue(), but it adds a delay before the work
+ * is actually queued for processing.
+ *
+ * The item can have delayed processing requested on it whilst it is being
+ * executed.  The delay will begin immediately, and if it expires before the
+ * item finishes executing, the item will be placed back on the queue when it
+ * has finished executing.
+ */
+int delayed_slow_work_enqueue(struct delayed_slow_work *dwork,
+                             unsigned long delay)
+{
+       struct slow_work *work = &dwork->work;
+       unsigned long flags;
+       int ret;
+
+       if (delay == 0)
+               return slow_work_enqueue(&dwork->work);
+
+       BUG_ON(slow_work_user_count <= 0);
+       BUG_ON(!work);
+       BUG_ON(!work->ops);
+
+       if (test_bit(SLOW_WORK_CANCELLING, &work->flags))
+               return -ECANCELED;
+
+       if (!test_and_set_bit_lock(SLOW_WORK_PENDING, &work->flags)) {
+               spin_lock_irqsave(&slow_work_queue_lock, flags);
+
+               if (test_bit(SLOW_WORK_CANCELLING, &work->flags))
+                       goto cancelled;
+
+               /* the timer holds a reference whilst it is pending */
+               ret = work->ops->get_ref(work);
+               if (ret < 0)
+                       goto cant_get_ref;
+
+               if (test_and_set_bit(SLOW_WORK_DELAYED, &work->flags))
+                       BUG();
+               dwork->timer.expires = jiffies + delay;
+               dwork->timer.data = (unsigned long) work;
+               dwork->timer.function = delayed_slow_work_timer;
+               add_timer(&dwork->timer);
+
+               spin_unlock_irqrestore(&slow_work_queue_lock, flags);
+       }
+
+       return 0;
+
+cancelled:
+       ret = -ECANCELED;
+cant_get_ref:
+       spin_unlock_irqrestore(&slow_work_queue_lock, flags);
+       return ret;
+}
+EXPORT_SYMBOL(delayed_slow_work_enqueue);
+
 /*
  * Schedule a cull of the thread pool at some time in the near future
  */
@@ -368,13 +704,23 @@ static inline bool slow_work_available(int vsmax)
  */
 static int slow_work_thread(void *_data)
 {
-       int vsmax;
+       int vsmax, id;
 
        DEFINE_WAIT(wait);
 
        set_freezable();
        set_user_nice(current, -5);
 
+       /* allocate ourselves an ID */
+       spin_lock_irq(&slow_work_queue_lock);
+       id = find_first_zero_bit(slow_work_ids, SLOW_WORK_THREAD_LIMIT);
+       BUG_ON(id < 0 || id >= SLOW_WORK_THREAD_LIMIT);
+       __set_bit(id, slow_work_ids);
+       slow_work_set_thread_pid(id, current->pid);
+       spin_unlock_irq(&slow_work_queue_lock);
+
+       sprintf(current->comm, "kslowd%03u", id);
+
        for (;;) {
                vsmax = vslow_work_proportion;
                vsmax *= atomic_read(&slow_work_thread_count);
@@ -395,7 +741,7 @@ static int slow_work_thread(void *_data)
                vsmax *= atomic_read(&slow_work_thread_count);
                vsmax /= 100;
 
-               if (slow_work_available(vsmax) && slow_work_execute()) {
+               if (slow_work_available(vsmax) && slow_work_execute(id)) {
                        cond_resched();
                        if (list_empty(&slow_work_queue) &&
                            list_empty(&vslow_work_queue) &&
@@ -412,6 +758,11 @@ static int slow_work_thread(void *_data)
                        break;
        }
 
+       spin_lock_irq(&slow_work_queue_lock);
+       slow_work_set_thread_pid(id, 0);
+       __clear_bit(id, slow_work_ids);
+       spin_unlock_irq(&slow_work_queue_lock);
+
        if (atomic_dec_and_test(&slow_work_thread_count))
                complete_and_exit(&slow_work_last_thread_exited, 0);
        return 0;
@@ -426,21 +777,6 @@ static void slow_work_cull_timeout(unsigned long data)
        wake_up(&slow_work_thread_wq);
 }
 
-/*
- * Get a reference on slow work thread starter
- */
-static int slow_work_new_thread_get_ref(struct slow_work *work)
-{
-       return 0;
-}
-
-/*
- * Drop a reference on slow work thread starter
- */
-static void slow_work_new_thread_put_ref(struct slow_work *work)
-{
-}
-
 /*
  * Start a new slow work thread
  */
@@ -475,9 +811,11 @@ static void slow_work_new_thread_execute(struct slow_work *work)
 }
 
 static const struct slow_work_ops slow_work_new_thread_ops = {
-       .get_ref        = slow_work_new_thread_get_ref,
-       .put_ref        = slow_work_new_thread_put_ref,
+       .owner          = THIS_MODULE,
        .execute        = slow_work_new_thread_execute,
+#ifdef CONFIG_SLOW_WORK_PROC
+       .desc           = slow_work_new_thread_desc,
+#endif
 };
 
 /*
@@ -546,12 +884,13 @@ static int slow_work_max_threads_sysctl(struct ctl_table *table, int write,
 
 /**
  * slow_work_register_user - Register a user of the facility
+ * @module: The module about to make use of the facility
  *
  * Register a user of the facility, starting up the initial threads if there
  * aren't any other users at this point.  This will return 0 if successful, or
  * an error if not.
  */
-int slow_work_register_user(void)
+int slow_work_register_user(struct module *module)
 {
        struct task_struct *p;
        int loop;
@@ -598,14 +937,79 @@ error:
 }
 EXPORT_SYMBOL(slow_work_register_user);
 
+/*
+ * wait for all outstanding items from the calling module to complete
+ * - note that more items may be queued whilst we're waiting
+ */
+static void slow_work_wait_for_items(struct module *module)
+{
+       DECLARE_WAITQUEUE(myself, current);
+       struct slow_work *work;
+       int loop;
+
+       mutex_lock(&slow_work_unreg_sync_lock);
+       add_wait_queue(&slow_work_unreg_wq, &myself);
+
+       for (;;) {
+               spin_lock_irq(&slow_work_queue_lock);
+
+               /* first of all, we wait for the last queued item in each list
+                * to be processed */
+               list_for_each_entry_reverse(work, &vslow_work_queue, link) {
+                       if (work->owner == module) {
+                               set_current_state(TASK_UNINTERRUPTIBLE);
+                               slow_work_unreg_work_item = work;
+                               goto do_wait;
+                       }
+               }
+               list_for_each_entry_reverse(work, &slow_work_queue, link) {
+                       if (work->owner == module) {
+                               set_current_state(TASK_UNINTERRUPTIBLE);
+                               slow_work_unreg_work_item = work;
+                               goto do_wait;
+                       }
+               }
+
+               /* then we wait for the items being processed to finish */
+               slow_work_unreg_module = module;
+               smp_mb();
+               for (loop = 0; loop < SLOW_WORK_THREAD_LIMIT; loop++) {
+                       if (slow_work_thread_processing[loop] == module)
+                               goto do_wait;
+               }
+               spin_unlock_irq(&slow_work_queue_lock);
+               break; /* okay, we're done */
+
+       do_wait:
+               spin_unlock_irq(&slow_work_queue_lock);
+               schedule();
+               slow_work_unreg_work_item = NULL;
+               slow_work_unreg_module = NULL;
+       }
+
+       remove_wait_queue(&slow_work_unreg_wq, &myself);
+       mutex_unlock(&slow_work_unreg_sync_lock);
+}
+
 /**
  * slow_work_unregister_user - Unregister a user of the facility
+ * @module: The module whose items should be cleared
  *
  * Unregister a user of the facility, killing all the threads if this was the
  * last one.
+ *
+ * This waits for all the work items belonging to the nominated module to go
+ * away before proceeding.
  */
-void slow_work_unregister_user(void)
+void slow_work_unregister_user(struct module *module)
 {
+       /* first of all, wait for all outstanding items from the calling module
+        * to complete */
+       if (module)
+               slow_work_wait_for_items(module);
+
+       /* then we can actually go about shutting down the facility if need
+        * be */
        mutex_lock(&slow_work_user_lock);
 
        BUG_ON(slow_work_user_count <= 0);
@@ -638,6 +1042,10 @@ static int __init init_slow_work(void)
 #ifdef CONFIG_SYSCTL
        if (slow_work_max_max_threads < nr_cpus * 2)
                slow_work_max_max_threads = nr_cpus * 2;
+#endif
+#ifdef CONFIG_SLOW_WORK_PROC
+       proc_create("slow_work_rq", S_IFREG | 0400, NULL,
+                   &slow_work_runqueue_fops);
 #endif
        return 0;
 }
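
For orientation, here is a minimal sketch of a slow-work user under the reworked API, assuming the register/unregister signatures and the ops .owner field introduced above; my_execute(), my_ops and example_work are hypothetical names.

/* Minimal sketch, assuming the module-aware slow-work API above. */
#include <linux/module.h>
#include <linux/slow-work.h>

static void my_execute(struct slow_work *work)
{
        /* long-running, sleepable work goes here */
}

static const struct slow_work_ops my_ops = {
        .owner          = THIS_MODULE,  /* lets unregister wait for our items */
        .execute        = my_execute,
};

static struct slow_work example_work;

static int __init example_init(void)
{
        int ret = slow_work_register_user(THIS_MODULE);

        if (ret < 0)
                return ret;
        slow_work_init(&example_work, &my_ops);
        return slow_work_enqueue(&example_work);
}

static void __exit example_exit(void)
{
        /* blocks until every item owned by this module has completed */
        slow_work_unregister_user(THIS_MODULE);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
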
diff --git a/kernel/slow-work.h b/kernel/slow-work.h
new file mode 100644 (file)
index 0000000..3c2f007
--- /dev/null
@@ -0,0 +1,72 @@
+/* Slow work private definitions
+ *
+ * Copyright (C) 2009 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#define SLOW_WORK_CULL_TIMEOUT (5 * HZ)        /* cull threads 5s after running out of
+                                        * things to do */
+#define SLOW_WORK_OOM_TIMEOUT (5 * HZ) /* can't start new threads for 5s after
+                                        * OOM */
+
+#define SLOW_WORK_THREAD_LIMIT 255     /* abs maximum number of slow-work threads */
+
+/*
+ * slow-work.c
+ */
+#ifdef CONFIG_SLOW_WORK_PROC
+extern struct slow_work *slow_work_execs[];
+extern pid_t slow_work_pids[];
+extern rwlock_t slow_work_execs_lock;
+#endif
+
+extern struct list_head slow_work_queue;
+extern struct list_head vslow_work_queue;
+extern spinlock_t slow_work_queue_lock;
+
+/*
+ * slow-work-proc.c
+ */
+#ifdef CONFIG_SLOW_WORK_PROC
+extern const struct file_operations slow_work_runqueue_fops;
+
+extern void slow_work_new_thread_desc(struct slow_work *, struct seq_file *);
+#endif
+
+/*
+ * Helper functions
+ */
+static inline void slow_work_set_thread_pid(int id, pid_t pid)
+{
+#ifdef CONFIG_SLOW_WORK_PROC
+       slow_work_pids[id] = pid;
+#endif
+}
+
+static inline void slow_work_mark_time(struct slow_work *work)
+{
+#ifdef CONFIG_SLOW_WORK_PROC
+       work->mark = CURRENT_TIME;
+#endif
+}
+
+static inline void slow_work_begin_exec(int id, struct slow_work *work)
+{
+#ifdef CONFIG_SLOW_WORK_PROC
+       slow_work_execs[id] = work;
+#endif
+}
+
+static inline void slow_work_end_exec(int id, struct slow_work *work)
+{
+#ifdef CONFIG_SLOW_WORK_PROC
+       write_lock(&slow_work_execs_lock);
+       slow_work_execs[id] = NULL;
+       write_unlock(&slow_work_execs_lock);
+#endif
+}
index 12328147132c0e6e560ff2e3de39c2fda2a4f116..67e526b6ae815bbc846fd0424d43399f9fb5a0ba 100644 (file)
@@ -692,31 +692,29 @@ int schedule_on_each_cpu(work_func_t func)
        if (!works)
                return -ENOMEM;
 
+       get_online_cpus();
+
        /*
-        * when running in keventd don't schedule a work item on itself.
-        * Can just call directly because the work queue is already bound.
-        * This also is faster.
-        * Make this a generic parameter for other workqueues?
+        * When running in keventd don't schedule a work item on
+        * itself.  Can just call directly because the work queue is
+        * already bound.  This also is faster.
         */
-       if (current_is_keventd()) {
+       if (current_is_keventd())
                orig = raw_smp_processor_id();
-               INIT_WORK(per_cpu_ptr(works, orig), func);
-               func(per_cpu_ptr(works, orig));
-       }
 
-       get_online_cpus();
        for_each_online_cpu(cpu) {
                struct work_struct *work = per_cpu_ptr(works, cpu);
 
-               if (cpu == orig)
-                       continue;
                INIT_WORK(work, func);
-               schedule_work_on(cpu, work);
-       }
-       for_each_online_cpu(cpu) {
                if (cpu != orig)
-                       flush_work(per_cpu_ptr(works, cpu));
+                       schedule_work_on(cpu, work);
        }
+       if (orig >= 0)
+               func(per_cpu_ptr(works, orig));
+
+       for_each_online_cpu(cpu)
+               flush_work(per_cpu_ptr(works, cpu));
+
        put_online_cpus();
        free_percpu(works);
        return 0;
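
The rework above queues the work item on every other CPU first, invokes the local callback directly when called from keventd, and only then flushes. A hedged sketch of a caller; drain_local_caches() and drain_all_caches() are made-up names.

#include <linux/workqueue.h>

/* Hypothetical per-CPU callback; schedule_on_each_cpu() runs it once
 * on every online CPU and returns only after all of them finish. */
static void drain_local_caches(struct work_struct *unused)
{
        /* per-CPU drain work here */
}

static int drain_all_caches(void)
{
        return schedule_on_each_cpu(drain_local_caches);
}
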
index 23abbd93cae1fd50b87b2ed5f8ffd95d940230b1..92cdd9936e3d2797f4d9b3804ad648b6bbec3c1b 100644 (file)
@@ -200,6 +200,9 @@ radix_tree_node_free(struct radix_tree_node *node)
  * ensure that the addition of a single element in the tree cannot fail.  On
  * success, return zero, with preemption disabled.  On error, return -ENOMEM
  * with preemption not disabled.
+ *
+ * To make use of this facility, the radix tree must be initialised without
+ * __GFP_WAIT being passed to INIT_RADIX_TREE().
  */
 int radix_tree_preload(gfp_t gfp_mask)
 {
@@ -543,7 +546,6 @@ out:
 }
 EXPORT_SYMBOL(radix_tree_tag_clear);
 
-#ifndef __KERNEL__     /* Only the test harness uses this at present */
 /**
  * radix_tree_tag_get - get a tag on a radix tree node
  * @root:              radix tree root
@@ -606,7 +608,6 @@ int radix_tree_tag_get(struct radix_tree_root *root,
        }
 }
 EXPORT_SYMBOL(radix_tree_tag_get);
-#endif
 
 /**
  *     radix_tree_next_hole    -    find the next hole (not-present entry)
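
A hedged sketch of the preload discipline the new comment documents; the tree and helper are hypothetical. The tree's own gfp mask omits __GFP_WAIT, so the sleeping allocation is done up front by radix_tree_preload():

#include <linux/radix-tree.h>

/* Hypothetical tree: initialised without __GFP_WAIT, per the comment. */
static RADIX_TREE(example_tree, GFP_ATOMIC);

static int example_insert(unsigned long index, void *item)
{
        int ret;

        ret = radix_tree_preload(GFP_KERNEL);   /* may sleep and allocate */
        if (ret < 0)
                return ret;
        /* preemption is now disabled; the insert cannot fail for memory */
        ret = radix_tree_insert(&example_tree, index, item);
        radix_tree_preload_end();               /* re-enables preemption */
        return ret;
}
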
index b19b87af65a3553a0650d679cabfac9fecd4eb3b..e96421ab9a9a0a1a8ddd4496e32789acdc4b0749 100644 (file)
@@ -246,13 +246,17 @@ EXPORT_SYMBOL(strlcat);
 #undef strcmp
 int strcmp(const char *cs, const char *ct)
 {
-       signed char __res;
+       unsigned char c1, c2;
 
        while (1) {
-               if ((__res = *cs - *ct++) != 0 || !*cs++)
+               c1 = *cs++;
+               c2 = *ct++;
+               if (c1 != c2)
+                       return c1 < c2 ? -1 : 1;
+               if (!c1)
                        break;
        }
-       return __res;
+       return 0;
 }
 EXPORT_SYMBOL(strcmp);
 #endif
@@ -266,14 +270,18 @@ EXPORT_SYMBOL(strcmp);
  */
 int strncmp(const char *cs, const char *ct, size_t count)
 {
-       signed char __res = 0;
+       unsigned char c1, c2;
 
        while (count) {
-               if ((__res = *cs - *ct++) != 0 || !*cs++)
+               c1 = *cs++;
+               c2 = *ct++;
+               if (c1 != c2)
+                       return c1 < c2 ? -1 : 1;
+               if (!c1)
                        break;
                count--;
        }
-       return __res;
+       return 0;
 }
 EXPORT_SYMBOL(strncmp);
 #endif
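
Both rewrites compare bytes as unsigned char and clamp the result to -1/0/1, so bytes above 0x7f can no longer overflow a signed char subtraction. A small userspace illustration of the semantics the kernel versions now share with standard strcmp():

#include <stdio.h>
#include <string.h>

int main(void)
{
        /* 0x80 > 0x7f when compared as unsigned bytes; the old
         * signed-char subtraction could report the opposite sign. */
        printf("%d\n", strcmp("\x80", "\x7f") > 0);     /* prints 1 */
        return 0;
}
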
index fd3386242cf06a9f588a363362ecc94b646ddc41..44cf6f0a3a6d34f1cdf90a0f4e65dabf3b9c0623 100644 (file)
@@ -128,12 +128,9 @@ config SPARSEMEM_VMEMMAP
 config MEMORY_HOTPLUG
        bool "Allow for memory hot-add"
        depends on SPARSEMEM || X86_64_ACPI_NUMA
-       depends on HOTPLUG && !(HIBERNATION && !S390) && ARCH_ENABLE_MEMORY_HOTPLUG
+       depends on HOTPLUG && ARCH_ENABLE_MEMORY_HOTPLUG
        depends on (IA64 || X86 || PPC_BOOK3S_64 || SUPERH || S390)
 
-comment "Memory hotplug is currently incompatible with Software Suspend"
-       depends on SPARSEMEM && HOTPLUG && HIBERNATION && !S390
-
 config MEMORY_HOTPLUG_SPARSE
        def_bool y
        depends on SPARSEMEM && MEMORY_HOTPLUG
index 11aee09dd2a611ea7529d4f1b863d383d533b7b7..67a33a5a1a93846bda0ce57ee37fe8d39486f60a 100644 (file)
@@ -604,10 +604,14 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
 
        /*
         * Finally, kill the kernel threads. We don't need to be RCU
-        * safe anymore, since the bdi is gone from visibility.
+        * safe anymore, since the bdi is gone from visibility. Force
+        * unfreeze of the thread before calling kthread_stop(), otherwise
+        * it would never exit if it is currently stuck in the refrigerator.
         */
-       list_for_each_entry(wb, &bdi->wb_list, list)
+       list_for_each_entry(wb, &bdi->wb_list, list) {
+               wb->task->flags &= ~PF_FROZEN;
                kthread_stop(wb->task);
+       }
 }
 
 /*
index 821dee596377fadcd93e2116e117eea0a0340e97..2047465cd27cf5b1829979057459857184a42b2e 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/migrate.h>
 #include <linux/page-isolation.h>
 #include <linux/pfn.h>
+#include <linux/suspend.h>
 
 #include <asm/tlbflush.h>
 
@@ -447,7 +448,8 @@ int online_pages(unsigned long pfn, unsigned long nr_pages)
 }
 #endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
 
-static pg_data_t *hotadd_new_pgdat(int nid, u64 start)
+/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
+static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
 {
        struct pglist_data *pgdat;
        unsigned long zones_size[MAX_NR_ZONES] = {0};
@@ -484,14 +486,18 @@ int __ref add_memory(int nid, u64 start, u64 size)
        struct resource *res;
        int ret;
 
+       lock_system_sleep();
+
        res = register_memory_resource(start, size);
+       ret = -EEXIST;
        if (!res)
-               return -EEXIST;
+               goto out;
 
        if (!node_online(nid)) {
                pgdat = hotadd_new_pgdat(nid, start);
+               ret = -ENOMEM;
                if (!pgdat)
-                       return -ENOMEM;
+                       goto out;
                new_pgdat = 1;
        }
 
@@ -514,7 +520,8 @@ int __ref add_memory(int nid, u64 start, u64 size)
                BUG_ON(ret);
        }
 
-       return ret;
+       goto out;
+
 error:
        /* rollback pgdat allocation and others */
        if (new_pgdat)
@@ -522,6 +529,8 @@ error:
        if (res)
                release_memory_resource(res);
 
+out:
+       unlock_system_sleep();
        return ret;
 }
 EXPORT_SYMBOL_GPL(add_memory);
@@ -758,6 +767,8 @@ int offline_pages(unsigned long start_pfn,
        if (!test_pages_in_a_zone(start_pfn, end_pfn))
                return -EINVAL;
 
+       lock_system_sleep();
+
        zone = page_zone(pfn_to_page(start_pfn));
        node = zone_to_nid(zone);
        nr_pages = end_pfn - start_pfn;
@@ -765,7 +776,7 @@ int offline_pages(unsigned long start_pfn,
        /* set above range as isolated */
        ret = start_isolate_page_range(start_pfn, end_pfn);
        if (ret)
-               return ret;
+               goto out;
 
        arg.start_pfn = start_pfn;
        arg.nr_pages = nr_pages;
@@ -843,6 +854,7 @@ repeat:
        writeback_set_ratelimit();
 
        memory_notify(MEM_OFFLINE, &arg);
+       unlock_system_sleep();
        return 0;
 
 failed_removal:
@@ -852,6 +864,8 @@ failed_removal:
        /* pushback to free area */
        undo_isolate_page_range(start_pfn, end_pfn);
 
+out:
+       unlock_system_sleep();
        return ret;
 }
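
The pattern running through these hotplug hunks is uniform: take lock_system_sleep() on entry and turn every early return into a goto so the lock is always dropped. A distilled sketch, with hypothetical step functions standing in for the real work:

#include <linux/suspend.h>

static int step_one(void) { return 0; }         /* hypothetical stand-ins */
static int step_two(void) { return 0; }

static int example_hotplug_op(void)
{
        int ret;

        lock_system_sleep();    /* keep hibernation out while we work */

        ret = step_one();
        if (ret < 0)
                goto out;       /* no early return: the lock must drop */

        ret = step_two();
out:
        unlock_system_sleep();
        return ret;
}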
 
index 8836575f9d79dcd9b28c1cfa7d01d034dde9f0c5..a29c5ab5815cef5887f99a381a89e9bc2c312b26 100644 (file)
@@ -281,8 +281,11 @@ out_uninit_applicant:
        if (ngrp)
                vlan_gvrp_uninit_applicant(real_dev);
 out_free_group:
-       if (ngrp)
-               vlan_group_free(ngrp);
+       if (ngrp) {
+               hlist_del_rcu(&ngrp->hlist);
+               /* Free the group once all CPUs are done with it. */
+               call_rcu(&ngrp->rcu, vlan_rcu_free);
+       }
        return err;
 }
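
The fix swaps an immediate vlan_group_free() for the standard unlink-then-call_rcu() idiom, so list walkers under rcu_read_lock() can never see the group freed beneath them. The generic shape of that idiom, with hypothetical names:

#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct example_obj {
        struct hlist_node hlist;
        struct rcu_head rcu;
};

static void example_rcu_free(struct rcu_head *head)
{
        kfree(container_of(head, struct example_obj, rcu));
}

static void example_unlink(struct example_obj *obj)
{
        hlist_del_rcu(&obj->hlist);             /* unpublish first */
        call_rcu(&obj->rcu, example_rcu_free);  /* free after grace period */
}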
 
index a9750984f772a164ebb26cb7af1da01e46ea2cd4..b7c4224f4e7dee01288dd31f4581f7a8821c7a21 100644 (file)
@@ -211,6 +211,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst)
        conn->type  = type;
        conn->mode  = HCI_CM_ACTIVE;
        conn->state = BT_OPEN;
+       conn->auth_type = HCI_AT_GENERAL_BONDING;
 
        conn->power_save = 1;
        conn->disc_timeout = HCI_DISCONN_TIMEOUT;
index 77e9fb130adb4bd6475c53a481f6e176bf5ec240..947f8bbb4bb34e55ca29f57ce95551bd6013306b 100644 (file)
@@ -2205,7 +2205,7 @@ static int l2cap_build_conf_req(struct sock *sk, void *data)
 {
        struct l2cap_pinfo *pi = l2cap_pi(sk);
        struct l2cap_conf_req *req = data;
-       struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_ERTM };
+       struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
        void *ptr = req->data;
 
        BT_DBG("sk %p", sk);
@@ -2394,6 +2394,10 @@ done:
                        rfc.monitor_timeout = L2CAP_DEFAULT_MONITOR_TO;
 
                        pi->conf_state |= L2CAP_CONF_MODE_DONE;
+
+                       l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
+                                       sizeof(rfc), (unsigned long) &rfc);
+
                        break;
 
                case L2CAP_MODE_STREAMING:
@@ -2401,6 +2405,10 @@ done:
                        pi->max_pdu_size = rfc.max_pdu_size;
 
                        pi->conf_state |= L2CAP_CONF_MODE_DONE;
+
+                       l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
+                                       sizeof(rfc), (unsigned long) &rfc);
+
                        break;
 
                default:
@@ -2410,9 +2418,6 @@ done:
                        rfc.mode = pi->mode;
                }
 
-               l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
-                                       sizeof(rfc), (unsigned long) &rfc);
-
                if (result == L2CAP_CONF_SUCCESS)
                        pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
        }
index b8f74cfb1bfdf1365494e87a1b712834f936f287..fe10551d3671053ea6d6a6da57de037f43ad78d0 100644 (file)
@@ -942,14 +942,15 @@ rollback:
        ret = notifier_to_errno(ret);
 
        if (ret) {
-               if (err) {
-                       printk(KERN_ERR
-                              "%s: name change rollback failed: %d.\n",
-                              dev->name, ret);
-               } else {
+               /* err is >= 0 after dev_alloc_name(); otherwise it holds the first errno */
+               if (err >= 0) {
                        err = ret;
                        memcpy(dev->name, oldname, IFNAMSIZ);
                        goto rollback;
+               } else {
+                       printk(KERN_ERR
+                              "%s: name change rollback failed: %d.\n",
+                              dev->name, ret);
                }
        }
 
index 6eb8d47cbf3a563f7d6479183a1b6339afc839cb..6e79e96cb4f29984b019ab0ea74820d0f1de114b 100644 (file)
@@ -363,6 +363,7 @@ struct pktgen_dev {
                                  * device name (not when the inject is
                                  * started as it used to do.)
                                  */
+       char odevname[32];
        struct flow_state *flows;
        unsigned cflows;        /* Concurrent flows (config) */
        unsigned lflow;         /* Flow length  (config) */
@@ -426,7 +427,7 @@ static const char version[] =
 static int pktgen_remove_device(struct pktgen_thread *t, struct pktgen_dev *i);
 static int pktgen_add_device(struct pktgen_thread *t, const char *ifname);
 static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
-                                         const char *ifname);
+                                         const char *ifname, bool exact);
 static int pktgen_device_event(struct notifier_block *, unsigned long, void *);
 static void pktgen_run_all_threads(void);
 static void pktgen_reset_all_threads(void);
@@ -528,7 +529,7 @@ static int pktgen_if_show(struct seq_file *seq, void *v)
        seq_printf(seq,
                   "     frags: %d  delay: %llu  clone_skb: %d  ifname: %s\n",
                   pkt_dev->nfrags, (unsigned long long) pkt_dev->delay,
-                  pkt_dev->clone_skb, pkt_dev->odev->name);
+                  pkt_dev->clone_skb, pkt_dev->odevname);
 
        seq_printf(seq, "     flows: %u flowlen: %u\n", pkt_dev->cflows,
                   pkt_dev->lflow);
@@ -1688,13 +1689,13 @@ static int pktgen_thread_show(struct seq_file *seq, void *v)
        if_lock(t);
        list_for_each_entry(pkt_dev, &t->if_list, list)
                if (pkt_dev->running)
-                       seq_printf(seq, "%s ", pkt_dev->odev->name);
+                       seq_printf(seq, "%s ", pkt_dev->odevname);
 
        seq_printf(seq, "\nStopped: ");
 
        list_for_each_entry(pkt_dev, &t->if_list, list)
                if (!pkt_dev->running)
-                       seq_printf(seq, "%s ", pkt_dev->odev->name);
+                       seq_printf(seq, "%s ", pkt_dev->odevname);
 
        if (t->result[0])
                seq_printf(seq, "\nResult: %s\n", t->result);
@@ -1817,9 +1818,10 @@ static struct pktgen_dev *__pktgen_NN_threads(const char *ifname, int remove)
 {
        struct pktgen_thread *t;
        struct pktgen_dev *pkt_dev = NULL;
+       bool exact = (remove == FIND);
 
        list_for_each_entry(t, &pktgen_threads, th_list) {
-               pkt_dev = pktgen_find_dev(t, ifname);
+               pkt_dev = pktgen_find_dev(t, ifname, exact);
                if (pkt_dev) {
                        if (remove) {
                                if_lock(t);
@@ -1994,7 +1996,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
                       "queue_map_min (zero-based) (%d) exceeds valid range "
                       "[0 - %d] for (%d) queues on %s, resetting\n",
                       pkt_dev->queue_map_min, (ntxq ?: 1) - 1, ntxq,
-                      pkt_dev->odev->name);
+                      pkt_dev->odevname);
                pkt_dev->queue_map_min = ntxq - 1;
        }
        if (pkt_dev->queue_map_max >= ntxq) {
@@ -2002,7 +2004,7 @@ static void pktgen_setup_inject(struct pktgen_dev *pkt_dev)
                       "queue_map_max (zero-based) (%d) exceeds valid range "
                       "[0 - %d] for (%d) queues on %s, resetting\n",
                       pkt_dev->queue_map_max, (ntxq ?: 1) - 1, ntxq,
-                      pkt_dev->odev->name);
+                      pkt_dev->odevname);
                pkt_dev->queue_map_max = ntxq - 1;
        }
 
@@ -3262,7 +3264,7 @@ static int pktgen_stop_device(struct pktgen_dev *pkt_dev)
 
        if (!pkt_dev->running) {
                printk(KERN_WARNING "pktgen: interface: %s is already "
-                      "stopped\n", pkt_dev->odev->name);
+                      "stopped\n", pkt_dev->odevname);
                return -EINVAL;
        }
 
@@ -3464,7 +3466,7 @@ static void pktgen_xmit(struct pktgen_dev *pkt_dev)
        default: /* Drivers are not supposed to return other values! */
                if (net_ratelimit())
                        pr_info("pktgen: %s xmit error: %d\n",
-                               odev->name, ret);
+                               pkt_dev->odevname, ret);
                pkt_dev->errors++;
                /* fallthru */
        case NETDEV_TX_LOCKED:
@@ -3566,13 +3568,18 @@ static int pktgen_thread_worker(void *arg)
 }
 
 static struct pktgen_dev *pktgen_find_dev(struct pktgen_thread *t,
-                                         const char *ifname)
+                                         const char *ifname, bool exact)
 {
        struct pktgen_dev *p, *pkt_dev = NULL;
-       if_lock(t);
+       size_t len = strlen(ifname);
 
+       if_lock(t);
        list_for_each_entry(p, &t->if_list, list)
-               if (strncmp(p->odev->name, ifname, IFNAMSIZ) == 0) {
+               if (strncmp(p->odevname, ifname, len) == 0) {
+                       if (p->odevname[len]) {
+                               if (exact || p->odevname[len] != '@')
+                                       continue;
+                       }
                        pkt_dev = p;
                        break;
                }
@@ -3628,6 +3635,7 @@ static int pktgen_add_device(struct pktgen_thread *t, const char *ifname)
        if (!pkt_dev)
                return -ENOMEM;
 
+       strcpy(pkt_dev->odevname, ifname);
        pkt_dev->flows = vmalloc(MAX_CFLOWS * sizeof(struct flow_state));
        if (pkt_dev->flows == NULL) {
                kfree(pkt_dev);
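
Taken together these hunks make pktgen track devices by the configured name (odevname, which may carry an '@' clone suffix) instead of odev->name. A hedged restatement of the matching rule in pktgen_find_dev(), as plain userspace C:

#include <stdbool.h>
#include <string.h>

/* "eth0" matches odevname "eth0" always, and "eth0@0" only when the
 * caller did not ask for an exact match -- mirroring the logic above. */
static bool odevname_matches(const char *odevname, const char *ifname,
                             bool exact)
{
        size_t len = strlen(ifname);

        if (strncmp(odevname, ifname, len) != 0)
                return false;
        if (odevname[len] == '\0')
                return true;
        return !exact && odevname[len] == '@';
}
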
index 80a96166df3910042a6c5b516ae77f202288f78f..ec85681a7dd83765878cd6c791538a7941732178 100644 (file)
@@ -2701,7 +2701,8 @@ int skb_gro_receive(struct sk_buff **head, struct sk_buff *skb)
 
                NAPI_GRO_CB(skb)->free = 1;
                goto done;
-       }
+       } else if (skb_gro_len(p) != pinfo->gso_size)
+               return -E2BIG;
 
        headroom = skb_headroom(p);
        nskb = netdev_alloc_skb(p->dev, headroom + skb_gro_offset(p));
index 575f9bd51ccdf745c62125a45611d845dc015b2b..d3fe10be721956eca29a46cdfa7d58abced87744 100644 (file)
@@ -563,7 +563,7 @@ out_oversize:
                printk(KERN_INFO "Oversized IP packet from %pI4.\n",
                        &qp->saddr);
 out_fail:
-       IP_INC_STATS_BH(dev_net(dev), IPSTATS_MIB_REASMFAILS);
+       IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
        return err;
 }
 
index 630a56df7b47ee80c9c77dcbb62048dc73ed9fc5..99508d66a64227fd532718486fa9eed924963e26 100644 (file)
@@ -483,8 +483,10 @@ static int vif_add(struct net *net, struct vifctl *vifc, int mrtsock)
                return -EINVAL;
        }
 
-       if ((in_dev = __in_dev_get_rtnl(dev)) == NULL)
+       if ((in_dev = __in_dev_get_rtnl(dev)) == NULL) {
+               dev_put(dev);
                return -EADDRNOTAVAIL;
+       }
        IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
        ip_rt_multicast_event(in_dev);
 
index 98440ad82558064ce4eb7ee9ad525f9725413739..f1813bc7108811a50ff8284775ad053623903a2e 100644 (file)
@@ -1183,7 +1183,9 @@ void tcp_cleanup_rbuf(struct sock *sk, int copied)
 #if TCP_DEBUG
        struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
 
-       WARN_ON(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
+       WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
+            KERN_INFO "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
+            tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
 #endif
 
        if (inet_csk_ack_scheduled(sk)) {
@@ -1430,11 +1432,13 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                        /* Now that we have two receive queues this
                         * shouldn't happen.
                         */
-                       if (before(*seq, TCP_SKB_CB(skb)->seq)) {
-                               printk(KERN_INFO "recvmsg bug: copied %X "
-                                      "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
+                       if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
+                            KERN_INFO "recvmsg bug: copied %X "
+                                      "seq %X rcvnxt %X fl %X\n", *seq,
+                                      TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
+                                      flags))
                                break;
-                       }
+
                        offset = *seq - TCP_SKB_CB(skb)->seq;
                        if (tcp_hdr(skb)->syn)
                                offset--;
@@ -1443,8 +1447,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
                        if (tcp_hdr(skb)->fin)
                                goto found_fin_ok;
                        WARN(!(flags & MSG_PEEK), KERN_INFO "recvmsg bug 2: "
-                                       "copied %X seq %X\n", *seq,
-                                       TCP_SKB_CB(skb)->seq);
+                                       "copied %X seq %X rcvnxt %X fl %X\n",
+                                       *seq, TCP_SKB_CB(skb)->seq,
+                                       tp->rcv_nxt, flags);
                }
 
                /* Well, if we have backlog, try to process it now yet. */
index bc064d7933ff692e2728bfc68c81ded152c930c1..ce8e0e772bab773cd131ea7077a1be79af96695d 100644 (file)
@@ -85,10 +85,6 @@ void ieee80211_sta_stop_rx_ba_session(struct ieee80211_sub_if_data *sdata, u8 *r
        struct ieee80211_local *local = sdata->local;
        struct sta_info *sta;
 
-       /* stop HW Rx aggregation. ampdu_action existence
-        * already verified in session init so we add the BUG_ON */
-       BUG_ON(!local->ops->ampdu_action);
-
        rcu_read_lock();
 
        sta = sta_info_get(local, ra);
index b09948ceec4ae1c031754c08c59d425de3e5ab7b..89e238b001de936e4585f2eae73b3d09ae8cb3f7 100644 (file)
@@ -123,13 +123,18 @@ void ieee80211_send_bar(struct ieee80211_sub_if_data *sdata, u8 *ra, u16 tid, u1
        ieee80211_tx_skb(sdata, skb, 0);
 }
 
-static int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
-                                          enum ieee80211_back_parties initiator)
+int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
+                                   enum ieee80211_back_parties initiator)
 {
        struct ieee80211_local *local = sta->local;
        int ret;
        u8 *state;
 
+#ifdef CONFIG_MAC80211_HT_DEBUG
+       printk(KERN_DEBUG "Tx BA session stop requested for %pM tid %u\n",
+              sta->sta.addr, tid);
+#endif /* CONFIG_MAC80211_HT_DEBUG */
+
        state = &sta->ampdu_mlme.tid_state_tx[tid];
 
        if (*state == HT_AGG_STATE_OPERATIONAL)
@@ -143,7 +148,6 @@ static int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
 
        /* HW shall not deny going back to legacy */
        if (WARN_ON(ret)) {
-               *state = HT_AGG_STATE_OPERATIONAL;
                /*
                 * We may have pending packets get stuck in this case...
                 * Not bothering with a workaround for now.
@@ -173,12 +177,14 @@ static void sta_addba_resp_timer_expired(unsigned long data)
 
        /* check if the TID waits for addBA response */
        spin_lock_bh(&sta->lock);
-       if (!(*state & HT_ADDBA_REQUESTED_MSK)) {
+       if ((*state & (HT_ADDBA_REQUESTED_MSK | HT_ADDBA_RECEIVED_MSK)) !=
+                                               HT_ADDBA_REQUESTED_MSK) {
                spin_unlock_bh(&sta->lock);
                *state = HT_AGG_STATE_IDLE;
 #ifdef CONFIG_MAC80211_HT_DEBUG
                printk(KERN_DEBUG "timer expired on tid %d but we are not "
-                               "expecting addBA response there", tid);
+                               "(or no longer) expecting addBA response there",
+                       tid);
 #endif
                return;
        }
@@ -523,11 +529,6 @@ int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
                goto unlock;
        }
 
-#ifdef CONFIG_MAC80211_HT_DEBUG
-       printk(KERN_DEBUG "Tx BA session stop requested for %pM tid %u\n",
-              sta->sta.addr, tid);
-#endif /* CONFIG_MAC80211_HT_DEBUG */
-
        ret = ___ieee80211_stop_tx_ba_session(sta, tid, initiator);
 
  unlock:
@@ -543,7 +544,7 @@ int ieee80211_stop_tx_ba_session(struct ieee80211_hw *hw,
        struct sta_info *sta;
        int ret = 0;
 
-       if (WARN_ON(!local->ops->ampdu_action))
+       if (!local->ops->ampdu_action)
                return -EINVAL;
 
        if (tid >= STA_TID_NUM)
@@ -666,21 +667,21 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
 
        state = &sta->ampdu_mlme.tid_state_tx[tid];
 
-       del_timer_sync(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
-
        spin_lock_bh(&sta->lock);
 
        if (!(*state & HT_ADDBA_REQUESTED_MSK))
-               goto timer_still_needed;
+               goto out;
 
        if (mgmt->u.action.u.addba_resp.dialog_token !=
                sta->ampdu_mlme.tid_tx[tid]->dialog_token) {
 #ifdef CONFIG_MAC80211_HT_DEBUG
                printk(KERN_DEBUG "wrong addBA response token, tid %d\n", tid);
 #endif /* CONFIG_MAC80211_HT_DEBUG */
-               goto timer_still_needed;
+               goto out;
        }
 
+       del_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
+
 #ifdef CONFIG_MAC80211_HT_DEBUG
        printk(KERN_DEBUG "switched off addBA timer for tid %d \n", tid);
 #endif /* CONFIG_MAC80211_HT_DEBUG */
@@ -699,10 +700,6 @@ void ieee80211_process_addba_resp(struct ieee80211_local *local,
                ___ieee80211_stop_tx_ba_session(sta, tid, WLAN_BACK_INITIATOR);
        }
 
-       goto out;
-
- timer_still_needed:
-       add_timer(&sta->ampdu_mlme.tid_tx[tid]->addba_resp_timer);
  out:
        spin_unlock_bh(&sta->lock);
 }
index 48ef1a282b91b8f1cf0bf9c3149c6b66a0467c2a..cdc58e61d921737e339ad0f13701e336f29c76c4 100644 (file)
@@ -141,7 +141,6 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
                             struct sta_info *sta,
                             struct ieee80211_mgmt *mgmt, size_t len)
 {
-       struct ieee80211_local *local = sdata->local;
        u16 tid, params;
        u16 initiator;
 
@@ -161,10 +160,9 @@ void ieee80211_process_delba(struct ieee80211_sub_if_data *sdata,
                                                 WLAN_BACK_INITIATOR, 0);
        else { /* WLAN_BACK_RECIPIENT */
                spin_lock_bh(&sta->lock);
-               sta->ampdu_mlme.tid_state_tx[tid] =
-                               HT_AGG_STATE_OPERATIONAL;
+               if (sta->ampdu_mlme.tid_state_tx[tid] & HT_ADDBA_REQUESTED_MSK)
+                       ___ieee80211_stop_tx_ba_session(sta, tid,
+                                                       WLAN_BACK_RECIPIENT);
                spin_unlock_bh(&sta->lock);
-               ieee80211_stop_tx_ba_session(&local->hw, sta->sta.addr, tid,
-                                            WLAN_BACK_RECIPIENT);
        }
 }
index 588005c84a6d2b34eb1ef9900ee9a6ef99fcbcd8..10d316e455de26424110df78364beb3cd12732b3 100644 (file)
@@ -661,6 +661,14 @@ struct ieee80211_local {
         */
        bool suspended;
 
+       /*
+        * Resuming is true while suspended, but only while we're reprogramming
+        * the hardware -- at that point it's allowed to use ieee80211_queue_work()
+        * again even though some other parts of the stack are still suspended
+        * and we still drop received frames to avoid waking the stack.
+        */
+       bool resuming;
+
        /*
         * quiescing is true during the suspend process _only_ to
         * ease timer cancelling etc.
@@ -1083,6 +1091,8 @@ void ieee80211_process_addba_request(struct ieee80211_local *local,
 
 int __ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
                                   enum ieee80211_back_parties initiator);
+int ___ieee80211_stop_tx_ba_session(struct sta_info *sta, u16 tid,
+                                   enum ieee80211_back_parties initiator);
 
 /* Spectrum management */
 void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
index aeb65b3d2295a7a3c99c279a3e669e583c282e5e..e6c08da8da26de707ece65fb11bb8d45d7a309de 100644 (file)
@@ -520,9 +520,9 @@ EXPORT_SYMBOL_GPL(ieee80211_iterate_active_interfaces_atomic);
  */
 static bool ieee80211_can_queue_work(struct ieee80211_local *local)
 {
-        if (WARN(local->suspended, "queueing ieee80211 work while "
-                "going to suspend\n"))
-                return false;
+       if (WARN(local->suspended && !local->resuming,
+                "queueing ieee80211 work while going to suspend\n"))
+               return false;
 
        return true;
 }
@@ -1025,13 +1025,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
        struct sta_info *sta;
        unsigned long flags;
        int res;
-       bool from_suspend = local->suspended;
 
-       /*
-        * We're going to start the hardware, at that point
-        * we are no longer suspended and can RX frames.
-        */
-       local->suspended = false;
+       if (local->suspended)
+               local->resuming = true;
 
        /* restart hardware */
        if (local->open_count) {
@@ -1129,11 +1125,14 @@ int ieee80211_reconfig(struct ieee80211_local *local)
         * If this is for hw restart things are still running.
         * We may want to change that later, however.
         */
-       if (!from_suspend)
+       if (!local->suspended)
                return 0;
 
 #ifdef CONFIG_PM
+       /* first set suspended false, then resuming */
        local->suspended = false;
+       mb();
+       local->resuming = false;
 
        list_for_each_entry(sdata, &local->interfaces, list) {
                switch(sdata->vif.type) {
index c93494fef8ef3cfdc8eb22352c59554d0c751eb2..d65d3481919ce1d72a5e476c6d1d083236223a67 100644 (file)
@@ -128,9 +128,8 @@ EXPORT_SYMBOL(nf_log_packet);
 
 #ifdef CONFIG_PROC_FS
 static void *seq_start(struct seq_file *seq, loff_t *pos)
-       __acquires(RCU)
 {
-       rcu_read_lock();
+       mutex_lock(&nf_log_mutex);
 
        if (*pos >= ARRAY_SIZE(nf_loggers))
                return NULL;
@@ -149,9 +148,8 @@ static void *seq_next(struct seq_file *s, void *v, loff_t *pos)
 }
 
 static void seq_stop(struct seq_file *s, void *v)
-       __releases(RCU)
 {
-       rcu_read_unlock();
+       mutex_unlock(&nf_log_mutex);
 }
 
 static int seq_show(struct seq_file *s, void *v)
@@ -161,7 +159,7 @@ static int seq_show(struct seq_file *s, void *v)
        struct nf_logger *t;
        int ret;
 
-       logger = rcu_dereference(nf_loggers[*pos]);
+       logger = nf_loggers[*pos];
 
        if (!logger)
                ret = seq_printf(s, "%2lld NONE (", *pos);
@@ -171,22 +169,16 @@ static int seq_show(struct seq_file *s, void *v)
        if (ret < 0)
                return ret;
 
-       mutex_lock(&nf_log_mutex);
        list_for_each_entry(t, &nf_loggers_l[*pos], list[*pos]) {
                ret = seq_printf(s, "%s", t->name);
-               if (ret < 0) {
-                       mutex_unlock(&nf_log_mutex);
+               if (ret < 0)
                        return ret;
-               }
                if (&t->list[*pos] != nf_loggers_l[*pos].prev) {
                        ret = seq_printf(s, ",");
-                       if (ret < 0) {
-                               mutex_unlock(&nf_log_mutex);
+                       if (ret < 0)
                                return ret;
-                       }
                }
        }
-       mutex_unlock(&nf_log_mutex);
 
        return seq_printf(s, ")\n");
 }
index 2e8089ecd0af31b4ac17cf310a65ee5db97b6e0e..2773be6a71ddf49a2d8297a3183274472a0a7d61 100644 (file)
@@ -112,7 +112,7 @@ static bool limit_mt_check(const struct xt_mtchk_param *par)
 
        priv = kmalloc(sizeof(*priv), GFP_KERNEL);
        if (priv == NULL)
-               return -ENOMEM;
+               return false;
 
        /* For SMP, we only want to use one set of state. */
        r->master = priv;
index 63e190504656dcea83b9706c12c0954466eaa59c..4d1a41bbd5d7b735f63c7f0482472b12f9784472 100644 (file)
@@ -118,7 +118,7 @@ static int xt_osf_remove_callback(struct sock *ctnl, struct sk_buff *skb,
 {
        struct xt_osf_user_finger *f;
        struct xt_osf_finger *sf;
-       int err = ENOENT;
+       int err = -ENOENT;
 
        if (!osf_attrs[OSF_ATTR_FINGER])
                return -EINVAL;
index ba2efb960c6007ecbd9b5b0a49372d21d32ed68a..a001f7c1f71145b61eef6aaa7b6128a121751f6e 100644 (file)
@@ -1189,6 +1189,7 @@ static long rfkill_fop_ioctl(struct file *file, unsigned int cmd,
 #endif
 
 static const struct file_operations rfkill_fops = {
+       .owner          = THIS_MODULE,
        .open           = rfkill_fop_open,
        .read           = rfkill_fop_read,
        .write          = rfkill_fop_write,
index 8450960df24f2001fd79b4de547939b991b56a4a..7eed77a39d0d149a518666ab509604f2c0ac9d4f 100644 (file)
@@ -1485,15 +1485,13 @@ void sctp_assoc_rwnd_decrease(struct sctp_association *asoc, unsigned len)
  * local endpoint and the remote peer.
  */
 int sctp_assoc_set_bind_addr_from_ep(struct sctp_association *asoc,
-                                    gfp_t gfp)
+                                    sctp_scope_t scope, gfp_t gfp)
 {
-       sctp_scope_t scope;
        int flags;
 
        /* Use scoping rules to determine the subset of addresses from
         * the endpoint.
         */
-       scope = sctp_scope(&asoc->peer.active_path->ipaddr);
        flags = (PF_INET6 == asoc->base.sk->sk_family) ? SCTP_ADDR6_ALLOWED : 0;
        if (asoc->peer.ipv4_address)
                flags |= SCTP_ADDR4_PEERSUPP;
index c9f20e28521b069b17583e7120ea44ac0dd9eec5..23e5e97aa6173b5684e9e930af64c5ccce590472 100644 (file)
@@ -423,16 +423,6 @@ void sctp_retransmit_mark(struct sctp_outq *q,
                if ((reason == SCTP_RTXR_FAST_RTX  &&
                            (chunk->fast_retransmit == SCTP_NEED_FRTX)) ||
                    (reason != SCTP_RTXR_FAST_RTX  && !chunk->tsn_gap_acked)) {
-                       /* If this chunk was sent less then 1 rto ago, do not
-                        * retransmit this chunk, but give the peer time
-                        * to acknowlege it.  Do this only when
-                        * retransmitting due to T3 timeout.
-                        */
-                       if (reason == SCTP_RTXR_T3_RTX &&
-                           time_before(jiffies, chunk->sent_at +
-                                                transport->last_rto))
-                               continue;
-
                        /* RFC 2960 6.2.1 Processing a Received SACK
                         *
                         * C) Any time a DATA chunk is marked for
index 8674d49195561fc7ed33a5b086eb4014d80f46a7..efa516b47e816b43bf8e6ad974f8dc5cc16d9ad9 100644 (file)
@@ -480,7 +480,6 @@ static void sctp_do_8_2_transport_strike(struct sctp_association *asoc,
         * that indicates that we have an outstanding HB.
         */
        if (!is_hb || transport->hb_sent) {
-               transport->last_rto = transport->rto;
                transport->rto = min((transport->rto * 2), transport->asoc->rto_max);
        }
 }
index c8fae1983dd15153625d4505500a5030402524d7..d4df45022ffab8f028b45037adf0b528df8273d9 100644 (file)
@@ -384,6 +384,11 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
        if (!new_asoc)
                goto nomem;
 
+       if (sctp_assoc_set_bind_addr_from_ep(new_asoc,
+                                            sctp_scope(sctp_source(chunk)),
+                                            GFP_ATOMIC) < 0)
+               goto nomem_init;
+
        /* The call, sctp_process_init(), can fail on memory allocation.  */
        if (!sctp_process_init(new_asoc, chunk->chunk_hdr->type,
                               sctp_source(chunk),
@@ -401,9 +406,6 @@ sctp_disposition_t sctp_sf_do_5_1B_init(const struct sctp_endpoint *ep,
                len = ntohs(err_chunk->chunk_hdr->length) -
                        sizeof(sctp_chunkhdr_t);
 
-       if (sctp_assoc_set_bind_addr_from_ep(new_asoc, GFP_ATOMIC) < 0)
-               goto nomem_init;
-
        repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len);
        if (!repl)
                goto nomem_init;
@@ -1452,6 +1454,10 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
        if (!new_asoc)
                goto nomem;
 
+       if (sctp_assoc_set_bind_addr_from_ep(new_asoc,
+                               sctp_scope(sctp_source(chunk)), GFP_ATOMIC) < 0)
+               goto nomem;
+
        /* In the outbound INIT ACK the endpoint MUST copy its current
         * Verification Tag and Peers Verification tag into a reserved
         * place (local tie-tag and per tie-tag) within the state cookie.
@@ -1488,9 +1494,6 @@ static sctp_disposition_t sctp_sf_do_unexpected_init(
                        sizeof(sctp_chunkhdr_t);
        }
 
-       if (sctp_assoc_set_bind_addr_from_ep(new_asoc, GFP_ATOMIC) < 0)
-               goto nomem;
-
        repl = sctp_make_init_ack(new_asoc, chunk, GFP_ATOMIC, len);
        if (!repl)
                goto nomem;
index c8d05758661d96cf09c41f3babb983c4f2194ce9..3a95fcb17a9e35c715123316cb458efde2975157 100644 (file)
@@ -1080,6 +1080,13 @@ static int __sctp_connect(struct sock* sk,
                                err = -ENOMEM;
                                goto out_free;
                        }
+
+                       err = sctp_assoc_set_bind_addr_from_ep(asoc, scope,
+                                                             GFP_KERNEL);
+                       if (err < 0) {
+                               goto out_free;
+                       }
+
                }
 
                /* Prime the peer's transport structures.  */
@@ -1095,11 +1102,6 @@ static int __sctp_connect(struct sock* sk,
                walk_size += af->sockaddr_len;
        }
 
-       err = sctp_assoc_set_bind_addr_from_ep(asoc, GFP_KERNEL);
-       if (err < 0) {
-               goto out_free;
-       }
-
        /* In case the user of sctp_connectx() wants an association
         * id back, assign one now.
         */
@@ -1274,22 +1276,30 @@ SCTP_STATIC int sctp_setsockopt_connectx(struct sock* sk,
 }
 
 /*
- * New (hopefully final) interface for the API.  The option buffer is used
- * both for the returned association id and the addresses.
+ * New (hopefully final) interface for the API.
+ * We use the sctp_getaddrs_old structure so that the user-space library
+ * can avoid any unnecessary allocations.  The only different part
+ * is that we store the actual length of the address buffer in the
+ * addr_num structure member.  That way we can re-use the existing
+ * code.
  */
 SCTP_STATIC int sctp_getsockopt_connectx3(struct sock* sk, int len,
                                        char __user *optval,
                                        int __user *optlen)
 {
+       struct sctp_getaddrs_old param;
        sctp_assoc_t assoc_id = 0;
        int err = 0;
 
-       if (len < sizeof(assoc_id))
+       if (len < sizeof(param))
                return -EINVAL;
 
+       if (copy_from_user(&param, optval, sizeof(param)))
+               return -EFAULT;
+
        err = __sctp_setsockopt_connectx(sk,
-                       (struct sockaddr __user *)(optval + sizeof(assoc_id)),
-                       len - sizeof(assoc_id), &assoc_id);
+                       (struct sockaddr __user *)param.addrs,
+                       param.addr_num, &assoc_id);
 
        if (err == 0 || err == -EINPROGRESS) {
                if (copy_to_user(optval, &assoc_id, sizeof(assoc_id)))
@@ -1689,6 +1699,11 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
                        goto out_unlock;
                }
                asoc = new_asoc;
+               err = sctp_assoc_set_bind_addr_from_ep(asoc, scope, GFP_KERNEL);
+               if (err < 0) {
+                       err = -ENOMEM;
+                       goto out_free;
+               }
 
                /* If the SCTP_INIT ancillary data is specified, set all
                 * the association init values accordingly.
@@ -1718,11 +1733,6 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
                        err = -ENOMEM;
                        goto out_free;
                }
-               err = sctp_assoc_set_bind_addr_from_ep(asoc, GFP_KERNEL);
-               if (err < 0) {
-                       err = -ENOMEM;
-                       goto out_free;
-               }
        }
 
        /* ASSERT: we have a valid association at this point.  */
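
A hedged userspace sketch of the connectx3 contract implemented above, assuming an lksctp-style header that defines SOL_SCTP and SCTP_SOCKOPT_CONNECTX3; the param struct here is a local stand-in mirroring the kernel's sctp_getaddrs_old layout, and example_connectx() is a made-up wrapper:

#include <string.h>
#include <sys/socket.h>
#include <netinet/sctp.h>

struct example_connectx_param {
        sctp_assoc_t    assoc_id;
        int             addr_num;       /* byte length of addrs, not a count */
        struct sockaddr *addrs;
};

static int example_connectx(int sd, struct sockaddr *addrs,
                            socklen_t addrs_len, sctp_assoc_t *id)
{
        struct example_connectx_param param;
        socklen_t len = sizeof(param);
        int ret;

        memset(&param, 0, sizeof(param));
        param.addr_num = addrs_len;
        param.addrs = addrs;
        ret = getsockopt(sd, SOL_SCTP, SCTP_SOCKOPT_CONNECTX3,
                         &param, &len);
        if (ret == 0 && id)
                *id = param.assoc_id;   /* kernel writes the id back */
        return ret;
}
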
index c256e4839316b1cadc1480a8a6b482a306882a22..37a1184d789f7fe90408120fef731e76ed6088c4 100644 (file)
@@ -74,7 +74,7 @@ static struct sctp_transport *sctp_transport_init(struct sctp_transport *peer,
         * given destination transport address, set RTO to the protocol
         * parameter 'RTO.Initial'.
         */
-       peer->last_rto = peer->rto = msecs_to_jiffies(sctp_rto_initial);
+       peer->rto = msecs_to_jiffies(sctp_rto_initial);
        peer->rtt = 0;
        peer->rttvar = 0;
        peer->srtt = 0;
@@ -308,7 +308,8 @@ void sctp_transport_route(struct sctp_transport *transport,
                /* Initialize sk->sk_rcv_saddr, if the transport is the
                 * association's active path for getsockname().
                 */
-               if (asoc && (transport == asoc->peer.active_path))
+               if (asoc && (!asoc->peer.primary_path ||
+                               (transport == asoc->peer.active_path)))
                        opt->pf->af->to_sk_saddr(&transport->saddr,
                                                 asoc->base.sk);
        } else
@@ -385,7 +386,6 @@ void sctp_transport_update_rto(struct sctp_transport *tp, __u32 rtt)
                tp->rto = tp->asoc->rto_max;
 
        tp->rtt = rtt;
-       tp->last_rto = tp->rto;
 
        /* Reset rto_pending so that a new RTT measurement is started when a
         * new data chunk is sent.
@@ -601,7 +601,7 @@ void sctp_transport_reset(struct sctp_transport *t)
         */
        t->cwnd = min(4*asoc->pathmtu, max_t(__u32, 2*asoc->pathmtu, 4380));
        t->ssthresh = asoc->peer.i.a_rwnd;
-       t->last_rto = t->rto = asoc->rto_initial;
+       t->rto = asoc->rto_initial;
        t->rtt = 0;
        t->srtt = 0;
        t->rttvar = 0;
index 22e8fd89477fd70894ff15d412d30ccf135f8acf..c7450c8f0a7c040ac756bfd28220152e9d3713b8 100644 (file)
@@ -306,24 +306,25 @@ EXPORT_SYMBOL_GPL(rpc_sockaddr2uaddr);
  * @sap: buffer into which to plant socket address
  * @salen: size of buffer
  *
+ * @uaddr does not have to be '\0'-terminated, but strict_strtoul() and
+ * rpc_pton() require proper string termination to be successful.
+ *
  * Returns the size of the socket address if successful; otherwise
  * zero is returned.
  */
 size_t rpc_uaddr2sockaddr(const char *uaddr, const size_t uaddr_len,
                          struct sockaddr *sap, const size_t salen)
 {
-       char *c, buf[RPCBIND_MAXUADDRLEN];
+       char *c, buf[RPCBIND_MAXUADDRLEN + sizeof('\0')];
        unsigned long portlo, porthi;
        unsigned short port;
 
-       if (uaddr_len > sizeof(buf))
+       if (uaddr_len > RPCBIND_MAXUADDRLEN)
                return 0;
 
        memcpy(buf, uaddr, uaddr_len);
 
-       buf[uaddr_len] = '\n';
-       buf[uaddr_len + 1] = '\0';
-
+       buf[uaddr_len] = '\0';
        c = strrchr(buf, '.');
        if (unlikely(c == NULL))
                return 0;
@@ -332,9 +333,7 @@ size_t rpc_uaddr2sockaddr(const char *uaddr, const size_t uaddr_len,
        if (unlikely(portlo > 255))
                return 0;
 
-       c[0] = '\n';
-       c[1] = '\0';
-
+       *c = '\0';
        c = strrchr(buf, '.');
        if (unlikely(c == NULL))
                return 0;
@@ -345,8 +344,7 @@ size_t rpc_uaddr2sockaddr(const char *uaddr, const size_t uaddr_len,
 
        port = (unsigned short)((porthi << 8) | portlo);
 
-       c[0] = '\0';
-
+       *c = '\0';
        if (rpc_pton(buf, strlen(buf), sap, salen) == 0)
                return 0;
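
rpc_uaddr2sockaddr() parses an RPC universal address, in which the port trails the presentation address as two extra dotted decimal octets; the rewrite simply NUL-terminates in place instead of planting '\n'. A tiny illustration of the port encoding (the address is an example only):

#include <stdio.h>

int main(void)
{
        /* "192.0.2.1.3.235" is a hypothetical universal address for
         * 192.0.2.1 with port (3 << 8) | 235. */
        unsigned int porthi = 3, portlo = 235;

        printf("port = %u\n", (porthi << 8) | portlo);  /* prints 1003 */
        return 0;
}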
 
index 6d69c7ccdcc788519efc06297678cfc50f9082d0..80599e3a7994107cfa06f04e6a70c2d3ff2acbda 100644 (file)
@@ -30,7 +30,7 @@ silentoldconfig: $(obj)/conf
        $< -s $(Kconfig)
 
 localmodconfig: $(obj)/streamline_config.pl $(obj)/conf
-       $(Q)perl $< $(Kconfig) > .tmp.config
+       $(Q)perl $< $(srctree) $(Kconfig) > .tmp.config
        $(Q)if [ -f .config ]; then                             \
                        cmp -s .tmp.config .config ||           \
                        (mv -f .config .config.old.1;           \
@@ -44,7 +44,7 @@ localmodconfig: $(obj)/streamline_config.pl $(obj)/conf
        $(Q)rm -f .tmp.config
 
 localyesconfig: $(obj)/streamline_config.pl $(obj)/conf
-       $(Q)perl $< $(Kconfig) > .tmp.config
+       $(Q)perl $< $(srctree) $(Kconfig) > .tmp.config
        $(Q)sed -i s/=m/=y/ .tmp.config
        $(Q)if [ -f .config ]; then                             \
                        cmp -s .tmp.config .config ||           \
index 95984db8e1e097264d386910ca8d091bc5294a45..0d800820c3cd72d6408079b6dbdec96eac954515 100644 (file)
@@ -43,7 +43,6 @@
 #    make oldconfig
 #
 my $config = ".config";
-my $linuxpath = ".";
 
 my $uname = `uname -r`;
 chomp $uname;
@@ -111,7 +110,11 @@ sub find_config {
 
 find_config;
 
-my @makefiles = `find $linuxpath -name Makefile`;
+# Get the build source and top level Kconfig file (passed in)
+my $ksource = $ARGV[0];
+my $kconfig = $ARGV[1];
+
+my @makefiles = `find $ksource -name Makefile`;
 my %depends;
 my %selects;
 my %prompts;
@@ -119,9 +122,6 @@ my %objects;
 my $var;
 my $cont = 0;
 
-# Get the top level Kconfig file (passed in)
-my $kconfig = $ARGV[0];
-
 # prevent recursion
 my %read_kconfigs;
 
@@ -132,7 +132,7 @@ sub read_kconfig {
     my $config;
     my @kconfigs;
 
-    open(KIN, $kconfig) || die "Can't open $kconfig";
+    open(KIN, "$ksource/$kconfig") || die "Can't open $kconfig";
     while (<KIN>) {
        chomp;
 
index b8dd693f8790a62a4ae55b848bb8398cadbe3248..a4e2b1dac943b8ed05ee39dd23df2fbe56a80844 100644 (file)
@@ -58,11 +58,11 @@ struct ima_iint_cache *ima_iint_insert(struct inode *inode)
 
        if (!ima_initialized)
                return iint;
-       iint = kmem_cache_alloc(iint_cache, GFP_KERNEL);
+       iint = kmem_cache_alloc(iint_cache, GFP_NOFS);
        if (!iint)
                return iint;
 
-       rc = radix_tree_preload(GFP_KERNEL);
+       rc = radix_tree_preload(GFP_NOFS);
        if (rc < 0)
                goto out;
 
index 1f0f8213e2d5dde34a11e220d3fff44c3b70120b..6c160a038b239a7c9e2baa0d3f941c68c7d68398 100644 (file)
@@ -504,6 +504,10 @@ static int aaci_pcm_hw_params(struct snd_pcm_substream *substream,
        int err;
 
        aaci_pcm_hw_free(substream);
+       if (aacirun->pcm_open) {
+               snd_ac97_pcm_close(aacirun->pcm);
+               aacirun->pcm_open = 0;
+       }
 
        err = devdma_hw_alloc(NULL, substream,
                              params_buffer_bytes(params));
@@ -517,7 +521,7 @@ static int aaci_pcm_hw_params(struct snd_pcm_substream *substream,
        else
                err = snd_ac97_pcm_open(aacirun->pcm, params_rate(params),
                                        params_channels(params),
-                                       aacirun->pcm->r[1].slots);
+                                       aacirun->pcm->r[0].slots);
 
        if (err)
                goto out;
index 84a52efdb2d678fc40c3aae5ea74192a0933421c..70583719282bed6d0e62b44cc1c15127fc0b916e 100644 (file)
@@ -11462,6 +11462,7 @@ static struct snd_pci_quirk alc262_cfg_tbl[] = {
        SND_PCI_QUIRK(0x104d, 0x9016, "Sony VAIO", ALC262_AUTO), /* dig-only */
        SND_PCI_QUIRK(0x104d, 0x9025, "Sony VAIO Z21MN", ALC262_TOSHIBA_S06),
        SND_PCI_QUIRK(0x104d, 0x9035, "Sony VAIO VGN-FW170J", ALC262_AUTO),
+       SND_PCI_QUIRK(0x104d, 0x9047, "Sony VAIO Type G", ALC262_AUTO),
        SND_PCI_QUIRK_MASK(0x104d, 0xff00, 0x9000, "Sony VAIO",
                           ALC262_SONY_ASSAMD),
        SND_PCI_QUIRK(0x1179, 0x0001, "Toshiba dynabook SS RX1",
index 0b8dcb5cd729281bfd80264acdb5a58b9025636e..90a0264f75389f49e4c9ed544787ba0375f00c7f 100644 (file)
@@ -265,8 +265,8 @@ static const int bosr_usb_divisor_table[] = {
 #define UPPER_GROUP ((1<<8) | (1<<9) | (1<<10) | (1<<11)        | (1<<15))
 static const unsigned short sr_valid_mask[] = {
        LOWER_GROUP|UPPER_GROUP,        /* Normal, bosr - 0*/
-       LOWER_GROUP|UPPER_GROUP,        /* Normal, bosr - 1*/
        LOWER_GROUP,                    /* Usb, bosr - 0*/
+       LOWER_GROUP|UPPER_GROUP,        /* Normal, bosr - 1*/
        UPPER_GROUP,                    /* Usb, bosr - 1*/
 };
 /*
@@ -625,11 +625,10 @@ static int tlv320aic23_resume(struct platform_device *pdev)
 {
        struct snd_soc_device *socdev = platform_get_drvdata(pdev);
        struct snd_soc_codec *codec = socdev->card->codec;
-       int i;
        u16 reg;
 
        /* Sync reg_cache with the hardware */
-       for (reg = 0; reg < ARRAY_SIZE(tlv320aic23_reg); i++) {
+       for (reg = 0; reg < TLV320AIC23_RESET; reg++) {
                u16 val = tlv320aic23_read_reg_cache(codec, reg);
                tlv320aic23_write(codec, reg, val);
        }
index 9114c263077bb18972164c41f29fcaaf02448005..13aa380de162daa7deab8abbdf8fb3231ddca418 100644 (file)
@@ -144,4 +144,4 @@ module_exit(omap3evm_soc_exit);
 
 MODULE_AUTHOR("Anuj Aggarwal <anuj.aggarwal@ti.com>");
 MODULE_DESCRIPTION("ALSA SoC OMAP3 EVM");
-MODULE_LICENSE("GPLv2");
+MODULE_LICENSE("GPL v2");
index ad219aaf7cb80678ae80c1263f41df565803cfe0..0cd06f5dd356f24214d91d714ecfbd991478ff81 100644 (file)
@@ -134,7 +134,7 @@ static int omap3pandora_hp_event(struct snd_soc_dapm_widget *w,
  *  |P| <--- TWL4030 <--------- Line In and MICs
  */
 static const struct snd_soc_dapm_widget omap3pandora_out_dapm_widgets[] = {
-       SND_SOC_DAPM_DAC("PCM DAC", "Playback", SND_SOC_NOPM, 0, 0),
+       SND_SOC_DAPM_DAC("PCM DAC", "HiFi Playback", SND_SOC_NOPM, 0, 0),
        SND_SOC_DAPM_PGA_E("Headphone Amplifier", SND_SOC_NOPM,
                           0, 0, NULL, 0, omap3pandora_hp_event,
                           SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_PRE_PMD),
@@ -181,6 +181,7 @@ static int omap3pandora_out_init(struct snd_soc_codec *codec)
        snd_soc_dapm_nc_pin(codec, "CARKITR");
        snd_soc_dapm_nc_pin(codec, "HFL");
        snd_soc_dapm_nc_pin(codec, "HFR");
+       snd_soc_dapm_nc_pin(codec, "VIBRA");
 
        ret = snd_soc_dapm_new_controls(codec, omap3pandora_out_dapm_widgets,
                                ARRAY_SIZE(omap3pandora_out_dapm_widgets));
index d89f6dc00908284d0e97e5778dfe9444b99683ff..66d4c165f99b468fe2fe051b6bd5782684705852 100644 (file)
@@ -973,9 +973,19 @@ static int dapm_power_widgets(struct snd_soc_codec *codec, int event)
                        if (!w->power_check)
                                continue;
 
-                       power = w->power_check(w);
-                       if (power)
-                               sys_power = 1;
+                       /* If we're suspending then pull down all the 
+                        * power. */
+                       switch (event) {
+                       case SND_SOC_DAPM_STREAM_SUSPEND:
+                               power = 0;
+                               break;
+
+                       default:
+                               power = w->power_check(w);
+                               if (power)
+                                       sys_power = 1;
+                               break;
+                       }
 
                        if (w->power == power)
                                continue;
@@ -999,8 +1009,12 @@ static int dapm_power_widgets(struct snd_soc_codec *codec, int event)
                case SND_SOC_DAPM_STREAM_RESUME:
                        sys_power = 1;
                        break;
+               case SND_SOC_DAPM_STREAM_SUSPEND:
+                       sys_power = 0;
+                       break;
                case SND_SOC_DAPM_STREAM_NOP:
                        sys_power = codec->bias_level != SND_SOC_BIAS_STANDBY;
+                       break;
                default:
                        break;
                }
index 9efcfd08d747b1c430ae17f9429fda3b57974a0b..c998220b99c62e30c4d32276478ed7e15c2ca7c5 100644 (file)
@@ -1071,6 +1071,15 @@ static int parse_audio_feature_unit(struct mixer_build *state, int unitid, unsig
        channels = (ftr[0] - 7) / csize - 1;
 
        master_bits = snd_usb_combine_bytes(ftr + 6, csize);
+       /* master configuration quirks */
+       switch (state->chip->usb_id) {
+       case USB_ID(0x08bb, 0x2702):
+               snd_printk(KERN_INFO
+                          "usbmixer: master volume quirk for PCM2702 chip\n");
+               /* disable non-functional volume control */
+               master_bits &= ~(1 << (USB_FEATURE_VOLUME - 1));
+               break;
+       }
        if (channels > 0)
                first_ch_bits = snd_usb_combine_bytes(ftr + 6 + csize, csize);
        else