authorLinus Torvalds <torvalds@linux-foundation.org>2016-05-20 22:20:48 -0700
committerLinus Torvalds <torvalds@linux-foundation.org>2016-05-20 22:20:48 -0700
commit2f37dd131c5d3a2eac21cd5baf80658b1b02a8ac (patch)
treee0f191b15865268e694c02f1f02cbc26a168ddf9
parent3aa2fc1667acdd9cca816a2bc9529f494bd61b05 (diff)
parentffc83a79b44e02995ab5e93af07e26f6c7243c53 (diff)
Merge tag 'staging-4.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging
Pull staging and IIO driver updates from Greg KH:
 "Here's the big staging and iio driver update for 4.7-rc1.

  I think we almost broke even with this release, only adding a few more
  lines than we removed, which isn't bad overall given that there's a
  bunch of new iio drivers added.

  The Lustre developers seem to have woken up from their sleep and have
  been doing a great job in cleaning up the code and pruning unused or
  old cruft, the filesystem is almost readable :)

  Other than that, just a lot of basic coding style cleanups in the
  churn. All have been in linux-next for a while with no reported
  issues"

* tag 'staging-4.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging: (938 commits)
  Staging: emxx_udc: emxx_udc: fixed coding style issue
  staging/gdm724x: fix "alignment should match open parenthesis" issues
  staging/gdm724x: Fix avoid CamelCase
  staging: unisys: rename misleading var ii with frag
  staging: unisys: visorhba: switch success handling to error handling
  staging: unisys: visorhba: main path needs to flow down the left margin
  staging: unisys: visorinput: handle_locking_key() simplifications
  staging: unisys: visorhba: fail gracefully for thread creation failures
  staging: unisys: visornic: comment restructuring and removing bad diction
  staging: unisys: fix format string %Lx to %llx for u64
  staging: unisys: remove unused struct members
  staging: unisys: visorchannel: correct variable misspelling
  staging: unisys: visorhba: replace functionlike macro with function
  staging: dgnc: Need to check for NULL of ch
  staging: dgnc: remove redundant condition check
  staging: dgnc: fix 'line over 80 characters'
  staging: dgnc: clean up the dgnc_get_modem_info()
  staging: lustre: lnet: enable configuration per NI interface
  staging: lustre: o2iblnd: properly set ibr_why
  staging: lustre: o2iblnd: remove last of kiblnd_tunables_fini
  ...
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio68
-rw-r--r--Documentation/DocBook/device-drivers.tmpl2
-rw-r--r--Documentation/devicetree/bindings/iio/accel/mma8452.txt3
-rw-r--r--Documentation/devicetree/bindings/iio/adc/lpc1850-adc.txt21
-rw-r--r--Documentation/devicetree/bindings/iio/adc/mxs-lradc.txt (renamed from Documentation/devicetree/bindings/staging/iio/adc/mxs-lradc.txt)0
-rw-r--r--Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt6
-rw-r--r--Documentation/devicetree/bindings/iio/dac/ad5592r.txt155
-rw-r--r--Documentation/devicetree/bindings/iio/dac/lpc1850-dac.txt20
-rw-r--r--Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt13
-rw-r--r--Documentation/devicetree/bindings/iio/magnetometer/ak8975.txt12
-rw-r--r--Documentation/devicetree/bindings/iio/potentiometer/ds1803.txt21
-rw-r--r--Documentation/devicetree/bindings/iio/potentiometer/mcp4131.txt84
-rw-r--r--Documentation/devicetree/bindings/iio/pressure/hp03.txt17
-rw-r--r--Documentation/devicetree/bindings/iio/pressure/ms5611.txt19
-rw-r--r--Documentation/devicetree/bindings/iio/st-sensors.txt6
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt1
-rw-r--r--Documentation/driver-model/devres.txt4
-rw-r--r--Documentation/sync_file.txt69
-rw-r--r--MAINTAINERS12
-rw-r--r--drivers/Kconfig2
-rw-r--r--drivers/dma-buf/Kconfig11
-rw-r--r--drivers/dma-buf/Makefile1
-rw-r--r--drivers/dma-buf/sync_file.c395
-rw-r--r--drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c11
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h5
-rw-r--r--drivers/gpu/drm/i915/i915_gem.c14
-rw-r--r--drivers/gpu/drm/i915/i915_gem_execbuffer.c14
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c11
-rw-r--r--drivers/iio/accel/Kconfig5
-rw-r--r--drivers/iio/accel/bmc150-accel-core.c127
-rw-r--r--drivers/iio/accel/bmc150-accel-i2c.c7
-rw-r--r--drivers/iio/accel/bmc150-accel-spi.c8
-rw-r--r--drivers/iio/accel/bmc150-accel.h1
-rw-r--r--drivers/iio/accel/kxcjk-1013.c25
-rw-r--r--drivers/iio/accel/mma7455_core.c5
-rw-r--r--drivers/iio/accel/mma8452.c188
-rw-r--r--drivers/iio/accel/mma9553.c1
-rw-r--r--drivers/iio/accel/mxc4005.c29
-rw-r--r--drivers/iio/accel/st_accel.h1
-rw-r--r--drivers/iio/accel/st_accel_core.c105
-rw-r--r--drivers/iio/accel/st_accel_i2c.c4
-rw-r--r--drivers/iio/accel/stk8312.c1
-rw-r--r--drivers/iio/accel/stk8ba50.c1
-rw-r--r--drivers/iio/adc/Kconfig16
-rw-r--r--drivers/iio/adc/Makefile1
-rw-r--r--drivers/iio/adc/ad799x.c2
-rw-r--r--drivers/iio/adc/at91-sama5d2_adc.c102
-rw-r--r--drivers/iio/adc/at91_adc.c8
-rw-r--r--drivers/iio/adc/ina2xx-adc.c43
-rw-r--r--drivers/iio/adc/lpc18xx_adc.c231
-rw-r--r--drivers/iio/adc/mcp3422.c6
-rw-r--r--drivers/iio/adc/mxs-lradc.c37
-rw-r--r--drivers/iio/adc/rockchip_saradc.c19
-rw-r--r--drivers/iio/adc/ti-adc081c.c118
-rw-r--r--drivers/iio/adc/vf610_adc.c24
-rw-r--r--drivers/iio/common/hid-sensors/hid-sensor-trigger.c2
-rw-r--r--drivers/iio/common/ms_sensors/ms_sensors_i2c.c2
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_buffer.c97
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_core.c20
-rw-r--r--drivers/iio/common/st_sensors/st_sensors_trigger.c13
-rw-r--r--drivers/iio/dac/Kconfig37
-rw-r--r--drivers/iio/dac/Makefile4
-rw-r--r--drivers/iio/dac/ad5592r-base.c691
-rw-r--r--drivers/iio/dac/ad5592r-base.h76
-rw-r--r--drivers/iio/dac/ad5592r.c164
-rw-r--r--drivers/iio/dac/ad5593r.c131
-rw-r--r--drivers/iio/dac/lpc18xx_dac.c210
-rw-r--r--drivers/iio/frequency/ad9523.c19
-rw-r--r--drivers/iio/gyro/Kconfig2
-rw-r--r--drivers/iio/gyro/bmg160_core.c137
-rw-r--r--drivers/iio/gyro/st_gyro.h1
-rw-r--r--drivers/iio/gyro/st_gyro_core.c4
-rw-r--r--drivers/iio/gyro/st_gyro_i2c.c5
-rw-r--r--drivers/iio/gyro/st_gyro_spi.c1
-rw-r--r--drivers/iio/humidity/Kconfig10
-rw-r--r--drivers/iio/humidity/Makefile1
-rw-r--r--drivers/iio/humidity/am2315.c303
-rw-r--r--drivers/iio/humidity/dht11.c40
-rw-r--r--drivers/iio/imu/Kconfig2
-rw-r--r--drivers/iio/imu/Makefile1
-rw-r--r--drivers/iio/imu/adis.c7
-rw-r--r--drivers/iio/imu/bmi160/Kconfig32
-rw-r--r--drivers/iio/imu/bmi160/Makefile6
-rw-r--r--drivers/iio/imu/bmi160/bmi160.h10
-rw-r--r--drivers/iio/imu/bmi160/bmi160_core.c596
-rw-r--r--drivers/iio/imu/bmi160/bmi160_i2c.c72
-rw-r--r--drivers/iio/imu/bmi160/bmi160_spi.c63
-rw-r--r--drivers/iio/imu/inv_mpu6050/Kconfig10
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_core.c73
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c3
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h16
-rw-r--r--drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c20
-rw-r--r--drivers/iio/imu/kmx61.c1
-rw-r--r--drivers/iio/industrialio-core.c123
-rw-r--r--drivers/iio/inkern.c86
-rw-r--r--drivers/iio/light/Kconfig32
-rw-r--r--drivers/iio/light/Makefile3
-rw-r--r--drivers/iio/light/apds9960.c13
-rw-r--r--drivers/iio/light/bh1780.c297
-rw-r--r--drivers/iio/light/max44000.c639
-rw-r--r--drivers/iio/light/stk3310.c1
-rw-r--r--drivers/iio/light/tsl2563.c3
-rw-r--r--drivers/iio/light/veml6070.c218
-rw-r--r--drivers/iio/magnetometer/Kconfig33
-rw-r--r--drivers/iio/magnetometer/Makefile3
-rw-r--r--drivers/iio/magnetometer/ak8975.c232
-rw-r--r--drivers/iio/magnetometer/bmc150_magn.c156
-rw-r--r--drivers/iio/magnetometer/bmc150_magn.h11
-rw-r--r--drivers/iio/magnetometer/bmc150_magn_i2c.c77
-rw-r--r--drivers/iio/magnetometer/bmc150_magn_spi.c68
-rw-r--r--drivers/iio/magnetometer/st_magn_core.c1
-rw-r--r--drivers/iio/potentiometer/Kconfig28
-rw-r--r--drivers/iio/potentiometer/Makefile2
-rw-r--r--drivers/iio/potentiometer/ds1803.c173
-rw-r--r--drivers/iio/potentiometer/mcp4131.c494
-rw-r--r--drivers/iio/potentiometer/mcp4531.c13
-rw-r--r--drivers/iio/potentiometer/tpl0102.c2
-rw-r--r--drivers/iio/pressure/Kconfig28
-rw-r--r--drivers/iio/pressure/Makefile2
-rw-r--r--drivers/iio/pressure/bmp280.c564
-rw-r--r--drivers/iio/pressure/hp03.c312
-rw-r--r--drivers/iio/pressure/hp206c.c426
-rw-r--r--drivers/iio/pressure/ms5611.h23
-rw-r--r--drivers/iio/pressure/ms5611_core.c148
-rw-r--r--drivers/iio/pressure/ms5611_i2c.c25
-rw-r--r--drivers/iio/pressure/ms5611_spi.c34
-rw-r--r--drivers/iio/pressure/st_pressure_core.c10
-rw-r--r--drivers/staging/android/Kconfig17
-rw-r--r--drivers/staging/android/Makefile2
-rw-r--r--drivers/staging/android/ion/ion.c16
-rw-r--r--drivers/staging/android/ion/ion_chunk_heap.c4
-rw-r--r--drivers/staging/android/ion/ion_dummy_driver.c2
-rw-r--r--drivers/staging/android/ion/ion_test.c2
-rw-r--r--drivers/staging/android/lowmemorykiller.c9
-rw-r--r--drivers/staging/android/sync.c356
-rw-r--r--drivers/staging/android/sync.h91
-rw-r--r--drivers/staging/android/sync_debug.c8
-rw-r--r--drivers/staging/android/timed_gpio.c166
-rw-r--r--drivers/staging/android/timed_gpio.h33
-rw-r--r--drivers/staging/android/timed_output.c110
-rw-r--r--drivers/staging/android/timed_output.h37
-rw-r--r--drivers/staging/board/armadillo800eva.c8
-rw-r--r--drivers/staging/comedi/comedi_buf.c10
-rw-r--r--drivers/staging/comedi/comedi_fops.c54
-rw-r--r--drivers/staging/comedi/comedidev.h4
-rw-r--r--drivers/staging/comedi/drivers.c40
-rw-r--r--drivers/staging/comedi/drivers/amcc_s5933.h24
-rw-r--r--drivers/staging/comedi/drivers/amplc_dio200_common.c12
-rw-r--r--drivers/staging/comedi/drivers/amplc_pc263.c104
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci224.c71
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci230.c189
-rw-r--r--drivers/staging/comedi/drivers/amplc_pci263.c86
-rw-r--r--drivers/staging/comedi/drivers/c6xdigio.c4
-rw-r--r--drivers/staging/comedi/drivers/comedi_8254.h14
-rw-r--r--drivers/staging/comedi/drivers/das1800.c1385
-rw-r--r--drivers/staging/comedi/drivers/dt282x.c119
-rw-r--r--drivers/staging/comedi/drivers/mite.c1113
-rw-r--r--drivers/staging/comedi/drivers/mite.h329
-rw-r--r--drivers/staging/comedi/drivers/ni_660x.c1174
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc.h33
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_common.c65
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_cs.c95
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_pci.c4
-rw-r--r--drivers/staging/comedi/drivers/ni_labpc_regs.h82
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_c_common.c0
-rw-r--r--drivers/staging/comedi/drivers/ni_mio_common.c981
-rw-r--r--drivers/staging/comedi/drivers/ni_pcidio.c37
-rw-r--r--drivers/staging/comedi/drivers/ni_pcimio.c36
-rw-r--r--drivers/staging/comedi/drivers/ni_stc.h56
-rw-r--r--drivers/staging/comedi/drivers/ni_tio.c807
-rw-r--r--drivers/staging/comedi/drivers/ni_tio.h66
-rw-r--r--drivers/staging/comedi/drivers/ni_tio_internal.h322
-rw-r--r--drivers/staging/comedi/drivers/ni_tiocmd.c127
-rw-r--r--drivers/staging/comedi/drivers/plx9052.h122
-rw-r--r--drivers/staging/comedi/drivers/plx9080.h2
-rw-r--r--drivers/staging/comedi/drivers/z8536.h89
-rw-r--r--drivers/staging/dgnc/dgnc_cls.c2
-rw-r--r--drivers/staging/dgnc/dgnc_driver.c52
-rw-r--r--drivers/staging/dgnc/dgnc_driver.h23
-rw-r--r--drivers/staging/dgnc/dgnc_mgmt.c28
-rw-r--r--drivers/staging/dgnc/dgnc_neo.c131
-rw-r--r--drivers/staging/dgnc/dgnc_sysfs.c22
-rw-r--r--drivers/staging/dgnc/dgnc_tty.c277
-rw-r--r--drivers/staging/dgnc/digi.h4
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.c24
-rw-r--r--drivers/staging/emxx_udc/emxx_udc.h40
-rw-r--r--drivers/staging/fbtft/fb_agm1264k-fl.c2
-rw-r--r--drivers/staging/fbtft/fbtft-io.c8
-rw-r--r--drivers/staging/fbtft/fbtft_device.c6
-rw-r--r--drivers/staging/fsl-mc/README.txt138
-rw-r--r--drivers/staging/fsl-mc/TODO13
-rw-r--r--drivers/staging/fsl-mc/bus/dpbp.c77
-rw-r--r--drivers/staging/fsl-mc/bus/dpmcp-cmd.h7
-rw-r--r--drivers/staging/fsl-mc/bus/dpmcp.c35
-rw-r--r--drivers/staging/fsl-mc/bus/dpmcp.h10
-rw-r--r--drivers/staging/fsl-mc/bus/dprc-cmd.h6
-rw-r--r--drivers/staging/fsl-mc/bus/dprc-driver.c33
-rw-r--r--drivers/staging/fsl-mc/bus/dprc.c26
-rw-r--r--drivers/staging/fsl-mc/bus/mc-allocator.c79
-rw-r--r--drivers/staging/fsl-mc/bus/mc-bus.c90
-rw-r--r--drivers/staging/fsl-mc/bus/mc-msi.c14
-rw-r--r--drivers/staging/fsl-mc/include/dpbp-cmd.h4
-rw-r--r--drivers/staging/fsl-mc/include/dpbp.h51
-rw-r--r--drivers/staging/fsl-mc/include/dprc.h19
-rw-r--r--drivers/staging/fsl-mc/include/mc-private.h2
-rw-r--r--drivers/staging/fwserial/dma_fifo.c8
-rw-r--r--drivers/staging/fwserial/dma_fifo.h16
-rw-r--r--drivers/staging/fwserial/fwserial.c42
-rw-r--r--drivers/staging/fwserial/fwserial.h42
-rw-r--r--drivers/staging/gdm724x/gdm_mux.c5
-rw-r--r--drivers/staging/gdm724x/gdm_usb.c6
-rw-r--r--drivers/staging/gdm724x/hci_packet.h2
-rw-r--r--drivers/staging/gdm724x/netlink_k.c3
-rw-r--r--drivers/staging/gs_fpgaboot/gs_fpgaboot.c8
-rw-r--r--drivers/staging/gs_fpgaboot/gs_fpgaboot.h2
-rw-r--r--drivers/staging/gs_fpgaboot/io.c1
-rw-r--r--drivers/staging/i4l/act2000/act2000_isa.c24
-rw-r--r--drivers/staging/i4l/pcbit/capi.h2
-rw-r--r--drivers/staging/i4l/pcbit/drv.c8
-rw-r--r--drivers/staging/i4l/pcbit/edss1.c2
-rw-r--r--drivers/staging/i4l/pcbit/layer2.h2
-rw-r--r--drivers/staging/iio/accel/Kconfig23
-rw-r--r--drivers/staging/iio/accel/Makefile6
-rw-r--r--drivers/staging/iio/accel/adis16201.h156
-rw-r--r--drivers/staging/iio/accel/adis16201_core.c1
-rw-r--r--drivers/staging/iio/accel/adis16203.h132
-rw-r--r--drivers/staging/iio/accel/adis16203_core.c1
-rw-r--r--drivers/staging/iio/accel/adis16204.h68
-rw-r--r--drivers/staging/iio/accel/adis16204_core.c253
-rw-r--r--drivers/staging/iio/accel/adis16209.h39
-rw-r--r--drivers/staging/iio/accel/adis16209_core.c1
-rw-r--r--drivers/staging/iio/accel/adis16220.h140
-rw-r--r--drivers/staging/iio/accel/adis16220_core.c494
-rw-r--r--drivers/staging/iio/accel/adis16240.h50
-rw-r--r--drivers/staging/iio/accel/adis16240_core.c5
-rw-r--r--drivers/staging/iio/adc/ad7192.c50
-rw-r--r--drivers/staging/iio/adc/ad7280a.c40
-rw-r--r--drivers/staging/iio/adc/ad7280a.h8
-rw-r--r--drivers/staging/iio/adc/ad7606.h28
-rw-r--r--drivers/staging/iio/adc/ad7606_core.c18
-rw-r--r--drivers/staging/iio/adc/ad7606_spi.c5
-rw-r--r--drivers/staging/iio/adc/ad7780.c2
-rw-r--r--drivers/staging/iio/frequency/ad9832.c2
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.c45
-rw-r--r--drivers/staging/iio/impedance-analyzer/ad5933.h28
-rw-r--r--drivers/staging/iio/light/isl29028.c55
-rw-r--r--drivers/staging/iio/light/tsl2x7x_core.c211
-rw-r--r--drivers/staging/iio/meter/ade7753.c4
-rw-r--r--drivers/staging/iio/meter/ade7754.c4
-rw-r--r--drivers/staging/iio/meter/ade7758.h16
-rw-r--r--drivers/staging/iio/meter/ade7758_core.c77
-rw-r--r--drivers/staging/iio/meter/ade7758_ring.c4
-rw-r--r--drivers/staging/iio/meter/ade7759.c4
-rw-r--r--drivers/staging/iio/meter/ade7854.c3
-rw-r--r--drivers/staging/iio/resolver/ad2s1210.h8
-rw-r--r--drivers/staging/iio/trigger/iio-trig-bfin-timer.c15
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs.h51
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h79
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h136
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h18
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h15
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h4
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h161
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h31
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_private.h75
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h12
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h2
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h2
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h80
-rw-r--r--drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h4
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-dlc.h29
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-lnet.h9
-rw-r--r--drivers/staging/lustre/include/linux/lnet/lib-types.h2
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c405
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h134
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c98
-rw-r--r--drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c139
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c1
-rw-r--r--drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c3
-rw-r--r--drivers/staging/lustre/lnet/libcfs/debug.c126
-rw-r--r--drivers/staging/lustre/lnet/libcfs/fail.c3
-rw-r--r--drivers/staging/lustre/lnet/libcfs/hash.c6
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_lock.c54
-rw-r--r--drivers/staging/lustre/lnet/libcfs/libcfs_mem.c28
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c9
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c283
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-module.c154
-rw-r--r--drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c31
-rw-r--r--drivers/staging/lustre/lnet/libcfs/module.c132
-rw-r--r--drivers/staging/lustre/lnet/libcfs/tracefile.c17
-rw-r--r--drivers/staging/lustre/lnet/libcfs/workitem.c12
-rw-r--r--drivers/staging/lustre/lnet/lnet/api-ni.c143
-rw-r--r--drivers/staging/lustre/lnet/lnet/config.c3
-rw-r--r--drivers/staging/lustre/lnet/lnet/lib-move.c10
-rw-r--r--drivers/staging/lustre/lnet/lnet/module.c7
-rw-r--r--drivers/staging/lustre/lnet/selftest/brw_test.c82
-rw-r--r--drivers/staging/lustre/lnet/selftest/conctl.c52
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.c215
-rw-r--r--drivers/staging/lustre/lnet/selftest/conrpc.h40
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.c282
-rw-r--r--drivers/staging/lustre/lnet/selftest/console.h47
-rw-r--r--drivers/staging/lustre/lnet/selftest/framework.c270
-rw-r--r--drivers/staging/lustre/lnet/selftest/ping_test.c44
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.c133
-rw-r--r--drivers/staging/lustre/lnet/selftest/rpc.h156
-rw-r--r--drivers/staging/lustre/lnet/selftest/selftest.h204
-rw-r--r--drivers/staging/lustre/lnet/selftest/timer.c12
-rw-r--r--drivers/staging/lustre/lustre/fid/fid_request.c12
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_cache.c3
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_internal.h9
-rw-r--r--drivers/staging/lustre/lustre/fld/fld_request.c94
-rw-r--r--drivers/staging/lustre/lustre/include/cl_object.h978
-rw-r--r--drivers/staging/lustre/lustre/include/lclient.h408
-rw-r--r--drivers/staging/lustre/lustre/include/linux/obd.h125
-rw-r--r--drivers/staging/lustre/lustre/include/lu_object.h75
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_idl.h112
-rw-r--r--drivers/staging/lustre/lustre/include/lustre/lustre_user.h54
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_cfg.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_disk.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm.h14
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_dlm_flags.h120
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_fid.h22
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_import.h2
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_lib.h60
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_mdc.h18
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_net.h4
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_param.h1
-rw-r--r--drivers/staging/lustre/lustre/include/lustre_req_layout.h3
-rw-r--r--drivers/staging/lustre/lustre/include/obd.h77
-rw-r--r--drivers/staging/lustre/lustre/include/obd_cksum.h1
-rw-r--r--drivers/staging/lustre/lustre/include/obd_class.h5
-rw-r--r--drivers/staging/lustre/lustre/include/obd_support.h4
-rw-r--r--drivers/staging/lustre/lustre/lclient/lcommon_cl.c1203
-rw-r--r--drivers/staging/lustre/lustre/ldlm/l_lock.c4
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_extent.c4
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_flock.c30
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_internal.h19
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lib.c14
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lock.c115
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c28
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_request.c163
-rw-r--r--drivers/staging/lustre/lustre/ldlm/ldlm_resource.c19
-rw-r--r--drivers/staging/lustre/lustre/llite/Makefile5
-rw-r--r--drivers/staging/lustre/lustre/llite/dcache.c15
-rw-r--r--drivers/staging/lustre/lustre/llite/dir.c95
-rw-r--r--drivers/staging/lustre/lustre/llite/file.c277
-rw-r--r--drivers/staging/lustre/lustre/llite/glimpse.c (renamed from drivers/staging/lustre/lustre/lclient/glimpse.c)87
-rw-r--r--drivers/staging/lustre/lustre/llite/lcommon_cl.c327
-rw-r--r--drivers/staging/lustre/lustre/llite/lcommon_misc.c (renamed from drivers/staging/lustre/lustre/lclient/lcommon_misc.c)45
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_close.c71
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_internal.h266
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_lib.c176
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_mmap.c48
-rw-r--r--drivers/staging/lustre/lustre/llite/llite_nfs.c29
-rw-r--r--drivers/staging/lustre/lustre/llite/lloop.c3
-rw-r--r--drivers/staging/lustre/lustre/llite/lproc_llite.c33
-rw-r--r--drivers/staging/lustre/lustre/llite/namei.c143
-rw-r--r--drivers/staging/lustre/lustre/llite/rw.c367
-rw-r--r--drivers/staging/lustre/lustre/llite/rw26.c314
-rw-r--r--drivers/staging/lustre/lustre/llite/statahead.c17
-rw-r--r--drivers/staging/lustre/lustre/llite/super25.c14
-rw-r--r--drivers/staging/lustre/lustre/llite/symlink.c10
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_dev.c270
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_internal.h332
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_io.c928
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_lock.c53
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_object.c141
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_page.c211
-rw-r--r--drivers/staging/lustre/lustre/llite/vvp_req.c121
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr.c33
-rw-r--r--drivers/staging/lustre/lustre/llite/xattr_cache.c1
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_internal.h3
-rw-r--r--drivers/staging/lustre/lustre/lmv/lmv_obd.c182
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_cl_internal.h105
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_dev.c15
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_ea.c5
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_internal.h34
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_io.c246
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_lock.c996
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_merge.c11
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_obd.c26
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_object.c54
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_offset.c12
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pack.c8
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_page.c183
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_pool.c62
-rw-r--r--drivers/staging/lustre/lustre/lov/lov_request.c11
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_dev.c9
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_lock.c386
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_object.c7
-rw-r--r--drivers/staging/lustre/lustre/lov/lovsub_page.c4
-rw-r--r--drivers/staging/lustre/lustre/mdc/lproc_mdc.c8
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_lib.c24
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_locks.c5
-rw-r--r--drivers/staging/lustre/lustre/mdc/mdc_request.c26
-rw-r--r--drivers/staging/lustre/lustre/mgc/mgc_request.c12
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_io.c430
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_lock.c2086
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_object.c303
-rw-r--r--drivers/staging/lustre/lustre/obdclass/cl_page.c659
-rw-r--r--drivers/staging/lustre/lustre/obdclass/class_obd.c5
-rw-r--r--drivers/staging/lustre/lustre/obdclass/debug.c4
-rw-r--r--drivers/staging/lustre/lustre/obdclass/genops.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/linux/linux-module.c4
-rw-r--r--drivers/staging/lustre/lustre/obdclass/llog.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lprocfs_status.c72
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lu_object.c9
-rw-r--r--drivers/staging/lustre/lustre/obdclass/lustre_peer.c3
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_config.c26
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obd_mount.c15
-rw-r--r--drivers/staging/lustre/lustre/obdclass/obdo.c3
-rw-r--r--drivers/staging/lustre/lustre/obdecho/echo_client.c173
-rw-r--r--drivers/staging/lustre/lustre/osc/lproc_osc.c68
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cache.c531
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_cl_internal.h159
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_internal.h27
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_io.c283
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_lock.c1698
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_object.c38
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_page.c544
-rw-r--r--drivers/staging/lustre/lustre/osc/osc_request.c423
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/client.c11
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/events.c1
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/import.c12
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/layout.c31
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c11
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/nrs.c7
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/pack_generic.c3
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c21
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c14
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/sec_plain.c2
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/service.c52
-rw-r--r--drivers/staging/lustre/lustre/ptlrpc/wiretest.c12
-rw-r--r--drivers/staging/media/omap1/omap1_camera.c68
-rw-r--r--drivers/staging/media/omap4iss/iss.c2
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_errors.h8
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_hal.h14
-rw-r--r--drivers/staging/most/hdm-dim2/dim2_reg.h8
-rw-r--r--drivers/staging/netlogic/xlr_net.c2
-rw-r--r--drivers/staging/nvec/nvec.c11
-rw-r--r--drivers/staging/nvec/nvec_power.c4
-rw-r--r--drivers/staging/octeon/ethernet-rx.c7
-rw-r--r--drivers/staging/octeon/ethernet-rx.h2
-rw-r--r--drivers/staging/octeon/ethernet-tx.c15
-rw-r--r--drivers/staging/octeon/ethernet.c4
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ap.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_cmd.c49
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_debug.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_efuse.c7
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ieee80211.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_ioctl_set.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme.c13
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_mlme_ext.c49
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_pwrctrl.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_recv.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_rf.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_security.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_sreset.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_sta_mgt.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_wlan_util.c5
-rw-r--r--drivers/staging/rtl8188eu/core/rtw_xmit.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c2
-rw-r--r--drivers/staging/rtl8188eu/hal/bb_cfg.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/fw.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/hal_com.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/hal_intf.c7
-rw-r--r--drivers/staging/rtl8188eu/hal/mac_cfg.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/odm.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_HWConfig.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/odm_RTL8188E.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/phy.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/pwrseq.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/pwrseqcmd.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/rf.c4
-rw-r--r--drivers/staging/rtl8188eu/hal/rf_cfg.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_dm.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c9
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_led.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c5
-rw-r--r--drivers/staging/rtl8188eu/hal/usb_halinit.c76
-rw-r--r--drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h5
-rw-r--r--drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h5
-rw-r--r--drivers/staging/rtl8188eu/include/HalHWImg8188E_FW.h5
-rw-r--r--drivers/staging/rtl8188eu/include/HalVerDef.h5
-rw-r--r--drivers/staging/rtl8188eu/include/basic_types.h5
-rw-r--r--drivers/staging/rtl8188eu/include/drv_types.h5
-rw-r--r--drivers/staging/rtl8188eu/include/fw.h4
-rw-r--r--drivers/staging/rtl8188eu/include/hal_com.h5
-rw-r--r--drivers/staging/rtl8188eu/include/hal_intf.h5
-rw-r--r--drivers/staging/rtl8188eu/include/ieee80211.h5
-rw-r--r--drivers/staging/rtl8188eu/include/mlme_osdep.h5
-rw-r--r--drivers/staging/rtl8188eu/include/mp_custom_oid.h5
-rw-r--r--drivers/staging/rtl8188eu/include/odm.h5
-rw-r--r--drivers/staging/rtl8188eu/include/odm_HWConfig.h4
-rw-r--r--drivers/staging/rtl8188eu/include/odm_RTL8188E.h5
-rw-r--r--drivers/staging/rtl8188eu/include/odm_RegDefine11N.h5
-rw-r--r--drivers/staging/rtl8188eu/include/odm_debug.h5
-rw-r--r--drivers/staging/rtl8188eu/include/odm_precomp.h5
-rw-r--r--drivers/staging/rtl8188eu/include/odm_reg.h5
-rw-r--r--drivers/staging/rtl8188eu/include/odm_types.h5
-rw-r--r--drivers/staging/rtl8188eu/include/osdep_intf.h5
-rw-r--r--drivers/staging/rtl8188eu/include/osdep_service.h5
-rw-r--r--drivers/staging/rtl8188eu/include/pwrseq.h5
-rw-r--r--drivers/staging/rtl8188eu/include/pwrseqcmd.h5
-rw-r--r--drivers/staging/rtl8188eu/include/recv_osdep.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_cmd.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_dm.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_hal.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_led.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_recv.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_spec.h4
-rw-r--r--drivers/staging/rtl8188eu/include/rtl8188e_xmit.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_android.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ap.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_cmd.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_debug.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_eeprom.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_efuse.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_event.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ht.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ioctl.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ioctl_rtl.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_ioctl_set.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_iol.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mlme_ext.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_pwrctrl.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_qos.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_recv.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_rf.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_security.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_sreset.h5
-rw-r--r--drivers/staging/rtl8188eu/include/rtw_xmit.h5
-rw-r--r--drivers/staging/rtl8188eu/include/sta_info.h5
-rw-r--r--drivers/staging/rtl8188eu/include/usb_hal.h5
-rw-r--r--drivers/staging/rtl8188eu/include/usb_ops_linux.h5
-rw-r--r--drivers/staging/rtl8188eu/include/wifi.h5
-rw-r--r--drivers/staging/rtl8188eu/include/wlan_bssdef.h5
-rw-r--r--drivers/staging/rtl8188eu/include/xmit_osdep.h5
-rw-r--r--drivers/staging/rtl8188eu/os_dep/ioctl_linux.c13
-rw-r--r--drivers/staging/rtl8188eu/os_dep/mlme_linux.c5
-rw-r--r--drivers/staging/rtl8188eu/os_dep/os_intfs.c5
-rw-r--r--drivers/staging/rtl8188eu/os_dep/osdep_service.c5
-rw-r--r--drivers/staging/rtl8188eu/os_dep/recv_linux.c5
-rw-r--r--drivers/staging/rtl8188eu/os_dep/rtw_android.c5
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_intf.c7
-rw-r--r--drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c4
-rw-r--r--drivers/staging/rtl8188eu/os_dep/xmit_linux.c5
-rw-r--r--drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c2
-rw-r--r--drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c2
-rw-r--r--drivers/staging/rtl8192u/r8190_rtl8256.c2
-rw-r--r--drivers/staging/rtl8192u/r8192U_core.c73
-rw-r--r--drivers/staging/rtl8192u/r8192U_wx.c22
-rw-r--r--drivers/staging/rtl8712/basic_types.h4
-rw-r--r--drivers/staging/rtl8712/drv_types.h4
-rw-r--r--drivers/staging/rtl8712/ethernet.h4
-rw-r--r--drivers/staging/rtl8712/hal_init.c25
-rw-r--r--drivers/staging/rtl8712/ieee80211.c4
-rw-r--r--drivers/staging/rtl8712/mlme_linux.c2
-rw-r--r--drivers/staging/rtl8712/os_intfs.c4
-rw-r--r--drivers/staging/rtl8712/osdep_service.h3
-rw-r--r--drivers/staging/rtl8712/rtl8712_cmd.c18
-rw-r--r--drivers/staging/rtl8712/rtl8712_recv.c10
-rw-r--r--drivers/staging/rtl8712/rtl8712_xmit.c8
-rw-r--r--drivers/staging/rtl8712/rtl871x_cmd.c80
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_linux.c16
-rw-r--r--drivers/staging/rtl8712/rtl871x_ioctl_set.c6
-rw-r--r--drivers/staging/rtl8712/rtl871x_mlme.c16
-rw-r--r--drivers/staging/rtl8712/rtl871x_recv.c2
-rw-r--r--drivers/staging/rtl8712/rtl871x_sta_mgt.c6
-rw-r--r--drivers/staging/rtl8712/rtl871x_xmit.c2
-rw-r--r--drivers/staging/rtl8712/usb_ops_linux.c2
-rw-r--r--drivers/staging/rtl8723au/Kconfig7
-rw-r--r--drivers/staging/rtl8723au/core/rtw_ap.c3
-rw-r--r--drivers/staging/rtl8723au/core/rtw_recv.c25
-rw-r--r--drivers/staging/rtl8723au/core/rtw_wlan_util.c10
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c2
-rw-r--r--drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c2
-rw-r--r--drivers/staging/rtl8723au/include/rtw_mlme_ext.h2
-rw-r--r--drivers/staging/rtl8723au/include/rtw_recv.h2
-rw-r--r--drivers/staging/rtl8723au/os_dep/usb_intf.c5
-rw-r--r--drivers/staging/rts5208/ms.c16
-rw-r--r--drivers/staging/rts5208/rtsx_card.c21
-rw-r--r--drivers/staging/rts5208/rtsx_card.h2
-rw-r--r--drivers/staging/rts5208/rtsx_chip.c35
-rw-r--r--drivers/staging/rts5208/rtsx_chip.h3
-rw-r--r--drivers/staging/rts5208/sd.c16
-rw-r--r--drivers/staging/skein/skein_api.c3
-rw-r--r--drivers/staging/skein/skein_base.c90
-rw-r--r--drivers/staging/skein/skein_base.h45
-rw-r--r--drivers/staging/skein/skein_block.c92
-rw-r--r--drivers/staging/skein/skein_generic.c6
-rw-r--r--drivers/staging/skein/threefish_api.h2
-rw-r--r--drivers/staging/skein/threefish_block.c2144
-rw-r--r--drivers/staging/slicoss/slicoss.c8
-rw-r--r--drivers/staging/sm750fb/ddk750_chip.c2
-rw-r--r--drivers/staging/speakup/main.c6
-rw-r--r--drivers/staging/speakup/serialio.h3
-rw-r--r--drivers/staging/unisys/Documentation/ABI/sysfs-platform-visorchipset14
-rw-r--r--drivers/staging/unisys/Documentation/overview.txt19
-rw-r--r--drivers/staging/unisys/Documentation/proc-entries.txt93
-rw-r--r--drivers/staging/unisys/MAINTAINERS1
-rw-r--r--drivers/staging/unisys/include/channel.h10
-rw-r--r--drivers/staging/unisys/include/iochannel.h42
-rw-r--r--drivers/staging/unisys/include/visorbus.h127
-rw-r--r--drivers/staging/unisys/visorbus/visorbus_main.c394
-rw-r--r--drivers/staging/unisys/visorbus/visorchannel.c5
-rw-r--r--drivers/staging/unisys/visorbus/visorchipset.c442
-rw-r--r--drivers/staging/unisys/visorhba/visorhba_main.c114
-rw-r--r--drivers/staging/unisys/visorinput/visorinput.c24
-rw-r--r--drivers/staging/unisys/visornic/visornic_main.c223
-rw-r--r--drivers/staging/vme/devices/vme_pio2_gpio.c5
-rw-r--r--drivers/staging/vt6655/baseband.c24
-rw-r--r--drivers/staging/vt6655/baseband.h6
-rw-r--r--drivers/staging/vt6655/card.c95
-rw-r--r--drivers/staging/vt6655/card.h9
-rw-r--r--drivers/staging/vt6655/desc.h3
-rw-r--r--drivers/staging/vt6655/mac.c15
-rw-r--r--drivers/staging/vt6655/srom.c9
-rw-r--r--drivers/staging/vt6656/baseband.c26
-rw-r--r--drivers/staging/vt6656/main_usb.c6
-rw-r--r--drivers/staging/vt6656/wcmd.c8
-rw-r--r--drivers/staging/wilc1000/Kconfig1
-rw-r--r--drivers/staging/wilc1000/host_interface.c438
-rw-r--r--drivers/staging/wilc1000/host_interface.h8
-rw-r--r--drivers/staging/wilc1000/linux_mon.c24
-rw-r--r--drivers/staging/wilc1000/linux_wlan.c98
-rw-r--r--drivers/staging/wilc1000/wilc_spi.c3
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_cfgoperations.c71
-rw-r--r--drivers/staging/wilc1000/wilc_wfi_netdevice.h15
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.c53
-rw-r--r--drivers/staging/wilc1000/wilc_wlan.h6
-rw-r--r--drivers/staging/wilc1000/wilc_wlan_cfg.c7
-rw-r--r--drivers/staging/wilc1000/wilc_wlan_if.h21
-rw-r--r--drivers/staging/wlan-ng/cfg80211.c4
-rw-r--r--drivers/staging/wlan-ng/hfa384x_usb.c8
-rw-r--r--drivers/staging/wlan-ng/p80211conv.c5
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.c4
-rw-r--r--drivers/staging/wlan-ng/p80211netdev.h1
-rw-r--r--drivers/staging/wlan-ng/prism2fw.c28
-rw-r--r--drivers/staging/wlan-ng/prism2usb.c2
-rw-r--r--drivers/staging/xgifb/XGI_main_26.c5
-rw-r--r--drivers/staging/xgifb/vb_init.c16
-rw-r--r--drivers/staging/xgifb/vb_setmode.c22
-rw-r--r--drivers/staging/xgifb/vb_table.h135
-rw-r--r--drivers/staging/xgifb/vb_util.h8
-rw-r--r--include/dt-bindings/iio/adi,ad5592r.h16
-rw-r--r--include/linux/iio/buffer.h2
-rw-r--r--include/linux/iio/common/st_sensors.h9
-rw-r--r--include/linux/iio/consumer.h53
-rw-r--r--include/linux/iio/iio.h33
-rw-r--r--include/linux/iio/imu/adis.h1
-rw-r--r--include/linux/iio/magnetometer/ak8975.h16
-rw-r--r--include/linux/kernel.h7
-rw-r--r--include/linux/platform_data/invensense_mpu6050.h5
-rw-r--r--include/linux/platform_data/st_sensors_pdata.h2
-rw-r--r--include/linux/sched.h4
-rw-r--r--include/linux/sync_file.h57
-rw-r--r--include/uapi/linux/iio/types.h2
-rw-r--r--include/uapi/linux/sync_file.h (renamed from drivers/staging/android/uapi/sync.h)44
-rw-r--r--tools/iio/generic_buffer.c116
-rw-r--r--tools/iio/iio_event_monitor.c18
-rw-r--r--tools/iio/iio_utils.h7
668 files changed, 25442 insertions, 25016 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index 3c6624881375..df44998e7506 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -1233,7 +1233,7 @@ KernelVersion: 3.4
Contact: linux-iio@vger.kernel.org
Description:
Proximity measurement indicating that some
- object is near the sensor, usually be observing
+ object is near the sensor, usually by observing
reflectivity of infrared or ultrasound emitted.
Often these sensors are unit less and as such conversion
to SI units is not possible. Higher proximity measurements
@@ -1255,12 +1255,23 @@ Description:
What: /sys/.../iio:deviceX/in_intensityY_raw
What: /sys/.../iio:deviceX/in_intensityY_ir_raw
What: /sys/.../iio:deviceX/in_intensityY_both_raw
+What: /sys/.../iio:deviceX/in_intensityY_uv_raw
KernelVersion: 3.4
Contact: linux-iio@vger.kernel.org
Description:
Unit-less light intensity. Modifiers both and ir indicate
that measurements contains visible and infrared light
- components or just infrared light, respectively.
+ components or just infrared light, respectively. Modifier uv indicates
+ that measurements contain ultraviolet light components.
+
+What: /sys/.../iio:deviceX/in_uvindex_input
+KernelVersion: 4.6
+Contact: linux-iio@vger.kernel.org
+Description:
+ UV light intensity index measuring the human skin's response to
+ different wavelength of sunlight weighted according to the
+ standardised CIE Erythemal Action Spectrum. UV index values range
+ from 0 (low) to >=11 (extreme).
What: /sys/.../iio:deviceX/in_intensity_red_integration_time
What: /sys/.../iio:deviceX/in_intensity_green_integration_time
@@ -1501,3 +1512,56 @@ Contact: linux-iio@vger.kernel.org
Description:
Raw (unscaled no offset etc.) pH reading of a substance as a negative
base-10 logarithm of hydrodium ions in a litre of water.
+
+What: /sys/bus/iio/devices/iio:deviceX/mount_matrix
+What: /sys/bus/iio/devices/iio:deviceX/in_mount_matrix
+What: /sys/bus/iio/devices/iio:deviceX/out_mount_matrix
+What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_mount_matrix
+What: /sys/bus/iio/devices/iio:deviceX/in_accel_mount_matrix
+KernelVersion: 4.6
+Contact: linux-iio@vger.kernel.org
+Description:
+ Mounting matrix for IIO sensors. This is a rotation matrix which
+ informs userspace about sensor chip's placement relative to the
+ main hardware it is mounted on.
+ Main hardware placement is defined according to the local
+ reference frame related to the physical quantity the sensor
+ measures.
+ Given that the rotation matrix is defined in a board specific
+ way (platform data and / or device-tree), the main hardware
+ reference frame definition is left to the implementor's choice
+ (see below for a magnetometer example).
+ Applications should apply this rotation matrix to samples so
+ that when main hardware reference frame is aligned onto local
+ reference frame, then sensor chip reference frame is also
+ perfectly aligned with it.
+ Matrix is a 3x3 unitary matrix and typically looks like
+ [0, 1, 0; 1, 0, 0; 0, 0, -1]. Identity matrix
+ [1, 0, 0; 0, 1, 0; 0, 0, 1] means sensor chip and main hardware
+ are perfectly aligned with each other.
+
+ For example, a mounting matrix for a magnetometer sensor informs
+ userspace about sensor chip's ORIENTATION relative to the main
+ hardware.
+ More specifically, main hardware orientation is defined with
+ respect to the LOCAL EARTH GEOMAGNETIC REFERENCE FRAME where :
+ * Y is in the ground plane and positive towards magnetic North ;
+ * X is in the ground plane, perpendicular to the North axis and
+ positive towards the East ;
+ * Z is perpendicular to the ground plane and positive upwards.
+
+ An implementor might consider that for a hand-held device, a
+ 'natural' orientation would be 'front facing camera at the top'.
+ The main hardware reference frame could then be described as :
+ * Y is in the plane of the screen and is positive towards the
+ top of the screen ;
+ * X is in the plane of the screen, perpendicular to Y axis, and
+ positive towards the right hand side of the screen ;
+ * Z is perpendicular to the screen plane and positive out of the
+ screen.
+ Another example for a quadrotor UAV might be :
+ * Y is in the plane of the propellers and positive towards the
+ front-view camera;
+ * X is in the plane of the propellers, perpendicular to Y axis,
+ and positive towards the starboard side of the UAV ;
+ * Z is perpendicular to propellers plane and positive upwards.
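[Editor's illustration, not part of the patch: the mount_matrix text above says applications should rotate each raw sample into the main hardware reference frame. That is a plain 3x3 matrix multiply; the sketch below shows it in userspace C. The example matrix is the one quoted in the text, and the sample values are hypothetical.]

	#include <stdio.h>

	/*
	 * Apply a 3x3 mounting matrix (row-major, as read from the
	 * mount_matrix sysfs attribute) to a raw (x, y, z) sample so the
	 * result is expressed in the main hardware's reference frame.
	 */
	static void apply_mount_matrix(const double m[3][3],
				       const double in[3], double out[3])
	{
		for (int i = 0; i < 3; i++)
			out[i] = m[i][0] * in[0] +
				 m[i][1] * in[1] +
				 m[i][2] * in[2];
	}

	int main(void)
	{
		/* Matrix from the example above: [0, 1, 0; 1, 0, 0; 0, 0, -1] */
		const double m[3][3] = { { 0, 1, 0 },
					 { 1, 0, 0 },
					 { 0, 0, -1 } };
		const double sample[3] = { 0.12, -0.98, 9.81 }; /* hypothetical reading */
		double fixed[3];

		apply_mount_matrix(m, sample, fixed);
		printf("%f %f %f\n", fixed[0], fixed[1], fixed[2]);
		return 0;
	}

[Parsing the nine string elements out of the sysfs attribute is left out of the sketch.]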
diff --git a/Documentation/DocBook/device-drivers.tmpl b/Documentation/DocBook/device-drivers.tmpl
index 893b2cabf7e4..de79efdad46c 100644
--- a/Documentation/DocBook/device-drivers.tmpl
+++ b/Documentation/DocBook/device-drivers.tmpl
@@ -136,6 +136,8 @@ X!Edrivers/base/interface.c
!Iinclude/linux/seqno-fence.h
!Edrivers/dma-buf/reservation.c
!Iinclude/linux/reservation.h
+!Edrivers/dma-buf/sync_file.c
+!Iinclude/linux/sync_file.h
!Edrivers/base/dma-coherent.c
!Edrivers/base/dma-mapping.c
</sect1>
diff --git a/Documentation/devicetree/bindings/iio/accel/mma8452.txt b/Documentation/devicetree/bindings/iio/accel/mma8452.txt
index 165937e1ac1c..45f5c5c5929c 100644
--- a/Documentation/devicetree/bindings/iio/accel/mma8452.txt
+++ b/Documentation/devicetree/bindings/iio/accel/mma8452.txt
@@ -1,4 +1,4 @@
-Freescale MMA8451Q, MMA8452Q, MMA8453Q, MMA8652FC or MMA8653FC
+Freescale MMA8451Q, MMA8452Q, MMA8453Q, MMA8652FC, MMA8653FC or FXLS8471Q
triaxial accelerometer
Required properties:
@@ -9,6 +9,7 @@ Required properties:
* "fsl,mma8453"
* "fsl,mma8652"
* "fsl,mma8653"
+ * "fsl,fxls8471"
- reg: the I2C address of the chip
diff --git a/Documentation/devicetree/bindings/iio/adc/lpc1850-adc.txt b/Documentation/devicetree/bindings/iio/adc/lpc1850-adc.txt
new file mode 100644
index 000000000000..0bcae5140bc5
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/adc/lpc1850-adc.txt
@@ -0,0 +1,21 @@
+NXP LPC1850 ADC bindings
+
+Required properties:
+- compatible: Should be "nxp,lpc1850-adc"
+- reg: Offset and length of the register set for the ADC device
+- interrupts: The interrupt number for the ADC device
+- clocks: The root clock of the ADC controller
+- vref-supply: The regulator supply ADC reference voltage
+- resets: phandle to reset controller and line specifier
+
+Example:
+
+adc0: adc@400e3000 {
+ compatible = "nxp,lpc1850-adc";
+ reg = <0x400e3000 0x1000>;
+ interrupts = <17>;
+ clocks = <&ccu1 CLK_APB3_ADC0>;
+ vref-supply = <&reg_vdda>;
+ resets = <&rgu 40>;
+ status = "disabled";
+};
diff --git a/Documentation/devicetree/bindings/staging/iio/adc/mxs-lradc.txt b/Documentation/devicetree/bindings/iio/adc/mxs-lradc.txt
index 555fb117d4fa..555fb117d4fa 100644
--- a/Documentation/devicetree/bindings/staging/iio/adc/mxs-lradc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/mxs-lradc.txt
diff --git a/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt b/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt
index a9a5fe19ff2a..bf99e2f24788 100644
--- a/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/rockchip-saradc.txt
@@ -1,7 +1,11 @@
Rockchip Successive Approximation Register (SAR) A/D Converter bindings
Required properties:
-- compatible: Should be "rockchip,saradc" or "rockchip,rk3066-tsadc"
+- compatible: should be "rockchip,<name>-saradc" or "rockchip,rk3066-tsadc"
+ - "rockchip,saradc": for rk3188, rk3288
+ - "rockchip,rk3066-tsadc": for rk3036
+ - "rockchip,rk3399-saradc": for rk3399
+
- reg: physical base address of the controller and length of memory mapped
region.
- interrupts: The interrupt number to the cpu. The interrupt specifier format
diff --git a/Documentation/devicetree/bindings/iio/dac/ad5592r.txt b/Documentation/devicetree/bindings/iio/dac/ad5592r.txt
new file mode 100644
index 000000000000..989f96f31c66
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/dac/ad5592r.txt
@@ -0,0 +1,155 @@
+Analog Devices AD5592R/AD5593R DAC/ADC device driver
+
+Required properties for the AD5592R:
+ - compatible: Must be "adi,ad5592r"
+ - reg: SPI chip select number for the device
+ - spi-max-frequency: Max SPI frequency to use (< 30000000)
+ - spi-cpol: The AD5592R requires inverse clock polarity (CPOL) mode
+
+Required properties for the AD5593R:
+ - compatible: Must be "adi,ad5593r"
+ - reg: I2C address of the device
+
+Required properties for all supported chips:
+ - #address-cells: Should be 1.
+ - #size-cells: Should be 0.
+ - channel nodes:
+ Each child node represents one channel and has the following
+ Required properties:
+ * reg: Pin on which this channel is connected to.
+ * adi,mode: Mode or function of this channel.
+ Macros specifying the valid values
+ can be found in <dt-bindings/iio/adi,ad5592r.h>.
+
+ The following values are currently supported:
+ * CH_MODE_UNUSED (the pin is unused)
+ * CH_MODE_ADC (the pin is ADC input)
+ * CH_MODE_DAC (the pin is DAC output)
+ * CH_MODE_DAC_AND_ADC (the pin is DAC output
+ but can be monitored by an ADC, since
+ there is no disadvantage this
+ this should be considered as the
+ preferred DAC mode)
+ * CH_MODE_GPIO (the pin is registered
+ with GPIOLIB)
+ Optional properties:
+ * adi,off-state: State of this channel when unused or the
+ device gets removed. Macros specifying the
+ valid values can be found in
+ <dt-bindings/iio/adi,ad5592r.h>.
+
+ * CH_OFFSTATE_PULLDOWN (the pin is pulled down)
+ * CH_OFFSTATE_OUT_LOW (the pin is output low)
+ * CH_OFFSTATE_OUT_HIGH (the pin is output high)
+ * CH_OFFSTATE_OUT_TRISTATE (the pin is
+ tristated output)
+
+
+Optional properties:
+ - vref-supply: Phandle to the external reference voltage supply. This should
+ only be set if there is an external reference voltage connected to the VREF
+ pin. If the property is not set the internal 2.5V reference is used.
+ - reset-gpios : GPIO spec for the RESET pin. If specified, it will be
+ asserted during driver probe.
+ - gpio-controller: Marks the device node as a GPIO controller.
+ - #gpio-cells: Should be 2. The first cell is the GPIO number and the second
+ cell specifies GPIO flags, as defined in <dt-bindings/gpio/gpio.h>.
+
+AD5592R Example:
+
+ #include <dt-bindings/iio/adi,ad5592r.h>
+
+ vref: regulator-vref {
+ compatible = "regulator-fixed";
+ regulator-name = "vref-ad559x";
+ regulator-min-microvolt = <3300000>;
+ regulator-max-microvolt = <3300000>;
+ regulator-always-on;
+ };
+
+ ad5592r@0 {
+ #size-cells = <0>;
+ #address-cells = <1>;
+ #gpio-cells = <2>;
+ compatible = "adi,ad5592r";
+ reg = <0>;
+
+ spi-max-frequency = <1000000>;
+ spi-cpol;
+
+ vref-supply = <&vref>; /* optional */
+ reset-gpios = <&gpio0 86 0>; /* optional */
+ gpio-controller;
+
+ channel@0 {
+ reg = <0>;
+ adi,mode = <CH_MODE_DAC>;
+ };
+ channel@1 {
+ reg = <1>;
+ adi,mode = <CH_MODE_ADC>;
+ };
+ channel@2 {
+ reg = <2>;
+ adi,mode = <CH_MODE_DAC_AND_ADC>;
+ };
+ channel@3 {
+ reg = <3>;
+ adi,mode = <CH_MODE_DAC_AND_ADC>;
+ adi,off-state = <CH_OFFSTATE_PULLDOWN>;
+ };
+ channel@4 {
+ reg = <4>;
+ adi,mode = <CH_MODE_UNUSED>;
+ adi,off-state = <CH_OFFSTATE_PULLDOWN>;
+ };
+ channel@5 {
+ reg = <5>;
+ adi,mode = <CH_MODE_GPIO>;
+ adi,off-state = <CH_OFFSTATE_PULLDOWN>;
+ };
+ channel@6 {
+ reg = <6>;
+ adi,mode = <CH_MODE_GPIO>;
+ adi,off-state = <CH_OFFSTATE_PULLDOWN>;
+ };
+ channel@7 {
+ reg = <7>;
+ adi,mode = <CH_MODE_GPIO>;
+ adi,off-state = <CH_OFFSTATE_PULLDOWN>;
+ };
+ };
+
+AD5593R Example:
+
+ #include <dt-bindings/iio/adi,ad5592r.h>
+
+ ad5593r@10 {
+ #size-cells = <0>;
+ #address-cells = <1>;
+ #gpio-cells = <2>;
+ compatible = "adi,ad5593r";
+ reg = <0x10>;
+ gpio-controller;
+
+ channel@0 {
+ reg = <0>;
+ adi,mode = <CH_MODE_DAC>;
+ adi,off-state = <CH_OFFSTATE_PULLDOWN>;
+ };
+ channel@1 {
+ reg = <1>;
+ adi,mode = <CH_MODE_ADC>;
+ adi,off-state = <CH_OFFSTATE_PULLDOWN>;
+ };
+ channel@2 {
+ reg = <2>;
+ adi,mode = <CH_MODE_DAC_AND_ADC>;
+ adi,off-state = <CH_OFFSTATE_PULLDOWN>;
+ };
+ channel@6 {
+ reg = <6>;
+ adi,mode = <CH_MODE_GPIO>;
+ adi,off-state = <CH_OFFSTATE_PULLDOWN>;
+ };
+ };
diff --git a/Documentation/devicetree/bindings/iio/dac/lpc1850-dac.txt b/Documentation/devicetree/bindings/iio/dac/lpc1850-dac.txt
new file mode 100644
index 000000000000..7d6647d4af5e
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/dac/lpc1850-dac.txt
@@ -0,0 +1,20 @@
+NXP LPC1850 DAC bindings
+
+Required properties:
+- compatible: Should be "nxp,lpc1850-dac"
+- reg: Offset and length of the register set for the ADC device
+- interrupts: The interrupt number for the ADC device
+- clocks: The root clock of the ADC controller
+- vref-supply: The regulator supply ADC reference voltage
+- resets: phandle to reset controller and line specifier
+
+Example:
+dac: dac@400e1000 {
+ compatible = "nxp,lpc1850-dac";
+ reg = <0x400e1000 0x1000>;
+ interrupts = <0>;
+ clocks = <&ccu1 CLK_APB3_DAC>;
+ vref-supply = <&reg_vdda>;
+ resets = <&rgu 42>;
+ status = "disabled";
+};
diff --git a/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt b/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt
index e4d8f1c52f4a..a9fc11e43b45 100644
--- a/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt
+++ b/Documentation/devicetree/bindings/iio/imu/inv_mpu6050.txt
@@ -8,10 +8,23 @@ Required properties:
- interrupt-parent : should be the phandle for the interrupt controller
- interrupts : interrupt mapping for GPIO IRQ
+Optional properties:
+ - mount-matrix: an optional 3x3 mounting rotation matrix
+
+
Example:
mpu6050@68 {
compatible = "invensense,mpu6050";
reg = <0x68>;
interrupt-parent = <&gpio1>;
interrupts = <18 1>;
+ mount-matrix = "-0.984807753012208", /* x0 */
+ "0", /* y0 */
+ "-0.173648177666930", /* z0 */
+ "0", /* x1 */
+ "-1", /* y1 */
+ "0", /* z1 */
+ "-0.173648177666930", /* x2 */
+ "0", /* y2 */
+ "0.984807753012208"; /* z2 */
};
diff --git a/Documentation/devicetree/bindings/iio/magnetometer/ak8975.txt b/Documentation/devicetree/bindings/iio/magnetometer/ak8975.txt
index 011679f1a425..e1e7dd3259f6 100644
--- a/Documentation/devicetree/bindings/iio/magnetometer/ak8975.txt
+++ b/Documentation/devicetree/bindings/iio/magnetometer/ak8975.txt
@@ -8,6 +8,8 @@ Required properties:
Optional properties:
- gpios : should be device tree identifier of the magnetometer DRDY pin
+ - vdd-supply: an optional regulator that needs to be on to provide VDD
+ - mount-matrix: an optional 3x3 mounting rotation matrix
Example:
@@ -15,4 +17,14 @@ ak8975@0c {
compatible = "asahi-kasei,ak8975";
reg = <0x0c>;
gpios = <&gpj0 7 0>;
+ vdd-supply = <&ldo_3v3_gnss>;
+ mount-matrix = "-0.984807753012208", /* x0 */
+ "0", /* y0 */
+ "-0.173648177666930", /* z0 */
+ "0", /* x1 */
+ "-1", /* y1 */
+ "0", /* z1 */
+ "-0.173648177666930", /* x2 */
+ "0", /* y2 */
+ "0.984807753012208"; /* z2 */
};
diff --git a/Documentation/devicetree/bindings/iio/potentiometer/ds1803.txt b/Documentation/devicetree/bindings/iio/potentiometer/ds1803.txt
new file mode 100644
index 000000000000..df77bf552656
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/potentiometer/ds1803.txt
@@ -0,0 +1,21 @@
+* Maxim Integrated DS1803 digital potentiometer driver
+
+The node for this driver must be a child node of a I2C controller, hence
+all mandatory properties for your controller must be specified. See directory:
+
+ Documentation/devicetree/bindings/i2c
+
+for more details.
+
+Required properties:
+ - compatible: Must be one of the following, depending on the
+ model:
+ "maxim,ds1803-010",
+ "maxim,ds1803-050",
+ "maxim,ds1803-100"
+
+Example:
+ds1803: ds1803@1 {
+ reg = <0x28>;
+ compatible = "maxim,ds1803-010";
+};
diff --git a/Documentation/devicetree/bindings/iio/potentiometer/mcp4131.txt b/Documentation/devicetree/bindings/iio/potentiometer/mcp4131.txt
new file mode 100644
index 000000000000..3ccba16f7035
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/potentiometer/mcp4131.txt
@@ -0,0 +1,84 @@
+* Microchip MCP413X/414X/415X/416X/423X/424X/425X/426X Digital Potentiometer
+ driver
+
+The node for this driver must be a child node of a SPI controller, hence
+all mandatory properties described in
+
+ Documentation/devicetree/bindings/spi/spi-bus.txt
+
+must be specified.
+
+Required properties:
+ - compatible: Must be one of the following, depending on the
+ model:
+ "microchip,mcp4131-502"
+ "microchip,mcp4131-103"
+ "microchip,mcp4131-503"
+ "microchip,mcp4131-104"
+ "microchip,mcp4132-502"
+ "microchip,mcp4132-103"
+ "microchip,mcp4132-503"
+ "microchip,mcp4132-104"
+ "microchip,mcp4141-502"
+ "microchip,mcp4141-103"
+ "microchip,mcp4141-503"
+ "microchip,mcp4141-104"
+ "microchip,mcp4142-502"
+ "microchip,mcp4142-103"
+ "microchip,mcp4142-503"
+ "microchip,mcp4142-104"
+ "microchip,mcp4151-502"
+ "microchip,mcp4151-103"
+ "microchip,mcp4151-503"
+ "microchip,mcp4151-104"
+ "microchip,mcp4152-502"
+ "microchip,mcp4152-103"
+ "microchip,mcp4152-503"
+ "microchip,mcp4152-104"
+ "microchip,mcp4161-502"
+ "microchip,mcp4161-103"
+ "microchip,mcp4161-503"
+ "microchip,mcp4161-104"
+ "microchip,mcp4162-502"
+ "microchip,mcp4162-103"
+ "microchip,mcp4162-503"
+ "microchip,mcp4162-104"
+ "microchip,mcp4231-502"
+ "microchip,mcp4231-103"
+ "microchip,mcp4231-503"
+ "microchip,mcp4231-104"
+ "microchip,mcp4232-502"
+ "microchip,mcp4232-103"
+ "microchip,mcp4232-503"
+ "microchip,mcp4232-104"
+ "microchip,mcp4241-502"
+ "microchip,mcp4241-103"
+ "microchip,mcp4241-503"
+ "microchip,mcp4241-104"
+ "microchip,mcp4242-502"
+ "microchip,mcp4242-103"
+ "microchip,mcp4242-503"
+ "microchip,mcp4242-104"
+ "microchip,mcp4251-502"
+ "microchip,mcp4251-103"
+ "microchip,mcp4251-503"
+ "microchip,mcp4251-104"
+ "microchip,mcp4252-502"
+ "microchip,mcp4252-103"
+ "microchip,mcp4252-503"
+ "microchip,mcp4252-104"
+ "microchip,mcp4261-502"
+ "microchip,mcp4261-103"
+ "microchip,mcp4261-503"
+ "microchip,mcp4261-104"
+ "microchip,mcp4262-502"
+ "microchip,mcp4262-103"
+ "microchip,mcp4262-503"
+ "microchip,mcp4262-104"
+
+Example:
+mcp4131: mcp4131@0 {
+ compatible = "mcp4131-502";
+ reg = <0>;
+ spi-max-frequency = <500000>;
+};
diff --git a/Documentation/devicetree/bindings/iio/pressure/hp03.txt b/Documentation/devicetree/bindings/iio/pressure/hp03.txt
new file mode 100644
index 000000000000..54e7e70bcea5
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/pressure/hp03.txt
@@ -0,0 +1,17 @@
+HopeRF HP03 digital pressure/temperature sensors
+
+Required properties:
+- compatible: must be "hoperf,hp03"
+- xclr-gpio: must be the device tree identifier of the XCLR pin.
+              The XCLR pin resets the ADC in the chip; it must
+              be pulled high before the conversion and readout
+              of the value from the ADC registers, and pulled
+              low afterward.
+
+Example:
+
+hp03@77 {
+ compatible = "hoperf,hp03";
+ reg = <0x77>;
+ xclr-gpio = <&portc 0 0x0>;
+};
diff --git a/Documentation/devicetree/bindings/iio/pressure/ms5611.txt b/Documentation/devicetree/bindings/iio/pressure/ms5611.txt
new file mode 100644
index 000000000000..17bca866c084
--- /dev/null
+++ b/Documentation/devicetree/bindings/iio/pressure/ms5611.txt
@@ -0,0 +1,19 @@
+MEAS ms5611 family pressure sensors
+
+Pressure sensors from MEAS Switzerland with SPI and I2C bus interfaces.
+
+Required properties:
+- compatible: "meas,ms5611" or "meas,ms5607"
+- reg: the I2C address or SPI chip select the device will respond to
+
+Optional properties:
+- vdd-supply: an optional regulator that needs to be on to provide VDD
+ power to the sensor.
+
+Example:
+
+ms5607@77 {
+ compatible = "meas,ms5607";
+ reg = <0x77>;
+ vdd-supply = <&ldo_3v3_gnss>;
+};
diff --git a/Documentation/devicetree/bindings/iio/st-sensors.txt b/Documentation/devicetree/bindings/iio/st-sensors.txt
index d4b87cc1e446..5844cf72862d 100644
--- a/Documentation/devicetree/bindings/iio/st-sensors.txt
+++ b/Documentation/devicetree/bindings/iio/st-sensors.txt
@@ -16,6 +16,10 @@ Optional properties:
- st,drdy-int-pin: the pin on the package that will be used to signal
"data ready" (valid values: 1 or 2). This property is not configurable
on all sensors.
+- drive-open-drain: the interrupt/data ready line will be configured
+  as open drain, which is useful if several sensors share the same
+  interrupt line. This is a boolean property. (This binding is taken
+  from pinctrl/pinctrl-bindings.txt.)
Sensors may also have applicable pin control settings, those use the
standard bindings from pinctrl/pinctrl-bindings.txt.
@@ -37,6 +41,7 @@ Accelerometers:
- st,lsm330-accel
- st,lsm303agr-accel
- st,lis2dh12-accel
+- st,h3lis331dl-accel
Gyroscopes:
- st,l3g4200d-gyro
@@ -46,6 +51,7 @@ Gyroscopes:
- st,l3gd20-gyro
- st,l3g4is-gyro
- st,lsm330-gyro
+- st,lsm9ds0-gyro
Magnetometers:
- st,lsm303agr-magn
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 32f965807a07..4454483cc53f 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -151,6 +151,7 @@ lsi LSI Corp. (LSI Logic)
lltc Linear Technology Corporation
marvell Marvell Technology Group Ltd.
maxim Maxim Integrated Products
+meas Measurement Specialties
mediatek MediaTek Inc.
melexis Melexis N.V.
merrii Merrii Technology Co., Ltd.
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index 76a6c0a70dee..c63eea0c1c8c 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -268,6 +268,10 @@ IIO
devm_iio_kfifo_free()
devm_iio_trigger_alloc()
devm_iio_trigger_free()
+ devm_iio_channel_get()
+ devm_iio_channel_release()
+ devm_iio_channel_get_all()
+ devm_iio_channel_release_all()
INPUT
devm_input_allocate_device()
diff --git a/Documentation/sync_file.txt b/Documentation/sync_file.txt
new file mode 100644
index 000000000000..eaf8297dbca2
--- /dev/null
+++ b/Documentation/sync_file.txt
@@ -0,0 +1,69 @@
+ Sync File API Guide
+ ~~~~~~~~~~~~~~~~~~~
+
+ Gustavo Padovan
+ <gustavo at padovan dot org>
+
+This document serves as a guide for device driver writers on what the
+sync_file API is, and how drivers can support it. Sync file is the carrier of
+the fences (struct fence) that need to be synchronized between drivers or
+across process boundaries.
+
+The sync_file API is meant to be used to send and receive fence information
+to/from userspace. It enables userspace to do explicit fencing, where, instead
+of attaching a fence to the buffer, a producer driver (such as a GPU or V4L
+driver) sends the fence related to the buffer to userspace via a sync_file.
+
+The sync_file can then be sent to the consumer (a DRM driver, for example),
+which will not use the buffer for anything before the fence(s) signal, i.e.
+before the driver that issued the fence has stopped using/processing the
+buffer and has signalled that the buffer is ready to use. The same applies to
+the consumer -> producer part of the cycle.
+
+Sync files give userspace visibility into buffer-sharing synchronization
+between drivers.
+
+Sync file was originally added in the Android kernel, but the current Linux
+desktop can benefit a lot from it as well.
+
+in-fences and out-fences
+------------------------
+
+Sync files can go either to or from userspace. When a sync_file is sent from
+the driver to userspace we call the fences it contains 'out-fences'. They are
+related to a buffer that the driver is processing or is going to process, so
+the driver can create an out-fence to be able to notify, through fence_signal(),
+when it has finished using (or processing) that buffer. Out-fences are fences
+that the driver creates.
+
+On the other hand, if the driver receives fence(s) through a sync_file from
+userspace, we call these fence(s) 'in-fences'. Receiving in-fences means that
+we need to wait for the fence(s) to signal before using any buffer related to
+the in-fences.
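+
+Note that the fd backing a sync_file can also be waited on from userspace
+with poll(); the descriptor becomes readable (POLLIN) once all fences it
+contains have signalled. A minimal userspace sketch (the helper name is made
+up for illustration; the behaviour follows the poll handler in
+drivers/dma-buf/sync_file.c):
+
+	#include <poll.h>
+
+	/* Wait up to timeout_ms for every fence in the sync_file to signal.
+	 * Returns 1 when signalled, 0 on timeout, -1 on error. */
+	static int sync_file_wait(int fd, int timeout_ms)
+	{
+		struct pollfd pfd = { .fd = fd, .events = POLLIN };
+		int ret = poll(&pfd, 1, timeout_ms);
+
+		if (ret < 0)
+			return -1;
+		if (ret == 0)
+			return 0;
+		return (pfd.revents & POLLIN) ? 1 : -1;
+	}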
+
+Creating Sync Files
+-------------------
+
+When a driver needs to send an out-fence to userspace it creates a sync_file.
+
+Interface:
+ struct sync_file *sync_file_create(struct fence *fence);
+
+The caller passes the out-fence and gets back the sync_file. That is just the
+first step; next it needs to install an fd on sync_file->file. So it gets an
+fd:
+
+ fd = get_unused_fd_flags(O_CLOEXEC);
+
+and installs it on sync_file->file:
+
+ fd_install(fd, sync_file->file);
+
+The sync_file fd can now be sent to userspace.
+
+If the creation process fails, or the sync_file needs to be released for any
+other reason, fput(sync_file->file) should be used.
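+
+Putting the steps above together, a driver-side helper might look like this
+(a minimal sketch; the helper name out_fence_to_fd is made up for
+illustration, and the usual <linux/file.h>, <linux/fcntl.h> and
+<linux/sync_file.h> includes are assumed):
+
+	static int out_fence_to_fd(struct fence *fence)
+	{
+		struct sync_file *sync_file;
+		int fd;
+
+		sync_file = sync_file_create(fence);
+		if (!sync_file)
+			return -ENOMEM;
+
+		fd = get_unused_fd_flags(O_CLOEXEC);
+		if (fd < 0) {
+			fput(sync_file->file);
+			return fd;
+		}
+
+		fd_install(fd, sync_file->file);
+		return fd;	/* ready to be handed to userspace */
+	}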
+
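+Merging Sync Files
+------------------
+
+Userspace can combine two sync_file fds into a new one with the
+SYNC_IOC_MERGE ioctl, implemented in drivers/dma-buf/sync_file.c. A hedged
+userspace sketch, assuming <linux/sync_file.h> provides SYNC_IOC_MERGE and
+struct sync_merge_data as used by that code (flags and pad must be zero):
+
+	#include <string.h>
+	#include <sys/ioctl.h>
+	#include <linux/sync_file.h>
+
+	/* Returns the fd of the merged sync_file, or -1 on error. */
+	static int sync_file_merge_fds(int fd_a, int fd_b, const char *name)
+	{
+		struct sync_merge_data data;
+
+		memset(&data, 0, sizeof(data));
+		data.fd2 = fd_b;
+		strncpy(data.name, name, sizeof(data.name) - 1);
+
+		if (ioctl(fd_a, SYNC_IOC_MERGE, &data) < 0)
+			return -1;
+
+		return data.fence;
+	}
+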
+References:
+[1] struct sync_file in include/linux/sync_file.h
+[2] All interfaces mentioned above defined in include/linux/sync_file.h
diff --git a/MAINTAINERS b/MAINTAINERS
index 5504c0de47ad..49d1e8339e57 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -776,6 +776,15 @@ S: Supported
F: drivers/android/
F: drivers/staging/android/
+ANDROID ION DRIVER
+M: Laura Abbott <labbott@redhat.com>
+M: Sumit Semwal <sumit.semwal@linaro.org>
+L: devel@driverdev.osuosl.org
+S: Supported
+F: drivers/staging/android/ion
+F: drivers/staging/android/uapi/ion.h
+F: drivers/staging/android/uapi/ion_test.h
+
AOA (Apple Onboard Audio) ALSA DRIVER
M: Johannes Berg <johannes@sipsolutions.net>
L: linuxppc-dev@lists.ozlabs.org
@@ -4746,6 +4755,7 @@ F: sound/soc/fsl/mpc8610_hpcd.c
FREESCALE QORIQ MANAGEMENT COMPLEX DRIVER
M: "J. German Rivera" <German.Rivera@freescale.com>
+M: Stuart Yoder <stuart.yoder@nxp.com>
L: linux-kernel@vger.kernel.org
S: Maintained
F: drivers/staging/fsl-mc/
@@ -5617,7 +5627,7 @@ IIO SUBSYSTEM AND DRIVERS
M: Jonathan Cameron <jic23@kernel.org>
R: Hartmut Knaack <knaack.h@gmx.de>
R: Lars-Peter Clausen <lars@metafoo.de>
-R: Peter Meerwald <pmeerw@pmeerw.net>
+R: Peter Meerwald-Stadler <pmeerw@pmeerw.net>
L: linux-iio@vger.kernel.org
S: Maintained
F: drivers/iio/
diff --git a/drivers/Kconfig b/drivers/Kconfig
index d2ac339de85f..430f761b0d8d 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -114,6 +114,8 @@ source "drivers/rtc/Kconfig"
source "drivers/dma/Kconfig"
+source "drivers/dma-buf/Kconfig"
+
source "drivers/dca/Kconfig"
source "drivers/auxdisplay/Kconfig"
diff --git a/drivers/dma-buf/Kconfig b/drivers/dma-buf/Kconfig
new file mode 100644
index 000000000000..9824bc4addf8
--- /dev/null
+++ b/drivers/dma-buf/Kconfig
@@ -0,0 +1,11 @@
+menu "DMABUF options"
+
+config SYNC_FILE
+ bool "sync_file support for fences"
+ default n
+ select ANON_INODES
+ select DMA_SHARED_BUFFER
+ ---help---
+	  This option enables the fence synchronization framework to export
+	  sync_files, which can represent one or more fences, to userspace.
+endmenu
diff --git a/drivers/dma-buf/Makefile b/drivers/dma-buf/Makefile
index 57a675f90cd0..4a424eca75ed 100644
--- a/drivers/dma-buf/Makefile
+++ b/drivers/dma-buf/Makefile
@@ -1 +1,2 @@
obj-y := dma-buf.o fence.o reservation.o seqno-fence.o
+obj-$(CONFIG_SYNC_FILE) += sync_file.o
diff --git a/drivers/dma-buf/sync_file.c b/drivers/dma-buf/sync_file.c
new file mode 100644
index 000000000000..f08cf2d8309e
--- /dev/null
+++ b/drivers/dma-buf/sync_file.c
@@ -0,0 +1,395 @@
+/*
+ * drivers/dma-buf/sync_file.c
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/export.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/kernel.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/anon_inodes.h>
+#include <linux/sync_file.h>
+#include <uapi/linux/sync_file.h>
+
+static const struct file_operations sync_file_fops;
+
+static struct sync_file *sync_file_alloc(int size)
+{
+ struct sync_file *sync_file;
+
+ sync_file = kzalloc(size, GFP_KERNEL);
+ if (!sync_file)
+ return NULL;
+
+ sync_file->file = anon_inode_getfile("sync_file", &sync_file_fops,
+ sync_file, 0);
+ if (IS_ERR(sync_file->file))
+ goto err;
+
+ kref_init(&sync_file->kref);
+
+ init_waitqueue_head(&sync_file->wq);
+
+ return sync_file;
+
+err:
+ kfree(sync_file);
+ return NULL;
+}
+
+static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
+{
+ struct sync_file_cb *check;
+ struct sync_file *sync_file;
+
+ check = container_of(cb, struct sync_file_cb, cb);
+ sync_file = check->sync_file;
+
+ if (atomic_dec_and_test(&sync_file->status))
+ wake_up_all(&sync_file->wq);
+}
+
+/**
+ * sync_file_create() - creates a sync file
+ * @fence: fence to add to the sync_file
+ *
+ * Creates a sync_file containing @fence. Once this is called, the sync_file
+ * takes ownership of @fence. The sync_file can be released with
+ * fput(sync_file->file). Returns the sync_file or NULL in case of error.
+ */
+struct sync_file *sync_file_create(struct fence *fence)
+{
+ struct sync_file *sync_file;
+
+ sync_file = sync_file_alloc(offsetof(struct sync_file, cbs[1]));
+ if (!sync_file)
+ return NULL;
+
+ sync_file->num_fences = 1;
+ atomic_set(&sync_file->status, 1);
+ snprintf(sync_file->name, sizeof(sync_file->name), "%s-%s%d-%d",
+ fence->ops->get_driver_name(fence),
+ fence->ops->get_timeline_name(fence), fence->context,
+ fence->seqno);
+
+ sync_file->cbs[0].fence = fence;
+ sync_file->cbs[0].sync_file = sync_file;
+ if (fence_add_callback(fence, &sync_file->cbs[0].cb,
+ fence_check_cb_func))
+ atomic_dec(&sync_file->status);
+
+ return sync_file;
+}
+EXPORT_SYMBOL(sync_file_create);
+
+/**
+ * sync_file_fdget() - get a sync_file from an fd
+ * @fd: fd referencing a fence
+ *
+ * Ensures @fd references a valid sync_file, increments the refcount of the
+ * backing file. Returns the sync_file or NULL in case of error.
+ */
+static struct sync_file *sync_file_fdget(int fd)
+{
+ struct file *file = fget(fd);
+
+ if (!file)
+ return NULL;
+
+ if (file->f_op != &sync_file_fops)
+ goto err;
+
+ return file->private_data;
+
+err:
+ fput(file);
+ return NULL;
+}
+
+static void sync_file_add_pt(struct sync_file *sync_file, int *i,
+ struct fence *fence)
+{
+ sync_file->cbs[*i].fence = fence;
+ sync_file->cbs[*i].sync_file = sync_file;
+
+ if (!fence_add_callback(fence, &sync_file->cbs[*i].cb,
+ fence_check_cb_func)) {
+ fence_get(fence);
+ (*i)++;
+ }
+}
+
+/**
+ * sync_file_merge() - merge two sync_files
+ * @name: name of new fence
+ * @a: sync_file a
+ * @b: sync_file b
+ *
+ * Creates a new sync_file which contains copies of all the fences in both
+ * @a and @b. @a and @b remain valid, independent sync_file. Returns the
+ * new merged sync_file or NULL in case of error.
+ */
+static struct sync_file *sync_file_merge(const char *name, struct sync_file *a,
+ struct sync_file *b)
+{
+ int num_fences = a->num_fences + b->num_fences;
+ struct sync_file *sync_file;
+ int i, i_a, i_b;
+ unsigned long size = offsetof(struct sync_file, cbs[num_fences]);
+
+ sync_file = sync_file_alloc(size);
+ if (!sync_file)
+ return NULL;
+
+ atomic_set(&sync_file->status, num_fences);
+
+ /*
+ * Assume sync_file a and b are both ordered and have no
+ * duplicates with the same context.
+ *
+ * If a sync_file can only be created with sync_file_merge
+ * and sync_file_create, this is a reasonable assumption.
+ */
+ for (i = i_a = i_b = 0; i_a < a->num_fences && i_b < b->num_fences; ) {
+ struct fence *pt_a = a->cbs[i_a].fence;
+ struct fence *pt_b = b->cbs[i_b].fence;
+
+ if (pt_a->context < pt_b->context) {
+ sync_file_add_pt(sync_file, &i, pt_a);
+
+ i_a++;
+ } else if (pt_a->context > pt_b->context) {
+ sync_file_add_pt(sync_file, &i, pt_b);
+
+ i_b++;
+ } else {
+ if (pt_a->seqno - pt_b->seqno <= INT_MAX)
+ sync_file_add_pt(sync_file, &i, pt_a);
+ else
+ sync_file_add_pt(sync_file, &i, pt_b);
+
+ i_a++;
+ i_b++;
+ }
+ }
+
+ for (; i_a < a->num_fences; i_a++)
+ sync_file_add_pt(sync_file, &i, a->cbs[i_a].fence);
+
+ for (; i_b < b->num_fences; i_b++)
+ sync_file_add_pt(sync_file, &i, b->cbs[i_b].fence);
+
+ if (num_fences > i)
+ atomic_sub(num_fences - i, &sync_file->status);
+ sync_file->num_fences = i;
+
+ strlcpy(sync_file->name, name, sizeof(sync_file->name));
+ return sync_file;
+}
+
+static void sync_file_free(struct kref *kref)
+{
+ struct sync_file *sync_file = container_of(kref, struct sync_file,
+ kref);
+ int i;
+
+ for (i = 0; i < sync_file->num_fences; ++i) {
+ fence_remove_callback(sync_file->cbs[i].fence,
+ &sync_file->cbs[i].cb);
+ fence_put(sync_file->cbs[i].fence);
+ }
+
+ kfree(sync_file);
+}
+
+static int sync_file_release(struct inode *inode, struct file *file)
+{
+ struct sync_file *sync_file = file->private_data;
+
+ kref_put(&sync_file->kref, sync_file_free);
+ return 0;
+}
+
+static unsigned int sync_file_poll(struct file *file, poll_table *wait)
+{
+ struct sync_file *sync_file = file->private_data;
+ int status;
+
+ poll_wait(file, &sync_file->wq, wait);
+
+ status = atomic_read(&sync_file->status);
+
+ if (!status)
+ return POLLIN;
+ if (status < 0)
+ return POLLERR;
+ return 0;
+}
+
+static long sync_file_ioctl_merge(struct sync_file *sync_file,
+ unsigned long arg)
+{
+ int fd = get_unused_fd_flags(O_CLOEXEC);
+ int err;
+ struct sync_file *fence2, *fence3;
+ struct sync_merge_data data;
+
+ if (fd < 0)
+ return fd;
+
+ if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
+ err = -EFAULT;
+ goto err_put_fd;
+ }
+
+ if (data.flags || data.pad) {
+ err = -EINVAL;
+ goto err_put_fd;
+ }
+
+ fence2 = sync_file_fdget(data.fd2);
+ if (!fence2) {
+ err = -ENOENT;
+ goto err_put_fd;
+ }
+
+ data.name[sizeof(data.name) - 1] = '\0';
+ fence3 = sync_file_merge(data.name, sync_file, fence2);
+ if (!fence3) {
+ err = -ENOMEM;
+ goto err_put_fence2;
+ }
+
+ data.fence = fd;
+ if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
+ err = -EFAULT;
+ goto err_put_fence3;
+ }
+
+ fd_install(fd, fence3->file);
+ fput(fence2->file);
+ return 0;
+
+err_put_fence3:
+ fput(fence3->file);
+
+err_put_fence2:
+ fput(fence2->file);
+
+err_put_fd:
+ put_unused_fd(fd);
+ return err;
+}
+
+static void sync_fill_fence_info(struct fence *fence,
+ struct sync_fence_info *info)
+{
+ strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
+ sizeof(info->obj_name));
+ strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
+ sizeof(info->driver_name));
+ if (fence_is_signaled(fence))
+ info->status = fence->status >= 0 ? 1 : fence->status;
+ else
+ info->status = 0;
+ info->timestamp_ns = ktime_to_ns(fence->timestamp);
+}
+
+static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
+ unsigned long arg)
+{
+ struct sync_file_info info;
+ struct sync_fence_info *fence_info = NULL;
+ __u32 size;
+ int ret, i;
+
+ if (copy_from_user(&info, (void __user *)arg, sizeof(info)))
+ return -EFAULT;
+
+ if (info.flags || info.pad)
+ return -EINVAL;
+
+ /*
+ * Passing num_fences = 0 means that userspace doesn't want to
+ * retrieve any sync_fence_info. If num_fences = 0 we skip filling
+ * sync_fence_info and return the actual number of fences on
+ * info->num_fences.
+ */
+ if (!info.num_fences)
+ goto no_fences;
+
+ if (info.num_fences < sync_file->num_fences)
+ return -EINVAL;
+
+ size = sync_file->num_fences * sizeof(*fence_info);
+ fence_info = kzalloc(size, GFP_KERNEL);
+ if (!fence_info)
+ return -ENOMEM;
+
+ for (i = 0; i < sync_file->num_fences; ++i)
+ sync_fill_fence_info(sync_file->cbs[i].fence, &fence_info[i]);
+
+ if (copy_to_user(u64_to_user_ptr(info.sync_fence_info), fence_info,
+ size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+no_fences:
+ strlcpy(info.name, sync_file->name, sizeof(info.name));
+ info.status = atomic_read(&sync_file->status);
+ if (info.status >= 0)
+ info.status = !info.status;
+
+ info.num_fences = sync_file->num_fences;
+
+ if (copy_to_user((void __user *)arg, &info, sizeof(info)))
+ ret = -EFAULT;
+ else
+ ret = 0;
+
+out:
+ kfree(fence_info);
+
+ return ret;
+}
+
+static long sync_file_ioctl(struct file *file, unsigned int cmd,
+ unsigned long arg)
+{
+ struct sync_file *sync_file = file->private_data;
+
+ switch (cmd) {
+ case SYNC_IOC_MERGE:
+ return sync_file_ioctl_merge(sync_file, arg);
+
+ case SYNC_IOC_FILE_INFO:
+ return sync_file_ioctl_fence_info(sync_file, arg);
+
+ default:
+ return -ENOTTY;
+ }
+}
+
+static const struct file_operations sync_file_fops = {
+ .release = sync_file_release,
+ .poll = sync_file_poll,
+ .unlocked_ioctl = sync_file_ioctl,
+ .compat_ioctl = sync_file_ioctl,
+};
+
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 236ada93df53..afdd55ddf821 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -28,11 +28,6 @@
#define BO_LOCKED 0x4000
#define BO_PINNED 0x2000
-static inline void __user *to_user_ptr(u64 address)
-{
- return (void __user *)(uintptr_t)address;
-}
-
static struct etnaviv_gem_submit *submit_create(struct drm_device *dev,
struct etnaviv_gpu *gpu, size_t nr)
{
@@ -347,21 +342,21 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
cmdbuf->exec_state = args->exec_state;
cmdbuf->ctx = file->driver_priv;
- ret = copy_from_user(bos, to_user_ptr(args->bos),
+ ret = copy_from_user(bos, u64_to_user_ptr(args->bos),
args->nr_bos * sizeof(*bos));
if (ret) {
ret = -EFAULT;
goto err_submit_cmds;
}
- ret = copy_from_user(relocs, to_user_ptr(args->relocs),
+ ret = copy_from_user(relocs, u64_to_user_ptr(args->relocs),
args->nr_relocs * sizeof(*relocs));
if (ret) {
ret = -EFAULT;
goto err_submit_cmds;
}
- ret = copy_from_user(stream, to_user_ptr(args->stream),
+ ret = copy_from_user(stream, u64_to_user_ptr(args->stream),
args->stream_size);
if (ret) {
ret = -EFAULT;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index daba7ebb9699..5d7a7c4f5136 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3577,11 +3577,6 @@ static inline i915_reg_t i915_vgacntrl_reg(struct drm_device *dev)
return VGACNTRL;
}
-static inline void __user *to_user_ptr(u64 address)
-{
- return (void __user *)(uintptr_t)address;
-}
-
static inline unsigned long msecs_to_jiffies_timeout(const unsigned int m)
{
unsigned long j = msecs_to_jiffies(m);
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index f2cb9a9539ee..233adc31ef0c 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -324,7 +324,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
{
struct drm_device *dev = obj->base.dev;
void *vaddr = obj->phys_handle->vaddr + args->offset;
- char __user *user_data = to_user_ptr(args->data_ptr);
+ char __user *user_data = u64_to_user_ptr(args->data_ptr);
int ret = 0;
/* We manually control the domain here and pretend that it
@@ -605,7 +605,7 @@ i915_gem_shmem_pread(struct drm_device *dev,
int needs_clflush = 0;
struct sg_page_iter sg_iter;
- user_data = to_user_ptr(args->data_ptr);
+ user_data = u64_to_user_ptr(args->data_ptr);
remain = args->size;
obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
@@ -692,7 +692,7 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
return 0;
if (!access_ok(VERIFY_WRITE,
- to_user_ptr(args->data_ptr),
+ u64_to_user_ptr(args->data_ptr),
args->size))
return -EFAULT;
@@ -783,7 +783,7 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
if (ret)
goto out_unpin;
- user_data = to_user_ptr(args->data_ptr);
+ user_data = u64_to_user_ptr(args->data_ptr);
remain = args->size;
offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
@@ -907,7 +907,7 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
int needs_clflush_before = 0;
struct sg_page_iter sg_iter;
- user_data = to_user_ptr(args->data_ptr);
+ user_data = u64_to_user_ptr(args->data_ptr);
remain = args->size;
obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
@@ -1036,12 +1036,12 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
return 0;
if (!access_ok(VERIFY_READ,
- to_user_ptr(args->data_ptr),
+ u64_to_user_ptr(args->data_ptr),
args->size))
return -EFAULT;
if (likely(!i915.prefault_disable)) {
- ret = fault_in_multipages_readable(to_user_ptr(args->data_ptr),
+ ret = fault_in_multipages_readable(u64_to_user_ptr(args->data_ptr),
args->size);
if (ret)
return -EFAULT;
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index b845f468dd74..a676eedb441b 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -514,7 +514,7 @@ i915_gem_execbuffer_relocate_vma(struct i915_vma *vma,
struct drm_i915_gem_exec_object2 *entry = vma->exec_entry;
int remain, ret;
- user_relocs = to_user_ptr(entry->relocs_ptr);
+ user_relocs = u64_to_user_ptr(entry->relocs_ptr);
remain = entry->relocation_count;
while (remain) {
@@ -865,7 +865,7 @@ i915_gem_execbuffer_relocate_slow(struct drm_device *dev,
u64 invalid_offset = (u64)-1;
int j;
- user_relocs = to_user_ptr(exec[i].relocs_ptr);
+ user_relocs = u64_to_user_ptr(exec[i].relocs_ptr);
if (copy_from_user(reloc+total, user_relocs,
exec[i].relocation_count * sizeof(*reloc))) {
@@ -1009,7 +1009,7 @@ validate_exec_list(struct drm_device *dev,
invalid_flags |= EXEC_OBJECT_NEEDS_GTT;
for (i = 0; i < count; i++) {
- char __user *ptr = to_user_ptr(exec[i].relocs_ptr);
+ char __user *ptr = u64_to_user_ptr(exec[i].relocs_ptr);
int length; /* limited by fault_in_pages_readable() */
if (exec[i].flags & invalid_flags)
@@ -1696,7 +1696,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
return -ENOMEM;
}
ret = copy_from_user(exec_list,
- to_user_ptr(args->buffers_ptr),
+ u64_to_user_ptr(args->buffers_ptr),
sizeof(*exec_list) * args->buffer_count);
if (ret != 0) {
DRM_DEBUG("copy %d exec entries failed %d\n",
@@ -1732,7 +1732,7 @@ i915_gem_execbuffer(struct drm_device *dev, void *data,
ret = i915_gem_do_execbuffer(dev, data, file, &exec2, exec2_list);
if (!ret) {
struct drm_i915_gem_exec_object __user *user_exec_list =
- to_user_ptr(args->buffers_ptr);
+ u64_to_user_ptr(args->buffers_ptr);
/* Copy the new buffer offsets back to the user's exec list. */
for (i = 0; i < args->buffer_count; i++) {
@@ -1786,7 +1786,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
return -ENOMEM;
}
ret = copy_from_user(exec2_list,
- to_user_ptr(args->buffers_ptr),
+ u64_to_user_ptr(args->buffers_ptr),
sizeof(*exec2_list) * args->buffer_count);
if (ret != 0) {
DRM_DEBUG("copy %d exec entries failed %d\n",
@@ -1799,7 +1799,7 @@ i915_gem_execbuffer2(struct drm_device *dev, void *data,
if (!ret) {
/* Copy the new buffer offsets back to the user's exec list. */
struct drm_i915_gem_exec_object2 __user *user_exec_list =
- to_user_ptr(args->buffers_ptr);
+ u64_to_user_ptr(args->buffers_ptr);
int i;
for (i = 0; i < args->buffer_count; i++) {
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 43d2181231c0..23d25283616c 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -28,11 +28,6 @@
#define BO_LOCKED 0x4000
#define BO_PINNED 0x2000
-static inline void __user *to_user_ptr(u64 address)
-{
- return (void __user *)(uintptr_t)address;
-}
-
static struct msm_gem_submit *submit_create(struct drm_device *dev,
struct msm_gpu *gpu, int nr)
{
@@ -68,7 +63,7 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
struct drm_gem_object *obj;
struct msm_gem_object *msm_obj;
void __user *userptr =
- to_user_ptr(args->bos + (i * sizeof(submit_bo)));
+ u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));
ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
if (ret) {
@@ -257,7 +252,7 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
for (i = 0; i < nr_relocs; i++) {
struct drm_msm_gem_submit_reloc submit_reloc;
void __user *userptr =
- to_user_ptr(relocs + (i * sizeof(submit_reloc)));
+ u64_to_user_ptr(relocs + (i * sizeof(submit_reloc)));
uint32_t iova, off;
bool valid;
@@ -356,7 +351,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
for (i = 0; i < args->nr_cmds; i++) {
struct drm_msm_gem_submit_cmd submit_cmd;
void __user *userptr =
- to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
+ u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
struct msm_gem_object *msm_obj;
uint32_t iova;
diff --git a/drivers/iio/accel/Kconfig b/drivers/iio/accel/Kconfig
index b0d3ecf3318b..e4a758cd7d35 100644
--- a/drivers/iio/accel/Kconfig
+++ b/drivers/iio/accel/Kconfig
@@ -64,7 +64,7 @@ config IIO_ST_ACCEL_3AXIS
help
Say yes here to build support for STMicroelectronics accelerometers:
LSM303DLH, LSM303DLHC, LIS3DH, LSM330D, LSM330DL, LSM330DLC,
- LIS331DLH, LSM303DL, LSM303DLM, LSM330, LIS2DH12.
+ LIS331DLH, LSM303DL, LSM303DLM, LSM330, LIS2DH12, H3LIS331DL.
This driver can also be built as a module. If so, these modules
will be created:
@@ -143,7 +143,8 @@ config MMA8452
select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for the following Freescale 3-axis
- accelerometers: MMA8451Q, MMA8452Q, MMA8453Q, MMA8652FC, MMA8653FC.
+ accelerometers: MMA8451Q, MMA8452Q, MMA8453Q, MMA8652FC, MMA8653FC,
+ FXLS8471Q.
To compile this driver as a module, choose M here: the module
will be called mma8452.
diff --git a/drivers/iio/accel/bmc150-accel-core.c b/drivers/iio/accel/bmc150-accel-core.c
index 2072a31e813b..197e693e7e7b 100644
--- a/drivers/iio/accel/bmc150-accel-core.c
+++ b/drivers/iio/accel/bmc150-accel-core.c
@@ -25,7 +25,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/acpi.h>
-#include <linux/gpio/consumer.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/iio/iio.h>
@@ -138,6 +137,7 @@ enum bmc150_accel_axis {
AXIS_X,
AXIS_Y,
AXIS_Z,
+ AXIS_MAX,
};
enum bmc150_power_modes {
@@ -188,7 +188,6 @@ enum bmc150_accel_trigger_id {
struct bmc150_accel_data {
struct regmap *regmap;
- struct device *dev;
int irq;
struct bmc150_accel_interrupt interrupts[BMC150_ACCEL_INTERRUPTS];
atomic_t active_intr;
@@ -246,16 +245,18 @@ static const struct {
{500000, BMC150_ACCEL_SLEEP_500_MS},
{1000000, BMC150_ACCEL_SLEEP_1_SEC} };
-static const struct regmap_config bmc150_i2c_regmap_conf = {
+const struct regmap_config bmc150_regmap_conf = {
.reg_bits = 8,
.val_bits = 8,
.max_register = 0x3f,
};
+EXPORT_SYMBOL_GPL(bmc150_regmap_conf);
static int bmc150_accel_set_mode(struct bmc150_accel_data *data,
enum bmc150_power_modes mode,
int dur_us)
{
+ struct device *dev = regmap_get_device(data->regmap);
int i;
int ret;
u8 lpw_bits;
@@ -279,11 +280,11 @@ static int bmc150_accel_set_mode(struct bmc150_accel_data *data,
lpw_bits = mode << BMC150_ACCEL_PMU_MODE_SHIFT;
lpw_bits |= (dur_val << BMC150_ACCEL_PMU_BIT_SLEEP_DUR_SHIFT);
- dev_dbg(data->dev, "Set Mode bits %x\n", lpw_bits);
+ dev_dbg(dev, "Set Mode bits %x\n", lpw_bits);
ret = regmap_write(data->regmap, BMC150_ACCEL_REG_PMU_LPW, lpw_bits);
if (ret < 0) {
- dev_err(data->dev, "Error writing reg_pmu_lpw\n");
+ dev_err(dev, "Error writing reg_pmu_lpw\n");
return ret;
}
@@ -316,23 +317,24 @@ static int bmc150_accel_set_bw(struct bmc150_accel_data *data, int val,
static int bmc150_accel_update_slope(struct bmc150_accel_data *data)
{
+ struct device *dev = regmap_get_device(data->regmap);
int ret;
ret = regmap_write(data->regmap, BMC150_ACCEL_REG_INT_6,
data->slope_thres);
if (ret < 0) {
- dev_err(data->dev, "Error writing reg_int_6\n");
+ dev_err(dev, "Error writing reg_int_6\n");
return ret;
}
ret = regmap_update_bits(data->regmap, BMC150_ACCEL_REG_INT_5,
BMC150_ACCEL_SLOPE_DUR_MASK, data->slope_dur);
if (ret < 0) {
- dev_err(data->dev, "Error updating reg_int_5\n");
+ dev_err(dev, "Error updating reg_int_5\n");
return ret;
}
- dev_dbg(data->dev, "%s: %x %x\n", __func__, data->slope_thres,
+ dev_dbg(dev, "%s: %x %x\n", __func__, data->slope_thres,
data->slope_dur);
return ret;
@@ -378,20 +380,21 @@ static int bmc150_accel_get_startup_times(struct bmc150_accel_data *data)
static int bmc150_accel_set_power_state(struct bmc150_accel_data *data, bool on)
{
+ struct device *dev = regmap_get_device(data->regmap);
int ret;
if (on) {
- ret = pm_runtime_get_sync(data->dev);
+ ret = pm_runtime_get_sync(dev);
} else {
- pm_runtime_mark_last_busy(data->dev);
- ret = pm_runtime_put_autosuspend(data->dev);
+ pm_runtime_mark_last_busy(dev);
+ ret = pm_runtime_put_autosuspend(dev);
}
if (ret < 0) {
- dev_err(data->dev,
+ dev_err(dev,
"Failed: bmc150_accel_set_power_state for %d\n", on);
if (on)
- pm_runtime_put_noidle(data->dev);
+ pm_runtime_put_noidle(dev);
return ret;
}
@@ -445,6 +448,7 @@ static void bmc150_accel_interrupts_setup(struct iio_dev *indio_dev,
static int bmc150_accel_set_interrupt(struct bmc150_accel_data *data, int i,
bool state)
{
+ struct device *dev = regmap_get_device(data->regmap);
struct bmc150_accel_interrupt *intr = &data->interrupts[i];
const struct bmc150_accel_interrupt_info *info = intr->info;
int ret;
@@ -474,7 +478,7 @@ static int bmc150_accel_set_interrupt(struct bmc150_accel_data *data, int i,
ret = regmap_update_bits(data->regmap, info->map_reg, info->map_bitmask,
(state ? info->map_bitmask : 0));
if (ret < 0) {
- dev_err(data->dev, "Error updating reg_int_map\n");
+ dev_err(dev, "Error updating reg_int_map\n");
goto out_fix_power_state;
}
@@ -482,7 +486,7 @@ static int bmc150_accel_set_interrupt(struct bmc150_accel_data *data, int i,
ret = regmap_update_bits(data->regmap, info->en_reg, info->en_bitmask,
(state ? info->en_bitmask : 0));
if (ret < 0) {
- dev_err(data->dev, "Error updating reg_int_en\n");
+ dev_err(dev, "Error updating reg_int_en\n");
goto out_fix_power_state;
}
@@ -500,6 +504,7 @@ out_fix_power_state:
static int bmc150_accel_set_scale(struct bmc150_accel_data *data, int val)
{
+ struct device *dev = regmap_get_device(data->regmap);
int ret, i;
for (i = 0; i < ARRAY_SIZE(data->chip_info->scale_table); ++i) {
@@ -508,8 +513,7 @@ static int bmc150_accel_set_scale(struct bmc150_accel_data *data, int val)
BMC150_ACCEL_REG_PMU_RANGE,
data->chip_info->scale_table[i].reg_range);
if (ret < 0) {
- dev_err(data->dev,
- "Error writing pmu_range\n");
+ dev_err(dev, "Error writing pmu_range\n");
return ret;
}
@@ -523,6 +527,7 @@ static int bmc150_accel_set_scale(struct bmc150_accel_data *data, int val)
static int bmc150_accel_get_temp(struct bmc150_accel_data *data, int *val)
{
+ struct device *dev = regmap_get_device(data->regmap);
int ret;
unsigned int value;
@@ -530,7 +535,7 @@ static int bmc150_accel_get_temp(struct bmc150_accel_data *data, int *val)
ret = regmap_read(data->regmap, BMC150_ACCEL_REG_TEMP, &value);
if (ret < 0) {
- dev_err(data->dev, "Error reading reg_temp\n");
+ dev_err(dev, "Error reading reg_temp\n");
mutex_unlock(&data->mutex);
return ret;
}
@@ -545,6 +550,7 @@ static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
struct iio_chan_spec const *chan,
int *val)
{
+ struct device *dev = regmap_get_device(data->regmap);
int ret;
int axis = chan->scan_index;
__le16 raw_val;
@@ -559,7 +565,7 @@ static int bmc150_accel_get_axis(struct bmc150_accel_data *data,
ret = regmap_bulk_read(data->regmap, BMC150_ACCEL_AXIS_TO_REG(axis),
&raw_val, sizeof(raw_val));
if (ret < 0) {
- dev_err(data->dev, "Error reading axis %d\n", axis);
+ dev_err(dev, "Error reading axis %d\n", axis);
bmc150_accel_set_power_state(data, false);
mutex_unlock(&data->mutex);
return ret;
@@ -831,6 +837,7 @@ static int bmc150_accel_set_watermark(struct iio_dev *indio_dev, unsigned val)
static int bmc150_accel_fifo_transfer(struct bmc150_accel_data *data,
char *buffer, int samples)
{
+ struct device *dev = regmap_get_device(data->regmap);
int sample_length = 3 * 2;
int ret;
int total_length = samples * sample_length;
@@ -854,7 +861,8 @@ static int bmc150_accel_fifo_transfer(struct bmc150_accel_data *data,
}
if (ret)
- dev_err(data->dev, "Error transferring data from fifo in single steps of %zu\n",
+ dev_err(dev,
+ "Error transferring data from fifo in single steps of %zu\n",
step);
return ret;
@@ -864,6 +872,7 @@ static int __bmc150_accel_fifo_flush(struct iio_dev *indio_dev,
unsigned samples, bool irq)
{
struct bmc150_accel_data *data = iio_priv(indio_dev);
+ struct device *dev = regmap_get_device(data->regmap);
int ret, i;
u8 count;
u16 buffer[BMC150_ACCEL_FIFO_LENGTH * 3];
@@ -873,7 +882,7 @@ static int __bmc150_accel_fifo_flush(struct iio_dev *indio_dev,
ret = regmap_read(data->regmap, BMC150_ACCEL_REG_FIFO_STATUS, &val);
if (ret < 0) {
- dev_err(data->dev, "Error reading reg_fifo_status\n");
+ dev_err(dev, "Error reading reg_fifo_status\n");
return ret;
}
@@ -1105,27 +1114,23 @@ static const struct iio_info bmc150_accel_info_fifo = {
.driver_module = THIS_MODULE,
};
+static const unsigned long bmc150_accel_scan_masks[] = {
+ BIT(AXIS_X) | BIT(AXIS_Y) | BIT(AXIS_Z),
+ 0};
+
static irqreturn_t bmc150_accel_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct bmc150_accel_data *data = iio_priv(indio_dev);
- int bit, ret, i = 0;
- unsigned int raw_val;
+ int ret;
mutex_lock(&data->mutex);
- for_each_set_bit(bit, indio_dev->active_scan_mask,
- indio_dev->masklength) {
- ret = regmap_bulk_read(data->regmap,
- BMC150_ACCEL_AXIS_TO_REG(bit), &raw_val,
- 2);
- if (ret < 0) {
- mutex_unlock(&data->mutex);
- goto err_read;
- }
- data->buffer[i++] = raw_val;
- }
+ ret = regmap_bulk_read(data->regmap, BMC150_ACCEL_REG_XOUT_L,
+ data->buffer, AXIS_MAX * 2);
mutex_unlock(&data->mutex);
+ if (ret < 0)
+ goto err_read;
iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
pf->timestamp);
@@ -1139,6 +1144,7 @@ static int bmc150_accel_trig_try_reen(struct iio_trigger *trig)
{
struct bmc150_accel_trigger *t = iio_trigger_get_drvdata(trig);
struct bmc150_accel_data *data = t->data;
+ struct device *dev = regmap_get_device(data->regmap);
int ret;
/* new data interrupts don't need ack */
@@ -1152,8 +1158,7 @@ static int bmc150_accel_trig_try_reen(struct iio_trigger *trig)
BMC150_ACCEL_INT_MODE_LATCH_RESET);
mutex_unlock(&data->mutex);
if (ret < 0) {
- dev_err(data->dev,
- "Error writing reg_int_rst_latch\n");
+ dev_err(dev, "Error writing reg_int_rst_latch\n");
return ret;
}
@@ -1204,13 +1209,14 @@ static const struct iio_trigger_ops bmc150_accel_trigger_ops = {
static int bmc150_accel_handle_roc_event(struct iio_dev *indio_dev)
{
struct bmc150_accel_data *data = iio_priv(indio_dev);
+ struct device *dev = regmap_get_device(data->regmap);
int dir;
int ret;
unsigned int val;
ret = regmap_read(data->regmap, BMC150_ACCEL_REG_INT_STATUS_2, &val);
if (ret < 0) {
- dev_err(data->dev, "Error reading reg_int_status_2\n");
+ dev_err(dev, "Error reading reg_int_status_2\n");
return ret;
}
@@ -1253,6 +1259,7 @@ static irqreturn_t bmc150_accel_irq_thread_handler(int irq, void *private)
{
struct iio_dev *indio_dev = private;
struct bmc150_accel_data *data = iio_priv(indio_dev);
+ struct device *dev = regmap_get_device(data->regmap);
bool ack = false;
int ret;
@@ -1276,7 +1283,7 @@ static irqreturn_t bmc150_accel_irq_thread_handler(int irq, void *private)
BMC150_ACCEL_INT_MODE_LATCH_INT |
BMC150_ACCEL_INT_MODE_LATCH_RESET);
if (ret)
- dev_err(data->dev, "Error writing reg_int_rst_latch\n");
+ dev_err(dev, "Error writing reg_int_rst_latch\n");
ret = IRQ_HANDLED;
} else {
@@ -1347,13 +1354,14 @@ static void bmc150_accel_unregister_triggers(struct bmc150_accel_data *data,
static int bmc150_accel_triggers_setup(struct iio_dev *indio_dev,
struct bmc150_accel_data *data)
{
+ struct device *dev = regmap_get_device(data->regmap);
int i, ret;
for (i = 0; i < BMC150_ACCEL_TRIGGERS; i++) {
struct bmc150_accel_trigger *t = &data->triggers[i];
- t->indio_trig = devm_iio_trigger_alloc(data->dev,
- bmc150_accel_triggers[i].name,
+ t->indio_trig = devm_iio_trigger_alloc(dev,
+ bmc150_accel_triggers[i].name,
indio_dev->name,
indio_dev->id);
if (!t->indio_trig) {
@@ -1361,7 +1369,7 @@ static int bmc150_accel_triggers_setup(struct iio_dev *indio_dev,
break;
}
- t->indio_trig->dev.parent = data->dev;
+ t->indio_trig->dev.parent = dev;
t->indio_trig->ops = &bmc150_accel_trigger_ops;
t->intr = bmc150_accel_triggers[i].intr;
t->data = data;
@@ -1385,12 +1393,13 @@ static int bmc150_accel_triggers_setup(struct iio_dev *indio_dev,
static int bmc150_accel_fifo_set_mode(struct bmc150_accel_data *data)
{
+ struct device *dev = regmap_get_device(data->regmap);
u8 reg = BMC150_ACCEL_REG_FIFO_CONFIG1;
int ret;
ret = regmap_write(data->regmap, reg, data->fifo_mode);
if (ret < 0) {
- dev_err(data->dev, "Error writing reg_fifo_config1\n");
+ dev_err(dev, "Error writing reg_fifo_config1\n");
return ret;
}
@@ -1400,7 +1409,7 @@ static int bmc150_accel_fifo_set_mode(struct bmc150_accel_data *data)
ret = regmap_write(data->regmap, BMC150_ACCEL_REG_FIFO_CONFIG0,
data->watermark);
if (ret < 0)
- dev_err(data->dev, "Error writing reg_fifo_config0\n");
+ dev_err(dev, "Error writing reg_fifo_config0\n");
return ret;
}
@@ -1484,17 +1493,17 @@ static const struct iio_buffer_setup_ops bmc150_accel_buffer_ops = {
static int bmc150_accel_chip_init(struct bmc150_accel_data *data)
{
+ struct device *dev = regmap_get_device(data->regmap);
int ret, i;
unsigned int val;
ret = regmap_read(data->regmap, BMC150_ACCEL_REG_CHIP_ID, &val);
if (ret < 0) {
- dev_err(data->dev,
- "Error: Reading chip id\n");
+ dev_err(dev, "Error: Reading chip id\n");
return ret;
}
- dev_dbg(data->dev, "Chip Id %x\n", val);
+ dev_dbg(dev, "Chip Id %x\n", val);
for (i = 0; i < ARRAY_SIZE(bmc150_accel_chip_info_tbl); i++) {
if (bmc150_accel_chip_info_tbl[i].chip_id == val) {
data->chip_info = &bmc150_accel_chip_info_tbl[i];
@@ -1503,7 +1512,7 @@ static int bmc150_accel_chip_init(struct bmc150_accel_data *data)
}
if (!data->chip_info) {
- dev_err(data->dev, "Invalid chip %x\n", val);
+ dev_err(dev, "Invalid chip %x\n", val);
return -ENODEV;
}
@@ -1520,8 +1529,7 @@ static int bmc150_accel_chip_init(struct bmc150_accel_data *data)
ret = regmap_write(data->regmap, BMC150_ACCEL_REG_PMU_RANGE,
BMC150_ACCEL_DEF_RANGE_4G);
if (ret < 0) {
- dev_err(data->dev,
- "Error writing reg_pmu_range\n");
+ dev_err(dev, "Error writing reg_pmu_range\n");
return ret;
}
@@ -1539,8 +1547,7 @@ static int bmc150_accel_chip_init(struct bmc150_accel_data *data)
BMC150_ACCEL_INT_MODE_LATCH_INT |
BMC150_ACCEL_INT_MODE_LATCH_RESET);
if (ret < 0) {
- dev_err(data->dev,
- "Error writing reg_int_rst_latch\n");
+ dev_err(dev, "Error writing reg_int_rst_latch\n");
return ret;
}
@@ -1560,7 +1567,6 @@ int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
data = iio_priv(indio_dev);
dev_set_drvdata(dev, indio_dev);
- data->dev = dev;
data->irq = irq;
data->regmap = regmap;
@@ -1575,6 +1581,7 @@ int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
indio_dev->channels = data->chip_info->channels;
indio_dev->num_channels = data->chip_info->num_channels;
indio_dev->name = name ? name : data->chip_info->name;
+ indio_dev->available_scan_masks = bmc150_accel_scan_masks;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &bmc150_accel_info;
@@ -1583,13 +1590,13 @@ int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
bmc150_accel_trigger_handler,
&bmc150_accel_buffer_ops);
if (ret < 0) {
- dev_err(data->dev, "Failed: iio triggered buffer setup\n");
+ dev_err(dev, "Failed: iio triggered buffer setup\n");
return ret;
}
if (data->irq > 0) {
ret = devm_request_threaded_irq(
- data->dev, data->irq,
+ dev, data->irq,
bmc150_accel_irq_handler,
bmc150_accel_irq_thread_handler,
IRQF_TRIGGER_RISING,
@@ -1607,7 +1614,7 @@ int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
ret = regmap_write(data->regmap, BMC150_ACCEL_REG_INT_RST_LATCH,
BMC150_ACCEL_INT_MODE_LATCH_RESET);
if (ret < 0) {
- dev_err(data->dev, "Error writing reg_int_rst_latch\n");
+ dev_err(dev, "Error writing reg_int_rst_latch\n");
goto err_buffer_cleanup;
}
@@ -1656,9 +1663,9 @@ int bmc150_accel_core_remove(struct device *dev)
iio_device_unregister(indio_dev);
- pm_runtime_disable(data->dev);
- pm_runtime_set_suspended(data->dev);
- pm_runtime_put_noidle(data->dev);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_noidle(dev);
bmc150_accel_unregister_triggers(data, BMC150_ACCEL_TRIGGERS - 1);
@@ -1707,7 +1714,7 @@ static int bmc150_accel_runtime_suspend(struct device *dev)
struct bmc150_accel_data *data = iio_priv(indio_dev);
int ret;
- dev_dbg(data->dev, __func__);
+ dev_dbg(dev, __func__);
ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_SUSPEND, 0);
if (ret < 0)
return -EAGAIN;
@@ -1722,7 +1729,7 @@ static int bmc150_accel_runtime_resume(struct device *dev)
int ret;
int sleep_val;
- dev_dbg(data->dev, __func__);
+ dev_dbg(dev, __func__);
ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_NORMAL, 0);
if (ret < 0)
diff --git a/drivers/iio/accel/bmc150-accel-i2c.c b/drivers/iio/accel/bmc150-accel-i2c.c
index b41404ba32fc..8ca8041267ef 100644
--- a/drivers/iio/accel/bmc150-accel-i2c.c
+++ b/drivers/iio/accel/bmc150-accel-i2c.c
@@ -28,11 +28,6 @@
#include "bmc150-accel.h"
-static const struct regmap_config bmc150_i2c_regmap_conf = {
- .reg_bits = 8,
- .val_bits = 8,
-};
-
static int bmc150_accel_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -43,7 +38,7 @@ static int bmc150_accel_probe(struct i2c_client *client,
i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_READ_I2C_BLOCK);
- regmap = devm_regmap_init_i2c(client, &bmc150_i2c_regmap_conf);
+ regmap = devm_regmap_init_i2c(client, &bmc150_regmap_conf);
if (IS_ERR(regmap)) {
dev_err(&client->dev, "Failed to initialize i2c regmap\n");
return PTR_ERR(regmap);
diff --git a/drivers/iio/accel/bmc150-accel-spi.c b/drivers/iio/accel/bmc150-accel-spi.c
index 16b66f2a7204..006794a70a1f 100644
--- a/drivers/iio/accel/bmc150-accel-spi.c
+++ b/drivers/iio/accel/bmc150-accel-spi.c
@@ -25,18 +25,12 @@
#include "bmc150-accel.h"
-static const struct regmap_config bmc150_spi_regmap_conf = {
- .reg_bits = 8,
- .val_bits = 8,
- .max_register = 0x3f,
-};
-
static int bmc150_accel_probe(struct spi_device *spi)
{
struct regmap *regmap;
const struct spi_device_id *id = spi_get_device_id(spi);
- regmap = devm_regmap_init_spi(spi, &bmc150_spi_regmap_conf);
+ regmap = devm_regmap_init_spi(spi, &bmc150_regmap_conf);
if (IS_ERR(regmap)) {
dev_err(&spi->dev, "Failed to initialize spi regmap\n");
return PTR_ERR(regmap);
diff --git a/drivers/iio/accel/bmc150-accel.h b/drivers/iio/accel/bmc150-accel.h
index ba0335987f94..38a8b11f8c19 100644
--- a/drivers/iio/accel/bmc150-accel.h
+++ b/drivers/iio/accel/bmc150-accel.h
@@ -16,5 +16,6 @@ int bmc150_accel_core_probe(struct device *dev, struct regmap *regmap, int irq,
const char *name, bool block_supported);
int bmc150_accel_core_remove(struct device *dev);
extern const struct dev_pm_ops bmc150_accel_pm_ops;
+extern const struct regmap_config bmc150_regmap_conf;
#endif /* _BMC150_ACCEL_H_ */
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index edec1d099e91..bfe219a8bea2 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -20,7 +20,6 @@
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/acpi.h>
-#include <linux/gpio/consumer.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/iio/iio.h>
@@ -115,6 +114,7 @@ enum kxcjk1013_axis {
AXIS_X,
AXIS_Y,
AXIS_Z,
+ AXIS_MAX,
};
enum kxcjk1013_mode {
@@ -922,7 +922,7 @@ static const struct iio_event_spec kxcjk1013_event = {
.realbits = 12, \
.storagebits = 16, \
.shift = 4, \
- .endianness = IIO_CPU, \
+ .endianness = IIO_LE, \
}, \
.event_spec = &kxcjk1013_event, \
.num_event_specs = 1 \
@@ -953,25 +953,23 @@ static const struct iio_info kxcjk1013_info = {
.driver_module = THIS_MODULE,
};
+static const unsigned long kxcjk1013_scan_masks[] = {0x7, 0};
+
static irqreturn_t kxcjk1013_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct kxcjk1013_data *data = iio_priv(indio_dev);
- int bit, ret, i = 0;
+ int ret;
mutex_lock(&data->mutex);
-
- for_each_set_bit(bit, indio_dev->active_scan_mask,
- indio_dev->masklength) {
- ret = kxcjk1013_get_acc_reg(data, bit);
- if (ret < 0) {
- mutex_unlock(&data->mutex);
- goto err;
- }
- data->buffer[i++] = ret;
- }
+ ret = i2c_smbus_read_i2c_block_data_or_emulated(data->client,
+ KXCJK1013_REG_XOUT_L,
+ AXIS_MAX * 2,
+ (u8 *)data->buffer);
mutex_unlock(&data->mutex);
+ if (ret < 0)
+ goto err;
iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
data->timestamp);
@@ -1204,6 +1202,7 @@ static int kxcjk1013_probe(struct i2c_client *client,
indio_dev->dev.parent = &client->dev;
indio_dev->channels = kxcjk1013_channels;
indio_dev->num_channels = ARRAY_SIZE(kxcjk1013_channels);
+ indio_dev->available_scan_masks = kxcjk1013_scan_masks;
indio_dev->name = name;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &kxcjk1013_info;
diff --git a/drivers/iio/accel/mma7455_core.c b/drivers/iio/accel/mma7455_core.c
index c633cc2c0789..c902f54c23f5 100644
--- a/drivers/iio/accel/mma7455_core.c
+++ b/drivers/iio/accel/mma7455_core.c
@@ -55,11 +55,11 @@
struct mma7455_data {
struct regmap *regmap;
- struct device *dev;
};
static int mma7455_drdy(struct mma7455_data *mma7455)
{
+ struct device *dev = regmap_get_device(mma7455->regmap);
unsigned int reg;
int tries = 3;
int ret;
@@ -75,7 +75,7 @@ static int mma7455_drdy(struct mma7455_data *mma7455)
msleep(20);
}
- dev_warn(mma7455->dev, "data not ready\n");
+ dev_warn(dev, "data not ready\n");
return -EIO;
}
@@ -260,7 +260,6 @@ int mma7455_core_probe(struct device *dev, struct regmap *regmap,
dev_set_drvdata(dev, indio_dev);
mma7455 = iio_priv(indio_dev);
mma7455->regmap = regmap;
- mma7455->dev = dev;
indio_dev->info = &mma7455_info;
indio_dev->name = name;
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index 7f4994f32a90..e225d3c53bd5 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -6,6 +6,7 @@
* MMA8453Q (10 bit)
* MMA8652FC (12 bit)
* MMA8653FC (10 bit)
+ * FXLS8471Q (14 bit)
*
* Copyright 2015 Martin Kepplinger <martin.kepplinger@theobroma-systems.com>
* Copyright 2014 Peter Meerwald <pmeerw@pmeerw.net>
@@ -16,7 +17,7 @@
*
* 7-bit I2C slave address 0x1c/0x1d (pin selectable)
*
- * TODO: orientation events, autosleep
+ * TODO: orientation events
*/
#include <linux/module.h>
@@ -31,6 +32,7 @@
#include <linux/delay.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
+#include <linux/pm_runtime.h>
#define MMA8452_STATUS 0x00
#define MMA8452_STATUS_DRDY (BIT(2) | BIT(1) | BIT(0))
@@ -91,6 +93,9 @@
#define MMA8453_DEVICE_ID 0x3a
#define MMA8652_DEVICE_ID 0x4a
#define MMA8653_DEVICE_ID 0x5a
+#define FXLS8471_DEVICE_ID 0x6a
+
+#define MMA8452_AUTO_SUSPEND_DELAY_MS 2000
struct mma8452_data {
struct i2c_client *client;
@@ -172,6 +177,31 @@ static int mma8452_drdy(struct mma8452_data *data)
return -EIO;
}
+static int mma8452_set_runtime_pm_state(struct i2c_client *client, bool on)
+{
+#ifdef CONFIG_PM
+ int ret;
+
+ if (on) {
+ ret = pm_runtime_get_sync(&client->dev);
+ } else {
+ pm_runtime_mark_last_busy(&client->dev);
+ ret = pm_runtime_put_autosuspend(&client->dev);
+ }
+
+ if (ret < 0) {
+ dev_err(&client->dev,
+ "failed to change power state to %d\n", on);
+ if (on)
+ pm_runtime_put_noidle(&client->dev);
+
+ return ret;
+ }
+#endif
+
+ return 0;
+}
+
static int mma8452_read(struct mma8452_data *data, __be16 buf[3])
{
int ret = mma8452_drdy(data);
@@ -179,8 +209,16 @@ static int mma8452_read(struct mma8452_data *data, __be16 buf[3])
if (ret < 0)
return ret;
- return i2c_smbus_read_i2c_block_data(data->client, MMA8452_OUT_X,
- 3 * sizeof(__be16), (u8 *)buf);
+ ret = mma8452_set_runtime_pm_state(data->client, true);
+ if (ret)
+ return ret;
+
+ ret = i2c_smbus_read_i2c_block_data(data->client, MMA8452_OUT_X,
+ 3 * sizeof(__be16), (u8 *)buf);
+
+ ret = mma8452_set_runtime_pm_state(data->client, false);
+
+ return ret;
}
static ssize_t mma8452_show_int_plus_micros(char *buf, const int (*vals)[2],
@@ -357,7 +395,8 @@ static int mma8452_read_raw(struct iio_dev *indio_dev,
return IIO_VAL_INT_PLUS_MICRO;
case IIO_CHAN_INFO_CALIBBIAS:
ret = i2c_smbus_read_byte_data(data->client,
- MMA8452_OFF_X + chan->scan_index);
+ MMA8452_OFF_X +
+ chan->scan_index);
if (ret < 0)
return ret;
@@ -392,24 +431,47 @@ static int mma8452_active(struct mma8452_data *data)
data->ctrl_reg1);
}
+/* returns >0 if active, 0 if in standby and <0 on error */
+static int mma8452_is_active(struct mma8452_data *data)
+{
+ int reg;
+
+ reg = i2c_smbus_read_byte_data(data->client, MMA8452_CTRL_REG1);
+ if (reg < 0)
+ return reg;
+
+ return reg & MMA8452_CTRL_ACTIVE;
+}
+
static int mma8452_change_config(struct mma8452_data *data, u8 reg, u8 val)
{
int ret;
+ int is_active;
mutex_lock(&data->lock);
- /* config can only be changed when in standby */
- ret = mma8452_standby(data);
- if (ret < 0)
+ is_active = mma8452_is_active(data);
+ if (is_active < 0) {
+ ret = is_active;
goto fail;
+ }
+
+ /* config can only be changed when in standby */
+ if (is_active > 0) {
+ ret = mma8452_standby(data);
+ if (ret < 0)
+ goto fail;
+ }
ret = i2c_smbus_write_byte_data(data->client, reg, val);
if (ret < 0)
goto fail;
- ret = mma8452_active(data);
- if (ret < 0)
- goto fail;
+ if (is_active > 0) {
+ ret = mma8452_active(data);
+ if (ret < 0)
+ goto fail;
+ }
ret = 0;
fail:
@@ -418,7 +480,7 @@ fail:
return ret;
}
-/* returns >0 if in freefall mode, 0 if not or <0 if an error occured */
+/* returns >0 if in freefall mode, 0 if not or <0 if an error occurred */
static int mma8452_freefall_mode_enabled(struct mma8452_data *data)
{
int val;
@@ -668,7 +730,8 @@ static int mma8452_read_event_config(struct iio_dev *indio_dev,
if (ret < 0)
return ret;
- return !!(ret & BIT(chan->scan_index + chip->ev_cfg_chan_shift));
+ return !!(ret & BIT(chan->scan_index +
+ chip->ev_cfg_chan_shift));
default:
return -EINVAL;
}
@@ -682,7 +745,11 @@ static int mma8452_write_event_config(struct iio_dev *indio_dev,
{
struct mma8452_data *data = iio_priv(indio_dev);
const struct mma_chip_info *chip = data->chip_info;
- int val;
+ int val, ret;
+
+ ret = mma8452_set_runtime_pm_state(data->client, state);
+ if (ret)
+ return ret;
switch (dir) {
case IIO_EV_DIR_FALLING:
@@ -990,6 +1057,7 @@ enum {
mma8453,
mma8652,
mma8653,
+ fxls8471,
};
static const struct mma_chip_info mma_chip_info_table[] = {
@@ -1003,7 +1071,7 @@ static const struct mma_chip_info mma_chip_info_table[] = {
* bit.
* The userspace interface uses m/s^2 and we declare micro units
* So scale factor for 12 bit here is given by:
- * g * N * 1000000 / 2048 for N = 2, 4, 8 and g=9.80665
+ * g * N * 1000000 / 2048 for N = 2, 4, 8 and g=9.80665
*/
.mma_scales = { {0, 2394}, {0, 4788}, {0, 9577} },
.ev_cfg = MMA8452_TRANSIENT_CFG,
@@ -1081,6 +1149,22 @@ static const struct mma_chip_info mma_chip_info_table[] = {
.ev_ths_mask = MMA8452_FF_MT_THS_MASK,
.ev_count = MMA8452_FF_MT_COUNT,
},
+ [fxls8471] = {
+ .chip_id = FXLS8471_DEVICE_ID,
+ .channels = mma8451_channels,
+ .num_channels = ARRAY_SIZE(mma8451_channels),
+ .mma_scales = { {0, 2394}, {0, 4788}, {0, 9577} },
+ .ev_cfg = MMA8452_TRANSIENT_CFG,
+ .ev_cfg_ele = MMA8452_TRANSIENT_CFG_ELE,
+ .ev_cfg_chan_shift = 1,
+ .ev_src = MMA8452_TRANSIENT_SRC,
+ .ev_src_xe = MMA8452_TRANSIENT_SRC_XTRANSE,
+ .ev_src_ye = MMA8452_TRANSIENT_SRC_YTRANSE,
+ .ev_src_ze = MMA8452_TRANSIENT_SRC_ZTRANSE,
+ .ev_ths = MMA8452_TRANSIENT_THS,
+ .ev_ths_mask = MMA8452_TRANSIENT_THS_MASK,
+ .ev_count = MMA8452_TRANSIENT_COUNT,
+ },
};
static struct attribute *mma8452_attributes[] = {
@@ -1114,7 +1198,11 @@ static int mma8452_data_rdy_trigger_set_state(struct iio_trigger *trig,
{
struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
struct mma8452_data *data = iio_priv(indio_dev);
- int reg;
+ int reg, ret;
+
+ ret = mma8452_set_runtime_pm_state(data->client, state);
+ if (ret)
+ return ret;
reg = i2c_smbus_read_byte_data(data->client, MMA8452_CTRL_REG4);
if (reg < 0)
@@ -1206,6 +1294,7 @@ static const struct of_device_id mma8452_dt_ids[] = {
{ .compatible = "fsl,mma8453", .data = &mma_chip_info_table[mma8453] },
{ .compatible = "fsl,mma8652", .data = &mma_chip_info_table[mma8652] },
{ .compatible = "fsl,mma8653", .data = &mma_chip_info_table[mma8653] },
+ { .compatible = "fsl,fxls8471", .data = &mma_chip_info_table[fxls8471] },
{ }
};
MODULE_DEVICE_TABLE(of, mma8452_dt_ids);
@@ -1243,6 +1332,7 @@ static int mma8452_probe(struct i2c_client *client,
case MMA8453_DEVICE_ID:
case MMA8652_DEVICE_ID:
case MMA8653_DEVICE_ID:
+ case FXLS8471_DEVICE_ID:
if (ret == data->chip_info->chip_id)
break;
default:
@@ -1340,6 +1430,15 @@ static int mma8452_probe(struct i2c_client *client,
goto buffer_cleanup;
}
+ ret = pm_runtime_set_active(&client->dev);
+ if (ret < 0)
+ goto buffer_cleanup;
+
+ pm_runtime_enable(&client->dev);
+ pm_runtime_set_autosuspend_delay(&client->dev,
+ MMA8452_AUTO_SUSPEND_DELAY_MS);
+ pm_runtime_use_autosuspend(&client->dev);
+
ret = iio_device_register(indio_dev);
if (ret < 0)
goto buffer_cleanup;
@@ -1364,6 +1463,11 @@ static int mma8452_remove(struct i2c_client *client)
struct iio_dev *indio_dev = i2c_get_clientdata(client);
iio_device_unregister(indio_dev);
+
+ pm_runtime_disable(&client->dev);
+ pm_runtime_set_suspended(&client->dev);
+ pm_runtime_put_noidle(&client->dev);
+
iio_triggered_buffer_cleanup(indio_dev);
mma8452_trigger_cleanup(indio_dev);
mma8452_standby(iio_priv(indio_dev));
@@ -1371,6 +1475,45 @@ static int mma8452_remove(struct i2c_client *client)
return 0;
}
+#ifdef CONFIG_PM
+static int mma8452_runtime_suspend(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct mma8452_data *data = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&data->lock);
+ ret = mma8452_standby(data);
+ mutex_unlock(&data->lock);
+ if (ret < 0) {
+ dev_err(&data->client->dev, "powering off device failed\n");
+ return -EAGAIN;
+ }
+
+ return 0;
+}
+
+static int mma8452_runtime_resume(struct device *dev)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct mma8452_data *data = iio_priv(indio_dev);
+ int ret, sleep_val;
+
+ ret = mma8452_active(data);
+ if (ret < 0)
+ return ret;
+
+ ret = mma8452_get_odr_index(data);
+ sleep_val = 1000 / mma8452_samp_freq[ret][0];
+ if (sleep_val < 20)
+ usleep_range(sleep_val * 1000, 20000);
+ else
+ msleep_interruptible(sleep_val);
+
+ return 0;
+}
+#endif
+
#ifdef CONFIG_PM_SLEEP
static int mma8452_suspend(struct device *dev)
{
@@ -1383,18 +1526,21 @@ static int mma8452_resume(struct device *dev)
return mma8452_active(iio_priv(i2c_get_clientdata(
to_i2c_client(dev))));
}
-
-static SIMPLE_DEV_PM_OPS(mma8452_pm_ops, mma8452_suspend, mma8452_resume);
-#define MMA8452_PM_OPS (&mma8452_pm_ops)
-#else
-#define MMA8452_PM_OPS NULL
#endif
+static const struct dev_pm_ops mma8452_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(mma8452_suspend, mma8452_resume)
+ SET_RUNTIME_PM_OPS(mma8452_runtime_suspend,
+ mma8452_runtime_resume, NULL)
+};
+
static const struct i2c_device_id mma8452_id[] = {
+ { "mma8451", mma8451 },
{ "mma8452", mma8452 },
{ "mma8453", mma8453 },
{ "mma8652", mma8652 },
{ "mma8653", mma8653 },
+ { "fxls8471", fxls8471 },
{ }
};
MODULE_DEVICE_TABLE(i2c, mma8452_id);
@@ -1403,7 +1549,7 @@ static struct i2c_driver mma8452_driver = {
.driver = {
.name = "mma8452",
.of_match_table = of_match_ptr(mma8452_dt_ids),
- .pm = MMA8452_PM_OPS,
+ .pm = &mma8452_pm_ops,
},
.probe = mma8452_probe,
.remove = mma8452_remove,
diff --git a/drivers/iio/accel/mma9553.c b/drivers/iio/accel/mma9553.c
index fa7d36217c4b..bb05f3efddca 100644
--- a/drivers/iio/accel/mma9553.c
+++ b/drivers/iio/accel/mma9553.c
@@ -17,7 +17,6 @@
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/acpi.h>
-#include <linux/gpio/consumer.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/events.h>
diff --git a/drivers/iio/accel/mxc4005.c b/drivers/iio/accel/mxc4005.c
index e72e218c2696..c23f47af7256 100644
--- a/drivers/iio/accel/mxc4005.c
+++ b/drivers/iio/accel/mxc4005.c
@@ -17,7 +17,6 @@
#include <linux/i2c.h>
#include <linux/iio/iio.h>
#include <linux/acpi.h>
-#include <linux/gpio/consumer.h>
#include <linux/regmap.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/trigger.h>
@@ -380,31 +379,6 @@ static const struct iio_trigger_ops mxc4005_trigger_ops = {
.owner = THIS_MODULE,
};
-static int mxc4005_gpio_probe(struct i2c_client *client,
- struct mxc4005_data *data)
-{
- struct device *dev;
- struct gpio_desc *gpio;
- int ret;
-
- if (!client)
- return -EINVAL;
-
- dev = &client->dev;
-
- gpio = devm_gpiod_get_index(dev, "mxc4005_int", 0, GPIOD_IN);
- if (IS_ERR(gpio)) {
- dev_err(dev, "failed to get acpi gpio index\n");
- return PTR_ERR(gpio);
- }
-
- ret = gpiod_to_irq(gpio);
-
- dev_dbg(dev, "GPIO resource, no:%d irq:%d\n", desc_to_gpio(gpio), ret);
-
- return ret;
-}
-
static int mxc4005_chip_init(struct mxc4005_data *data)
{
int ret;
@@ -470,9 +444,6 @@ static int mxc4005_probe(struct i2c_client *client,
return ret;
}
- if (client->irq < 0)
- client->irq = mxc4005_gpio_probe(client, data);
-
if (client->irq > 0) {
data->dready_trig = devm_iio_trigger_alloc(&client->dev,
"%s-dev%d",
diff --git a/drivers/iio/accel/st_accel.h b/drivers/iio/accel/st_accel.h
index 5d4a1897b293..57f83a67948c 100644
--- a/drivers/iio/accel/st_accel.h
+++ b/drivers/iio/accel/st_accel.h
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/iio/common/st_sensors.h>
+#define H3LIS331DL_DRIVER_NAME "h3lis331dl_accel"
#define LIS3LV02DL_ACCEL_DEV_NAME "lis3lv02dl_accel"
#define LSM303DLHC_ACCEL_DEV_NAME "lsm303dlhc_accel"
#define LIS3DH_ACCEL_DEV_NAME "lis3dh"
diff --git a/drivers/iio/accel/st_accel_core.c b/drivers/iio/accel/st_accel_core.c
index a03a1417dd63..dc73f2d85e6d 100644
--- a/drivers/iio/accel/st_accel_core.c
+++ b/drivers/iio/accel/st_accel_core.c
@@ -39,6 +39,9 @@
#define ST_ACCEL_FS_AVL_6G 6
#define ST_ACCEL_FS_AVL_8G 8
#define ST_ACCEL_FS_AVL_16G 16
+#define ST_ACCEL_FS_AVL_100G 100
+#define ST_ACCEL_FS_AVL_200G 200
+#define ST_ACCEL_FS_AVL_400G 400
/* CUSTOM VALUES FOR SENSOR 1 */
#define ST_ACCEL_1_WAI_EXP 0x33
@@ -96,6 +99,8 @@
#define ST_ACCEL_2_DRDY_IRQ_INT2_MASK 0x10
#define ST_ACCEL_2_IHL_IRQ_ADDR 0x22
#define ST_ACCEL_2_IHL_IRQ_MASK 0x80
+#define ST_ACCEL_2_OD_IRQ_ADDR 0x22
+#define ST_ACCEL_2_OD_IRQ_MASK 0x40
#define ST_ACCEL_2_MULTIREAD_BIT true
/* CUSTOM VALUES FOR SENSOR 3 */
@@ -177,10 +182,39 @@
#define ST_ACCEL_5_DRDY_IRQ_INT2_MASK 0x20
#define ST_ACCEL_5_IHL_IRQ_ADDR 0x22
#define ST_ACCEL_5_IHL_IRQ_MASK 0x80
+#define ST_ACCEL_5_OD_IRQ_ADDR 0x22
+#define ST_ACCEL_5_OD_IRQ_MASK 0x40
#define ST_ACCEL_5_IG1_EN_ADDR 0x21
#define ST_ACCEL_5_IG1_EN_MASK 0x08
#define ST_ACCEL_5_MULTIREAD_BIT false
+/* CUSTOM VALUES FOR SENSOR 6 */
+#define ST_ACCEL_6_WAI_EXP 0x32
+#define ST_ACCEL_6_ODR_ADDR 0x20
+#define ST_ACCEL_6_ODR_MASK 0x18
+#define ST_ACCEL_6_ODR_AVL_50HZ_VAL 0x00
+#define ST_ACCEL_6_ODR_AVL_100HZ_VAL 0x01
+#define ST_ACCEL_6_ODR_AVL_400HZ_VAL 0x02
+#define ST_ACCEL_6_ODR_AVL_1000HZ_VAL 0x03
+#define ST_ACCEL_6_PW_ADDR 0x20
+#define ST_ACCEL_6_PW_MASK 0x20
+#define ST_ACCEL_6_FS_ADDR 0x23
+#define ST_ACCEL_6_FS_MASK 0x30
+#define ST_ACCEL_6_FS_AVL_100_VAL 0x00
+#define ST_ACCEL_6_FS_AVL_200_VAL 0x01
+#define ST_ACCEL_6_FS_AVL_400_VAL 0x03
+#define ST_ACCEL_6_FS_AVL_100_GAIN IIO_G_TO_M_S_2(49000)
+#define ST_ACCEL_6_FS_AVL_200_GAIN IIO_G_TO_M_S_2(98000)
+#define ST_ACCEL_6_FS_AVL_400_GAIN IIO_G_TO_M_S_2(195000)
+#define ST_ACCEL_6_BDU_ADDR 0x23
+#define ST_ACCEL_6_BDU_MASK 0x80
+#define ST_ACCEL_6_DRDY_IRQ_ADDR 0x22
+#define ST_ACCEL_6_DRDY_IRQ_INT1_MASK 0x02
+#define ST_ACCEL_6_DRDY_IRQ_INT2_MASK 0x10
+#define ST_ACCEL_6_IHL_IRQ_ADDR 0x22
+#define ST_ACCEL_6_IHL_IRQ_MASK 0x80
+#define ST_ACCEL_6_MULTIREAD_BIT true
+
static const struct iio_chan_spec st_accel_8bit_channels[] = {
ST_SENSORS_LSM_CHANNELS(IIO_ACCEL,
BIT(IIO_CHAN_INFO_RAW) | BIT(IIO_CHAN_INFO_SCALE),
@@ -302,6 +336,7 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask_int2 = ST_ACCEL_1_DRDY_IRQ_INT2_MASK,
.addr_ihl = ST_ACCEL_1_IHL_IRQ_ADDR,
.mask_ihl = ST_ACCEL_1_IHL_IRQ_MASK,
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
.multi_read_bit = ST_ACCEL_1_MULTIREAD_BIT,
.bootime = 2,
@@ -367,6 +402,9 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask_int2 = ST_ACCEL_2_DRDY_IRQ_INT2_MASK,
.addr_ihl = ST_ACCEL_2_IHL_IRQ_ADDR,
.mask_ihl = ST_ACCEL_2_IHL_IRQ_MASK,
+ .addr_od = ST_ACCEL_2_OD_IRQ_ADDR,
+ .mask_od = ST_ACCEL_2_OD_IRQ_MASK,
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
.multi_read_bit = ST_ACCEL_2_MULTIREAD_BIT,
.bootime = 2,
@@ -444,6 +482,7 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask_int2 = ST_ACCEL_3_DRDY_IRQ_INT2_MASK,
.addr_ihl = ST_ACCEL_3_IHL_IRQ_ADDR,
.mask_ihl = ST_ACCEL_3_IHL_IRQ_MASK,
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
.ig1 = {
.en_addr = ST_ACCEL_3_IG1_EN_ADDR,
.en_mask = ST_ACCEL_3_IG1_EN_MASK,
@@ -502,6 +541,7 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.drdy_irq = {
.addr = ST_ACCEL_4_DRDY_IRQ_ADDR,
.mask_int1 = ST_ACCEL_4_DRDY_IRQ_INT1_MASK,
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
.multi_read_bit = ST_ACCEL_4_MULTIREAD_BIT,
.bootime = 2, /* guess */
@@ -553,10 +593,75 @@ static const struct st_sensor_settings st_accel_sensors_settings[] = {
.mask_int2 = ST_ACCEL_5_DRDY_IRQ_INT2_MASK,
.addr_ihl = ST_ACCEL_5_IHL_IRQ_ADDR,
.mask_ihl = ST_ACCEL_5_IHL_IRQ_MASK,
+ .addr_od = ST_ACCEL_5_OD_IRQ_ADDR,
+ .mask_od = ST_ACCEL_5_OD_IRQ_MASK,
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
.multi_read_bit = ST_ACCEL_5_MULTIREAD_BIT,
.bootime = 2, /* guess */
},
+ {
+ .wai = ST_ACCEL_6_WAI_EXP,
+ .wai_addr = ST_SENSORS_DEFAULT_WAI_ADDRESS,
+ .sensors_supported = {
+ [0] = H3LIS331DL_DRIVER_NAME,
+ },
+ .ch = (struct iio_chan_spec *)st_accel_12bit_channels,
+ .odr = {
+ .addr = ST_ACCEL_6_ODR_ADDR,
+ .mask = ST_ACCEL_6_ODR_MASK,
+ .odr_avl = {
+ { 50, ST_ACCEL_6_ODR_AVL_50HZ_VAL },
+				{ 100, ST_ACCEL_6_ODR_AVL_100HZ_VAL },
+				{ 400, ST_ACCEL_6_ODR_AVL_400HZ_VAL },
+				{ 1000, ST_ACCEL_6_ODR_AVL_1000HZ_VAL },
+ },
+ },
+ .pw = {
+ .addr = ST_ACCEL_6_PW_ADDR,
+ .mask = ST_ACCEL_6_PW_MASK,
+ .value_on = ST_SENSORS_DEFAULT_POWER_ON_VALUE,
+ .value_off = ST_SENSORS_DEFAULT_POWER_OFF_VALUE,
+ },
+ .enable_axis = {
+ .addr = ST_SENSORS_DEFAULT_AXIS_ADDR,
+ .mask = ST_SENSORS_DEFAULT_AXIS_MASK,
+ },
+ .fs = {
+ .addr = ST_ACCEL_6_FS_ADDR,
+ .mask = ST_ACCEL_6_FS_MASK,
+ .fs_avl = {
+ [0] = {
+ .num = ST_ACCEL_FS_AVL_100G,
+ .value = ST_ACCEL_6_FS_AVL_100_VAL,
+ .gain = ST_ACCEL_6_FS_AVL_100_GAIN,
+ },
+ [1] = {
+ .num = ST_ACCEL_FS_AVL_200G,
+ .value = ST_ACCEL_6_FS_AVL_200_VAL,
+ .gain = ST_ACCEL_6_FS_AVL_200_GAIN,
+ },
+ [2] = {
+ .num = ST_ACCEL_FS_AVL_400G,
+ .value = ST_ACCEL_6_FS_AVL_400_VAL,
+ .gain = ST_ACCEL_6_FS_AVL_400_GAIN,
+ },
+ },
+ },
+ .bdu = {
+ .addr = ST_ACCEL_6_BDU_ADDR,
+ .mask = ST_ACCEL_6_BDU_MASK,
+ },
+ .drdy_irq = {
+ .addr = ST_ACCEL_6_DRDY_IRQ_ADDR,
+ .mask_int1 = ST_ACCEL_6_DRDY_IRQ_INT1_MASK,
+ .mask_int2 = ST_ACCEL_6_DRDY_IRQ_INT2_MASK,
+ .addr_ihl = ST_ACCEL_6_IHL_IRQ_ADDR,
+ .mask_ihl = ST_ACCEL_6_IHL_IRQ_MASK,
+ },
+ .multi_read_bit = ST_ACCEL_6_MULTIREAD_BIT,
+ .bootime = 2,
+ },
};
static int st_accel_read_raw(struct iio_dev *indio_dev,
diff --git a/drivers/iio/accel/st_accel_i2c.c b/drivers/iio/accel/st_accel_i2c.c
index 294a32f89367..7333ee9fb11b 100644
--- a/drivers/iio/accel/st_accel_i2c.c
+++ b/drivers/iio/accel/st_accel_i2c.c
@@ -76,6 +76,10 @@ static const struct of_device_id st_accel_of_match[] = {
.compatible = "st,lis2dh12-accel",
.data = LIS2DH12_ACCEL_DEV_NAME,
},
+ {
+ .compatible = "st,h3lis331dl-accel",
+ .data = H3LIS331DL_DRIVER_NAME,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_accel_of_match);
diff --git a/drivers/iio/accel/stk8312.c b/drivers/iio/accel/stk8312.c
index 85fe7f7247c1..e31023dc5f1b 100644
--- a/drivers/iio/accel/stk8312.c
+++ b/drivers/iio/accel/stk8312.c
@@ -11,7 +11,6 @@
*/
#include <linux/acpi.h>
-#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
diff --git a/drivers/iio/accel/stk8ba50.c b/drivers/iio/accel/stk8ba50.c
index 5709d9eb8f34..300d955bad00 100644
--- a/drivers/iio/accel/stk8ba50.c
+++ b/drivers/iio/accel/stk8ba50.c
@@ -11,7 +11,6 @@
*/
#include <linux/acpi.h>
-#include <linux/gpio/consumer.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
diff --git a/drivers/iio/adc/Kconfig b/drivers/iio/adc/Kconfig
index 82c718c515a0..25378c5882e2 100644
--- a/drivers/iio/adc/Kconfig
+++ b/drivers/iio/adc/Kconfig
@@ -242,6 +242,16 @@ config LP8788_ADC
To compile this driver as a module, choose M here: the module will be
called lp8788_adc.
+config LPC18XX_ADC
+ tristate "NXP LPC18xx ADC driver"
+ depends on ARCH_LPC18XX || COMPILE_TEST
+ depends on OF && HAS_IOMEM
+ help
+ Say yes here to build support for NXP LPC18XX ADC.
+
+ To compile this driver as a module, choose M here: the module will be
+ called lpc18xx_adc.
+
config MAX1027
tristate "Maxim max1027 ADC driver"
depends on SPI
@@ -375,11 +385,11 @@ config ROCKCHIP_SARADC
module will be called rockchip_saradc.
config TI_ADC081C
- tristate "Texas Instruments ADC081C021/027"
+ tristate "Texas Instruments ADC081C/ADC101C/ADC121C family"
depends on I2C
help
- If you say yes here you get support for Texas Instruments ADC081C021
- and ADC081C027 ADC chips.
+ If you say yes here you get support for Texas Instruments ADC081C,
+ ADC101C and ADC121C ADC chips.
This driver can also be built as a module. If so, the module will be
called ti-adc081c.
diff --git a/drivers/iio/adc/Makefile b/drivers/iio/adc/Makefile
index 0cb79210a4b0..38638d46f972 100644
--- a/drivers/iio/adc/Makefile
+++ b/drivers/iio/adc/Makefile
@@ -25,6 +25,7 @@ obj-$(CONFIG_HI8435) += hi8435.o
obj-$(CONFIG_IMX7D_ADC) += imx7d_adc.o
obj-$(CONFIG_INA2XX_ADC) += ina2xx-adc.o
obj-$(CONFIG_LP8788_ADC) += lp8788_adc.o
+obj-$(CONFIG_LPC18XX_ADC) += lpc18xx_adc.o
obj-$(CONFIG_MAX1027) += max1027.o
obj-$(CONFIG_MAX1363) += max1363.o
obj-$(CONFIG_MCP320X) += mcp320x.o
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
index 01d71588d752..a3f5254f4e51 100644
--- a/drivers/iio/adc/ad799x.c
+++ b/drivers/iio/adc/ad799x.c
@@ -477,7 +477,7 @@ static int ad799x_read_event_value(struct iio_dev *indio_dev,
if (ret < 0)
return ret;
*val = (ret >> chan->scan_type.shift) &
- GENMASK(chan->scan_type.realbits - 1 , 0);
+ GENMASK(chan->scan_type.realbits - 1, 0);
return IIO_VAL_INT;
}
diff --git a/drivers/iio/adc/at91-sama5d2_adc.c b/drivers/iio/adc/at91-sama5d2_adc.c
index 2e154cb51685..e10dca3ed74b 100644
--- a/drivers/iio/adc/at91-sama5d2_adc.c
+++ b/drivers/iio/adc/at91-sama5d2_adc.c
@@ -66,8 +66,10 @@
#define AT91_SAMA5D2_MR_PRESCAL(v) ((v) << AT91_SAMA5D2_MR_PRESCAL_OFFSET)
#define AT91_SAMA5D2_MR_PRESCAL_OFFSET 8
#define AT91_SAMA5D2_MR_PRESCAL_MAX 0xff
+#define AT91_SAMA5D2_MR_PRESCAL_MASK GENMASK(15, 8)
/* Startup Time */
#define AT91_SAMA5D2_MR_STARTUP(v) ((v) << 16)
+#define AT91_SAMA5D2_MR_STARTUP_MASK GENMASK(19, 16)
/* Analog Change */
#define AT91_SAMA5D2_MR_ANACH BIT(23)
/* Tracking Time */
@@ -92,13 +94,13 @@
/* Last Converted Data Register */
#define AT91_SAMA5D2_LCDR 0x20
/* Interrupt Enable Register */
-#define AT91_SAMA5D2_IER 0x24
+#define AT91_SAMA5D2_IER 0x24
/* Interrupt Disable Register */
-#define AT91_SAMA5D2_IDR 0x28
+#define AT91_SAMA5D2_IDR 0x28
/* Interrupt Mask Register */
-#define AT91_SAMA5D2_IMR 0x2c
+#define AT91_SAMA5D2_IMR 0x2c
/* Interrupt Status Register */
-#define AT91_SAMA5D2_ISR 0x30
+#define AT91_SAMA5D2_ISR 0x30
/* Last Channel Trigger Mode Register */
#define AT91_SAMA5D2_LCTMR 0x34
/* Last Channel Compare Window Register */
@@ -106,17 +108,20 @@
/* Overrun Status Register */
#define AT91_SAMA5D2_OVER 0x3c
/* Extended Mode Register */
-#define AT91_SAMA5D2_EMR 0x40
+#define AT91_SAMA5D2_EMR 0x40
/* Compare Window Register */
-#define AT91_SAMA5D2_CWR 0x44
+#define AT91_SAMA5D2_CWR 0x44
/* Channel Gain Register */
-#define AT91_SAMA5D2_CGR 0x48
+#define AT91_SAMA5D2_CGR 0x48
+
/* Channel Offset Register */
-#define AT91_SAMA5D2_COR 0x4c
+#define AT91_SAMA5D2_COR 0x4c
+#define AT91_SAMA5D2_COR_DIFF_OFFSET 16
+
/* Channel Data Register 0 */
#define AT91_SAMA5D2_CDR0 0x50
/* Analog Control Register */
-#define AT91_SAMA5D2_ACR 0x94
+#define AT91_SAMA5D2_ACR 0x94
/* Touchscreen Mode Register */
#define AT91_SAMA5D2_TSMR 0xb0
/* Touchscreen X Position Register */
@@ -130,7 +135,7 @@
/* Correction Select Register */
#define AT91_SAMA5D2_COSR 0xd0
/* Correction Value Register */
-#define AT91_SAMA5D2_CVR 0xd4
+#define AT91_SAMA5D2_CVR 0xd4
/* Channel Error Correction Register */
#define AT91_SAMA5D2_CECR 0xd8
/* Write Protection Mode Register */
@@ -140,7 +145,7 @@
/* Version Register */
#define AT91_SAMA5D2_VERSION 0xfc
-#define AT91_AT91_SAMA5D2_CHAN(num, addr) \
+#define AT91_SAMA5D2_CHAN_SINGLE(num, addr) \
{ \
.type = IIO_VOLTAGE, \
.channel = num, \
@@ -156,6 +161,24 @@
.indexed = 1, \
}
+#define AT91_SAMA5D2_CHAN_DIFF(num, num2, addr) \
+ { \
+ .type = IIO_VOLTAGE, \
+ .differential = 1, \
+ .channel = num, \
+ .channel2 = num2, \
+ .address = addr, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 12, \
+ }, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_shared_by_all = BIT(IIO_CHAN_INFO_SAMP_FREQ),\
+ .datasheet_name = "CH"#num"-CH"#num2, \
+ .indexed = 1, \
+ }
+
#define at91_adc_readl(st, reg) readl_relaxed(st->base + reg)
#define at91_adc_writel(st, reg, val) writel_relaxed(val, st->base + reg)
@@ -185,18 +208,24 @@ struct at91_adc_state {
};
static const struct iio_chan_spec at91_adc_channels[] = {
- AT91_AT91_SAMA5D2_CHAN(0, 0x50),
- AT91_AT91_SAMA5D2_CHAN(1, 0x54),
- AT91_AT91_SAMA5D2_CHAN(2, 0x58),
- AT91_AT91_SAMA5D2_CHAN(3, 0x5c),
- AT91_AT91_SAMA5D2_CHAN(4, 0x60),
- AT91_AT91_SAMA5D2_CHAN(5, 0x64),
- AT91_AT91_SAMA5D2_CHAN(6, 0x68),
- AT91_AT91_SAMA5D2_CHAN(7, 0x6c),
- AT91_AT91_SAMA5D2_CHAN(8, 0x70),
- AT91_AT91_SAMA5D2_CHAN(9, 0x74),
- AT91_AT91_SAMA5D2_CHAN(10, 0x78),
- AT91_AT91_SAMA5D2_CHAN(11, 0x7c),
+ AT91_SAMA5D2_CHAN_SINGLE(0, 0x50),
+ AT91_SAMA5D2_CHAN_SINGLE(1, 0x54),
+ AT91_SAMA5D2_CHAN_SINGLE(2, 0x58),
+ AT91_SAMA5D2_CHAN_SINGLE(3, 0x5c),
+ AT91_SAMA5D2_CHAN_SINGLE(4, 0x60),
+ AT91_SAMA5D2_CHAN_SINGLE(5, 0x64),
+ AT91_SAMA5D2_CHAN_SINGLE(6, 0x68),
+ AT91_SAMA5D2_CHAN_SINGLE(7, 0x6c),
+ AT91_SAMA5D2_CHAN_SINGLE(8, 0x70),
+ AT91_SAMA5D2_CHAN_SINGLE(9, 0x74),
+ AT91_SAMA5D2_CHAN_SINGLE(10, 0x78),
+ AT91_SAMA5D2_CHAN_SINGLE(11, 0x7c),
+ AT91_SAMA5D2_CHAN_DIFF(0, 1, 0x50),
+ AT91_SAMA5D2_CHAN_DIFF(2, 3, 0x58),
+ AT91_SAMA5D2_CHAN_DIFF(4, 5, 0x60),
+ AT91_SAMA5D2_CHAN_DIFF(6, 7, 0x68),
+ AT91_SAMA5D2_CHAN_DIFF(8, 9, 0x70),
+ AT91_SAMA5D2_CHAN_DIFF(10, 11, 0x78),
};
static unsigned at91_adc_startup_time(unsigned startup_time_min,
@@ -226,7 +255,7 @@ static unsigned at91_adc_startup_time(unsigned startup_time_min,
static void at91_adc_setup_samp_freq(struct at91_adc_state *st, unsigned freq)
{
struct iio_dev *indio_dev = iio_priv_to_dev(st);
- unsigned f_per, prescal, startup;
+ unsigned f_per, prescal, startup, mr;
f_per = clk_get_rate(st->per_clk);
prescal = (f_per / (2 * freq)) - 1;
@@ -234,10 +263,11 @@ static void at91_adc_setup_samp_freq(struct at91_adc_state *st, unsigned freq)
startup = at91_adc_startup_time(st->soc_info.startup_time,
freq / 1000);
- at91_adc_writel(st, AT91_SAMA5D2_MR,
- AT91_SAMA5D2_MR_TRANSFER(2)
- | AT91_SAMA5D2_MR_STARTUP(startup)
- | AT91_SAMA5D2_MR_PRESCAL(prescal));
+ mr = at91_adc_readl(st, AT91_SAMA5D2_MR);
+ mr &= ~(AT91_SAMA5D2_MR_STARTUP_MASK | AT91_SAMA5D2_MR_PRESCAL_MASK);
+ mr |= AT91_SAMA5D2_MR_STARTUP(startup);
+ mr |= AT91_SAMA5D2_MR_PRESCAL(prescal);
+ at91_adc_writel(st, AT91_SAMA5D2_MR, mr);
dev_dbg(&indio_dev->dev, "freq: %u, startup: %u, prescal: %u\n",
freq, startup, prescal);
@@ -278,6 +308,7 @@ static int at91_adc_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct at91_adc_state *st = iio_priv(indio_dev);
+ u32 cor = 0;
int ret;
switch (mask) {
@@ -286,6 +317,11 @@ static int at91_adc_read_raw(struct iio_dev *indio_dev,
st->chan = chan;
+ if (chan->differential)
+ cor = (BIT(chan->channel) | BIT(chan->channel2)) <<
+ AT91_SAMA5D2_COR_DIFF_OFFSET;
+
+ at91_adc_writel(st, AT91_SAMA5D2_COR, cor);
at91_adc_writel(st, AT91_SAMA5D2_CHER, BIT(chan->channel));
at91_adc_writel(st, AT91_SAMA5D2_IER, BIT(chan->channel));
at91_adc_writel(st, AT91_SAMA5D2_CR, AT91_SAMA5D2_CR_START);
@@ -298,6 +334,8 @@ static int at91_adc_read_raw(struct iio_dev *indio_dev,
if (ret > 0) {
*val = st->conversion_value;
+ if (chan->scan_type.sign == 's')
+ *val = sign_extend32(*val, 11);
ret = IIO_VAL_INT;
st->conversion_done = false;
}
@@ -310,6 +348,8 @@ static int at91_adc_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_SCALE:
*val = st->vref_uv / 1000;
+ if (chan->differential)
+ *val *= 2;
*val2 = chan->scan_type.realbits;
return IIO_VAL_FRACTIONAL_LOG2;
@@ -444,6 +484,12 @@ static int at91_adc_probe(struct platform_device *pdev)
at91_adc_writel(st, AT91_SAMA5D2_CR, AT91_SAMA5D2_CR_SWRST);
at91_adc_writel(st, AT91_SAMA5D2_IDR, 0xffffffff);
+ /*
+	 * The transfer field must be set to 2 according to the datasheet;
+	 * the ANACH bit allows different analog settings for each channel.
+ */
+ at91_adc_writel(st, AT91_SAMA5D2_MR,
+ AT91_SAMA5D2_MR_TRANSFER(2) | AT91_SAMA5D2_MR_ANACH);
at91_adc_setup_samp_freq(st, st->soc_info.min_sample_rate);
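For the new differential channels above, the conversion result is a signed 12-bit value (sign_extend32(*val, 11)) and the reported scale doubles. An illustrative standalone decode; the reference voltage is an arbitrary example, not a driver default:

#include <stdio.h>
#include <stdint.h>

/* Minimal equivalent of the kernel's sign_extend32(value, index). */
static int32_t sign_extend32(uint32_t value, int index)
{
	uint8_t shift = 31 - index;

	return (int32_t)(value << shift) >> shift;
}

int main(void)
{
	unsigned int raw = 0xF80;          /* 12-bit two's-complement sample */
	int val = sign_extend32(raw, 11);  /* -> -128 counts */
	int vref_mv = 3300000 / 1000;      /* example vref_uv of 3.3 V */
	int scale_mv = vref_mv * 2;        /* differential: twice the range */

	/* IIO_VAL_FRACTIONAL_LOG2 with realbits = 12: value * scale / 2^12 */
	printf("raw 0x%03x -> %d counts, %.3f mV\n",
	       raw, val, (double)val * scale_mv / 4096.0);
	return 0;
}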
diff --git a/drivers/iio/adc/at91_adc.c b/drivers/iio/adc/at91_adc.c
index f284cd6a93d6..52430ba171f3 100644
--- a/drivers/iio/adc/at91_adc.c
+++ b/drivers/iio/adc/at91_adc.c
@@ -797,8 +797,8 @@ static u32 calc_startup_ticks_9x5(u32 startup_time, u32 adc_clk_khz)
* Startup Time = <lookup_table_value> / ADC Clock
*/
const int startup_lookup[] = {
- 0 , 8 , 16 , 24 ,
- 64 , 80 , 96 , 112,
+ 0, 8, 16, 24,
+ 64, 80, 96, 112,
512, 576, 640, 704,
768, 832, 896, 960
};
@@ -924,14 +924,14 @@ static int at91_adc_probe_dt(struct at91_adc_state *st,
ret = -EINVAL;
goto error_ret;
}
- trig->name = name;
+ trig->name = name;
if (of_property_read_u32(trig_node, "trigger-value", &prop)) {
dev_err(&idev->dev, "Missing trigger-value property in the DT.\n");
ret = -EINVAL;
goto error_ret;
}
- trig->value = prop;
+ trig->value = prop;
trig->is_external = of_property_read_bool(trig_node, "trigger-external");
i++;
}
diff --git a/drivers/iio/adc/ina2xx-adc.c b/drivers/iio/adc/ina2xx-adc.c
index 65909d5858b1..502f2fbe8aef 100644
--- a/drivers/iio/adc/ina2xx-adc.c
+++ b/drivers/iio/adc/ina2xx-adc.c
@@ -185,9 +185,9 @@ static int ina2xx_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_SCALE:
switch (chan->address) {
case INA2XX_SHUNT_VOLTAGE:
- /* processed (mV) = raw*1000/shunt_div */
+ /* processed (mV) = raw/shunt_div */
*val2 = chip->config->shunt_div;
- *val = 1000;
+ *val = 1;
return IIO_VAL_FRACTIONAL;
case INA2XX_BUS_VOLTAGE:
@@ -350,6 +350,23 @@ static ssize_t ina2xx_allow_async_readout_store(struct device *dev,
return len;
}
+/*
+ * Set current LSB to 1mA, shunt is in uOhms
+ * (equation 13 in datasheet). We hardcode a Current_LSB
+ * of 1.0 x10-6. The only remaining parameter is RShunt.
+ * There is no need to expose the CALIBRATION register
+ * to the user for now. But we need to reset this register
+ * if the user updates RShunt after driver init, e.g. upon
+ * reading an EEPROM/Probe-type value.
+ */
+static int ina2xx_set_calibration(struct ina2xx_chip_info *chip)
+{
+ u16 regval = DIV_ROUND_CLOSEST(chip->config->calibration_factor,
+ chip->shunt_resistor);
+
+ return regmap_write(chip->regmap, INA2XX_CALIBRATION, regval);
+}
+
static int set_shunt_resistor(struct ina2xx_chip_info *chip, unsigned int val)
{
if (val <= 0 || val > chip->config->calibration_factor)
@@ -385,6 +402,11 @@ static ssize_t ina2xx_shunt_resistor_store(struct device *dev,
if (ret)
return ret;
+ /* Update the Calibration register */
+ ret = ina2xx_set_calibration(chip);
+ if (ret)
+ return ret;
+
return len;
}
@@ -602,24 +624,11 @@ static const struct iio_info ina2xx_info = {
/* Initialize the configuration and calibration registers. */
static int ina2xx_init(struct ina2xx_chip_info *chip, unsigned int config)
{
- u16 regval;
- int ret;
-
- ret = regmap_write(chip->regmap, INA2XX_CONFIG, config);
+ int ret = regmap_write(chip->regmap, INA2XX_CONFIG, config);
if (ret)
return ret;
- /*
- * Set current LSB to 1mA, shunt is in uOhms
- * (equation 13 in datasheet). We hardcode a Current_LSB
- * of 1.0 x10-6. The only remaining parameter is RShunt.
- * There is no need to expose the CALIBRATION register
- * to the user for now.
- */
- regval = DIV_ROUND_CLOSEST(chip->config->calibration_factor,
- chip->shunt_resistor);
-
- return regmap_write(chip->regmap, INA2XX_CALIBRATION, regval);
+ return ina2xx_set_calibration(chip);
}
static int ina2xx_probe(struct i2c_client *client,
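The refactor above routes both the init path and the sysfs shunt_resistor store through ina2xx_set_calibration(), so the CALIBRATION register always tracks the current RShunt. A sketch of the register math it performs; the factor and shunt value below are example inputs, not values taken from the driver:

#include <stdio.h>

/* Equivalent of the kernel's DIV_ROUND_CLOSEST() for unsigned operands. */
static unsigned int div_round_closest(unsigned int x, unsigned int d)
{
	return (x + d / 2) / d;
}

int main(void)
{
	unsigned int calibration_factor = 5120000; /* assumed example factor */
	unsigned int rshunt_uohm = 10000;          /* example 10 mOhm shunt */

	/* regval = factor / rshunt, rounded to nearest, as in ina2xx_set_calibration() */
	printf("CALIBRATION = %u\n",
	       div_round_closest(calibration_factor, rshunt_uohm));
	return 0;
}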
diff --git a/drivers/iio/adc/lpc18xx_adc.c b/drivers/iio/adc/lpc18xx_adc.c
new file mode 100644
index 000000000000..3ef18f4b27f0
--- /dev/null
+++ b/drivers/iio/adc/lpc18xx_adc.c
@@ -0,0 +1,231 @@
+/*
+ * IIO ADC driver for NXP LPC18xx ADC
+ *
+ * Copyright (C) 2016 Joachim Eastwood <manabian@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * UNSUPPORTED hardware features:
+ * - Hardware triggers
+ * - Burst mode
+ * - Interrupts
+ * - DMA
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/driver.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+/* LPC18XX ADC registers and bits */
+#define LPC18XX_ADC_CR 0x000
+#define LPC18XX_ADC_CR_CLKDIV_SHIFT 8
+#define LPC18XX_ADC_CR_PDN BIT(21)
+#define LPC18XX_ADC_CR_START_NOW (0x1 << 24)
+#define LPC18XX_ADC_GDR 0x004
+
+/* Data register bits */
+#define LPC18XX_ADC_SAMPLE_SHIFT 6
+#define LPC18XX_ADC_SAMPLE_MASK 0x3ff
+#define LPC18XX_ADC_CONV_DONE BIT(31)
+
+/* Clock should be 4.5 MHz or less */
+#define LPC18XX_ADC_CLK_TARGET 4500000
+
+struct lpc18xx_adc {
+ struct regulator *vref;
+ void __iomem *base;
+ struct device *dev;
+ struct mutex lock;
+ struct clk *clk;
+ u32 cr_reg;
+};
+
+#define LPC18XX_ADC_CHAN(_idx) { \
+ .type = IIO_VOLTAGE, \
+ .indexed = 1, \
+ .channel = _idx, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+}
+
+static const struct iio_chan_spec lpc18xx_adc_iio_channels[] = {
+ LPC18XX_ADC_CHAN(0),
+ LPC18XX_ADC_CHAN(1),
+ LPC18XX_ADC_CHAN(2),
+ LPC18XX_ADC_CHAN(3),
+ LPC18XX_ADC_CHAN(4),
+ LPC18XX_ADC_CHAN(5),
+ LPC18XX_ADC_CHAN(6),
+ LPC18XX_ADC_CHAN(7),
+};
+
+static int lpc18xx_adc_read_chan(struct lpc18xx_adc *adc, unsigned int ch)
+{
+ int ret;
+ u32 reg;
+
+ reg = adc->cr_reg | BIT(ch) | LPC18XX_ADC_CR_START_NOW;
+ writel(reg, adc->base + LPC18XX_ADC_CR);
+
+ ret = readl_poll_timeout(adc->base + LPC18XX_ADC_GDR, reg,
+ reg & LPC18XX_ADC_CONV_DONE, 3, 9);
+ if (ret) {
+ dev_warn(adc->dev, "adc read timed out\n");
+ return ret;
+ }
+
+ return (reg >> LPC18XX_ADC_SAMPLE_SHIFT) & LPC18XX_ADC_SAMPLE_MASK;
+}
+
+static int lpc18xx_adc_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct lpc18xx_adc *adc = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&adc->lock);
+ *val = lpc18xx_adc_read_chan(adc, chan->channel);
+ mutex_unlock(&adc->lock);
+ if (*val < 0)
+ return *val;
+
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ *val = regulator_get_voltage(adc->vref) / 1000;
+ *val2 = 10;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info lpc18xx_adc_info = {
+ .read_raw = lpc18xx_adc_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int lpc18xx_adc_probe(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev;
+ struct lpc18xx_adc *adc;
+ struct resource *res;
+ unsigned int clkdiv;
+ unsigned long rate;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, indio_dev);
+ adc = iio_priv(indio_dev);
+ adc->dev = &pdev->dev;
+ mutex_init(&adc->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ adc->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(adc->base))
+ return PTR_ERR(adc->base);
+
+ adc->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(adc->clk)) {
+ dev_err(&pdev->dev, "error getting clock\n");
+ return PTR_ERR(adc->clk);
+ }
+
+ rate = clk_get_rate(adc->clk);
+ clkdiv = DIV_ROUND_UP(rate, LPC18XX_ADC_CLK_TARGET);
+
+ adc->vref = devm_regulator_get(&pdev->dev, "vref");
+ if (IS_ERR(adc->vref)) {
+ dev_err(&pdev->dev, "error getting regulator\n");
+ return PTR_ERR(adc->vref);
+ }
+
+ indio_dev->name = dev_name(&pdev->dev);
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->info = &lpc18xx_adc_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = lpc18xx_adc_iio_channels;
+ indio_dev->num_channels = ARRAY_SIZE(lpc18xx_adc_iio_channels);
+
+ ret = regulator_enable(adc->vref);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to enable regulator\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(adc->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to enable clock\n");
+ goto dis_reg;
+ }
+
+ adc->cr_reg = (clkdiv << LPC18XX_ADC_CR_CLKDIV_SHIFT) |
+ LPC18XX_ADC_CR_PDN;
+ writel(adc->cr_reg, adc->base + LPC18XX_ADC_CR);
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to register device\n");
+ goto dis_clk;
+ }
+
+ return 0;
+
+dis_clk:
+ writel(0, adc->base + LPC18XX_ADC_CR);
+ clk_disable_unprepare(adc->clk);
+dis_reg:
+ regulator_disable(adc->vref);
+ return ret;
+}
+
+static int lpc18xx_adc_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct lpc18xx_adc *adc = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ writel(0, adc->base + LPC18XX_ADC_CR);
+ clk_disable_unprepare(adc->clk);
+ regulator_disable(adc->vref);
+
+ return 0;
+}
+
+static const struct of_device_id lpc18xx_adc_match[] = {
+ { .compatible = "nxp,lpc1850-adc" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, lpc18xx_adc_match);
+
+static struct platform_driver lpc18xx_adc_driver = {
+ .probe = lpc18xx_adc_probe,
+ .remove = lpc18xx_adc_remove,
+ .driver = {
+ .name = "lpc18xx-adc",
+ .of_match_table = lpc18xx_adc_match,
+ },
+};
+module_platform_driver(lpc18xx_adc_driver);
+
+MODULE_DESCRIPTION("LPC18xx ADC driver");
+MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/mcp3422.c b/drivers/iio/adc/mcp3422.c
index d7b36efd2f3c..d1172dc1e8e2 100644
--- a/drivers/iio/adc/mcp3422.c
+++ b/drivers/iio/adc/mcp3422.c
@@ -61,9 +61,9 @@
static const int mcp3422_scales[4][4] = {
{ 1000000, 500000, 250000, 125000 },
- { 250000 , 125000, 62500 , 31250 },
- { 62500 , 31250 , 15625 , 7812 },
- { 15625 , 7812 , 3906 , 1953 } };
+ { 250000, 125000, 62500, 31250 },
+ { 62500, 31250, 15625, 7812 },
+ { 15625, 7812, 3906, 1953 } };
/* Constant msleep times for data acquisitions */
static const int mcp3422_read_times[4] = {
diff --git a/drivers/iio/adc/mxs-lradc.c b/drivers/iio/adc/mxs-lradc.c
index 33051b87aac2..ad26da1edbee 100644
--- a/drivers/iio/adc/mxs-lradc.c
+++ b/drivers/iio/adc/mxs-lradc.c
@@ -686,6 +686,17 @@ static void mxs_lradc_prepare_pressure(struct mxs_lradc *lradc)
static void mxs_lradc_enable_touch_detection(struct mxs_lradc *lradc)
{
+ /* Configure the touchscreen type */
+ if (lradc->soc == IMX28_LRADC) {
+ mxs_lradc_reg_clear(lradc, LRADC_CTRL0_MX28_TOUCH_SCREEN_TYPE,
+ LRADC_CTRL0);
+
+ if (lradc->use_touchscreen == MXS_LRADC_TOUCHSCREEN_5WIRE)
+ mxs_lradc_reg_set(lradc,
+ LRADC_CTRL0_MX28_TOUCH_SCREEN_TYPE,
+ LRADC_CTRL0);
+ }
+
mxs_lradc_setup_touch_detection(lradc);
lradc->cur_plate = LRADC_TOUCH;
@@ -1127,6 +1138,7 @@ static int mxs_lradc_ts_register(struct mxs_lradc *lradc)
__set_bit(EV_ABS, input->evbit);
__set_bit(EV_KEY, input->evbit);
__set_bit(BTN_TOUCH, input->keybit);
+ __set_bit(INPUT_PROP_DIRECT, input->propbit);
input_set_abs_params(input, ABS_X, 0, LRADC_SINGLE_SAMPLE_MASK, 0, 0);
input_set_abs_params(input, ABS_Y, 0, LRADC_SINGLE_SAMPLE_MASK, 0, 0);
input_set_abs_params(input, ABS_PRESSURE, 0, LRADC_SINGLE_SAMPLE_MASK,
@@ -1475,18 +1487,13 @@ static const struct iio_chan_spec mx28_lradc_chan_spec[] = {
MXS_ADC_CHAN(15, IIO_VOLTAGE, "VDD5V"),
};
-static int mxs_lradc_hw_init(struct mxs_lradc *lradc)
+static void mxs_lradc_hw_init(struct mxs_lradc *lradc)
{
/* The ADC always uses DELAY CHANNEL 0. */
const u32 adc_cfg =
(1 << (LRADC_DELAY_TRIGGER_DELAYS_OFFSET + 0)) |
(LRADC_DELAY_TIMER_PER << LRADC_DELAY_DELAY_OFFSET);
- int ret = stmp_reset_block(lradc->base);
-
- if (ret)
- return ret;
-
/* Configure DELAY CHANNEL 0 for generic ADC sampling. */
mxs_lradc_reg_wrt(lradc, adc_cfg, LRADC_DELAY(0));
@@ -1495,20 +1502,8 @@ static int mxs_lradc_hw_init(struct mxs_lradc *lradc)
mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(2));
mxs_lradc_reg_wrt(lradc, 0, LRADC_DELAY(3));
- /* Configure the touchscreen type */
- if (lradc->soc == IMX28_LRADC) {
- mxs_lradc_reg_clear(lradc, LRADC_CTRL0_MX28_TOUCH_SCREEN_TYPE,
- LRADC_CTRL0);
-
- if (lradc->use_touchscreen == MXS_LRADC_TOUCHSCREEN_5WIRE)
- mxs_lradc_reg_set(lradc, LRADC_CTRL0_MX28_TOUCH_SCREEN_TYPE,
- LRADC_CTRL0);
- }
-
/* Start internal temperature sensing. */
mxs_lradc_reg_wrt(lradc, 0, LRADC_CTRL2);
-
- return 0;
}
static void mxs_lradc_hw_stop(struct mxs_lradc *lradc)
@@ -1708,11 +1703,13 @@ static int mxs_lradc_probe(struct platform_device *pdev)
}
}
- /* Configure the hardware. */
- ret = mxs_lradc_hw_init(lradc);
+ ret = stmp_reset_block(lradc->base);
if (ret)
goto err_dev;
+ /* Configure the hardware. */
+ mxs_lradc_hw_init(lradc);
+
/* Register the touchscreen input device. */
if (touch_ret == 0) {
ret = mxs_lradc_ts_register(lradc);
diff --git a/drivers/iio/adc/rockchip_saradc.c b/drivers/iio/adc/rockchip_saradc.c
index 9c311c1e1ac7..f9ad6c2d6821 100644
--- a/drivers/iio/adc/rockchip_saradc.c
+++ b/drivers/iio/adc/rockchip_saradc.c
@@ -159,6 +159,22 @@ static const struct rockchip_saradc_data rk3066_tsadc_data = {
.clk_rate = 50000,
};
+static const struct iio_chan_spec rockchip_rk3399_saradc_iio_channels[] = {
+ ADC_CHANNEL(0, "adc0"),
+ ADC_CHANNEL(1, "adc1"),
+ ADC_CHANNEL(2, "adc2"),
+ ADC_CHANNEL(3, "adc3"),
+ ADC_CHANNEL(4, "adc4"),
+ ADC_CHANNEL(5, "adc5"),
+};
+
+static const struct rockchip_saradc_data rk3399_saradc_data = {
+ .num_bits = 10,
+ .channels = rockchip_rk3399_saradc_iio_channels,
+ .num_channels = ARRAY_SIZE(rockchip_rk3399_saradc_iio_channels),
+ .clk_rate = 1000000,
+};
+
static const struct of_device_id rockchip_saradc_match[] = {
{
.compatible = "rockchip,saradc",
@@ -166,6 +182,9 @@ static const struct of_device_id rockchip_saradc_match[] = {
}, {
.compatible = "rockchip,rk3066-tsadc",
.data = &rk3066_tsadc_data,
+ }, {
+ .compatible = "rockchip,rk3399-saradc",
+ .data = &rk3399_saradc_data,
},
{},
};
diff --git a/drivers/iio/adc/ti-adc081c.c b/drivers/iio/adc/ti-adc081c.c
index ecbc12138d58..9fd032d9f402 100644
--- a/drivers/iio/adc/ti-adc081c.c
+++ b/drivers/iio/adc/ti-adc081c.c
@@ -1,9 +1,21 @@
/*
+ * TI ADC081C/ADC101C/ADC121C 8/10/12-bit ADC driver
+ *
* Copyright (C) 2012 Avionic Design GmbH
+ * Copyright (C) 2016 Intel
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
+ *
+ * Datasheets:
+ * http://www.ti.com/lit/ds/symlink/adc081c021.pdf
+ * http://www.ti.com/lit/ds/symlink/adc101c021.pdf
+ * http://www.ti.com/lit/ds/symlink/adc121c021.pdf
+ *
+ * The devices have a very similar interface and differ mostly in the number of
+ * bits handled. For the 8-bit and 10-bit models the least-significant 4 or 2
+ * bits of value registers are reserved.
*/
#include <linux/err.h>
@@ -12,11 +24,17 @@
#include <linux/of.h>
#include <linux/iio/iio.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
#include <linux/regulator/consumer.h>
struct adc081c {
struct i2c_client *i2c;
struct regulator *ref;
+
+ /* 8, 10 or 12 */
+ int bits;
};
#define REG_CONV_RES 0x00
@@ -34,7 +52,7 @@ static int adc081c_read_raw(struct iio_dev *iio,
if (err < 0)
return err;
- *value = (err >> 4) & 0xff;
+ *value = (err & 0xFFF) >> (12 - adc->bits);
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
@@ -43,7 +61,7 @@ static int adc081c_read_raw(struct iio_dev *iio,
return err;
*value = err / 1000;
- *shift = 8;
+ *shift = adc->bits;
return IIO_VAL_FRACTIONAL_LOG2;
@@ -54,10 +72,53 @@ static int adc081c_read_raw(struct iio_dev *iio,
return -EINVAL;
}
-static const struct iio_chan_spec adc081c_channel = {
- .type = IIO_VOLTAGE,
- .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+#define ADCxx1C_CHAN(_bits) { \
+ .type = IIO_VOLTAGE, \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .scan_type = { \
+ .sign = 'u', \
+ .realbits = (_bits), \
+ .storagebits = 16, \
+ .shift = 12 - (_bits), \
+ .endianness = IIO_CPU, \
+ }, \
+}
+
+#define DEFINE_ADCxx1C_CHANNELS(_name, _bits) \
+ static const struct iio_chan_spec _name ## _channels[] = { \
+ ADCxx1C_CHAN((_bits)), \
+ IIO_CHAN_SOFT_TIMESTAMP(1), \
+ }; \
+
+#define ADC081C_NUM_CHANNELS 2
+
+struct adcxx1c_model {
+	const struct iio_chan_spec *channels;
+ int bits;
+};
+
+#define ADCxx1C_MODEL(_name, _bits) \
+ { \
+ .channels = _name ## _channels, \
+ .bits = (_bits), \
+ }
+
+DEFINE_ADCxx1C_CHANNELS(adc081c, 8);
+DEFINE_ADCxx1C_CHANNELS(adc101c, 10);
+DEFINE_ADCxx1C_CHANNELS(adc121c, 12);
+
+/* Model ids are indexes in _models array */
+enum adcxx1c_model_id {
+ ADC081C = 0,
+ ADC101C = 1,
+ ADC121C = 2,
+};
+
+static struct adcxx1c_model adcxx1c_models[] = {
+ ADCxx1C_MODEL(adc081c, 8),
+ ADCxx1C_MODEL(adc101c, 10),
+ ADCxx1C_MODEL(adc121c, 12),
};
static const struct iio_info adc081c_info = {
@@ -65,11 +126,30 @@ static const struct iio_info adc081c_info = {
.driver_module = THIS_MODULE,
};
+static irqreturn_t adc081c_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct adc081c *data = iio_priv(indio_dev);
+ u16 buf[8]; /* 2 bytes data + 6 bytes padding + 8 bytes timestamp */
+ int ret;
+
+ ret = i2c_smbus_read_word_swapped(data->i2c, REG_CONV_RES);
+ if (ret < 0)
+ goto out;
+ buf[0] = ret;
+ iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
+out:
+ iio_trigger_notify_done(indio_dev->trig);
+ return IRQ_HANDLED;
+}
+
static int adc081c_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct iio_dev *iio;
struct adc081c *adc;
+ struct adcxx1c_model *model = &adcxx1c_models[id->driver_data];
int err;
if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA))
@@ -81,6 +161,7 @@ static int adc081c_probe(struct i2c_client *client,
adc = iio_priv(iio);
adc->i2c = client;
+ adc->bits = model->bits;
adc->ref = devm_regulator_get(&client->dev, "vref");
if (IS_ERR(adc->ref))
@@ -95,18 +176,26 @@ static int adc081c_probe(struct i2c_client *client,
iio->modes = INDIO_DIRECT_MODE;
iio->info = &adc081c_info;
- iio->channels = &adc081c_channel;
- iio->num_channels = 1;
+ iio->channels = model->channels;
+ iio->num_channels = ADC081C_NUM_CHANNELS;
+
+ err = iio_triggered_buffer_setup(iio, NULL, adc081c_trigger_handler, NULL);
+ if (err < 0) {
+ dev_err(&client->dev, "iio triggered buffer setup failed\n");
+ goto err_regulator_disable;
+ }
err = iio_device_register(iio);
if (err < 0)
- goto regulator_disable;
+ goto err_buffer_cleanup;
i2c_set_clientdata(client, iio);
return 0;
-regulator_disable:
+err_buffer_cleanup:
+ iio_triggered_buffer_cleanup(iio);
+err_regulator_disable:
regulator_disable(adc->ref);
return err;
@@ -118,13 +207,16 @@ static int adc081c_remove(struct i2c_client *client)
struct adc081c *adc = iio_priv(iio);
iio_device_unregister(iio);
+ iio_triggered_buffer_cleanup(iio);
regulator_disable(adc->ref);
return 0;
}
static const struct i2c_device_id adc081c_id[] = {
- { "adc081c", 0 },
+ { "adc081c", ADC081C },
+ { "adc101c", ADC101C },
+ { "adc121c", ADC121C },
{ }
};
MODULE_DEVICE_TABLE(i2c, adc081c_id);
@@ -132,6 +224,8 @@ MODULE_DEVICE_TABLE(i2c, adc081c_id);
#ifdef CONFIG_OF
static const struct of_device_id adc081c_of_match[] = {
{ .compatible = "ti,adc081c" },
+ { .compatible = "ti,adc101c" },
+ { .compatible = "ti,adc121c" },
{ }
};
MODULE_DEVICE_TABLE(of, adc081c_of_match);
@@ -149,5 +243,5 @@ static struct i2c_driver adc081c_driver = {
module_i2c_driver(adc081c_driver);
MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
-MODULE_DESCRIPTION("Texas Instruments ADC081C021/027 driver");
+MODULE_DESCRIPTION("Texas Instruments ADC081C/ADC101C/ADC121C driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index b10f629cc44b..653bf1379d2e 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -714,19 +714,19 @@ static int vf610_write_raw(struct iio_dev *indio_dev,
int i;
switch (mask) {
- case IIO_CHAN_INFO_SAMP_FREQ:
- for (i = 0;
- i < ARRAY_SIZE(info->sample_freq_avail);
- i++)
- if (val == info->sample_freq_avail[i]) {
- info->adc_feature.sample_rate = i;
- vf610_adc_sample_set(info);
- return 0;
- }
- break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ for (i = 0;
+ i < ARRAY_SIZE(info->sample_freq_avail);
+ i++)
+ if (val == info->sample_freq_avail[i]) {
+ info->adc_feature.sample_rate = i;
+ vf610_adc_sample_set(info);
+ return 0;
+ }
+ break;
- default:
- break;
+ default:
+ break;
}
return -EINVAL;
diff --git a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
index 595511022795..5b41f9d0d4f3 100644
--- a/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
+++ b/drivers/iio/common/hid-sensors/hid-sensor-trigger.c
@@ -115,7 +115,7 @@ int hid_sensor_power_state(struct hid_sensor_common *st, bool state)
return ret;
}
- return 0;
+ return 0;
#else
atomic_set(&st->user_requested_state, state);
return _hid_sensor_power_state(st, state);
diff --git a/drivers/iio/common/ms_sensors/ms_sensors_i2c.c b/drivers/iio/common/ms_sensors/ms_sensors_i2c.c
index 669dc7c270f5..ecf7721ecaf4 100644
--- a/drivers/iio/common/ms_sensors/ms_sensors_i2c.c
+++ b/drivers/iio/common/ms_sensors/ms_sensors_i2c.c
@@ -106,7 +106,7 @@ int ms_sensors_convert_and_read(void *cli, u8 conv, u8 rd,
unsigned int delay, u32 *adc)
{
int ret;
- __be32 buf = 0;
+ __be32 buf = 0;
struct i2c_client *client = (struct i2c_client *)cli;
/* Trigger conversion */
diff --git a/drivers/iio/common/st_sensors/st_sensors_buffer.c b/drivers/iio/common/st_sensors/st_sensors_buffer.c
index e18bc6782256..c55898543a47 100644
--- a/drivers/iio/common/st_sensors/st_sensors_buffer.c
+++ b/drivers/iio/common/st_sensors/st_sensors_buffer.c
@@ -24,81 +24,30 @@
int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf)
{
- u8 *addr;
- int i, n = 0, len;
+ int i, len;
+ int total = 0;
struct st_sensor_data *sdata = iio_priv(indio_dev);
unsigned int num_data_channels = sdata->num_data_channels;
- unsigned int byte_for_channel =
- indio_dev->channels[0].scan_type.storagebits >> 3;
-
- addr = kmalloc(num_data_channels, GFP_KERNEL);
- if (!addr) {
- len = -ENOMEM;
- goto st_sensors_get_buffer_element_error;
- }
for (i = 0; i < num_data_channels; i++) {
+ unsigned int bytes_to_read;
+
if (test_bit(i, indio_dev->active_scan_mask)) {
- addr[n] = indio_dev->channels[i].address;
- n++;
- }
- }
- switch (n) {
- case 1:
- len = sdata->tf->read_multiple_byte(&sdata->tb, sdata->dev,
- addr[0], byte_for_channel, buf, sdata->multiread_bit);
- break;
- case 2:
- if ((addr[1] - addr[0]) == byte_for_channel) {
+ bytes_to_read = indio_dev->channels[i].scan_type.storagebits >> 3;
len = sdata->tf->read_multiple_byte(&sdata->tb,
- sdata->dev, addr[0], byte_for_channel * n,
- buf, sdata->multiread_bit);
- } else {
- u8 *rx_array;
- rx_array = kmalloc(byte_for_channel * num_data_channels,
- GFP_KERNEL);
- if (!rx_array) {
- len = -ENOMEM;
- goto st_sensors_free_memory;
- }
+ sdata->dev, indio_dev->channels[i].address,
+ bytes_to_read,
+ buf + total, sdata->multiread_bit);
- len = sdata->tf->read_multiple_byte(&sdata->tb,
- sdata->dev, addr[0],
- byte_for_channel * num_data_channels,
- rx_array, sdata->multiread_bit);
- if (len < 0) {
- kfree(rx_array);
- goto st_sensors_free_memory;
- }
-
- for (i = 0; i < n * byte_for_channel; i++) {
- if (i < n)
- buf[i] = rx_array[i];
- else
- buf[i] = rx_array[n + i];
- }
- kfree(rx_array);
- len = byte_for_channel * n;
+ if (len < bytes_to_read)
+ return -EIO;
+
+ /* Advance the buffer pointer */
+ total += len;
}
- break;
- case 3:
- len = sdata->tf->read_multiple_byte(&sdata->tb, sdata->dev,
- addr[0], byte_for_channel * num_data_channels,
- buf, sdata->multiread_bit);
- break;
- default:
- len = -EINVAL;
- goto st_sensors_free_memory;
- }
- if (len != byte_for_channel * n) {
- len = -EIO;
- goto st_sensors_free_memory;
}
-st_sensors_free_memory:
- kfree(addr);
-st_sensors_get_buffer_element_error:
- return len;
+ return total;
}
EXPORT_SYMBOL(st_sensors_get_buffer_element);
@@ -109,6 +58,24 @@ irqreturn_t st_sensors_trigger_handler(int irq, void *p)
struct iio_dev *indio_dev = pf->indio_dev;
struct st_sensor_data *sdata = iio_priv(indio_dev);
+ /* If we have a status register, check if this IRQ came from us */
+ if (sdata->sensor_settings->drdy_irq.addr_stat_drdy) {
+ u8 status;
+
+ len = sdata->tf->read_byte(&sdata->tb, sdata->dev,
+ sdata->sensor_settings->drdy_irq.addr_stat_drdy,
+ &status);
+ if (len < 0)
+ dev_err(sdata->dev, "could not read channel status\n");
+
+ /*
+ * If this was not caused by any channels on this sensor,
+ * return IRQ_NONE
+ */
+ if (!(status & (u8)indio_dev->active_scan_mask[0]))
+ return IRQ_NONE;
+ }
+
len = st_sensors_get_buffer_element(indio_dev, sdata->buffer_data);
if (len < 0)
goto st_sensors_get_buffer_element_error;
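The rewritten buffer path reads each enabled channel into a running offset, and the new status-register check lets the handler return IRQ_NONE when a shared interrupt line fires for another device. A minimal sketch of that mask test only; the status layout is device-specific and the values below are examples:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t status = 0x01;			/* e.g. only X-axis data ready */
	unsigned long active_scan_mask = 0x06;	/* e.g. only Y and Z enabled */

	/* Same test as st_sensors_trigger_handler(): low status bits vs scan mask */
	if (!(status & (uint8_t)active_scan_mask))
		printf("not ours -> IRQ_NONE\n");
	else
		printf("ours -> read buffer element\n");
	return 0;
}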
diff --git a/drivers/iio/common/st_sensors/st_sensors_core.c b/drivers/iio/common/st_sensors/st_sensors_core.c
index f5a2d445d0c0..dffe00692169 100644
--- a/drivers/iio/common/st_sensors/st_sensors_core.c
+++ b/drivers/iio/common/st_sensors/st_sensors_core.c
@@ -301,6 +301,14 @@ static int st_sensors_set_drdy_int_pin(struct iio_dev *indio_dev,
return -EINVAL;
}
+ if (pdata->open_drain) {
+ if (!sdata->sensor_settings->drdy_irq.addr_od)
+ dev_err(&indio_dev->dev,
+ "open drain requested but unsupported.\n");
+ else
+ sdata->int_pin_open_drain = true;
+ }
+
return 0;
}
@@ -321,6 +329,8 @@ static struct st_sensors_platform_data *st_sensors_of_probe(struct device *dev,
else
pdata->drdy_int_pin = defdata ? defdata->drdy_int_pin : 0;
+ pdata->open_drain = of_property_read_bool(np, "drive-open-drain");
+
return pdata;
}
#else
@@ -374,6 +384,16 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
return err;
}
+ if (sdata->int_pin_open_drain) {
+ dev_info(&indio_dev->dev,
+ "set interrupt line to open drain mode\n");
+ err = st_sensors_write_data_with_mask(indio_dev,
+ sdata->sensor_settings->drdy_irq.addr_od,
+ sdata->sensor_settings->drdy_irq.mask_od, 1);
+ if (err < 0)
+ return err;
+ }
+
err = st_sensors_set_axis_enable(indio_dev, ST_SENSORS_ENABLE_ALL_AXIS);
return err;
diff --git a/drivers/iio/common/st_sensors/st_sensors_trigger.c b/drivers/iio/common/st_sensors/st_sensors_trigger.c
index 6a8c98327945..da72279fcf99 100644
--- a/drivers/iio/common/st_sensors/st_sensors_trigger.c
+++ b/drivers/iio/common/st_sensors/st_sensors_trigger.c
@@ -64,6 +64,19 @@ int st_sensors_allocate_trigger(struct iio_dev *indio_dev,
"rising edge\n", irq_trig);
irq_trig = IRQF_TRIGGER_RISING;
}
+
+ /*
+ * If the interrupt pin is Open Drain, by definition this
+ * means that the interrupt line may be shared with other
+ * peripherals. But to do this we also need to have a status
+ * register and mask to figure out if this sensor was firing
+	 * the IRQ or not, so we can tell the interrupt handler that
+ * it was "our" interrupt.
+ */
+ if (sdata->int_pin_open_drain &&
+ sdata->sensor_settings->drdy_irq.addr_stat_drdy)
+ irq_trig |= IRQF_SHARED;
+
err = request_threaded_irq(irq,
iio_trigger_generic_data_rdy_poll,
NULL,
diff --git a/drivers/iio/dac/Kconfig b/drivers/iio/dac/Kconfig
index 6abcfb8597d9..e63b957c985f 100644
--- a/drivers/iio/dac/Kconfig
+++ b/drivers/iio/dac/Kconfig
@@ -74,6 +74,33 @@ config AD5449
To compile this driver as a module, choose M here: the
module will be called ad5449.
+config AD5592R_BASE
+ tristate
+
+config AD5592R
+ tristate "Analog Devices AD5592R ADC/DAC driver"
+ depends on SPI_MASTER
+ select GPIOLIB
+ select AD5592R_BASE
+ help
+ Say yes here to build support for Analog Devices AD5592R
+ Digital to Analog / Analog to Digital Converter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad5592r.
+
+config AD5593R
+ tristate "Analog Devices AD5593R ADC/DAC driver"
+ depends on I2C
+ select GPIOLIB
+ select AD5592R_BASE
+ help
+ Say yes here to build support for Analog Devices AD5593R
+ Digital to Analog / Analog to Digital Converter.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ad5593r.
+
config AD5504
tristate "Analog Devices AD5504/AD5501 DAC SPI driver"
depends on SPI
@@ -154,6 +181,16 @@ config AD7303
To compile this driver as module choose M here: the module will be called
ad7303.
+config LPC18XX_DAC
+ tristate "NXP LPC18xx DAC driver"
+ depends on ARCH_LPC18XX || COMPILE_TEST
+ depends on OF && HAS_IOMEM
+ help
+ Say yes here to build support for NXP LPC18XX DAC.
+
+ To compile this driver as a module, choose M here: the module will be
+ called lpc18xx_dac.
+
config M62332
tristate "Mitsubishi M62332 DAC driver"
depends on I2C
diff --git a/drivers/iio/dac/Makefile b/drivers/iio/dac/Makefile
index 67b48429686d..8b78d5ca9b11 100644
--- a/drivers/iio/dac/Makefile
+++ b/drivers/iio/dac/Makefile
@@ -11,12 +11,16 @@ obj-$(CONFIG_AD5064) += ad5064.o
obj-$(CONFIG_AD5504) += ad5504.o
obj-$(CONFIG_AD5446) += ad5446.o
obj-$(CONFIG_AD5449) += ad5449.o
+obj-$(CONFIG_AD5592R_BASE) += ad5592r-base.o
+obj-$(CONFIG_AD5592R) += ad5592r.o
+obj-$(CONFIG_AD5593R) += ad5593r.o
obj-$(CONFIG_AD5755) += ad5755.o
obj-$(CONFIG_AD5761) += ad5761.o
obj-$(CONFIG_AD5764) += ad5764.o
obj-$(CONFIG_AD5791) += ad5791.o
obj-$(CONFIG_AD5686) += ad5686.o
obj-$(CONFIG_AD7303) += ad7303.o
+obj-$(CONFIG_LPC18XX_DAC) += lpc18xx_dac.o
obj-$(CONFIG_M62332) += m62332.o
obj-$(CONFIG_MAX517) += max517.o
obj-$(CONFIG_MAX5821) += max5821.o
diff --git a/drivers/iio/dac/ad5592r-base.c b/drivers/iio/dac/ad5592r-base.c
new file mode 100644
index 000000000000..948f600e7059
--- /dev/null
+++ b/drivers/iio/dac/ad5592r-base.c
@@ -0,0 +1,691 @@
+/*
+ * AD5592R Digital <-> Analog converters driver
+ *
+ * Copyright 2014-2016 Analog Devices Inc.
+ * Author: Paul Cercueil <paul.cercueil@analog.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/regulator/consumer.h>
+#include <linux/gpio/consumer.h>
+#include <linux/gpio/driver.h>
+#include <linux/gpio.h>
+#include <linux/property.h>
+
+#include <dt-bindings/iio/adi,ad5592r.h>
+
+#include "ad5592r-base.h"
+
+static int ad5592r_gpio_get(struct gpio_chip *chip, unsigned offset)
+{
+ struct ad5592r_state *st = gpiochip_get_data(chip);
+ int ret = 0;
+ u8 val;
+
+ mutex_lock(&st->gpio_lock);
+
+ if (st->gpio_out & BIT(offset))
+ val = st->gpio_val;
+ else
+ ret = st->ops->gpio_read(st, &val);
+
+ mutex_unlock(&st->gpio_lock);
+
+ if (ret < 0)
+ return ret;
+
+ return !!(val & BIT(offset));
+}
+
+static void ad5592r_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
+{
+ struct ad5592r_state *st = gpiochip_get_data(chip);
+
+ mutex_lock(&st->gpio_lock);
+
+ if (value)
+ st->gpio_val |= BIT(offset);
+ else
+ st->gpio_val &= ~BIT(offset);
+
+ st->ops->reg_write(st, AD5592R_REG_GPIO_SET, st->gpio_val);
+
+ mutex_unlock(&st->gpio_lock);
+}
+
+static int ad5592r_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
+{
+ struct ad5592r_state *st = gpiochip_get_data(chip);
+ int ret;
+
+ mutex_lock(&st->gpio_lock);
+
+ st->gpio_out &= ~BIT(offset);
+ st->gpio_in |= BIT(offset);
+
+ ret = st->ops->reg_write(st, AD5592R_REG_GPIO_OUT_EN, st->gpio_out);
+ if (ret < 0)
+ goto err_unlock;
+
+ ret = st->ops->reg_write(st, AD5592R_REG_GPIO_IN_EN, st->gpio_in);
+
+err_unlock:
+ mutex_unlock(&st->gpio_lock);
+
+ return ret;
+}
+
+static int ad5592r_gpio_direction_output(struct gpio_chip *chip,
+ unsigned offset, int value)
+{
+ struct ad5592r_state *st = gpiochip_get_data(chip);
+ int ret;
+
+ mutex_lock(&st->gpio_lock);
+
+ if (value)
+ st->gpio_val |= BIT(offset);
+ else
+ st->gpio_val &= ~BIT(offset);
+
+ st->gpio_in &= ~BIT(offset);
+ st->gpio_out |= BIT(offset);
+
+ ret = st->ops->reg_write(st, AD5592R_REG_GPIO_SET, st->gpio_val);
+ if (ret < 0)
+ goto err_unlock;
+
+ ret = st->ops->reg_write(st, AD5592R_REG_GPIO_OUT_EN, st->gpio_out);
+ if (ret < 0)
+ goto err_unlock;
+
+ ret = st->ops->reg_write(st, AD5592R_REG_GPIO_IN_EN, st->gpio_in);
+
+err_unlock:
+ mutex_unlock(&st->gpio_lock);
+
+ return ret;
+}
+
+static int ad5592r_gpio_request(struct gpio_chip *chip, unsigned offset)
+{
+ struct ad5592r_state *st = gpiochip_get_data(chip);
+
+ if (!(st->gpio_map & BIT(offset))) {
+ dev_err(st->dev, "GPIO %d is reserved by alternate function\n",
+ offset);
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int ad5592r_gpio_init(struct ad5592r_state *st)
+{
+ if (!st->gpio_map)
+ return 0;
+
+ st->gpiochip.label = dev_name(st->dev);
+ st->gpiochip.base = -1;
+ st->gpiochip.ngpio = 8;
+ st->gpiochip.parent = st->dev;
+ st->gpiochip.can_sleep = true;
+ st->gpiochip.direction_input = ad5592r_gpio_direction_input;
+ st->gpiochip.direction_output = ad5592r_gpio_direction_output;
+ st->gpiochip.get = ad5592r_gpio_get;
+ st->gpiochip.set = ad5592r_gpio_set;
+ st->gpiochip.request = ad5592r_gpio_request;
+ st->gpiochip.owner = THIS_MODULE;
+
+ mutex_init(&st->gpio_lock);
+
+ return gpiochip_add_data(&st->gpiochip, st);
+}
+
+static void ad5592r_gpio_cleanup(struct ad5592r_state *st)
+{
+ if (st->gpio_map)
+ gpiochip_remove(&st->gpiochip);
+}
+
+static int ad5592r_reset(struct ad5592r_state *st)
+{
+ struct gpio_desc *gpio;
+ struct iio_dev *iio_dev = iio_priv_to_dev(st);
+
+ gpio = devm_gpiod_get_optional(st->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(gpio))
+ return PTR_ERR(gpio);
+
+ if (gpio) {
+ udelay(1);
+ gpiod_set_value(gpio, 1);
+ } else {
+ mutex_lock(&iio_dev->mlock);
+ /* Writing this magic value resets the device */
+ st->ops->reg_write(st, AD5592R_REG_RESET, 0xdac);
+ mutex_unlock(&iio_dev->mlock);
+ }
+
+ udelay(250);
+
+ return 0;
+}
+
+static int ad5592r_get_vref(struct ad5592r_state *st)
+{
+ int ret;
+
+ if (st->reg) {
+ ret = regulator_get_voltage(st->reg);
+ if (ret < 0)
+ return ret;
+
+ return ret / 1000;
+ } else {
+ return 2500;
+ }
+}
+
+static int ad5592r_set_channel_modes(struct ad5592r_state *st)
+{
+ const struct ad5592r_rw_ops *ops = st->ops;
+ int ret;
+ unsigned i;
+ struct iio_dev *iio_dev = iio_priv_to_dev(st);
+ u8 pulldown = 0, tristate = 0, dac = 0, adc = 0;
+ u16 read_back;
+
+ for (i = 0; i < st->num_channels; i++) {
+ switch (st->channel_modes[i]) {
+ case CH_MODE_DAC:
+ dac |= BIT(i);
+ break;
+
+ case CH_MODE_ADC:
+ adc |= BIT(i);
+ break;
+
+ case CH_MODE_DAC_AND_ADC:
+ dac |= BIT(i);
+ adc |= BIT(i);
+ break;
+
+ case CH_MODE_GPIO:
+ st->gpio_map |= BIT(i);
+ st->gpio_in |= BIT(i); /* Default to input */
+ break;
+
+ case CH_MODE_UNUSED:
+ /* fall-through */
+ default:
+ switch (st->channel_offstate[i]) {
+ case CH_OFFSTATE_OUT_TRISTATE:
+ tristate |= BIT(i);
+ break;
+
+ case CH_OFFSTATE_OUT_LOW:
+ st->gpio_out |= BIT(i);
+ break;
+
+ case CH_OFFSTATE_OUT_HIGH:
+ st->gpio_out |= BIT(i);
+ st->gpio_val |= BIT(i);
+ break;
+
+ case CH_OFFSTATE_PULLDOWN:
+ /* fall-through */
+ default:
+ pulldown |= BIT(i);
+ break;
+ }
+ }
+ }
+
+ mutex_lock(&iio_dev->mlock);
+
+ /* Pull down unused pins to GND */
+ ret = ops->reg_write(st, AD5592R_REG_PULLDOWN, pulldown);
+ if (ret)
+ goto err_unlock;
+
+ ret = ops->reg_write(st, AD5592R_REG_TRISTATE, tristate);
+ if (ret)
+ goto err_unlock;
+
+ /* Configure pins that we use */
+ ret = ops->reg_write(st, AD5592R_REG_DAC_EN, dac);
+ if (ret)
+ goto err_unlock;
+
+ ret = ops->reg_write(st, AD5592R_REG_ADC_EN, adc);
+ if (ret)
+ goto err_unlock;
+
+ ret = ops->reg_write(st, AD5592R_REG_GPIO_SET, st->gpio_val);
+ if (ret)
+ goto err_unlock;
+
+ ret = ops->reg_write(st, AD5592R_REG_GPIO_OUT_EN, st->gpio_out);
+ if (ret)
+ goto err_unlock;
+
+ ret = ops->reg_write(st, AD5592R_REG_GPIO_IN_EN, st->gpio_in);
+ if (ret)
+ goto err_unlock;
+
+ /* Verify that we can read back at least one register */
+ ret = ops->reg_read(st, AD5592R_REG_ADC_EN, &read_back);
+ if (!ret && (read_back & 0xff) != adc)
+ ret = -EIO;
+
+err_unlock:
+ mutex_unlock(&iio_dev->mlock);
+ return ret;
+}
+
+static int ad5592r_reset_channel_modes(struct ad5592r_state *st)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(st->channel_modes); i++)
+ st->channel_modes[i] = CH_MODE_UNUSED;
+
+ return ad5592r_set_channel_modes(st);
+}
+
+static int ad5592r_write_raw(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *chan, int val, int val2, long mask)
+{
+ struct ad5592r_state *st = iio_priv(iio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+
+ if (val >= (1 << chan->scan_type.realbits) || val < 0)
+ return -EINVAL;
+
+ if (!chan->output)
+ return -EINVAL;
+
+ mutex_lock(&iio_dev->mlock);
+ ret = st->ops->write_dac(st, chan->channel, val);
+ if (!ret)
+ st->cached_dac[chan->channel] = val;
+ mutex_unlock(&iio_dev->mlock);
+ return ret;
+ case IIO_CHAN_INFO_SCALE:
+ if (chan->type == IIO_VOLTAGE) {
+ bool gain;
+
+ if (val == st->scale_avail[0][0] &&
+ val2 == st->scale_avail[0][1])
+ gain = false;
+ else if (val == st->scale_avail[1][0] &&
+ val2 == st->scale_avail[1][1])
+ gain = true;
+ else
+ return -EINVAL;
+
+ mutex_lock(&iio_dev->mlock);
+
+ ret = st->ops->reg_read(st, AD5592R_REG_CTRL,
+ &st->cached_gp_ctrl);
+ if (ret < 0) {
+ mutex_unlock(&iio_dev->mlock);
+ return ret;
+ }
+
+ if (chan->output) {
+ if (gain)
+ st->cached_gp_ctrl |=
+ AD5592R_REG_CTRL_DAC_RANGE;
+ else
+ st->cached_gp_ctrl &=
+ ~AD5592R_REG_CTRL_DAC_RANGE;
+ } else {
+ if (gain)
+ st->cached_gp_ctrl |=
+ AD5592R_REG_CTRL_ADC_RANGE;
+ else
+ st->cached_gp_ctrl &=
+ ~AD5592R_REG_CTRL_ADC_RANGE;
+ }
+
+ ret = st->ops->reg_write(st, AD5592R_REG_CTRL,
+ st->cached_gp_ctrl);
+ mutex_unlock(&iio_dev->mlock);
+
+ return ret;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int ad5592r_read_raw(struct iio_dev *iio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long m)
+{
+ struct ad5592r_state *st = iio_priv(iio_dev);
+ u16 read_val;
+ int ret;
+
+ switch (m) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&iio_dev->mlock);
+
+ if (!chan->output) {
+ ret = st->ops->read_adc(st, chan->channel, &read_val);
+ if (ret)
+ goto unlock;
+
+ if ((read_val >> 12 & 0x7) != (chan->channel & 0x7)) {
+ dev_err(st->dev, "Error while reading channel %u\n",
+ chan->channel);
+ ret = -EIO;
+ goto unlock;
+ }
+
+ read_val &= GENMASK(11, 0);
+
+ } else {
+ read_val = st->cached_dac[chan->channel];
+ }
+
+ dev_dbg(st->dev, "Channel %u read: 0x%04hX\n",
+ chan->channel, read_val);
+
+ *val = (int) read_val;
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_CHAN_INFO_SCALE:
+ *val = ad5592r_get_vref(st);
+
+ if (chan->type == IIO_TEMP) {
+ s64 tmp = *val * (3767897513LL / 25LL);
+ *val = div_s64_rem(tmp, 1000000000LL, val2);
+
+			return IIO_VAL_INT_PLUS_MICRO;
+ } else {
+ int mult;
+
+ mutex_lock(&iio_dev->mlock);
+
+ if (chan->output)
+ mult = !!(st->cached_gp_ctrl &
+ AD5592R_REG_CTRL_DAC_RANGE);
+ else
+ mult = !!(st->cached_gp_ctrl &
+ AD5592R_REG_CTRL_ADC_RANGE);
+
+ *val *= ++mult;
+
+ *val2 = chan->scan_type.realbits;
+ ret = IIO_VAL_FRACTIONAL_LOG2;
+ }
+ break;
+ case IIO_CHAN_INFO_OFFSET:
+ ret = ad5592r_get_vref(st);
+
+ mutex_lock(&iio_dev->mlock);
+
+ if (st->cached_gp_ctrl & AD5592R_REG_CTRL_ADC_RANGE)
+ *val = (-34365 * 25) / ret;
+ else
+ *val = (-75365 * 25) / ret;
+ ret = IIO_VAL_INT;
+ break;
+ default:
+		return -EINVAL;
+ }
+
+unlock:
+ mutex_unlock(&iio_dev->mlock);
+ return ret;
+}
+
+static int ad5592r_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, long mask)
+{
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return IIO_VAL_INT_PLUS_NANO;
+
+ default:
+ return IIO_VAL_INT_PLUS_MICRO;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info ad5592r_info = {
+ .read_raw = ad5592r_read_raw,
+ .write_raw = ad5592r_write_raw,
+ .write_raw_get_fmt = ad5592r_write_raw_get_fmt,
+ .driver_module = THIS_MODULE,
+};
+
+static ssize_t ad5592r_show_scale_available(struct iio_dev *iio_dev,
+ uintptr_t private,
+ const struct iio_chan_spec *chan,
+ char *buf)
+{
+ struct ad5592r_state *st = iio_priv(iio_dev);
+
+ return sprintf(buf, "%d.%09u %d.%09u\n",
+ st->scale_avail[0][0], st->scale_avail[0][1],
+ st->scale_avail[1][0], st->scale_avail[1][1]);
+}
+
+static struct iio_chan_spec_ext_info ad5592r_ext_info[] = {
+ {
+ .name = "scale_available",
+ .read = ad5592r_show_scale_available,
+ .shared = true,
+ },
+ {},
+};
+
+static void ad5592r_setup_channel(struct iio_dev *iio_dev,
+ struct iio_chan_spec *chan, bool output, unsigned id)
+{
+ chan->type = IIO_VOLTAGE;
+ chan->indexed = 1;
+ chan->output = output;
+ chan->channel = id;
+ chan->info_mask_separate = BIT(IIO_CHAN_INFO_RAW);
+ chan->info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE);
+ chan->scan_type.sign = 'u';
+ chan->scan_type.realbits = 12;
+ chan->scan_type.storagebits = 16;
+ chan->ext_info = ad5592r_ext_info;
+}
+
+static int ad5592r_alloc_channels(struct ad5592r_state *st)
+{
+ unsigned i, curr_channel = 0,
+ num_channels = st->num_channels;
+ struct iio_dev *iio_dev = iio_priv_to_dev(st);
+ struct iio_chan_spec *channels;
+ struct fwnode_handle *child;
+ u32 reg, tmp;
+ int ret;
+
+ device_for_each_child_node(st->dev, child) {
+ ret = fwnode_property_read_u32(child, "reg", &reg);
+		if (ret || reg >= ARRAY_SIZE(st->channel_modes))
+ continue;
+
+ ret = fwnode_property_read_u32(child, "adi,mode", &tmp);
+ if (!ret)
+ st->channel_modes[reg] = tmp;
+
+		ret = fwnode_property_read_u32(child, "adi,off-state", &tmp);
+ if (!ret)
+ st->channel_offstate[reg] = tmp;
+ }
+
+ channels = devm_kzalloc(st->dev,
+ (1 + 2 * num_channels) * sizeof(*channels), GFP_KERNEL);
+ if (!channels)
+ return -ENOMEM;
+
+ for (i = 0; i < num_channels; i++) {
+ switch (st->channel_modes[i]) {
+ case CH_MODE_DAC:
+ ad5592r_setup_channel(iio_dev, &channels[curr_channel],
+ true, i);
+ curr_channel++;
+ break;
+
+ case CH_MODE_ADC:
+ ad5592r_setup_channel(iio_dev, &channels[curr_channel],
+ false, i);
+ curr_channel++;
+ break;
+
+ case CH_MODE_DAC_AND_ADC:
+ ad5592r_setup_channel(iio_dev, &channels[curr_channel],
+ true, i);
+ curr_channel++;
+ ad5592r_setup_channel(iio_dev, &channels[curr_channel],
+ false, i);
+ curr_channel++;
+ break;
+
+ default:
+ continue;
+ }
+ }
+
+ channels[curr_channel].type = IIO_TEMP;
+ channels[curr_channel].channel = 8;
+ channels[curr_channel].info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OFFSET);
+ curr_channel++;
+
+ iio_dev->num_channels = curr_channel;
+ iio_dev->channels = channels;
+
+ return 0;
+}
+
+static void ad5592r_init_scales(struct ad5592r_state *st, int vref_mV)
+{
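+	/*
+	 * Scales are stored in mV per LSB as an integer plus a 9-digit
+	 * fractional part: vref / 2^12 for the 1x range and twice that
+	 * for the 2x gain range.
+	 */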
+ s64 tmp = (s64)vref_mV * 1000000000LL >> 12;
+
+ st->scale_avail[0][0] =
+ div_s64_rem(tmp, 1000000000LL, &st->scale_avail[0][1]);
+ st->scale_avail[1][0] =
+ div_s64_rem(tmp * 2, 1000000000LL, &st->scale_avail[1][1]);
+}
+
+int ad5592r_probe(struct device *dev, const char *name,
+ const struct ad5592r_rw_ops *ops)
+{
+ struct iio_dev *iio_dev;
+ struct ad5592r_state *st;
+ int ret;
+
+ iio_dev = devm_iio_device_alloc(dev, sizeof(*st));
+ if (!iio_dev)
+ return -ENOMEM;
+
+ st = iio_priv(iio_dev);
+ st->dev = dev;
+ st->ops = ops;
+ st->num_channels = 8;
+ dev_set_drvdata(dev, iio_dev);
+
+ st->reg = devm_regulator_get_optional(dev, "vref");
+ if (IS_ERR(st->reg)) {
+ if ((PTR_ERR(st->reg) != -ENODEV) && dev->of_node)
+ return PTR_ERR(st->reg);
+
+ st->reg = NULL;
+ } else {
+ ret = regulator_enable(st->reg);
+ if (ret)
+ return ret;
+ }
+
+ iio_dev->dev.parent = dev;
+ iio_dev->name = name;
+ iio_dev->info = &ad5592r_info;
+ iio_dev->modes = INDIO_DIRECT_MODE;
+
+ ad5592r_init_scales(st, ad5592r_get_vref(st));
+
+ ret = ad5592r_reset(st);
+ if (ret)
+ goto error_disable_reg;
+
+ ret = ops->reg_write(st, AD5592R_REG_PD,
+ (st->reg == NULL) ? AD5592R_REG_PD_EN_REF : 0);
+ if (ret)
+ goto error_disable_reg;
+
+ ret = ad5592r_alloc_channels(st);
+ if (ret)
+ goto error_disable_reg;
+
+ ret = ad5592r_set_channel_modes(st);
+ if (ret)
+ goto error_reset_ch_modes;
+
+ ret = iio_device_register(iio_dev);
+ if (ret)
+ goto error_reset_ch_modes;
+
+ ret = ad5592r_gpio_init(st);
+ if (ret)
+ goto error_dev_unregister;
+
+ return 0;
+
+error_dev_unregister:
+ iio_device_unregister(iio_dev);
+
+error_reset_ch_modes:
+ ad5592r_reset_channel_modes(st);
+
+error_disable_reg:
+ if (st->reg)
+ regulator_disable(st->reg);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(ad5592r_probe);
+
+int ad5592r_remove(struct device *dev)
+{
+ struct iio_dev *iio_dev = dev_get_drvdata(dev);
+ struct ad5592r_state *st = iio_priv(iio_dev);
+
+ iio_device_unregister(iio_dev);
+ ad5592r_reset_channel_modes(st);
+ ad5592r_gpio_cleanup(st);
+
+ if (st->reg)
+ regulator_disable(st->reg);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ad5592r_remove);
+
+MODULE_AUTHOR("Paul Cercueil <paul.cercueil@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD5592R multi-channel converters");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/ad5592r-base.h b/drivers/iio/dac/ad5592r-base.h
new file mode 100644
index 000000000000..841457e93f85
--- /dev/null
+++ b/drivers/iio/dac/ad5592r-base.h
@@ -0,0 +1,76 @@
+/*
+ * AD5592R / AD5593R Digital <-> Analog converters driver
+ *
+ * Copyright 2015-2016 Analog Devices Inc.
+ * Author: Paul Cercueil <paul.cercueil@analog.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#ifndef __DRIVERS_IIO_DAC_AD5592R_BASE_H__
+#define __DRIVERS_IIO_DAC_AD5592R_BASE_H__
+
+#include <linux/types.h>
+#include <linux/cache.h>
+#include <linux/mutex.h>
+#include <linux/gpio/driver.h>
+
+struct device;
+struct ad5592r_state;
+
+enum ad5592r_registers {
+ AD5592R_REG_NOOP = 0x0,
+ AD5592R_REG_DAC_READBACK = 0x1,
+ AD5592R_REG_ADC_SEQ = 0x2,
+ AD5592R_REG_CTRL = 0x3,
+ AD5592R_REG_ADC_EN = 0x4,
+ AD5592R_REG_DAC_EN = 0x5,
+ AD5592R_REG_PULLDOWN = 0x6,
+ AD5592R_REG_LDAC = 0x7,
+ AD5592R_REG_GPIO_OUT_EN = 0x8,
+ AD5592R_REG_GPIO_SET = 0x9,
+ AD5592R_REG_GPIO_IN_EN = 0xA,
+ AD5592R_REG_PD = 0xB,
+ AD5592R_REG_OPEN_DRAIN = 0xC,
+ AD5592R_REG_TRISTATE = 0xD,
+ AD5592R_REG_RESET = 0xF,
+};
+
+#define AD5592R_REG_PD_EN_REF BIT(9)
+#define AD5592R_REG_CTRL_ADC_RANGE BIT(5)
+#define AD5592R_REG_CTRL_DAC_RANGE BIT(4)
+
+struct ad5592r_rw_ops {
+ int (*write_dac)(struct ad5592r_state *st, unsigned chan, u16 value);
+ int (*read_adc)(struct ad5592r_state *st, unsigned chan, u16 *value);
+ int (*reg_write)(struct ad5592r_state *st, u8 reg, u16 value);
+ int (*reg_read)(struct ad5592r_state *st, u8 reg, u16 *value);
+ int (*gpio_read)(struct ad5592r_state *st, u8 *value);
+};
+
+struct ad5592r_state {
+ struct device *dev;
+ struct regulator *reg;
+ struct gpio_chip gpiochip;
+ struct mutex gpio_lock; /* Protect cached gpio_out, gpio_val, etc. */
+ unsigned int num_channels;
+ const struct ad5592r_rw_ops *ops;
+ int scale_avail[2][2];
+ u16 cached_dac[8];
+ u16 cached_gp_ctrl;
+ u8 channel_modes[8];
+ u8 channel_offstate[8];
+ u8 gpio_map;
+ u8 gpio_out;
+ u8 gpio_in;
+ u8 gpio_val;
+
+ __be16 spi_msg ____cacheline_aligned;
+ __be16 spi_msg_nop;
+};
+
+int ad5592r_probe(struct device *dev, const char *name,
+ const struct ad5592r_rw_ops *ops);
+int ad5592r_remove(struct device *dev);
+
+#endif /* __DRIVERS_IIO_DAC_AD5592R_BASE_H__ */
diff --git a/drivers/iio/dac/ad5592r.c b/drivers/iio/dac/ad5592r.c
new file mode 100644
index 000000000000..0b235a2c7359
--- /dev/null
+++ b/drivers/iio/dac/ad5592r.c
@@ -0,0 +1,164 @@
+/*
+ * AD5592R Digital <-> Analog converters driver
+ *
+ * Copyright 2015-2016 Analog Devices Inc.
+ * Author: Paul Cercueil <paul.cercueil@analog.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include "ad5592r-base.h"
+
+#include <linux/bitops.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/spi/spi.h>
+
+#define AD5592R_GPIO_READBACK_EN BIT(10)
+#define AD5592R_LDAC_READBACK_EN BIT(6)
+
+static int ad5592r_spi_wnop_r16(struct ad5592r_state *st, u16 *buf)
+{
+ struct spi_device *spi = container_of(st->dev, struct spi_device, dev);
+ struct spi_transfer t = {
+ .tx_buf = &st->spi_msg_nop,
+ .rx_buf = buf,
+ .len = 2
+ };
+
+ st->spi_msg_nop = 0; /* NOP */
+
+ return spi_sync_transfer(spi, &t, 1);
+}
+
+static int ad5592r_write_dac(struct ad5592r_state *st, unsigned chan, u16 value)
+{
+ struct spi_device *spi = container_of(st->dev, struct spi_device, dev);
+
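+	/* DAC write word: D15 = 1, D14:D12 = channel address, D11:D0 = value. */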
+ st->spi_msg = cpu_to_be16(BIT(15) | (chan << 12) | value);
+
+ return spi_write(spi, &st->spi_msg, sizeof(st->spi_msg));
+}
+
+static int ad5592r_read_adc(struct ad5592r_state *st, unsigned chan, u16 *value)
+{
+ struct spi_device *spi = container_of(st->dev, struct spi_device, dev);
+ int ret;
+
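+	/* Request a single conversion by writing the channel bit to ADC_SEQ. */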
+ st->spi_msg = cpu_to_be16((AD5592R_REG_ADC_SEQ << 11) | BIT(chan));
+
+ ret = spi_write(spi, &st->spi_msg, sizeof(st->spi_msg));
+ if (ret)
+ return ret;
+
+	/*
+	 * The first word clocked back after requesting a conversion is
+	 * invalid and must be discarded; the result arrives in the next
+	 * frame (see Figure 40, "Single-Channel ADC Conversion Sequence",
+	 * in the datasheet).
+	 */
+ ret = ad5592r_spi_wnop_r16(st, &st->spi_msg);
+ if (ret)
+ return ret;
+
+ ret = ad5592r_spi_wnop_r16(st, &st->spi_msg);
+ if (ret)
+ return ret;
+
+ *value = be16_to_cpu(st->spi_msg);
+
+ return 0;
+}
+
+static int ad5592r_reg_write(struct ad5592r_state *st, u8 reg, u16 value)
+{
+ struct spi_device *spi = container_of(st->dev, struct spi_device, dev);
+
+ st->spi_msg = cpu_to_be16((reg << 11) | value);
+
+ return spi_write(spi, &st->spi_msg, sizeof(st->spi_msg));
+}
+
+static int ad5592r_reg_read(struct ad5592r_state *st, u8 reg, u16 *value)
+{
+ struct spi_device *spi = container_of(st->dev, struct spi_device, dev);
+ int ret;
+
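+	/*
+	 * Readback is requested by writing the target register address into
+	 * the LDAC register with the readback-enable bit set, then clocking
+	 * out a NOP word to shift the value back.
+	 */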
+ st->spi_msg = cpu_to_be16((AD5592R_REG_LDAC << 11) |
+ AD5592R_LDAC_READBACK_EN | (reg << 2));
+
+ ret = spi_write(spi, &st->spi_msg, sizeof(st->spi_msg));
+ if (ret)
+ return ret;
+
+ ret = ad5592r_spi_wnop_r16(st, &st->spi_msg);
+ if (ret)
+ return ret;
+
+ *value = be16_to_cpu(st->spi_msg);
+
+ return 0;
+}
+
+static int ad5593r_gpio_read(struct ad5592r_state *st, u8 *value)
+{
+ int ret;
+
+ ret = ad5592r_reg_write(st, AD5592R_REG_GPIO_IN_EN,
+ AD5592R_GPIO_READBACK_EN | st->gpio_in);
+ if (ret)
+ return ret;
+
+ ret = ad5592r_spi_wnop_r16(st, &st->spi_msg);
+ if (ret)
+ return ret;
+
+ *value = (u8) be16_to_cpu(st->spi_msg);
+
+ return 0;
+}
+
+static const struct ad5592r_rw_ops ad5592r_rw_ops = {
+ .write_dac = ad5592r_write_dac,
+ .read_adc = ad5592r_read_adc,
+ .reg_write = ad5592r_reg_write,
+ .reg_read = ad5592r_reg_read,
+ .gpio_read = ad5593r_gpio_read,
+};
+
+static int ad5592r_spi_probe(struct spi_device *spi)
+{
+ const struct spi_device_id *id = spi_get_device_id(spi);
+
+ return ad5592r_probe(&spi->dev, id->name, &ad5592r_rw_ops);
+}
+
+static int ad5592r_spi_remove(struct spi_device *spi)
+{
+ return ad5592r_remove(&spi->dev);
+}
+
+static const struct spi_device_id ad5592r_spi_ids[] = {
+ { .name = "ad5592r", },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, ad5592r_spi_ids);
+
+static const struct of_device_id ad5592r_of_match[] = {
+ { .compatible = "adi,ad5592r", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ad5592r_of_match);
+
+static struct spi_driver ad5592r_spi_driver = {
+ .driver = {
+ .name = "ad5592r",
+ .of_match_table = of_match_ptr(ad5592r_of_match),
+ },
+ .probe = ad5592r_spi_probe,
+ .remove = ad5592r_spi_remove,
+ .id_table = ad5592r_spi_ids,
+};
+module_spi_driver(ad5592r_spi_driver);
+
+MODULE_AUTHOR("Paul Cercueil <paul.cercueil@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD5592R multi-channel converters");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/ad5593r.c b/drivers/iio/dac/ad5593r.c
new file mode 100644
index 000000000000..dca158a88f47
--- /dev/null
+++ b/drivers/iio/dac/ad5593r.c
@@ -0,0 +1,131 @@
+/*
+ * AD5593R Digital <-> Analog converters driver
+ *
+ * Copyright 2015-2016 Analog Devices Inc.
+ * Author: Paul Cercueil <paul.cercueil@analog.com>
+ *
+ * Licensed under the GPL-2.
+ */
+
+#include "ad5592r-base.h"
+
+#include <linux/bitops.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
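+/*
+ * The mode goes in the high nibble of the command byte sent before each
+ * transfer; the register or DAC channel address goes in the low nibble.
+ */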
+#define AD5593R_MODE_CONF (0 << 4)
+#define AD5593R_MODE_DAC_WRITE (1 << 4)
+#define AD5593R_MODE_ADC_READBACK (4 << 4)
+#define AD5593R_MODE_DAC_READBACK (5 << 4)
+#define AD5593R_MODE_GPIO_READBACK (6 << 4)
+#define AD5593R_MODE_REG_READBACK (7 << 4)
+
+static int ad5593r_write_dac(struct ad5592r_state *st, unsigned chan, u16 value)
+{
+ struct i2c_client *i2c = to_i2c_client(st->dev);
+
+ return i2c_smbus_write_word_swapped(i2c,
+ AD5593R_MODE_DAC_WRITE | chan, value);
+}
+
+static int ad5593r_read_adc(struct ad5592r_state *st, unsigned chan, u16 *value)
+{
+ struct i2c_client *i2c = to_i2c_client(st->dev);
+ s32 val;
+
+ val = i2c_smbus_write_word_swapped(i2c,
+ AD5593R_MODE_CONF | AD5592R_REG_ADC_SEQ, BIT(chan));
+ if (val < 0)
+ return (int) val;
+
+ val = i2c_smbus_read_word_swapped(i2c, AD5593R_MODE_ADC_READBACK);
+ if (val < 0)
+ return (int) val;
+
+ *value = (u16) val;
+
+ return 0;
+}
+
+static int ad5593r_reg_write(struct ad5592r_state *st, u8 reg, u16 value)
+{
+ struct i2c_client *i2c = to_i2c_client(st->dev);
+
+ return i2c_smbus_write_word_swapped(i2c,
+ AD5593R_MODE_CONF | reg, value);
+}
+
+static int ad5593r_reg_read(struct ad5592r_state *st, u8 reg, u16 *value)
+{
+ struct i2c_client *i2c = to_i2c_client(st->dev);
+ s32 val;
+
+ val = i2c_smbus_read_word_swapped(i2c, AD5593R_MODE_REG_READBACK | reg);
+ if (val < 0)
+ return (int) val;
+
+ *value = (u16) val;
+
+ return 0;
+}
+
+static int ad5593r_gpio_read(struct ad5592r_state *st, u8 *value)
+{
+ struct i2c_client *i2c = to_i2c_client(st->dev);
+ s32 val;
+
+ val = i2c_smbus_read_word_swapped(i2c, AD5593R_MODE_GPIO_READBACK);
+ if (val < 0)
+ return (int) val;
+
+ *value = (u8) val;
+
+ return 0;
+}
+
+static const struct ad5592r_rw_ops ad5593r_rw_ops = {
+ .write_dac = ad5593r_write_dac,
+ .read_adc = ad5593r_read_adc,
+ .reg_write = ad5593r_reg_write,
+ .reg_read = ad5593r_reg_read,
+ .gpio_read = ad5593r_gpio_read,
+};
+
+static int ad5593r_i2c_probe(struct i2c_client *i2c,
+ const struct i2c_device_id *id)
+{
+ return ad5592r_probe(&i2c->dev, id->name, &ad5593r_rw_ops);
+}
+
+static int ad5593r_i2c_remove(struct i2c_client *i2c)
+{
+ return ad5592r_remove(&i2c->dev);
+}
+
+static const struct i2c_device_id ad5593r_i2c_ids[] = {
+ { .name = "ad5593r", },
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, ad5593r_i2c_ids);
+
+static const struct of_device_id ad5593r_of_match[] = {
+ { .compatible = "adi,ad5593r", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, ad5593r_of_match);
+
+static struct i2c_driver ad5593r_driver = {
+ .driver = {
+ .name = "ad5593r",
+ .of_match_table = of_match_ptr(ad5593r_of_match),
+ },
+ .probe = ad5593r_i2c_probe,
+ .remove = ad5593r_i2c_remove,
+ .id_table = ad5593r_i2c_ids,
+};
+module_i2c_driver(ad5593r_driver);
+
+MODULE_AUTHOR("Paul Cercueil <paul.cercueil@analog.com>");
+MODULE_DESCRIPTION("Analog Devices AD5592R multi-channel converters");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/dac/lpc18xx_dac.c b/drivers/iio/dac/lpc18xx_dac.c
new file mode 100644
index 000000000000..55d1456a059d
--- /dev/null
+++ b/drivers/iio/dac/lpc18xx_dac.c
@@ -0,0 +1,210 @@
+/*
+ * IIO DAC driver for NXP LPC18xx DAC
+ *
+ * Copyright (C) 2016 Joachim Eastwood <manabian@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * UNSUPPORTED hardware features:
+ * - Interrupts
+ * - DMA
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/driver.h>
+#include <linux/io.h>
+#include <linux/iopoll.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+
+/* LPC18XX DAC registers and bits */
+#define LPC18XX_DAC_CR 0x000
+#define LPC18XX_DAC_CR_VALUE_SHIFT 6
+#define LPC18XX_DAC_CR_VALUE_MASK 0x3ff
+#define LPC18XX_DAC_CR_BIAS BIT(16)
+#define LPC18XX_DAC_CTRL 0x004
+#define LPC18XX_DAC_CTRL_DMA_ENA BIT(3)
+
+struct lpc18xx_dac {
+ struct regulator *vref;
+ void __iomem *base;
+ struct mutex lock;
+ struct clk *clk;
+};
+
+static const struct iio_chan_spec lpc18xx_dac_iio_channels[] = {
+ {
+ .type = IIO_VOLTAGE,
+ .output = 1,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ },
+};
+
+static int lpc18xx_dac_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct lpc18xx_dac *dac = iio_priv(indio_dev);
+ u32 reg;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ reg = readl(dac->base + LPC18XX_DAC_CR);
+ *val = reg >> LPC18XX_DAC_CR_VALUE_SHIFT;
+ *val &= LPC18XX_DAC_CR_VALUE_MASK;
+
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
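+		/* Scale is vref (in mV) divided by 2^10 for the 10-bit DAC. */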
+ *val = regulator_get_voltage(dac->vref) / 1000;
+ *val2 = 10;
+
+ return IIO_VAL_FRACTIONAL_LOG2;
+ }
+
+ return -EINVAL;
+}
+
+static int lpc18xx_dac_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct lpc18xx_dac *dac = iio_priv(indio_dev);
+ u32 reg;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (val < 0 || val > LPC18XX_DAC_CR_VALUE_MASK)
+ return -EINVAL;
+
+ reg = LPC18XX_DAC_CR_BIAS;
+ reg |= val << LPC18XX_DAC_CR_VALUE_SHIFT;
+
+ mutex_lock(&dac->lock);
+ writel(reg, dac->base + LPC18XX_DAC_CR);
+ writel(LPC18XX_DAC_CTRL_DMA_ENA, dac->base + LPC18XX_DAC_CTRL);
+ mutex_unlock(&dac->lock);
+
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info lpc18xx_dac_info = {
+ .read_raw = lpc18xx_dac_read_raw,
+ .write_raw = lpc18xx_dac_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int lpc18xx_dac_probe(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev;
+ struct lpc18xx_dac *dac;
+ struct resource *res;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*dac));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, indio_dev);
+ dac = iio_priv(indio_dev);
+ mutex_init(&dac->lock);
+
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ dac->base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(dac->base))
+ return PTR_ERR(dac->base);
+
+ dac->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(dac->clk)) {
+ dev_err(&pdev->dev, "error getting clock\n");
+ return PTR_ERR(dac->clk);
+ }
+
+ dac->vref = devm_regulator_get(&pdev->dev, "vref");
+ if (IS_ERR(dac->vref)) {
+ dev_err(&pdev->dev, "error getting regulator\n");
+ return PTR_ERR(dac->vref);
+ }
+
+ indio_dev->name = dev_name(&pdev->dev);
+ indio_dev->dev.parent = &pdev->dev;
+ indio_dev->info = &lpc18xx_dac_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = lpc18xx_dac_iio_channels;
+ indio_dev->num_channels = ARRAY_SIZE(lpc18xx_dac_iio_channels);
+
+ ret = regulator_enable(dac->vref);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to enable regulator\n");
+ return ret;
+ }
+
+ ret = clk_prepare_enable(dac->clk);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to enable clock\n");
+ goto dis_reg;
+ }
+
+ writel(0, dac->base + LPC18XX_DAC_CTRL);
+ writel(0, dac->base + LPC18XX_DAC_CR);
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(&pdev->dev, "unable to register device\n");
+ goto dis_clk;
+ }
+
+ return 0;
+
+dis_clk:
+ clk_disable_unprepare(dac->clk);
+dis_reg:
+ regulator_disable(dac->vref);
+ return ret;
+}
+
+static int lpc18xx_dac_remove(struct platform_device *pdev)
+{
+ struct iio_dev *indio_dev = platform_get_drvdata(pdev);
+ struct lpc18xx_dac *dac = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+
+ writel(0, dac->base + LPC18XX_DAC_CTRL);
+ clk_disable_unprepare(dac->clk);
+ regulator_disable(dac->vref);
+
+ return 0;
+}
+
+static const struct of_device_id lpc18xx_dac_match[] = {
+ { .compatible = "nxp,lpc1850-dac" },
+ { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, lpc18xx_dac_match);
+
+static struct platform_driver lpc18xx_dac_driver = {
+ .probe = lpc18xx_dac_probe,
+ .remove = lpc18xx_dac_remove,
+ .driver = {
+ .name = "lpc18xx-dac",
+ .of_match_table = lpc18xx_dac_match,
+ },
+};
+module_platform_driver(lpc18xx_dac_driver);
+
+MODULE_DESCRIPTION("LPC18xx DAC driver");
+MODULE_AUTHOR("Joachim Eastwood <manabian@gmail.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/frequency/ad9523.c b/drivers/iio/frequency/ad9523.c
index 44a30f286de1..99eba524f6dd 100644
--- a/drivers/iio/frequency/ad9523.c
+++ b/drivers/iio/frequency/ad9523.c
@@ -284,7 +284,7 @@ struct ad9523_state {
} data[2] ____cacheline_aligned;
};
-static int ad9523_read(struct iio_dev *indio_dev, unsigned addr)
+static int ad9523_read(struct iio_dev *indio_dev, unsigned int addr)
{
struct ad9523_state *st = iio_priv(indio_dev);
int ret;
@@ -318,7 +318,8 @@ static int ad9523_read(struct iio_dev *indio_dev, unsigned addr)
return ret;
};
-static int ad9523_write(struct iio_dev *indio_dev, unsigned addr, unsigned val)
+static int ad9523_write(struct iio_dev *indio_dev,
+ unsigned int addr, unsigned int val)
{
struct ad9523_state *st = iio_priv(indio_dev);
int ret;
@@ -351,11 +352,11 @@ static int ad9523_io_update(struct iio_dev *indio_dev)
}
static int ad9523_vco_out_map(struct iio_dev *indio_dev,
- unsigned ch, unsigned out)
+ unsigned int ch, unsigned int out)
{
struct ad9523_state *st = iio_priv(indio_dev);
int ret;
- unsigned mask;
+ unsigned int mask;
switch (ch) {
case 0 ... 3:
@@ -405,7 +406,7 @@ static int ad9523_vco_out_map(struct iio_dev *indio_dev,
}
static int ad9523_set_clock_provider(struct iio_dev *indio_dev,
- unsigned ch, unsigned long freq)
+ unsigned int ch, unsigned long freq)
{
struct ad9523_state *st = iio_priv(indio_dev);
long tmp1, tmp2;
@@ -619,7 +620,7 @@ static int ad9523_read_raw(struct iio_dev *indio_dev,
long m)
{
struct ad9523_state *st = iio_priv(indio_dev);
- unsigned code;
+ unsigned int code;
int ret;
mutex_lock(&indio_dev->mlock);
@@ -655,7 +656,7 @@ static int ad9523_write_raw(struct iio_dev *indio_dev,
long mask)
{
struct ad9523_state *st = iio_priv(indio_dev);
- unsigned reg;
+ unsigned int reg;
int ret, tmp, code;
mutex_lock(&indio_dev->mlock);
@@ -709,8 +710,8 @@ out:
}
static int ad9523_reg_access(struct iio_dev *indio_dev,
- unsigned reg, unsigned writeval,
- unsigned *readval)
+ unsigned int reg, unsigned int writeval,
+ unsigned int *readval)
{
int ret;
diff --git a/drivers/iio/gyro/Kconfig b/drivers/iio/gyro/Kconfig
index e816d29d6a62..205a84420ae9 100644
--- a/drivers/iio/gyro/Kconfig
+++ b/drivers/iio/gyro/Kconfig
@@ -93,7 +93,7 @@ config IIO_ST_GYRO_3AXIS
select IIO_TRIGGERED_BUFFER if (IIO_BUFFER)
help
Say yes here to build support for STMicroelectronics gyroscopes:
- L3G4200D, LSM330DL, L3GD20, LSM330DLC, L3G4IS, LSM330.
+ L3G4200D, LSM330DL, L3GD20, LSM330DLC, L3G4IS, LSM330, LSM9DS0.
This driver can also be built as a module. If so, these modules
will be created:
diff --git a/drivers/iio/gyro/bmg160_core.c b/drivers/iio/gyro/bmg160_core.c
index 4dac567e75b4..7ccc044063f6 100644
--- a/drivers/iio/gyro/bmg160_core.c
+++ b/drivers/iio/gyro/bmg160_core.c
@@ -17,7 +17,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/acpi.h>
-#include <linux/gpio/consumer.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/iio/iio.h>
@@ -31,7 +30,6 @@
#include "bmg160.h"
#define BMG160_IRQ_NAME "bmg160_event"
-#define BMG160_GPIO_NAME "gpio_int"
#define BMG160_REG_CHIP_ID 0x00
#define BMG160_CHIP_ID_VAL 0x0F
@@ -97,7 +95,6 @@
#define BMG160_AUTO_SUSPEND_DELAY_MS 2000
struct bmg160_data {
- struct device *dev;
struct regmap *regmap;
struct iio_trigger *dready_trig;
struct iio_trigger *motion_trig;
@@ -116,6 +113,7 @@ enum bmg160_axis {
AXIS_X,
AXIS_Y,
AXIS_Z,
+ AXIS_MAX,
};
static const struct {
@@ -138,11 +136,12 @@ static const struct {
static int bmg160_set_mode(struct bmg160_data *data, u8 mode)
{
+ struct device *dev = regmap_get_device(data->regmap);
int ret;
ret = regmap_write(data->regmap, BMG160_REG_PMU_LPW, mode);
if (ret < 0) {
- dev_err(data->dev, "Error writing reg_pmu_lpw\n");
+ dev_err(dev, "Error writing reg_pmu_lpw\n");
return ret;
}
@@ -163,6 +162,7 @@ static int bmg160_convert_freq_to_bit(int val)
static int bmg160_set_bw(struct bmg160_data *data, int val)
{
+ struct device *dev = regmap_get_device(data->regmap);
int ret;
int bw_bits;
@@ -172,7 +172,7 @@ static int bmg160_set_bw(struct bmg160_data *data, int val)
ret = regmap_write(data->regmap, BMG160_REG_PMU_BW, bw_bits);
if (ret < 0) {
- dev_err(data->dev, "Error writing reg_pmu_bw\n");
+ dev_err(dev, "Error writing reg_pmu_bw\n");
return ret;
}
@@ -183,18 +183,19 @@ static int bmg160_set_bw(struct bmg160_data *data, int val)
static int bmg160_chip_init(struct bmg160_data *data)
{
+ struct device *dev = regmap_get_device(data->regmap);
int ret;
unsigned int val;
ret = regmap_read(data->regmap, BMG160_REG_CHIP_ID, &val);
if (ret < 0) {
- dev_err(data->dev, "Error reading reg_chip_id\n");
+ dev_err(dev, "Error reading reg_chip_id\n");
return ret;
}
- dev_dbg(data->dev, "Chip Id %x\n", val);
+ dev_dbg(dev, "Chip Id %x\n", val);
if (val != BMG160_CHIP_ID_VAL) {
- dev_err(data->dev, "invalid chip %x\n", val);
+ dev_err(dev, "invalid chip %x\n", val);
return -ENODEV;
}
@@ -213,14 +214,14 @@ static int bmg160_chip_init(struct bmg160_data *data)
/* Set Default Range */
ret = regmap_write(data->regmap, BMG160_REG_RANGE, BMG160_RANGE_500DPS);
if (ret < 0) {
- dev_err(data->dev, "Error writing reg_range\n");
+ dev_err(dev, "Error writing reg_range\n");
return ret;
}
data->dps_range = BMG160_RANGE_500DPS;
ret = regmap_read(data->regmap, BMG160_REG_SLOPE_THRES, &val);
if (ret < 0) {
- dev_err(data->dev, "Error reading reg_slope_thres\n");
+ dev_err(dev, "Error reading reg_slope_thres\n");
return ret;
}
data->slope_thres = val;
@@ -229,7 +230,7 @@ static int bmg160_chip_init(struct bmg160_data *data)
ret = regmap_update_bits(data->regmap, BMG160_REG_INT_EN_1,
BMG160_INT1_BIT_OD, 0);
if (ret < 0) {
- dev_err(data->dev, "Error updating bits in reg_int_en_1\n");
+ dev_err(dev, "Error updating bits in reg_int_en_1\n");
return ret;
}
@@ -237,7 +238,7 @@ static int bmg160_chip_init(struct bmg160_data *data)
BMG160_INT_MODE_LATCH_INT |
BMG160_INT_MODE_LATCH_RESET);
if (ret < 0) {
- dev_err(data->dev,
+ dev_err(dev,
"Error writing reg_motion_intr\n");
return ret;
}
@@ -248,20 +249,21 @@ static int bmg160_chip_init(struct bmg160_data *data)
static int bmg160_set_power_state(struct bmg160_data *data, bool on)
{
#ifdef CONFIG_PM
+ struct device *dev = regmap_get_device(data->regmap);
int ret;
if (on)
- ret = pm_runtime_get_sync(data->dev);
+ ret = pm_runtime_get_sync(dev);
else {
- pm_runtime_mark_last_busy(data->dev);
- ret = pm_runtime_put_autosuspend(data->dev);
+ pm_runtime_mark_last_busy(dev);
+ ret = pm_runtime_put_autosuspend(dev);
}
if (ret < 0) {
- dev_err(data->dev,
- "Failed: bmg160_set_power_state for %d\n", on);
+ dev_err(dev, "Failed: bmg160_set_power_state for %d\n", on);
+
if (on)
- pm_runtime_put_noidle(data->dev);
+ pm_runtime_put_noidle(dev);
return ret;
}
@@ -273,6 +275,7 @@ static int bmg160_set_power_state(struct bmg160_data *data, bool on)
static int bmg160_setup_any_motion_interrupt(struct bmg160_data *data,
bool status)
{
+ struct device *dev = regmap_get_device(data->regmap);
int ret;
/* Enable/Disable INT_MAP0 mapping */
@@ -280,7 +283,7 @@ static int bmg160_setup_any_motion_interrupt(struct bmg160_data *data,
BMG160_INT_MAP_0_BIT_ANY,
(status ? BMG160_INT_MAP_0_BIT_ANY : 0));
if (ret < 0) {
- dev_err(data->dev, "Error updating bits reg_int_map0\n");
+ dev_err(dev, "Error updating bits reg_int_map0\n");
return ret;
}
@@ -290,8 +293,7 @@ static int bmg160_setup_any_motion_interrupt(struct bmg160_data *data,
ret = regmap_write(data->regmap, BMG160_REG_SLOPE_THRES,
data->slope_thres);
if (ret < 0) {
- dev_err(data->dev,
- "Error writing reg_slope_thres\n");
+ dev_err(dev, "Error writing reg_slope_thres\n");
return ret;
}
@@ -299,8 +301,7 @@ static int bmg160_setup_any_motion_interrupt(struct bmg160_data *data,
BMG160_INT_MOTION_X | BMG160_INT_MOTION_Y |
BMG160_INT_MOTION_Z);
if (ret < 0) {
- dev_err(data->dev,
- "Error writing reg_motion_intr\n");
+ dev_err(dev, "Error writing reg_motion_intr\n");
return ret;
}
@@ -315,8 +316,7 @@ static int bmg160_setup_any_motion_interrupt(struct bmg160_data *data,
BMG160_INT_MODE_LATCH_INT |
BMG160_INT_MODE_LATCH_RESET);
if (ret < 0) {
- dev_err(data->dev,
- "Error writing reg_rst_latch\n");
+ dev_err(dev, "Error writing reg_rst_latch\n");
return ret;
}
}
@@ -329,7 +329,7 @@ static int bmg160_setup_any_motion_interrupt(struct bmg160_data *data,
}
if (ret < 0) {
- dev_err(data->dev, "Error writing reg_int_en0\n");
+ dev_err(dev, "Error writing reg_int_en0\n");
return ret;
}
@@ -339,6 +339,7 @@ static int bmg160_setup_any_motion_interrupt(struct bmg160_data *data,
static int bmg160_setup_new_data_interrupt(struct bmg160_data *data,
bool status)
{
+ struct device *dev = regmap_get_device(data->regmap);
int ret;
/* Enable/Disable INT_MAP1 mapping */
@@ -346,7 +347,7 @@ static int bmg160_setup_new_data_interrupt(struct bmg160_data *data,
BMG160_INT_MAP_1_BIT_NEW_DATA,
(status ? BMG160_INT_MAP_1_BIT_NEW_DATA : 0));
if (ret < 0) {
- dev_err(data->dev, "Error updating bits in reg_int_map1\n");
+ dev_err(dev, "Error updating bits in reg_int_map1\n");
return ret;
}
@@ -355,9 +356,8 @@ static int bmg160_setup_new_data_interrupt(struct bmg160_data *data,
BMG160_INT_MODE_NON_LATCH_INT |
BMG160_INT_MODE_LATCH_RESET);
if (ret < 0) {
- dev_err(data->dev,
- "Error writing reg_rst_latch\n");
- return ret;
+ dev_err(dev, "Error writing reg_rst_latch\n");
+ return ret;
}
ret = regmap_write(data->regmap, BMG160_REG_INT_EN_0,
@@ -369,16 +369,15 @@ static int bmg160_setup_new_data_interrupt(struct bmg160_data *data,
BMG160_INT_MODE_LATCH_INT |
BMG160_INT_MODE_LATCH_RESET);
if (ret < 0) {
- dev_err(data->dev,
- "Error writing reg_rst_latch\n");
- return ret;
+ dev_err(dev, "Error writing reg_rst_latch\n");
+ return ret;
}
ret = regmap_write(data->regmap, BMG160_REG_INT_EN_0, 0);
}
if (ret < 0) {
- dev_err(data->dev, "Error writing reg_int_en0\n");
+ dev_err(dev, "Error writing reg_int_en0\n");
return ret;
}
@@ -401,6 +400,7 @@ static int bmg160_get_bw(struct bmg160_data *data, int *val)
static int bmg160_set_scale(struct bmg160_data *data, int val)
{
+ struct device *dev = regmap_get_device(data->regmap);
int ret, i;
for (i = 0; i < ARRAY_SIZE(bmg160_scale_table); ++i) {
@@ -408,8 +408,7 @@ static int bmg160_set_scale(struct bmg160_data *data, int val)
ret = regmap_write(data->regmap, BMG160_REG_RANGE,
bmg160_scale_table[i].dps_range);
if (ret < 0) {
- dev_err(data->dev,
- "Error writing reg_range\n");
+ dev_err(dev, "Error writing reg_range\n");
return ret;
}
data->dps_range = bmg160_scale_table[i].dps_range;
@@ -422,6 +421,7 @@ static int bmg160_set_scale(struct bmg160_data *data, int val)
static int bmg160_get_temp(struct bmg160_data *data, int *val)
{
+ struct device *dev = regmap_get_device(data->regmap);
int ret;
unsigned int raw_val;
@@ -434,7 +434,7 @@ static int bmg160_get_temp(struct bmg160_data *data, int *val)
ret = regmap_read(data->regmap, BMG160_REG_TEMP, &raw_val);
if (ret < 0) {
- dev_err(data->dev, "Error reading reg_temp\n");
+ dev_err(dev, "Error reading reg_temp\n");
bmg160_set_power_state(data, false);
mutex_unlock(&data->mutex);
return ret;
@@ -451,6 +451,7 @@ static int bmg160_get_temp(struct bmg160_data *data, int *val)
static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
{
+ struct device *dev = regmap_get_device(data->regmap);
int ret;
__le16 raw_val;
@@ -464,7 +465,7 @@ static int bmg160_get_axis(struct bmg160_data *data, int axis, int *val)
ret = regmap_bulk_read(data->regmap, BMG160_AXIS_TO_REG(axis), &raw_val,
sizeof(raw_val));
if (ret < 0) {
- dev_err(data->dev, "Error reading axis %d\n", axis);
+ dev_err(dev, "Error reading axis %d\n", axis);
bmg160_set_power_state(data, false);
mutex_unlock(&data->mutex);
return ret;
@@ -764,26 +765,23 @@ static const struct iio_info bmg160_info = {
.driver_module = THIS_MODULE,
};
+static const unsigned long bmg160_accel_scan_masks[] = {
+ BIT(AXIS_X) | BIT(AXIS_Y) | BIT(AXIS_Z),
+ 0};
+
static irqreturn_t bmg160_trigger_handler(int irq, void *p)
{
struct iio_poll_func *pf = p;
struct iio_dev *indio_dev = pf->indio_dev;
struct bmg160_data *data = iio_priv(indio_dev);
- int bit, ret, i = 0;
- unsigned int val;
+ int ret;
mutex_lock(&data->mutex);
- for_each_set_bit(bit, indio_dev->active_scan_mask,
- indio_dev->masklength) {
- ret = regmap_bulk_read(data->regmap, BMG160_AXIS_TO_REG(bit),
- &val, 2);
- if (ret < 0) {
- mutex_unlock(&data->mutex);
- goto err;
- }
- data->buffer[i++] = val;
- }
+ ret = regmap_bulk_read(data->regmap, BMG160_REG_XOUT_L,
+ data->buffer, AXIS_MAX * 2);
mutex_unlock(&data->mutex);
+ if (ret < 0)
+ goto err;
iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
pf->timestamp);
@@ -797,6 +795,7 @@ static int bmg160_trig_try_reen(struct iio_trigger *trig)
{
struct iio_dev *indio_dev = iio_trigger_get_drvdata(trig);
struct bmg160_data *data = iio_priv(indio_dev);
+ struct device *dev = regmap_get_device(data->regmap);
int ret;
/* new data interrupts don't need ack */
@@ -808,7 +807,7 @@ static int bmg160_trig_try_reen(struct iio_trigger *trig)
BMG160_INT_MODE_LATCH_INT |
BMG160_INT_MODE_LATCH_RESET);
if (ret < 0) {
- dev_err(data->dev, "Error writing reg_rst_latch\n");
+ dev_err(dev, "Error writing reg_rst_latch\n");
return ret;
}
@@ -868,13 +867,14 @@ static irqreturn_t bmg160_event_handler(int irq, void *private)
{
struct iio_dev *indio_dev = private;
struct bmg160_data *data = iio_priv(indio_dev);
+ struct device *dev = regmap_get_device(data->regmap);
int ret;
int dir;
unsigned int val;
ret = regmap_read(data->regmap, BMG160_REG_INT_STATUS_2, &val);
if (ret < 0) {
- dev_err(data->dev, "Error reading reg_int_status2\n");
+ dev_err(dev, "Error reading reg_int_status2\n");
goto ack_intr_status;
}
@@ -911,8 +911,7 @@ ack_intr_status:
BMG160_INT_MODE_LATCH_INT |
BMG160_INT_MODE_LATCH_RESET);
if (ret < 0)
- dev_err(data->dev,
- "Error writing reg_rst_latch\n");
+ dev_err(dev, "Error writing reg_rst_latch\n");
}
return IRQ_HANDLED;
@@ -956,29 +955,6 @@ static const struct iio_buffer_setup_ops bmg160_buffer_setup_ops = {
.postdisable = bmg160_buffer_postdisable,
};
-static int bmg160_gpio_probe(struct bmg160_data *data)
-
-{
- struct device *dev;
- struct gpio_desc *gpio;
-
- dev = data->dev;
-
- /* data ready gpio interrupt pin */
- gpio = devm_gpiod_get_index(dev, BMG160_GPIO_NAME, 0, GPIOD_IN);
- if (IS_ERR(gpio)) {
- dev_err(dev, "acpi gpio get index failed\n");
- return PTR_ERR(gpio);
- }
-
- data->irq = gpiod_to_irq(gpio);
-
- dev_dbg(dev, "GPIO resource, no:%d irq:%d\n", desc_to_gpio(gpio),
- data->irq);
-
- return 0;
-}
-
static const char *bmg160_match_acpi_device(struct device *dev)
{
const struct acpi_device_id *id;
@@ -1003,7 +979,6 @@ int bmg160_core_probe(struct device *dev, struct regmap *regmap, int irq,
data = iio_priv(indio_dev);
dev_set_drvdata(dev, indio_dev);
- data->dev = dev;
data->irq = irq;
data->regmap = regmap;
@@ -1020,12 +995,10 @@ int bmg160_core_probe(struct device *dev, struct regmap *regmap, int irq,
indio_dev->channels = bmg160_channels;
indio_dev->num_channels = ARRAY_SIZE(bmg160_channels);
indio_dev->name = name;
+ indio_dev->available_scan_masks = bmg160_accel_scan_masks;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &bmg160_info;
- if (data->irq <= 0)
- bmg160_gpio_probe(data);
-
if (data->irq > 0) {
ret = devm_request_threaded_irq(dev,
data->irq,
@@ -1168,7 +1141,7 @@ static int bmg160_runtime_suspend(struct device *dev)
ret = bmg160_set_mode(data, BMG160_MODE_SUSPEND);
if (ret < 0) {
- dev_err(data->dev, "set mode failed\n");
+ dev_err(dev, "set mode failed\n");
return -EAGAIN;
}
diff --git a/drivers/iio/gyro/st_gyro.h b/drivers/iio/gyro/st_gyro.h
index 5353d6328c54..a5c5c4e29add 100644
--- a/drivers/iio/gyro/st_gyro.h
+++ b/drivers/iio/gyro/st_gyro.h
@@ -21,6 +21,7 @@
#define L3GD20_GYRO_DEV_NAME "l3gd20"
#define L3G4IS_GYRO_DEV_NAME "l3g4is_ui"
#define LSM330_GYRO_DEV_NAME "lsm330_gyro"
+#define LSM9DS0_GYRO_DEV_NAME "lsm9ds0_gyro"
/**
* struct st_sensors_platform_data - gyro platform data
diff --git a/drivers/iio/gyro/st_gyro_core.c b/drivers/iio/gyro/st_gyro_core.c
index 110f95b6e52f..52a3c87c375c 100644
--- a/drivers/iio/gyro/st_gyro_core.c
+++ b/drivers/iio/gyro/st_gyro_core.c
@@ -190,6 +190,7 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
* drain settings, but only for INT1 and not
* for the DRDY line on INT2.
*/
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
.multi_read_bit = ST_GYRO_1_MULTIREAD_BIT,
.bootime = 2,
@@ -203,6 +204,7 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
[2] = LSM330DLC_GYRO_DEV_NAME,
[3] = L3G4IS_GYRO_DEV_NAME,
[4] = LSM330_GYRO_DEV_NAME,
+ [5] = LSM9DS0_GYRO_DEV_NAME,
},
.ch = (struct iio_chan_spec *)st_gyro_16bit_channels,
.odr = {
@@ -258,6 +260,7 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
* drain settings, but only for INT1 and not
* for the DRDY line on INT2.
*/
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
.multi_read_bit = ST_GYRO_2_MULTIREAD_BIT,
.bootime = 2,
@@ -322,6 +325,7 @@ static const struct st_sensor_settings st_gyro_sensors_settings[] = {
* drain settings, but only for INT1 and not
* for the DRDY line on INT2.
*/
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
.multi_read_bit = ST_GYRO_3_MULTIREAD_BIT,
.bootime = 2,
diff --git a/drivers/iio/gyro/st_gyro_i2c.c b/drivers/iio/gyro/st_gyro_i2c.c
index 6848451f817a..40056b821036 100644
--- a/drivers/iio/gyro/st_gyro_i2c.c
+++ b/drivers/iio/gyro/st_gyro_i2c.c
@@ -48,6 +48,10 @@ static const struct of_device_id st_gyro_of_match[] = {
.compatible = "st,lsm330-gyro",
.data = LSM330_GYRO_DEV_NAME,
},
+ {
+ .compatible = "st,lsm9ds0-gyro",
+ .data = LSM9DS0_GYRO_DEV_NAME,
+ },
{},
};
MODULE_DEVICE_TABLE(of, st_gyro_of_match);
@@ -93,6 +97,7 @@ static const struct i2c_device_id st_gyro_id_table[] = {
{ L3GD20_GYRO_DEV_NAME },
{ L3G4IS_GYRO_DEV_NAME },
{ LSM330_GYRO_DEV_NAME },
+ { LSM9DS0_GYRO_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(i2c, st_gyro_id_table);
diff --git a/drivers/iio/gyro/st_gyro_spi.c b/drivers/iio/gyro/st_gyro_spi.c
index d2b7a5fa344c..fbf2faed501c 100644
--- a/drivers/iio/gyro/st_gyro_spi.c
+++ b/drivers/iio/gyro/st_gyro_spi.c
@@ -54,6 +54,7 @@ static const struct spi_device_id st_gyro_id_table[] = {
{ L3GD20_GYRO_DEV_NAME },
{ L3G4IS_GYRO_DEV_NAME },
{ LSM330_GYRO_DEV_NAME },
+ { LSM9DS0_GYRO_DEV_NAME },
{},
};
MODULE_DEVICE_TABLE(spi, st_gyro_id_table);
diff --git a/drivers/iio/humidity/Kconfig b/drivers/iio/humidity/Kconfig
index 866dda133336..738a86d9e4a9 100644
--- a/drivers/iio/humidity/Kconfig
+++ b/drivers/iio/humidity/Kconfig
@@ -3,6 +3,16 @@
#
menu "Humidity sensors"
+config AM2315
+ tristate "Aosong AM2315 relative humidity and temperature sensor"
+ depends on I2C
+ help
+ If you say yes here you get support for the Aosong AM2315
+ relative humidity and ambient temperature sensor.
+
+ This driver can also be built as a module. If so, the module will
+ be called am2315.
+
config DHT11
tristate "DHT11 (and compatible sensors) driver"
depends on GPIOLIB || COMPILE_TEST
diff --git a/drivers/iio/humidity/Makefile b/drivers/iio/humidity/Makefile
index c9f089a9a6b8..4a73442fcd9c 100644
--- a/drivers/iio/humidity/Makefile
+++ b/drivers/iio/humidity/Makefile
@@ -2,6 +2,7 @@
# Makefile for IIO humidity sensor drivers
#
+obj-$(CONFIG_AM2315) += am2315.o
obj-$(CONFIG_DHT11) += dht11.o
obj-$(CONFIG_HDC100X) += hdc100x.o
obj-$(CONFIG_HTU21) += htu21.o
diff --git a/drivers/iio/humidity/am2315.c b/drivers/iio/humidity/am2315.c
new file mode 100644
index 000000000000..3be6d209a159
--- /dev/null
+++ b/drivers/iio/humidity/am2315.c
@@ -0,0 +1,303 @@
+/**
+ * Aosong AM2315 relative humidity and temperature
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * 7-bit I2C address: 0x5C.
+ */
+
+#include <linux/acpi.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#define AM2315_REG_HUM_MSB 0x00
+#define AM2315_REG_HUM_LSB 0x01
+#define AM2315_REG_TEMP_MSB 0x02
+#define AM2315_REG_TEMP_LSB 0x03
+
+#define AM2315_FUNCTION_READ 0x03
+#define AM2315_HUM_OFFSET 2
+#define AM2315_TEMP_OFFSET 4
+#define AM2315_ALL_CHANNEL_MASK GENMASK(1, 0)
+
+#define AM2315_DRIVER_NAME "am2315"
+
+struct am2315_data {
+ struct i2c_client *client;
+ struct mutex lock;
+ s16 buffer[8]; /* 2x16-bit channels + 2x16 padding + 4x16 timestamp */
+};
+
+struct am2315_sensor_data {
+ s16 hum_data;
+ s16 temp_data;
+};
+
+static const struct iio_chan_spec am2315_channels[] = {
+ {
+ .type = IIO_HUMIDITYRELATIVE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 0,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ },
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = 1,
+ .scan_type = {
+ .sign = 's',
+ .realbits = 16,
+ .storagebits = 16,
+ .endianness = IIO_CPU,
+ },
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(2),
+};
+
+/* CRC calculation algorithm, as specified in the datasheet (page 13). */
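+/* (This matches the standard CRC-16/MODBUS: init 0xffff, poly 0xa001 reflected.) */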
+static u16 am2315_crc(u8 *data, u8 nr_bytes)
+{
+ int i;
+ u16 crc = 0xffff;
+
+ while (nr_bytes--) {
+ crc ^= *data++;
+ for (i = 0; i < 8; i++) {
+ if (crc & 0x01) {
+ crc >>= 1;
+ crc ^= 0xA001;
+ } else {
+ crc >>= 1;
+ }
+ }
+ }
+
+ return crc;
+}
+
+/* Wake up the device by issuing a dummy read; the result is ignored. */
+static void am2315_ping(struct i2c_client *client)
+{
+ i2c_smbus_read_byte_data(client, AM2315_REG_HUM_MSB);
+}
+
+static int am2315_read_data(struct am2315_data *data,
+ struct am2315_sensor_data *sensor_data)
+{
+ int ret;
+ /* tx_buf format: <function code> <start addr> <nr of regs to read> */
+ u8 tx_buf[3] = { AM2315_FUNCTION_READ, AM2315_REG_HUM_MSB, 4 };
+ /*
+ * rx_buf format:
+ * <function code> <number of registers read>
+ * <humidity MSB> <humidity LSB> <temp MSB> <temp LSB>
+ * <CRC LSB> <CRC MSB>
+ */
+ u8 rx_buf[8];
+ u16 crc;
+
+ /* First wake up the device. */
+ am2315_ping(data->client);
+
+ mutex_lock(&data->lock);
+ ret = i2c_master_send(data->client, tx_buf, sizeof(tx_buf));
+ if (ret < 0) {
+ dev_err(&data->client->dev, "failed to send read request\n");
+ goto exit_unlock;
+ }
+ /* Wait 2-3 ms, then read back the data sent by the device. */
+ usleep_range(2000, 3000);
+ /* Do a bulk data read, then pick out what we need. */
+ ret = i2c_master_recv(data->client, rx_buf, sizeof(rx_buf));
+ if (ret < 0) {
+ dev_err(&data->client->dev, "failed to read sensor data\n");
+ goto exit_unlock;
+ }
+ mutex_unlock(&data->lock);
+ /*
+ * Do a CRC check on the data and compare it to the value
+ * calculated by the device.
+ */
+ crc = am2315_crc(rx_buf, sizeof(rx_buf) - 2);
+ if ((crc & 0xff) != rx_buf[6] || (crc >> 8) != rx_buf[7]) {
+ dev_err(&data->client->dev, "failed to verify sensor data\n");
+ return -EIO;
+ }
+
+ sensor_data->hum_data = (rx_buf[AM2315_HUM_OFFSET] << 8) |
+ rx_buf[AM2315_HUM_OFFSET + 1];
+ sensor_data->temp_data = (rx_buf[AM2315_TEMP_OFFSET] << 8) |
+ rx_buf[AM2315_TEMP_OFFSET + 1];
+
+ return ret;
+
+exit_unlock:
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+static irqreturn_t am2315_trigger_handler(int irq, void *p)
+{
+ int i;
+ int ret;
+ int bit;
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct am2315_data *data = iio_priv(indio_dev);
+ struct am2315_sensor_data sensor_data;
+
+ ret = am2315_read_data(data, &sensor_data);
+	if (ret < 0)
+		goto err;
+
+ mutex_lock(&data->lock);
+ if (*(indio_dev->active_scan_mask) == AM2315_ALL_CHANNEL_MASK) {
+ data->buffer[0] = sensor_data.hum_data;
+ data->buffer[1] = sensor_data.temp_data;
+ } else {
+ i = 0;
+ for_each_set_bit(bit, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ data->buffer[i] = (bit ? sensor_data.temp_data :
+ sensor_data.hum_data);
+ i++;
+ }
+ }
+ mutex_unlock(&data->lock);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, data->buffer,
+ pf->timestamp);
+err:
+ iio_trigger_notify_done(indio_dev->trig);
+ return IRQ_HANDLED;
+}
+
+static int am2315_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ int ret;
+ struct am2315_sensor_data sensor_data;
+ struct am2315_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = am2315_read_data(data, &sensor_data);
+ if (ret < 0)
+ return ret;
+ *val = (chan->type == IIO_HUMIDITYRELATIVE) ?
+ sensor_data.hum_data : sensor_data.temp_data;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 100;
+ return IIO_VAL_INT;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info am2315_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = am2315_read_raw,
+};
+
+static int am2315_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct iio_dev *indio_dev;
+ struct am2315_data *data;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev) {
+ dev_err(&client->dev, "iio allocation failed!\n");
+ return -ENOMEM;
+ }
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ i2c_set_clientdata(client, indio_dev);
+ mutex_init(&data->lock);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &am2315_info;
+ indio_dev->name = AM2315_DRIVER_NAME;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = am2315_channels;
+ indio_dev->num_channels = ARRAY_SIZE(am2315_channels);
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ am2315_trigger_handler, NULL);
+ if (ret < 0) {
+ dev_err(&client->dev, "iio triggered buffer setup failed\n");
+ return ret;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ goto err_buffer_cleanup;
+
+ return 0;
+
+err_buffer_cleanup:
+ iio_triggered_buffer_cleanup(indio_dev);
+ return ret;
+}
+
+static int am2315_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id am2315_i2c_id[] = {
+ {"am2315", 0},
+ {}
+};
+
+static const struct acpi_device_id am2315_acpi_id[] = {
+ {"AOS2315", 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(acpi, am2315_acpi_id);
+
+static struct i2c_driver am2315_driver = {
+ .driver = {
+ .name = "am2315",
+ .acpi_match_table = ACPI_PTR(am2315_acpi_id),
+ },
+ .probe = am2315_probe,
+ .remove = am2315_remove,
+ .id_table = am2315_i2c_id,
+};
+
+module_i2c_driver(am2315_driver);
+
+MODULE_AUTHOR("Tiberiu Breana <tiberiu.a.breana@intel.com>");
+MODULE_DESCRIPTION("Aosong AM2315 relative humidity and temperature");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/humidity/dht11.c b/drivers/iio/humidity/dht11.c
index 20b500da94db..9c47bc98f3ac 100644
--- a/drivers/iio/humidity/dht11.c
+++ b/drivers/iio/humidity/dht11.c
@@ -96,6 +96,24 @@ struct dht11 {
struct {s64 ts; int value; } edges[DHT11_EDGES_PER_READ];
};
+#ifdef CONFIG_DYNAMIC_DEBUG
+/*
+ * dht11_edges_print: show the data as actually received by the
+ * driver.
+ */
+static void dht11_edges_print(struct dht11 *dht11)
+{
+ int i;
+
+ dev_dbg(dht11->dev, "%d edges detected:\n", dht11->num_edges);
+ for (i = 1; i < dht11->num_edges; ++i) {
+ dev_dbg(dht11->dev, "%d: %lld ns %s\n", i,
+ dht11->edges[i].ts - dht11->edges[i - 1].ts,
+ dht11->edges[i - 1].value ? "high" : "low");
+ }
+}
+#endif /* CONFIG_DYNAMIC_DEBUG */
+
static unsigned char dht11_decode_byte(char *bits)
{
unsigned char ret = 0;
@@ -119,8 +137,12 @@ static int dht11_decode(struct dht11 *dht11, int offset)
for (i = 0; i < DHT11_BITS_PER_READ; ++i) {
t = dht11->edges[offset + 2 * i + 2].ts -
dht11->edges[offset + 2 * i + 1].ts;
- if (!dht11->edges[offset + 2 * i + 1].value)
- return -EIO; /* lost synchronisation */
+ if (!dht11->edges[offset + 2 * i + 1].value) {
+ dev_dbg(dht11->dev,
+ "lost synchronisation at edge %d\n",
+ offset + 2 * i + 1);
+ return -EIO;
+ }
bits[i] = t > DHT11_THRESHOLD;
}
@@ -130,8 +152,10 @@ static int dht11_decode(struct dht11 *dht11, int offset)
temp_dec = dht11_decode_byte(&bits[24]);
checksum = dht11_decode_byte(&bits[32]);
- if (((hum_int + hum_dec + temp_int + temp_dec) & 0xff) != checksum)
+ if (((hum_int + hum_dec + temp_int + temp_dec) & 0xff) != checksum) {
+ dev_dbg(dht11->dev, "invalid checksum\n");
return -EIO;
+ }
dht11->timestamp = ktime_get_boot_ns();
if (hum_int < 20) { /* DHT22 */
@@ -182,6 +206,7 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
mutex_lock(&dht11->lock);
if (dht11->timestamp + DHT11_DATA_VALID_TIME < ktime_get_boot_ns()) {
timeres = ktime_get_resolution_ns();
+ dev_dbg(dht11->dev, "current timeresolution: %dns\n", timeres);
if (timeres > DHT11_MIN_TIMERES) {
dev_err(dht11->dev, "timeresolution %dns too low\n",
timeres);
@@ -219,10 +244,13 @@ static int dht11_read_raw(struct iio_dev *iio_dev,
free_irq(dht11->irq, iio_dev);
+#ifdef CONFIG_DYNAMIC_DEBUG
+ dht11_edges_print(dht11);
+#endif
+
if (ret == 0 && dht11->num_edges < DHT11_EDGES_PER_READ - 1) {
- dev_err(&iio_dev->dev,
- "Only %d signal edges detected\n",
- dht11->num_edges);
+ dev_err(dht11->dev, "Only %d signal edges detected\n",
+ dht11->num_edges);
ret = -ETIMEDOUT;
}
if (ret < 0)
diff --git a/drivers/iio/imu/Kconfig b/drivers/iio/imu/Kconfig
index 5e610f7de5aa..1f1ad41ef881 100644
--- a/drivers/iio/imu/Kconfig
+++ b/drivers/iio/imu/Kconfig
@@ -25,6 +25,8 @@ config ADIS16480
Say yes here to build support for Analog Devices ADIS16375, ADIS16480,
ADIS16485, ADIS16488 inertial sensors.
+source "drivers/iio/imu/bmi160/Kconfig"
+
config KMX61
tristate "Kionix KMX61 6-axis accelerometer and magnetometer"
depends on I2C
diff --git a/drivers/iio/imu/Makefile b/drivers/iio/imu/Makefile
index e1e6e3d70e26..c71bcd30dc38 100644
--- a/drivers/iio/imu/Makefile
+++ b/drivers/iio/imu/Makefile
@@ -13,6 +13,7 @@ adis_lib-$(CONFIG_IIO_ADIS_LIB_BUFFER) += adis_trigger.o
adis_lib-$(CONFIG_IIO_ADIS_LIB_BUFFER) += adis_buffer.o
obj-$(CONFIG_IIO_ADIS_LIB) += adis_lib.o
+obj-y += bmi160/
obj-y += inv_mpu6050/
obj-$(CONFIG_KMX61) += kmx61.o
diff --git a/drivers/iio/imu/adis.c b/drivers/iio/imu/adis.c
index 911255d41c1a..ad6f91d06185 100644
--- a/drivers/iio/imu/adis.c
+++ b/drivers/iio/imu/adis.c
@@ -324,7 +324,12 @@ static int adis_self_test(struct adis *adis)
msleep(adis->data->startup_delay);
- return adis_check_status(adis);
+ ret = adis_check_status(adis);
+
+ if (adis->data->self_test_no_autoclear)
+ adis_write_reg_16(adis, adis->data->msc_ctrl_reg, 0x00);
+
+ return ret;
}
/**
diff --git a/drivers/iio/imu/bmi160/Kconfig b/drivers/iio/imu/bmi160/Kconfig
new file mode 100644
index 000000000000..005c17ccc2b0
--- /dev/null
+++ b/drivers/iio/imu/bmi160/Kconfig
@@ -0,0 +1,32 @@
+#
+# BMI160 IMU driver
+#
+
+config BMI160
+ tristate
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
+
+config BMI160_I2C
+ tristate "Bosch BMI160 I2C driver"
+ depends on I2C
+ select BMI160
+ select REGMAP_I2C
+ help
+ If you say yes here you get support for BMI160 IMU on I2C with
+	  accelerometer, gyroscope and external BMM150 magnetometer.
+
+ This driver can also be built as a module. If so, the module will be
+ called bmi160_i2c.
+
+config BMI160_SPI
+ tristate "Bosch BMI160 SPI driver"
+ depends on SPI
+ select BMI160
+ select REGMAP_SPI
+ help
+ If you say yes here you get support for BMI160 IMU on SPI with
+	  accelerometer, gyroscope and external BMM150 magnetometer.
+
+ This driver can also be built as a module. If so, the module will be
+ called bmi160_spi.
diff --git a/drivers/iio/imu/bmi160/Makefile b/drivers/iio/imu/bmi160/Makefile
new file mode 100644
index 000000000000..10365e493ae2
--- /dev/null
+++ b/drivers/iio/imu/bmi160/Makefile
@@ -0,0 +1,6 @@
+#
+# Makefile for Bosch BMI160 IMU
+#
+obj-$(CONFIG_BMI160) += bmi160_core.o
+obj-$(CONFIG_BMI160_I2C) += bmi160_i2c.o
+obj-$(CONFIG_BMI160_SPI) += bmi160_spi.o
diff --git a/drivers/iio/imu/bmi160/bmi160.h b/drivers/iio/imu/bmi160/bmi160.h
new file mode 100644
index 000000000000..d2ae6ed70271
--- /dev/null
+++ b/drivers/iio/imu/bmi160/bmi160.h
@@ -0,0 +1,10 @@
+#ifndef BMI160_H_
+#define BMI160_H_
+
+extern const struct regmap_config bmi160_regmap_config;
+
+int bmi160_core_probe(struct device *dev, struct regmap *regmap,
+ const char *name, bool use_spi);
+void bmi160_core_remove(struct device *dev);
+
+#endif /* BMI160_H_ */
diff --git a/drivers/iio/imu/bmi160/bmi160_core.c b/drivers/iio/imu/bmi160/bmi160_core.c
new file mode 100644
index 000000000000..0bf92b06d7d8
--- /dev/null
+++ b/drivers/iio/imu/bmi160/bmi160_core.c
@@ -0,0 +1,596 @@
+/*
+ * BMI160 - Bosch IMU (accel, gyro plus external magnetometer)
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * IIO core driver for BMI160, with support for I2C/SPI busses
+ *
+ * TODO: magnetometer, interrupts, hardware FIFO
+ */
+#include <linux/module.h>
+#include <linux/regmap.h>
+#include <linux/acpi.h>
+#include <linux/delay.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/buffer.h>
+
+#include "bmi160.h"
+
+#define BMI160_REG_CHIP_ID 0x00
+#define BMI160_CHIP_ID_VAL 0xD1
+
+#define BMI160_REG_PMU_STATUS 0x03
+
+/* X axis data low byte address, the rest can be obtained using axis offset */
+#define BMI160_REG_DATA_MAGN_XOUT_L 0x04
+#define BMI160_REG_DATA_GYRO_XOUT_L 0x0C
+#define BMI160_REG_DATA_ACCEL_XOUT_L 0x12
+
+#define BMI160_REG_ACCEL_CONFIG 0x40
+#define BMI160_ACCEL_CONFIG_ODR_MASK GENMASK(3, 0)
+#define BMI160_ACCEL_CONFIG_BWP_MASK GENMASK(6, 4)
+
+#define BMI160_REG_ACCEL_RANGE 0x41
+#define BMI160_ACCEL_RANGE_2G 0x03
+#define BMI160_ACCEL_RANGE_4G 0x05
+#define BMI160_ACCEL_RANGE_8G 0x08
+#define BMI160_ACCEL_RANGE_16G 0x0C
+
+#define BMI160_REG_GYRO_CONFIG 0x42
+#define BMI160_GYRO_CONFIG_ODR_MASK GENMASK(3, 0)
+#define BMI160_GYRO_CONFIG_BWP_MASK GENMASK(5, 4)
+
+#define BMI160_REG_GYRO_RANGE 0x43
+#define BMI160_GYRO_RANGE_2000DPS 0x00
+#define BMI160_GYRO_RANGE_1000DPS 0x01
+#define BMI160_GYRO_RANGE_500DPS 0x02
+#define BMI160_GYRO_RANGE_250DPS 0x03
+#define BMI160_GYRO_RANGE_125DPS 0x04
+
+#define BMI160_REG_CMD 0x7E
+#define BMI160_CMD_ACCEL_PM_SUSPEND 0x10
+#define BMI160_CMD_ACCEL_PM_NORMAL 0x11
+#define BMI160_CMD_ACCEL_PM_LOW_POWER 0x12
+#define BMI160_CMD_GYRO_PM_SUSPEND 0x14
+#define BMI160_CMD_GYRO_PM_NORMAL 0x15
+#define BMI160_CMD_GYRO_PM_FAST_STARTUP 0x17
+#define BMI160_CMD_SOFTRESET 0xB6
+
+#define BMI160_REG_DUMMY 0x7F
+
+#define BMI160_ACCEL_PMU_MIN_USLEEP 3200
+#define BMI160_ACCEL_PMU_MAX_USLEEP 3800
+#define BMI160_GYRO_PMU_MIN_USLEEP 55000
+#define BMI160_GYRO_PMU_MAX_USLEEP 80000
+#define BMI160_SOFTRESET_USLEEP 1000
+
+#define BMI160_CHANNEL(_type, _axis, _index) { \
+ .type = _type, \
+ .modified = 1, \
+ .channel2 = IIO_MOD_##_axis, \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) | \
+ BIT(IIO_CHAN_INFO_SAMP_FREQ), \
+ .scan_index = _index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_LE, \
+ }, \
+}
+
+/* scan indexes follow DATA register order */
+enum bmi160_scan_axis {
+ BMI160_SCAN_EXT_MAGN_X = 0,
+ BMI160_SCAN_EXT_MAGN_Y,
+ BMI160_SCAN_EXT_MAGN_Z,
+ BMI160_SCAN_RHALL,
+ BMI160_SCAN_GYRO_X,
+ BMI160_SCAN_GYRO_Y,
+ BMI160_SCAN_GYRO_Z,
+ BMI160_SCAN_ACCEL_X,
+ BMI160_SCAN_ACCEL_Y,
+ BMI160_SCAN_ACCEL_Z,
+ BMI160_SCAN_TIMESTAMP,
+};
+
+enum bmi160_sensor_type {
+ BMI160_ACCEL = 0,
+ BMI160_GYRO,
+ BMI160_EXT_MAGN,
+ BMI160_NUM_SENSORS /* must be last */
+};
+
+struct bmi160_data {
+ struct regmap *regmap;
+};
+
+const struct regmap_config bmi160_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+};
+EXPORT_SYMBOL(bmi160_regmap_config);
+
+struct bmi160_regs {
+ u8 data; /* LSB byte register for X-axis */
+ u8 config;
+ u8 config_odr_mask;
+ u8 config_bwp_mask;
+ u8 range;
+ u8 pmu_cmd_normal;
+ u8 pmu_cmd_suspend;
+};
+
+static struct bmi160_regs bmi160_regs[] = {
+ [BMI160_ACCEL] = {
+ .data = BMI160_REG_DATA_ACCEL_XOUT_L,
+ .config = BMI160_REG_ACCEL_CONFIG,
+ .config_odr_mask = BMI160_ACCEL_CONFIG_ODR_MASK,
+ .config_bwp_mask = BMI160_ACCEL_CONFIG_BWP_MASK,
+ .range = BMI160_REG_ACCEL_RANGE,
+ .pmu_cmd_normal = BMI160_CMD_ACCEL_PM_NORMAL,
+ .pmu_cmd_suspend = BMI160_CMD_ACCEL_PM_SUSPEND,
+ },
+ [BMI160_GYRO] = {
+ .data = BMI160_REG_DATA_GYRO_XOUT_L,
+ .config = BMI160_REG_GYRO_CONFIG,
+ .config_odr_mask = BMI160_GYRO_CONFIG_ODR_MASK,
+ .config_bwp_mask = BMI160_GYRO_CONFIG_BWP_MASK,
+ .range = BMI160_REG_GYRO_RANGE,
+ .pmu_cmd_normal = BMI160_CMD_GYRO_PM_NORMAL,
+ .pmu_cmd_suspend = BMI160_CMD_GYRO_PM_SUSPEND,
+ },
+};
+
+struct bmi160_pmu_time {
+ unsigned long min;
+ unsigned long max;
+};
+
+static struct bmi160_pmu_time bmi160_pmu_time[] = {
+ [BMI160_ACCEL] = {
+ .min = BMI160_ACCEL_PMU_MIN_USLEEP,
+ .max = BMI160_ACCEL_PMU_MAX_USLEEP
+ },
+ [BMI160_GYRO] = {
+ .min = BMI160_GYRO_PMU_MIN_USLEEP,
+		.max = BMI160_GYRO_PMU_MAX_USLEEP,
+ },
+};
+
+struct bmi160_scale {
+ u8 bits;
+ int uscale;
+};
+
+struct bmi160_odr {
+ u8 bits;
+ int odr;
+ int uodr;
+};
+
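+/*
+ * Scale tables: micro-units per LSB for each full-scale range, i.e.
+ * range / 32768 expressed in m/s^2 (accel) or rad/s (gyro). For example,
+ * the +/-2g range gives 2 * 9.80665 / 32768 ~= 0.000598 m/s^2 per LSB,
+ * hence uscale 598. (Derivation shown for clarity; it matches the table
+ * values below.)
+ */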
+static const struct bmi160_scale bmi160_accel_scale[] = {
+ { BMI160_ACCEL_RANGE_2G, 598},
+ { BMI160_ACCEL_RANGE_4G, 1197},
+ { BMI160_ACCEL_RANGE_8G, 2394},
+ { BMI160_ACCEL_RANGE_16G, 4788},
+};
+
+static const struct bmi160_scale bmi160_gyro_scale[] = {
+ { BMI160_GYRO_RANGE_2000DPS, 1065},
+ { BMI160_GYRO_RANGE_1000DPS, 532},
+ { BMI160_GYRO_RANGE_500DPS, 266},
+ { BMI160_GYRO_RANGE_250DPS, 133},
+ { BMI160_GYRO_RANGE_125DPS, 66},
+};
+
+struct bmi160_scale_item {
+ const struct bmi160_scale *tbl;
+ int num;
+};
+
+static const struct bmi160_scale_item bmi160_scale_table[] = {
+ [BMI160_ACCEL] = {
+ .tbl = bmi160_accel_scale,
+ .num = ARRAY_SIZE(bmi160_accel_scale),
+ },
+ [BMI160_GYRO] = {
+ .tbl = bmi160_gyro_scale,
+ .num = ARRAY_SIZE(bmi160_gyro_scale),
+ },
+};
+
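+/*
+ * Output data rate tables. The register codes appear to follow the
+ * ODR = 100 * 2^(val - 8) Hz pattern (e.g. 0x08 -> 100 Hz, 0x0B -> 800 Hz,
+ * 0x0C -> 1600 Hz); sub-Hz rates are split into integer (odr) and micro
+ * (uodr) parts.
+ */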
+static const struct bmi160_odr bmi160_accel_odr[] = {
+	{0x01, 0, 781250},
+	{0x02, 1, 562500},
+	{0x03, 3, 125000},
+	{0x04, 6, 250000},
+	{0x05, 12, 500000},
+ {0x06, 25, 0},
+ {0x07, 50, 0},
+ {0x08, 100, 0},
+ {0x09, 200, 0},
+ {0x0A, 400, 0},
+ {0x0B, 800, 0},
+ {0x0C, 1600, 0},
+};
+
+static const struct bmi160_odr bmi160_gyro_odr[] = {
+ {0x06, 25, 0},
+ {0x07, 50, 0},
+ {0x08, 100, 0},
+ {0x09, 200, 0},
+ {0x0A, 400, 0},
+	{0x0B, 800, 0},
+ {0x0C, 1600, 0},
+ {0x0D, 3200, 0},
+};
+
+struct bmi160_odr_item {
+ const struct bmi160_odr *tbl;
+ int num;
+};
+
+static const struct bmi160_odr_item bmi160_odr_table[] = {
+ [BMI160_ACCEL] = {
+ .tbl = bmi160_accel_odr,
+ .num = ARRAY_SIZE(bmi160_accel_odr),
+ },
+ [BMI160_GYRO] = {
+ .tbl = bmi160_gyro_odr,
+ .num = ARRAY_SIZE(bmi160_gyro_odr),
+ },
+};
+
+static const struct iio_chan_spec bmi160_channels[] = {
+ BMI160_CHANNEL(IIO_ACCEL, X, BMI160_SCAN_ACCEL_X),
+ BMI160_CHANNEL(IIO_ACCEL, Y, BMI160_SCAN_ACCEL_Y),
+ BMI160_CHANNEL(IIO_ACCEL, Z, BMI160_SCAN_ACCEL_Z),
+ BMI160_CHANNEL(IIO_ANGL_VEL, X, BMI160_SCAN_GYRO_X),
+ BMI160_CHANNEL(IIO_ANGL_VEL, Y, BMI160_SCAN_GYRO_Y),
+ BMI160_CHANNEL(IIO_ANGL_VEL, Z, BMI160_SCAN_GYRO_Z),
+ IIO_CHAN_SOFT_TIMESTAMP(BMI160_SCAN_TIMESTAMP),
+};
+
+static enum bmi160_sensor_type bmi160_to_sensor(enum iio_chan_type iio_type)
+{
+ switch (iio_type) {
+ case IIO_ACCEL:
+ return BMI160_ACCEL;
+ case IIO_ANGL_VEL:
+ return BMI160_GYRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static
+int bmi160_set_mode(struct bmi160_data *data, enum bmi160_sensor_type t,
+ bool mode)
+{
+ int ret;
+ u8 cmd;
+
+ if (mode)
+ cmd = bmi160_regs[t].pmu_cmd_normal;
+ else
+ cmd = bmi160_regs[t].pmu_cmd_suspend;
+
+ ret = regmap_write(data->regmap, BMI160_REG_CMD, cmd);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(bmi160_pmu_time[t].min, bmi160_pmu_time[t].max);
+
+ return 0;
+}
+
+static
+int bmi160_set_scale(struct bmi160_data *data, enum bmi160_sensor_type t,
+ int uscale)
+{
+ int i;
+
+ for (i = 0; i < bmi160_scale_table[t].num; i++)
+ if (bmi160_scale_table[t].tbl[i].uscale == uscale)
+ break;
+
+ if (i == bmi160_scale_table[t].num)
+ return -EINVAL;
+
+ return regmap_write(data->regmap, bmi160_regs[t].range,
+ bmi160_scale_table[t].tbl[i].bits);
+}
+
+static
+int bmi160_get_scale(struct bmi160_data *data, enum bmi160_sensor_type t,
+ int *uscale)
+{
+ int i, ret, val;
+
+ ret = regmap_read(data->regmap, bmi160_regs[t].range, &val);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < bmi160_scale_table[t].num; i++)
+ if (bmi160_scale_table[t].tbl[i].bits == val) {
+ *uscale = bmi160_scale_table[t].tbl[i].uscale;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+static int bmi160_get_data(struct bmi160_data *data, int chan_type,
+ int axis, int *val)
+{
+ u8 reg;
+ int ret;
+ __le16 sample;
+ enum bmi160_sensor_type t = bmi160_to_sensor(chan_type);
+
+ reg = bmi160_regs[t].data + (axis - IIO_MOD_X) * sizeof(__le16);
+
+ ret = regmap_bulk_read(data->regmap, reg, &sample, sizeof(__le16));
+ if (ret < 0)
+ return ret;
+
+ *val = sign_extend32(le16_to_cpu(sample), 15);
+
+ return 0;
+}
+
+static
+int bmi160_set_odr(struct bmi160_data *data, enum bmi160_sensor_type t,
+ int odr, int uodr)
+{
+ int i;
+
+ for (i = 0; i < bmi160_odr_table[t].num; i++)
+ if (bmi160_odr_table[t].tbl[i].odr == odr &&
+ bmi160_odr_table[t].tbl[i].uodr == uodr)
+ break;
+
+ if (i >= bmi160_odr_table[t].num)
+ return -EINVAL;
+
+	return regmap_update_bits(data->regmap,
+				  bmi160_regs[t].config,
+				  bmi160_regs[t].config_odr_mask,
+				  bmi160_odr_table[t].tbl[i].bits);
+}
+
+static int bmi160_get_odr(struct bmi160_data *data, enum bmi160_sensor_type t,
+ int *odr, int *uodr)
+{
+ int i, val, ret;
+
+ ret = regmap_read(data->regmap, bmi160_regs[t].config, &val);
+ if (ret < 0)
+ return ret;
+
+ val &= bmi160_regs[t].config_odr_mask;
+
+ for (i = 0; i < bmi160_odr_table[t].num; i++)
+ if (val == bmi160_odr_table[t].tbl[i].bits)
+ break;
+
+ if (i >= bmi160_odr_table[t].num)
+ return -EINVAL;
+
+ *odr = bmi160_odr_table[t].tbl[i].odr;
+ *uodr = bmi160_odr_table[t].tbl[i].uodr;
+
+ return 0;
+}
+
+static irqreturn_t bmi160_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct bmi160_data *data = iio_priv(indio_dev);
+ s16 buf[16]; /* 3 sens x 3 axis x s16 + 3 x s16 pad + 4 x s16 tstamp */
+ int i, ret, j = 0, base = BMI160_REG_DATA_MAGN_XOUT_L;
+ __le16 sample;
+
+ for_each_set_bit(i, indio_dev->active_scan_mask,
+ indio_dev->masklength) {
+ ret = regmap_bulk_read(data->regmap, base + i * sizeof(__le16),
+ &sample, sizeof(__le16));
+ if (ret < 0)
+ goto done;
+ buf[j++] = sample;
+ }
+
+ iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
+done:
+ iio_trigger_notify_done(indio_dev->trig);
+ return IRQ_HANDLED;
+}
+
+static int bmi160_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ int ret;
+ struct bmi160_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ ret = bmi160_get_data(data, chan->type, chan->channel2, val);
+ if (ret < 0)
+ return ret;
+ return IIO_VAL_INT;
+ case IIO_CHAN_INFO_SCALE:
+ *val = 0;
+ ret = bmi160_get_scale(data,
+ bmi160_to_sensor(chan->type), val2);
+ return ret < 0 ? ret : IIO_VAL_INT_PLUS_MICRO;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ ret = bmi160_get_odr(data, bmi160_to_sensor(chan->type),
+ val, val2);
+ return ret < 0 ? ret : IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int bmi160_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct bmi160_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_SCALE:
+ return bmi160_set_scale(data,
+ bmi160_to_sensor(chan->type), val2);
+ break;
+ case IIO_CHAN_INFO_SAMP_FREQ:
+ return bmi160_set_odr(data, bmi160_to_sensor(chan->type),
+ val, val2);
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static const struct iio_info bmi160_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = bmi160_read_raw,
+ .write_raw = bmi160_write_raw,
+};
+
+static const char *bmi160_match_acpi_device(struct device *dev)
+{
+ const struct acpi_device_id *id;
+
+ id = acpi_match_device(dev->driver->acpi_match_table, dev);
+ if (!id)
+ return NULL;
+
+ return dev_name(dev);
+}
+
+static int bmi160_chip_init(struct bmi160_data *data, bool use_spi)
+{
+ int ret;
+ unsigned int val;
+ struct device *dev = regmap_get_device(data->regmap);
+
+ ret = regmap_write(data->regmap, BMI160_REG_CMD, BMI160_CMD_SOFTRESET);
+ if (ret < 0)
+ return ret;
+
+ usleep_range(BMI160_SOFTRESET_USLEEP, BMI160_SOFTRESET_USLEEP + 1);
+
+ /*
+ * CS rising edge is needed before starting SPI, so do a dummy read
+ * See Section 3.2.1, page 86 of the datasheet
+ */
+ if (use_spi) {
+ ret = regmap_read(data->regmap, BMI160_REG_DUMMY, &val);
+ if (ret < 0)
+ return ret;
+ }
+
+ ret = regmap_read(data->regmap, BMI160_REG_CHIP_ID, &val);
+ if (ret < 0) {
+ dev_err(dev, "Error reading chip id\n");
+ return ret;
+ }
+ if (val != BMI160_CHIP_ID_VAL) {
+ dev_err(dev, "Wrong chip id, got %x expected %x\n",
+ val, BMI160_CHIP_ID_VAL);
+ return -ENODEV;
+ }
+
+ ret = bmi160_set_mode(data, BMI160_ACCEL, true);
+ if (ret < 0)
+ return ret;
+
+ ret = bmi160_set_mode(data, BMI160_GYRO, true);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void bmi160_chip_uninit(struct bmi160_data *data)
+{
+ bmi160_set_mode(data, BMI160_GYRO, false);
+ bmi160_set_mode(data, BMI160_ACCEL, false);
+}
+
+int bmi160_core_probe(struct device *dev, struct regmap *regmap,
+ const char *name, bool use_spi)
+{
+ struct iio_dev *indio_dev;
+ struct bmi160_data *data;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ dev_set_drvdata(dev, indio_dev);
+ data->regmap = regmap;
+
+ ret = bmi160_chip_init(data, use_spi);
+ if (ret < 0)
+ return ret;
+
+ if (!name && ACPI_HANDLE(dev))
+ name = bmi160_match_acpi_device(dev);
+
+ indio_dev->dev.parent = dev;
+ indio_dev->channels = bmi160_channels;
+ indio_dev->num_channels = ARRAY_SIZE(bmi160_channels);
+ indio_dev->name = name;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->info = &bmi160_info;
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL,
+ bmi160_trigger_handler, NULL);
+ if (ret < 0)
+ goto uninit;
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ goto buffer_cleanup;
+
+ return 0;
+buffer_cleanup:
+ iio_triggered_buffer_cleanup(indio_dev);
+uninit:
+ bmi160_chip_uninit(data);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(bmi160_core_probe);
+
+void bmi160_core_remove(struct device *dev)
+{
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
+ struct bmi160_data *data = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+ bmi160_chip_uninit(data);
+}
+EXPORT_SYMBOL_GPL(bmi160_core_remove);
+
+MODULE_AUTHOR("Daniel Baluta <daniel.baluta@intel.com");
+MODULE_DESCRIPTION("Bosch BMI160 driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/imu/bmi160/bmi160_i2c.c b/drivers/iio/imu/bmi160/bmi160_i2c.c
new file mode 100644
index 000000000000..07a179d8fb48
--- /dev/null
+++ b/drivers/iio/imu/bmi160/bmi160_i2c.c
@@ -0,0 +1,72 @@
+/*
+ * BMI160 - Bosch IMU, I2C bits
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * 7-bit I2C slave address is:
+ * - 0x68 if SDO is pulled to GND
+ * - 0x69 if SDO is pulled to VDDIO
+ */
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/acpi.h>
+
+#include "bmi160.h"
+
+static int bmi160_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct regmap *regmap;
+ const char *name = NULL;
+
+ regmap = devm_regmap_init_i2c(client, &bmi160_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "Failed to register i2c regmap %d\n",
+ (int)PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+
+ if (id)
+ name = id->name;
+
+ return bmi160_core_probe(&client->dev, regmap, name, false);
+}
+
+static int bmi160_i2c_remove(struct i2c_client *client)
+{
+ bmi160_core_remove(&client->dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id bmi160_i2c_id[] = {
+ {"bmi160", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, bmi160_i2c_id);
+
+static const struct acpi_device_id bmi160_acpi_match[] = {
+ {"BMI0160", 0},
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, bmi160_acpi_match);
+
+static struct i2c_driver bmi160_i2c_driver = {
+ .driver = {
+ .name = "bmi160_i2c",
+ .acpi_match_table = ACPI_PTR(bmi160_acpi_match),
+ },
+ .probe = bmi160_i2c_probe,
+ .remove = bmi160_i2c_remove,
+ .id_table = bmi160_i2c_id,
+};
+module_i2c_driver(bmi160_i2c_driver);
+
+MODULE_AUTHOR("Daniel Baluta <daniel.baluta@intel.com>");
+MODULE_DESCRIPTION("BMI160 I2C driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/imu/bmi160/bmi160_spi.c b/drivers/iio/imu/bmi160/bmi160_spi.c
new file mode 100644
index 000000000000..1ec8b12bd984
--- /dev/null
+++ b/drivers/iio/imu/bmi160/bmi160_spi.c
@@ -0,0 +1,63 @@
+/*
+ * BMI160 - Bosch IMU, SPI bits
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ */
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/regmap.h>
+#include <linux/acpi.h>
+
+#include "bmi160.h"
+
+static int bmi160_spi_probe(struct spi_device *spi)
+{
+ struct regmap *regmap;
+ const struct spi_device_id *id = spi_get_device_id(spi);
+
+ regmap = devm_regmap_init_spi(spi, &bmi160_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&spi->dev, "Failed to register spi regmap %d\n",
+ (int)PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+ return bmi160_core_probe(&spi->dev, regmap, id->name, true);
+}
+
+static int bmi160_spi_remove(struct spi_device *spi)
+{
+ bmi160_core_remove(&spi->dev);
+
+ return 0;
+}
+
+static const struct spi_device_id bmi160_spi_id[] = {
+ {"bmi160", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, bmi160_spi_id);
+
+static const struct acpi_device_id bmi160_acpi_match[] = {
+ {"BMI0160", 0},
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, bmi160_acpi_match);
+
+static struct spi_driver bmi160_spi_driver = {
+ .probe = bmi160_spi_probe,
+ .remove = bmi160_spi_remove,
+ .id_table = bmi160_spi_id,
+ .driver = {
+ .acpi_match_table = ACPI_PTR(bmi160_acpi_match),
+ .name = "bmi160_spi",
+ },
+};
+module_spi_driver(bmi160_spi_driver);
+
+MODULE_AUTHOR("Daniel Baluta <daniel.baluta@intel.com");
+MODULE_DESCRIPTION("Bosch BMI160 SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/imu/inv_mpu6050/Kconfig b/drivers/iio/imu/inv_mpu6050/Kconfig
index 847455a2d6bb..f756feecfa4c 100644
--- a/drivers/iio/imu/inv_mpu6050/Kconfig
+++ b/drivers/iio/imu/inv_mpu6050/Kconfig
@@ -13,10 +13,8 @@ config INV_MPU6050_I2C
select INV_MPU6050_IIO
select REGMAP_I2C
help
- This driver supports the Invensense MPU6050 devices.
- This driver can also support MPU6500 in MPU6050 compatibility mode
- and also in MPU6500 mode with some limitations.
- It is a gyroscope/accelerometer combo device.
+ This driver supports the Invensense MPU6050/6500/9150 motion tracking
+ devices over I2C.
This driver can be built as a module. The module will be called
inv-mpu6050-i2c.
@@ -26,7 +24,7 @@ config INV_MPU6050_SPI
select INV_MPU6050_IIO
select REGMAP_SPI
help
- This driver supports the Invensense MPU6050 devices.
- It is a gyroscope/accelerometer combo device.
+ This driver supports the Invensense MPU6000/6500/9150 motion tracking
+ devices over SPI.
This driver can be built as a module. The module will be called
inv-mpu6050-spi.
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
index 0c2bded2b5b7..ee40dae5ab58 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_core.c
@@ -87,16 +87,29 @@ static const struct inv_mpu6050_chip_config chip_config_6050 = {
.accl_fs = INV_MPU6050_FS_02G,
};
+/* Indexed by enum inv_devices */
static const struct inv_mpu6050_hw hw_info[] = {
{
- .num_reg = 117,
+ .whoami = INV_MPU6050_WHOAMI_VALUE,
+ .name = "MPU6050",
+ .reg = &reg_set_6050,
+ .config = &chip_config_6050,
+ },
+ {
+ .whoami = INV_MPU6500_WHOAMI_VALUE,
.name = "MPU6500",
.reg = &reg_set_6500,
.config = &chip_config_6050,
},
{
- .num_reg = 117,
- .name = "MPU6050",
+ .whoami = INV_MPU6000_WHOAMI_VALUE,
+ .name = "MPU6000",
+ .reg = &reg_set_6050,
+ .config = &chip_config_6050,
+ },
+ {
+ .whoami = INV_MPU9150_WHOAMI_VALUE,
+ .name = "MPU9150",
.reg = &reg_set_6050,
.config = &chip_config_6050,
},
@@ -599,6 +612,10 @@ inv_fifo_rate_show(struct device *dev, struct device_attribute *attr,
/**
* inv_attr_show() - calling this function will show current
* parameters.
+ *
+ * Deprecated in favor of IIO mounting matrix API.
+ *
+ * See inv_get_mount_matrix()
*/
static ssize_t inv_attr_show(struct device *dev, struct device_attribute *attr,
char *buf)
@@ -643,6 +660,18 @@ static int inv_mpu6050_validate_trigger(struct iio_dev *indio_dev,
return 0;
}
+static const struct iio_mount_matrix *
+inv_get_mount_matrix(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ return &((struct inv_mpu6050_state *)iio_priv(indio_dev))->orientation;
+}
+
+static const struct iio_chan_spec_ext_info inv_ext_info[] = {
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_TYPE, inv_get_mount_matrix),
+ { },
+};
+
#define INV_MPU6050_CHAN(_type, _channel2, _index) \
{ \
.type = _type, \
@@ -659,6 +688,7 @@ static int inv_mpu6050_validate_trigger(struct iio_dev *indio_dev,
.shift = 0, \
.endianness = IIO_BE, \
}, \
+ .ext_info = inv_ext_info, \
}
static const struct iio_chan_spec inv_mpu_channels[] = {
@@ -691,14 +721,16 @@ static IIO_CONST_ATTR(in_accel_scale_available,
"0.000598 0.001196 0.002392 0.004785");
static IIO_DEV_ATTR_SAMP_FREQ(S_IRUGO | S_IWUSR, inv_fifo_rate_show,
inv_mpu6050_fifo_rate_store);
+
+/* Deprecated: kept for userspace backward compatibility. */
static IIO_DEVICE_ATTR(in_gyro_matrix, S_IRUGO, inv_attr_show, NULL,
ATTR_GYRO_MATRIX);
static IIO_DEVICE_ATTR(in_accel_matrix, S_IRUGO, inv_attr_show, NULL,
ATTR_ACCL_MATRIX);
static struct attribute *inv_attributes[] = {
- &iio_dev_attr_in_gyro_matrix.dev_attr.attr,
- &iio_dev_attr_in_accel_matrix.dev_attr.attr,
+ &iio_dev_attr_in_gyro_matrix.dev_attr.attr, /* deprecated */
+ &iio_dev_attr_in_accel_matrix.dev_attr.attr, /* deprecated */
&iio_dev_attr_sampling_frequency.dev_attr.attr,
&iio_const_attr_sampling_frequency_available.dev_attr.attr,
&iio_const_attr_in_accel_scale_available.dev_attr.attr,
@@ -725,6 +757,7 @@ static const struct iio_info mpu_info = {
static int inv_check_and_setup_chip(struct inv_mpu6050_state *st)
{
int result;
+ unsigned int regval;
st->hw = &hw_info[st->chip_type];
st->reg = hw_info[st->chip_type].reg;
@@ -735,6 +768,17 @@ static int inv_check_and_setup_chip(struct inv_mpu6050_state *st)
if (result)
return result;
msleep(INV_MPU6050_POWER_UP_TIME);
+
+ /* check chip self-identification */
+ result = regmap_read(st->map, INV_MPU6050_REG_WHOAMI, &regval);
+ if (result)
+ return result;
+ if (regval != st->hw->whoami) {
+ dev_warn(regmap_get_device(st->map),
+ "whoami mismatch got %#02x expected %#02hhx for %s\n",
+ regval, st->hw->whoami, st->hw->name);
+ }
+
/*
* toggle power state. After reset, the sleep bit could be on
* or off depending on the OTP settings. Toggling power would
@@ -773,14 +817,31 @@ int inv_mpu_core_probe(struct regmap *regmap, int irq, const char *name,
if (!indio_dev)
return -ENOMEM;
+ BUILD_BUG_ON(ARRAY_SIZE(hw_info) != INV_NUM_PARTS);
+ if (chip_type < 0 || chip_type >= INV_NUM_PARTS) {
+ dev_err(dev, "Bad invensense chip_type=%d name=%s\n",
+ chip_type, name);
+ return -ENODEV;
+ }
st = iio_priv(indio_dev);
st->chip_type = chip_type;
st->powerup_count = 0;
st->irq = irq;
st->map = regmap;
+
pdata = dev_get_platdata(dev);
- if (pdata)
+ if (!pdata) {
+ result = of_iio_read_mount_matrix(dev, "mount-matrix",
+ &st->orientation);
+ if (result) {
+ dev_err(dev, "Failed to retrieve mounting matrix %d\n",
+ result);
+ return result;
+ }
+ } else {
st->plat_data = *pdata;
+ }
+
/* power is turned on inside check chip type*/
result = inv_check_and_setup_chip(st);
if (result)
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
index 9ba1179105bd..e1fd7fa53e3b 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_i2c.c
@@ -169,13 +169,14 @@ static int inv_mpu_remove(struct i2c_client *client)
static const struct i2c_device_id inv_mpu_id[] = {
{"mpu6050", INV_MPU6050},
{"mpu6500", INV_MPU6500},
+ {"mpu9150", INV_MPU9150},
{}
};
MODULE_DEVICE_TABLE(i2c, inv_mpu_id);
static const struct acpi_device_id inv_acpi_match[] = {
- {"INVN6500", 0},
+ {"INVN6500", INV_MPU6500},
{ },
};
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
index bb3cef6d7059..3bf8544ccc9f 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_iio.h
@@ -69,6 +69,7 @@ enum inv_devices {
INV_MPU6050,
INV_MPU6500,
INV_MPU6000,
+ INV_MPU9150,
INV_NUM_PARTS
};
@@ -94,13 +95,13 @@ struct inv_mpu6050_chip_config {
/**
* struct inv_mpu6050_hw - Other important hardware information.
- * @num_reg: Number of registers on device.
+ * @whoami: Self identification byte from WHO_AM_I register
* @name: name of the chip.
* @reg: register map of the chip.
* @config: configuration of the chip.
*/
struct inv_mpu6050_hw {
- u8 num_reg;
+ u8 whoami;
u8 *name;
const struct inv_mpu6050_reg_map *reg;
const struct inv_mpu6050_chip_config *config;
@@ -115,7 +116,8 @@ struct inv_mpu6050_hw {
* @hw: Other hardware-specific information.
* @chip_type: chip type.
* @time_stamp_lock: spin lock to time stamp.
- * @plat_data: platform data.
+ * @plat_data: platform data (deprecated in favor of @orientation).
+ * @orientation: sensor chip orientation relative to main hardware.
* @timestamps: kfifo queue to store time stamp.
* @map regmap pointer.
* @irq interrupt number.
@@ -132,6 +134,7 @@ struct inv_mpu6050_state {
struct i2c_client *mux_client;
unsigned int powerup_count;
struct inv_mpu6050_platform_data plat_data;
+ struct iio_mount_matrix orientation;
DECLARE_KFIFO(timestamps, long long, TIMESTAMP_FIFO_SIZE);
struct regmap *map;
int irq;
@@ -216,6 +219,13 @@ struct inv_mpu6050_state {
#define INV_MPU6050_MIN_FIFO_RATE 4
#define INV_MPU6050_ONE_K_HZ 1000
+#define INV_MPU6050_REG_WHOAMI 117
+
+#define INV_MPU6000_WHOAMI_VALUE 0x68
+#define INV_MPU6050_WHOAMI_VALUE 0x68
+#define INV_MPU6500_WHOAMI_VALUE 0x70
+#define INV_MPU9150_WHOAMI_VALUE 0x68
+
/* scan element definition */
enum inv_mpu6050_scan {
INV_MPU6050_SCAN_ACCL_X,
diff --git a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
index 7bcb8d839f05..190a4a51c830 100644
--- a/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
+++ b/drivers/iio/imu/inv_mpu6050/inv_mpu_spi.c
@@ -44,9 +44,19 @@ static int inv_mpu_i2c_disable(struct iio_dev *indio_dev)
static int inv_mpu_probe(struct spi_device *spi)
{
struct regmap *regmap;
- const struct spi_device_id *id = spi_get_device_id(spi);
- const char *name = id ? id->name : NULL;
- const int chip_type = id ? id->driver_data : 0;
+ const struct spi_device_id *spi_id;
+ const struct acpi_device_id *acpi_id;
+ const char *name = NULL;
+ enum inv_devices chip_type;
+
+ if ((spi_id = spi_get_device_id(spi))) {
+ chip_type = (enum inv_devices)spi_id->driver_data;
+ name = spi_id->name;
+	} else if ((acpi_id = acpi_match_device(spi->dev.driver->acpi_match_table,
+						&spi->dev))) {
+ chip_type = (enum inv_devices)acpi_id->driver_data;
+ } else {
+ return -ENODEV;
+ }
regmap = devm_regmap_init_spi(spi, &inv_mpu_regmap_config);
if (IS_ERR(regmap)) {
@@ -70,13 +80,15 @@ static int inv_mpu_remove(struct spi_device *spi)
*/
static const struct spi_device_id inv_mpu_id[] = {
{"mpu6000", INV_MPU6000},
+ {"mpu6500", INV_MPU6500},
+ {"mpu9150", INV_MPU9150},
{}
};
MODULE_DEVICE_TABLE(spi, inv_mpu_id);
static const struct acpi_device_id inv_acpi_match[] = {
- {"INVN6000", 0},
+ {"INVN6000", INV_MPU6000},
{ },
};
MODULE_DEVICE_TABLE(acpi, inv_acpi_match);
diff --git a/drivers/iio/imu/kmx61.c b/drivers/iio/imu/kmx61.c
index e5306b4e020e..2e7dd5754a56 100644
--- a/drivers/iio/imu/kmx61.c
+++ b/drivers/iio/imu/kmx61.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/acpi.h>
-#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 70cb7eb0a75c..e6319a9346b2 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -25,6 +25,7 @@
#include <linux/slab.h>
#include <linux/anon_inodes.h>
#include <linux/debugfs.h>
+#include <linux/mutex.h>
#include <linux/iio/iio.h>
#include "iio_core.h"
#include "iio_core_trigger.h"
@@ -78,6 +79,7 @@ static const char * const iio_chan_type_name_spec[] = {
[IIO_CONCENTRATION] = "concentration",
[IIO_RESISTANCE] = "resistance",
[IIO_PH] = "ph",
+ [IIO_UVINDEX] = "uvindex",
};
static const char * const iio_modifier_names[] = {
@@ -100,6 +102,7 @@ static const char * const iio_modifier_names[] = {
[IIO_MOD_LIGHT_RED] = "red",
[IIO_MOD_LIGHT_GREEN] = "green",
[IIO_MOD_LIGHT_BLUE] = "blue",
+ [IIO_MOD_LIGHT_UV] = "uv",
[IIO_MOD_QUATERNION] = "quaternion",
[IIO_MOD_TEMP_AMBIENT] = "ambient",
[IIO_MOD_TEMP_OBJECT] = "object",
@@ -409,6 +412,88 @@ ssize_t iio_enum_write(struct iio_dev *indio_dev,
}
EXPORT_SYMBOL_GPL(iio_enum_write);
+static const struct iio_mount_matrix iio_mount_idmatrix = {
+ .rotation = {
+ "1", "0", "0",
+ "0", "1", "0",
+ "0", "0", "1"
+ }
+};
+
+static int iio_setup_mount_idmatrix(const struct device *dev,
+ struct iio_mount_matrix *matrix)
+{
+ *matrix = iio_mount_idmatrix;
+ dev_info(dev, "mounting matrix not found: using identity...\n");
+ return 0;
+}
+
+ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv,
+ const struct iio_chan_spec *chan, char *buf)
+{
+ const struct iio_mount_matrix *mtx = ((iio_get_mount_matrix_t *)
+ priv)(indio_dev, chan);
+
+ if (IS_ERR(mtx))
+ return PTR_ERR(mtx);
+
+ if (!mtx)
+ mtx = &iio_mount_idmatrix;
+
+ return snprintf(buf, PAGE_SIZE, "%s, %s, %s; %s, %s, %s; %s, %s, %s\n",
+ mtx->rotation[0], mtx->rotation[1], mtx->rotation[2],
+ mtx->rotation[3], mtx->rotation[4], mtx->rotation[5],
+ mtx->rotation[6], mtx->rotation[7], mtx->rotation[8]);
+}
+EXPORT_SYMBOL_GPL(iio_show_mount_matrix);
+
+/**
+ * of_iio_read_mount_matrix() - retrieve iio device mounting matrix from
+ * device-tree "mount-matrix" property
+ * @dev: device the mounting matrix property is assigned to
+ * @propname: device specific mounting matrix property name
+ * @matrix: where to store retrieved matrix
+ *
+ * If device is assigned no mounting matrix property, a default 3x3 identity
+ * matrix will be filled in.
+ *
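+ * For instance, an identity rotation would typically be declared as:
+ *
+ *	mount-matrix = "1", "0", "0",
+ *	               "0", "1", "0",
+ *	               "0", "0", "1";
+ *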
+ * Return: 0 if success, or a negative error code on failure.
+ */
+#ifdef CONFIG_OF
+int of_iio_read_mount_matrix(const struct device *dev,
+ const char *propname,
+ struct iio_mount_matrix *matrix)
+{
+ if (dev->of_node) {
+ int err = of_property_read_string_array(dev->of_node,
+ propname, matrix->rotation,
+ ARRAY_SIZE(iio_mount_idmatrix.rotation));
+
+ if (err == ARRAY_SIZE(iio_mount_idmatrix.rotation))
+ return 0;
+
+ if (err >= 0)
+ /* Invalid number of matrix entries. */
+ return -EINVAL;
+
+ if (err != -EINVAL)
+ /* Invalid matrix declaration format. */
+ return err;
+ }
+
+ /* Matrix was not declared at all: fallback to identity. */
+ return iio_setup_mount_idmatrix(dev, matrix);
+}
+#else
+int of_iio_read_mount_matrix(const struct device *dev,
+ const char *propname,
+ struct iio_mount_matrix *matrix)
+{
+ return iio_setup_mount_idmatrix(dev, matrix);
+}
+#endif
+EXPORT_SYMBOL(of_iio_read_mount_matrix);
+
/**
* iio_format_value() - Formats a IIO value into its string representation
* @buf: The buffer to which the formatted value gets written
@@ -1375,6 +1460,44 @@ void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev)
}
EXPORT_SYMBOL_GPL(devm_iio_device_unregister);
+/**
+ * iio_device_claim_direct_mode - Keep device in direct mode
+ * @indio_dev: the iio_dev associated with the device
+ *
+ * If the device is in direct mode it is guaranteed to stay
+ * that way until iio_device_release_direct_mode() is called.
+ *
+ * Use with iio_device_release_direct_mode()
+ *
+ * Returns: 0 on success, -EBUSY on failure
+ */
+int iio_device_claim_direct_mode(struct iio_dev *indio_dev)
+{
+ mutex_lock(&indio_dev->mlock);
+
+ if (iio_buffer_enabled(indio_dev)) {
+ mutex_unlock(&indio_dev->mlock);
+ return -EBUSY;
+ }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(iio_device_claim_direct_mode);
+
+/**
+ * iio_device_release_direct_mode - releases claim on direct mode
+ * @indio_dev: the iio_dev associated with the device
+ *
+ * Release the claim. Device is no longer guaranteed to stay
+ * in direct mode.
+ *
+ * Use with iio_device_claim_direct_mode()
+ */
+void iio_device_release_direct_mode(struct iio_dev *indio_dev)
+{
+ mutex_unlock(&indio_dev->mlock);
+}
+EXPORT_SYMBOL_GPL(iio_device_release_direct_mode);
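+
+/*
+ * Typical use in a driver's read path (illustrative sketch only; the
+ * mydrv_read_oneshot() helper is a placeholder, not part of this API):
+ *
+ *	ret = iio_device_claim_direct_mode(indio_dev);
+ *	if (ret)
+ *		return ret;
+ *	ret = mydrv_read_oneshot(st, chan, val);
+ *	iio_device_release_direct_mode(indio_dev);
+ *	return ret;
+ */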
+
subsys_initcall(iio_init);
module_exit(iio_exit);
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index 734a0042de0c..c4757e6367e7 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -356,6 +356,54 @@ void iio_channel_release(struct iio_channel *channel)
}
EXPORT_SYMBOL_GPL(iio_channel_release);
+static void devm_iio_channel_free(struct device *dev, void *res)
+{
+ struct iio_channel *channel = *(struct iio_channel **)res;
+
+ iio_channel_release(channel);
+}
+
+static int devm_iio_channel_match(struct device *dev, void *res, void *data)
+{
+ struct iio_channel **r = res;
+
+ if (!r || !*r) {
+ WARN_ON(!r || !*r);
+ return 0;
+ }
+
+ return *r == data;
+}
+
+struct iio_channel *devm_iio_channel_get(struct device *dev,
+ const char *channel_name)
+{
+ struct iio_channel **ptr, *channel;
+
+ ptr = devres_alloc(devm_iio_channel_free, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ channel = iio_channel_get(dev, channel_name);
+ if (IS_ERR(channel)) {
+ devres_free(ptr);
+ return channel;
+ }
+
+ *ptr = channel;
+ devres_add(dev, ptr);
+
+ return channel;
+}
+EXPORT_SYMBOL_GPL(devm_iio_channel_get);
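+
+/*
+ * Consumer example (illustrative sketch; "vbat" is a made-up channel
+ * mapping name):
+ *
+ *	chan = devm_iio_channel_get(&pdev->dev, "vbat");
+ *	if (IS_ERR(chan))
+ *		return PTR_ERR(chan);
+ *	ret = iio_read_channel_processed(chan, &val);
+ */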
+
+void devm_iio_channel_release(struct device *dev, struct iio_channel *channel)
+{
+ WARN_ON(devres_release(dev, devm_iio_channel_free,
+ devm_iio_channel_match, channel));
+}
+EXPORT_SYMBOL_GPL(devm_iio_channel_release);
+
struct iio_channel *iio_channel_get_all(struct device *dev)
{
const char *name;
@@ -441,6 +489,42 @@ void iio_channel_release_all(struct iio_channel *channels)
}
EXPORT_SYMBOL_GPL(iio_channel_release_all);
+static void devm_iio_channel_free_all(struct device *dev, void *res)
+{
+ struct iio_channel *channels = *(struct iio_channel **)res;
+
+ iio_channel_release_all(channels);
+}
+
+struct iio_channel *devm_iio_channel_get_all(struct device *dev)
+{
+ struct iio_channel **ptr, *channels;
+
+ ptr = devres_alloc(devm_iio_channel_free_all, sizeof(*ptr), GFP_KERNEL);
+ if (!ptr)
+ return ERR_PTR(-ENOMEM);
+
+ channels = iio_channel_get_all(dev);
+ if (IS_ERR(channels)) {
+ devres_free(ptr);
+ return channels;
+ }
+
+ *ptr = channels;
+ devres_add(dev, ptr);
+
+ return channels;
+}
+EXPORT_SYMBOL_GPL(devm_iio_channel_get_all);
+
+void devm_iio_channel_release_all(struct device *dev,
+ struct iio_channel *channels)
+{
+ WARN_ON(devres_release(dev, devm_iio_channel_free_all,
+ devm_iio_channel_match, channels));
+}
+EXPORT_SYMBOL_GPL(devm_iio_channel_release_all);
+
static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
enum iio_chan_info_enum info)
{
@@ -452,7 +536,7 @@ static int iio_channel_read(struct iio_channel *chan, int *val, int *val2,
if (val2 == NULL)
val2 = &unused;
- if(!iio_channel_has_info(chan->channel, info))
+ if (!iio_channel_has_info(chan->channel, info))
return -EINVAL;
if (chan->indio_dev->info->read_raw_multi) {
diff --git a/drivers/iio/light/Kconfig b/drivers/iio/light/Kconfig
index cfd3df8416bb..7c566f516572 100644
--- a/drivers/iio/light/Kconfig
+++ b/drivers/iio/light/Kconfig
@@ -73,6 +73,17 @@ config BH1750
To compile this driver as a module, choose M here: the module will
be called bh1750.
+config BH1780
+ tristate "ROHM BH1780 ambient light sensor"
+ depends on I2C
+ depends on !SENSORS_BH1780
+ help
+ Say Y here to build support for the ROHM BH1780GLI ambient
+ light sensor.
+
+ To compile this driver as a module, choose M here: the module will
+ be called bh1780.
+
config CM32181
depends on I2C
tristate "CM32181 driver"
@@ -223,6 +234,17 @@ config LTR501
This driver can also be built as a module. If so, the module
will be called ltr501.
+config MAX44000
+ tristate "MAX44000 Ambient and Infrared Proximity Sensor"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say Y here if you want to build support for Maxim Integrated's
+ MAX44000 ambient and infrared proximity sensor device.
+
+ To compile this driver as a module, choose M here:
+ the module will be called max44000.
+
config OPT3001
tristate "Texas Instruments OPT3001 Light Sensor"
depends on I2C
@@ -320,4 +342,14 @@ config VCNL4000
To compile this driver as a module, choose M here: the
module will be called vcnl4000.
+config VEML6070
+ tristate "VEML6070 UV A light sensor"
+ depends on I2C
+ help
+ Say Y here if you want to build a driver for the Vishay VEML6070 UV A
+ light sensor.
+
+ To compile this driver as a module, choose M here: the
+ module will be called veml6070.
+
endmenu
diff --git a/drivers/iio/light/Makefile b/drivers/iio/light/Makefile
index b2c31053db0c..6f2a3c62de27 100644
--- a/drivers/iio/light/Makefile
+++ b/drivers/iio/light/Makefile
@@ -9,6 +9,7 @@ obj-$(CONFIG_AL3320A) += al3320a.o
obj-$(CONFIG_APDS9300) += apds9300.o
obj-$(CONFIG_APDS9960) += apds9960.o
obj-$(CONFIG_BH1750) += bh1750.o
+obj-$(CONFIG_BH1780) += bh1780.o
obj-$(CONFIG_CM32181) += cm32181.o
obj-$(CONFIG_CM3232) += cm3232.o
obj-$(CONFIG_CM3323) += cm3323.o
@@ -20,6 +21,7 @@ obj-$(CONFIG_ISL29125) += isl29125.o
obj-$(CONFIG_JSA1212) += jsa1212.o
obj-$(CONFIG_SENSORS_LM3533) += lm3533-als.o
obj-$(CONFIG_LTR501) += ltr501.o
+obj-$(CONFIG_MAX44000) += max44000.o
obj-$(CONFIG_OPT3001) += opt3001.o
obj-$(CONFIG_PA12203001) += pa12203001.o
obj-$(CONFIG_RPR0521) += rpr0521.o
@@ -30,3 +32,4 @@ obj-$(CONFIG_TCS3472) += tcs3472.o
obj-$(CONFIG_TSL4531) += tsl4531.o
obj-$(CONFIG_US5182D) += us5182d.o
obj-$(CONFIG_VCNL4000) += vcnl4000.o
+obj-$(CONFIG_VEML6070) += veml6070.o
diff --git a/drivers/iio/light/apds9960.c b/drivers/iio/light/apds9960.c
index a6af56ad10e1..b4dbb3912977 100644
--- a/drivers/iio/light/apds9960.c
+++ b/drivers/iio/light/apds9960.c
@@ -321,8 +321,12 @@ static const struct iio_chan_spec apds9960_channels[] = {
};
/* integration time in us */
-static const int apds9960_int_time[][2] =
- { {28000, 246}, {100000, 219}, {200000, 182}, {700000, 0} };
+static const int apds9960_int_time[][2] = {
+ { 28000, 246},
+ {100000, 219},
+ {200000, 182},
+ {700000, 0}
+};
/* gain mapping */
static const int apds9960_pxs_gain_map[] = {1, 2, 4, 8};
@@ -491,9 +495,10 @@ static int apds9960_read_raw(struct iio_dev *indio_dev,
case IIO_INTENSITY:
ret = regmap_bulk_read(data->regmap, chan->address,
&buf, 2);
- if (!ret)
+ if (!ret) {
ret = IIO_VAL_INT;
- *val = le16_to_cpu(buf);
+ *val = le16_to_cpu(buf);
+ }
break;
default:
ret = -EINVAL;
diff --git a/drivers/iio/light/bh1780.c b/drivers/iio/light/bh1780.c
new file mode 100644
index 000000000000..72b364e4aa72
--- /dev/null
+++ b/drivers/iio/light/bh1780.c
@@ -0,0 +1,297 @@
+/*
+ * ROHM 1780GLI Ambient Light Sensor Driver
+ *
+ * Copyright (C) 2016 Linaro Ltd.
+ * Author: Linus Walleij <linus.walleij@linaro.org>
+ * Loosely based on the previous BH1780 ALS misc driver
+ * Copyright (C) 2010 Texas Instruments
+ * Author: Hemanth V <hemanthv@ti.com>
+ */
+#include <linux/i2c.h>
+#include <linux/slab.h>
+#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/pm_runtime.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/bitops.h>
+
+#define BH1780_CMD_BIT BIT(7)
+#define BH1780_REG_CONTROL 0x00
+#define BH1780_REG_PARTID 0x0A
+#define BH1780_REG_MANFID 0x0B
+#define BH1780_REG_DLOW 0x0C
+#define BH1780_REG_DHIGH 0x0D
+
+#define BH1780_REVMASK GENMASK(3, 0)
+#define BH1780_POWMASK GENMASK(1, 0)
+#define BH1780_POFF (0x0)
+#define BH1780_PON (0x3)
+
+/* power on settling time in ms */
+#define BH1780_PON_DELAY 2
+/* max time before value available in ms */
+#define BH1780_INTERVAL 250
+
+struct bh1780_data {
+ struct i2c_client *client;
+};
+
+static int bh1780_write(struct bh1780_data *bh1780, u8 reg, u8 val)
+{
+ int ret = i2c_smbus_write_byte_data(bh1780->client,
+ BH1780_CMD_BIT | reg,
+ val);
+ if (ret < 0)
+ dev_err(&bh1780->client->dev,
+ "i2c_smbus_write_byte_data failed error "
+ "%d, register %01x\n",
+ ret, reg);
+ return ret;
+}
+
+static int bh1780_read(struct bh1780_data *bh1780, u8 reg)
+{
+ int ret = i2c_smbus_read_byte_data(bh1780->client,
+ BH1780_CMD_BIT | reg);
+ if (ret < 0)
+ dev_err(&bh1780->client->dev,
+ "i2c_smbus_read_byte_data failed error "
+ "%d, register %01x\n",
+ ret, reg);
+ return ret;
+}
+
+static int bh1780_read_word(struct bh1780_data *bh1780, u8 reg)
+{
+ int ret = i2c_smbus_read_word_data(bh1780->client,
+ BH1780_CMD_BIT | reg);
+ if (ret < 0)
+ dev_err(&bh1780->client->dev,
+ "i2c_smbus_read_word_data failed error "
+ "%d, register %01x\n",
+ ret, reg);
+ return ret;
+}
+
+static int bh1780_debugfs_reg_access(struct iio_dev *indio_dev,
+ unsigned int reg, unsigned int writeval,
+ unsigned int *readval)
+{
+ struct bh1780_data *bh1780 = iio_priv(indio_dev);
+ int ret;
+
+	if (!readval)
+		return bh1780_write(bh1780, (u8)reg, (u8)writeval);
+
+ ret = bh1780_read(bh1780, (u8)reg);
+ if (ret < 0)
+ return ret;
+
+ *readval = ret;
+
+ return 0;
+}
+
+static int bh1780_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct bh1780_data *bh1780 = iio_priv(indio_dev);
+ int value;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_LIGHT:
+ pm_runtime_get_sync(&bh1780->client->dev);
+			value = bh1780_read_word(bh1780, BH1780_REG_DLOW);
+			if (value < 0) {
+				pm_runtime_put(&bh1780->client->dev);
+				return value;
+			}
+ pm_runtime_mark_last_busy(&bh1780->client->dev);
+ pm_runtime_put_autosuspend(&bh1780->client->dev);
+ *val = value;
+
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ case IIO_CHAN_INFO_INT_TIME:
+ *val = 0;
+ *val2 = BH1780_INTERVAL * 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info bh1780_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = bh1780_read_raw,
+ .debugfs_reg_access = bh1780_debugfs_reg_access,
+};
+
+static const struct iio_chan_spec bh1780_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_INT_TIME)
+ }
+};
+
+static int bh1780_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ int ret;
+ struct bh1780_data *bh1780;
+ struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
+ struct iio_dev *indio_dev;
+
+ if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE))
+ return -EIO;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*bh1780));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ bh1780 = iio_priv(indio_dev);
+ bh1780->client = client;
+ i2c_set_clientdata(client, indio_dev);
+
+ /* Power up the device */
+ ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_PON);
+ if (ret < 0)
+ return ret;
+ msleep(BH1780_PON_DELAY);
+ pm_runtime_get_noresume(&client->dev);
+ pm_runtime_set_active(&client->dev);
+ pm_runtime_enable(&client->dev);
+
+ ret = bh1780_read(bh1780, BH1780_REG_PARTID);
+ if (ret < 0)
+ goto out_disable_pm;
+ dev_info(&client->dev,
+ "Ambient Light Sensor, Rev : %lu\n",
+ (ret & BH1780_REVMASK));
+
+ /*
+ * As the device takes 250 ms to even come up with a fresh
+ * measurement after power-on, do not shut it down unnecessarily.
+	 * Set autosuspend to five seconds.
+ */
+ pm_runtime_set_autosuspend_delay(&client->dev, 5000);
+ pm_runtime_use_autosuspend(&client->dev);
+ pm_runtime_put(&client->dev);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &bh1780_info;
+ indio_dev->name = id->name;
+ indio_dev->channels = bh1780_channels;
+ indio_dev->num_channels = ARRAY_SIZE(bh1780_channels);
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ ret = iio_device_register(indio_dev);
+ if (ret)
+ goto out_disable_pm;
+ return 0;
+
+out_disable_pm:
+ pm_runtime_put_noidle(&client->dev);
+ pm_runtime_disable(&client->dev);
+ return ret;
+}
+
+static int bh1780_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct bh1780_data *bh1780 = iio_priv(indio_dev);
+ int ret;
+
+ iio_device_unregister(indio_dev);
+ pm_runtime_get_sync(&client->dev);
+ pm_runtime_put_noidle(&client->dev);
+ pm_runtime_disable(&client->dev);
+ ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_POFF);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to power off\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+#ifdef CONFIG_PM
+static int bh1780_runtime_suspend(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct bh1780_data *bh1780 = i2c_get_clientdata(client);
+ int ret;
+
+ ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_POFF);
+ if (ret < 0) {
+ dev_err(dev, "failed to runtime suspend\n");
+ return ret;
+ }
+
+ return 0;
+}
+
+static int bh1780_runtime_resume(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
+ struct bh1780_data *bh1780 = i2c_get_clientdata(client);
+ int ret;
+
+ ret = bh1780_write(bh1780, BH1780_REG_CONTROL, BH1780_PON);
+ if (ret < 0) {
+ dev_err(dev, "failed to runtime resume\n");
+ return ret;
+ }
+
+ /* Wait for power on, then for a value to be available */
+ msleep(BH1780_PON_DELAY + BH1780_INTERVAL);
+
+ return 0;
+}
+#endif /* CONFIG_PM */
+
+static const struct dev_pm_ops bh1780_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
+ SET_RUNTIME_PM_OPS(bh1780_runtime_suspend,
+ bh1780_runtime_resume, NULL)
+};
+
+static const struct i2c_device_id bh1780_id[] = {
+ { "bh1780", 0 },
+ { },
+};
+
+MODULE_DEVICE_TABLE(i2c, bh1780_id);
+
+#ifdef CONFIG_OF
+static const struct of_device_id of_bh1780_match[] = {
+ { .compatible = "rohm,bh1780gli", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, of_bh1780_match);
+#endif
+
+static struct i2c_driver bh1780_driver = {
+ .probe = bh1780_probe,
+ .remove = bh1780_remove,
+ .id_table = bh1780_id,
+ .driver = {
+ .name = "bh1780",
+ .pm = &bh1780_dev_pm_ops,
+ .of_match_table = of_match_ptr(of_bh1780_match),
+ },
+};
+
+module_i2c_driver(bh1780_driver);
+
+MODULE_DESCRIPTION("ROHM BH1780GLI Ambient Light Sensor Driver");
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>");
diff --git a/drivers/iio/light/max44000.c b/drivers/iio/light/max44000.c
new file mode 100644
index 000000000000..e01e58a9bd14
--- /dev/null
+++ b/drivers/iio/light/max44000.c
@@ -0,0 +1,639 @@
+/*
+ * MAX44000 Ambient and Infrared Proximity Sensor
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * Data sheet: https://datasheets.maximintegrated.com/en/ds/MAX44000.pdf
+ *
+ * 7-bit I2C slave address 0x4a
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/util_macros.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+#include <linux/acpi.h>
+
+#define MAX44000_DRV_NAME "max44000"
+
+/* Registers in datasheet order */
+#define MAX44000_REG_STATUS 0x00
+#define MAX44000_REG_CFG_MAIN 0x01
+#define MAX44000_REG_CFG_RX 0x02
+#define MAX44000_REG_CFG_TX 0x03
+#define MAX44000_REG_ALS_DATA_HI 0x04
+#define MAX44000_REG_ALS_DATA_LO 0x05
+#define MAX44000_REG_PRX_DATA 0x16
+#define MAX44000_REG_ALS_UPTHR_HI 0x06
+#define MAX44000_REG_ALS_UPTHR_LO 0x07
+#define MAX44000_REG_ALS_LOTHR_HI 0x08
+#define MAX44000_REG_ALS_LOTHR_LO 0x09
+#define MAX44000_REG_PST 0x0a
+#define MAX44000_REG_PRX_IND 0x0b
+#define MAX44000_REG_PRX_THR 0x0c
+#define MAX44000_REG_TRIM_GAIN_GREEN 0x0f
+#define MAX44000_REG_TRIM_GAIN_IR 0x10
+
+/* REG_CFG bits */
+#define MAX44000_CFG_ALSINTE 0x01
+#define MAX44000_CFG_PRXINTE 0x02
+#define MAX44000_CFG_MASK 0x1c
+#define MAX44000_CFG_MODE_SHUTDOWN 0x00
+#define MAX44000_CFG_MODE_ALS_GIR 0x04
+#define MAX44000_CFG_MODE_ALS_G 0x08
+#define MAX44000_CFG_MODE_ALS_IR 0x0c
+#define MAX44000_CFG_MODE_ALS_PRX 0x10
+#define MAX44000_CFG_MODE_PRX 0x14
+#define MAX44000_CFG_TRIM 0x20
+
+/*
+ * The upper 4 bits are not documented but start as 1 on power-up.
+ * Setting them to 0 causes proximity to misbehave, so keep them set to 1.
+ */
+#define MAX44000_REG_CFG_RX_DEFAULT 0xf0
+
+/* REG_RX bits */
+#define MAX44000_CFG_RX_ALSTIM_MASK 0x0c
+#define MAX44000_CFG_RX_ALSTIM_SHIFT 2
+#define MAX44000_CFG_RX_ALSPGA_MASK 0x03
+#define MAX44000_CFG_RX_ALSPGA_SHIFT 0
+
+/* REG_TX bits */
+#define MAX44000_LED_CURRENT_MASK 0xf
+#define MAX44000_LED_CURRENT_MAX 11
+#define MAX44000_LED_CURRENT_DEFAULT 6
+
+#define MAX44000_ALSDATA_OVERFLOW 0x4000
+
+struct max44000_data {
+ struct mutex lock;
+ struct regmap *regmap;
+};
+
+/* Default scale is set to the minimum of 0.03125 or 1 / (1 << 5) lux */
+#define MAX44000_ALS_TO_LUX_DEFAULT_FRACTION_LOG2 5
+
+/* Scale can be multiplied by up to 128x via ALSPGA for measurement gain */
+static const int max44000_alspga_shift[] = {0, 2, 4, 7};
+#define MAX44000_ALSPGA_MAX_SHIFT 7
+
+/*
+ * Scale can be multiplied by up to 64x via ALSTIM because of lost resolution
+ *
+ * This scaling factor is hidden from userspace and instead accounted for when
+ * reading raw values from the device.
+ *
+ * This makes it possible to cleanly expose ALSPGA as IIO_CHAN_INFO_SCALE and
+ * ALSTIM as IIO_CHAN_INFO_INT_TIME without the values affecting each other.
+ *
+ * Handling this internally is also required for buffer support because the
+ * channel's scan_type can't be modified dynamically.
+ */
+static const int max44000_alstim_shift[] = {0, 2, 4, 6};
+#define MAX44000_ALSTIM_SHIFT(alstim) (2 * (alstim))
+
+/* Available integration times with pretty manual alignment: */
+static const int max44000_int_time_avail_ns_array[] = {
+ 100000000,
+ 25000000,
+ 6250000,
+ 1562500,
+};
+static const char max44000_int_time_avail_str[] =
+ "0.100 "
+ "0.025 "
+ "0.00625 "
+ "0.001625";
+
+/* Available scales (internal to ulux) with pretty manual alignment: */
+static const int max44000_scale_avail_ulux_array[] = {
+ 31250,
+ 125000,
+ 500000,
+ 4000000,
+};
+static const char max44000_scale_avail_str[] =
+ "0.03125 "
+ "0.125 "
+ "0.5 "
+ "4";
+
+#define MAX44000_SCAN_INDEX_ALS 0
+#define MAX44000_SCAN_INDEX_PRX 1
+
+static const struct iio_chan_spec max44000_channels[] = {
+ {
+ .type = IIO_LIGHT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_INT_TIME),
+ .scan_index = MAX44000_SCAN_INDEX_ALS,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 14,
+ .storagebits = 16,
+ }
+ },
+ {
+ .type = IIO_PROXIMITY,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ .scan_index = MAX44000_SCAN_INDEX_PRX,
+ .scan_type = {
+ .sign = 'u',
+ .realbits = 8,
+ .storagebits = 16,
+ }
+ },
+ IIO_CHAN_SOFT_TIMESTAMP(2),
+ {
+ .type = IIO_CURRENT,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE),
+ .extend_name = "led",
+ .output = 1,
+ .scan_index = -1,
+ },
+};
+
+static int max44000_read_alstim(struct max44000_data *data)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(data->regmap, MAX44000_REG_CFG_RX, &val);
+ if (ret < 0)
+ return ret;
+ return (val & MAX44000_CFG_RX_ALSTIM_MASK) >> MAX44000_CFG_RX_ALSTIM_SHIFT;
+}
+
+static int max44000_write_alstim(struct max44000_data *data, int val)
+{
+ return regmap_write_bits(data->regmap, MAX44000_REG_CFG_RX,
+ MAX44000_CFG_RX_ALSTIM_MASK,
+ val << MAX44000_CFG_RX_ALSTIM_SHIFT);
+}
+
+static int max44000_read_alspga(struct max44000_data *data)
+{
+ unsigned int val;
+ int ret;
+
+ ret = regmap_read(data->regmap, MAX44000_REG_CFG_RX, &val);
+ if (ret < 0)
+ return ret;
+ return (val & MAX44000_CFG_RX_ALSPGA_MASK) >> MAX44000_CFG_RX_ALSPGA_SHIFT;
+}
+
+static int max44000_write_alspga(struct max44000_data *data, int val)
+{
+ return regmap_write_bits(data->regmap, MAX44000_REG_CFG_RX,
+ MAX44000_CFG_RX_ALSPGA_MASK,
+ val << MAX44000_CFG_RX_ALSPGA_SHIFT);
+}
+
+static int max44000_read_alsval(struct max44000_data *data)
+{
+ u16 regval;
+ int alstim, ret;
+
+ ret = regmap_bulk_read(data->regmap, MAX44000_REG_ALS_DATA_HI,
+ &regval, sizeof(regval));
+ if (ret < 0)
+ return ret;
+ alstim = ret = max44000_read_alstim(data);
+ if (ret < 0)
+ return ret;
+
+ regval = be16_to_cpu(regval);
+
+ /*
+ * Overflow is explained on datasheet page 17.
+ *
+ * It's a warning that either the G or IR channel has become saturated
+ * and that the value in the register is likely incorrect.
+ *
+ * The recommendation is to change the scale (ALSPGA).
+ * The driver just returns the max representable value.
+ */
+ if (regval & MAX44000_ALSDATA_OVERFLOW)
+ return 0x3FFF;
+
+ return regval << MAX44000_ALSTIM_SHIFT(alstim);
+}
+
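+/*
+ * The LED current field is not contiguous: raw values 0-7 map directly to
+ * register codes 0-7, while raw 8-11 map to codes 12-15 (codes 8-11 are
+ * skipped), hence the +/-4 adjustment on write and read below.
+ */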
+static int max44000_write_led_current_raw(struct max44000_data *data, int val)
+{
+ /* Maybe we should clamp the value instead? */
+ if (val < 0 || val > MAX44000_LED_CURRENT_MAX)
+ return -ERANGE;
+ if (val >= 8)
+ val += 4;
+ return regmap_write_bits(data->regmap, MAX44000_REG_CFG_TX,
+ MAX44000_LED_CURRENT_MASK, val);
+}
+
+static int max44000_read_led_current_raw(struct max44000_data *data)
+{
+ unsigned int regval;
+ int ret;
+
+ ret = regmap_read(data->regmap, MAX44000_REG_CFG_TX, &regval);
+ if (ret < 0)
+ return ret;
+ regval &= MAX44000_LED_CURRENT_MASK;
+ if (regval >= 8)
+ regval -= 4;
+ return regval;
+}
+
+static int max44000_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct max44000_data *data = iio_priv(indio_dev);
+ int alstim, alspga;
+ unsigned int regval;
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_LIGHT:
+ mutex_lock(&data->lock);
+ ret = max44000_read_alsval(data);
+ mutex_unlock(&data->lock);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+
+ case IIO_PROXIMITY:
+ mutex_lock(&data->lock);
+ ret = regmap_read(data->regmap, MAX44000_REG_PRX_DATA, &regval);
+ mutex_unlock(&data->lock);
+ if (ret < 0)
+ return ret;
+ *val = regval;
+ return IIO_VAL_INT;
+
+ case IIO_CURRENT:
+ mutex_lock(&data->lock);
+ ret = max44000_read_led_current_raw(data);
+ mutex_unlock(&data->lock);
+ if (ret < 0)
+ return ret;
+ *val = ret;
+ return IIO_VAL_INT;
+
+ default:
+ return -EINVAL;
+ }
+
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_CURRENT:
+			/* Output register is in 10s of milliamps */
+ *val = 10;
+ return IIO_VAL_INT;
+
+ case IIO_LIGHT:
+ mutex_lock(&data->lock);
+ alspga = ret = max44000_read_alspga(data);
+ mutex_unlock(&data->lock);
+ if (ret < 0)
+ return ret;
+
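+			/*
+			 * Worked example (alspga == 0, i.e. 1x gain):
+			 * val / 2^val2 = 128 / 2^(5 + 7 - 0) = 0.03125 lux
+			 * per LSB, matching the default scale noted above.
+			 */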
+ /* Avoid negative shifts */
+ *val = (1 << MAX44000_ALSPGA_MAX_SHIFT);
+ *val2 = MAX44000_ALS_TO_LUX_DEFAULT_FRACTION_LOG2
+ + MAX44000_ALSPGA_MAX_SHIFT
+ - max44000_alspga_shift[alspga];
+ return IIO_VAL_FRACTIONAL_LOG2;
+
+ default:
+ return -EINVAL;
+ }
+
+ case IIO_CHAN_INFO_INT_TIME:
+ mutex_lock(&data->lock);
+ alstim = ret = max44000_read_alstim(data);
+ mutex_unlock(&data->lock);
+
+ if (ret < 0)
+ return ret;
+ *val = 0;
+ *val2 = max44000_int_time_avail_ns_array[alstim];
+ return IIO_VAL_INT_PLUS_NANO;
+
+ default:
+ return -EINVAL;
+ }
+}
+
+static int max44000_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct max44000_data *data = iio_priv(indio_dev);
+ int ret;
+
+ if (mask == IIO_CHAN_INFO_RAW && chan->type == IIO_CURRENT) {
+ mutex_lock(&data->lock);
+ ret = max44000_write_led_current_raw(data, val);
+ mutex_unlock(&data->lock);
+ return ret;
+ } else if (mask == IIO_CHAN_INFO_INT_TIME && chan->type == IIO_LIGHT) {
+ s64 valns = val * NSEC_PER_SEC + val2;
+ int alstim = find_closest_descending(valns,
+ max44000_int_time_avail_ns_array,
+ ARRAY_SIZE(max44000_int_time_avail_ns_array));
+ mutex_lock(&data->lock);
+ ret = max44000_write_alstim(data, alstim);
+ mutex_unlock(&data->lock);
+ return ret;
+ } else if (mask == IIO_CHAN_INFO_SCALE && chan->type == IIO_LIGHT) {
+ s64 valus = val * USEC_PER_SEC + val2;
+ int alspga = find_closest(valus,
+ max44000_scale_avail_ulux_array,
+ ARRAY_SIZE(max44000_scale_avail_ulux_array));
+ mutex_lock(&data->lock);
+ ret = max44000_write_alspga(data, alspga);
+ mutex_unlock(&data->lock);
+ return ret;
+ }
+
+ return -EINVAL;
+}
+
+static int max44000_write_raw_get_fmt(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ long mask)
+{
+ if (mask == IIO_CHAN_INFO_INT_TIME && chan->type == IIO_LIGHT)
+ return IIO_VAL_INT_PLUS_NANO;
+ else if (mask == IIO_CHAN_INFO_SCALE && chan->type == IIO_LIGHT)
+ return IIO_VAL_INT_PLUS_MICRO;
+ else
+ return IIO_VAL_INT;
+}
+
+static IIO_CONST_ATTR(illuminance_integration_time_available, max44000_int_time_avail_str);
+static IIO_CONST_ATTR(illuminance_scale_available, max44000_scale_avail_str);
+
+static struct attribute *max44000_attributes[] = {
+ &iio_const_attr_illuminance_integration_time_available.dev_attr.attr,
+ &iio_const_attr_illuminance_scale_available.dev_attr.attr,
+ NULL
+};
+
+static const struct attribute_group max44000_attribute_group = {
+ .attrs = max44000_attributes,
+};
+
+static const struct iio_info max44000_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = max44000_read_raw,
+ .write_raw = max44000_write_raw,
+ .write_raw_get_fmt = max44000_write_raw_get_fmt,
+ .attrs = &max44000_attribute_group,
+};
+
+static bool max44000_readable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX44000_REG_STATUS:
+ case MAX44000_REG_CFG_MAIN:
+ case MAX44000_REG_CFG_RX:
+ case MAX44000_REG_CFG_TX:
+ case MAX44000_REG_ALS_DATA_HI:
+ case MAX44000_REG_ALS_DATA_LO:
+ case MAX44000_REG_PRX_DATA:
+ case MAX44000_REG_ALS_UPTHR_HI:
+ case MAX44000_REG_ALS_UPTHR_LO:
+ case MAX44000_REG_ALS_LOTHR_HI:
+ case MAX44000_REG_ALS_LOTHR_LO:
+ case MAX44000_REG_PST:
+ case MAX44000_REG_PRX_IND:
+ case MAX44000_REG_PRX_THR:
+ case MAX44000_REG_TRIM_GAIN_GREEN:
+ case MAX44000_REG_TRIM_GAIN_IR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool max44000_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX44000_REG_CFG_MAIN:
+ case MAX44000_REG_CFG_RX:
+ case MAX44000_REG_CFG_TX:
+ case MAX44000_REG_ALS_UPTHR_HI:
+ case MAX44000_REG_ALS_UPTHR_LO:
+ case MAX44000_REG_ALS_LOTHR_HI:
+ case MAX44000_REG_ALS_LOTHR_LO:
+ case MAX44000_REG_PST:
+ case MAX44000_REG_PRX_IND:
+ case MAX44000_REG_PRX_THR:
+ case MAX44000_REG_TRIM_GAIN_GREEN:
+ case MAX44000_REG_TRIM_GAIN_IR:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool max44000_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case MAX44000_REG_STATUS:
+ case MAX44000_REG_ALS_DATA_HI:
+ case MAX44000_REG_ALS_DATA_LO:
+ case MAX44000_REG_PRX_DATA:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool max44000_precious_reg(struct device *dev, unsigned int reg)
+{
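+ /* reading the status register clears interrupt bits, so mark it precious */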
+ return reg == MAX44000_REG_STATUS;
+}
+
+static const struct regmap_config max44000_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = MAX44000_REG_PRX_DATA,
+ .readable_reg = max44000_readable_reg,
+ .writeable_reg = max44000_writeable_reg,
+ .volatile_reg = max44000_volatile_reg,
+ .precious_reg = max44000_precious_reg,
+
+ .use_single_rw = 1,
+ .cache_type = REGCACHE_RBTREE,
+};
+
+static irqreturn_t max44000_trigger_handler(int irq, void *p)
+{
+ struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+ struct max44000_data *data = iio_priv(indio_dev);
+ u16 buf[8]; /* 2x u16 + padding + 8 bytes timestamp */
+ int index = 0;
+ unsigned int regval;
+ int ret;
+
+ mutex_lock(&data->lock);
+ if (test_bit(MAX44000_SCAN_INDEX_ALS, indio_dev->active_scan_mask)) {
+ ret = max44000_read_alsval(data);
+ if (ret < 0)
+ goto out_unlock;
+ buf[index++] = ret;
+ }
+ if (test_bit(MAX44000_SCAN_INDEX_PRX, indio_dev->active_scan_mask)) {
+ ret = regmap_read(data->regmap, MAX44000_REG_PRX_DATA, &regval);
+ if (ret < 0)
+ goto out_unlock;
+ buf[index] = regval;
+ }
+ mutex_unlock(&data->lock);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, buf, iio_get_time_ns());
+ iio_trigger_notify_done(indio_dev->trig);
+ return IRQ_HANDLED;
+
+out_unlock:
+ mutex_unlock(&data->lock);
+ iio_trigger_notify_done(indio_dev->trig);
+ return IRQ_HANDLED;
+}
+
+static int max44000_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct max44000_data *data;
+ struct iio_dev *indio_dev;
+ int ret, reg;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+ data = iio_priv(indio_dev);
+ data->regmap = devm_regmap_init_i2c(client, &max44000_regmap_config);
+ if (IS_ERR(data->regmap)) {
+ dev_err(&client->dev, "regmap_init failed!\n");
+ return PTR_ERR(data->regmap);
+ }
+
+ i2c_set_clientdata(client, indio_dev);
+ mutex_init(&data->lock);
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &max44000_info;
+ indio_dev->name = MAX44000_DRV_NAME;
+ indio_dev->channels = max44000_channels;
+ indio_dev->num_channels = ARRAY_SIZE(max44000_channels);
+
+ /*
+ * The device doesn't have a reset function so we just clear some
+ * important bits at probe time to ensure sane operation.
+ *
+ * Since we don't support interrupts/events the threshold values are
+ * not important. We also don't touch trim values.
+ */
+
+ /* Reset ALS scaling bits */
+ ret = regmap_write(data->regmap, MAX44000_REG_CFG_RX,
+ MAX44000_REG_CFG_RX_DEFAULT);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to write default CFG_RX: %d\n",
+ ret);
+ return ret;
+ }
+
+ /*
+ * By default the LED pulse used for the proximity sensor is disabled.
+ * Set a middle value so that we get some sort of valid data by default.
+ */
+ ret = max44000_write_led_current_raw(data, MAX44000_LED_CURRENT_DEFAULT);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to write init config: %d\n", ret);
+ return ret;
+ }
+
+ /* Reset CFG bits to ALS_PRX mode which allows easy reading of both values. */
+ reg = MAX44000_CFG_TRIM | MAX44000_CFG_MODE_ALS_PRX;
+ ret = regmap_write(data->regmap, MAX44000_REG_CFG_MAIN, reg);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to write init config: %d\n", ret);
+ return ret;
+ }
+
+ /* Read status at least once to clear any stale interrupt bits. */
+ ret = regmap_read(data->regmap, MAX44000_REG_STATUS, &reg);
+ if (ret < 0) {
+ dev_err(&client->dev, "failed to read init status: %d\n", ret);
+ return ret;
+ }
+
+ ret = iio_triggered_buffer_setup(indio_dev, NULL, max44000_trigger_handler, NULL);
+ if (ret < 0) {
+ dev_err(&client->dev, "iio triggered buffer setup failed\n");
+ return ret;
+ }
+
+ return iio_device_register(indio_dev);
+}
+
+static int max44000_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+
+ return 0;
+}
+
+static const struct i2c_device_id max44000_id[] = {
+ {"max44000", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, max44000_id);
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id max44000_acpi_match[] = {
+ {"MAX44000", 0},
+ { }
+};
+MODULE_DEVICE_TABLE(acpi, max44000_acpi_match);
+#endif
+
+static struct i2c_driver max44000_driver = {
+ .driver = {
+ .name = MAX44000_DRV_NAME,
+ .acpi_match_table = ACPI_PTR(max44000_acpi_match),
+ },
+ .probe = max44000_probe,
+ .remove = max44000_remove,
+ .id_table = max44000_id,
+};
+
+module_i2c_driver(max44000_driver);
+
+MODULE_AUTHOR("Crestez Dan Leonard <leonard.crestez@intel.com>");
+MODULE_DESCRIPTION("MAX44000 Ambient and Infrared Proximity Sensor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c
index 42d334ba612e..9e847f8f4f0c 100644
--- a/drivers/iio/light/stk3310.c
+++ b/drivers/iio/light/stk3310.c
@@ -16,7 +16,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/regmap.h>
-#include <linux/gpio/consumer.h>
#include <linux/iio/events.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
diff --git a/drivers/iio/light/tsl2563.c b/drivers/iio/light/tsl2563.c
index 12731d6b89ec..57b108c30e98 100644
--- a/drivers/iio/light/tsl2563.c
+++ b/drivers/iio/light/tsl2563.c
@@ -806,8 +806,7 @@ static int tsl2563_probe(struct i2c_client *client,
return 0;
fail:
- cancel_delayed_work(&chip->poweroff_work);
- flush_scheduled_work();
+ cancel_delayed_work_sync(&chip->poweroff_work);
return err;
}
diff --git a/drivers/iio/light/veml6070.c b/drivers/iio/light/veml6070.c
new file mode 100644
index 000000000000..bc1c4cb782cd
--- /dev/null
+++ b/drivers/iio/light/veml6070.c
@@ -0,0 +1,218 @@
+/*
+ * veml6070.c - Support for Vishay VEML6070 UV A light sensor
+ *
+ * Copyright 2016 Peter Meerwald-Stadler <pmeerw@pmeerw.net>
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * IIO driver for VEML6070 (7-bit I2C slave addresses 0x38 and 0x39)
+ *
+ * TODO: integration time, ACK signal
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+#define VEML6070_DRV_NAME "veml6070"
+
+#define VEML6070_ADDR_CONFIG_DATA_MSB 0x38 /* read: MSB data, write: config */
+#define VEML6070_ADDR_DATA_LSB 0x39 /* LSB data */
+
+#define VEML6070_COMMAND_ACK BIT(5) /* raise interrupt when over threshold */
+#define VEML6070_COMMAND_IT GENMASK(3, 2) /* bit mask integration time */
+#define VEML6070_COMMAND_RSRVD BIT(1) /* reserved, set to 1 */
+#define VEML6070_COMMAND_SD BIT(0) /* shutdown mode when set */
+
+#define VEML6070_IT_10 0x04 /* integration time 1x */
+
+struct veml6070_data {
+ struct i2c_client *client1;
+ struct i2c_client *client2;
+ u8 config;
+ struct mutex lock;
+};
+
+static int veml6070_read(struct veml6070_data *data)
+{
+ int ret;
+ u8 msb, lsb;
+
+ mutex_lock(&data->lock);
+
+ /* disable shutdown */
+ ret = i2c_smbus_write_byte(data->client1,
+ data->config & ~VEML6070_COMMAND_SD);
+ if (ret < 0)
+ goto out;
+
+ msleep(125 + 10); /* measurement takes up to 125 ms for IT 1x */
+
+ ret = i2c_smbus_read_byte(data->client2); /* read MSB, address 0x39 */
+ if (ret < 0)
+ goto out;
+ msb = ret;
+
+ ret = i2c_smbus_read_byte(data->client1); /* read LSB, address 0x38 */
+ if (ret < 0)
+ goto out;
+ lsb = ret;
+
+ /* shutdown again */
+ ret = i2c_smbus_write_byte(data->client1, data->config);
+ if (ret < 0)
+ goto out;
+
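+ /* assemble the two data bytes into one 16-bit UV count */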
+ ret = (msb << 8) | lsb;
+
+out:
+ mutex_unlock(&data->lock);
+ return ret;
+}
+
+static const struct iio_chan_spec veml6070_channels[] = {
+ {
+ .type = IIO_INTENSITY,
+ .modified = 1,
+ .channel2 = IIO_MOD_LIGHT_UV,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ },
+ {
+ .type = IIO_UVINDEX,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ }
+};
+
+static int veml6070_to_uv_index(unsigned val)
+{
+ /*
+ * conversion of raw UV intensity values to UV index depends on
+ * integration time (IT) and value of the resistor connected to
+ * the RSET pin (default: 270 KOhm)
+ */
+ unsigned uvi[11] = {
+ 187, 373, 560, /* low */
+ 746, 933, 1120, /* moderate */
+ 1308, 1494, /* high */
+ 1681, 1868, 2054}; /* very high */
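+ /* e.g. a raw value of 1000 is above uvi[4] = 933 but not above uvi[5] = 1120, so it maps to UV index 5 */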
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(uvi); i++)
+ if (val <= uvi[i])
+ return i;
+
+ return 11; /* extreme */
+}
+
+static int veml6070_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct veml6070_data *data = iio_priv(indio_dev);
+ int ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ case IIO_CHAN_INFO_PROCESSED:
+ ret = veml6070_read(data);
+ if (ret < 0)
+ return ret;
+ if (mask == IIO_CHAN_INFO_PROCESSED)
+ *val = veml6070_to_uv_index(ret);
+ else
+ *val = ret;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+}
+
+static const struct iio_info veml6070_info = {
+ .read_raw = veml6070_read_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int veml6070_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct veml6070_data *data;
+ struct iio_dev *indio_dev;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ i2c_set_clientdata(client, indio_dev);
+ data->client1 = client;
+ mutex_init(&data->lock);
+
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->info = &veml6070_info;
+ indio_dev->channels = veml6070_channels;
+ indio_dev->num_channels = ARRAY_SIZE(veml6070_channels);
+ indio_dev->name = VEML6070_DRV_NAME;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
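+ /* the sensor also answers on a second, fixed address for the other data byte */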
+ data->client2 = i2c_new_dummy(client->adapter, VEML6070_ADDR_DATA_LSB);
+ if (!data->client2) {
+ dev_err(&client->dev, "i2c device for second chip address failed\n");
+ return -ENODEV;
+ }
+
+ data->config = VEML6070_IT_10 | VEML6070_COMMAND_RSRVD |
+ VEML6070_COMMAND_SD;
+ ret = i2c_smbus_write_byte(data->client1, data->config);
+ if (ret < 0)
+ goto fail;
+
+ ret = iio_device_register(indio_dev);
+ if (ret < 0)
+ goto fail;
+
+ return ret;
+
+fail:
+ i2c_unregister_device(data->client2);
+ return ret;
+}
+
+static int veml6070_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct veml6070_data *data = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ i2c_unregister_device(data->client2);
+
+ return 0;
+}
+
+static const struct i2c_device_id veml6070_id[] = {
+ { "veml6070", 0 },
+ { }
+};
+MODULE_DEVICE_TABLE(i2c, veml6070_id);
+
+static struct i2c_driver veml6070_driver = {
+ .driver = {
+ .name = VEML6070_DRV_NAME,
+ },
+ .probe = veml6070_probe,
+ .remove = veml6070_remove,
+ .id_table = veml6070_id,
+};
+
+module_i2c_driver(veml6070_driver);
+
+MODULE_AUTHOR("Peter Meerwald-Stadler <pmeerw@pmeerw.net>");
+MODULE_DESCRIPTION("Vishay VEML6070 UV A light sensor driver");
+MODULE_LICENSE("GPL");
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index 021dc5361f53..84e6559ccc65 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -9,6 +9,8 @@ config AK8975
tristate "Asahi Kasei AK 3-Axis Magnetometer"
depends on I2C
depends on GPIOLIB || COMPILE_TEST
+ select IIO_BUFFER
+ select IIO_TRIGGERED_BUFFER
help
Say yes here to build support for Asahi Kasei AK8975, AK8963,
AK09911 or AK09912 3-Axis Magnetometer.
@@ -25,22 +27,41 @@ config AK09911
Deprecated: AK09911 is now supported by AK8975 driver.
config BMC150_MAGN
- tristate "Bosch BMC150 Magnetometer Driver"
- depends on I2C
- select REGMAP_I2C
+ tristate
select IIO_BUFFER
select IIO_TRIGGERED_BUFFER
+
+config BMC150_MAGN_I2C
+ tristate "Bosch BMC150 I2C Magnetometer Driver"
+ depends on I2C
+ select BMC150_MAGN
+ select REGMAP_I2C
help
- Say yes here to build support for the BMC150 magnetometer.
+ Say yes here to build support for the BMC150 magnetometer with
+ I2C interface.
- Currently this only supports the device via an i2c interface.
+ This is a combo module with both accelerometer and magnetometer.
+ This driver is only implementing magnetometer part, which has
+ its own address and register map.
+
+ To compile this driver as a module, choose M here: the module will be
+ called bmc150_magn_i2c.
+
+config BMC150_MAGN_SPI
+ tristate "Bosch BMC150 SPI Magnetometer Driver"
+ depends on SPI
+ select BMC150_MAGN
+ select REGMAP_SPI
+ help
+ Say yes here to build support for the BMC150 magnetometer with
+ SPI interface.
This is a combo module with both accelerometer and magnetometer.
This driver is only implementing magnetometer part, which has
its own address and register map.
To compile this driver as a module, choose M here: the module will be
- called bmc150_magn.
+ called bmc150_magn_spi.
config MAG3110
tristate "Freescale MAG3110 3-Axis Magnetometer"
diff --git a/drivers/iio/magnetometer/Makefile b/drivers/iio/magnetometer/Makefile
index dd03fe524481..92a745c9a6e8 100644
--- a/drivers/iio/magnetometer/Makefile
+++ b/drivers/iio/magnetometer/Makefile
@@ -5,6 +5,9 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_AK8975) += ak8975.o
obj-$(CONFIG_BMC150_MAGN) += bmc150_magn.o
+obj-$(CONFIG_BMC150_MAGN_I2C) += bmc150_magn_i2c.o
+obj-$(CONFIG_BMC150_MAGN_SPI) += bmc150_magn_spi.o
+
obj-$(CONFIG_MAG3110) += mag3110.o
obj-$(CONFIG_HID_SENSOR_MAGNETOMETER_3D) += hid-sensor-magn-3d.o
obj-$(CONFIG_MMC35240) += mmc35240.o
diff --git a/drivers/iio/magnetometer/ak8975.c b/drivers/iio/magnetometer/ak8975.c
index 0e931a9a1669..609a2c401b5d 100644
--- a/drivers/iio/magnetometer/ak8975.c
+++ b/drivers/iio/magnetometer/ak8975.c
@@ -32,9 +32,17 @@
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/acpi.h>
+#include <linux/regulator/consumer.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+#include <linux/iio/buffer.h>
+#include <linux/iio/trigger.h>
+#include <linux/iio/trigger_consumer.h>
+#include <linux/iio/triggered_buffer.h>
+
+#include <linux/iio/magnetometer/ak8975.h>
+
/*
* Register definitions, as well as various shifts and masks to get at the
* individual fields of the registers.
@@ -361,7 +369,6 @@ static const struct ak_def ak_def_array[AK_MAX_TYPE] = {
struct ak8975_data {
struct i2c_client *client;
const struct ak_def *def;
- struct attribute_group attrs;
struct mutex lock;
u8 asa[3];
long raw_to_gauss[3];
@@ -370,8 +377,41 @@ struct ak8975_data {
wait_queue_head_t data_ready_queue;
unsigned long flags;
u8 cntl_cache;
+ struct iio_mount_matrix orientation;
+ struct regulator *vdd;
};
+/* Enable attached power regulator if any. */
+static int ak8975_power_on(struct i2c_client *client)
+{
+ const struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct ak8975_data *data = iio_priv(indio_dev);
+ int ret;
+
+ data->vdd = devm_regulator_get(&client->dev, "vdd");
+ if (IS_ERR_OR_NULL(data->vdd)) {
+ ret = PTR_ERR(data->vdd);
+ if (ret == -ENODEV)
+ ret = 0;
+ } else {
+ ret = regulator_enable(data->vdd);
+ }
+
+ if (ret)
+ dev_err(&client->dev, "failed to enable Vdd supply: %d\n", ret);
+ return ret;
+}
+
+/* Disable attached power regulator if any. */
+static void ak8975_power_off(const struct i2c_client *client)
+{
+ const struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ const struct ak8975_data *data = iio_priv(indio_dev);
+
+ if (!IS_ERR_OR_NULL(data->vdd))
+ regulator_disable(data->vdd);
+}
+
/*
* Return 0 if the i2c device is the one we expect.
* return a negative error number otherwise
@@ -601,22 +641,15 @@ static int wait_conversion_complete_interrupt(struct ak8975_data *data)
return ret > 0 ? 0 : -ETIME;
}
-/*
- * Emits the raw flux value for the x, y, or z axis.
- */
-static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
+static int ak8975_start_read_axis(struct ak8975_data *data,
+ const struct i2c_client *client)
{
- struct ak8975_data *data = iio_priv(indio_dev);
- struct i2c_client *client = data->client;
- int ret;
-
- mutex_lock(&data->lock);
-
/* Set up the device for taking a sample. */
- ret = ak8975_set_mode(data, MODE_ONCE);
+ int ret = ak8975_set_mode(data, MODE_ONCE);
+
if (ret < 0) {
dev_err(&client->dev, "Error in setting operating mode\n");
- goto exit;
+ return ret;
}
/* Wait for the conversion to complete. */
@@ -627,7 +660,7 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
else
ret = wait_conversion_complete_polled(data);
if (ret < 0)
- goto exit;
+ return ret;
/* This will be executed only for non-interrupt based waiting case */
if (ret & data->def->ctrl_masks[ST1_DRDY]) {
@@ -635,32 +668,45 @@ static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
data->def->ctrl_regs[ST2]);
if (ret < 0) {
dev_err(&client->dev, "Error in reading ST2\n");
- goto exit;
+ return ret;
}
if (ret & (data->def->ctrl_masks[ST2_DERR] |
data->def->ctrl_masks[ST2_HOFL])) {
dev_err(&client->dev, "ST2 status error 0x%x\n", ret);
- ret = -EINVAL;
- goto exit;
+ return -EINVAL;
}
}
- /* Read the flux value from the appropriate register
- (the register is specified in the iio device attributes). */
- ret = i2c_smbus_read_word_data(client, data->def->data_regs[index]);
- if (ret < 0) {
- dev_err(&client->dev, "Read axis data fails\n");
+ return 0;
+}
+
+/* Retrieve raw flux value for one of the x, y, or z axis. */
+static int ak8975_read_axis(struct iio_dev *indio_dev, int index, int *val)
+{
+ struct ak8975_data *data = iio_priv(indio_dev);
+ const struct i2c_client *client = data->client;
+ const struct ak_def *def = data->def;
+ int ret;
+
+ mutex_lock(&data->lock);
+
+ ret = ak8975_start_read_axis(data, client);
+ if (ret)
+ goto exit;
+
+ ret = i2c_smbus_read_word_data(client, def->data_regs[index]);
+ if (ret < 0)
goto exit;
- }
mutex_unlock(&data->lock);
/* Clamp to valid range. */
- *val = clamp_t(s16, ret, -data->def->range, data->def->range);
+ *val = clamp_t(s16, ret, -def->range, def->range);
return IIO_VAL_INT;
exit:
mutex_unlock(&data->lock);
+ dev_err(&client->dev, "Error in reading axis\n");
return ret;
}
@@ -682,6 +728,18 @@ static int ak8975_read_raw(struct iio_dev *indio_dev,
return -EINVAL;
}
+static const struct iio_mount_matrix *
+ak8975_get_mount_matrix(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan)
+{
+ return &((struct ak8975_data *)iio_priv(indio_dev))->orientation;
+}
+
+static const struct iio_chan_spec_ext_info ak8975_ext_info[] = {
+ IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, ak8975_get_mount_matrix),
+ { },
+};
+
#define AK8975_CHANNEL(axis, index) \
{ \
.type = IIO_MAGN, \
@@ -690,12 +748,23 @@ static int ak8975_read_raw(struct iio_dev *indio_dev,
.info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
BIT(IIO_CHAN_INFO_SCALE), \
.address = index, \
+ .scan_index = index, \
+ .scan_type = { \
+ .sign = 's', \
+ .realbits = 16, \
+ .storagebits = 16, \
+ .endianness = IIO_CPU \
+ }, \
+ .ext_info = ak8975_ext_info, \
}
static const struct iio_chan_spec ak8975_channels[] = {
AK8975_CHANNEL(X, 0), AK8975_CHANNEL(Y, 1), AK8975_CHANNEL(Z, 2),
+ IIO_CHAN_SOFT_TIMESTAMP(3),
};
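+/* all three axes must be enabled together (scan mask 0x7) */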
+static const unsigned long ak8975_scan_masks[] = { 0x7, 0 };
+
static const struct iio_info ak8975_info = {
.read_raw = &ak8975_read_raw,
.driver_module = THIS_MODULE,
@@ -724,6 +793,56 @@ static const char *ak8975_match_acpi_device(struct device *dev,
return dev_name(dev);
}
+static void ak8975_fill_buffer(struct iio_dev *indio_dev)
+{
+ struct ak8975_data *data = iio_priv(indio_dev);
+ const struct i2c_client *client = data->client;
+ const struct ak_def *def = data->def;
+ int ret;
+ s16 buff[8]; /* 3 x 16 bits axis values + 1 aligned 64 bits timestamp */
+
+ mutex_lock(&data->lock);
+
+ ret = ak8975_start_read_axis(data, client);
+ if (ret)
+ goto unlock;
+
+ /*
+ * For each axis, read the flux value from the appropriate register
+ * (the register is specified in the iio device attributes).
+ */
+ ret = i2c_smbus_read_i2c_block_data_or_emulated(client,
+ def->data_regs[0],
+ 3 * sizeof(buff[0]),
+ (u8 *)buff);
+ if (ret < 0)
+ goto unlock;
+
+ mutex_unlock(&data->lock);
+
+ /* Clamp to valid range. */
+ buff[0] = clamp_t(s16, le16_to_cpu(buff[0]), -def->range, def->range);
+ buff[1] = clamp_t(s16, le16_to_cpu(buff[1]), -def->range, def->range);
+ buff[2] = clamp_t(s16, le16_to_cpu(buff[2]), -def->range, def->range);
+
+ iio_push_to_buffers_with_timestamp(indio_dev, buff, iio_get_time_ns());
+ return;
+
+unlock:
+ mutex_unlock(&data->lock);
+ dev_err(&client->dev, "Error in reading axes block\n");
+}
+
+static irqreturn_t ak8975_handle_trigger(int irq, void *p)
+{
+ const struct iio_poll_func *pf = p;
+ struct iio_dev *indio_dev = pf->indio_dev;
+
+ ak8975_fill_buffer(indio_dev);
+ iio_trigger_notify_done(indio_dev->trig);
+ return IRQ_HANDLED;
+}
+
static int ak8975_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -733,10 +852,12 @@ static int ak8975_probe(struct i2c_client *client,
int err;
const char *name = NULL;
enum asahi_compass_chipset chipset = AK_MAX_TYPE;
+ const struct ak8975_platform_data *pdata =
+ dev_get_platdata(&client->dev);
/* Grab and set up the supplied GPIO. */
- if (client->dev.platform_data)
- eoc_gpio = *(int *)(client->dev.platform_data);
+ if (pdata)
+ eoc_gpio = pdata->eoc_gpio;
else if (client->dev.of_node)
eoc_gpio = of_get_gpio(client->dev.of_node, 0);
else
@@ -770,13 +891,24 @@ static int ak8975_probe(struct i2c_client *client,
data->eoc_gpio = eoc_gpio;
data->eoc_irq = 0;
+ if (!pdata) {
+ err = of_iio_read_mount_matrix(&client->dev,
+ "mount-matrix",
+ &data->orientation);
+ if (err)
+ return err;
+ } else
+ data->orientation = pdata->orientation;
+
/* id will be NULL when enumerated via ACPI */
if (id) {
chipset = (enum asahi_compass_chipset)(id->driver_data);
name = id->name;
- } else if (ACPI_HANDLE(&client->dev))
+ } else if (ACPI_HANDLE(&client->dev)) {
name = ak8975_match_acpi_device(&client->dev, &chipset);
- else
+ if (!name)
+ return -ENODEV;
+ } else
return -ENOSYS;
if (chipset >= AK_MAX_TYPE) {
@@ -786,10 +918,15 @@ static int ak8975_probe(struct i2c_client *client,
}
data->def = &ak_def_array[chipset];
+
+ err = ak8975_power_on(client);
+ if (err)
+ return err;
+
err = ak8975_who_i_am(client, data->def->type);
if (err < 0) {
dev_err(&client->dev, "Unexpected device\n");
- return err;
+ goto power_off;
}
dev_dbg(&client->dev, "Asahi compass chip %s\n", name);
@@ -797,7 +934,7 @@ static int ak8975_probe(struct i2c_client *client,
err = ak8975_setup(client);
if (err < 0) {
dev_err(&client->dev, "%s initialization fails\n", name);
- return err;
+ goto power_off;
}
mutex_init(&data->lock);
@@ -805,9 +942,41 @@ static int ak8975_probe(struct i2c_client *client,
indio_dev->channels = ak8975_channels;
indio_dev->num_channels = ARRAY_SIZE(ak8975_channels);
indio_dev->info = &ak8975_info;
+ indio_dev->available_scan_masks = ak8975_scan_masks;
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->name = name;
- return devm_iio_device_register(&client->dev, indio_dev);
+
+ err = iio_triggered_buffer_setup(indio_dev, NULL, ak8975_handle_trigger,
+ NULL);
+ if (err) {
+ dev_err(&client->dev, "triggered buffer setup failed\n");
+ goto power_off;
+ }
+
+ err = iio_device_register(indio_dev);
+ if (err) {
+ dev_err(&client->dev, "device register failed\n");
+ goto cleanup_buffer;
+ }
+
+ return 0;
+
+cleanup_buffer:
+ iio_triggered_buffer_cleanup(indio_dev);
+power_off:
+ ak8975_power_off(client);
+ return err;
+}
+
+static int ak8975_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+
+ iio_device_unregister(indio_dev);
+ iio_triggered_buffer_cleanup(indio_dev);
+ ak8975_power_off(client);
+
+ return 0;
}
static const struct i2c_device_id ak8975_id[] = {
@@ -841,6 +1010,7 @@ static struct i2c_driver ak8975_driver = {
.acpi_match_table = ACPI_PTR(ak_acpi_match),
},
.probe = ak8975_probe,
+ .remove = ak8975_remove,
.id_table = ak8975_id,
};
module_i2c_driver(ak8975_driver);
diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c
index ffcb75ea64fb..d104fb8d9379 100644
--- a/drivers/iio/magnetometer/bmc150_magn.c
+++ b/drivers/iio/magnetometer/bmc150_magn.c
@@ -23,7 +23,6 @@
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/acpi.h>
-#include <linux/gpio/consumer.h>
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/iio/iio.h>
@@ -35,6 +34,8 @@
#include <linux/iio/triggered_buffer.h>
#include <linux/regmap.h>
+#include "bmc150_magn.h"
+
#define BMC150_MAGN_DRV_NAME "bmc150_magn"
#define BMC150_MAGN_IRQ_NAME "bmc150_magn_event"
@@ -135,7 +136,7 @@ struct bmc150_magn_trim_regs {
} __packed;
struct bmc150_magn_data {
- struct i2c_client *client;
+ struct device *dev;
/*
* 1. Protect this structure.
* 2. Serialize sequences that power on/off the device and access HW.
@@ -147,6 +148,7 @@ struct bmc150_magn_data {
struct iio_trigger *dready_trig;
bool dready_trigger_on;
int max_odr;
+ int irq;
};
static const struct {
@@ -216,7 +218,7 @@ static bool bmc150_magn_is_volatile_reg(struct device *dev, unsigned int reg)
}
}
-static const struct regmap_config bmc150_magn_regmap_config = {
+const struct regmap_config bmc150_magn_regmap_config = {
.reg_bits = 8,
.val_bits = 8,
@@ -226,6 +228,7 @@ static const struct regmap_config bmc150_magn_regmap_config = {
.writeable_reg = bmc150_magn_is_writeable_reg,
.volatile_reg = bmc150_magn_is_volatile_reg,
};
+EXPORT_SYMBOL(bmc150_magn_regmap_config);
static int bmc150_magn_set_power_mode(struct bmc150_magn_data *data,
enum bmc150_magn_power_modes mode,
@@ -264,17 +267,17 @@ static int bmc150_magn_set_power_state(struct bmc150_magn_data *data, bool on)
int ret;
if (on) {
- ret = pm_runtime_get_sync(&data->client->dev);
+ ret = pm_runtime_get_sync(data->dev);
} else {
- pm_runtime_mark_last_busy(&data->client->dev);
- ret = pm_runtime_put_autosuspend(&data->client->dev);
+ pm_runtime_mark_last_busy(data->dev);
+ ret = pm_runtime_put_autosuspend(data->dev);
}
if (ret < 0) {
- dev_err(&data->client->dev,
+ dev_err(data->dev,
"failed to change power state to %d\n", on);
if (on)
- pm_runtime_put_noidle(&data->client->dev);
+ pm_runtime_put_noidle(data->dev);
return ret;
}
@@ -351,7 +354,7 @@ static int bmc150_magn_set_max_odr(struct bmc150_magn_data *data, int rep_xy,
/* the maximum selectable read-out frequency from datasheet */
max_odr = 1000000 / (145 * rep_xy + 500 * rep_z + 980);
if (odr > max_odr) {
- dev_err(&data->client->dev,
+ dev_err(data->dev,
"Can't set oversampling with sampling freq %d\n",
odr);
return -EINVAL;
@@ -685,27 +688,27 @@ static int bmc150_magn_init(struct bmc150_magn_data *data)
ret = bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_SUSPEND,
false);
if (ret < 0) {
- dev_err(&data->client->dev,
+ dev_err(data->dev,
"Failed to bring up device from suspend mode\n");
return ret;
}
ret = regmap_read(data->regmap, BMC150_MAGN_REG_CHIP_ID, &chip_id);
if (ret < 0) {
- dev_err(&data->client->dev, "Failed reading chip id\n");
+ dev_err(data->dev, "Failed reading chip id\n");
goto err_poweroff;
}
if (chip_id != BMC150_MAGN_CHIP_ID_VAL) {
- dev_err(&data->client->dev, "Invalid chip id 0x%x\n", chip_id);
+ dev_err(data->dev, "Invalid chip id 0x%x\n", chip_id);
ret = -ENODEV;
goto err_poweroff;
}
- dev_dbg(&data->client->dev, "Chip id %x\n", chip_id);
+ dev_dbg(data->dev, "Chip id %x\n", chip_id);
preset = bmc150_magn_presets_table[BMC150_MAGN_DEFAULT_PRESET];
ret = bmc150_magn_set_odr(data, preset.odr);
if (ret < 0) {
- dev_err(&data->client->dev, "Failed to set ODR to %d\n",
+ dev_err(data->dev, "Failed to set ODR to %d\n",
preset.odr);
goto err_poweroff;
}
@@ -713,7 +716,7 @@ static int bmc150_magn_init(struct bmc150_magn_data *data)
ret = regmap_write(data->regmap, BMC150_MAGN_REG_REP_XY,
BMC150_MAGN_REPXY_TO_REGVAL(preset.rep_xy));
if (ret < 0) {
- dev_err(&data->client->dev, "Failed to set REP XY to %d\n",
+ dev_err(data->dev, "Failed to set REP XY to %d\n",
preset.rep_xy);
goto err_poweroff;
}
@@ -721,7 +724,7 @@ static int bmc150_magn_init(struct bmc150_magn_data *data)
ret = regmap_write(data->regmap, BMC150_MAGN_REG_REP_Z,
BMC150_MAGN_REPZ_TO_REGVAL(preset.rep_z));
if (ret < 0) {
- dev_err(&data->client->dev, "Failed to set REP Z to %d\n",
+ dev_err(data->dev, "Failed to set REP Z to %d\n",
preset.rep_z);
goto err_poweroff;
}
@@ -734,7 +737,7 @@ static int bmc150_magn_init(struct bmc150_magn_data *data)
ret = bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_NORMAL,
true);
if (ret < 0) {
- dev_err(&data->client->dev, "Failed to power on device\n");
+ dev_err(data->dev, "Failed to power on device\n");
goto err_poweroff;
}
@@ -843,41 +846,33 @@ static const char *bmc150_magn_match_acpi_device(struct device *dev)
return dev_name(dev);
}
-static int bmc150_magn_probe(struct i2c_client *client,
- const struct i2c_device_id *id)
+int bmc150_magn_probe(struct device *dev, struct regmap *regmap,
+ int irq, const char *name)
{
struct bmc150_magn_data *data;
struct iio_dev *indio_dev;
- const char *name = NULL;
int ret;
- indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
return -ENOMEM;
data = iio_priv(indio_dev);
- i2c_set_clientdata(client, indio_dev);
- data->client = client;
+ dev_set_drvdata(dev, indio_dev);
+ data->regmap = regmap;
+ data->irq = irq;
+ data->dev = dev;
- if (id)
- name = id->name;
- else if (ACPI_HANDLE(&client->dev))
- name = bmc150_magn_match_acpi_device(&client->dev);
- else
- return -ENOSYS;
+ if (!name && ACPI_HANDLE(dev))
+ name = bmc150_magn_match_acpi_device(dev);
mutex_init(&data->mutex);
- data->regmap = devm_regmap_init_i2c(client, &bmc150_magn_regmap_config);
- if (IS_ERR(data->regmap)) {
- dev_err(&client->dev, "Failed to allocate register map\n");
- return PTR_ERR(data->regmap);
- }
ret = bmc150_magn_init(data);
if (ret < 0)
return ret;
- indio_dev->dev.parent = &client->dev;
+ indio_dev->dev.parent = dev;
indio_dev->channels = bmc150_magn_channels;
indio_dev->num_channels = ARRAY_SIZE(bmc150_magn_channels);
indio_dev->available_scan_masks = bmc150_magn_scan_masks;
@@ -885,35 +880,34 @@ static int bmc150_magn_probe(struct i2c_client *client,
indio_dev->modes = INDIO_DIRECT_MODE;
indio_dev->info = &bmc150_magn_info;
- if (client->irq > 0) {
- data->dready_trig = devm_iio_trigger_alloc(&client->dev,
+ if (irq > 0) {
+ data->dready_trig = devm_iio_trigger_alloc(dev,
"%s-dev%d",
indio_dev->name,
indio_dev->id);
if (!data->dready_trig) {
ret = -ENOMEM;
- dev_err(&client->dev, "iio trigger alloc failed\n");
+ dev_err(dev, "iio trigger alloc failed\n");
goto err_poweroff;
}
- data->dready_trig->dev.parent = &client->dev;
+ data->dready_trig->dev.parent = dev;
data->dready_trig->ops = &bmc150_magn_trigger_ops;
iio_trigger_set_drvdata(data->dready_trig, indio_dev);
ret = iio_trigger_register(data->dready_trig);
if (ret) {
- dev_err(&client->dev, "iio trigger register failed\n");
+ dev_err(dev, "iio trigger register failed\n");
goto err_poweroff;
}
- ret = request_threaded_irq(client->irq,
+ ret = request_threaded_irq(irq,
iio_trigger_generic_data_rdy_poll,
NULL,
IRQF_TRIGGER_RISING | IRQF_ONESHOT,
BMC150_MAGN_IRQ_NAME,
data->dready_trig);
if (ret < 0) {
- dev_err(&client->dev, "request irq %d failed\n",
- client->irq);
+ dev_err(dev, "request irq %d failed\n", irq);
goto err_trigger_unregister;
}
}
@@ -923,34 +917,33 @@ static int bmc150_magn_probe(struct i2c_client *client,
bmc150_magn_trigger_handler,
&bmc150_magn_buffer_setup_ops);
if (ret < 0) {
- dev_err(&client->dev,
- "iio triggered buffer setup failed\n");
+ dev_err(dev, "iio triggered buffer setup failed\n");
goto err_free_irq;
}
- ret = pm_runtime_set_active(&client->dev);
+ ret = pm_runtime_set_active(dev);
if (ret)
goto err_buffer_cleanup;
- pm_runtime_enable(&client->dev);
- pm_runtime_set_autosuspend_delay(&client->dev,
+ pm_runtime_enable(dev);
+ pm_runtime_set_autosuspend_delay(dev,
BMC150_MAGN_AUTO_SUSPEND_DELAY_MS);
- pm_runtime_use_autosuspend(&client->dev);
+ pm_runtime_use_autosuspend(dev);
ret = iio_device_register(indio_dev);
if (ret < 0) {
- dev_err(&client->dev, "unable to register iio device\n");
+ dev_err(dev, "unable to register iio device\n");
goto err_buffer_cleanup;
}
- dev_dbg(&indio_dev->dev, "Registered device %s\n", name);
+ dev_dbg(dev, "Registered device %s\n", name);
return 0;
err_buffer_cleanup:
iio_triggered_buffer_cleanup(indio_dev);
err_free_irq:
- if (client->irq > 0)
- free_irq(client->irq, data->dready_trig);
+ if (irq > 0)
+ free_irq(irq, data->dready_trig);
err_trigger_unregister:
if (data->dready_trig)
iio_trigger_unregister(data->dready_trig);
@@ -958,22 +951,23 @@ err_poweroff:
bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_SUSPEND, true);
return ret;
}
+EXPORT_SYMBOL(bmc150_magn_probe);
-static int bmc150_magn_remove(struct i2c_client *client)
+int bmc150_magn_remove(struct device *dev)
{
- struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct bmc150_magn_data *data = iio_priv(indio_dev);
iio_device_unregister(indio_dev);
- pm_runtime_disable(&client->dev);
- pm_runtime_set_suspended(&client->dev);
- pm_runtime_put_noidle(&client->dev);
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ pm_runtime_put_noidle(dev);
iio_triggered_buffer_cleanup(indio_dev);
- if (client->irq > 0)
- free_irq(data->client->irq, data->dready_trig);
+ if (data->irq > 0)
+ free_irq(data->irq, data->dready_trig);
if (data->dready_trig)
iio_trigger_unregister(data->dready_trig);
@@ -984,11 +978,12 @@ static int bmc150_magn_remove(struct i2c_client *client)
return 0;
}
+EXPORT_SYMBOL(bmc150_magn_remove);
#ifdef CONFIG_PM
static int bmc150_magn_runtime_suspend(struct device *dev)
{
- struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct bmc150_magn_data *data = iio_priv(indio_dev);
int ret;
@@ -997,7 +992,7 @@ static int bmc150_magn_runtime_suspend(struct device *dev)
true);
mutex_unlock(&data->mutex);
if (ret < 0) {
- dev_err(&data->client->dev, "powering off device failed\n");
+ dev_err(dev, "powering off device failed\n");
return ret;
}
return 0;
@@ -1008,7 +1003,7 @@ static int bmc150_magn_runtime_suspend(struct device *dev)
*/
static int bmc150_magn_runtime_resume(struct device *dev)
{
- struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct bmc150_magn_data *data = iio_priv(indio_dev);
return bmc150_magn_set_power_mode(data, BMC150_MAGN_POWER_MODE_NORMAL,
@@ -1019,7 +1014,7 @@ static int bmc150_magn_runtime_resume(struct device *dev)
#ifdef CONFIG_PM_SLEEP
static int bmc150_magn_suspend(struct device *dev)
{
- struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct bmc150_magn_data *data = iio_priv(indio_dev);
int ret;
@@ -1033,7 +1028,7 @@ static int bmc150_magn_suspend(struct device *dev)
static int bmc150_magn_resume(struct device *dev)
{
- struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
+ struct iio_dev *indio_dev = dev_get_drvdata(dev);
struct bmc150_magn_data *data = iio_priv(indio_dev);
int ret;
@@ -1046,38 +1041,13 @@ static int bmc150_magn_resume(struct device *dev)
}
#endif
-static const struct dev_pm_ops bmc150_magn_pm_ops = {
+const struct dev_pm_ops bmc150_magn_pm_ops = {
SET_SYSTEM_SLEEP_PM_OPS(bmc150_magn_suspend, bmc150_magn_resume)
SET_RUNTIME_PM_OPS(bmc150_magn_runtime_suspend,
bmc150_magn_runtime_resume, NULL)
};
-
-static const struct acpi_device_id bmc150_magn_acpi_match[] = {
- {"BMC150B", 0},
- {"BMC156B", 0},
- {},
-};
-MODULE_DEVICE_TABLE(acpi, bmc150_magn_acpi_match);
-
-static const struct i2c_device_id bmc150_magn_id[] = {
- {"bmc150_magn", 0},
- {"bmc156_magn", 0},
- {},
-};
-MODULE_DEVICE_TABLE(i2c, bmc150_magn_id);
-
-static struct i2c_driver bmc150_magn_driver = {
- .driver = {
- .name = BMC150_MAGN_DRV_NAME,
- .acpi_match_table = ACPI_PTR(bmc150_magn_acpi_match),
- .pm = &bmc150_magn_pm_ops,
- },
- .probe = bmc150_magn_probe,
- .remove = bmc150_magn_remove,
- .id_table = bmc150_magn_id,
-};
-module_i2c_driver(bmc150_magn_driver);
+EXPORT_SYMBOL(bmc150_magn_pm_ops);
MODULE_AUTHOR("Irina Tirdea <irina.tirdea@intel.com>");
MODULE_LICENSE("GPL v2");
-MODULE_DESCRIPTION("BMC150 magnetometer driver");
+MODULE_DESCRIPTION("BMC150 magnetometer core driver");
diff --git a/drivers/iio/magnetometer/bmc150_magn.h b/drivers/iio/magnetometer/bmc150_magn.h
new file mode 100644
index 000000000000..9a8e26812ca8
--- /dev/null
+++ b/drivers/iio/magnetometer/bmc150_magn.h
@@ -0,0 +1,11 @@
+#ifndef _BMC150_MAGN_H_
+#define _BMC150_MAGN_H_
+
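+/* shared interface between the bmc150_magn core and its I2C/SPI front-end drivers */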
+extern const struct regmap_config bmc150_magn_regmap_config;
+extern const struct dev_pm_ops bmc150_magn_pm_ops;
+
+int bmc150_magn_probe(struct device *dev, struct regmap *regmap, int irq,
+ const char *name);
+int bmc150_magn_remove(struct device *dev);
+
+#endif /* _BMC150_MAGN_H_ */
diff --git a/drivers/iio/magnetometer/bmc150_magn_i2c.c b/drivers/iio/magnetometer/bmc150_magn_i2c.c
new file mode 100644
index 000000000000..eddc7f0d0096
--- /dev/null
+++ b/drivers/iio/magnetometer/bmc150_magn_i2c.c
@@ -0,0 +1,77 @@
+/*
+ * 3-axis magnetometer driver supporting the following I2C Bosch-Sensortec chips:
+ * - BMC150
+ * - BMC156
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ */
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+#include <linux/i2c.h>
+#include <linux/module.h>
+#include <linux/acpi.h>
+#include <linux/regmap.h>
+
+#include "bmc150_magn.h"
+
+static int bmc150_magn_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct regmap *regmap;
+ const char *name = NULL;
+
+ regmap = devm_regmap_init_i2c(client, &bmc150_magn_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&client->dev, "Failed to initialize i2c regmap\n");
+ return PTR_ERR(regmap);
+ }
+
+ if (id)
+ name = id->name;
+
+ return bmc150_magn_probe(&client->dev, regmap, client->irq, name);
+}
+
+static int bmc150_magn_i2c_remove(struct i2c_client *client)
+{
+ return bmc150_magn_remove(&client->dev);
+}
+
+static const struct acpi_device_id bmc150_magn_acpi_match[] = {
+ {"BMC150B", 0},
+ {"BMC156B", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, bmc150_magn_acpi_match);
+
+static const struct i2c_device_id bmc150_magn_i2c_id[] = {
+ {"bmc150_magn", 0},
+ {"bmc156_magn", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, bmc150_magn_i2c_id);
+
+static struct i2c_driver bmc150_magn_driver = {
+ .driver = {
+ .name = "bmc150_magn_i2c",
+ .acpi_match_table = ACPI_PTR(bmc150_magn_acpi_match),
+ .pm = &bmc150_magn_pm_ops,
+ },
+ .probe = bmc150_magn_i2c_probe,
+ .remove = bmc150_magn_i2c_remove,
+ .id_table = bmc150_magn_i2c_id,
+};
+module_i2c_driver(bmc150_magn_driver);
+
+MODULE_AUTHOR("Daniel Baluta <daniel.baluta@intel.com");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("BMC150 I2C magnetometer driver");
diff --git a/drivers/iio/magnetometer/bmc150_magn_spi.c b/drivers/iio/magnetometer/bmc150_magn_spi.c
new file mode 100644
index 000000000000..c4c738a07695
--- /dev/null
+++ b/drivers/iio/magnetometer/bmc150_magn_spi.c
@@ -0,0 +1,68 @@
+/*
+ * 3-axis magnetometer driver supporting the following SPI Bosch-Sensortec chips:
+ * - BMC150
+ * - BMC156
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ */
+#include <linux/module.h>
+#include <linux/mod_devicetable.h>
+#include <linux/spi/spi.h>
+#include <linux/acpi.h>
+#include <linux/regmap.h>
+
+#include "bmc150_magn.h"
+
+static int bmc150_magn_spi_probe(struct spi_device *spi)
+{
+ struct regmap *regmap;
+ const struct spi_device_id *id = spi_get_device_id(spi);
+
+ regmap = devm_regmap_init_spi(spi, &bmc150_magn_regmap_config);
+ if (IS_ERR(regmap)) {
+ dev_err(&spi->dev, "Failed to register spi regmap %d\n",
+ (int)PTR_ERR(regmap));
+ return PTR_ERR(regmap);
+ }
+ return bmc150_magn_probe(&spi->dev, regmap, spi->irq, id->name);
+}
+
+static int bmc150_magn_spi_remove(struct spi_device *spi)
+{
+ bmc150_magn_remove(&spi->dev);
+
+ return 0;
+}
+
+static const struct spi_device_id bmc150_magn_spi_id[] = {
+ {"bmc150_magn", 0},
+ {"bmc156_magn", 0},
+ {}
+};
+MODULE_DEVICE_TABLE(spi, bmc150_magn_spi_id);
+
+static const struct acpi_device_id bmc150_magn_acpi_match[] = {
+ {"BMC150B", 0},
+ {"BMC156B", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(acpi, bmc150_magn_acpi_match);
+
+static struct spi_driver bmc150_magn_spi_driver = {
+ .probe = bmc150_magn_spi_probe,
+ .remove = bmc150_magn_spi_remove,
+ .id_table = bmc150_magn_spi_id,
+ .driver = {
+ .acpi_match_table = ACPI_PTR(bmc150_magn_acpi_match),
+ .name = "bmc150_magn_spi",
+ },
+};
+module_spi_driver(bmc150_magn_spi_driver);
+
+MODULE_AUTHOR("Daniel Baluta <daniel.baluta@intel.com");
+MODULE_DESCRIPTION("BMC150 magnetometer SPI driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/magnetometer/st_magn_core.c b/drivers/iio/magnetometer/st_magn_core.c
index 501f858df413..62036d2a9956 100644
--- a/drivers/iio/magnetometer/st_magn_core.c
+++ b/drivers/iio/magnetometer/st_magn_core.c
@@ -484,6 +484,7 @@ static const struct st_sensor_settings st_magn_sensors_settings[] = {
.mask_int1 = ST_MAGN_3_DRDY_INT_MASK,
.addr_ihl = ST_MAGN_3_IHL_IRQ_ADDR,
.mask_ihl = ST_MAGN_3_IHL_IRQ_MASK,
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
.multi_read_bit = ST_MAGN_3_MULTIREAD_BIT,
.bootime = 2,
diff --git a/drivers/iio/potentiometer/Kconfig b/drivers/iio/potentiometer/Kconfig
index ffc735c168fb..6acb23810bb4 100644
--- a/drivers/iio/potentiometer/Kconfig
+++ b/drivers/iio/potentiometer/Kconfig
@@ -5,6 +5,34 @@
menu "Digital potentiometers"
+config DS1803
+ tristate "Maxim Integrated DS1803 Digital Potentiometer driver"
+ depends on I2C
+ help
+ Say yes here to build support for the Maxim Integrated DS1803
+ digital potentiometer chip.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ds1803.
+
+config MCP4131
+ tristate "Microchip MCP413X/414X/415X/416X/423X/424X/425X/426X Digital Potentiometer driver"
+ depends on SPI
+ help
+ Say yes here to build support for the Microchip
+ MCP4131, MCP4132,
+ MCP4141, MCP4142,
+ MCP4151, MCP4152,
+ MCP4161, MCP4162,
+ MCP4231, MCP4232,
+ MCP4241, MCP4242,
+ MCP4251, MCP4252,
+ MCP4261, MCP4262,
+ digital potentiometer chips.
+
+ To compile this driver as a module, choose M here: the
+ module will be called mcp4131.
+
config MCP4531
tristate "Microchip MCP45xx/MCP46xx Digital Potentiometer driver"
depends on I2C
diff --git a/drivers/iio/potentiometer/Makefile b/drivers/iio/potentiometer/Makefile
index b563b492b486..6007faa2fb02 100644
--- a/drivers/iio/potentiometer/Makefile
+++ b/drivers/iio/potentiometer/Makefile
@@ -3,5 +3,7 @@
#
# When adding new entries keep the list in alphabetical order
+obj-$(CONFIG_DS1803) += ds1803.o
+obj-$(CONFIG_MCP4131) += mcp4131.o
obj-$(CONFIG_MCP4531) += mcp4531.o
obj-$(CONFIG_TPL0102) += tpl0102.o
diff --git a/drivers/iio/potentiometer/ds1803.c b/drivers/iio/potentiometer/ds1803.c
new file mode 100644
index 000000000000..fb9e2a337dc2
--- /dev/null
+++ b/drivers/iio/potentiometer/ds1803.c
@@ -0,0 +1,173 @@
+/*
+ * Maxim Integrated DS1803 digital potentiometer driver
+ * Copyright (c) 2016 Slawomir Stepien
+ *
+ * Datasheet: https://datasheets.maximintegrated.com/en/ds/DS1803.pdf
+ *
+ * DEVID #Wipers #Positions Resistor Opts (kOhm) i2c address
+ * ds1803 2 256 10, 50, 100 0101xxx
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/module.h>
+#include <linux/of.h>
+
+#define DS1803_MAX_POS 255
+#define DS1803_WRITE(chan) (0xa8 | ((chan) + 1))
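+/* write command byte: 0xa9 selects wiper 0, 0xaa selects wiper 1 */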
+
+enum ds1803_type {
+ DS1803_010,
+ DS1803_050,
+ DS1803_100,
+};
+
+struct ds1803_cfg {
+ int kohms;
+};
+
+static const struct ds1803_cfg ds1803_cfg[] = {
+ [DS1803_010] = { .kohms = 10, },
+ [DS1803_050] = { .kohms = 50, },
+ [DS1803_100] = { .kohms = 100, },
+};
+
+struct ds1803_data {
+ struct i2c_client *client;
+ const struct ds1803_cfg *cfg;
+};
+
+#define DS1803_CHANNEL(ch) { \
+ .type = IIO_RESISTANCE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = (ch), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+}
+
+static const struct iio_chan_spec ds1803_channels[] = {
+ DS1803_CHANNEL(0),
+ DS1803_CHANNEL(1),
+};
+
+static int ds1803_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct ds1803_data *data = iio_priv(indio_dev);
+ int pot = chan->channel;
+ int ret;
+ u8 result[indio_dev->num_channels];
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
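+ /* the chip returns one byte per wiper on a read; pick out the requested channel */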
+ ret = i2c_master_recv(data->client, result,
+ indio_dev->num_channels);
+ if (ret < 0)
+ return ret;
+
+ *val = result[pot];
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
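+ /* scale in ohms per step: full-scale resistance divided by the number of wiper positions */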
+ *val = 1000 * data->cfg->kohms;
+ *val2 = DS1803_MAX_POS;
+ return IIO_VAL_FRACTIONAL;
+ }
+
+ return -EINVAL;
+}
+
+static int ds1803_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct ds1803_data *data = iio_priv(indio_dev);
+ int pot = chan->channel;
+
+ if (val2 != 0)
+ return -EINVAL;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (val > DS1803_MAX_POS || val < 0)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return i2c_smbus_write_byte_data(data->client, DS1803_WRITE(pot), val);
+}
+
+static const struct iio_info ds1803_info = {
+ .read_raw = ds1803_read_raw,
+ .write_raw = ds1803_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int ds1803_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct ds1803_data *data;
+ struct iio_dev *indio_dev;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ i2c_set_clientdata(client, indio_dev);
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ data->cfg = &ds1803_cfg[id->driver_data];
+
+ indio_dev->dev.parent = dev;
+ indio_dev->info = &ds1803_info;
+ indio_dev->channels = ds1803_channels;
+ indio_dev->num_channels = ARRAY_SIZE(ds1803_channels);
+ indio_dev->name = client->name;
+
+ return devm_iio_device_register(dev, indio_dev);
+}
+
+#if defined(CONFIG_OF)
+static const struct of_device_id ds1803_dt_ids[] = {
+ { .compatible = "maxim,ds1803-010", .data = &ds1803_cfg[DS1803_010] },
+ { .compatible = "maxim,ds1803-050", .data = &ds1803_cfg[DS1803_050] },
+ { .compatible = "maxim,ds1803-100", .data = &ds1803_cfg[DS1803_100] },
+ {}
+};
+MODULE_DEVICE_TABLE(of, ds1803_dt_ids);
+#endif /* CONFIG_OF */
+
+static const struct i2c_device_id ds1803_id[] = {
+ { "ds1803-010", DS1803_010 },
+ { "ds1803-050", DS1803_050 },
+ { "ds1803-100", DS1803_100 },
+ {}
+};
+MODULE_DEVICE_TABLE(i2c, ds1803_id);
+
+static struct i2c_driver ds1803_driver = {
+ .driver = {
+ .name = "ds1803",
+ .of_match_table = of_match_ptr(ds1803_dt_ids),
+ },
+ .probe = ds1803_probe,
+ .id_table = ds1803_id,
+};
+
+module_i2c_driver(ds1803_driver);
+
+MODULE_AUTHOR("Slawomir Stepien <sst@poczta.fm>");
+MODULE_DESCRIPTION("DS1803 digital potentiometer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/potentiometer/mcp4131.c b/drivers/iio/potentiometer/mcp4131.c
new file mode 100644
index 000000000000..4e7e2c6c522c
--- /dev/null
+++ b/drivers/iio/potentiometer/mcp4131.c
@@ -0,0 +1,494 @@
+/*
+ * Industrial I/O driver for Microchip digital potentiometers
+ *
+ * Copyright (c) 2016 Slawomir Stepien
+ * Based on: Peter Rosin's code from mcp4531.c
+ *
+ * Datasheet: http://ww1.microchip.com/downloads/en/DeviceDoc/22060b.pdf
+ *
+ * DEVID #Wipers #Positions Resistor Opts (kOhm)
+ * mcp4131 1 129 5, 10, 50, 100
+ * mcp4132 1 129 5, 10, 50, 100
+ * mcp4141 1 129 5, 10, 50, 100
+ * mcp4142 1 129 5, 10, 50, 100
+ * mcp4151 1 257 5, 10, 50, 100
+ * mcp4152 1 257 5, 10, 50, 100
+ * mcp4161 1 257 5, 10, 50, 100
+ * mcp4162 1 257 5, 10, 50, 100
+ * mcp4231 2 129 5, 10, 50, 100
+ * mcp4232 2 129 5, 10, 50, 100
+ * mcp4241 2 129 5, 10, 50, 100
+ * mcp4242 2 129 5, 10, 50, 100
+ * mcp4251 2 257 5, 10, 50, 100
+ * mcp4252 2 257 5, 10, 50, 100
+ * mcp4261 2 257 5, 10, 50, 100
+ * mcp4262 2 257 5, 10, 50, 100
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+
+/*
+ * TODO:
+ * 1. Write wiper setting to EEPROM for EEPROM capable models.
+ */
+
+#include <linux/cache.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/types.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/of.h>
+#include <linux/spi/spi.h>
+
+#define MCP4131_WRITE (0x00 << 2)
+#define MCP4131_READ (0x03 << 2)
+
+#define MCP4131_WIPER_SHIFT 4
+#define MCP4131_CMDERR(r) ((r[0]) & 0x02)
+#define MCP4131_RAW(r) ((r[0]) == 0xff ? 0x100 : (r[1]))
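+/* a readback first byte of 0xff means the 9th data bit is set (wiper at 0x100); otherwise the low byte holds the value */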
+
+struct mcp4131_cfg {
+ int wipers;
+ int max_pos;
+ int kohms;
+};
+
+enum mcp4131_type {
+ MCP413x_502 = 0,
+ MCP413x_103,
+ MCP413x_503,
+ MCP413x_104,
+ MCP414x_502,
+ MCP414x_103,
+ MCP414x_503,
+ MCP414x_104,
+ MCP415x_502,
+ MCP415x_103,
+ MCP415x_503,
+ MCP415x_104,
+ MCP416x_502,
+ MCP416x_103,
+ MCP416x_503,
+ MCP416x_104,
+ MCP423x_502,
+ MCP423x_103,
+ MCP423x_503,
+ MCP423x_104,
+ MCP424x_502,
+ MCP424x_103,
+ MCP424x_503,
+ MCP424x_104,
+ MCP425x_502,
+ MCP425x_103,
+ MCP425x_503,
+ MCP425x_104,
+ MCP426x_502,
+ MCP426x_103,
+ MCP426x_503,
+ MCP426x_104,
+};
+
+static const struct mcp4131_cfg mcp4131_cfg[] = {
+ [MCP413x_502] = { .wipers = 1, .max_pos = 128, .kohms = 5, },
+ [MCP413x_103] = { .wipers = 1, .max_pos = 128, .kohms = 10, },
+ [MCP413x_503] = { .wipers = 1, .max_pos = 128, .kohms = 50, },
+ [MCP413x_104] = { .wipers = 1, .max_pos = 128, .kohms = 100, },
+ [MCP414x_502] = { .wipers = 1, .max_pos = 128, .kohms = 5, },
+ [MCP414x_103] = { .wipers = 1, .max_pos = 128, .kohms = 10, },
+ [MCP414x_503] = { .wipers = 1, .max_pos = 128, .kohms = 50, },
+ [MCP414x_104] = { .wipers = 1, .max_pos = 128, .kohms = 100, },
+ [MCP415x_502] = { .wipers = 1, .max_pos = 256, .kohms = 5, },
+ [MCP415x_103] = { .wipers = 1, .max_pos = 256, .kohms = 10, },
+ [MCP415x_503] = { .wipers = 1, .max_pos = 256, .kohms = 50, },
+ [MCP415x_104] = { .wipers = 1, .max_pos = 256, .kohms = 100, },
+ [MCP416x_502] = { .wipers = 1, .max_pos = 256, .kohms = 5, },
+ [MCP416x_103] = { .wipers = 1, .max_pos = 256, .kohms = 10, },
+ [MCP416x_503] = { .wipers = 1, .max_pos = 256, .kohms = 50, },
+ [MCP416x_104] = { .wipers = 1, .max_pos = 256, .kohms = 100, },
+ [MCP423x_502] = { .wipers = 2, .max_pos = 128, .kohms = 5, },
+ [MCP423x_103] = { .wipers = 2, .max_pos = 128, .kohms = 10, },
+ [MCP423x_503] = { .wipers = 2, .max_pos = 128, .kohms = 50, },
+ [MCP423x_104] = { .wipers = 2, .max_pos = 128, .kohms = 100, },
+ [MCP424x_502] = { .wipers = 2, .max_pos = 128, .kohms = 5, },
+ [MCP424x_103] = { .wipers = 2, .max_pos = 128, .kohms = 10, },
+ [MCP424x_503] = { .wipers = 2, .max_pos = 128, .kohms = 50, },
+ [MCP424x_104] = { .wipers = 2, .max_pos = 128, .kohms = 100, },
+ [MCP425x_502] = { .wipers = 2, .max_pos = 256, .kohms = 5, },
+ [MCP425x_103] = { .wipers = 2, .max_pos = 256, .kohms = 10, },
+ [MCP425x_503] = { .wipers = 2, .max_pos = 256, .kohms = 50, },
+ [MCP425x_104] = { .wipers = 2, .max_pos = 256, .kohms = 100, },
+ [MCP426x_502] = { .wipers = 2, .max_pos = 256, .kohms = 5, },
+ [MCP426x_103] = { .wipers = 2, .max_pos = 256, .kohms = 10, },
+ [MCP426x_503] = { .wipers = 2, .max_pos = 256, .kohms = 50, },
+ [MCP426x_104] = { .wipers = 2, .max_pos = 256, .kohms = 100, },
+};
+
+struct mcp4131_data {
+ struct spi_device *spi;
+ const struct mcp4131_cfg *cfg;
+ struct mutex lock;
+ u8 buf[2] ____cacheline_aligned;
+};
+
+#define MCP4131_CHANNEL(ch) { \
+ .type = IIO_RESISTANCE, \
+ .indexed = 1, \
+ .output = 1, \
+ .channel = (ch), \
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW), \
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE), \
+}
+
+static const struct iio_chan_spec mcp4131_channels[] = {
+ MCP4131_CHANNEL(0),
+ MCP4131_CHANNEL(1),
+};
+
+static int mcp4131_read(struct spi_device *spi, void *buf, size_t len)
+{
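+ /* full-duplex transfer: the command goes out while the reply lands in the same buffer */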
+ struct spi_transfer t = {
+ .tx_buf = buf, /* We need to send addr, cmd and 12 bits */
+ .rx_buf = buf,
+ .len = len,
+ };
+ struct spi_message m;
+
+ spi_message_init(&m);
+ spi_message_add_tail(&t, &m);
+
+ return spi_sync(spi, &m);
+}
+
+static int mcp4131_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ int err;
+ struct mcp4131_data *data = iio_priv(indio_dev);
+ int address = chan->channel;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ mutex_lock(&data->lock);
+
+ data->buf[0] = (address << MCP4131_WIPER_SHIFT) | MCP4131_READ;
+ data->buf[1] = 0;
+
+ err = mcp4131_read(data->spi, data->buf, 2);
+ if (err) {
+ mutex_unlock(&data->lock);
+ return err;
+ }
+
+ /* Error, bad address/command combination */
+ if (!MCP4131_CMDERR(data->buf)) {
+ mutex_unlock(&data->lock);
+ return -EIO;
+ }
+
+ *val = MCP4131_RAW(data->buf);
+ mutex_unlock(&data->lock);
+
+ return IIO_VAL_INT;
+
+ case IIO_CHAN_INFO_SCALE:
+ *val = 1000 * data->cfg->kohms;
+ *val2 = data->cfg->max_pos;
+ return IIO_VAL_FRACTIONAL;
+ }
+
+ return -EINVAL;
+}
+
+static int mcp4131_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ int err;
+ struct mcp4131_data *data = iio_priv(indio_dev);
+ int address = chan->channel << MCP4131_WIPER_SHIFT;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ if (val > data->cfg->max_pos || val < 0)
+ return -EINVAL;
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ mutex_lock(&data->lock);
+
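+ /* 16-bit frame: wiper address, write command and the top data bit in byte 0, low 8 data bits in byte 1 */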
+ data->buf[0] = address;
+ data->buf[0] |= MCP4131_WRITE | (val >> 8);
+ data->buf[1] = val & 0xFF; /* 8 bits here */
+
+ err = spi_write(data->spi, data->buf, 2);
+ mutex_unlock(&data->lock);
+
+ return err;
+}
+
+static const struct iio_info mcp4131_info = {
+ .read_raw = mcp4131_read_raw,
+ .write_raw = mcp4131_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int mcp4131_probe(struct spi_device *spi)
+{
+ int err;
+ struct device *dev = &spi->dev;
+ unsigned long devid = spi_get_device_id(spi)->driver_data;
+ struct mcp4131_data *data;
+ struct iio_dev *indio_dev;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ spi_set_drvdata(spi, indio_dev);
+ data->spi = spi;
+ data->cfg = &mcp4131_cfg[devid];
+
+ mutex_init(&data->lock);
+
+ indio_dev->dev.parent = dev;
+ indio_dev->info = &mcp4131_info;
+ indio_dev->channels = mcp4131_channels;
+ indio_dev->num_channels = data->cfg->wipers;
+ indio_dev->name = spi_get_device_id(spi)->name;
+
+ err = devm_iio_device_register(dev, indio_dev);
+ if (err) {
+ dev_info(&spi->dev, "Unable to register %s\n", indio_dev->name);
+ return err;
+ }
+
+ return 0;
+}
+
+#if defined(CONFIG_OF)
+static const struct of_device_id mcp4131_dt_ids[] = {
+ { .compatible = "microchip,mcp4131-502",
+ .data = &mcp4131_cfg[MCP413x_502] },
+ { .compatible = "microchip,mcp4131-103",
+ .data = &mcp4131_cfg[MCP413x_103] },
+ { .compatible = "microchip,mcp4131-503",
+ .data = &mcp4131_cfg[MCP413x_503] },
+ { .compatible = "microchip,mcp4131-104",
+ .data = &mcp4131_cfg[MCP413x_104] },
+ { .compatible = "microchip,mcp4132-502",
+ .data = &mcp4131_cfg[MCP413x_502] },
+ { .compatible = "microchip,mcp4132-103",
+ .data = &mcp4131_cfg[MCP413x_103] },
+ { .compatible = "microchip,mcp4132-503",
+ .data = &mcp4131_cfg[MCP413x_503] },
+ { .compatible = "microchip,mcp4132-104",
+ .data = &mcp4131_cfg[MCP413x_104] },
+ { .compatible = "microchip,mcp4141-502",
+ .data = &mcp4131_cfg[MCP414x_502] },
+ { .compatible = "microchip,mcp4141-103",
+ .data = &mcp4131_cfg[MCP414x_103] },
+ { .compatible = "microchip,mcp4141-503",
+ .data = &mcp4131_cfg[MCP414x_503] },
+ { .compatible = "microchip,mcp4141-104",
+ .data = &mcp4131_cfg[MCP414x_104] },
+ { .compatible = "microchip,mcp4142-502",
+ .data = &mcp4131_cfg[MCP414x_502] },
+ { .compatible = "microchip,mcp4142-103",
+ .data = &mcp4131_cfg[MCP414x_103] },
+ { .compatible = "microchip,mcp4142-503",
+ .data = &mcp4131_cfg[MCP414x_503] },
+ { .compatible = "microchip,mcp4142-104",
+ .data = &mcp4131_cfg[MCP414x_104] },
+ { .compatible = "microchip,mcp4151-502",
+ .data = &mcp4131_cfg[MCP415x_502] },
+ { .compatible = "microchip,mcp4151-103",
+ .data = &mcp4131_cfg[MCP415x_103] },
+ { .compatible = "microchip,mcp4151-503",
+ .data = &mcp4131_cfg[MCP415x_503] },
+ { .compatible = "microchip,mcp4151-104",
+ .data = &mcp4131_cfg[MCP415x_104] },
+ { .compatible = "microchip,mcp4152-502",
+ .data = &mcp4131_cfg[MCP415x_502] },
+ { .compatible = "microchip,mcp4152-103",
+ .data = &mcp4131_cfg[MCP415x_103] },
+ { .compatible = "microchip,mcp4152-503",
+ .data = &mcp4131_cfg[MCP415x_503] },
+ { .compatible = "microchip,mcp4152-104",
+ .data = &mcp4131_cfg[MCP415x_104] },
+ { .compatible = "microchip,mcp4161-502",
+ .data = &mcp4131_cfg[MCP416x_502] },
+ { .compatible = "microchip,mcp4161-103",
+ .data = &mcp4131_cfg[MCP416x_103] },
+ { .compatible = "microchip,mcp4161-503",
+ .data = &mcp4131_cfg[MCP416x_503] },
+ { .compatible = "microchip,mcp4161-104",
+ .data = &mcp4131_cfg[MCP416x_104] },
+ { .compatible = "microchip,mcp4162-502",
+ .data = &mcp4131_cfg[MCP416x_502] },
+ { .compatible = "microchip,mcp4162-103",
+ .data = &mcp4131_cfg[MCP416x_103] },
+ { .compatible = "microchip,mcp4162-503",
+ .data = &mcp4131_cfg[MCP416x_503] },
+ { .compatible = "microchip,mcp4162-104",
+ .data = &mcp4131_cfg[MCP416x_104] },
+ { .compatible = "microchip,mcp4231-502",
+ .data = &mcp4131_cfg[MCP423x_502] },
+ { .compatible = "microchip,mcp4231-103",
+ .data = &mcp4131_cfg[MCP423x_103] },
+ { .compatible = "microchip,mcp4231-503",
+ .data = &mcp4131_cfg[MCP423x_503] },
+ { .compatible = "microchip,mcp4231-104",
+ .data = &mcp4131_cfg[MCP423x_104] },
+ { .compatible = "microchip,mcp4232-502",
+ .data = &mcp4131_cfg[MCP423x_502] },
+ { .compatible = "microchip,mcp4232-103",
+ .data = &mcp4131_cfg[MCP423x_103] },
+ { .compatible = "microchip,mcp4232-503",
+ .data = &mcp4131_cfg[MCP423x_503] },
+ { .compatible = "microchip,mcp4232-104",
+ .data = &mcp4131_cfg[MCP423x_104] },
+ { .compatible = "microchip,mcp4241-502",
+ .data = &mcp4131_cfg[MCP424x_502] },
+ { .compatible = "microchip,mcp4241-103",
+ .data = &mcp4131_cfg[MCP424x_103] },
+ { .compatible = "microchip,mcp4241-503",
+ .data = &mcp4131_cfg[MCP424x_503] },
+ { .compatible = "microchip,mcp4241-104",
+ .data = &mcp4131_cfg[MCP424x_104] },
+ { .compatible = "microchip,mcp4242-502",
+ .data = &mcp4131_cfg[MCP424x_502] },
+ { .compatible = "microchip,mcp4242-103",
+ .data = &mcp4131_cfg[MCP424x_103] },
+ { .compatible = "microchip,mcp4242-503",
+ .data = &mcp4131_cfg[MCP424x_503] },
+ { .compatible = "microchip,mcp4242-104",
+ .data = &mcp4131_cfg[MCP424x_104] },
+ { .compatible = "microchip,mcp4251-502",
+ .data = &mcp4131_cfg[MCP425x_502] },
+ { .compatible = "microchip,mcp4251-103",
+ .data = &mcp4131_cfg[MCP425x_103] },
+ { .compatible = "microchip,mcp4251-503",
+ .data = &mcp4131_cfg[MCP425x_503] },
+ { .compatible = "microchip,mcp4251-104",
+ .data = &mcp4131_cfg[MCP425x_104] },
+ { .compatible = "microchip,mcp4252-502",
+ .data = &mcp4131_cfg[MCP425x_502] },
+ { .compatible = "microchip,mcp4252-103",
+ .data = &mcp4131_cfg[MCP425x_103] },
+ { .compatible = "microchip,mcp4252-503",
+ .data = &mcp4131_cfg[MCP425x_503] },
+ { .compatible = "microchip,mcp4252-104",
+ .data = &mcp4131_cfg[MCP425x_104] },
+ { .compatible = "microchip,mcp4261-502",
+ .data = &mcp4131_cfg[MCP426x_502] },
+ { .compatible = "microchip,mcp4261-103",
+ .data = &mcp4131_cfg[MCP426x_103] },
+ { .compatible = "microchip,mcp4261-503",
+ .data = &mcp4131_cfg[MCP426x_503] },
+ { .compatible = "microchip,mcp4261-104",
+ .data = &mcp4131_cfg[MCP426x_104] },
+ { .compatible = "microchip,mcp4262-502",
+ .data = &mcp4131_cfg[MCP426x_502] },
+ { .compatible = "microchip,mcp4262-103",
+ .data = &mcp4131_cfg[MCP426x_103] },
+ { .compatible = "microchip,mcp4262-503",
+ .data = &mcp4131_cfg[MCP426x_503] },
+ { .compatible = "microchip,mcp4262-104",
+ .data = &mcp4131_cfg[MCP426x_104] },
+ {}
+};
+MODULE_DEVICE_TABLE(of, mcp4131_dt_ids);
+#endif /* CONFIG_OF */
+
+static const struct spi_device_id mcp4131_id[] = {
+ { "mcp4131-502", MCP413x_502 },
+ { "mcp4131-103", MCP413x_103 },
+ { "mcp4131-503", MCP413x_503 },
+ { "mcp4131-104", MCP413x_104 },
+ { "mcp4132-502", MCP413x_502 },
+ { "mcp4132-103", MCP413x_103 },
+ { "mcp4132-503", MCP413x_503 },
+ { "mcp4132-104", MCP413x_104 },
+ { "mcp4141-502", MCP414x_502 },
+ { "mcp4141-103", MCP414x_103 },
+ { "mcp4141-503", MCP414x_503 },
+ { "mcp4141-104", MCP414x_104 },
+ { "mcp4142-502", MCP414x_502 },
+ { "mcp4142-103", MCP414x_103 },
+ { "mcp4142-503", MCP414x_503 },
+ { "mcp4142-104", MCP414x_104 },
+ { "mcp4151-502", MCP415x_502 },
+ { "mcp4151-103", MCP415x_103 },
+ { "mcp4151-503", MCP415x_503 },
+ { "mcp4151-104", MCP415x_104 },
+ { "mcp4152-502", MCP415x_502 },
+ { "mcp4152-103", MCP415x_103 },
+ { "mcp4152-503", MCP415x_503 },
+ { "mcp4152-104", MCP415x_104 },
+ { "mcp4161-502", MCP416x_502 },
+ { "mcp4161-103", MCP416x_103 },
+ { "mcp4161-503", MCP416x_503 },
+ { "mcp4161-104", MCP416x_104 },
+ { "mcp4162-502", MCP416x_502 },
+ { "mcp4162-103", MCP416x_103 },
+ { "mcp4162-503", MCP416x_503 },
+ { "mcp4162-104", MCP416x_104 },
+ { "mcp4231-502", MCP423x_502 },
+ { "mcp4231-103", MCP423x_103 },
+ { "mcp4231-503", MCP423x_503 },
+ { "mcp4231-104", MCP423x_104 },
+ { "mcp4232-502", MCP423x_502 },
+ { "mcp4232-103", MCP423x_103 },
+ { "mcp4232-503", MCP423x_503 },
+ { "mcp4232-104", MCP423x_104 },
+ { "mcp4241-502", MCP424x_502 },
+ { "mcp4241-103", MCP424x_103 },
+ { "mcp4241-503", MCP424x_503 },
+ { "mcp4241-104", MCP424x_104 },
+ { "mcp4242-502", MCP424x_502 },
+ { "mcp4242-103", MCP424x_103 },
+ { "mcp4242-503", MCP424x_503 },
+ { "mcp4242-104", MCP424x_104 },
+ { "mcp4251-502", MCP425x_502 },
+ { "mcp4251-103", MCP425x_103 },
+ { "mcp4251-503", MCP425x_503 },
+ { "mcp4251-104", MCP425x_104 },
+ { "mcp4252-502", MCP425x_502 },
+ { "mcp4252-103", MCP425x_103 },
+ { "mcp4252-503", MCP425x_503 },
+ { "mcp4252-104", MCP425x_104 },
+ { "mcp4261-502", MCP426x_502 },
+ { "mcp4261-103", MCP426x_103 },
+ { "mcp4261-503", MCP426x_503 },
+ { "mcp4261-104", MCP426x_104 },
+ { "mcp4262-502", MCP426x_502 },
+ { "mcp4262-103", MCP426x_103 },
+ { "mcp4262-503", MCP426x_503 },
+ { "mcp4262-104", MCP426x_104 },
+ {}
+};
+MODULE_DEVICE_TABLE(spi, mcp4131_id);
+
+static struct spi_driver mcp4131_driver = {
+ .driver = {
+ .name = "mcp4131",
+ .of_match_table = of_match_ptr(mcp4131_dt_ids),
+ },
+ .probe = mcp4131_probe,
+ .id_table = mcp4131_id,
+};
+
+module_spi_driver(mcp4131_driver);
+
+MODULE_AUTHOR("Slawomir Stepien <sst@poczta.fm>");
+MODULE_DESCRIPTION("MCP4131 digital potentiometer");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/potentiometer/mcp4531.c b/drivers/iio/potentiometer/mcp4531.c
index 0db67fe14766..3b72e1a595db 100644
--- a/drivers/iio/potentiometer/mcp4531.c
+++ b/drivers/iio/potentiometer/mcp4531.c
@@ -79,7 +79,7 @@ static const struct mcp4531_cfg mcp4531_cfg[] = {
struct mcp4531_data {
struct i2c_client *client;
- unsigned long devid;
+ const struct mcp4531_cfg *cfg;
};
#define MCP4531_CHANNEL(ch) { \
@@ -113,8 +113,8 @@ static int mcp4531_read_raw(struct iio_dev *indio_dev,
*val = ret;
return IIO_VAL_INT;
case IIO_CHAN_INFO_SCALE:
- *val = 1000 * mcp4531_cfg[data->devid].kohms;
- *val2 = mcp4531_cfg[data->devid].max_pos;
+ *val = 1000 * data->cfg->kohms;
+ *val2 = data->cfg->max_pos;
return IIO_VAL_FRACTIONAL;
}
@@ -130,7 +130,7 @@ static int mcp4531_write_raw(struct iio_dev *indio_dev,
switch (mask) {
case IIO_CHAN_INFO_RAW:
- if (val > mcp4531_cfg[data->devid].max_pos || val < 0)
+ if (val > data->cfg->max_pos || val < 0)
return -EINVAL;
break;
default:
@@ -152,7 +152,6 @@ static int mcp4531_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
struct device *dev = &client->dev;
- unsigned long devid = id->driver_data;
struct mcp4531_data *data;
struct iio_dev *indio_dev;
@@ -168,12 +167,12 @@ static int mcp4531_probe(struct i2c_client *client,
data = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
data->client = client;
- data->devid = devid;
+ data->cfg = &mcp4531_cfg[id->driver_data];
indio_dev->dev.parent = dev;
indio_dev->info = &mcp4531_info;
indio_dev->channels = mcp4531_channels;
- indio_dev->num_channels = mcp4531_cfg[devid].wipers;
+ indio_dev->num_channels = data->cfg->wipers;
indio_dev->name = client->name;
return devm_iio_device_register(dev, indio_dev);
diff --git a/drivers/iio/potentiometer/tpl0102.c b/drivers/iio/potentiometer/tpl0102.c
index 313124b6fd59..5c304d42d713 100644
--- a/drivers/iio/potentiometer/tpl0102.c
+++ b/drivers/iio/potentiometer/tpl0102.c
@@ -118,7 +118,7 @@ static int tpl0102_probe(struct i2c_client *client,
if (!i2c_check_functionality(client->adapter,
I2C_FUNC_SMBUS_WORD_DATA))
- return -ENOTSUPP;
+ return -EOPNOTSUPP;
indio_dev = devm_iio_device_alloc(dev, sizeof(*data));
if (!indio_dev)
diff --git a/drivers/iio/pressure/Kconfig b/drivers/iio/pressure/Kconfig
index 31c0e1fd2202..cda9f128f3a4 100644
--- a/drivers/iio/pressure/Kconfig
+++ b/drivers/iio/pressure/Kconfig
@@ -6,12 +6,13 @@
menu "Pressure sensors"
config BMP280
- tristate "Bosch Sensortec BMP280 pressure sensor driver"
+ tristate "Bosch Sensortec BMP180 and BMP280 pressure sensor driver"
depends on I2C
+ depends on !(BMP085_I2C=y || BMP085_I2C=m)
select REGMAP_I2C
help
- Say yes here to build support for Bosch Sensortec BMP280
- pressure and temperature sensor.
+ Say yes here to build support for Bosch Sensortec BMP180 and BMP280
+ pressure and temperature sensors.
To compile this driver as a module, choose M here: the module
will be called bmp280.
@@ -30,6 +31,17 @@ config HID_SENSOR_PRESS
To compile this driver as a module, choose M here: the module
will be called hid-sensor-press.
+config HP03
+ tristate "Hope RF HP03 temperature and pressure sensor driver"
+ depends on I2C
+ select REGMAP_I2C
+ help
+ Say yes here to build support for Hope RF HP03 pressure and
+ temperature sensor.
+
+ To compile this driver as a module, choose M here: the module
+ will be called hp03.
+
config MPL115
tristate
@@ -148,4 +160,14 @@ config T5403
To compile this driver as a module, choose M here: the module
will be called t5403.
+config HP206C
+ tristate "HOPERF HP206C precision barometer and altimeter sensor"
+ depends on I2C
+ help
+ Say yes here to build support for the HOPERF HP206C precision
+ barometer and altimeter sensor.
+
+ This driver can also be built as a module. If so, the module will
+ be called hp206c.
+
endmenu
diff --git a/drivers/iio/pressure/Makefile b/drivers/iio/pressure/Makefile
index d336af14f3fe..17d6e7afa1ff 100644
--- a/drivers/iio/pressure/Makefile
+++ b/drivers/iio/pressure/Makefile
@@ -5,6 +5,7 @@
# When adding new entries keep the list in alphabetical order
obj-$(CONFIG_BMP280) += bmp280.o
obj-$(CONFIG_HID_SENSOR_PRESS) += hid-sensor-press.o
+obj-$(CONFIG_HP03) += hp03.o
obj-$(CONFIG_MPL115) += mpl115.o
obj-$(CONFIG_MPL115_I2C) += mpl115_i2c.o
obj-$(CONFIG_MPL115_SPI) += mpl115_spi.o
@@ -17,6 +18,7 @@ obj-$(CONFIG_IIO_ST_PRESS) += st_pressure.o
st_pressure-y := st_pressure_core.o
st_pressure-$(CONFIG_IIO_BUFFER) += st_pressure_buffer.o
obj-$(CONFIG_T5403) += t5403.o
+obj-$(CONFIG_HP206C) += hp206c.o
obj-$(CONFIG_IIO_ST_PRESS_I2C) += st_pressure_i2c.o
obj-$(CONFIG_IIO_ST_PRESS_SPI) += st_pressure_spi.o
diff --git a/drivers/iio/pressure/bmp280.c b/drivers/iio/pressure/bmp280.c
index a2602d8dd6d5..2f1498e12bb2 100644
--- a/drivers/iio/pressure/bmp280.c
+++ b/drivers/iio/pressure/bmp280.c
@@ -1,12 +1,15 @@
/*
* Copyright (c) 2014 Intel Corporation
*
- * Driver for Bosch Sensortec BMP280 digital pressure sensor.
+ * Driver for Bosch Sensortec BMP180 and BMP280 digital pressure sensor.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
+ * Datasheet:
+ * https://ae-bst.resource.bosch.com/media/_tech/media/datasheets/BST-BMP180-DS000-121.pdf
+ * https://ae-bst.resource.bosch.com/media/_tech/media/datasheets/BST-BMP280-DS001-12.pdf
*/
#define pr_fmt(fmt) "bmp280: " fmt
@@ -15,9 +18,11 @@
#include <linux/i2c.h>
#include <linux/acpi.h>
#include <linux/regmap.h>
+#include <linux/delay.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
+/* BMP280 specific registers */
#define BMP280_REG_TEMP_XLSB 0xFC
#define BMP280_REG_TEMP_LSB 0xFB
#define BMP280_REG_TEMP_MSB 0xFA
@@ -26,10 +31,7 @@
#define BMP280_REG_PRESS_MSB 0xF7
#define BMP280_REG_CONFIG 0xF5
-#define BMP280_REG_CTRL_MEAS 0xF4
#define BMP280_REG_STATUS 0xF3
-#define BMP280_REG_RESET 0xE0
-#define BMP280_REG_ID 0xD0
#define BMP280_REG_COMP_TEMP_START 0x88
#define BMP280_COMP_TEMP_REG_COUNT 6
@@ -46,25 +48,49 @@
#define BMP280_OSRS_TEMP_MASK (BIT(7) | BIT(6) | BIT(5))
#define BMP280_OSRS_TEMP_SKIP 0
-#define BMP280_OSRS_TEMP_1X BIT(5)
-#define BMP280_OSRS_TEMP_2X BIT(6)
-#define BMP280_OSRS_TEMP_4X (BIT(6) | BIT(5))
-#define BMP280_OSRS_TEMP_8X BIT(7)
-#define BMP280_OSRS_TEMP_16X (BIT(7) | BIT(5))
+#define BMP280_OSRS_TEMP_X(osrs_t) ((osrs_t) << 5)
+#define BMP280_OSRS_TEMP_1X BMP280_OSRS_TEMP_X(1)
+#define BMP280_OSRS_TEMP_2X BMP280_OSRS_TEMP_X(2)
+#define BMP280_OSRS_TEMP_4X BMP280_OSRS_TEMP_X(3)
+#define BMP280_OSRS_TEMP_8X BMP280_OSRS_TEMP_X(4)
+#define BMP280_OSRS_TEMP_16X BMP280_OSRS_TEMP_X(5)
#define BMP280_OSRS_PRESS_MASK (BIT(4) | BIT(3) | BIT(2))
#define BMP280_OSRS_PRESS_SKIP 0
-#define BMP280_OSRS_PRESS_1X BIT(2)
-#define BMP280_OSRS_PRESS_2X BIT(3)
-#define BMP280_OSRS_PRESS_4X (BIT(3) | BIT(2))
-#define BMP280_OSRS_PRESS_8X BIT(4)
-#define BMP280_OSRS_PRESS_16X (BIT(4) | BIT(2))
+#define BMP280_OSRS_PRESS_X(osrs_p) ((osrs_p) << 2)
+#define BMP280_OSRS_PRESS_1X BMP280_OSRS_PRESS_X(1)
+#define BMP280_OSRS_PRESS_2X BMP280_OSRS_PRESS_X(2)
+#define BMP280_OSRS_PRESS_4X BMP280_OSRS_PRESS_X(3)
+#define BMP280_OSRS_PRESS_8X BMP280_OSRS_PRESS_X(4)
+#define BMP280_OSRS_PRESS_16X BMP280_OSRS_PRESS_X(5)
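+/*
+ * In both OSRS fields a register value of n selects 2^(n-1) times
+ * oversampling (1..5 => 1x..16x), while 0 skips the measurement.
+ */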
#define BMP280_MODE_MASK (BIT(1) | BIT(0))
#define BMP280_MODE_SLEEP 0
#define BMP280_MODE_FORCED BIT(0)
#define BMP280_MODE_NORMAL (BIT(1) | BIT(0))
+/* BMP180 specific registers */
+#define BMP180_REG_OUT_XLSB 0xF8
+#define BMP180_REG_OUT_LSB 0xF7
+#define BMP180_REG_OUT_MSB 0xF6
+
+#define BMP180_REG_CALIB_START 0xAA
+#define BMP180_REG_CALIB_COUNT 22
+
+#define BMP180_MEAS_SCO BIT(5)
+#define BMP180_MEAS_TEMP (0x0E | BMP180_MEAS_SCO)
+#define BMP180_MEAS_PRESS_X(oss) ((oss) << 6 | 0x14 | BMP180_MEAS_SCO)
+#define BMP180_MEAS_PRESS_1X BMP180_MEAS_PRESS_X(0)
+#define BMP180_MEAS_PRESS_2X BMP180_MEAS_PRESS_X(1)
+#define BMP180_MEAS_PRESS_4X BMP180_MEAS_PRESS_X(2)
+#define BMP180_MEAS_PRESS_8X BMP180_MEAS_PRESS_X(3)
+
+/* BMP180 and BMP280 common registers */
+#define BMP280_REG_CTRL_MEAS 0xF4
+#define BMP280_REG_RESET 0xE0
+#define BMP280_REG_ID 0xD0
+
+#define BMP180_CHIP_ID 0x55
#define BMP280_CHIP_ID 0x58
#define BMP280_SOFT_RESET_VAL 0xB6
@@ -72,6 +98,11 @@ struct bmp280_data {
struct i2c_client *client;
struct mutex lock;
struct regmap *regmap;
+ const struct bmp280_chip_info *chip_info;
+
+ /* log base 2 of the oversampling ratio */
+ u8 oversampling_press;
+ u8 oversampling_temp;
/*
* Carryover value from temperature conversion, used in pressure
@@ -80,9 +111,23 @@ struct bmp280_data {
s32 t_fine;
};
+struct bmp280_chip_info {
+ const struct regmap_config *regmap_config;
+
+ const int *oversampling_temp_avail;
+ int num_oversampling_temp_avail;
+
+ const int *oversampling_press_avail;
+ int num_oversampling_press_avail;
+
+ int (*chip_config)(struct bmp280_data *);
+ int (*read_temp)(struct bmp280_data *, int *);
+ int (*read_press)(struct bmp280_data *, int *, int *);
+};
+
/*
* These enums are used for indexing into the array of compensation
- * parameters.
+ * parameters for BMP280.
*/
enum { T1, T2, T3 };
enum { P1, P2, P3, P4, P5, P6, P7, P8, P9 };
@@ -90,11 +135,13 @@ enum { P1, P2, P3, P4, P5, P6, P7, P8, P9 };
static const struct iio_chan_spec bmp280_channels[] = {
{
.type = IIO_PRESSURE,
- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
},
{
.type = IIO_TEMP,
- .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+ .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
},
};
@@ -290,10 +337,25 @@ static int bmp280_read_raw(struct iio_dev *indio_dev,
case IIO_CHAN_INFO_PROCESSED:
switch (chan->type) {
case IIO_PRESSURE:
- ret = bmp280_read_press(data, val, val2);
+ ret = data->chip_info->read_press(data, val, val2);
break;
case IIO_TEMP:
- ret = bmp280_read_temp(data, val);
+ ret = data->chip_info->read_temp(data, val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ break;
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ switch (chan->type) {
+ case IIO_PRESSURE:
+ *val = 1 << data->oversampling_press;
+ ret = IIO_VAL_INT;
+ break;
+ case IIO_TEMP:
+ *val = 1 << data->oversampling_temp;
+ ret = IIO_VAL_INT;
break;
default:
ret = -EINVAL;
@@ -310,22 +372,135 @@ static int bmp280_read_raw(struct iio_dev *indio_dev,
return ret;
}
+static int bmp280_write_oversampling_ratio_temp(struct bmp280_data *data,
+ int val)
+{
+ int i;
+ const int *avail = data->chip_info->oversampling_temp_avail;
+ const int n = data->chip_info->num_oversampling_temp_avail;
+
+ for (i = 0; i < n; i++) {
+ if (avail[i] == val) {
+ data->oversampling_temp = ilog2(val);
+
+ return data->chip_info->chip_config(data);
+ }
+ }
+ return -EINVAL;
+}
+
+static int bmp280_write_oversampling_ratio_press(struct bmp280_data *data,
+ int val)
+{
+ int i;
+ const int *avail = data->chip_info->oversampling_press_avail;
+ const int n = data->chip_info->num_oversampling_press_avail;
+
+ for (i = 0; i < n; i++) {
+ if (avail[i] == val) {
+ data->oversampling_press = ilog2(val);
+
+ return data->chip_info->chip_config(data);
+ }
+ }
+ return -EINVAL;
+}
+
+static int bmp280_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ int ret = 0;
+ struct bmp280_data *data = iio_priv(indio_dev);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ mutex_lock(&data->lock);
+ switch (chan->type) {
+ case IIO_PRESSURE:
+ ret = bmp280_write_oversampling_ratio_press(data, val);
+ break;
+ case IIO_TEMP:
+ ret = bmp280_write_oversampling_ratio_temp(data, val);
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+ mutex_unlock(&data->lock);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return ret;
+}
+
+static ssize_t bmp280_show_avail(char *buf, const int *vals, const int n)
+{
+ size_t len = 0;
+ int i;
+
+ for (i = 0; i < n; i++)
+ len += scnprintf(buf + len, PAGE_SIZE - len, "%d ", vals[i]);
+
+ buf[len - 1] = '\n';
+
+ return len;
+}
+
+static ssize_t bmp280_show_temp_oversampling_avail(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bmp280_data *data = iio_priv(dev_to_iio_dev(dev));
+
+ return bmp280_show_avail(buf, data->chip_info->oversampling_temp_avail,
+ data->chip_info->num_oversampling_temp_avail);
+}
+
+static ssize_t bmp280_show_press_oversampling_avail(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct bmp280_data *data = iio_priv(dev_to_iio_dev(dev));
+
+ return bmp280_show_avail(buf, data->chip_info->oversampling_press_avail,
+ data->chip_info->num_oversampling_press_avail);
+}
+
+static IIO_DEVICE_ATTR(in_temp_oversampling_ratio_available,
+ S_IRUGO, bmp280_show_temp_oversampling_avail, NULL, 0);
+
+static IIO_DEVICE_ATTR(in_pressure_oversampling_ratio_available,
+ S_IRUGO, bmp280_show_press_oversampling_avail, NULL, 0);
+
+static struct attribute *bmp280_attributes[] = {
+ &iio_dev_attr_in_temp_oversampling_ratio_available.dev_attr.attr,
+ &iio_dev_attr_in_pressure_oversampling_ratio_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group bmp280_attrs_group = {
+ .attrs = bmp280_attributes,
+};
+
static const struct iio_info bmp280_info = {
.driver_module = THIS_MODULE,
.read_raw = &bmp280_read_raw,
+ .write_raw = &bmp280_write_raw,
+ .attrs = &bmp280_attrs_group,
};
-static int bmp280_chip_init(struct bmp280_data *data)
+static int bmp280_chip_config(struct bmp280_data *data)
{
int ret;
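+ /*
+ * oversampling_temp/press hold ilog2() of the ratio, while the
+ * OSRS register fields expect 1..5 for 1x..16x, hence the +1.
+ */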
+ u8 osrs = BMP280_OSRS_TEMP_X(data->oversampling_temp + 1) |
+ BMP280_OSRS_PRESS_X(data->oversampling_press + 1);
ret = regmap_update_bits(data->regmap, BMP280_REG_CTRL_MEAS,
BMP280_OSRS_TEMP_MASK |
BMP280_OSRS_PRESS_MASK |
BMP280_MODE_MASK,
- BMP280_OSRS_TEMP_2X |
- BMP280_OSRS_PRESS_16X |
- BMP280_MODE_NORMAL);
+ osrs | BMP280_MODE_NORMAL);
if (ret < 0) {
dev_err(&data->client->dev,
"failed to write ctrl_meas register\n");
@@ -344,6 +519,317 @@ static int bmp280_chip_init(struct bmp280_data *data)
return ret;
}
+static const int bmp280_oversampling_avail[] = { 1, 2, 4, 8, 16 };
+
+static const struct bmp280_chip_info bmp280_chip_info = {
+ .regmap_config = &bmp280_regmap_config,
+
+ .oversampling_temp_avail = bmp280_oversampling_avail,
+ .num_oversampling_temp_avail = ARRAY_SIZE(bmp280_oversampling_avail),
+
+ .oversampling_press_avail = bmp280_oversampling_avail,
+ .num_oversampling_press_avail = ARRAY_SIZE(bmp280_oversampling_avail),
+
+ .chip_config = bmp280_chip_config,
+ .read_temp = bmp280_read_temp,
+ .read_press = bmp280_read_press,
+};
+
+static bool bmp180_is_writeable_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case BMP280_REG_CTRL_MEAS:
+ case BMP280_REG_RESET:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static bool bmp180_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ switch (reg) {
+ case BMP180_REG_OUT_XLSB:
+ case BMP180_REG_OUT_LSB:
+ case BMP180_REG_OUT_MSB:
+ case BMP280_REG_CTRL_MEAS:
+ return true;
+ default:
+ return false;
+ }
+}
+
+static const struct regmap_config bmp180_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = BMP180_REG_OUT_XLSB,
+ .cache_type = REGCACHE_RBTREE,
+
+ .writeable_reg = bmp180_is_writeable_reg,
+ .volatile_reg = bmp180_is_volatile_reg,
+};
+
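+/*
+ * Measurement flow implemented below: write the requested command to
+ * ctrl_meas, sleep for the worst-case conversion time of the selected
+ * oversampling setting, then verify that the SCO (start of conversion)
+ * bit has been cleared by the chip.
+ */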
+static int bmp180_measure(struct bmp280_data *data, u8 ctrl_meas)
+{
+ int ret;
+ const int conversion_time_max[] = { 4500, 7500, 13500, 25500 };
+ unsigned int delay_us;
+ unsigned int ctrl;
+
+ ret = regmap_write(data->regmap, BMP280_REG_CTRL_MEAS, ctrl_meas);
+ if (ret)
+ return ret;
+
+ if (ctrl_meas == BMP180_MEAS_TEMP)
+ delay_us = 4500;
+ else
+ delay_us = conversion_time_max[data->oversampling_press];
+
+ usleep_range(delay_us, delay_us + 1000);
+
+ ret = regmap_read(data->regmap, BMP280_REG_CTRL_MEAS, &ctrl);
+ if (ret)
+ return ret;
+
+ /* The SCO bit is reset to "0" by the chip once the conversion is complete */
+ if (ctrl & BMP180_MEAS_SCO)
+ return -EIO;
+
+ return 0;
+}
+
+static int bmp180_read_adc_temp(struct bmp280_data *data, int *val)
+{
+ int ret;
+ __be16 tmp = 0;
+
+ ret = bmp180_measure(data, BMP180_MEAS_TEMP);
+ if (ret)
+ return ret;
+
+ ret = regmap_bulk_read(data->regmap, BMP180_REG_OUT_MSB, (u8 *)&tmp, 2);
+ if (ret)
+ return ret;
+
+ *val = be16_to_cpu(tmp);
+
+ return 0;
+}
+
+/*
+ * These enums are used for indexing into the array of calibration
+ * coefficients for BMP180.
+ */
+enum { AC1, AC2, AC3, AC4, AC5, AC6, B1, B2, MB, MC, MD };
+
+struct bmp180_calib {
+ s16 AC1;
+ s16 AC2;
+ s16 AC3;
+ u16 AC4;
+ u16 AC5;
+ u16 AC6;
+ s16 B1;
+ s16 B2;
+ s16 MB;
+ s16 MC;
+ s16 MD;
+};
+
+static int bmp180_read_calib(struct bmp280_data *data,
+ struct bmp180_calib *calib)
+{
+ int ret;
+ int i;
+ __be16 buf[BMP180_REG_CALIB_COUNT / 2];
+
+ ret = regmap_bulk_read(data->regmap, BMP180_REG_CALIB_START, buf,
+ sizeof(buf));
+
+ if (ret < 0)
+ return ret;
+
+ /* A valid calibration set never contains the words 0x0000 or 0xFFFF */
+ for (i = 0; i < ARRAY_SIZE(buf); i++) {
+ if (buf[i] == cpu_to_be16(0) || buf[i] == cpu_to_be16(0xffff))
+ return -EIO;
+ }
+
+ calib->AC1 = be16_to_cpu(buf[AC1]);
+ calib->AC2 = be16_to_cpu(buf[AC2]);
+ calib->AC3 = be16_to_cpu(buf[AC3]);
+ calib->AC4 = be16_to_cpu(buf[AC4]);
+ calib->AC5 = be16_to_cpu(buf[AC5]);
+ calib->AC6 = be16_to_cpu(buf[AC6]);
+ calib->B1 = be16_to_cpu(buf[B1]);
+ calib->B2 = be16_to_cpu(buf[B2]);
+ calib->MB = be16_to_cpu(buf[MB]);
+ calib->MC = be16_to_cpu(buf[MC]);
+ calib->MD = be16_to_cpu(buf[MD]);
+
+ return 0;
+}
+
+/*
+ * Returns temperature in DegC, resolution is 0.1 DegC.
+ * t_fine carries fine temperature as global value.
+ *
+ * Taken from datasheet, Section 3.5, "Calculating pressure and temperature".
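+ *
+ * For reference, the datasheet's worked example (UT = 27898 with the
+ * sample calibration data) evaluates to a return value of 150, i.e.
+ * 15.0 DegC.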
+ */
+static s32 bmp180_compensate_temp(struct bmp280_data *data, s32 adc_temp)
+{
+ int ret;
+ s32 x1, x2;
+ struct bmp180_calib calib;
+
+ ret = bmp180_read_calib(data, &calib);
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "failed to read calibration coefficients\n");
+ return ret;
+ }
+
+ x1 = ((adc_temp - calib.AC6) * calib.AC5) >> 15;
+ x2 = (calib.MC << 11) / (x1 + calib.MD);
+ data->t_fine = x1 + x2;
+
+ return (data->t_fine + 8) >> 4;
+}
+
+static int bmp180_read_temp(struct bmp280_data *data, int *val)
+{
+ int ret;
+ s32 adc_temp, comp_temp;
+
+ ret = bmp180_read_adc_temp(data, &adc_temp);
+ if (ret)
+ return ret;
+
+ comp_temp = bmp180_compensate_temp(data, adc_temp);
+
+ /*
+ * val may be NULL when we are called from the read_press routine,
+ * which only cares about the carried-over t_fine value.
+ */
+ if (val) {
+ *val = comp_temp * 100;
+ return IIO_VAL_INT;
+ }
+
+ return 0;
+}
+
+static int bmp180_read_adc_press(struct bmp280_data *data, int *val)
+{
+ int ret;
+ __be32 tmp = 0;
+ u8 oss = data->oversampling_press;
+
+ ret = bmp180_measure(data, BMP180_MEAS_PRESS_X(oss));
+ if (ret)
+ return ret;
+
+ ret = regmap_bulk_read(data->regmap, BMP180_REG_OUT_MSB, (u8 *)&tmp, 3);
+ if (ret)
+ return ret;
+
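+ /*
+ * The three bytes read land in the top 24 bits of the big-endian
+ * word, i.e. UP = (MSB << 16 | LSB << 8 | XLSB) >> (8 - oss) as in
+ * the datasheet.
+ */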
+ *val = (be32_to_cpu(tmp) >> 8) >> (8 - oss);
+
+ return 0;
+}
+
+/*
+ * Returns pressure in Pa, resolution is 1 Pa.
+ *
+ * Taken from datasheet, Section 3.5, "Calculating pressure and temperature".
+ */
+static u32 bmp180_compensate_press(struct bmp280_data *data, s32 adc_press)
+{
+ int ret;
+ s32 x1, x2, x3, p;
+ s32 b3, b6;
+ u32 b4, b7;
+ s32 oss = data->oversampling_press;
+ struct bmp180_calib calib;
+
+ ret = bmp180_read_calib(data, &calib);
+ if (ret < 0) {
+ dev_err(&data->client->dev,
+ "failed to read calibration coefficients\n");
+ return ret;
+ }
+
+ b6 = data->t_fine - 4000;
+ x1 = (calib.B2 * (b6 * b6 >> 12)) >> 11;
+ x2 = calib.AC2 * b6 >> 11;
+ x3 = x1 + x2;
+ b3 = ((((s32)calib.AC1 * 4 + x3) << oss) + 2) / 4;
+ x1 = calib.AC3 * b6 >> 13;
+ x2 = (calib.B1 * ((b6 * b6) >> 12)) >> 16;
+ x3 = (x1 + x2 + 2) >> 2;
+ b4 = calib.AC4 * (u32)(x3 + 32768) >> 15;
+ b7 = ((u32)adc_press - b3) * (50000 >> oss);
+ if (b7 < 0x80000000)
+ p = (b7 * 2) / b4;
+ else
+ p = (b7 / b4) * 2;
+
+ x1 = (p >> 8) * (p >> 8);
+ x1 = (x1 * 3038) >> 16;
+ x2 = (-7357 * p) >> 16;
+
+ return p + ((x1 + x2 + 3791) >> 4);
+}
+
+static int bmp180_read_press(struct bmp280_data *data,
+ int *val, int *val2)
+{
+ int ret;
+ s32 adc_press;
+ u32 comp_press;
+
+ /* Read and compensate temperature so we get a reading of t_fine. */
+ ret = bmp180_read_temp(data, NULL);
+ if (ret)
+ return ret;
+
+ ret = bmp180_read_adc_press(data, &adc_press);
+ if (ret)
+ return ret;
+
+ comp_press = bmp180_compensate_press(data, adc_press);
+
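+ /*
+ * comp_press is in Pa; reporting val/val2 as Pa/1000 via
+ * IIO_VAL_FRACTIONAL yields kPa, the IIO unit for pressure.
+ */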
+ *val = comp_press;
+ *val2 = 1000;
+
+ return IIO_VAL_FRACTIONAL;
+}
+
+static int bmp180_chip_config(struct bmp280_data *data)
+{
+ return 0;
+}
+
+static const int bmp180_oversampling_temp_avail[] = { 1 };
+static const int bmp180_oversampling_press_avail[] = { 1, 2, 4, 8 };
+
+static const struct bmp280_chip_info bmp180_chip_info = {
+ .regmap_config = &bmp180_regmap_config,
+
+ .oversampling_temp_avail = bmp180_oversampling_temp_avail,
+ .num_oversampling_temp_avail =
+ ARRAY_SIZE(bmp180_oversampling_temp_avail),
+
+ .oversampling_press_avail = bmp180_oversampling_press_avail,
+ .num_oversampling_press_avail =
+ ARRAY_SIZE(bmp180_oversampling_press_avail),
+
+ .chip_config = bmp180_chip_config,
+ .read_temp = bmp180_read_temp,
+ .read_press = bmp180_read_press,
+};
+
static int bmp280_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
@@ -367,7 +853,23 @@ static int bmp280_probe(struct i2c_client *client,
indio_dev->info = &bmp280_info;
indio_dev->modes = INDIO_DIRECT_MODE;
- data->regmap = devm_regmap_init_i2c(client, &bmp280_regmap_config);
+ switch (id->driver_data) {
+ case BMP180_CHIP_ID:
+ data->chip_info = &bmp180_chip_info;
+ data->oversampling_press = ilog2(8);
+ data->oversampling_temp = ilog2(1);
+ break;
+ case BMP280_CHIP_ID:
+ data->chip_info = &bmp280_chip_info;
+ data->oversampling_press = ilog2(16);
+ data->oversampling_temp = ilog2(2);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ data->regmap = devm_regmap_init_i2c(client,
+ data->chip_info->regmap_config);
if (IS_ERR(data->regmap)) {
dev_err(&client->dev, "failed to allocate register map\n");
return PTR_ERR(data->regmap);
@@ -376,13 +878,13 @@ static int bmp280_probe(struct i2c_client *client,
ret = regmap_read(data->regmap, BMP280_REG_ID, &chip_id);
if (ret < 0)
return ret;
- if (chip_id != BMP280_CHIP_ID) {
+ if (chip_id != id->driver_data) {
dev_err(&client->dev, "bad chip id. expected %x got %x\n",
BMP280_CHIP_ID, chip_id);
return -EINVAL;
}
- ret = bmp280_chip_init(data);
+ ret = data->chip_info->chip_config(data);
if (ret < 0)
return ret;
@@ -390,13 +892,17 @@ static int bmp280_probe(struct i2c_client *client,
}
static const struct acpi_device_id bmp280_acpi_match[] = {
- {"BMP0280", 0},
+ {"BMP0280", BMP280_CHIP_ID },
+ {"BMP0180", BMP180_CHIP_ID },
+ {"BMP0085", BMP180_CHIP_ID },
{ },
};
MODULE_DEVICE_TABLE(acpi, bmp280_acpi_match);
static const struct i2c_device_id bmp280_id[] = {
- {"bmp280", 0},
+ {"bmp280", BMP280_CHIP_ID },
+ {"bmp180", BMP180_CHIP_ID },
+ {"bmp085", BMP180_CHIP_ID },
{ },
};
MODULE_DEVICE_TABLE(i2c, bmp280_id);
@@ -412,5 +918,5 @@ static struct i2c_driver bmp280_driver = {
module_i2c_driver(bmp280_driver);
MODULE_AUTHOR("Vlad Dogaru <vlad.dogaru@intel.com>");
-MODULE_DESCRIPTION("Driver for Bosch Sensortec BMP280 pressure and temperature sensor");
+MODULE_DESCRIPTION("Driver for Bosch Sensortec BMP180/BMP280 pressure and temperature sensor");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/pressure/hp03.c b/drivers/iio/pressure/hp03.c
new file mode 100644
index 000000000000..ac76515d5d49
--- /dev/null
+++ b/drivers/iio/pressure/hp03.c
@@ -0,0 +1,312 @@
+/*
+ * Copyright (c) 2016 Marek Vasut <marex@denx.de>
+ *
+ * Driver for Hope RF HP03 digital temperature and pressure sensor.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define pr_fmt(fmt) "hp03: " fmt
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+
+/*
+ * The HP03 sensor occupies two fixed I2C addresses:
+ * 0x50 ... read-only EEPROM with calibration data
+ * 0x77 ... read-write ADC for pressure and temperature
+ */
+#define HP03_EEPROM_ADDR 0x50
+#define HP03_ADC_ADDR 0x77
+
+#define HP03_EEPROM_CX_OFFSET 0x10
+#define HP03_EEPROM_AB_OFFSET 0x1e
+#define HP03_EEPROM_CD_OFFSET 0x20
+
+#define HP03_ADC_WRITE_REG 0xff
+#define HP03_ADC_READ_REG 0xfd
+#define HP03_ADC_READ_PRESSURE 0xf0 /* D1 in datasheet */
+#define HP03_ADC_READ_TEMP 0xe8 /* D2 in datasheet */
+
+struct hp03_priv {
+ struct i2c_client *client;
+ struct mutex lock;
+ struct gpio_desc *xclr_gpio;
+
+ struct i2c_client *eeprom_client;
+ struct regmap *eeprom_regmap;
+
+ s32 pressure; /* Pa */
+ s32 temp; /* Deg. C */
+};
+
+static const struct iio_chan_spec hp03_channels[] = {
+ {
+ .type = IIO_PRESSURE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ },
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
+ .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
+ },
+};
+
+static bool hp03_is_writeable_reg(struct device *dev, unsigned int reg)
+{
+ return false;
+}
+
+static bool hp03_is_volatile_reg(struct device *dev, unsigned int reg)
+{
+ return false;
+}
+
+static const struct regmap_config hp03_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+
+ .max_register = HP03_EEPROM_CD_OFFSET + 1,
+ .cache_type = REGCACHE_RBTREE,
+
+ .writeable_reg = hp03_is_writeable_reg,
+ .volatile_reg = hp03_is_volatile_reg,
+};
+
+static int hp03_get_temp_pressure(struct hp03_priv *priv, const u8 reg)
+{
+ int ret;
+
+ ret = i2c_smbus_write_byte_data(priv->client, HP03_ADC_WRITE_REG, reg);
+ if (ret < 0)
+ return ret;
+
+ msleep(50); /* Wait for conversion to finish */
+
+ return i2c_smbus_read_word_data(priv->client, HP03_ADC_READ_REG);
+}
+
+static int hp03_update_temp_pressure(struct hp03_priv *priv)
+{
+ struct device *dev = &priv->client->dev;
+ u8 coefs[18];
+ u16 cx_val[7];
+ int ab_val, d1_val, d2_val, diff_val, dut, off, sens, x;
+ int i, ret;
+
+ /* Sample coefficients from EEPROM */
+ ret = regmap_bulk_read(priv->eeprom_regmap, HP03_EEPROM_CX_OFFSET,
+ coefs, sizeof(coefs));
+ if (ret < 0) {
+ dev_err(dev, "Failed to read EEPROM (reg=%02x)\n",
+ HP03_EEPROM_CX_OFFSET);
+ return ret;
+ }
+
+ /* Sample Temperature and Pressure */
+ gpiod_set_value_cansleep(priv->xclr_gpio, 1);
+
+ ret = hp03_get_temp_pressure(priv, HP03_ADC_READ_PRESSURE);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read pressure\n");
+ goto err_adc;
+ }
+ d1_val = ret;
+
+ ret = hp03_get_temp_pressure(priv, HP03_ADC_READ_TEMP);
+ if (ret < 0) {
+ dev_err(dev, "Failed to read temperature\n");
+ goto err_adc;
+ }
+ d2_val = ret;
+
+ gpiod_set_value_cansleep(priv->xclr_gpio, 0);
+
+ /* The Cx coefficients and Temp/Pressure values are MSB first. */
+ for (i = 0; i < 7; i++)
+ cx_val[i] = (coefs[2 * i] << 8) | (coefs[(2 * i) + 1] << 0);
+ d1_val = ((d1_val >> 8) & 0xff) | ((d1_val & 0xff) << 8);
+ d2_val = ((d2_val >> 8) & 0xff) | ((d2_val & 0xff) << 8);
+
+ /* Coefficient voodoo from the HP03 datasheet. */
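+ /*
+ * cx_val[0..6] correspond to the datasheet coefficients C1..C7;
+ * coefs[14..17] hold the per-device A, B, C and D correction values.
+ */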
+ if (d2_val >= cx_val[4])
+ ab_val = coefs[14]; /* A-value */
+ else
+ ab_val = coefs[15]; /* B-value */
+
+ diff_val = d2_val - cx_val[4];
+ dut = (ab_val * (diff_val >> 7) * (diff_val >> 7)) >> coefs[16];
+ dut = diff_val - dut;
+
+ off = (cx_val[1] + (((cx_val[3] - 1024) * dut) >> 14)) * 4;
+ sens = cx_val[0] + ((cx_val[2] * dut) >> 10);
+ x = ((sens * (d1_val - 7168)) >> 14) - off;
+
+ priv->pressure = ((x * 100) >> 5) + (cx_val[6] * 10);
+ priv->temp = 250 + ((dut * cx_val[5]) >> 16) - (dut >> coefs[17]);
+
+ return 0;
+
+err_adc:
+ gpiod_set_value_cansleep(priv->xclr_gpio, 0);
+ return ret;
+}
+
+static int hp03_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int *val, int *val2, long mask)
+{
+ struct hp03_priv *priv = iio_priv(indio_dev);
+ int ret;
+
+ mutex_lock(&priv->lock);
+ ret = hp03_update_temp_pressure(priv);
+ mutex_unlock(&priv->lock);
+
+ if (ret)
+ return ret;
+
+ switch (mask) {
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_PRESSURE:
+ *val = priv->pressure;
+ return IIO_VAL_INT;
+ case IIO_TEMP:
+ *val = priv->temp;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ break;
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_PRESSURE:
+ *val = 0;
+ *val2 = 1000;
+ return IIO_VAL_INT_PLUS_MICRO;
+ case IIO_TEMP:
+ *val = 10;
+ return IIO_VAL_INT;
+ default:
+ return -EINVAL;
+ }
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return -EINVAL;
+}
+
+static const struct iio_info hp03_info = {
+ .driver_module = THIS_MODULE,
+ .read_raw = &hp03_read_raw,
+};
+
+static int hp03_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct device *dev = &client->dev;
+ struct iio_dev *indio_dev;
+ struct hp03_priv *priv;
+ int ret;
+
+ indio_dev = devm_iio_device_alloc(dev, sizeof(*priv));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ priv = iio_priv(indio_dev);
+ priv->client = client;
+ mutex_init(&priv->lock);
+
+ indio_dev->dev.parent = dev;
+ indio_dev->name = id->name;
+ indio_dev->channels = hp03_channels;
+ indio_dev->num_channels = ARRAY_SIZE(hp03_channels);
+ indio_dev->info = &hp03_info;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+
+ priv->xclr_gpio = devm_gpiod_get_index(dev, "xclr", 0, GPIOD_OUT_HIGH);
+ if (IS_ERR(priv->xclr_gpio)) {
+ dev_err(dev, "Failed to claim XCLR GPIO\n");
+ ret = PTR_ERR(priv->xclr_gpio);
+ return ret;
+ }
+
+ /*
+ * Allocate another device for the on-sensor EEPROM,
+ * which has its own dedicated I2C address and contains
+ * the calibration constants for the sensor.
+ */
+ priv->eeprom_client = i2c_new_dummy(client->adapter, HP03_EEPROM_ADDR);
+ if (!priv->eeprom_client) {
+ dev_err(dev, "New EEPROM I2C device failed\n");
+ return -ENODEV;
+ }
+
+ priv->eeprom_regmap = regmap_init_i2c(priv->eeprom_client,
+ &hp03_regmap_config);
+ if (IS_ERR(priv->eeprom_regmap)) {
+ dev_err(dev, "Failed to allocate EEPROM regmap\n");
+ ret = PTR_ERR(priv->eeprom_regmap);
+ goto err_cleanup_eeprom_client;
+ }
+
+ ret = iio_device_register(indio_dev);
+ if (ret) {
+ dev_err(dev, "Failed to register IIO device\n");
+ goto err_cleanup_eeprom_regmap;
+ }
+
+ i2c_set_clientdata(client, indio_dev);
+
+ return 0;
+
+err_cleanup_eeprom_regmap:
+ regmap_exit(priv->eeprom_regmap);
+
+err_cleanup_eeprom_client:
+ i2c_unregister_device(priv->eeprom_client);
+ return ret;
+}
+
+static int hp03_remove(struct i2c_client *client)
+{
+ struct iio_dev *indio_dev = i2c_get_clientdata(client);
+ struct hp03_priv *priv = iio_priv(indio_dev);
+
+ iio_device_unregister(indio_dev);
+ regmap_exit(priv->eeprom_regmap);
+ i2c_unregister_device(priv->eeprom_client);
+
+ return 0;
+}
+
+static const struct i2c_device_id hp03_id[] = {
+ { "hp03", 0 },
+ { },
+};
+MODULE_DEVICE_TABLE(i2c, hp03_id);
+
+static struct i2c_driver hp03_driver = {
+ .driver = {
+ .name = "hp03",
+ },
+ .probe = hp03_probe,
+ .remove = hp03_remove,
+ .id_table = hp03_id,
+};
+module_i2c_driver(hp03_driver);
+
+MODULE_AUTHOR("Marek Vasut <marex@denx.de>");
+MODULE_DESCRIPTION("Driver for Hope RF HP03 pressure and temperature sensor");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/pressure/hp206c.c b/drivers/iio/pressure/hp206c.c
new file mode 100644
index 000000000000..90f2b6e4a920
--- /dev/null
+++ b/drivers/iio/pressure/hp206c.c
@@ -0,0 +1,426 @@
+/*
+ * hp206c.c - HOPERF HP206C precision barometer and altimeter sensor
+ *
+ * Copyright (c) 2016, Intel Corporation.
+ *
+ * This file is subject to the terms and conditions of version 2 of
+ * the GNU General Public License. See the file COPYING in the main
+ * directory of this archive for more details.
+ *
+ * (7-bit I2C slave address 0x76)
+ *
+ * Datasheet:
+ * http://www.hoperf.com/upload/sensor/HP206C_DataSheet_EN_V2.0.pdf
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/iio/iio.h>
+#include <linux/iio/sysfs.h>
+#include <linux/delay.h>
+#include <linux/util_macros.h>
+#include <linux/acpi.h>
+
+/* I2C commands: */
+#define HP206C_CMD_SOFT_RST 0x06
+
+#define HP206C_CMD_ADC_CVT 0x40
+
+#define HP206C_CMD_ADC_CVT_OSR_4096 0x00
+#define HP206C_CMD_ADC_CVT_OSR_2048 0x04
+#define HP206C_CMD_ADC_CVT_OSR_1024 0x08
+#define HP206C_CMD_ADC_CVT_OSR_512 0x0c
+#define HP206C_CMD_ADC_CVT_OSR_256 0x10
+#define HP206C_CMD_ADC_CVT_OSR_128 0x14
+
+#define HP206C_CMD_ADC_CVT_CHNL_PT 0x00
+#define HP206C_CMD_ADC_CVT_CHNL_T 0x02
+
+#define HP206C_CMD_READ_P 0x30
+#define HP206C_CMD_READ_T 0x32
+
+#define HP206C_CMD_READ_REG 0x80
+#define HP206C_CMD_WRITE_REG 0xc0
+
+#define HP206C_REG_INT_EN 0x0b
+#define HP206C_REG_INT_CFG 0x0c
+
+#define HP206C_REG_INT_SRC 0x0d
+#define HP206C_FLAG_DEV_RDY 0x40
+
+#define HP206C_REG_PARA 0x0f
+#define HP206C_FLAG_CMPS_EN 0x80
+
+/* Maximum spin for DEV_RDY */
+#define HP206C_MAX_DEV_RDY_WAIT_COUNT 20
+#define HP206C_DEV_RDY_WAIT_US 20000
+
+struct hp206c_data {
+ struct mutex mutex;
+ struct i2c_client *client;
+ int temp_osr_index;
+ int pres_osr_index;
+};
+
+struct hp206c_osr_setting {
+ u8 osr_mask;
+ unsigned int temp_conv_time_us;
+ unsigned int pres_conv_time_us;
+};
+
+/* Data from Table 5 in datasheet. */
+static const struct hp206c_osr_setting hp206c_osr_settings[] = {
+ { HP206C_CMD_ADC_CVT_OSR_4096, 65600, 131100 },
+ { HP206C_CMD_ADC_CVT_OSR_2048, 32800, 65600 },
+ { HP206C_CMD_ADC_CVT_OSR_1024, 16400, 32800 },
+ { HP206C_CMD_ADC_CVT_OSR_512, 8200, 16400 },
+ { HP206C_CMD_ADC_CVT_OSR_256, 4100, 8200 },
+ { HP206C_CMD_ADC_CVT_OSR_128, 2100, 4100 },
+};
+static const int hp206c_osr_rates[] = { 4096, 2048, 1024, 512, 256, 128 };
+static const char hp206c_osr_rates_str[] = "4096 2048 1024 512 256 128";
+
+static inline int hp206c_read_reg(struct i2c_client *client, u8 reg)
+{
+ return i2c_smbus_read_byte_data(client, HP206C_CMD_READ_REG | reg);
+}
+
+static inline int hp206c_write_reg(struct i2c_client *client, u8 reg, u8 val)
+{
+ return i2c_smbus_write_byte_data(client,
+ HP206C_CMD_WRITE_REG | reg, val);
+}
+
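+/*
+ * Conversion results are returned as three bytes carrying a 20-bit value;
+ * temperature readings are two's complement and are sign-extended by the
+ * caller.
+ */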
+static int hp206c_read_20bit(struct i2c_client *client, u8 cmd)
+{
+ int ret;
+ u8 values[3];
+
+ ret = i2c_smbus_read_i2c_block_data(client, cmd, 3, values);
+ if (ret < 0)
+ return ret;
+ if (ret != 3)
+ return -EIO;
+ return ((values[0] & 0xF) << 16) | (values[1] << 8) | (values[2]);
+}
+
+/* Poll until DEV_RDY is set (up to 20 attempts, ~20ms apart), or return an error. */
+static int hp206c_wait_dev_rdy(struct iio_dev *indio_dev)
+{
+ int ret;
+ int count = 0;
+ struct hp206c_data *data = iio_priv(indio_dev);
+ struct i2c_client *client = data->client;
+
+ while (++count <= HP206C_MAX_DEV_RDY_WAIT_COUNT) {
+ ret = hp206c_read_reg(client, HP206C_REG_INT_SRC);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev, "Failed READ_REG INT_SRC: %d\n", ret);
+ return ret;
+ }
+ if (ret & HP206C_FLAG_DEV_RDY)
+ return 0;
+ usleep_range(HP206C_DEV_RDY_WAIT_US, HP206C_DEV_RDY_WAIT_US * 3 / 2);
+ }
+ return -ETIMEDOUT;
+}
+
+static int hp206c_set_compensation(struct i2c_client *client, bool enabled)
+{
+ int val;
+
+ val = hp206c_read_reg(client, HP206C_REG_PARA);
+ if (val < 0)
+ return val;
+ if (enabled)
+ val |= HP206C_FLAG_CMPS_EN;
+ else
+ val &= ~HP206C_FLAG_CMPS_EN;
+
+ return hp206c_write_reg(client, HP206C_REG_PARA, val);
+}
+
+/* Do a soft reset */
+static int hp206c_soft_reset(struct iio_dev *indio_dev)
+{
+ int ret;
+ struct hp206c_data *data = iio_priv(indio_dev);
+ struct i2c_client *client = data->client;
+
+ ret = i2c_smbus_write_byte(client, HP206C_CMD_SOFT_RST);
+ if (ret) {
+ dev_err(&client->dev, "Failed to reset device: %d\n", ret);
+ return ret;
+ }
+
+ usleep_range(400, 600);
+
+ ret = hp206c_wait_dev_rdy(indio_dev);
+ if (ret) {
+ dev_err(&client->dev, "Device not ready after soft reset: %d\n", ret);
+ return ret;
+ }
+
+ ret = hp206c_set_compensation(client, true);
+ if (ret)
+ dev_err(&client->dev, "Failed to enable compensation: %d\n", ret);
+ return ret;
+}
+
+static int hp206c_conv_and_read(struct iio_dev *indio_dev,
+ u8 conv_cmd, u8 read_cmd,
+ unsigned int sleep_us)
+{
+ int ret;
+ struct hp206c_data *data = iio_priv(indio_dev);
+ struct i2c_client *client = data->client;
+
+ ret = hp206c_wait_dev_rdy(indio_dev);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev, "Device not ready: %d\n", ret);
+ return ret;
+ }
+
+ ret = i2c_smbus_write_byte(client, conv_cmd);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev, "Failed convert: %d\n", ret);
+ return ret;
+ }
+
+ usleep_range(sleep_us, sleep_us * 3 / 2);
+
+ ret = hp206c_wait_dev_rdy(indio_dev);
+ if (ret < 0) {
+ dev_err(&indio_dev->dev, "Device not ready: %d\n", ret);
+ return ret;
+ }
+
+ ret = hp206c_read_20bit(client, read_cmd);
+ if (ret < 0)
+ dev_err(&indio_dev->dev, "Failed read: %d\n", ret);
+
+ return ret;
+}
+
+static int hp206c_read_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan, int *val,
+ int *val2, long mask)
+{
+ int ret;
+ struct hp206c_data *data = iio_priv(indio_dev);
+ const struct hp206c_osr_setting *osr_setting;
+ u8 conv_cmd;
+
+ mutex_lock(&data->mutex);
+
+ switch (mask) {
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ switch (chan->type) {
+ case IIO_TEMP:
+ *val = hp206c_osr_rates[data->temp_osr_index];
+ ret = IIO_VAL_INT;
+ break;
+
+ case IIO_PRESSURE:
+ *val = hp206c_osr_rates[data->pres_osr_index];
+ ret = IIO_VAL_INT;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ break;
+
+ case IIO_CHAN_INFO_RAW:
+ switch (chan->type) {
+ case IIO_TEMP:
+ osr_setting = &hp206c_osr_settings[data->temp_osr_index];
+ conv_cmd = HP206C_CMD_ADC_CVT |
+ osr_setting->osr_mask |
+ HP206C_CMD_ADC_CVT_CHNL_T;
+ ret = hp206c_conv_and_read(indio_dev,
+ conv_cmd,
+ HP206C_CMD_READ_T,
+ osr_setting->temp_conv_time_us);
+ if (ret >= 0) {
+ /* 20 significant bits are provided.
+ * Extend sign over the rest.
+ */
+ *val = sign_extend32(ret, 19);
+ ret = IIO_VAL_INT;
+ }
+ break;
+
+ case IIO_PRESSURE:
+ osr_setting = &hp206c_osr_settings[data->pres_osr_index];
+ conv_cmd = HP206C_CMD_ADC_CVT |
+ osr_setting->osr_mask |
+ HP206C_CMD_ADC_CVT_CHNL_PT;
+ ret = hp206c_conv_and_read(indio_dev,
+ conv_cmd,
+ HP206C_CMD_READ_P,
+ osr_setting->pres_conv_time_us);
+ if (ret >= 0) {
+ *val = ret;
+ ret = IIO_VAL_INT;
+ }
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ break;
+
+ case IIO_CHAN_INFO_SCALE:
+ switch (chan->type) {
+ case IIO_TEMP:
+ *val = 0;
+ *val2 = 10000;
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+
+ case IIO_PRESSURE:
+ *val = 0;
+ *val2 = 1000;
+ ret = IIO_VAL_INT_PLUS_MICRO;
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ break;
+
+ default:
+ ret = -EINVAL;
+ }
+
+ mutex_unlock(&data->mutex);
+ return ret;
+}
+
+static int hp206c_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ int ret = 0;
+ struct hp206c_data *data = iio_priv(indio_dev);
+
+ if (mask != IIO_CHAN_INFO_OVERSAMPLING_RATIO)
+ return -EINVAL;
+ mutex_lock(&data->mutex);
+ switch (chan->type) {
+ case IIO_TEMP:
+ data->temp_osr_index = find_closest_descending(val,
+ hp206c_osr_rates, ARRAY_SIZE(hp206c_osr_rates));
+ break;
+ case IIO_PRESSURE:
+ data->pres_osr_index = find_closest_descending(val,
+ hp206c_osr_rates, ARRAY_SIZE(hp206c_osr_rates));
+ break;
+ default:
+ ret = -EINVAL;
+ }
+ mutex_unlock(&data->mutex);
+ return ret;
+}
+
+static const struct iio_chan_spec hp206c_channels[] = {
+ {
+ .type = IIO_TEMP,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ },
+ {
+ .type = IIO_PRESSURE,
+ .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
+ }
+};
+
+static IIO_CONST_ATTR_SAMP_FREQ_AVAIL(hp206c_osr_rates_str);
+
+static struct attribute *hp206c_attributes[] = {
+ &iio_const_attr_sampling_frequency_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group hp206c_attribute_group = {
+ .attrs = hp206c_attributes,
+};
+
+static const struct iio_info hp206c_info = {
+ .attrs = &hp206c_attribute_group,
+ .read_raw = hp206c_read_raw,
+ .write_raw = hp206c_write_raw,
+ .driver_module = THIS_MODULE,
+};
+
+static int hp206c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct iio_dev *indio_dev;
+ struct hp206c_data *data;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter,
+ I2C_FUNC_SMBUS_BYTE |
+ I2C_FUNC_SMBUS_BYTE_DATA |
+ I2C_FUNC_SMBUS_READ_I2C_BLOCK)) {
+ dev_err(&client->dev, "Adapter does not support "
+ "all required i2c functionality\n");
+ return -ENODEV;
+ }
+
+ indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*data));
+ if (!indio_dev)
+ return -ENOMEM;
+
+ data = iio_priv(indio_dev);
+ data->client = client;
+ mutex_init(&data->mutex);
+
+ indio_dev->info = &hp206c_info;
+ indio_dev->name = id->name;
+ indio_dev->dev.parent = &client->dev;
+ indio_dev->modes = INDIO_DIRECT_MODE;
+ indio_dev->channels = hp206c_channels;
+ indio_dev->num_channels = ARRAY_SIZE(hp206c_channels);
+
+ i2c_set_clientdata(client, indio_dev);
+
+ /* Do a soft reset on probe */
+ ret = hp206c_soft_reset(indio_dev);
+ if (ret) {
+ dev_err(&client->dev, "Failed to reset on startup: %d\n", ret);
+ return -ENODEV;
+ }
+
+ return devm_iio_device_register(&client->dev, indio_dev);
+}
+
+static const struct i2c_device_id hp206c_id[] = {
+ {"hp206c"},
+ {}
+};
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id hp206c_acpi_match[] = {
+ {"HOP206C", 0},
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, hp206c_acpi_match);
+#endif
+
+static struct i2c_driver hp206c_driver = {
+ .probe = hp206c_probe,
+ .id_table = hp206c_id,
+ .driver = {
+ .name = "hp206c",
+ .acpi_match_table = ACPI_PTR(hp206c_acpi_match),
+ },
+};
+
+module_i2c_driver(hp206c_driver);
+
+MODULE_DESCRIPTION("HOPERF HP206C precision barometer and altimeter sensor");
+MODULE_AUTHOR("Leonard Crestez <leonard.crestez@intel.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/iio/pressure/ms5611.h b/drivers/iio/pressure/ms5611.h
index 8b08e4b7e3a9..ccda63c5b3c3 100644
--- a/drivers/iio/pressure/ms5611.h
+++ b/drivers/iio/pressure/ms5611.h
@@ -16,15 +16,11 @@
#include <linux/iio/iio.h>
#include <linux/mutex.h>
+struct regulator;
+
#define MS5611_RESET 0x1e
#define MS5611_READ_ADC 0x00
#define MS5611_READ_PROM_WORD 0xA0
-#define MS5611_START_TEMP_CONV 0x58
-#define MS5611_START_PRESSURE_CONV 0x48
-
-#define MS5611_CONV_TIME_MIN 9040
-#define MS5611_CONV_TIME_MAX 10000
-
#define MS5611_PROM_WORDS_NB 8
enum {
@@ -39,16 +35,31 @@ struct ms5611_chip_info {
s32 *temp, s32 *pressure);
};
+/*
+ * OverSampling Rate descriptor.
+ * Warning: cmd MUST be kept aligned on a word boundary (see
+ * ms5611_spi_read_adc_temp_and_pressure() in ms5611_spi.c).
+ */
+struct ms5611_osr {
+ unsigned long conv_usec;
+ u8 cmd;
+ unsigned short rate;
+};
+
struct ms5611_state {
void *client;
struct mutex lock;
+ const struct ms5611_osr *pressure_osr;
+ const struct ms5611_osr *temp_osr;
+
int (*reset)(struct device *dev);
int (*read_prom_word)(struct device *dev, int index, u16 *word);
int (*read_adc_temp_and_pressure)(struct device *dev,
s32 *temp, s32 *pressure);
struct ms5611_chip_info *chip_info;
+ struct regulator *vdd;
};
int ms5611_probe(struct iio_dev *indio_dev, struct device *dev,
diff --git a/drivers/iio/pressure/ms5611_core.c b/drivers/iio/pressure/ms5611_core.c
index 992ad8d3b67a..76578b07bb6e 100644
--- a/drivers/iio/pressure/ms5611_core.c
+++ b/drivers/iio/pressure/ms5611_core.c
@@ -18,11 +18,44 @@
#include <linux/delay.h>
#include <linux/regulator/consumer.h>
+#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/triggered_buffer.h>
#include <linux/iio/trigger_consumer.h>
#include "ms5611.h"
+#define MS5611_INIT_OSR(_cmd, _conv_usec, _rate) \
+ { .cmd = _cmd, .conv_usec = _conv_usec, .rate = _rate }
+
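+/*
+ * Conversion-start commands and worst-case conversion times per OSR, as
+ * listed in the datasheet: 0x40..0x48 start a D1 (pressure) conversion and
+ * 0x50..0x58 a D2 (temperature) conversion, for OSR 256..4096.
+ */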
+static const struct ms5611_osr ms5611_avail_pressure_osr[] = {
+ MS5611_INIT_OSR(0x40, 600, 256),
+ MS5611_INIT_OSR(0x42, 1170, 512),
+ MS5611_INIT_OSR(0x44, 2280, 1024),
+ MS5611_INIT_OSR(0x46, 4540, 2048),
+ MS5611_INIT_OSR(0x48, 9040, 4096)
+};
+
+static const struct ms5611_osr ms5611_avail_temp_osr[] = {
+ MS5611_INIT_OSR(0x50, 600, 256),
+ MS5611_INIT_OSR(0x52, 1170, 512),
+ MS5611_INIT_OSR(0x54, 2280, 1024),
+ MS5611_INIT_OSR(0x56, 4540, 2048),
+ MS5611_INIT_OSR(0x58, 9040, 4096)
+};
+
+static const char ms5611_show_osr[] = "256 512 1024 2048 4096";
+
+static IIO_CONST_ATTR(oversampling_ratio_available, ms5611_show_osr);
+
+static struct attribute *ms5611_attributes[] = {
+ &iio_const_attr_oversampling_ratio_available.dev_attr.attr,
+ NULL,
+};
+
+static const struct attribute_group ms5611_attribute_group = {
+ .attrs = ms5611_attributes,
+};
+
static bool ms5611_prom_is_valid(u16 *prom, size_t len)
{
int i, j;
@@ -239,11 +272,70 @@ static int ms5611_read_raw(struct iio_dev *indio_dev,
default:
return -EINVAL;
}
+ case IIO_CHAN_INFO_OVERSAMPLING_RATIO:
+ if (chan->type != IIO_TEMP && chan->type != IIO_PRESSURE)
+ break;
+ mutex_lock(&st->lock);
+ if (chan->type == IIO_TEMP)
+ *val = (int)st->temp_osr->rate;
+ else
+ *val = (int)st->pressure_osr->rate;
+ mutex_unlock(&st->lock);
+ return IIO_VAL_INT;
}
return -EINVAL;
}
+static const struct ms5611_osr *ms5611_find_osr(int rate,
+ const struct ms5611_osr *osr,
+ size_t count)
+{
+ unsigned int r;
+
+ for (r = 0; r < count; r++)
+ if ((unsigned short)rate == osr[r].rate)
+ break;
+ if (r >= count)
+ return NULL;
+ return &osr[r];
+}
+
+static int ms5611_write_raw(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ int val, int val2, long mask)
+{
+ struct ms5611_state *st = iio_priv(indio_dev);
+ const struct ms5611_osr *osr = NULL;
+
+ if (mask != IIO_CHAN_INFO_OVERSAMPLING_RATIO)
+ return -EINVAL;
+
+ if (chan->type == IIO_TEMP)
+ osr = ms5611_find_osr(val, ms5611_avail_temp_osr,
+ ARRAY_SIZE(ms5611_avail_temp_osr));
+ else if (chan->type == IIO_PRESSURE)
+ osr = ms5611_find_osr(val, ms5611_avail_pressure_osr,
+ ARRAY_SIZE(ms5611_avail_pressure_osr));
+ if (!osr)
+ return -EINVAL;
+
+ mutex_lock(&st->lock);
+
+ if (iio_buffer_enabled(indio_dev)) {
+ mutex_unlock(&st->lock);
+ return -EBUSY;
+ }
+
+ if (chan->type == IIO_TEMP)
+ st->temp_osr = osr;
+ else
+ st->pressure_osr = osr;
+
+ mutex_unlock(&st->lock);
+ return 0;
+}
+
static const unsigned long ms5611_scan_masks[] = {0x3, 0};
static struct ms5611_chip_info chip_info_tbl[] = {
@@ -259,7 +351,8 @@ static const struct iio_chan_spec ms5611_channels[] = {
{
.type = IIO_PRESSURE,
.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
- BIT(IIO_CHAN_INFO_SCALE),
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
.scan_index = 0,
.scan_type = {
.sign = 's',
@@ -271,7 +364,8 @@ static const struct iio_chan_spec ms5611_channels[] = {
{
.type = IIO_TEMP,
.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED) |
- BIT(IIO_CHAN_INFO_SCALE),
+ BIT(IIO_CHAN_INFO_SCALE) |
+ BIT(IIO_CHAN_INFO_OVERSAMPLING_RATIO),
.scan_index = 1,
.scan_type = {
.sign = 's',
@@ -285,40 +379,68 @@ static const struct iio_chan_spec ms5611_channels[] = {
static const struct iio_info ms5611_info = {
.read_raw = &ms5611_read_raw,
+ .write_raw = &ms5611_write_raw,
+ .attrs = &ms5611_attribute_group,
.driver_module = THIS_MODULE,
};
static int ms5611_init(struct iio_dev *indio_dev)
{
int ret;
- struct regulator *vdd = devm_regulator_get(indio_dev->dev.parent,
- "vdd");
+ struct ms5611_state *st = iio_priv(indio_dev);
/* Enable attached regulator if any. */
- if (!IS_ERR(vdd)) {
- ret = regulator_enable(vdd);
+ st->vdd = devm_regulator_get(indio_dev->dev.parent, "vdd");
+ if (!IS_ERR(st->vdd)) {
+ ret = regulator_enable(st->vdd);
if (ret) {
dev_err(indio_dev->dev.parent,
- "failed to enable Vdd supply: %d\n", ret);
+ "failed to enable Vdd supply: %d\n", ret);
return ret;
}
+ } else {
+ ret = PTR_ERR(st->vdd);
+ if (ret != -ENODEV)
+ return ret;
}
ret = ms5611_reset(indio_dev);
if (ret < 0)
- return ret;
+ goto err_regulator_disable;
- return ms5611_read_prom(indio_dev);
+ ret = ms5611_read_prom(indio_dev);
+ if (ret < 0)
+ goto err_regulator_disable;
+
+ return 0;
+
+err_regulator_disable:
+ if (!IS_ERR_OR_NULL(st->vdd))
+ regulator_disable(st->vdd);
+ return ret;
+}
+
+static void ms5611_fini(const struct iio_dev *indio_dev)
+{
+ const struct ms5611_state *st = iio_priv(indio_dev);
+
+ if (!IS_ERR_OR_NULL(st->vdd))
+ regulator_disable(st->vdd);
}
int ms5611_probe(struct iio_dev *indio_dev, struct device *dev,
- const char *name, int type)
+ const char *name, int type)
{
int ret;
struct ms5611_state *st = iio_priv(indio_dev);
mutex_init(&st->lock);
st->chip_info = &chip_info_tbl[type];
+ st->temp_osr =
+ &ms5611_avail_temp_osr[ARRAY_SIZE(ms5611_avail_temp_osr) - 1];
+ st->pressure_osr =
+ &ms5611_avail_pressure_osr[ARRAY_SIZE(ms5611_avail_pressure_osr)
+ - 1];
indio_dev->dev.parent = dev;
indio_dev->name = name;
indio_dev->info = &ms5611_info;
@@ -335,7 +457,7 @@ int ms5611_probe(struct iio_dev *indio_dev, struct device *dev,
ms5611_trigger_handler, NULL);
if (ret < 0) {
dev_err(dev, "iio triggered buffer setup failed\n");
- return ret;
+ goto err_fini;
}
ret = iio_device_register(indio_dev);
@@ -348,7 +470,8 @@ int ms5611_probe(struct iio_dev *indio_dev, struct device *dev,
err_buffer_cleanup:
iio_triggered_buffer_cleanup(indio_dev);
-
+err_fini:
+ ms5611_fini(indio_dev);
return ret;
}
EXPORT_SYMBOL(ms5611_probe);
@@ -357,6 +480,7 @@ int ms5611_remove(struct iio_dev *indio_dev)
{
iio_device_unregister(indio_dev);
iio_triggered_buffer_cleanup(indio_dev);
+ ms5611_fini(indio_dev);
return 0;
}
diff --git a/drivers/iio/pressure/ms5611_i2c.c b/drivers/iio/pressure/ms5611_i2c.c
index 7f6fc8eee922..55fb5fc0b6ea 100644
--- a/drivers/iio/pressure/ms5611_i2c.c
+++ b/drivers/iio/pressure/ms5611_i2c.c
@@ -17,6 +17,7 @@
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include "ms5611.h"
@@ -62,23 +63,23 @@ static int ms5611_i2c_read_adc_temp_and_pressure(struct device *dev,
{
int ret;
struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
+ const struct ms5611_osr *osr = st->temp_osr;
- ret = i2c_smbus_write_byte(st->client, MS5611_START_TEMP_CONV);
+ ret = i2c_smbus_write_byte(st->client, osr->cmd);
if (ret < 0)
return ret;
- usleep_range(MS5611_CONV_TIME_MIN, MS5611_CONV_TIME_MAX);
-
+ usleep_range(osr->conv_usec, osr->conv_usec + (osr->conv_usec / 10UL));
ret = ms5611_i2c_read_adc(st, temp);
if (ret < 0)
return ret;
- ret = i2c_smbus_write_byte(st->client, MS5611_START_PRESSURE_CONV);
+ osr = st->pressure_osr;
+ ret = i2c_smbus_write_byte(st->client, osr->cmd);
if (ret < 0)
return ret;
- usleep_range(MS5611_CONV_TIME_MIN, MS5611_CONV_TIME_MAX);
-
+ usleep_range(osr->conv_usec, osr->conv_usec + (osr->conv_usec / 10UL));
return ms5611_i2c_read_adc(st, pressure);
}
@@ -113,6 +114,17 @@ static int ms5611_i2c_remove(struct i2c_client *client)
return ms5611_remove(i2c_get_clientdata(client));
}
+#if defined(CONFIG_OF)
+static const struct of_device_id ms5611_i2c_matches[] = {
+ { .compatible = "meas,ms5611" },
+ { .compatible = "ms5611" },
+ { .compatible = "meas,ms5607" },
+ { .compatible = "ms5607" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ms5611_i2c_matches);
+#endif
+
static const struct i2c_device_id ms5611_id[] = {
{ "ms5611", MS5611 },
{ "ms5607", MS5607 },
@@ -123,6 +135,7 @@ MODULE_DEVICE_TABLE(i2c, ms5611_id);
static struct i2c_driver ms5611_driver = {
.driver = {
.name = "ms5611",
+ .of_match_table = of_match_ptr(ms5611_i2c_matches)
},
.id_table = ms5611_id,
.probe = ms5611_i2c_probe,
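
Each oversampling ratio carries its own ADC start command and worst-case conversion time, so the I2C read path above issues the OSR-specific command and then sleeps for that time plus roughly 10% slack before reading the result. A minimal sketch of that sequence, using the same SMBus and delay helpers; the structure and function names here are illustrative only.

#include <linux/delay.h>
#include <linux/i2c.h>

/* illustrative mirror of the driver's per-OSR table entries */
struct example_osr {
        unsigned long conv_usec;        /* worst-case conversion time */
        u8 cmd;                         /* ADC start command for this OSR */
};

static int example_start_conversion(struct i2c_client *client,
                                    const struct example_osr *osr)
{
        int ret = i2c_smbus_write_byte(client, osr->cmd);

        if (ret < 0)
                return ret;

        /* wait the nominal conversion time, allowing ~10% of slack */
        usleep_range(osr->conv_usec, osr->conv_usec + osr->conv_usec / 10UL);
        return 0;
}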
diff --git a/drivers/iio/pressure/ms5611_spi.c b/drivers/iio/pressure/ms5611_spi.c
index 5cc009e85f0e..932e05001e1a 100644
--- a/drivers/iio/pressure/ms5611_spi.c
+++ b/drivers/iio/pressure/ms5611_spi.c
@@ -12,6 +12,7 @@
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/spi/spi.h>
+#include <linux/of_device.h>
#include "ms5611.h"
@@ -55,28 +56,29 @@ static int ms5611_spi_read_adc(struct device *dev, s32 *val)
static int ms5611_spi_read_adc_temp_and_pressure(struct device *dev,
s32 *temp, s32 *pressure)
{
- u8 cmd;
int ret;
struct ms5611_state *st = iio_priv(dev_to_iio_dev(dev));
+ const struct ms5611_osr *osr = st->temp_osr;
- cmd = MS5611_START_TEMP_CONV;
- ret = spi_write_then_read(st->client, &cmd, 1, NULL, 0);
+	/*
+	 * Warning: &osr->cmd MUST be aligned on a word boundary, since it is
+	 * used as the 2nd (const void *) argument of spi_write_then_read().
+	 */
+ ret = spi_write_then_read(st->client, &osr->cmd, 1, NULL, 0);
if (ret < 0)
return ret;
- usleep_range(MS5611_CONV_TIME_MIN, MS5611_CONV_TIME_MAX);
-
+ usleep_range(osr->conv_usec, osr->conv_usec + (osr->conv_usec / 10UL));
ret = ms5611_spi_read_adc(dev, temp);
if (ret < 0)
return ret;
- cmd = MS5611_START_PRESSURE_CONV;
- ret = spi_write_then_read(st->client, &cmd, 1, NULL, 0);
+ osr = st->pressure_osr;
+ ret = spi_write_then_read(st->client, &osr->cmd, 1, NULL, 0);
if (ret < 0)
return ret;
- usleep_range(MS5611_CONV_TIME_MIN, MS5611_CONV_TIME_MAX);
-
+ usleep_range(osr->conv_usec, osr->conv_usec + (osr->conv_usec / 10UL));
return ms5611_spi_read_adc(dev, pressure);
}
@@ -106,7 +108,7 @@ static int ms5611_spi_probe(struct spi_device *spi)
st->client = spi;
return ms5611_probe(indio_dev, &spi->dev, spi_get_device_id(spi)->name,
- spi_get_device_id(spi)->driver_data);
+ spi_get_device_id(spi)->driver_data);
}
static int ms5611_spi_remove(struct spi_device *spi)
@@ -114,6 +116,17 @@ static int ms5611_spi_remove(struct spi_device *spi)
return ms5611_remove(spi_get_drvdata(spi));
}
+#if defined(CONFIG_OF)
+static const struct of_device_id ms5611_spi_matches[] = {
+ { .compatible = "meas,ms5611" },
+ { .compatible = "ms5611" },
+ { .compatible = "meas,ms5607" },
+ { .compatible = "ms5607" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, ms5611_spi_matches);
+#endif
+
static const struct spi_device_id ms5611_id[] = {
{ "ms5611", MS5611 },
{ "ms5607", MS5607 },
@@ -124,6 +137,7 @@ MODULE_DEVICE_TABLE(spi, ms5611_id);
static struct spi_driver ms5611_driver = {
.driver = {
.name = "ms5611",
+ .of_match_table = of_match_ptr(ms5611_spi_matches)
},
.id_table = ms5611_id,
.probe = ms5611_spi_probe,
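
Both bus frontends gain the same device-tree hook: a CONFIG_OF-guarded of_device_id table exported with MODULE_DEVICE_TABLE() and wired up through of_match_ptr(), which evaluates to NULL when CONFIG_OF is disabled. A condensed sketch of the shape of that hookup for a hypothetical SPI driver (probe/remove omitted):

#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/spi/spi.h>

#if defined(CONFIG_OF)
static const struct of_device_id example_of_matches[] = {
        { .compatible = "vendor,example-dev" }, /* hypothetical compatible */
        { }
};
MODULE_DEVICE_TABLE(of, example_of_matches);
#endif

static struct spi_driver example_driver = {
        .driver = {
                .name = "example-dev",
                /* compiles to NULL when CONFIG_OF is not set */
                .of_match_table = of_match_ptr(example_of_matches),
        },
        /* .probe, .remove and .id_table would follow as usual */
};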
diff --git a/drivers/iio/pressure/st_pressure_core.c b/drivers/iio/pressure/st_pressure_core.c
index 172393ad34af..9e9b72a8f18f 100644
--- a/drivers/iio/pressure/st_pressure_core.c
+++ b/drivers/iio/pressure/st_pressure_core.c
@@ -64,6 +64,8 @@
#define ST_PRESS_LPS331AP_DRDY_IRQ_INT2_MASK 0x20
#define ST_PRESS_LPS331AP_IHL_IRQ_ADDR 0x22
#define ST_PRESS_LPS331AP_IHL_IRQ_MASK 0x80
+#define ST_PRESS_LPS331AP_OD_IRQ_ADDR 0x22
+#define ST_PRESS_LPS331AP_OD_IRQ_MASK 0x40
#define ST_PRESS_LPS331AP_MULTIREAD_BIT true
#define ST_PRESS_LPS331AP_TEMP_OFFSET 42500
@@ -104,6 +106,8 @@
#define ST_PRESS_LPS25H_DRDY_IRQ_INT2_MASK 0x10
#define ST_PRESS_LPS25H_IHL_IRQ_ADDR 0x22
#define ST_PRESS_LPS25H_IHL_IRQ_MASK 0x80
+#define ST_PRESS_LPS25H_OD_IRQ_ADDR 0x22
+#define ST_PRESS_LPS25H_OD_IRQ_MASK 0x40
#define ST_PRESS_LPS25H_MULTIREAD_BIT true
#define ST_PRESS_LPS25H_TEMP_OFFSET 42500
#define ST_PRESS_LPS25H_OUT_XL_ADDR 0x28
@@ -226,6 +230,9 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.mask_int2 = ST_PRESS_LPS331AP_DRDY_IRQ_INT2_MASK,
.addr_ihl = ST_PRESS_LPS331AP_IHL_IRQ_ADDR,
.mask_ihl = ST_PRESS_LPS331AP_IHL_IRQ_MASK,
+ .addr_od = ST_PRESS_LPS331AP_OD_IRQ_ADDR,
+ .mask_od = ST_PRESS_LPS331AP_OD_IRQ_MASK,
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
.multi_read_bit = ST_PRESS_LPS331AP_MULTIREAD_BIT,
.bootime = 2,
@@ -312,6 +319,9 @@ static const struct st_sensor_settings st_press_sensors_settings[] = {
.mask_int2 = ST_PRESS_LPS25H_DRDY_IRQ_INT2_MASK,
.addr_ihl = ST_PRESS_LPS25H_IHL_IRQ_ADDR,
.mask_ihl = ST_PRESS_LPS25H_IHL_IRQ_MASK,
+ .addr_od = ST_PRESS_LPS25H_OD_IRQ_ADDR,
+ .mask_od = ST_PRESS_LPS25H_OD_IRQ_MASK,
+ .addr_stat_drdy = ST_SENSORS_DEFAULT_STAT_ADDR,
},
.multi_read_bit = ST_PRESS_LPS25H_MULTIREAD_BIT,
.bootime = 2,
diff --git a/drivers/staging/android/Kconfig b/drivers/staging/android/Kconfig
index bd90d2002afb..6480f60ebf6c 100644
--- a/drivers/staging/android/Kconfig
+++ b/drivers/staging/android/Kconfig
@@ -14,27 +14,13 @@ config ASHMEM
It is, in theory, a good memory allocator for low-memory devices,
because it can discard shared memory units when under memory pressure.
-config ANDROID_TIMED_OUTPUT
- bool "Timed output class driver"
- default y
-
-config ANDROID_TIMED_GPIO
- tristate "Android timed gpio driver"
- depends on GPIOLIB || COMPILE_TEST
- depends on ANDROID_TIMED_OUTPUT
- default n
- ---help---
- Unlike generic gpio is to allow programs to access and manipulate gpio
- registers from user space, timed output/gpio is a system to allow changing
- a gpio pin and restore it automatically after a specified timeout.
-
config ANDROID_LOW_MEMORY_KILLER
bool "Android Low Memory Killer"
---help---
Registers processes to be killed when low memory conditions, this is useful
as there is no particular swap space on android.
- The registered process will kills according to the priorities in android init
+	  The registered process will be killed according to the priorities in android init
scripts (/init.rc), and it defines priority values with minimum free memory size
for each priority.
@@ -52,6 +38,7 @@ config SW_SYNC
bool "Software synchronization objects"
default n
depends on SYNC
+ depends on SYNC_FILE
---help---
A sync object driver that uses a 32bit counter to coordinate
synchronization. Useful when there is no hardware primitive backing
diff --git a/drivers/staging/android/Makefile b/drivers/staging/android/Makefile
index c7b6c99cc5ce..980d6dc4b265 100644
--- a/drivers/staging/android/Makefile
+++ b/drivers/staging/android/Makefile
@@ -3,8 +3,6 @@ ccflags-y += -I$(src) # needed for trace events
obj-y += ion/
obj-$(CONFIG_ASHMEM) += ashmem.o
-obj-$(CONFIG_ANDROID_TIMED_OUTPUT) += timed_output.o
-obj-$(CONFIG_ANDROID_TIMED_GPIO) += timed_gpio.o
obj-$(CONFIG_ANDROID_LOW_MEMORY_KILLER) += lowmemorykiller.o
obj-$(CONFIG_SYNC) += sync.o sync_debug.o
obj-$(CONFIG_SW_SYNC) += sw_sync.o
diff --git a/drivers/staging/android/ion/ion.c b/drivers/staging/android/ion/ion.c
index 85365672c931..a2cf93b59016 100644
--- a/drivers/staging/android/ion/ion.c
+++ b/drivers/staging/android/ion/ion.c
@@ -184,7 +184,7 @@ static struct ion_buffer *ion_buffer_create(struct ion_heap *heap,
struct scatterlist *sg;
int i, ret;
- buffer = kzalloc(sizeof(struct ion_buffer), GFP_KERNEL);
+ buffer = kzalloc(sizeof(*buffer), GFP_KERNEL);
if (!buffer)
return ERR_PTR(-ENOMEM);
@@ -341,7 +341,7 @@ static struct ion_handle *ion_handle_create(struct ion_client *client,
{
struct ion_handle *handle;
- handle = kzalloc(sizeof(struct ion_handle), GFP_KERNEL);
+ handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (!handle)
return ERR_PTR(-ENOMEM);
kref_init(&handle->ref);
@@ -396,7 +396,7 @@ static int ion_handle_put_nolock(struct ion_handle *handle)
return ret;
}
-int ion_handle_put(struct ion_handle *handle)
+static int ion_handle_put(struct ion_handle *handle)
{
struct ion_client *client = handle->client;
int ret;
@@ -438,8 +438,8 @@ static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
return handle ? handle : ERR_PTR(-EINVAL);
}
-struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
- int id)
+static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
+ int id)
{
struct ion_handle *handle;
@@ -827,7 +827,7 @@ struct ion_client *ion_client_create(struct ion_device *dev,
}
task_unlock(current->group_leader);
- client = kzalloc(sizeof(struct ion_client), GFP_KERNEL);
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
if (!client)
goto err_put_task_struct;
@@ -1035,7 +1035,7 @@ static void ion_vm_open(struct vm_area_struct *vma)
struct ion_buffer *buffer = vma->vm_private_data;
struct ion_vma_list *vma_list;
- vma_list = kmalloc(sizeof(struct ion_vma_list), GFP_KERNEL);
+ vma_list = kmalloc(sizeof(*vma_list), GFP_KERNEL);
if (!vma_list)
return;
vma_list->vma = vma;
@@ -1650,7 +1650,7 @@ struct ion_device *ion_device_create(long (*custom_ioctl)
struct ion_device *idev;
int ret;
- idev = kzalloc(sizeof(struct ion_device), GFP_KERNEL);
+ idev = kzalloc(sizeof(*idev), GFP_KERNEL);
if (!idev)
return ERR_PTR(-ENOMEM);
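
The allocations above all switch from sizeof(struct foo) to sizeof(*ptr), the usual kernel idiom: the size expression stays correct even if the pointer's type changes later, and the call site shows at a glance that the right object is being sized. A tiny sketch with a hypothetical structure:

#include <linux/slab.h>

struct example_item {
        int id;
        void *payload;
};

static struct example_item *example_item_alloc(void)
{
        struct example_item *item;

        /* preferred: sized from the pointer, not from a repeated type name */
        item = kzalloc(sizeof(*item), GFP_KERNEL);
        if (!item)
                return NULL;

        return item;
}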
diff --git a/drivers/staging/android/ion/ion_chunk_heap.c b/drivers/staging/android/ion/ion_chunk_heap.c
index 0813163f962f..e0553fee9b8a 100644
--- a/drivers/staging/android/ion/ion_chunk_heap.c
+++ b/drivers/staging/android/ion/ion_chunk_heap.c
@@ -55,7 +55,7 @@ static int ion_chunk_heap_allocate(struct ion_heap *heap,
if (allocated_size > chunk_heap->size - chunk_heap->allocated)
return -ENOMEM;
- table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
+ table = kmalloc(sizeof(*table), GFP_KERNEL);
if (!table)
return -ENOMEM;
ret = sg_alloc_table(table, num_chunks, GFP_KERNEL);
@@ -154,7 +154,7 @@ struct ion_heap *ion_chunk_heap_create(struct ion_platform_heap *heap_data)
if (ret)
return ERR_PTR(ret);
- chunk_heap = kzalloc(sizeof(struct ion_chunk_heap), GFP_KERNEL);
+ chunk_heap = kzalloc(sizeof(*chunk_heap), GFP_KERNEL);
if (!chunk_heap)
return ERR_PTR(-ENOMEM);
diff --git a/drivers/staging/android/ion/ion_dummy_driver.c b/drivers/staging/android/ion/ion_dummy_driver.c
index 5678870bff48..814a3c92a56e 100644
--- a/drivers/staging/android/ion/ion_dummy_driver.c
+++ b/drivers/staging/android/ion/ion_dummy_driver.c
@@ -68,6 +68,8 @@ static int __init ion_dummy_init(void)
int i, err;
idev = ion_device_create(NULL);
+ if (IS_ERR(idev))
+ return PTR_ERR(idev);
heaps = kcalloc(dummy_ion_pdata.nr, sizeof(struct ion_heap *),
GFP_KERNEL);
if (!heaps)
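
ion_device_create() reports failure through an ERR_PTR-encoded pointer rather than NULL, so the added check decodes it back into a negative errno before anything dereferences the result. A minimal sketch of that ERR_PTR/IS_ERR/PTR_ERR round trip, with hypothetical function and type names:

#include <linux/err.h>
#include <linux/slab.h>

struct example_dev {
        int id;
};

static struct example_dev *example_dev_create(void)
{
        struct example_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);

        if (!dev)
                return ERR_PTR(-ENOMEM);        /* encode errno in the pointer */
        return dev;
}

static int example_init(void)
{
        struct example_dev *dev = example_dev_create();

        if (IS_ERR(dev))
                return PTR_ERR(dev);            /* decode it before any use */
        /* ... use dev ... */
        return 0;
}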
diff --git a/drivers/staging/android/ion/ion_test.c b/drivers/staging/android/ion/ion_test.c
index 83a3af06d01c..5a396a1a8238 100644
--- a/drivers/staging/android/ion/ion_test.c
+++ b/drivers/staging/android/ion/ion_test.c
@@ -208,7 +208,7 @@ static int ion_test_open(struct inode *inode, struct file *file)
struct ion_test_data *data;
struct miscdevice *miscdev = file->private_data;
- data = kzalloc(sizeof(struct ion_test_data), GFP_KERNEL);
+ data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c
index 2509e5df7244..24d2745e9437 100644
--- a/drivers/staging/android/lowmemorykiller.c
+++ b/drivers/staging/android/lowmemorykiller.c
@@ -131,7 +131,7 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
if (!p)
continue;
- if (test_tsk_thread_flag(p, TIF_MEMDIE) &&
+ if (task_lmk_waiting(p) &&
time_before_eq(jiffies, lowmem_deathpending_timeout)) {
task_unlock(p);
rcu_read_unlock();
@@ -162,13 +162,8 @@ static unsigned long lowmem_scan(struct shrinker *s, struct shrink_control *sc)
if (selected) {
task_lock(selected);
send_sig(SIGKILL, selected, 0);
- /*
- * FIXME: lowmemorykiller shouldn't abuse global OOM killer
- * infrastructure. There is no real reason why the selected
- * task should have access to the memory reserves.
- */
if (selected->mm)
- mark_oom_victim(selected);
+ task_set_lmk_waiting(selected);
task_unlock(selected);
lowmem_print(1, "Killing '%s' (%d), adj %hd,\n"
" to free %ldkB on behalf of '%s' (%d) because\n"
diff --git a/drivers/staging/android/sync.c b/drivers/staging/android/sync.c
index 3a8f21031440..1d14c83c7f7c 100644
--- a/drivers/staging/android/sync.c
+++ b/drivers/staging/android/sync.c
@@ -16,10 +16,7 @@
#include <linux/debugfs.h>
#include <linux/export.h>
-#include <linux/file.h>
-#include <linux/fs.h>
#include <linux/kernel.h>
-#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
@@ -32,7 +29,6 @@
#include "trace/sync.h"
static const struct fence_ops android_fence_ops;
-static const struct file_operations sync_file_fops;
struct sync_timeline *sync_timeline_create(const struct sync_timeline_ops *ops,
int size, const char *name)
@@ -136,170 +132,6 @@ struct fence *sync_pt_create(struct sync_timeline *obj, int size)
}
EXPORT_SYMBOL(sync_pt_create);
-static struct sync_file *sync_file_alloc(int size, const char *name)
-{
- struct sync_file *sync_file;
-
- sync_file = kzalloc(size, GFP_KERNEL);
- if (!sync_file)
- return NULL;
-
- sync_file->file = anon_inode_getfile("sync_file", &sync_file_fops,
- sync_file, 0);
- if (IS_ERR(sync_file->file))
- goto err;
-
- kref_init(&sync_file->kref);
- strlcpy(sync_file->name, name, sizeof(sync_file->name));
-
- init_waitqueue_head(&sync_file->wq);
-
- return sync_file;
-
-err:
- kfree(sync_file);
- return NULL;
-}
-
-static void fence_check_cb_func(struct fence *f, struct fence_cb *cb)
-{
- struct sync_file_cb *check;
- struct sync_file *sync_file;
-
- check = container_of(cb, struct sync_file_cb, cb);
- sync_file = check->sync_file;
-
- if (atomic_dec_and_test(&sync_file->status))
- wake_up_all(&sync_file->wq);
-}
-
-/* TODO: implement a create which takes more that one fence */
-struct sync_file *sync_file_create(const char *name, struct fence *fence)
-{
- struct sync_file *sync_file;
-
- sync_file = sync_file_alloc(offsetof(struct sync_file, cbs[1]),
- name);
- if (!sync_file)
- return NULL;
-
- sync_file->num_fences = 1;
- atomic_set(&sync_file->status, 1);
-
- sync_file->cbs[0].fence = fence;
- sync_file->cbs[0].sync_file = sync_file;
- if (fence_add_callback(fence, &sync_file->cbs[0].cb,
- fence_check_cb_func))
- atomic_dec(&sync_file->status);
-
- sync_file_debug_add(sync_file);
-
- return sync_file;
-}
-EXPORT_SYMBOL(sync_file_create);
-
-struct sync_file *sync_file_fdget(int fd)
-{
- struct file *file = fget(fd);
-
- if (!file)
- return NULL;
-
- if (file->f_op != &sync_file_fops)
- goto err;
-
- return file->private_data;
-
-err:
- fput(file);
- return NULL;
-}
-EXPORT_SYMBOL(sync_file_fdget);
-
-void sync_file_put(struct sync_file *sync_file)
-{
- fput(sync_file->file);
-}
-EXPORT_SYMBOL(sync_file_put);
-
-void sync_file_install(struct sync_file *sync_file, int fd)
-{
- fd_install(fd, sync_file->file);
-}
-EXPORT_SYMBOL(sync_file_install);
-
-static void sync_file_add_pt(struct sync_file *sync_file, int *i,
- struct fence *fence)
-{
- sync_file->cbs[*i].fence = fence;
- sync_file->cbs[*i].sync_file = sync_file;
-
- if (!fence_add_callback(fence, &sync_file->cbs[*i].cb,
- fence_check_cb_func)) {
- fence_get(fence);
- (*i)++;
- }
-}
-
-struct sync_file *sync_file_merge(const char *name,
- struct sync_file *a, struct sync_file *b)
-{
- int num_fences = a->num_fences + b->num_fences;
- struct sync_file *sync_file;
- int i, i_a, i_b;
- unsigned long size = offsetof(struct sync_file, cbs[num_fences]);
-
- sync_file = sync_file_alloc(size, name);
- if (!sync_file)
- return NULL;
-
- atomic_set(&sync_file->status, num_fences);
-
- /*
- * Assume sync_file a and b are both ordered and have no
- * duplicates with the same context.
- *
- * If a sync_file can only be created with sync_file_merge
- * and sync_file_create, this is a reasonable assumption.
- */
- for (i = i_a = i_b = 0; i_a < a->num_fences && i_b < b->num_fences; ) {
- struct fence *pt_a = a->cbs[i_a].fence;
- struct fence *pt_b = b->cbs[i_b].fence;
-
- if (pt_a->context < pt_b->context) {
- sync_file_add_pt(sync_file, &i, pt_a);
-
- i_a++;
- } else if (pt_a->context > pt_b->context) {
- sync_file_add_pt(sync_file, &i, pt_b);
-
- i_b++;
- } else {
- if (pt_a->seqno - pt_b->seqno <= INT_MAX)
- sync_file_add_pt(sync_file, &i, pt_a);
- else
- sync_file_add_pt(sync_file, &i, pt_b);
-
- i_a++;
- i_b++;
- }
- }
-
- for (; i_a < a->num_fences; i_a++)
- sync_file_add_pt(sync_file, &i, a->cbs[i_a].fence);
-
- for (; i_b < b->num_fences; i_b++)
- sync_file_add_pt(sync_file, &i, b->cbs[i_b].fence);
-
- if (num_fences > i)
- atomic_sub(num_fences - i, &sync_file->status);
- sync_file->num_fences = i;
-
- sync_file_debug_add(sync_file);
- return sync_file;
-}
-EXPORT_SYMBOL(sync_file_merge);
-
static const char *android_fence_get_driver_name(struct fence *fence)
{
struct sync_timeline *parent = fence_parent(fence);
@@ -387,191 +219,3 @@ static const struct fence_ops android_fence_ops = {
.fence_value_str = android_fence_value_str,
.timeline_value_str = android_fence_timeline_value_str,
};
-
-static void sync_file_free(struct kref *kref)
-{
- struct sync_file *sync_file = container_of(kref, struct sync_file,
- kref);
- int i;
-
- for (i = 0; i < sync_file->num_fences; ++i) {
- fence_remove_callback(sync_file->cbs[i].fence,
- &sync_file->cbs[i].cb);
- fence_put(sync_file->cbs[i].fence);
- }
-
- kfree(sync_file);
-}
-
-static int sync_file_release(struct inode *inode, struct file *file)
-{
- struct sync_file *sync_file = file->private_data;
-
- sync_file_debug_remove(sync_file);
-
- kref_put(&sync_file->kref, sync_file_free);
- return 0;
-}
-
-static unsigned int sync_file_poll(struct file *file, poll_table *wait)
-{
- struct sync_file *sync_file = file->private_data;
- int status;
-
- poll_wait(file, &sync_file->wq, wait);
-
- status = atomic_read(&sync_file->status);
-
- if (!status)
- return POLLIN;
- if (status < 0)
- return POLLERR;
- return 0;
-}
-
-static long sync_file_ioctl_merge(struct sync_file *sync_file,
- unsigned long arg)
-{
- int fd = get_unused_fd_flags(O_CLOEXEC);
- int err;
- struct sync_file *fence2, *fence3;
- struct sync_merge_data data;
-
- if (fd < 0)
- return fd;
-
- if (copy_from_user(&data, (void __user *)arg, sizeof(data))) {
- err = -EFAULT;
- goto err_put_fd;
- }
-
- fence2 = sync_file_fdget(data.fd2);
- if (!fence2) {
- err = -ENOENT;
- goto err_put_fd;
- }
-
- data.name[sizeof(data.name) - 1] = '\0';
- fence3 = sync_file_merge(data.name, sync_file, fence2);
- if (!fence3) {
- err = -ENOMEM;
- goto err_put_fence2;
- }
-
- data.fence = fd;
- if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
- err = -EFAULT;
- goto err_put_fence3;
- }
-
- sync_file_install(fence3, fd);
- sync_file_put(fence2);
- return 0;
-
-err_put_fence3:
- sync_file_put(fence3);
-
-err_put_fence2:
- sync_file_put(fence2);
-
-err_put_fd:
- put_unused_fd(fd);
- return err;
-}
-
-static int sync_fill_fence_info(struct fence *fence, void *data, int size)
-{
- struct sync_fence_info *info = data;
-
- if (size < sizeof(*info))
- return -ENOMEM;
-
- strlcpy(info->obj_name, fence->ops->get_timeline_name(fence),
- sizeof(info->obj_name));
- strlcpy(info->driver_name, fence->ops->get_driver_name(fence),
- sizeof(info->driver_name));
- if (fence_is_signaled(fence))
- info->status = fence->status >= 0 ? 1 : fence->status;
- else
- info->status = 0;
- info->timestamp_ns = ktime_to_ns(fence->timestamp);
-
- return sizeof(*info);
-}
-
-static long sync_file_ioctl_fence_info(struct sync_file *sync_file,
- unsigned long arg)
-{
- struct sync_file_info *info;
- __u32 size;
- __u32 len = 0;
- int ret, i;
-
- if (copy_from_user(&size, (void __user *)arg, sizeof(size)))
- return -EFAULT;
-
- if (size < sizeof(struct sync_file_info))
- return -EINVAL;
-
- if (size > 4096)
- size = 4096;
-
- info = kzalloc(size, GFP_KERNEL);
- if (!info)
- return -ENOMEM;
-
- strlcpy(info->name, sync_file->name, sizeof(info->name));
- info->status = atomic_read(&sync_file->status);
- if (info->status >= 0)
- info->status = !info->status;
-
- len = sizeof(struct sync_file_info);
-
- for (i = 0; i < sync_file->num_fences; ++i) {
- struct fence *fence = sync_file->cbs[i].fence;
-
- ret = sync_fill_fence_info(fence, (u8 *)info + len, size - len);
-
- if (ret < 0)
- goto out;
-
- len += ret;
- }
-
- info->len = len;
-
- if (copy_to_user((void __user *)arg, info, len))
- ret = -EFAULT;
- else
- ret = 0;
-
-out:
- kfree(info);
-
- return ret;
-}
-
-static long sync_file_ioctl(struct file *file, unsigned int cmd,
- unsigned long arg)
-{
- struct sync_file *sync_file = file->private_data;
-
- switch (cmd) {
- case SYNC_IOC_MERGE:
- return sync_file_ioctl_merge(sync_file, arg);
-
- case SYNC_IOC_FENCE_INFO:
- return sync_file_ioctl_fence_info(sync_file, arg);
-
- default:
- return -ENOTTY;
- }
-}
-
-static const struct file_operations sync_file_fops = {
- .release = sync_file_release,
- .poll = sync_file_poll,
- .unlocked_ioctl = sync_file_ioctl,
- .compat_ioctl = sync_file_ioctl,
-};
-
diff --git a/drivers/staging/android/sync.h b/drivers/staging/android/sync.h
index d2a173433a7d..b56885c14839 100644
--- a/drivers/staging/android/sync.h
+++ b/drivers/staging/android/sync.h
@@ -20,10 +20,10 @@
#include <linux/spinlock.h>
#include <linux/fence.h>
-#include "uapi/sync.h"
+#include <linux/sync_file.h>
+#include <uapi/linux/sync_file.h>
struct sync_timeline;
-struct sync_file;
/**
* struct sync_timeline_ops - sync object implementation ops
@@ -86,38 +86,6 @@ static inline struct sync_timeline *fence_parent(struct fence *fence)
child_list_lock);
}
-struct sync_file_cb {
- struct fence_cb cb;
- struct fence *fence;
- struct sync_file *sync_file;
-};
-
-/**
- * struct sync_file - sync file to export to the userspace
- * @file: file representing this fence
- * @kref: reference count on fence.
- * @name: name of sync_file. Useful for debugging
- * @sync_file_list: membership in global file list
- * @num_fences number of sync_pts in the fence
- * @wq: wait queue for fence signaling
- * @status: 0: signaled, >0:active, <0: error
- * @cbs: sync_pts callback information
- */
-struct sync_file {
- struct file *file;
- struct kref kref;
- char name[32];
-#ifdef CONFIG_DEBUG_FS
- struct list_head sync_file_list;
-#endif
- int num_fences;
-
- wait_queue_head_t wq;
- atomic_t status;
-
- struct sync_file_cb cbs[];
-};
-
/*
* API for sync_timeline implementers
*/
@@ -167,61 +135,6 @@ void sync_timeline_signal(struct sync_timeline *obj);
*/
struct fence *sync_pt_create(struct sync_timeline *parent, int size);
-/**
- * sync_fence_create() - creates a sync fence
- * @name: name of fence to create
- * @fence: fence to add to the sync_fence
- *
- * Creates a sync_file containg @fence. Once this is called, the sync_file
- * takes ownership of @fence.
- */
-struct sync_file *sync_file_create(const char *name, struct fence *fence);
-
-/*
- * API for sync_file consumers
- */
-
-/**
- * sync_file_merge() - merge two sync_files
- * @name: name of new fence
- * @a: sync_file a
- * @b: sync_file b
- *
- * Creates a new sync_file which contains copies of all the fences in both
- * @a and @b. @a and @b remain valid, independent sync_file. Returns the
- * new merged sync_file or NULL in case of error.
- */
-struct sync_file *sync_file_merge(const char *name,
- struct sync_file *a, struct sync_file *b);
-
-/**
- * sync_file_fdget() - get a sync_file from an fd
- * @fd: fd referencing a fence
- *
- * Ensures @fd references a valid sync_file, increments the refcount of the
- * backing file. Returns the sync_file or NULL in case of error.
- */
-struct sync_file *sync_file_fdget(int fd);
-
-/**
- * sync_file_put() - puts a reference of a sync_file
- * @sync_file: sync_file to put
- *
- * Puts a reference on @sync_fence. If this is the last reference, the
- * sync_fil and all it's sync_pts will be freed
- */
-void sync_file_put(struct sync_file *sync_file);
-
-/**
- * sync_file_install() - installs a sync_file into a file descriptor
- * @sync_file: sync_file to install
- * @fd: file descriptor in which to install the fence
- *
- * Installs @sync_file into @fd. @fd's should be acquired through
- * get_unused_fd_flags(O_CLOEXEC).
- */
-void sync_file_install(struct sync_file *sync_file, int fd);
-
#ifdef CONFIG_DEBUG_FS
void sync_timeline_debug_add(struct sync_timeline *obj);
diff --git a/drivers/staging/android/sync_debug.c b/drivers/staging/android/sync_debug.c
index 5a7ec58fbc09..5f57499c98bf 100644
--- a/drivers/staging/android/sync_debug.c
+++ b/drivers/staging/android/sync_debug.c
@@ -26,6 +26,7 @@
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>
#include <linux/time64.h>
+#include <linux/sync_file.h>
#include "sw_sync.h"
#ifdef CONFIG_DEBUG_FS
@@ -262,8 +263,7 @@ static long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj,
goto err;
}
- data.name[sizeof(data.name) - 1] = '\0';
- sync_file = sync_file_create(data.name, fence);
+ sync_file = sync_file_create(fence);
if (!sync_file) {
fence_put(fence);
err = -ENOMEM;
@@ -272,12 +272,12 @@ static long sw_sync_ioctl_create_fence(struct sw_sync_timeline *obj,
data.fence = fd;
if (copy_to_user((void __user *)arg, &data, sizeof(data))) {
- sync_file_put(sync_file);
+ fput(sync_file->file);
err = -EFAULT;
goto err;
}
- sync_file_install(sync_file, fd);
+ fd_install(fd, sync_file->file);
return 0;
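
With the staging copy of the sync_file machinery removed, the ioctl now uses the core <linux/sync_file.h> API directly: sync_file_create() takes only the fence, and the file is handed to userspace with plain fd_install()/fput() in place of the dropped sync_file_install()/sync_file_put() wrappers. A condensed sketch of that export path under the same core API; the helper name is hypothetical and the copy_to_user step is omitted.

#include <linux/fcntl.h>
#include <linux/fence.h>
#include <linux/file.h>
#include <linux/sync_file.h>

/* sketch: wrap a fence in a sync_file and expose it as a file descriptor */
static int example_export_fence(struct fence *fence)
{
        struct sync_file *sync_file;
        int fd = get_unused_fd_flags(O_CLOEXEC);

        if (fd < 0)
                return fd;

        sync_file = sync_file_create(fence);
        if (!sync_file) {
                put_unused_fd(fd);
                return -ENOMEM;
        }

        fd_install(fd, sync_file->file);
        return fd;
}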
diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c
deleted file mode 100644
index 914fd1005467..000000000000
--- a/drivers/staging/android/timed_gpio.c
+++ /dev/null
@@ -1,166 +0,0 @@
-/* drivers/misc/timed_gpio.c
- *
- * Copyright (C) 2008 Google, Inc.
- * Author: Mike Lockwood <lockwood@android.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/slab.h>
-#include <linux/hrtimer.h>
-#include <linux/err.h>
-#include <linux/gpio.h>
-#include <linux/ktime.h>
-
-#include "timed_output.h"
-#include "timed_gpio.h"
-
-struct timed_gpio_data {
- struct timed_output_dev dev;
- struct hrtimer timer;
- spinlock_t lock;
- unsigned gpio;
- int max_timeout;
- u8 active_low;
-};
-
-static enum hrtimer_restart gpio_timer_func(struct hrtimer *timer)
-{
- struct timed_gpio_data *data =
- container_of(timer, struct timed_gpio_data, timer);
-
- gpio_direction_output(data->gpio, data->active_low ? 1 : 0);
- return HRTIMER_NORESTART;
-}
-
-static int gpio_get_time(struct timed_output_dev *dev)
-{
- struct timed_gpio_data *data;
- ktime_t t;
-
- data = container_of(dev, struct timed_gpio_data, dev);
-
- if (!hrtimer_active(&data->timer))
- return 0;
-
- t = hrtimer_get_remaining(&data->timer);
-
- return ktime_to_ms(t);
-}
-
-static void gpio_enable(struct timed_output_dev *dev, int value)
-{
- struct timed_gpio_data *data =
- container_of(dev, struct timed_gpio_data, dev);
- unsigned long flags;
-
- spin_lock_irqsave(&data->lock, flags);
-
- /* cancel previous timer and set GPIO according to value */
- hrtimer_cancel(&data->timer);
- gpio_direction_output(data->gpio, data->active_low ? !value : !!value);
-
- if (value > 0) {
- if (value > data->max_timeout)
- value = data->max_timeout;
-
- hrtimer_start(&data->timer,
- ktime_set(value / 1000, (value % 1000) * 1000000),
- HRTIMER_MODE_REL);
- }
-
- spin_unlock_irqrestore(&data->lock, flags);
-}
-
-static int timed_gpio_probe(struct platform_device *pdev)
-{
- struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
- struct timed_gpio *cur_gpio;
- struct timed_gpio_data *gpio_data, *gpio_dat;
- int i, ret;
-
- if (!pdata)
- return -EBUSY;
-
- gpio_data = devm_kcalloc(&pdev->dev, pdata->num_gpios,
- sizeof(*gpio_data), GFP_KERNEL);
- if (!gpio_data)
- return -ENOMEM;
-
- for (i = 0; i < pdata->num_gpios; i++) {
- cur_gpio = &pdata->gpios[i];
- gpio_dat = &gpio_data[i];
-
- hrtimer_init(&gpio_dat->timer, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL);
- gpio_dat->timer.function = gpio_timer_func;
- spin_lock_init(&gpio_dat->lock);
-
- gpio_dat->dev.name = cur_gpio->name;
- gpio_dat->dev.get_time = gpio_get_time;
- gpio_dat->dev.enable = gpio_enable;
- ret = gpio_request(cur_gpio->gpio, cur_gpio->name);
- if (ret < 0)
- goto err_out;
- ret = timed_output_dev_register(&gpio_dat->dev);
- if (ret < 0) {
- gpio_free(cur_gpio->gpio);
- goto err_out;
- }
-
- gpio_dat->gpio = cur_gpio->gpio;
- gpio_dat->max_timeout = cur_gpio->max_timeout;
- gpio_dat->active_low = cur_gpio->active_low;
- gpio_direction_output(gpio_dat->gpio, gpio_dat->active_low);
- }
-
- platform_set_drvdata(pdev, gpio_data);
-
- return 0;
-
-err_out:
- while (--i >= 0) {
- timed_output_dev_unregister(&gpio_data[i].dev);
- gpio_free(gpio_data[i].gpio);
- }
-
- return ret;
-}
-
-static int timed_gpio_remove(struct platform_device *pdev)
-{
- struct timed_gpio_platform_data *pdata = pdev->dev.platform_data;
- struct timed_gpio_data *gpio_data = platform_get_drvdata(pdev);
- int i;
-
- for (i = 0; i < pdata->num_gpios; i++) {
- timed_output_dev_unregister(&gpio_data[i].dev);
- gpio_free(gpio_data[i].gpio);
- }
-
- return 0;
-}
-
-static struct platform_driver timed_gpio_driver = {
- .probe = timed_gpio_probe,
- .remove = timed_gpio_remove,
- .driver = {
- .name = TIMED_GPIO_NAME,
- },
-};
-
-module_platform_driver(timed_gpio_driver);
-
-MODULE_AUTHOR("Mike Lockwood <lockwood@android.com>");
-MODULE_DESCRIPTION("timed gpio driver");
-MODULE_LICENSE("GPL");
diff --git a/drivers/staging/android/timed_gpio.h b/drivers/staging/android/timed_gpio.h
deleted file mode 100644
index d29e169d7ebe..000000000000
--- a/drivers/staging/android/timed_gpio.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* include/linux/timed_gpio.h
- *
- * Copyright (C) 2008 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
-*/
-
-#ifndef _LINUX_TIMED_GPIO_H
-#define _LINUX_TIMED_GPIO_H
-
-#define TIMED_GPIO_NAME "timed-gpio"
-
-struct timed_gpio {
- const char *name;
- unsigned gpio;
- int max_timeout;
- u8 active_low;
-};
-
-struct timed_gpio_platform_data {
- int num_gpios;
- struct timed_gpio *gpios;
-};
-
-#endif
diff --git a/drivers/staging/android/timed_output.c b/drivers/staging/android/timed_output.c
deleted file mode 100644
index aff9cdb007e5..000000000000
--- a/drivers/staging/android/timed_output.c
+++ /dev/null
@@ -1,110 +0,0 @@
-/* drivers/misc/timed_output.c
- *
- * Copyright (C) 2009 Google, Inc.
- * Author: Mike Lockwood <lockwood@android.com>
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-#define pr_fmt(fmt) "timed_output: " fmt
-
-#include <linux/init.h>
-#include <linux/export.h>
-#include <linux/types.h>
-#include <linux/device.h>
-#include <linux/fs.h>
-#include <linux/err.h>
-
-#include "timed_output.h"
-
-static struct class *timed_output_class;
-static atomic_t device_count;
-
-static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct timed_output_dev *tdev = dev_get_drvdata(dev);
- int remaining = tdev->get_time(tdev);
-
- return sprintf(buf, "%d\n", remaining);
-}
-
-static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t size)
-{
- struct timed_output_dev *tdev = dev_get_drvdata(dev);
- int value;
- int rc;
-
- rc = kstrtoint(buf, 0, &value);
- if (rc != 0)
- return -EINVAL;
-
- tdev->enable(tdev, value);
-
- return size;
-}
-static DEVICE_ATTR_RW(enable);
-
-static struct attribute *timed_output_attrs[] = {
- &dev_attr_enable.attr,
- NULL,
-};
-ATTRIBUTE_GROUPS(timed_output);
-
-static int create_timed_output_class(void)
-{
- if (!timed_output_class) {
- timed_output_class = class_create(THIS_MODULE, "timed_output");
- if (IS_ERR(timed_output_class))
- return PTR_ERR(timed_output_class);
- atomic_set(&device_count, 0);
- timed_output_class->dev_groups = timed_output_groups;
- }
-
- return 0;
-}
-
-int timed_output_dev_register(struct timed_output_dev *tdev)
-{
- int ret;
-
- if (!tdev || !tdev->name || !tdev->enable || !tdev->get_time)
- return -EINVAL;
-
- ret = create_timed_output_class();
- if (ret < 0)
- return ret;
-
- tdev->index = atomic_inc_return(&device_count);
- tdev->dev = device_create(timed_output_class, NULL,
- MKDEV(0, tdev->index), NULL, "%s", tdev->name);
- if (IS_ERR(tdev->dev))
- return PTR_ERR(tdev->dev);
-
- dev_set_drvdata(tdev->dev, tdev);
- tdev->state = 0;
- return 0;
-}
-EXPORT_SYMBOL_GPL(timed_output_dev_register);
-
-void timed_output_dev_unregister(struct timed_output_dev *tdev)
-{
- tdev->enable(tdev, 0);
- device_destroy(timed_output_class, MKDEV(0, tdev->index));
-}
-EXPORT_SYMBOL_GPL(timed_output_dev_unregister);
-
-static int __init timed_output_init(void)
-{
- return create_timed_output_class();
-}
-device_initcall(timed_output_init);
diff --git a/drivers/staging/android/timed_output.h b/drivers/staging/android/timed_output.h
deleted file mode 100644
index 13d2ca51cbe8..000000000000
--- a/drivers/staging/android/timed_output.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* include/linux/timed_output.h
- *
- * Copyright (C) 2008 Google, Inc.
- *
- * This software is licensed under the terms of the GNU General Public
- * License version 2, as published by the Free Software Foundation, and
- * may be copied, distributed, and modified under those terms.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
-*/
-
-#ifndef _LINUX_TIMED_OUTPUT_H
-#define _LINUX_TIMED_OUTPUT_H
-
-struct timed_output_dev {
- const char *name;
-
- /* enable the output and set the timer */
- void (*enable)(struct timed_output_dev *sdev, int timeout);
-
- /* returns the current number of milliseconds remaining on the timer */
- int (*get_time)(struct timed_output_dev *sdev);
-
- /* private data */
- struct device *dev;
- int index;
- int state;
-};
-
-int timed_output_dev_register(struct timed_output_dev *dev);
-void timed_output_dev_unregister(struct timed_output_dev *dev);
-
-#endif
diff --git a/drivers/staging/board/armadillo800eva.c b/drivers/staging/board/armadillo800eva.c
index bb63ece4d766..4de4fd06eebc 100644
--- a/drivers/staging/board/armadillo800eva.c
+++ b/drivers/staging/board/armadillo800eva.c
@@ -87,10 +87,10 @@ static const struct board_staging_clk lcdc0_clocks[] __initconst = {
static const struct board_staging_dev armadillo800eva_devices[] __initconst = {
{
- .pdev = &lcdc0_device,
- .clocks = lcdc0_clocks,
- .nclocks = ARRAY_SIZE(lcdc0_clocks),
- .domain = "/system-controller@e6180000/pm-domains/c5/a4lc@1"
+ .pdev = &lcdc0_device,
+ .clocks = lcdc0_clocks,
+ .nclocks = ARRAY_SIZE(lcdc0_clocks),
+ .domain = "/system-controller@e6180000/pm-domains/c5/a4lc@1"
},
};
diff --git a/drivers/staging/comedi/comedi_buf.c b/drivers/staging/comedi/comedi_buf.c
index 90c28016c6c1..c7d7682b1412 100644
--- a/drivers/staging/comedi/comedi_buf.c
+++ b/drivers/staging/comedi/comedi_buf.c
@@ -80,14 +80,14 @@ static void __comedi_buf_free(struct comedi_device *dev,
static void __comedi_buf_alloc(struct comedi_device *dev,
struct comedi_subdevice *s,
- unsigned n_pages)
+ unsigned int n_pages)
{
struct comedi_async *async = s->async;
struct page **pages = NULL;
struct comedi_buf_map *bm;
struct comedi_buf_page *buf;
unsigned long flags;
- unsigned i;
+ unsigned int i;
if (!IS_ENABLED(CONFIG_HAS_DMA) && s->async_dma_dir != DMA_NONE) {
dev_err(dev->class_dev,
@@ -208,7 +208,7 @@ int comedi_buf_alloc(struct comedi_device *dev, struct comedi_subdevice *s,
/* allocate new buffer */
if (new_size) {
- unsigned n_pages = new_size >> PAGE_SHIFT;
+ unsigned int n_pages = new_size >> PAGE_SHIFT;
__comedi_buf_alloc(dev, s, n_pages);
@@ -302,7 +302,7 @@ static unsigned int comedi_buf_munge(struct comedi_subdevice *s,
{
struct comedi_async *async = s->async;
unsigned int count = 0;
- const unsigned num_sample_bytes = comedi_bytes_per_sample(s);
+ const unsigned int num_sample_bytes = comedi_bytes_per_sample(s);
if (!s->munge || (async->cmd.flags & CMDF_RAWDATA)) {
async->munge_count += num_bytes;
@@ -395,7 +395,7 @@ EXPORT_SYMBOL_GPL(comedi_buf_write_free);
unsigned int comedi_buf_read_n_available(struct comedi_subdevice *s)
{
struct comedi_async *async = s->async;
- unsigned num_bytes;
+ unsigned int num_bytes;
if (!async)
return 0;
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 7c7b477b0f28..629080f39db0 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -186,7 +186,7 @@ static bool comedi_clear_board_dev(struct comedi_device *dev)
return cleared;
}
-static struct comedi_device *comedi_clear_board_minor(unsigned minor)
+static struct comedi_device *comedi_clear_board_minor(unsigned int minor)
{
struct comedi_device *dev;
@@ -209,8 +209,8 @@ static void comedi_free_board_dev(struct comedi_device *dev)
}
}
-static struct comedi_subdevice
-*comedi_subdevice_from_minor(const struct comedi_device *dev, unsigned minor)
+static struct comedi_subdevice *
+comedi_subdevice_from_minor(const struct comedi_device *dev, unsigned int minor)
{
struct comedi_subdevice *s;
unsigned int i = minor - COMEDI_NUM_BOARD_MINORS;
@@ -223,7 +223,7 @@ static struct comedi_subdevice
return s;
}
-static struct comedi_device *comedi_dev_get_from_board_minor(unsigned minor)
+static struct comedi_device *comedi_dev_get_from_board_minor(unsigned int minor)
{
struct comedi_device *dev;
@@ -233,7 +233,8 @@ static struct comedi_device *comedi_dev_get_from_board_minor(unsigned minor)
return dev;
}
-static struct comedi_device *comedi_dev_get_from_subdevice_minor(unsigned minor)
+static struct comedi_device *
+comedi_dev_get_from_subdevice_minor(unsigned int minor)
{
struct comedi_device *dev;
struct comedi_subdevice *s;
@@ -258,7 +259,7 @@ static struct comedi_device *comedi_dev_get_from_subdevice_minor(unsigned minor)
* reference incremented. Return NULL if no COMEDI device exists with the
* specified minor device number.
*/
-struct comedi_device *comedi_dev_get_from_minor(unsigned minor)
+struct comedi_device *comedi_dev_get_from_minor(unsigned int minor)
{
if (minor < COMEDI_NUM_BOARD_MINORS)
return comedi_dev_get_from_board_minor(minor);
@@ -342,7 +343,8 @@ static struct comedi_subdevice *comedi_file_write_subdevice(struct file *file)
}
static int resize_async_buffer(struct comedi_device *dev,
- struct comedi_subdevice *s, unsigned new_size)
+ struct comedi_subdevice *s,
+ unsigned int new_size)
{
struct comedi_async *async = s->async;
int retval;
@@ -616,19 +618,20 @@ static struct attribute *comedi_dev_attrs[] = {
ATTRIBUTE_GROUPS(comedi_dev);
static void __comedi_clear_subdevice_runflags(struct comedi_subdevice *s,
- unsigned bits)
+ unsigned int bits)
{
s->runflags &= ~bits;
}
static void __comedi_set_subdevice_runflags(struct comedi_subdevice *s,
- unsigned bits)
+ unsigned int bits)
{
s->runflags |= bits;
}
static void comedi_update_subdevice_runflags(struct comedi_subdevice *s,
- unsigned mask, unsigned bits)
+ unsigned int mask,
+ unsigned int bits)
{
unsigned long flags;
@@ -638,15 +641,15 @@ static void comedi_update_subdevice_runflags(struct comedi_subdevice *s,
spin_unlock_irqrestore(&s->spin_lock, flags);
}
-static unsigned __comedi_get_subdevice_runflags(struct comedi_subdevice *s)
+static unsigned int __comedi_get_subdevice_runflags(struct comedi_subdevice *s)
{
return s->runflags;
}
-static unsigned comedi_get_subdevice_runflags(struct comedi_subdevice *s)
+static unsigned int comedi_get_subdevice_runflags(struct comedi_subdevice *s)
{
unsigned long flags;
- unsigned runflags;
+ unsigned int runflags;
spin_lock_irqsave(&s->spin_lock, flags);
runflags = __comedi_get_subdevice_runflags(s);
@@ -654,12 +657,12 @@ static unsigned comedi_get_subdevice_runflags(struct comedi_subdevice *s)
return runflags;
}
-static bool comedi_is_runflags_running(unsigned runflags)
+static bool comedi_is_runflags_running(unsigned int runflags)
{
return runflags & COMEDI_SRF_RUNNING;
}
-static bool comedi_is_runflags_in_error(unsigned runflags)
+static bool comedi_is_runflags_in_error(unsigned int runflags)
{
return runflags & COMEDI_SRF_ERROR;
}
@@ -673,7 +676,7 @@ static bool comedi_is_runflags_in_error(unsigned runflags)
*/
bool comedi_is_subdevice_running(struct comedi_subdevice *s)
{
- unsigned runflags = comedi_get_subdevice_runflags(s);
+ unsigned int runflags = comedi_get_subdevice_runflags(s);
return comedi_is_runflags_running(runflags);
}
@@ -681,14 +684,14 @@ EXPORT_SYMBOL_GPL(comedi_is_subdevice_running);
static bool __comedi_is_subdevice_running(struct comedi_subdevice *s)
{
- unsigned runflags = __comedi_get_subdevice_runflags(s);
+ unsigned int runflags = __comedi_get_subdevice_runflags(s);
return comedi_is_runflags_running(runflags);
}
bool comedi_can_auto_free_spriv(struct comedi_subdevice *s)
{
- unsigned runflags = __comedi_get_subdevice_runflags(s);
+ unsigned int runflags = __comedi_get_subdevice_runflags(s);
return runflags & COMEDI_SRF_FREE_SPRIV;
}
@@ -2038,7 +2041,7 @@ static int do_setwsubd_ioctl(struct comedi_device *dev, unsigned long arg,
static long comedi_unlocked_ioctl(struct file *file, unsigned int cmd,
unsigned long arg)
{
- unsigned minor = iminor(file_inode(file));
+ unsigned int minor = iminor(file_inode(file));
struct comedi_file *cfp = file->private_data;
struct comedi_device *dev = cfp->dev;
int rc;
@@ -2342,7 +2345,7 @@ static ssize_t comedi_write(struct file *file, const char __user *buf,
add_wait_queue(&async->wait_head, &wait);
while (count == 0 && !retval) {
- unsigned runflags;
+ unsigned int runflags;
unsigned int wp, n1, n2;
set_current_state(TASK_INTERRUPTIBLE);
@@ -2485,7 +2488,8 @@ static ssize_t comedi_read(struct file *file, char __user *buf, size_t nbytes,
n = min_t(size_t, m, nbytes);
if (n == 0) {
- unsigned runflags = comedi_get_subdevice_runflags(s);
+ unsigned int runflags =
+ comedi_get_subdevice_runflags(s);
if (!comedi_is_runflags_running(runflags)) {
if (comedi_is_runflags_in_error(runflags))
@@ -2573,7 +2577,7 @@ out:
static int comedi_open(struct inode *inode, struct file *file)
{
- const unsigned minor = iminor(inode);
+ const unsigned int minor = iminor(inode);
struct comedi_file *cfp;
struct comedi_device *dev = comedi_dev_get_from_minor(minor);
int rc;
@@ -2733,7 +2737,7 @@ struct comedi_device *comedi_alloc_board_minor(struct device *hardware_device)
{
struct comedi_device *dev;
struct device *csdev;
- unsigned i;
+ unsigned int i;
dev = kzalloc(sizeof(*dev), GFP_KERNEL);
if (!dev)
@@ -2791,7 +2795,7 @@ int comedi_alloc_subdevice_minor(struct comedi_subdevice *s)
{
struct comedi_device *dev = s->device;
struct device *csdev;
- unsigned i;
+ unsigned int i;
mutex_lock(&comedi_subdevice_minor_table_lock);
for (i = 0; i < COMEDI_NUM_SUBDEVICE_MINORS; ++i) {
@@ -2841,7 +2845,7 @@ void comedi_free_subdevice_minor(struct comedi_subdevice *s)
static void comedi_cleanup_board_minors(void)
{
struct comedi_device *dev;
- unsigned i;
+ unsigned int i;
for (i = 0; i < COMEDI_NUM_BOARD_MINORS; i++) {
dev = comedi_clear_board_minor(i);
diff --git a/drivers/staging/comedi/comedidev.h b/drivers/staging/comedi/comedidev.h
index 115807215484..dcb637665eb7 100644
--- a/drivers/staging/comedi/comedidev.h
+++ b/drivers/staging/comedi/comedidev.h
@@ -173,7 +173,7 @@ struct comedi_subdevice {
void *lock;
void *busy;
- unsigned runflags;
+ unsigned int runflags;
spinlock_t spin_lock; /* generic spin-lock for COMEDI and drivers */
unsigned int io_bits;
@@ -566,7 +566,7 @@ struct comedi_device {
void comedi_event(struct comedi_device *dev, struct comedi_subdevice *s);
-struct comedi_device *comedi_dev_get_from_minor(unsigned minor);
+struct comedi_device *comedi_dev_get_from_minor(unsigned int minor);
int comedi_dev_put(struct comedi_device *dev);
bool comedi_is_subdevice_running(struct comedi_subdevice *s);
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index b63dd2ef78b5..44511d729450 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -564,7 +564,7 @@ unsigned int comedi_handle_events(struct comedi_device *dev,
if (events == 0)
return events;
- if (events & COMEDI_CB_CANCEL_MASK)
+ if ((events & COMEDI_CB_CANCEL_MASK) && s->cancel)
s->cancel(dev, s);
comedi_event(dev, s);
@@ -575,38 +575,35 @@ EXPORT_SYMBOL_GPL(comedi_handle_events);
static int insn_rw_emulate_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- struct comedi_insn new_insn;
+ struct comedi_insn _insn;
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int base_chan = (chan < 32) ? 0 : chan;
+ unsigned int _data[2];
int ret;
- static const unsigned channels_per_bitfield = 32;
-
- unsigned chan = CR_CHAN(insn->chanspec);
- const unsigned base_bitfield_channel =
- (chan < channels_per_bitfield) ? 0 : chan;
- unsigned int new_data[2];
- memset(new_data, 0, sizeof(new_data));
- memset(&new_insn, 0, sizeof(new_insn));
- new_insn.insn = INSN_BITS;
- new_insn.chanspec = base_bitfield_channel;
- new_insn.n = 2;
- new_insn.subdev = insn->subdev;
+ memset(_data, 0, sizeof(_data));
+ memset(&_insn, 0, sizeof(_insn));
+ _insn.insn = INSN_BITS;
+ _insn.chanspec = base_chan;
+ _insn.n = 2;
+ _insn.subdev = insn->subdev;
if (insn->insn == INSN_WRITE) {
if (!(s->subdev_flags & SDF_WRITABLE))
return -EINVAL;
- new_data[0] = 1 << (chan - base_bitfield_channel); /* mask */
- new_data[1] = data[0] ? (1 << (chan - base_bitfield_channel))
- : 0; /* bits */
+ _data[0] = 1 << (chan - base_chan); /* mask */
+ _data[1] = data[0] ? (1 << (chan - base_chan)) : 0; /* bits */
}
- ret = s->insn_bits(dev, s, &new_insn, new_data);
+ ret = s->insn_bits(dev, s, &_insn, _data);
if (ret < 0)
return ret;
if (insn->insn == INSN_READ)
- data[0] = (new_data[1] >> (chan - base_bitfield_channel)) & 1;
+ data[0] = (_data[1] >> (chan - base_chan)) & 1;
return 1;
}
@@ -628,6 +625,9 @@ static int __comedi_device_postconfig_async(struct comedi_device *dev,
"async subdevices must have a do_cmdtest() function\n");
return -EINVAL;
}
+ if (!s->cancel)
+ dev_warn(dev->class_dev,
+ "async subdevices should have a cancel() function\n");
async = kzalloc(sizeof(*async), GFP_KERNEL);
if (!async)
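
The rewritten insn_rw_emulate_bits() emulates a single-channel INSN_READ/INSN_WRITE on top of the subdevice's insn_bits() handler: channels below 32 use base channel 0, higher channels use themselves as the base, the write path builds a one-bit mask/bits pair relative to that base, and the read path extracts the channel's bit from the returned word. A small sketch of just that mask/bits arithmetic, with hypothetical helper names:

/* sketch of the single-bit encoding used by the emulation above */
static void example_encode_bit_write(unsigned int chan, unsigned int value,
                                     unsigned int data[2])
{
        unsigned int base_chan = (chan < 32) ? 0 : chan;

        data[0] = 1 << (chan - base_chan);                /* mask: which bit */
        data[1] = value ? (1 << (chan - base_chan)) : 0;  /* bits: new state */
}

static unsigned int example_decode_bit_read(unsigned int chan,
                                            const unsigned int data[2])
{
        unsigned int base_chan = (chan < 32) ? 0 : chan;

        return (data[1] >> (chan - base_chan)) & 1;
}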
diff --git a/drivers/staging/comedi/drivers/amcc_s5933.h b/drivers/staging/comedi/drivers/amcc_s5933.h
index d4b8c0195bd3..f03e4c8c2021 100644
--- a/drivers/staging/comedi/drivers/amcc_s5933.h
+++ b/drivers/staging/comedi/drivers/amcc_s5933.h
@@ -1,16 +1,14 @@
/*
- comedi/drivers/amcc_s5933.h
-
- Stuff for AMCC S5933 PCI Controller
-
- Author: Michal Dobes <dobes@tesnet.cz>
-
- Inspirated from general-purpose AMCC S5933 PCI Matchmaker driver
- made by Andrea Cisternino <acister@pcape1.pi.infn.it>
- and as result of espionage from MITE code made by David A. Schleef.
- Thanks to AMCC for their on-line documentation and bus master DMA
- example.
-*/
+ * Stuff for AMCC S5933 PCI Controller
+ *
+ * Author: Michal Dobes <dobes@tesnet.cz>
+ *
+ * Inspired by the general-purpose AMCC S5933 PCI Matchmaker driver
+ * made by Andrea Cisternino <acister@pcape1.pi.infn.it>
+ * and as a result of espionage from MITE code made by David A. Schleef.
+ * Thanks to AMCC for their on-line documentation and bus master DMA
+ * example.
+ */
#ifndef _AMCC_S5933_H_
#define _AMCC_S5933_H_
@@ -58,7 +56,7 @@
#define INTCSR_INTR_ASSERTED 0x800000
/****************************************************************************/
-/* AMCC - PCI non-volatile ram command register (byte 3 of master control/status register) */
+/* AMCC - PCI non-volatile ram command register (byte 3 of AMCC_OP_REG_MCSR) */
/****************************************************************************/
#define MCSR_NV_LOAD_LOW_ADDR 0x0
#define MCSR_NV_LOAD_HIGH_ADDR 0x20
diff --git a/drivers/staging/comedi/drivers/amplc_dio200_common.c b/drivers/staging/comedi/drivers/amplc_dio200_common.c
index d1539e798ffd..f6e4e984235d 100644
--- a/drivers/staging/comedi/drivers/amplc_dio200_common.c
+++ b/drivers/staging/comedi/drivers/amplc_dio200_common.c
@@ -101,7 +101,7 @@ struct dio200_subdev_8255 {
};
struct dio200_subdev_intr {
- spinlock_t spinlock;
+ spinlock_t spinlock; /* protects the 'active' flag */
unsigned int ofs;
unsigned int valid_isns;
unsigned int enabled_isns;
@@ -221,7 +221,7 @@ static void dio200_start_intr(struct comedi_device *dev,
struct dio200_subdev_intr *subpriv = s->private;
struct comedi_cmd *cmd = &s->async->cmd;
unsigned int n;
- unsigned isn_bits;
+ unsigned int isn_bits;
/* Determine interrupt sources to enable. */
isn_bits = 0;
@@ -284,9 +284,9 @@ static int dio200_handle_read_intr(struct comedi_device *dev,
{
const struct dio200_board *board = dev->board_ptr;
struct dio200_subdev_intr *subpriv = s->private;
- unsigned triggered;
- unsigned intstat;
- unsigned cur_enabled;
+ unsigned int triggered;
+ unsigned int intstat;
+ unsigned int cur_enabled;
unsigned long flags;
triggered = 0;
@@ -439,7 +439,7 @@ static int dio200_subdev_intr_cmd(struct comedi_device *dev,
static int dio200_subdev_intr_init(struct comedi_device *dev,
struct comedi_subdevice *s,
unsigned int offset,
- unsigned valid_isns)
+ unsigned int valid_isns)
{
const struct dio200_board *board = dev->board_ptr;
struct dio200_subdev_intr *subpriv;
diff --git a/drivers/staging/comedi/drivers/amplc_pc263.c b/drivers/staging/comedi/drivers/amplc_pc263.c
index b1946ce6ecc1..58b0b6b1a693 100644
--- a/drivers/staging/comedi/drivers/amplc_pc263.c
+++ b/drivers/staging/comedi/drivers/amplc_pc263.c
@@ -1,46 +1,44 @@
/*
- comedi/drivers/amplc_pc263.c
- Driver for Amplicon PC263 and PCI263 relay boards.
-
- Copyright (C) 2002 MEV Ltd. <http://www.mev.co.uk/>
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
+ * Driver for Amplicon PC263 relay board.
+ *
+ * Copyright (C) 2002 MEV Ltd. <http://www.mev.co.uk/>
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
/*
-Driver: amplc_pc263
-Description: Amplicon PC263
-Author: Ian Abbott <abbotti@mev.co.uk>
-Devices: [Amplicon] PC263 (pc263)
-Updated: Fri, 12 Apr 2013 15:19:36 +0100
-Status: works
-
-Configuration options:
- [0] - I/O port base address
-
-The board appears as one subdevice, with 16 digital outputs, each
-connected to a reed-relay. Relay contacts are closed when output is 1.
-The state of the outputs can be read.
-*/
+ * Driver: amplc_pc263
+ * Description: Amplicon PC263
+ * Author: Ian Abbott <abbotti@mev.co.uk>
+ * Devices: [Amplicon] PC263 (pc263)
+ * Updated: Fri, 12 Apr 2013 15:19:36 +0100
+ * Status: works
+ *
+ * Configuration options:
+ * [0] - I/O port base address
+ *
+ * The board appears as one subdevice, with 16 digital outputs, each
+ * connected to a reed-relay. Relay contacts are closed when output is 1.
+ * The state of the outputs can be read.
+ */
#include <linux/module.h>
#include "../comedidev.h"
/* PC263 registers */
-
-/*
- * Board descriptions for Amplicon PC263.
- */
+#define PC263_DO_0_7_REG 0x00
+#define PC263_DO_8_15_REG 0x01
struct pc263_board {
const char *name;
@@ -58,8 +56,8 @@ static int pc263_do_insn_bits(struct comedi_device *dev,
unsigned int *data)
{
if (comedi_dio_update_state(s, data)) {
- outb(s->state & 0xff, dev->iobase);
- outb((s->state >> 8) & 0xff, dev->iobase + 1);
+ outb(s->state & 0xff, dev->iobase + PC263_DO_0_7_REG);
+ outb((s->state >> 8) & 0xff, dev->iobase + PC263_DO_8_15_REG);
}
data[1] = s->state;
@@ -80,28 +78,30 @@ static int pc263_attach(struct comedi_device *dev, struct comedi_devconfig *it)
if (ret)
return ret;
+ /* Digital Output subdevice */
s = &dev->subdevices[0];
- /* digital output subdevice */
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pc263_do_insn_bits;
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 16;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = pc263_do_insn_bits;
+
/* read initial relay state */
- s->state = inb(dev->iobase) | (inb(dev->iobase + 1) << 8);
+ s->state = inb(dev->iobase + PC263_DO_0_7_REG) |
+ (inb(dev->iobase + PC263_DO_8_15_REG) << 8);
return 0;
}
static struct comedi_driver amplc_pc263_driver = {
- .driver_name = "amplc_pc263",
- .module = THIS_MODULE,
- .attach = pc263_attach,
- .detach = comedi_legacy_detach,
- .board_name = &pc263_boards[0].name,
- .offset = sizeof(struct pc263_board),
- .num_names = ARRAY_SIZE(pc263_boards),
+ .driver_name = "amplc_pc263",
+ .module = THIS_MODULE,
+ .attach = pc263_attach,
+ .detach = comedi_legacy_detach,
+ .board_name = &pc263_boards[0].name,
+ .offset = sizeof(struct pc263_board),
+ .num_names = ARRAY_SIZE(pc263_boards),
};
module_comedi_driver(amplc_pc263_driver);
diff --git a/drivers/staging/comedi/drivers/amplc_pci224.c b/drivers/staging/comedi/drivers/amplc_pci224.c
index cac011fdd375..2e6decf1b69d 100644
--- a/drivers/staging/comedi/drivers/amplc_pci224.c
+++ b/drivers/staging/comedi/drivers/amplc_pci224.c
@@ -132,48 +132,53 @@
* DACCON values.
*/
/* (r/w) Scan trigger. */
-#define PCI224_DACCON_TRIG_MASK (7 << 0)
-#define PCI224_DACCON_TRIG_NONE (0 << 0) /* none */
-#define PCI224_DACCON_TRIG_SW (1 << 0) /* software trig */
-#define PCI224_DACCON_TRIG_EXTP (2 << 0) /* ext +ve edge */
-#define PCI224_DACCON_TRIG_EXTN (3 << 0) /* ext -ve edge */
-#define PCI224_DACCON_TRIG_Z2CT0 (4 << 0) /* Z2 CT0 out */
-#define PCI224_DACCON_TRIG_Z2CT1 (5 << 0) /* Z2 CT1 out */
-#define PCI224_DACCON_TRIG_Z2CT2 (6 << 0) /* Z2 CT2 out */
+#define PCI224_DACCON_TRIG(x) (((x) & 0x7) << 0)
+#define PCI224_DACCON_TRIG_MASK PCI224_DACCON_TRIG(7)
+#define PCI224_DACCON_TRIG_NONE PCI224_DACCON_TRIG(0) /* none */
+#define PCI224_DACCON_TRIG_SW PCI224_DACCON_TRIG(1) /* soft trig */
+#define PCI224_DACCON_TRIG_EXTP PCI224_DACCON_TRIG(2) /* ext + edge */
+#define PCI224_DACCON_TRIG_EXTN PCI224_DACCON_TRIG(3) /* ext - edge */
+#define PCI224_DACCON_TRIG_Z2CT0 PCI224_DACCON_TRIG(4) /* Z2 CT0 out */
+#define PCI224_DACCON_TRIG_Z2CT1 PCI224_DACCON_TRIG(5) /* Z2 CT1 out */
+#define PCI224_DACCON_TRIG_Z2CT2 PCI224_DACCON_TRIG(6) /* Z2 CT2 out */
/* (r/w) Polarity (PCI224 only, PCI234 always bipolar!). */
-#define PCI224_DACCON_POLAR_MASK (1 << 3)
-#define PCI224_DACCON_POLAR_UNI (0 << 3) /* range [0,Vref] */
-#define PCI224_DACCON_POLAR_BI (1 << 3) /* range [-Vref,Vref] */
+#define PCI224_DACCON_POLAR(x) (((x) & 0x1) << 3)
+#define PCI224_DACCON_POLAR_MASK PCI224_DACCON_POLAR(1)
+#define PCI224_DACCON_POLAR_UNI PCI224_DACCON_POLAR(0) /* [0,+V] */
+#define PCI224_DACCON_POLAR_BI PCI224_DACCON_POLAR(1) /* [-V,+V] */
/* (r/w) Internal Vref (PCI224 only, when LK1 in position 1-2). */
-#define PCI224_DACCON_VREF_MASK (3 << 4)
-#define PCI224_DACCON_VREF_1_25 (0 << 4) /* Vref = 1.25V */
-#define PCI224_DACCON_VREF_2_5 (1 << 4) /* Vref = 2.5V */
-#define PCI224_DACCON_VREF_5 (2 << 4) /* Vref = 5V */
-#define PCI224_DACCON_VREF_10 (3 << 4) /* Vref = 10V */
+#define PCI224_DACCON_VREF(x) (((x) & 0x3) << 4)
+#define PCI224_DACCON_VREF_MASK PCI224_DACCON_VREF(3)
+#define PCI224_DACCON_VREF_1_25 PCI224_DACCON_VREF(0) /* 1.25V */
+#define PCI224_DACCON_VREF_2_5 PCI224_DACCON_VREF(1) /* 2.5V */
+#define PCI224_DACCON_VREF_5 PCI224_DACCON_VREF(2) /* 5V */
+#define PCI224_DACCON_VREF_10 PCI224_DACCON_VREF(3) /* 10V */
/* (r/w) Wraparound mode enable (to play back stored waveform). */
-#define PCI224_DACCON_FIFOWRAP (1 << 7)
+#define PCI224_DACCON_FIFOWRAP BIT(7)
/* (r/w) FIFO enable. It MUST be set! */
-#define PCI224_DACCON_FIFOENAB (1 << 8)
+#define PCI224_DACCON_FIFOENAB BIT(8)
/* (r/w) FIFO interrupt trigger level (most values are not very useful). */
-#define PCI224_DACCON_FIFOINTR_MASK (7 << 9)
-#define PCI224_DACCON_FIFOINTR_EMPTY (0 << 9) /* when empty */
-#define PCI224_DACCON_FIFOINTR_NEMPTY (1 << 9) /* when not empty */
-#define PCI224_DACCON_FIFOINTR_NHALF (2 << 9) /* when not half full */
-#define PCI224_DACCON_FIFOINTR_HALF (3 << 9) /* when half full */
-#define PCI224_DACCON_FIFOINTR_NFULL (4 << 9) /* when not full */
-#define PCI224_DACCON_FIFOINTR_FULL (5 << 9) /* when full */
+#define PCI224_DACCON_FIFOINTR(x) (((x) & 0x7) << 9)
+#define PCI224_DACCON_FIFOINTR_MASK PCI224_DACCON_FIFOINTR(7)
+#define PCI224_DACCON_FIFOINTR_EMPTY PCI224_DACCON_FIFOINTR(0) /* empty */
+#define PCI224_DACCON_FIFOINTR_NEMPTY PCI224_DACCON_FIFOINTR(1) /* !empty */
+#define PCI224_DACCON_FIFOINTR_NHALF PCI224_DACCON_FIFOINTR(2) /* !half */
+#define PCI224_DACCON_FIFOINTR_HALF PCI224_DACCON_FIFOINTR(3) /* half */
+#define PCI224_DACCON_FIFOINTR_NFULL PCI224_DACCON_FIFOINTR(4) /* !full */
+#define PCI224_DACCON_FIFOINTR_FULL PCI224_DACCON_FIFOINTR(5) /* full */
/* (r-o) FIFO fill level. */
-#define PCI224_DACCON_FIFOFL_MASK (7 << 12)
-#define PCI224_DACCON_FIFOFL_EMPTY (1 << 12) /* 0 */
-#define PCI224_DACCON_FIFOFL_ONETOHALF (0 << 12) /* [1,2048] */
-#define PCI224_DACCON_FIFOFL_HALFTOFULL (4 << 12) /* [2049,4095] */
-#define PCI224_DACCON_FIFOFL_FULL (6 << 12) /* 4096 */
+#define PCI224_DACCON_FIFOFL(x) (((x) & 0x7) << 12)
+#define PCI224_DACCON_FIFOFL_MASK PCI224_DACCON_FIFOFL(7)
+#define PCI224_DACCON_FIFOFL_EMPTY PCI224_DACCON_FIFOFL(1) /* 0 */
+#define PCI224_DACCON_FIFOFL_ONETOHALF PCI224_DACCON_FIFOFL(0) /* 1-2048 */
+#define PCI224_DACCON_FIFOFL_HALFTOFULL PCI224_DACCON_FIFOFL(4) /* 2049-4095 */
+#define PCI224_DACCON_FIFOFL_FULL PCI224_DACCON_FIFOFL(6) /* 4096 */
/* (r-o) DAC busy flag. */
-#define PCI224_DACCON_BUSY (1 << 15)
+#define PCI224_DACCON_BUSY BIT(15)
/* (w-o) FIFO reset. */
-#define PCI224_DACCON_FIFORESET (1 << 12)
+#define PCI224_DACCON_FIFORESET BIT(12)
/* (w-o) Global reset (not sure what it does). */
-#define PCI224_DACCON_GLOBALRESET (1 << 13)
+#define PCI224_DACCON_GLOBALRESET BIT(13)
/*
* DAC FIFO size.
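
A quick way to confirm that the parameterized helpers above preserve the original DACCON bit patterns is a handful of static assertions. The sketch below re-declares the relevant macros locally so it compiles on its own; it assumes C11 _Static_assert and a local BIT() definition rather than the kernel headers.

#define BIT(n)			(1UL << (n))
#define PCI224_DACCON_TRIG(x)	(((x) & 0x7) << 0)
#define PCI224_DACCON_VREF(x)	(((x) & 0x3) << 4)

_Static_assert(PCI224_DACCON_TRIG(2) == (2 << 0), "ext +ve edge trigger unchanged");
_Static_assert(PCI224_DACCON_VREF(3) == (3 << 4), "Vref = 10V unchanged");
_Static_assert(BIT(8) == (1 << 8), "FIFO enable bit unchanged");
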
diff --git a/drivers/staging/comedi/drivers/amplc_pci230.c b/drivers/staging/comedi/drivers/amplc_pci230.c
index 907c39cc89d7..42945de31fe2 100644
--- a/drivers/staging/comedi/drivers/amplc_pci230.c
+++ b/drivers/staging/comedi/drivers/amplc_pci230.c
@@ -237,47 +237,50 @@
/*
* DACCON read-write values.
*/
-#define PCI230_DAC_OR_UNI (0 << 0) /* Output range unipolar */
-#define PCI230_DAC_OR_BIP (1 << 0) /* Output range bipolar */
-#define PCI230_DAC_OR_MASK (1 << 0)
+#define PCI230_DAC_OR(x) (((x) & 0x1) << 0)
+#define PCI230_DAC_OR_UNI PCI230_DAC_OR(0) /* Output unipolar */
+#define PCI230_DAC_OR_BIP PCI230_DAC_OR(1) /* Output bipolar */
+#define PCI230_DAC_OR_MASK PCI230_DAC_OR(1)
/*
* The following applies only if DAC FIFO support is enabled in the EXTFUNC
* register (and only for PCI230+ hardware version 2 onwards).
*/
-#define PCI230P2_DAC_FIFO_EN (1 << 8) /* FIFO enable */
+#define PCI230P2_DAC_FIFO_EN BIT(8) /* FIFO enable */
/*
* The following apply only if the DAC FIFO is enabled (and only for PCI230+
* hardware version 2 onwards).
*/
-#define PCI230P2_DAC_TRIG_NONE (0 << 2) /* No trigger */
-#define PCI230P2_DAC_TRIG_SW (1 << 2) /* Software trigger trigger */
-#define PCI230P2_DAC_TRIG_EXTP (2 << 2) /* EXTTRIG +ve edge trigger */
-#define PCI230P2_DAC_TRIG_EXTN (3 << 2) /* EXTTRIG -ve edge trigger */
-#define PCI230P2_DAC_TRIG_Z2CT0 (4 << 2) /* CT0-OUT +ve edge trigger */
-#define PCI230P2_DAC_TRIG_Z2CT1 (5 << 2) /* CT1-OUT +ve edge trigger */
-#define PCI230P2_DAC_TRIG_Z2CT2 (6 << 2) /* CT2-OUT +ve edge trigger */
-#define PCI230P2_DAC_TRIG_MASK (7 << 2)
-#define PCI230P2_DAC_FIFO_WRAP (1 << 7) /* FIFO wraparound mode */
-#define PCI230P2_DAC_INT_FIFO_EMPTY (0 << 9) /* FIFO interrupt empty */
-#define PCI230P2_DAC_INT_FIFO_NEMPTY (1 << 9)
-#define PCI230P2_DAC_INT_FIFO_NHALF (2 << 9) /* FIFO intr not half full */
-#define PCI230P2_DAC_INT_FIFO_HALF (3 << 9)
-#define PCI230P2_DAC_INT_FIFO_NFULL (4 << 9) /* FIFO interrupt not full */
-#define PCI230P2_DAC_INT_FIFO_FULL (5 << 9)
-#define PCI230P2_DAC_INT_FIFO_MASK (7 << 9)
+#define PCI230P2_DAC_TRIG(x) (((x) & 0x7) << 2)
+#define PCI230P2_DAC_TRIG_NONE PCI230P2_DAC_TRIG(0) /* none */
+#define PCI230P2_DAC_TRIG_SW PCI230P2_DAC_TRIG(1) /* soft trig */
+#define PCI230P2_DAC_TRIG_EXTP PCI230P2_DAC_TRIG(2) /* ext + edge */
+#define PCI230P2_DAC_TRIG_EXTN PCI230P2_DAC_TRIG(3) /* ext - edge */
+#define PCI230P2_DAC_TRIG_Z2CT0 PCI230P2_DAC_TRIG(4) /* Z2 CT0 out */
+#define PCI230P2_DAC_TRIG_Z2CT1 PCI230P2_DAC_TRIG(5) /* Z2 CT1 out */
+#define PCI230P2_DAC_TRIG_Z2CT2 PCI230P2_DAC_TRIG(6) /* Z2 CT2 out */
+#define PCI230P2_DAC_TRIG_MASK PCI230P2_DAC_TRIG(7)
+#define PCI230P2_DAC_FIFO_WRAP BIT(7) /* FIFO wraparound mode */
+#define PCI230P2_DAC_INT_FIFO(x) (((x) & 7) << 9)
+#define PCI230P2_DAC_INT_FIFO_EMPTY PCI230P2_DAC_INT_FIFO(0) /* empty */
+#define PCI230P2_DAC_INT_FIFO_NEMPTY PCI230P2_DAC_INT_FIFO(1) /* !empty */
+#define PCI230P2_DAC_INT_FIFO_NHALF PCI230P2_DAC_INT_FIFO(2) /* !half */
+#define PCI230P2_DAC_INT_FIFO_HALF PCI230P2_DAC_INT_FIFO(3) /* half */
+#define PCI230P2_DAC_INT_FIFO_NFULL PCI230P2_DAC_INT_FIFO(4) /* !full */
+#define PCI230P2_DAC_INT_FIFO_FULL PCI230P2_DAC_INT_FIFO(5) /* full */
+#define PCI230P2_DAC_INT_FIFO_MASK PCI230P2_DAC_INT_FIFO(7)
/*
* DACCON read-only values.
*/
-#define PCI230_DAC_BUSY (1 << 1) /* DAC busy. */
+#define PCI230_DAC_BUSY BIT(1) /* DAC busy. */
/*
* The following apply only if the DAC FIFO is enabled (and only for PCI230+
* hardware version 2 onwards).
*/
-#define PCI230P2_DAC_FIFO_UNDERRUN_LATCHED (1 << 5) /* Underrun error */
-#define PCI230P2_DAC_FIFO_EMPTY (1 << 13) /* FIFO empty */
-#define PCI230P2_DAC_FIFO_FULL (1 << 14) /* FIFO full */
-#define PCI230P2_DAC_FIFO_HALF (1 << 15) /* FIFO half full */
+#define PCI230P2_DAC_FIFO_UNDERRUN_LATCHED BIT(5) /* Underrun error */
+#define PCI230P2_DAC_FIFO_EMPTY BIT(13) /* FIFO empty */
+#define PCI230P2_DAC_FIFO_FULL BIT(14) /* FIFO full */
+#define PCI230P2_DAC_FIFO_HALF BIT(15) /* FIFO half full */
/*
* DACCON write-only, transient values.
@@ -286,8 +289,8 @@
* The following apply only if the DAC FIFO is enabled (and only for PCI230+
* hardware version 2 onwards).
*/
-#define PCI230P2_DAC_FIFO_UNDERRUN_CLEAR (1 << 5) /* Clear underrun */
-#define PCI230P2_DAC_FIFO_RESET (1 << 12) /* FIFO reset */
+#define PCI230P2_DAC_FIFO_UNDERRUN_CLEAR BIT(5) /* Clear underrun */
+#define PCI230P2_DAC_FIFO_RESET BIT(12) /* FIFO reset */
/*
* PCI230+ hardware version 2 DAC FIFO levels.
@@ -304,44 +307,48 @@
/*
* ADCCON read/write values.
*/
-#define PCI230_ADC_TRIG_NONE (0 << 0) /* No trigger */
-#define PCI230_ADC_TRIG_SW (1 << 0) /* Software trigger trigger */
-#define PCI230_ADC_TRIG_EXTP (2 << 0) /* EXTTRIG +ve edge trigger */
-#define PCI230_ADC_TRIG_EXTN (3 << 0) /* EXTTRIG -ve edge trigger */
-#define PCI230_ADC_TRIG_Z2CT0 (4 << 0) /* CT0-OUT +ve edge trigger */
-#define PCI230_ADC_TRIG_Z2CT1 (5 << 0) /* CT1-OUT +ve edge trigger */
-#define PCI230_ADC_TRIG_Z2CT2 (6 << 0) /* CT2-OUT +ve edge trigger */
-#define PCI230_ADC_TRIG_MASK (7 << 0)
-#define PCI230_ADC_IR_UNI (0 << 3) /* Input range unipolar */
-#define PCI230_ADC_IR_BIP (1 << 3) /* Input range bipolar */
-#define PCI230_ADC_IR_MASK (1 << 3)
-#define PCI230_ADC_IM_SE (0 << 4) /* Input mode single ended */
-#define PCI230_ADC_IM_DIF (1 << 4) /* Input mode differential */
-#define PCI230_ADC_IM_MASK (1 << 4)
-#define PCI230_ADC_FIFO_EN (1 << 8) /* FIFO enable */
-#define PCI230_ADC_INT_FIFO_EMPTY (0 << 9)
-#define PCI230_ADC_INT_FIFO_NEMPTY (1 << 9) /* FIFO interrupt not empty */
-#define PCI230_ADC_INT_FIFO_NHALF (2 << 9)
-#define PCI230_ADC_INT_FIFO_HALF (3 << 9) /* FIFO interrupt half full */
-#define PCI230_ADC_INT_FIFO_NFULL (4 << 9)
-#define PCI230_ADC_INT_FIFO_FULL (5 << 9) /* FIFO interrupt full */
-#define PCI230P_ADC_INT_FIFO_THRESH (7 << 9) /* FIFO interrupt threshold */
-#define PCI230_ADC_INT_FIFO_MASK (7 << 9)
+#define PCI230_ADC_TRIG(x) (((x) & 0x7) << 0)
+#define PCI230_ADC_TRIG_NONE PCI230_ADC_TRIG(0) /* none */
+#define PCI230_ADC_TRIG_SW PCI230_ADC_TRIG(1) /* soft trig */
+#define PCI230_ADC_TRIG_EXTP PCI230_ADC_TRIG(2) /* ext + edge */
+#define PCI230_ADC_TRIG_EXTN PCI230_ADC_TRIG(3) /* ext - edge */
+#define PCI230_ADC_TRIG_Z2CT0	PCI230_ADC_TRIG(4)	/* Z2 CT0 out */
+#define PCI230_ADC_TRIG_Z2CT1 PCI230_ADC_TRIG(5) /* Z2 CT1 out */
+#define PCI230_ADC_TRIG_Z2CT2 PCI230_ADC_TRIG(6) /* Z2 CT2 out */
+#define PCI230_ADC_TRIG_MASK PCI230_ADC_TRIG(7)
+#define PCI230_ADC_IR(x) (((x) & 0x1) << 3)
+#define PCI230_ADC_IR_UNI PCI230_ADC_IR(0) /* Input unipolar */
+#define PCI230_ADC_IR_BIP PCI230_ADC_IR(1) /* Input bipolar */
+#define PCI230_ADC_IR_MASK PCI230_ADC_IR(1)
+#define PCI230_ADC_IM(x) (((x) & 0x1) << 4)
+#define PCI230_ADC_IM_SE PCI230_ADC_IM(0) /* single ended */
+#define PCI230_ADC_IM_DIF PCI230_ADC_IM(1) /* differential */
+#define PCI230_ADC_IM_MASK PCI230_ADC_IM(1)
+#define PCI230_ADC_FIFO_EN BIT(8) /* FIFO enable */
+#define PCI230_ADC_INT_FIFO(x) (((x) & 0x7) << 9)
+#define PCI230_ADC_INT_FIFO_EMPTY PCI230_ADC_INT_FIFO(0) /* empty */
+#define PCI230_ADC_INT_FIFO_NEMPTY PCI230_ADC_INT_FIFO(1) /* !empty */
+#define PCI230_ADC_INT_FIFO_NHALF PCI230_ADC_INT_FIFO(2) /* !half */
+#define PCI230_ADC_INT_FIFO_HALF PCI230_ADC_INT_FIFO(3) /* half */
+#define PCI230_ADC_INT_FIFO_NFULL PCI230_ADC_INT_FIFO(4) /* !full */
+#define PCI230_ADC_INT_FIFO_FULL PCI230_ADC_INT_FIFO(5) /* full */
+#define PCI230P_ADC_INT_FIFO_THRESH PCI230_ADC_INT_FIFO(7) /* threshold */
+#define PCI230_ADC_INT_FIFO_MASK PCI230_ADC_INT_FIFO(7)
/*
* ADCCON write-only, transient values.
*/
-#define PCI230_ADC_FIFO_RESET (1 << 12) /* FIFO reset */
-#define PCI230_ADC_GLOB_RESET (1 << 13) /* Global reset */
+#define PCI230_ADC_FIFO_RESET BIT(12) /* FIFO reset */
+#define PCI230_ADC_GLOB_RESET BIT(13) /* Global reset */
/*
* ADCCON read-only values.
*/
-#define PCI230_ADC_BUSY (1 << 15) /* ADC busy */
-#define PCI230_ADC_FIFO_EMPTY (1 << 12) /* FIFO empty */
-#define PCI230_ADC_FIFO_FULL (1 << 13) /* FIFO full */
-#define PCI230_ADC_FIFO_HALF (1 << 14) /* FIFO half full */
-#define PCI230_ADC_FIFO_FULL_LATCHED (1 << 5) /* FIFO overrun occurred */
+#define PCI230_ADC_BUSY BIT(15) /* ADC busy */
+#define PCI230_ADC_FIFO_EMPTY BIT(12) /* FIFO empty */
+#define PCI230_ADC_FIFO_FULL BIT(13) /* FIFO full */
+#define PCI230_ADC_FIFO_HALF BIT(14) /* FIFO half full */
+#define PCI230_ADC_FIFO_FULL_LATCHED BIT(5) /* FIFO overrun occurred */
/*
* PCI230 ADC FIFO levels.
@@ -353,10 +360,10 @@
* PCI230+ EXTFUNC values.
*/
/* Route EXTTRIG pin to external gate inputs. */
-#define PCI230P_EXTFUNC_GAT_EXTTRIG (1 << 0)
+#define PCI230P_EXTFUNC_GAT_EXTTRIG BIT(0)
/* PCI230+ hardware version 2 values. */
/* Allow DAC FIFO to be enabled. */
-#define PCI230P2_EXTFUNC_DACFIFO (1 << 1)
+#define PCI230P2_EXTFUNC_DACFIFO BIT(1)
/*
* Counter/timer clock input configuration sources.
@@ -379,8 +386,12 @@
#define GAT_GND 1 /* GND (i.e. disabled) */
#define GAT_EXT 2 /* external gate input (PPCn on PCI230) */
#define GAT_NOUTNM2 3 /* inverted output of channel-2 modulo total */
-/* Macro to construct gate input configuration register value. */
-#define GAT_CONFIG(chan, src) ((((chan) & 3) << 3) | ((src) & 7))
+
+static inline unsigned int pci230_gat_config(unsigned int chan,
+ unsigned int src)
+{
+ return ((chan & 3) << 3) | (src & 7);
+}
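
For reference, the new inline produces exactly the value the old GAT_CONFIG() macro did. A minimal standalone check, with the helper and GAT_GND re-declared locally purely for illustration:

#include <assert.h>

#define GAT_GND			1	/* GND (i.e. disabled) */
#define GAT_CONFIG(chan, src)	((((chan) & 3) << 3) | ((src) & 7))

static inline unsigned int pci230_gat_config(unsigned int chan,
					     unsigned int src)
{
	return ((chan & 3) << 3) | (src & 7);
}

int main(void)
{
	/* gate channel 1 off: 0x09 either way */
	assert(pci230_gat_config(1, GAT_GND) == GAT_CONFIG(1, GAT_GND));
	return 0;
}
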
/*
* Summary of CLK_OUTNM1 and GAT_NOUTNM2 connections for PCI230 and PCI260:
@@ -398,20 +409,20 @@
* Interrupt enables/status register values.
*/
#define PCI230_INT_DISABLE 0
-#define PCI230_INT_PPI_C0 (1 << 0)
-#define PCI230_INT_PPI_C3 (1 << 1)
-#define PCI230_INT_ADC (1 << 2)
-#define PCI230_INT_ZCLK_CT1 (1 << 5)
+#define PCI230_INT_PPI_C0 BIT(0)
+#define PCI230_INT_PPI_C3 BIT(1)
+#define PCI230_INT_ADC BIT(2)
+#define PCI230_INT_ZCLK_CT1 BIT(5)
/* For PCI230+ hardware version 2 when DAC FIFO enabled. */
-#define PCI230P2_INT_DAC (1 << 4)
+#define PCI230P2_INT_DAC BIT(4)
/*
* (Potentially) shared resources and their owners
*/
enum {
- RES_Z2CT0 = (1U << 0), /* Z2-CT0 */
- RES_Z2CT1 = (1U << 1), /* Z2-CT1 */
- RES_Z2CT2 = (1U << 2) /* Z2-CT2 */
+ RES_Z2CT0 = BIT(0), /* Z2-CT0 */
+ RES_Z2CT1 = BIT(1), /* Z2-CT1 */
+ RES_Z2CT2 = BIT(2) /* Z2-CT2 */
};
enum {
@@ -626,10 +637,10 @@ static void pci230_release_all_resources(struct comedi_device *dev,
pci230_release_shared(dev, (unsigned char)~0, owner);
}
-static unsigned int pci230_divide_ns(uint64_t ns, unsigned int timebase,
+static unsigned int pci230_divide_ns(u64 ns, unsigned int timebase,
unsigned int flags)
{
- uint64_t div;
+ u64 div;
unsigned int rem;
div = ns;
@@ -652,7 +663,7 @@ static unsigned int pci230_divide_ns(uint64_t ns, unsigned int timebase,
* Given desired period in ns, returns the required internal clock source
* and gets the initial count.
*/
-static unsigned int pci230_choose_clk_count(uint64_t ns, unsigned int *count,
+static unsigned int pci230_choose_clk_count(u64 ns, unsigned int *count,
unsigned int flags)
{
unsigned int clk_src, cnt;
@@ -676,7 +687,7 @@ static void pci230_ns_to_single_timer(unsigned int *ns, unsigned int flags)
}
static void pci230_ct_setup_ns_mode(struct comedi_device *dev, unsigned int ct,
- unsigned int mode, uint64_t ns,
+ unsigned int mode, u64 ns,
unsigned int flags)
{
unsigned int clk_src;
@@ -1263,7 +1274,8 @@ static void pci230_ao_start(struct comedi_device *dev,
irqflags);
}
/* Set CT1 gate high to start counting. */
- outb(GAT_CONFIG(1, GAT_VCC), dev->iobase + PCI230_ZGAT_SCE);
+ outb(pci230_gat_config(1, GAT_VCC),
+ dev->iobase + PCI230_ZGAT_SCE);
break;
case TRIG_INT:
async->inttrig = pci230_ao_inttrig_scan_begin;
@@ -1351,7 +1363,8 @@ static int pci230_ao_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
* cmd->scan_begin_arg is sampling period in ns.
* Gate it off for now.
*/
- outb(GAT_CONFIG(1, GAT_GND), dev->iobase + PCI230_ZGAT_SCE);
+ outb(pci230_gat_config(1, GAT_GND),
+ dev->iobase + PCI230_ZGAT_SCE);
pci230_ct_setup_ns_mode(dev, 1, I8254_MODE3,
cmd->scan_begin_arg,
cmd->flags);
@@ -1792,9 +1805,9 @@ static int pci230_ai_inttrig_scan_begin(struct comedi_device *dev,
spin_lock_irqsave(&devpriv->ai_stop_spinlock, irqflags);
if (devpriv->ai_cmd_started) {
/* Trigger scan by waggling CT0 gate source. */
- zgat = GAT_CONFIG(0, GAT_GND);
+ zgat = pci230_gat_config(0, GAT_GND);
outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
- zgat = GAT_CONFIG(0, GAT_VCC);
+ zgat = pci230_gat_config(0, GAT_VCC);
outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
}
spin_unlock_irqrestore(&devpriv->ai_stop_spinlock, irqflags);
@@ -1926,20 +1939,20 @@ static void pci230_ai_start(struct comedi_device *dev,
* Conversion timer CT2 needs to be gated by
* inverted output of monostable CT2.
*/
- zgat = GAT_CONFIG(2, GAT_NOUTNM2);
+ zgat = pci230_gat_config(2, GAT_NOUTNM2);
} else {
/*
* Conversion timer CT2 needs to be gated on
* continuously.
*/
- zgat = GAT_CONFIG(2, GAT_VCC);
+ zgat = pci230_gat_config(2, GAT_VCC);
}
outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
if (cmd->scan_begin_src != TRIG_FOLLOW) {
/* Set monostable CT0 trigger source. */
switch (cmd->scan_begin_src) {
default:
- zgat = GAT_CONFIG(0, GAT_VCC);
+ zgat = pci230_gat_config(0, GAT_VCC);
break;
case TRIG_EXT:
/*
@@ -1950,21 +1963,21 @@ static void pci230_ai_start(struct comedi_device *dev,
* input in order to use it as an external scan
* trigger.
*/
- zgat = GAT_CONFIG(0, GAT_EXT);
+ zgat = pci230_gat_config(0, GAT_EXT);
break;
case TRIG_TIMER:
/*
* Monostable CT0 triggered by rising edge on
* inverted output of CT1 (falling edge on CT1).
*/
- zgat = GAT_CONFIG(0, GAT_NOUTNM2);
+ zgat = pci230_gat_config(0, GAT_NOUTNM2);
break;
case TRIG_INT:
/*
* Monostable CT0 is triggered by inttrig
* function waggling the CT0 gate source.
*/
- zgat = GAT_CONFIG(0, GAT_VCC);
+ zgat = pci230_gat_config(0, GAT_VCC);
break;
}
outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
@@ -1974,7 +1987,7 @@ static void pci230_ai_start(struct comedi_device *dev,
* Scan period timer CT1 needs to be
* gated on to start counting.
*/
- zgat = GAT_CONFIG(1, GAT_VCC);
+ zgat = pci230_gat_config(1, GAT_VCC);
outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
break;
case TRIG_INT:
@@ -2216,7 +2229,7 @@ static int pci230_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
* Note, counter/timer output 2 can be monitored on the
* connector: PCI230 pin 21, PCI260 pin 18.
*/
- zgat = GAT_CONFIG(2, GAT_GND);
+ zgat = pci230_gat_config(2, GAT_GND);
outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
/* Set counter/timer 2 to the specified conversion period. */
pci230_ct_setup_ns_mode(dev, 2, I8254_MODE3, cmd->convert_arg,
@@ -2234,10 +2247,10 @@ static int pci230_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
* monostable to stop it triggering. The trigger
* source will be changed later.
*/
- zgat = GAT_CONFIG(0, GAT_VCC);
+ zgat = pci230_gat_config(0, GAT_VCC);
outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
pci230_ct_setup_ns_mode(dev, 0, I8254_MODE1,
- ((uint64_t)cmd->convert_arg *
+ ((u64)cmd->convert_arg *
cmd->scan_end_arg),
CMDF_ROUND_UP);
if (cmd->scan_begin_src == TRIG_TIMER) {
@@ -2247,7 +2260,7 @@ static int pci230_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
*
* Set up CT1 but gate it off for now.
*/
- zgat = GAT_CONFIG(1, GAT_GND);
+ zgat = pci230_gat_config(1, GAT_GND);
outb(zgat, dev->iobase + PCI230_ZGAT_SCE);
pci230_ct_setup_ns_mode(dev, 1, I8254_MODE3,
cmd->scan_begin_arg,
diff --git a/drivers/staging/comedi/drivers/amplc_pci263.c b/drivers/staging/comedi/drivers/amplc_pci263.c
index b6768aa90547..8d4069bc5716 100644
--- a/drivers/staging/comedi/drivers/amplc_pci263.c
+++ b/drivers/staging/comedi/drivers/amplc_pci263.c
@@ -1,49 +1,53 @@
/*
- comedi/drivers/amplc_pci263.c
- Driver for Amplicon PCI263 relay board.
+ * Driver for Amplicon PCI263 relay board.
+ *
+ * Copyright (C) 2002 MEV Ltd. <http://www.mev.co.uk/>
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
- Copyright (C) 2002 MEV Ltd. <http://www.mev.co.uk/>
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
/*
-Driver: amplc_pci263
-Description: Amplicon PCI263
-Author: Ian Abbott <abbotti@mev.co.uk>
-Devices: [Amplicon] PCI263 (amplc_pci263)
-Updated: Fri, 12 Apr 2013 15:19:36 +0100
-Status: works
-
-Configuration options: not applicable, uses PCI auto config
-
-The board appears as one subdevice, with 16 digital outputs, each
-connected to a reed-relay. Relay contacts are closed when output is 1.
-The state of the outputs can be read.
-*/
+ * Driver: amplc_pci263
+ * Description: Amplicon PCI263
+ * Author: Ian Abbott <abbotti@mev.co.uk>
+ * Devices: [Amplicon] PCI263 (amplc_pci263)
+ * Updated: Fri, 12 Apr 2013 15:19:36 +0100
+ * Status: works
+ *
+ * Configuration options: not applicable, uses PCI auto config
+ *
+ * The board appears as one subdevice, with 16 digital outputs, each
+ * connected to a reed-relay. Relay contacts are closed when output is 1.
+ * The state of the outputs can be read.
+ */
#include <linux/module.h>
#include "../comedi_pci.h"
+/* PCI263 registers */
+#define PCI263_DO_0_7_REG 0x00
+#define PCI263_DO_8_15_REG 0x01
+
static int pci263_do_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
unsigned int *data)
{
if (comedi_dio_update_state(s, data)) {
- outb(s->state & 0xff, dev->iobase);
- outb((s->state >> 8) & 0xff, dev->iobase + 1);
+ outb(s->state & 0xff, dev->iobase + PCI263_DO_0_7_REG);
+ outb((s->state >> 8) & 0xff, dev->iobase + PCI263_DO_8_15_REG);
}
data[1] = s->state;
@@ -67,16 +71,18 @@ static int pci263_auto_attach(struct comedi_device *dev,
if (ret)
return ret;
+ /* Digital Output subdevice */
s = &dev->subdevices[0];
- /* digital output subdevice */
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = 16;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = pci263_do_insn_bits;
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = 16;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = pci263_do_insn_bits;
+
/* read initial relay state */
- s->state = inb(dev->iobase) | (inb(dev->iobase + 1) << 8);
+ s->state = inb(dev->iobase + PCI263_DO_0_7_REG) |
+ (inb(dev->iobase + PCI263_DO_8_15_REG) << 8);
return 0;
}
diff --git a/drivers/staging/comedi/drivers/c6xdigio.c b/drivers/staging/comedi/drivers/c6xdigio.c
index 1a109e30d8ff..8ee732571588 100644
--- a/drivers/staging/comedi/drivers/c6xdigio.c
+++ b/drivers/staging/comedi/drivers/c6xdigio.c
@@ -47,8 +47,8 @@
*/
#define C6XDIGIO_DATA_REG 0x00
#define C6XDIGIO_DATA_CHAN(x) (((x) + 1) << 4)
-#define C6XDIGIO_DATA_PWM (1 << 5)
-#define C6XDIGIO_DATA_ENCODER (1 << 6)
+#define C6XDIGIO_DATA_PWM BIT(5)
+#define C6XDIGIO_DATA_ENCODER BIT(6)
#define C6XDIGIO_STATUS_REG 0x01
#define C6XDIGIO_CTRL_REG 0x02
diff --git a/drivers/staging/comedi/drivers/comedi_8254.h b/drivers/staging/comedi/drivers/comedi_8254.h
index f4610ead6172..a12c29455d9d 100644
--- a/drivers/staging/comedi/drivers/comedi_8254.h
+++ b/drivers/staging/comedi/drivers/comedi_8254.h
@@ -53,13 +53,15 @@ struct comedi_subdevice;
#define I8254_COUNTER2_REG 0x02
#define I8254_CTRL_REG 0x03
#define I8254_CTRL_SEL_CTR(x) ((x) << 6)
-#define I8254_CTRL_READBACK_COUNT ((3 << 6) | (1 << 4))
-#define I8254_CTRL_READBACK_STATUS ((3 << 6) | (1 << 5))
+#define I8254_CTRL_READBACK(x) (I8254_CTRL_SEL_CTR(3) | BIT(x))
+#define I8254_CTRL_READBACK_COUNT I8254_CTRL_READBACK(4)
+#define I8254_CTRL_READBACK_STATUS I8254_CTRL_READBACK(5)
#define I8254_CTRL_READBACK_SEL_CTR(x) (2 << (x))
-#define I8254_CTRL_LATCH (0 << 4)
-#define I8254_CTRL_LSB_ONLY (1 << 4)
-#define I8254_CTRL_MSB_ONLY (2 << 4)
-#define I8254_CTRL_LSB_MSB (3 << 4)
+#define I8254_CTRL_RW(x) (((x) & 0x3) << 4)
+#define I8254_CTRL_LATCH I8254_CTRL_RW(0)
+#define I8254_CTRL_LSB_ONLY I8254_CTRL_RW(1)
+#define I8254_CTRL_MSB_ONLY I8254_CTRL_RW(2)
+#define I8254_CTRL_LSB_MSB I8254_CTRL_RW(3)
/* counter maps zero to 0x10000 */
#define I8254_MAX_COUNT 0x10000
diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c
index 940781183fac..e0a34c2687a8 100644
--- a/drivers/staging/comedi/drivers/das1800.c
+++ b/drivers/staging/comedi/drivers/das1800.c
@@ -1,98 +1,82 @@
/*
- comedi/drivers/das1800.c
- Driver for Keitley das1700/das1800 series boards
- Copyright (C) 2000 Frank Mori Hess <fmhess@users.sourceforge.net>
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: das1800
-Description: Keithley Metrabyte DAS1800 (& compatibles)
-Author: Frank Mori Hess <fmhess@users.sourceforge.net>
-Devices: [Keithley Metrabyte] DAS-1701ST (das-1701st),
- DAS-1701ST-DA (das-1701st-da), DAS-1701/AO (das-1701ao),
- DAS-1702ST (das-1702st), DAS-1702ST-DA (das-1702st-da),
- DAS-1702HR (das-1702hr), DAS-1702HR-DA (das-1702hr-da),
- DAS-1702/AO (das-1702ao), DAS-1801ST (das-1801st),
- DAS-1801ST-DA (das-1801st-da), DAS-1801HC (das-1801hc),
- DAS-1801AO (das-1801ao), DAS-1802ST (das-1802st),
- DAS-1802ST-DA (das-1802st-da), DAS-1802HR (das-1802hr),
- DAS-1802HR-DA (das-1802hr-da), DAS-1802HC (das-1802hc),
- DAS-1802AO (das-1802ao)
-Status: works
-
-The waveform analog output on the 'ao' cards is not supported.
-If you need it, send me (Frank Hess) an email.
-
-Configuration options:
- [0] - I/O port base address
- [1] - IRQ (optional, required for timed or externally triggered conversions)
- [2] - DMA0 (optional, requires irq)
- [3] - DMA1 (optional, requires irq and dma0)
-*/
-/*
+ * Comedi driver for Keithley DAS-1700/DAS-1800 series boards
+ * Copyright (C) 2000 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
-This driver supports the following Keithley boards:
-
-das-1701st
-das-1701st-da
-das-1701ao
-das-1702st
-das-1702st-da
-das-1702hr
-das-1702hr-da
-das-1702ao
-das-1801st
-das-1801st-da
-das-1801hc
-das-1801ao
-das-1802st
-das-1802st-da
-das-1802hr
-das-1802hr-da
-das-1802hc
-das-1802ao
-
-Options:
- [0] - base io address
- [1] - irq (optional, required for timed or externally triggered conversions)
- [2] - dma0 (optional, requires irq)
- [3] - dma1 (optional, requires irq and dma0)
-
-irq can be omitted, although the cmd interface will not work without it.
-
-analog input cmd triggers supported:
- start_src: TRIG_NOW | TRIG_EXT
- scan_begin_src: TRIG_FOLLOW | TRIG_TIMER | TRIG_EXT
- scan_end_src: TRIG_COUNT
- convert_src: TRIG_TIMER | TRIG_EXT (TRIG_EXT requires scan_begin_src == TRIG_FOLLOW)
- stop_src: TRIG_COUNT | TRIG_EXT | TRIG_NONE
-
-scan_begin_src triggers TRIG_TIMER and TRIG_EXT use the card's
-'burst mode' which limits the valid conversion time to 64 microseconds
-(convert_arg <= 64000). This limitation does not apply if scan_begin_src
-is TRIG_FOLLOW.
-
-NOTES:
-Only the DAS-1801ST has been tested by me.
-Unipolar and bipolar ranges cannot be mixed in the channel/gain list.
-
-TODO:
- Make it automatically allocate irq and dma channels if they are not specified
- Add support for analog out on 'ao' cards
- read insn for analog out
-*/
+/*
+ * Driver: das1800
+ * Description: Keithley Metrabyte DAS1800 (& compatibles)
+ * Author: Frank Mori Hess <fmhess@users.sourceforge.net>
+ * Devices: [Keithley Metrabyte] DAS-1701ST (das-1701st),
+ * DAS-1701ST-DA (das-1701st-da), DAS-1701/AO (das-1701ao),
+ * DAS-1702ST (das-1702st), DAS-1702ST-DA (das-1702st-da),
+ * DAS-1702HR (das-1702hr), DAS-1702HR-DA (das-1702hr-da),
+ * DAS-1702/AO (das-1702ao), DAS-1801ST (das-1801st),
+ * DAS-1801ST-DA (das-1801st-da), DAS-1801HC (das-1801hc),
+ * DAS-1801AO (das-1801ao), DAS-1802ST (das-1802st),
+ * DAS-1802ST-DA (das-1802st-da), DAS-1802HR (das-1802hr),
+ * DAS-1802HR-DA (das-1802hr-da), DAS-1802HC (das-1802hc),
+ * DAS-1802AO (das-1802ao)
+ * Status: works
+ *
+ * Configuration options:
+ * [0] - I/O port base address
+ * [1] - IRQ (optional, required for analog input cmd support)
+ * [2] - DMA0 (optional, requires irq)
+ * [3] - DMA1 (optional, requires irq and dma0)
+ *
+ * analog input cmd triggers supported:
+ *
+ * start_src TRIG_NOW command starts immediately
+ * TRIG_EXT command starts on external pin TGIN
+ *
+ * scan_begin_src TRIG_FOLLOW paced/external scans start immediately
+ * TRIG_TIMER burst scans start periodically
+ * TRIG_EXT burst scans start on external pin XPCLK
+ *
+ * scan_end_src TRIG_COUNT scan ends after last channel
+ *
+ * convert_src TRIG_TIMER paced/burst conversions are timed
+ * TRIG_EXT conversions on external pin XPCLK
+ * (requires scan_begin_src == TRIG_FOLLOW)
+ *
+ * stop_src TRIG_COUNT command stops after stop_arg scans
+ * TRIG_EXT command stops on external pin TGIN
+ * TRIG_NONE command runs until canceled
+ *
+ * If TRIG_EXT is used for both the start_src and stop_src, the first TGIN
+ * trigger starts the command and the second trigger stops it. If only one
+ * of them is TRIG_EXT, the first TGIN trigger performs that start or stop.
+ * The external pin TGIN is normally set for negative edge triggering. It
+ * can be set to positive edge with the CR_INVERT flag. If TRIG_EXT is used
+ * for both the start_src and stop_src, they must have the same polarity.
+ *
+ * For 'burst' scans the slowest conversion rate is 64 microseconds per sample
+ * (convert_arg <= 64000); this limitation does not apply to 'paced' scans.
+ * The fastest conversion rate is limited by the board (convert_arg >= ai_speed).
+ * Maximum conversion speeds are not always achievable depending on the
+ * board setup (see user manual).
+ *
+ * NOTES:
+ * Only the DAS-1801ST has been tested by me.
+ * Unipolar and bipolar ranges cannot be mixed in the channel/gain list.
+ *
+ * The waveform analog output on the 'ao' cards is not supported.
+ * If you need it, send me (Frank Hess) an email.
+ */
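
A user-space sketch of the burst-mode trigger combination documented above (TRIG_EXT start on TGIN, TRIG_TIMER scan_begin, TRIG_TIMER conversions within the 64 us burst limit, TRIG_COUNT stop). It assumes the comedilib <comedi.h> definitions of comedi_cmd, the TRIG_* constants and CR_PACK(); the values are illustrative only and would still be adjusted by the driver's cmdtest step.

#include <comedi.h>
#include <string.h>

static unsigned int chanlist[4];

static void das1800_burst_cmd_example(comedi_cmd *cmd, unsigned int subdev)
{
	unsigned int i;

	for (i = 0; i < 4; i++)
		chanlist[i] = CR_PACK(i, 0, AREF_GROUND);

	memset(cmd, 0, sizeof(*cmd));
	cmd->subdev		= subdev;
	cmd->start_src		= TRIG_EXT;	/* start on external pin TGIN */
	cmd->start_arg		= 0;
	cmd->scan_begin_src	= TRIG_TIMER;	/* burst scans start periodically */
	cmd->scan_begin_arg	= 1000000;	/* 1 ms between scans */
	cmd->convert_src	= TRIG_TIMER;
	cmd->convert_arg	= 10000;	/* 10 us/sample, <= 64000 in burst mode */
	cmd->scan_end_src	= TRIG_COUNT;
	cmd->scan_end_arg	= 4;
	cmd->stop_src		= TRIG_COUNT;
	cmd->stop_arg		= 100;		/* stop after 100 scans */
	cmd->chanlist		= chanlist;
	cmd->chanlist_len	= 4;
}
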
#include <linux/module.h>
#include <linux/interrupt.h>
@@ -107,7 +91,6 @@ TODO:
/* misc. defines */
#define DAS1800_SIZE 16 /* uses 16 io addresses */
#define FIFO_SIZE 1024 /* 1024 sample fifo */
-#define UNIPOLAR 0x4 /* bit that determines whether input range is uni/bipolar */
#define DMA_BUF_SIZE 0x1ff00 /* size in bytes of dma buffers */
/* Registers for the das1800 */
@@ -125,6 +108,7 @@ TODO:
#define CGSL 0x8
#define TGEN 0x10
#define TGSL 0x20
+#define TGPL 0x40
#define ATEN 0x80
#define DAS1800_CONTROL_B 0x5
#define DMA_CH5 0x1
@@ -133,7 +117,7 @@ TODO:
#define DMA_CH5_CH6 0x5
#define DMA_CH6_CH7 0x6
#define DMA_CH7_CH5 0x7
-#define DMA_ENABLED 0x3 /* mask used to determine if dma is enabled */
+#define DMA_ENABLED 0x3
#define DMA_DUAL 0x4
#define IRQ3 0x8
#define IRQ5 0x10
@@ -151,319 +135,214 @@ TODO:
#define SD 0x40
#define UB 0x80
#define DAS1800_STATUS 0x7
-/* bits that prevent interrupt status bits (and CVEN) from being cleared on write */
-#define CLEAR_INTR_MASK (CVEN_MASK | 0x1f)
#define INT 0x1
#define DMATC 0x2
#define CT0TC 0x8
#define OVF 0x10
#define FHF 0x20
#define FNE 0x40
-#define CVEN_MASK 0x40 /* masks CVEN on write */
#define CVEN 0x80
+#define CVEN_MASK 0x40
+#define CLEAR_INTR_MASK (CVEN_MASK | 0x1f)
#define DAS1800_BURST_LENGTH 0x8
#define DAS1800_BURST_RATE 0x9
#define DAS1800_QRAM_ADDRESS 0xa
#define DAS1800_COUNTER 0xc
-#define IOBASE2 0x400 /* offset of additional ioports used on 'ao' cards */
+#define IOBASE2 0x400
-enum {
- das1701st, das1701st_da, das1702st, das1702st_da, das1702hr,
- das1702hr_da,
- das1701ao, das1702ao, das1801st, das1801st_da, das1802st, das1802st_da,
- das1802hr, das1802hr_da, das1801hc, das1802hc, das1801ao, das1802ao
-};
-
-/* analog input ranges */
-static const struct comedi_lrange range_ai_das1801 = {
+static const struct comedi_lrange das1801_ai_range = {
8, {
- BIP_RANGE(5),
- BIP_RANGE(1),
- BIP_RANGE(0.1),
- BIP_RANGE(0.02),
- UNI_RANGE(5),
- UNI_RANGE(1),
- UNI_RANGE(0.1),
- UNI_RANGE(0.02)
+ BIP_RANGE(5), /* bipolar gain = 1 */
+ BIP_RANGE(1), /* bipolar gain = 10 */
+ BIP_RANGE(0.1), /* bipolar gain = 50 */
+ BIP_RANGE(0.02), /* bipolar gain = 250 */
+ UNI_RANGE(5), /* unipolar gain = 1 */
+ UNI_RANGE(1), /* unipolar gain = 10 */
+ UNI_RANGE(0.1), /* unipolar gain = 50 */
+ UNI_RANGE(0.02) /* unipolar gain = 250 */
}
};
-static const struct comedi_lrange range_ai_das1802 = {
+static const struct comedi_lrange das1802_ai_range = {
8, {
- BIP_RANGE(10),
- BIP_RANGE(5),
- BIP_RANGE(2.5),
- BIP_RANGE(1.25),
- UNI_RANGE(10),
- UNI_RANGE(5),
- UNI_RANGE(2.5),
- UNI_RANGE(1.25)
+ BIP_RANGE(10), /* bipolar gain = 1 */
+ BIP_RANGE(5), /* bipolar gain = 2 */
+ BIP_RANGE(2.5), /* bipolar gain = 4 */
+ BIP_RANGE(1.25), /* bipolar gain = 8 */
+ UNI_RANGE(10), /* unipolar gain = 1 */
+ UNI_RANGE(5), /* unipolar gain = 2 */
+ UNI_RANGE(2.5), /* unipolar gain = 4 */
+ UNI_RANGE(1.25) /* unipolar gain = 8 */
}
};
+/*
+ * The waveform analog outputs on the 'ao' boards are not currently
+ * supported. They have a comedi_lrange of:
+ * { 2, { BIP_RANGE(10), BIP_RANGE(5) } }
+ */
+
+enum das1800_boardid {
+ BOARD_DAS1701ST,
+ BOARD_DAS1701ST_DA,
+ BOARD_DAS1702ST,
+ BOARD_DAS1702ST_DA,
+ BOARD_DAS1702HR,
+ BOARD_DAS1702HR_DA,
+ BOARD_DAS1701AO,
+ BOARD_DAS1702AO,
+ BOARD_DAS1801ST,
+ BOARD_DAS1801ST_DA,
+ BOARD_DAS1802ST,
+ BOARD_DAS1802ST_DA,
+ BOARD_DAS1802HR,
+ BOARD_DAS1802HR_DA,
+ BOARD_DAS1801HC,
+ BOARD_DAS1802HC,
+ BOARD_DAS1801AO,
+ BOARD_DAS1802AO
+};
+
+/* board probe id values (hi byte of the digital input register) */
+#define DAS1800_ID_ST_DA 0x3
+#define DAS1800_ID_HR_DA 0x4
+#define DAS1800_ID_AO 0x5
+#define DAS1800_ID_HR 0x6
+#define DAS1800_ID_ST 0x7
+#define DAS1800_ID_HC 0x8
+
struct das1800_board {
const char *name;
- int ai_speed; /* max conversion period in nanoseconds */
- int resolution; /* bits of ai resolution */
- int qram_len; /* length of card's channel / gain queue */
- int common; /* supports AREF_COMMON flag */
- int do_n_chan; /* number of digital output channels */
- int ao_ability; /* 0 == no analog out, 1 == basic analog out, 2 == waveform analog out */
- int ao_n_chan; /* number of analog out channels */
- const struct comedi_lrange *range_ai; /* available input ranges */
+ unsigned char id;
+ unsigned int ai_speed;
+ unsigned int is_01_series:1;
};
-/* Warning: the maximum conversion speeds listed below are
- * not always achievable depending on board setup (see
- * user manual.)
- */
static const struct das1800_board das1800_boards[] = {
- {
- .name = "das-1701st",
- .ai_speed = 6250,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 0,
- .ao_n_chan = 0,
- .range_ai = &range_ai_das1801,
- },
- {
- .name = "das-1701st-da",
- .ai_speed = 6250,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 1,
- .ao_n_chan = 4,
- .range_ai = &range_ai_das1801,
- },
- {
- .name = "das-1702st",
- .ai_speed = 6250,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 0,
- .ao_n_chan = 0,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1702st-da",
- .ai_speed = 6250,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 1,
- .ao_n_chan = 4,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1702hr",
- .ai_speed = 20000,
- .resolution = 16,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 0,
- .ao_n_chan = 0,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1702hr-da",
- .ai_speed = 20000,
- .resolution = 16,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 1,
- .ao_n_chan = 2,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1701ao",
- .ai_speed = 6250,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 2,
- .ao_n_chan = 2,
- .range_ai = &range_ai_das1801,
- },
- {
- .name = "das-1702ao",
- .ai_speed = 6250,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 2,
- .ao_n_chan = 2,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1801st",
- .ai_speed = 3000,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 0,
- .ao_n_chan = 0,
- .range_ai = &range_ai_das1801,
- },
- {
- .name = "das-1801st-da",
- .ai_speed = 3000,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 0,
- .ao_n_chan = 4,
- .range_ai = &range_ai_das1801,
- },
- {
- .name = "das-1802st",
- .ai_speed = 3000,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 0,
- .ao_n_chan = 0,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1802st-da",
- .ai_speed = 3000,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 1,
- .ao_n_chan = 4,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1802hr",
- .ai_speed = 10000,
- .resolution = 16,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 0,
- .ao_n_chan = 0,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1802hr-da",
- .ai_speed = 10000,
- .resolution = 16,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 1,
- .ao_n_chan = 2,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1801hc",
- .ai_speed = 3000,
- .resolution = 12,
- .qram_len = 64,
- .common = 0,
- .do_n_chan = 8,
- .ao_ability = 1,
- .ao_n_chan = 2,
- .range_ai = &range_ai_das1801,
- },
- {
- .name = "das-1802hc",
- .ai_speed = 3000,
- .resolution = 12,
- .qram_len = 64,
- .common = 0,
- .do_n_chan = 8,
- .ao_ability = 1,
- .ao_n_chan = 2,
- .range_ai = &range_ai_das1802,
- },
- {
- .name = "das-1801ao",
- .ai_speed = 3000,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 2,
- .ao_n_chan = 2,
- .range_ai = &range_ai_das1801,
- },
- {
- .name = "das-1802ao",
- .ai_speed = 3000,
- .resolution = 12,
- .qram_len = 256,
- .common = 1,
- .do_n_chan = 4,
- .ao_ability = 2,
- .ao_n_chan = 2,
- .range_ai = &range_ai_das1802,
- },
+ [BOARD_DAS1701ST] = {
+ .name = "das-1701st",
+ .id = DAS1800_ID_ST,
+ .ai_speed = 6250,
+ .is_01_series = 1,
+ },
+ [BOARD_DAS1701ST_DA] = {
+ .name = "das-1701st-da",
+ .id = DAS1800_ID_ST_DA,
+ .ai_speed = 6250,
+ .is_01_series = 1,
+ },
+ [BOARD_DAS1702ST] = {
+ .name = "das-1702st",
+ .id = DAS1800_ID_ST,
+ .ai_speed = 6250,
+ },
+ [BOARD_DAS1702ST_DA] = {
+ .name = "das-1702st-da",
+ .id = DAS1800_ID_ST_DA,
+ .ai_speed = 6250,
+ },
+ [BOARD_DAS1702HR] = {
+ .name = "das-1702hr",
+ .id = DAS1800_ID_HR,
+ .ai_speed = 20000,
+ },
+ [BOARD_DAS1702HR_DA] = {
+ .name = "das-1702hr-da",
+ .id = DAS1800_ID_HR_DA,
+ .ai_speed = 20000,
+ },
+ [BOARD_DAS1701AO] = {
+ .name = "das-1701ao",
+ .id = DAS1800_ID_AO,
+ .ai_speed = 6250,
+ .is_01_series = 1,
+ },
+ [BOARD_DAS1702AO] = {
+ .name = "das-1702ao",
+ .id = DAS1800_ID_AO,
+ .ai_speed = 6250,
+ },
+ [BOARD_DAS1801ST] = {
+ .name = "das-1801st",
+ .id = DAS1800_ID_ST,
+ .ai_speed = 3000,
+ .is_01_series = 1,
+ },
+ [BOARD_DAS1801ST_DA] = {
+ .name = "das-1801st-da",
+ .id = DAS1800_ID_ST_DA,
+ .ai_speed = 3000,
+ .is_01_series = 1,
+ },
+ [BOARD_DAS1802ST] = {
+ .name = "das-1802st",
+ .id = DAS1800_ID_ST,
+ .ai_speed = 3000,
+ },
+ [BOARD_DAS1802ST_DA] = {
+ .name = "das-1802st-da",
+ .id = DAS1800_ID_ST_DA,
+ .ai_speed = 3000,
+ },
+ [BOARD_DAS1802HR] = {
+ .name = "das-1802hr",
+ .id = DAS1800_ID_HR,
+ .ai_speed = 10000,
+ },
+ [BOARD_DAS1802HR_DA] = {
+ .name = "das-1802hr-da",
+ .id = DAS1800_ID_HR_DA,
+ .ai_speed = 10000,
+ },
+ [BOARD_DAS1801HC] = {
+ .name = "das-1801hc",
+ .id = DAS1800_ID_HC,
+ .ai_speed = 3000,
+ .is_01_series = 1,
+ },
+ [BOARD_DAS1802HC] = {
+ .name = "das-1802hc",
+ .id = DAS1800_ID_HC,
+ .ai_speed = 3000,
+ },
+ [BOARD_DAS1801AO] = {
+ .name = "das-1801ao",
+ .id = DAS1800_ID_AO,
+ .ai_speed = 3000,
+ .is_01_series = 1,
+ },
+ [BOARD_DAS1802AO] = {
+ .name = "das-1802ao",
+ .id = DAS1800_ID_AO,
+ .ai_speed = 3000,
+ },
};
struct das1800_private {
struct comedi_isadma *dma;
- int irq_dma_bits; /* bits for control register b */
- /* dma bits for control register b, stored so that dma can be
- * turned on and off */
+ int irq_dma_bits;
int dma_bits;
- uint16_t *fifo_buf; /* bounce buffer for analog input FIFO */
- unsigned long iobase2; /* secondary io address used for analog out on 'ao' boards */
- unsigned short ao_update_bits; /* remembers the last write to the
- * 'update' dac */
-};
-
-/* analog out range for 'ao' boards */
-/*
-static const struct comedi_lrange range_ao_2 = {
- 2, {
- BIP_RANGE(10),
- BIP_RANGE(5)
- }
+ unsigned short *fifo_buf;
+ unsigned long iobase2;
+ bool ai_is_unipolar;
};
-*/
-
-static inline uint16_t munge_bipolar_sample(const struct comedi_device *dev,
- uint16_t sample)
-{
- const struct das1800_board *board = dev->board_ptr;
-
- sample += 1 << (board->resolution - 1);
- return sample;
-}
-static void munge_data(struct comedi_device *dev, uint16_t *array,
- unsigned int num_elements)
+static void das1800_ai_munge(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ void *data, unsigned int num_bytes,
+ unsigned int start_chan_index)
{
+ struct das1800_private *devpriv = dev->private;
+ unsigned short *array = data;
+ unsigned int num_samples = comedi_bytes_to_samples(s, num_bytes);
unsigned int i;
- int unipolar;
- /* see if card is using a unipolar or bipolar range so we can munge data correctly */
- unipolar = inb(dev->iobase + DAS1800_CONTROL_C) & UB;
+ if (devpriv->ai_is_unipolar)
+ return;
- /* convert to unsigned type if we are in a bipolar mode */
- if (!unipolar) {
- for (i = 0; i < num_elements; i++)
- array[i] = munge_bipolar_sample(dev, array[i]);
- }
+ for (i = 0; i < num_samples; i++)
+ array[i] = comedi_offset_munge(s, array[i]);
}
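
Conceptually, the offset munge above converts a bipolar two's-complement sample into the offset-binary form comedi expects, which for a full-scale maxdata is equivalent to flipping the sign bit. A rough standalone illustration for a 12-bit sample (not the actual comedi_offset_munge() helper):

#include <stdio.h>

static unsigned short offset_munge_12bit(unsigned short sample)
{
	/* flip the sign bit: -2048..2047 maps to 0..4095 */
	return (sample ^ 0x800) & 0xfff;
}

int main(void)
{
	printf("0x%03x\n", offset_munge_12bit(0xfff));	/* -1   -> 0x7ff */
	printf("0x%03x\n", offset_munge_12bit(0x000));	/*  0   -> 0x800 */
	printf("0x%03x\n", offset_munge_12bit(0x7ff));	/* 2047 -> 0xfff */
	return 0;
}
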
static void das1800_handle_fifo_half_full(struct comedi_device *dev,
@@ -473,7 +352,6 @@ static void das1800_handle_fifo_half_full(struct comedi_device *dev,
unsigned int nsamples = comedi_nsamples_left(s, FIFO_SIZE / 2);
insw(dev->iobase + DAS1800_FIFO, devpriv->fifo_buf, nsamples);
- munge_data(dev, devpriv->fifo_buf, nsamples);
comedi_buf_write_samples(s, devpriv->fifo_buf, nsamples);
}
@@ -482,14 +360,9 @@ static void das1800_handle_fifo_not_empty(struct comedi_device *dev,
{
struct comedi_cmd *cmd = &s->async->cmd;
unsigned short dpnt;
- int unipolar;
-
- unipolar = inb(dev->iobase + DAS1800_CONTROL_C) & UB;
while (inb(dev->iobase + DAS1800_STATUS) & FNE) {
dpnt = inw(dev->iobase + DAS1800_FIFO);
- /* convert to unsigned type */
- dpnt = munge_bipolar_sample(dev, dpnt);
comedi_buf_write_samples(s, &dpnt, 1);
if (cmd->stop_src == TRIG_COUNT &&
@@ -498,7 +371,6 @@ static void das1800_handle_fifo_not_empty(struct comedi_device *dev,
}
}
-/* Utility function used by das1800_flush_dma() and das1800_handle_dma() */
static void das1800_flush_dma_channel(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_isadma_desc *desc)
@@ -511,12 +383,9 @@ static void das1800_flush_dma_channel(struct comedi_device *dev,
nsamples = comedi_bytes_to_samples(s, nbytes);
nsamples = comedi_nsamples_left(s, nsamples);
- munge_data(dev, desc->virt_addr, nsamples);
comedi_buf_write_samples(s, desc->virt_addr, nsamples);
}
-/* flushes remaining data from board when external trigger has stopped acquisition
- * and we are using dma transfers */
static void das1800_flush_dma(struct comedi_device *dev,
struct comedi_subdevice *s)
{
@@ -560,27 +429,30 @@ static void das1800_handle_dma(struct comedi_device *dev,
}
}
-static int das1800_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
+static int das1800_ai_cancel(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
struct das1800_private *devpriv = dev->private;
struct comedi_isadma *dma = devpriv->dma;
struct comedi_isadma_desc *desc;
int i;
- outb(0x0, dev->iobase + DAS1800_STATUS); /* disable conversions */
- outb(0x0, dev->iobase + DAS1800_CONTROL_B); /* disable interrupts and dma */
- outb(0x0, dev->iobase + DAS1800_CONTROL_A); /* disable and clear fifo and stop triggering */
+ /* disable and stop conversions */
+ outb(0x0, dev->iobase + DAS1800_STATUS);
+ outb(0x0, dev->iobase + DAS1800_CONTROL_B);
+ outb(0x0, dev->iobase + DAS1800_CONTROL_A);
- for (i = 0; i < 2; i++) {
- desc = &dma->desc[i];
- if (desc->chan)
- comedi_isadma_disable(desc->chan);
+ if (dma) {
+ for (i = 0; i < 2; i++) {
+ desc = &dma->desc[i];
+ if (desc->chan)
+ comedi_isadma_disable(desc->chan);
+ }
}
return 0;
}
-/* the guts of the interrupt handler, that is shared with das1800_ai_poll */
static void das1800_ai_handler(struct comedi_device *dev)
{
struct das1800_private *devpriv = dev->private;
@@ -589,17 +461,16 @@ static void das1800_ai_handler(struct comedi_device *dev)
struct comedi_cmd *cmd = &async->cmd;
unsigned int status = inb(dev->iobase + DAS1800_STATUS);
- /* select adc for base address + 0 */
+ /* select adc register (spinlock is already held) */
outb(ADC, dev->iobase + DAS1800_SELECT);
- /* dma buffer full */
- if (devpriv->irq_dma_bits & DMA_ENABLED) {
- /* look for data from dma transfer even if dma terminal count hasn't happened yet */
+
+	/* get samples with dma, fifo, or polling as necessary */
+ if (devpriv->irq_dma_bits & DMA_ENABLED)
das1800_handle_dma(dev, s, status);
- } else if (status & FHF) { /* if fifo half full */
+ else if (status & FHF)
das1800_handle_fifo_half_full(dev, s);
- } else if (status & FNE) { /* if fifo not empty */
+ else if (status & FNE)
das1800_handle_fifo_not_empty(dev, s);
- }
/* if the card's fifo has overflowed */
if (status & OVF) {
@@ -615,7 +486,7 @@ static void das1800_ai_handler(struct comedi_device *dev)
if (status & CT0TC) {
/* clear CT0TC interrupt bit */
outb(CLEAR_INTR_MASK & ~CT0TC, dev->iobase + DAS1800_STATUS);
- /* make sure we get all remaining data from board before quitting */
+ /* get all remaining samples before quitting */
if (devpriv->irq_dma_bits & DMA_ENABLED)
das1800_flush_dma(dev, s);
else
@@ -634,9 +505,14 @@ static int das1800_ai_poll(struct comedi_device *dev,
{
unsigned long flags;
- /* prevent race with interrupt handler */
+ /*
+ * Protects the indirect addressing selected by DAS1800_SELECT
+	 * in das1800_ai_handler() and also prevents a race with das1800_interrupt().
+ */
spin_lock_irqsave(&dev->spinlock, flags);
+
das1800_ai_handler(dev);
+
spin_unlock_irqrestore(&dev->spinlock, flags);
return comedi_buf_n_bytes_ready(s);
@@ -652,9 +528,12 @@ static irqreturn_t das1800_interrupt(int irq, void *d)
return IRQ_HANDLED;
}
- /* Prevent race with das1800_ai_poll() on multi processor systems.
- * Also protects indirect addressing in das1800_ai_handler */
+ /*
+ * Protects the indirect addressing selected by DAS1800_SELECT
+	 * in das1800_ai_handler() and also prevents a race with das1800_ai_poll().
+ */
spin_lock(&dev->spinlock);
+
status = inb(dev->iobase + DAS1800_STATUS);
/* if interrupt was not caused by das-1800 */
@@ -671,46 +550,87 @@ static irqreturn_t das1800_interrupt(int irq, void *d)
return IRQ_HANDLED;
}
-/* converts requested conversion timing to timing compatible with
- * hardware, used only when card is in 'burst mode'
- */
-static unsigned int burst_convert_arg(unsigned int convert_arg, int flags)
+static int das1800_ai_fixup_paced_timing(struct comedi_device *dev,
+ struct comedi_cmd *cmd)
+{
+ unsigned int arg = cmd->convert_arg;
+
+ /*
+ * Paced mode:
+ * scan_begin_src is TRIG_FOLLOW
+ * convert_src is TRIG_TIMER
+ *
+ * The convert_arg sets the pacer sample acquisition time.
+	 * The max acquisition speed is limited to the board's
+ * 'ai_speed' (this was already verified). The min speed is
+ * limited by the cascaded 8254 timer.
+ */
+ comedi_8254_cascade_ns_to_timer(dev->pacer, &arg, cmd->flags);
+ return comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
+}
+
+static int das1800_ai_fixup_burst_timing(struct comedi_device *dev,
+ struct comedi_cmd *cmd)
{
- unsigned int micro_sec;
+ unsigned int arg = cmd->convert_arg;
+ int err = 0;
- /* in burst mode, the maximum conversion time is 64 microseconds */
- if (convert_arg > 64000)
- convert_arg = 64000;
+ /*
+ * Burst mode:
+ * scan_begin_src is TRIG_TIMER or TRIG_EXT
+ * convert_src is TRIG_TIMER
+ *
+ * The convert_arg sets burst sample acquisition time.
+	 * The max acquisition speed is limited to the board's
+	 * 'ai_speed' (this was already verified). The min speed is
+	 * limited to 64 microseconds.
+ */
+ err |= comedi_check_trigger_arg_max(&arg, 64000);
- /* the conversion time must be an integral number of microseconds */
- switch (flags & CMDF_ROUND_MASK) {
+ /* round to microseconds then verify */
+ switch (cmd->flags & CMDF_ROUND_MASK) {
case CMDF_ROUND_NEAREST:
default:
- micro_sec = (convert_arg + 500) / 1000;
+ arg = DIV_ROUND_CLOSEST(arg, 1000);
break;
case CMDF_ROUND_DOWN:
- micro_sec = convert_arg / 1000;
+ arg = arg / 1000;
break;
case CMDF_ROUND_UP:
- micro_sec = (convert_arg - 1) / 1000 + 1;
+ arg = DIV_ROUND_UP(arg, 1000);
break;
}
+ err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg * 1000);
- /* return number of nanoseconds */
- return micro_sec * 1000;
+ /*
+ * The pacer can be used to set the scan sample rate. The max scan
+ * speed is limited by the conversion speed and the number of channels
+ * to convert. The min speed is limited by the cascaded 8254 timer.
+ */
+ if (cmd->scan_begin_src == TRIG_TIMER) {
+ arg = cmd->convert_arg * cmd->chanlist_len;
+ err |= comedi_check_trigger_arg_min(&cmd->scan_begin_arg, arg);
+
+ arg = cmd->scan_begin_arg;
+ comedi_8254_cascade_ns_to_timer(dev->pacer, &arg, cmd->flags);
+ err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, arg);
+ }
+
+ return err;
}
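
The microsecond rounding in das1800_ai_fixup_burst_timing() can be checked in isolation. A small sketch with the rounding helpers re-declared locally; for the positive values used here these behave like the kernel's DIV_ROUND_CLOSEST()/DIV_ROUND_UP():

#include <stdio.h>

#define DIV_ROUND_CLOSEST(n, d)	(((n) + (d) / 2) / (d))
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int convert_arg = 12600;	/* requested conversion time in ns */

	printf("nearest: %u ns\n", DIV_ROUND_CLOSEST(convert_arg, 1000) * 1000);	/* 13000 */
	printf("down:    %u ns\n", (convert_arg / 1000) * 1000);			/* 12000 */
	printf("up:      %u ns\n", DIV_ROUND_UP(convert_arg, 1000) * 1000);		/* 13000 */
	return 0;
}
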
static int das1800_ai_check_chanlist(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_cmd *cmd)
{
- unsigned int unipolar0 = CR_RANGE(cmd->chanlist[0]) & UNIPOLAR;
+ unsigned int range = CR_RANGE(cmd->chanlist[0]);
+ bool unipolar0 = comedi_range_is_unipolar(s, range);
int i;
for (i = 1; i < cmd->chanlist_len; i++) {
- unsigned int unipolar = CR_RANGE(cmd->chanlist[i]) & UNIPOLAR;
+ range = CR_RANGE(cmd->chanlist[i]);
- if (unipolar != unipolar0) {
+ if (unipolar0 != comedi_range_is_unipolar(s, range)) {
dev_dbg(dev->class_dev,
"unipolar and bipolar ranges cannot be mixed in the chanlist\n");
return -EINVAL;
@@ -720,14 +640,12 @@ static int das1800_ai_check_chanlist(struct comedi_device *dev,
return 0;
}
-/* test analog input cmd */
-static int das1800_ai_do_cmdtest(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_cmd *cmd)
+static int das1800_ai_cmdtest(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_cmd *cmd)
{
const struct das1800_board *board = dev->board_ptr;
int err = 0;
- unsigned int arg;
/* Step 1 : check if triggers are trivially valid */
@@ -752,16 +670,23 @@ static int das1800_ai_do_cmdtest(struct comedi_device *dev,
/* Step 2b : and mutually compatible */
+ /* burst scans must use timed conversions */
if (cmd->scan_begin_src != TRIG_FOLLOW &&
cmd->convert_src != TRIG_TIMER)
err |= -EINVAL;
+ /* the external pin TGIN must use the same polarity */
+ if (cmd->start_src == TRIG_EXT && cmd->stop_src == TRIG_EXT)
+ err |= comedi_check_trigger_arg_is(&cmd->start_arg,
+ cmd->stop_arg);
+
if (err)
return 2;
/* Step 3: check if arguments are trivially valid */
- err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
+ if (cmd->start_arg == TRIG_NOW)
+ err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
if (cmd->convert_src == TRIG_TIMER) {
err |= comedi_check_trigger_arg_min(&cmd->convert_arg,
@@ -786,31 +711,13 @@ static int das1800_ai_do_cmdtest(struct comedi_device *dev,
if (err)
return 3;
- /* step 4: fix up any arguments */
+ /* Step 4: fix up any arguments */
- if (cmd->scan_begin_src == TRIG_FOLLOW &&
- cmd->convert_src == TRIG_TIMER) {
- /* we are not in burst mode */
- arg = cmd->convert_arg;
- comedi_8254_cascade_ns_to_timer(dev->pacer, &arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
- } else if (cmd->convert_src == TRIG_TIMER) {
- /* we are in burst mode */
- arg = burst_convert_arg(cmd->convert_arg, cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->convert_arg, arg);
-
- if (cmd->scan_begin_src == TRIG_TIMER) {
- arg = cmd->convert_arg * cmd->chanlist_len;
- err |= comedi_check_trigger_arg_max(&cmd->
- scan_begin_arg,
- arg);
-
- arg = cmd->scan_begin_arg;
- comedi_8254_cascade_ns_to_timer(dev->pacer, &arg,
- cmd->flags);
- err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg,
- arg);
- }
+ if (cmd->convert_src == TRIG_TIMER) {
+ if (cmd->scan_begin_src == TRIG_FOLLOW)
+ err |= das1800_ai_fixup_paced_timing(dev, cmd);
+ else /* TRIG_TIMER or TRIG_EXT */
+ err |= das1800_ai_fixup_burst_timing(dev, cmd);
}
if (err)
@@ -826,74 +733,22 @@ static int das1800_ai_do_cmdtest(struct comedi_device *dev,
return 0;
}
-/* returns appropriate bits for control register a, depending on command */
-static int control_a_bits(const struct comedi_cmd *cmd)
-{
- int control_a;
-
- control_a = FFEN; /* enable fifo */
- if (cmd->stop_src == TRIG_EXT)
- control_a |= ATEN;
- switch (cmd->start_src) {
- case TRIG_EXT:
- control_a |= TGEN | CGSL;
- break;
- case TRIG_NOW:
- control_a |= CGEN;
- break;
- default:
- break;
- }
-
- return control_a;
-}
-
-/* returns appropriate bits for control register c, depending on command */
-static int control_c_bits(const struct comedi_cmd *cmd)
+static unsigned char das1800_ai_chanspec_bits(struct comedi_subdevice *s,
+ unsigned int chanspec)
{
- int control_c;
- int aref;
+ unsigned int range = CR_RANGE(chanspec);
+ unsigned int aref = CR_AREF(chanspec);
+ unsigned char bits;
- /* set clock source to internal or external, select analog reference,
- * select unipolar / bipolar
- */
- aref = CR_AREF(cmd->chanlist[0]);
- control_c = UQEN; /* enable upper qram addresses */
+ bits = UQEN;
if (aref != AREF_DIFF)
- control_c |= SD;
+ bits |= SD;
if (aref == AREF_COMMON)
- control_c |= CMEN;
- /* if a unipolar range was selected */
- if (CR_RANGE(cmd->chanlist[0]) & UNIPOLAR)
- control_c |= UB;
- switch (cmd->scan_begin_src) {
- case TRIG_FOLLOW: /* not in burst mode */
- switch (cmd->convert_src) {
- case TRIG_TIMER:
- /* trig on cascaded counters */
- control_c |= IPCLK;
- break;
- case TRIG_EXT:
- /* trig on falling edge of external trigger */
- control_c |= XPCLK;
- break;
- default:
- break;
- }
- break;
- case TRIG_TIMER:
- /* burst mode with internal pacer clock */
- control_c |= BMDE | IPCLK;
- break;
- case TRIG_EXT:
- /* burst mode with external trigger */
- control_c |= BMDE | XPCLK;
- break;
- default:
- break;
- }
+ bits |= CMEN;
+ if (comedi_range_is_unipolar(s, range))
+ bits |= UB;
- return control_c;
+ return bits;
}
static unsigned int das1800_ai_transfer_size(struct comedi_device *dev,
@@ -934,13 +789,14 @@ static void das1800_ai_setup_dma(struct comedi_device *dev,
{
struct das1800_private *devpriv = dev->private;
struct comedi_isadma *dma = devpriv->dma;
- struct comedi_isadma_desc *desc = &dma->desc[0];
+ struct comedi_isadma_desc *desc;
unsigned int bytes;
if ((devpriv->irq_dma_bits & DMA_ENABLED) == 0)
return;
dma->cur_dma = 0;
+ desc = &dma->desc[0];
/* determine a dma transfer size to fill buffer in 0.3 sec */
bytes = das1800_ai_transfer_size(dev, s, desc->maxsize, 300000000);
@@ -956,43 +812,48 @@ static void das1800_ai_setup_dma(struct comedi_device *dev,
}
}
-/* programs channel/gain list into card */
-static void program_chanlist(struct comedi_device *dev,
- const struct comedi_cmd *cmd)
+static void das1800_ai_set_chanlist(struct comedi_device *dev,
+ unsigned int *chanlist, unsigned int len)
{
- int i, n, chan_range;
- unsigned long irq_flags;
- const int range_mask = 0x3; /* masks unipolar/bipolar bit off range */
- const int range_bitshift = 8;
-
- n = cmd->chanlist_len;
- /* spinlock protects indirect addressing */
- spin_lock_irqsave(&dev->spinlock, irq_flags);
- outb(QRAM, dev->iobase + DAS1800_SELECT); /* select QRAM for baseAddress + 0x0 */
- outb(n - 1, dev->iobase + DAS1800_QRAM_ADDRESS); /*set QRAM address start */
+ unsigned long flags;
+ unsigned int i;
+
+ /* protects the indirect addressing selected by DAS1800_SELECT */
+ spin_lock_irqsave(&dev->spinlock, flags);
+
+ /* select QRAM register and set start address */
+ outb(QRAM, dev->iobase + DAS1800_SELECT);
+ outb(len - 1, dev->iobase + DAS1800_QRAM_ADDRESS);
+
/* make channel / gain list */
- for (i = 0; i < n; i++) {
- chan_range =
- CR_CHAN(cmd->chanlist[i]) |
- ((CR_RANGE(cmd->chanlist[i]) & range_mask) <<
- range_bitshift);
- outw(chan_range, dev->iobase + DAS1800_QRAM);
+ for (i = 0; i < len; i++) {
+ unsigned int chan = CR_CHAN(chanlist[i]);
+ unsigned int range = CR_RANGE(chanlist[i]);
+ unsigned short val;
+
+ val = chan | ((range & 0x3) << 8);
+ outw(val, dev->iobase + DAS1800_QRAM);
}
- outb(n - 1, dev->iobase + DAS1800_QRAM_ADDRESS); /*finish write to QRAM */
- spin_unlock_irqrestore(&dev->spinlock, irq_flags);
+
+ /* finish write to QRAM */
+ outb(len - 1, dev->iobase + DAS1800_QRAM_ADDRESS);
+
+ spin_unlock_irqrestore(&dev->spinlock, flags);
}
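
For reference, the QRAM word written by das1800_ai_set_chanlist() packs the channel number into the low byte and the two-bit gain code into bits 8-9. A minimal userspace sketch of just that packing (function name and test values are illustrative, not part of the patch):

#include <stdio.h>

/* model of the QRAM entry built in das1800_ai_set_chanlist() above */
static unsigned short qram_entry(unsigned int chan, unsigned int range)
{
	/* low byte: channel number; bits 8-9: gain code (range & 0x3) */
	return (unsigned short)(chan | ((range & 0x3) << 8));
}

int main(void)
{
	/* e.g. channel 5 at range code 2 packs to 0x0205 */
	printf("0x%04x\n", qram_entry(5, 2));
	return 0;
}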
-/* analog input do_cmd */
-static int das1800_ai_do_cmd(struct comedi_device *dev,
- struct comedi_subdevice *s)
+static int das1800_ai_cmd(struct comedi_device *dev,
+ struct comedi_subdevice *s)
{
struct das1800_private *devpriv = dev->private;
int control_a, control_c;
struct comedi_async *async = s->async;
const struct comedi_cmd *cmd = &async->cmd;
+ unsigned int range0 = CR_RANGE(cmd->chanlist[0]);
- /* disable dma on CMDF_WAKE_EOS, or CMDF_PRIORITY
- * (because dma in handler is unsafe at hard real-time priority) */
+ /*
+ * Disable dma on CMDF_WAKE_EOS, or CMDF_PRIORITY (because dma in
+ * handler is unsafe at hard real-time priority).
+ */
if (cmd->flags & (CMDF_WAKE_EOS | CMDF_PRIORITY))
devpriv->irq_dma_bits &= ~DMA_ENABLED;
else
@@ -1006,14 +867,42 @@ static int das1800_ai_do_cmd(struct comedi_device *dev,
devpriv->irq_dma_bits |= FIMD;
}
- das1800_cancel(dev, s);
+ das1800_ai_cancel(dev, s);
+
+ devpriv->ai_is_unipolar = comedi_range_is_unipolar(s, range0);
- /* determine proper bits for control registers */
- control_a = control_a_bits(cmd);
- control_c = control_c_bits(cmd);
+ control_a = FFEN;
+ if (cmd->stop_src == TRIG_EXT)
+ control_a |= ATEN;
+ if (cmd->start_src == TRIG_EXT)
+ control_a |= TGEN | CGSL;
+ else /* TRIG_NOW */
+ control_a |= CGEN;
+ if (control_a & (ATEN | TGEN)) {
+ if ((cmd->start_arg & CR_INVERT) || (cmd->stop_arg & CR_INVERT))
+ control_a |= TGPL;
+ }
+
+ control_c = das1800_ai_chanspec_bits(s, cmd->chanlist[0]);
+ /* set clock source to internal or external */
+ if (cmd->scan_begin_src == TRIG_FOLLOW) {
+ /* not in burst mode */
+ if (cmd->convert_src == TRIG_TIMER) {
+ /* trig on cascaded counters */
+ control_c |= IPCLK;
+ } else { /* TRIG_EXT */
+ /* trig on falling edge of external trigger */
+ control_c |= XPCLK;
+ }
+ } else if (cmd->scan_begin_src == TRIG_TIMER) {
+ /* burst mode with internal pacer clock */
+ control_c |= BMDE | IPCLK;
+ } else { /* TRIG_EXT */
+ /* burst mode with external trigger */
+ control_c |= BMDE | XPCLK;
+ }
- /* setup card and start */
- program_chanlist(dev, cmd);
+ das1800_ai_set_chanlist(dev, cmd->chanlist, cmd->chanlist_len);
/* setup cascaded counters for conversion/scan frequency */
if ((cmd->scan_begin_src == TRIG_FOLLOW ||
@@ -1031,118 +920,117 @@ static int das1800_ai_do_cmd(struct comedi_device *dev,
outb(control_c, dev->iobase + DAS1800_CONTROL_C);
/* set conversion rate and length for burst mode */
if (control_c & BMDE) {
- /* program conversion period with number of microseconds minus 1 */
- outb(cmd->convert_arg / 1000 - 1,
+ outb(cmd->convert_arg / 1000 - 1, /* microseconds - 1 */
dev->iobase + DAS1800_BURST_RATE);
outb(cmd->chanlist_len - 1, dev->iobase + DAS1800_BURST_LENGTH);
}
- outb(devpriv->irq_dma_bits, dev->iobase + DAS1800_CONTROL_B); /* enable irq/dma */
- outb(control_a, dev->iobase + DAS1800_CONTROL_A); /* enable fifo and triggering */
- outb(CVEN, dev->iobase + DAS1800_STATUS); /* enable conversions */
+
+ /* enable and start conversions */
+ outb(devpriv->irq_dma_bits, dev->iobase + DAS1800_CONTROL_B);
+ outb(control_a, dev->iobase + DAS1800_CONTROL_A);
+ outb(CVEN, dev->iobase + DAS1800_STATUS);
return 0;
}
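
As the in-line comment notes, cmd->convert_arg is in nanoseconds, so the burst-rate write above programs the conversion period in microseconds minus one: a 10000 ns convert_arg stores 10000 / 1000 - 1 = 9 in DAS1800_BURST_RATE, and a 16-entry chanlist stores 15 in DAS1800_BURST_LENGTH.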
-/* read analog input */
-static int das1800_ai_rinsn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int das1800_ai_eoc(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned long context)
{
- const struct das1800_board *board = dev->board_ptr;
- int i, n;
- int chan, range, aref, chan_range;
- int timeout = 1000;
- unsigned short dpnt;
- int conv_flags = 0;
- unsigned long irq_flags;
+ unsigned char status;
- /* set up analog reference and unipolar / bipolar mode */
- aref = CR_AREF(insn->chanspec);
- conv_flags |= UQEN;
- if (aref != AREF_DIFF)
- conv_flags |= SD;
- if (aref == AREF_COMMON)
- conv_flags |= CMEN;
- /* if a unipolar range was selected */
- if (CR_RANGE(insn->chanspec) & UNIPOLAR)
- conv_flags |= UB;
+ status = inb(dev->iobase + DAS1800_STATUS);
+ if (status & FNE)
+ return 0;
+ return -EBUSY;
+}
+
+static int das1800_ai_insn_read(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
+{
+ unsigned int range = CR_RANGE(insn->chanspec);
+ bool is_unipolar = comedi_range_is_unipolar(s, range);
+ int ret = 0;
+ int n;
+ unsigned short dpnt;
+ unsigned long flags;
- outb(conv_flags, dev->iobase + DAS1800_CONTROL_C); /* software conversion enabled */
+ outb(das1800_ai_chanspec_bits(s, insn->chanspec),
+ dev->iobase + DAS1800_CONTROL_C); /* software pacer */
outb(CVEN, dev->iobase + DAS1800_STATUS); /* enable conversions */
outb(0x0, dev->iobase + DAS1800_CONTROL_A); /* reset fifo */
outb(FFEN, dev->iobase + DAS1800_CONTROL_A);
- chan = CR_CHAN(insn->chanspec);
- /* mask of unipolar/bipolar bit from range */
- range = CR_RANGE(insn->chanspec) & 0x3;
- chan_range = chan | (range << 8);
- spin_lock_irqsave(&dev->spinlock, irq_flags);
- outb(QRAM, dev->iobase + DAS1800_SELECT); /* select QRAM for baseAddress + 0x0 */
- outb(0x0, dev->iobase + DAS1800_QRAM_ADDRESS); /* set QRAM address start */
- outw(chan_range, dev->iobase + DAS1800_QRAM);
- outb(0x0, dev->iobase + DAS1800_QRAM_ADDRESS); /*finish write to QRAM */
- outb(ADC, dev->iobase + DAS1800_SELECT); /* select ADC for baseAddress + 0x0 */
+ das1800_ai_set_chanlist(dev, &insn->chanspec, 1);
+
+ /* protects the indirect addressing selected by DAS1800_SELECT */
+ spin_lock_irqsave(&dev->spinlock, flags);
+
+ /* select ai fifo register */
+ outb(ADC, dev->iobase + DAS1800_SELECT);
for (n = 0; n < insn->n; n++) {
/* trigger conversion */
outb(0, dev->iobase + DAS1800_FIFO);
- for (i = 0; i < timeout; i++) {
- if (inb(dev->iobase + DAS1800_STATUS) & FNE)
- break;
- }
- if (i == timeout) {
- dev_err(dev->class_dev, "timeout\n");
- n = -ETIME;
- goto exit;
- }
+
+ ret = comedi_timeout(dev, s, insn, das1800_ai_eoc, 0);
+ if (ret)
+ break;
+
dpnt = inw(dev->iobase + DAS1800_FIFO);
- /* shift data to offset binary for bipolar ranges */
- if ((conv_flags & UB) == 0)
- dpnt += 1 << (board->resolution - 1);
+ if (!is_unipolar)
+ dpnt = comedi_offset_munge(s, dpnt);
data[n] = dpnt;
}
-exit:
- spin_unlock_irqrestore(&dev->spinlock, irq_flags);
+ spin_unlock_irqrestore(&dev->spinlock, flags);
- return n;
+ return ret ? ret : insn->n;
}
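
For bipolar ranges the read path now uses comedi_offset_munge() instead of adding half of full scale by hand. Below is a userspace stand-in for that conversion; the assumption is that the helper simply toggles the most significant bit of a sample whose maxdata is 2^n - 1, which amounts to the same sign-bit flip (modulo the sample width) as the old open-coded addition:

#include <stdio.h>

/*
 * Userspace stand-in for comedi_offset_munge(): toggle the MSB to convert
 * between two's complement and offset binary.  With maxdata = 2^n - 1,
 * maxdata ^ (maxdata >> 1) isolates the MSB.
 */
static unsigned int offset_munge(unsigned int val, unsigned int maxdata)
{
	return val ^ maxdata ^ (maxdata >> 1);
}

int main(void)
{
	/* 12-bit samples, maxdata = 0xfff */
	printf("0x%03x -> 0x%03x\n", 0x800u, offset_munge(0x800, 0xfff)); /* -2048 -> 0x000 */
	printf("0x%03x -> 0x%03x\n", 0x000u, offset_munge(0x000, 0xfff)); /* 0 -> mid-scale 0x800 */
	return 0;
}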
-/* writes to an analog output channel */
-static int das1800_ao_winsn(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int das1800_ao_insn_write(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- const struct das1800_board *board = dev->board_ptr;
- struct das1800_private *devpriv = dev->private;
- int chan = CR_CHAN(insn->chanspec);
-/* int range = CR_RANGE(insn->chanspec); */
- int update_chan = board->ao_n_chan - 1;
- unsigned short output;
- unsigned long irq_flags;
-
- /* card expects two's complement data */
- output = data[0] - (1 << (board->resolution - 1));
- /* if the write is to the 'update' channel, we need to remember its value */
- if (chan == update_chan)
- devpriv->ao_update_bits = output;
- /* write to channel */
- spin_lock_irqsave(&dev->spinlock, irq_flags);
- outb(DAC(chan), dev->iobase + DAS1800_SELECT); /* select dac channel for baseAddress + 0x0 */
- outw(output, dev->iobase + DAS1800_DAC);
- /* now we need to write to 'update' channel to update all dac channels */
- if (chan != update_chan) {
- outb(DAC(update_chan), dev->iobase + DAS1800_SELECT); /* select 'update' channel for baseAddress + 0x0 */
- outw(devpriv->ao_update_bits, dev->iobase + DAS1800_DAC);
+ unsigned int chan = CR_CHAN(insn->chanspec);
+ unsigned int update_chan = s->n_chan - 1;
+ unsigned long flags;
+ int i;
+
+ /* protects the indirect addressing selected by DAS1800_SELECT */
+ spin_lock_irqsave(&dev->spinlock, flags);
+
+ for (i = 0; i < insn->n; i++) {
+ unsigned int val = data[i];
+
+ s->readback[chan] = val;
+
+ val = comedi_offset_munge(s, val);
+
+ /* load this channel (and update if it's the last channel) */
+ outb(DAC(chan), dev->iobase + DAS1800_SELECT);
+ outw(val, dev->iobase + DAS1800_DAC);
+
+ /* update all channels */
+ if (chan != update_chan) {
+ val = comedi_offset_munge(s, s->readback[update_chan]);
+
+ outb(DAC(update_chan), dev->iobase + DAS1800_SELECT);
+ outw(val, dev->iobase + DAS1800_DAC);
+ }
}
- spin_unlock_irqrestore(&dev->spinlock, irq_flags);
+ spin_unlock_irqrestore(&dev->spinlock, flags);
- return 1;
+ return insn->n;
}
-/* reads from digital input channels */
-static int das1800_di_rbits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+static int das1800_di_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
data[1] = inb(dev->iobase + DAS1800_DIGITAL) & 0xf;
data[0] = 0;
@@ -1150,10 +1038,10 @@ static int das1800_di_rbits(struct comedi_device *dev,
return insn->n;
}
-static int das1800_do_wbits(struct comedi_device *dev,
- struct comedi_subdevice *s,
- struct comedi_insn *insn,
- unsigned int *data)
+static int das1800_do_insn_bits(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ struct comedi_insn *insn,
+ unsigned int *data)
{
if (comedi_dio_update_state(s, data))
outb(s->state, dev->iobase + DAS1800_DIGITAL);
@@ -1216,68 +1104,68 @@ static void das1800_free_dma(struct comedi_device *dev)
comedi_isadma_free(devpriv->dma);
}
-static const struct das1800_board *das1800_probe(struct comedi_device *dev)
+static int das1800_probe(struct comedi_device *dev)
{
const struct das1800_board *board = dev->board_ptr;
- int index = board ? board - das1800_boards : -EINVAL;
- int id;
+ unsigned char id;
+
+ id = (inb(dev->iobase + DAS1800_DIGITAL) >> 4) & 0xf;
/*
* The dev->board_ptr will be set by comedi_device_attach() if the
* board name provided by the user matches a board->name in this
* driver. If so, this function sanity checks the id to verify that
* the board is correct.
- *
- * If the dev->board_ptr is not set, the user is trying to attach
- * an unspecified board to this driver. In this case the id is used
- * to 'probe' for the correct dev->board_ptr.
*/
- id = (inb(dev->iobase + DAS1800_DIGITAL) >> 4) & 0xf;
+ if (board) {
+ if (board->id == id)
+ return 0;
+ dev_err(dev->class_dev,
+ "probed id does not match board id (0x%x != 0x%x)\n",
+ id, board->id);
+ return -ENODEV;
+ }
+
+ /*
+ * If the dev->board_ptr is not set, the user is trying to attach
+ * an unspecified board to this driver. In this case the id is used
+ * to 'probe' for the dev->board_ptr.
+ */
switch (id) {
- case 0x3:
- if (index == das1801st_da || index == das1802st_da ||
- index == das1701st_da || index == das1702st_da)
- return board;
- index = das1801st;
+ case DAS1800_ID_ST_DA:
+ /* das-1701st-da, das-1702st-da, das-1801st-da, das-1802st-da */
+ board = &das1800_boards[BOARD_DAS1801ST_DA];
break;
- case 0x4:
- if (index == das1802hr_da || index == das1702hr_da)
- return board;
- index = das1802hr;
+ case DAS1800_ID_HR_DA:
+ /* das-1702hr-da, das-1802hr-da */
+ board = &das1800_boards[BOARD_DAS1802HR_DA];
break;
- case 0x5:
- if (index == das1801ao || index == das1802ao ||
- index == das1701ao || index == das1702ao)
- return board;
- index = das1801ao;
+ case DAS1800_ID_AO:
+ /* das-1701ao, das-1702ao, das-1801ao, das-1802ao */
+ board = &das1800_boards[BOARD_DAS1801AO];
break;
- case 0x6:
- if (index == das1802hr || index == das1702hr)
- return board;
- index = das1802hr;
+ case DAS1800_ID_HR:
+ /* das-1702hr, das-1802hr */
+ board = &das1800_boards[BOARD_DAS1802HR];
break;
- case 0x7:
- if (index == das1801st || index == das1802st ||
- index == das1701st || index == das1702st)
- return board;
- index = das1801st;
+ case DAS1800_ID_ST:
+ /* das-1701st, das-1702st, das-1801st, das-1802st */
+ board = &das1800_boards[BOARD_DAS1801ST];
break;
- case 0x8:
- if (index == das1801hc || index == das1802hc)
- return board;
- index = das1801hc;
+ case DAS1800_ID_HC:
+ /* das-1801hc, das-1802hc */
+ board = &das1800_boards[BOARD_DAS1801HC];
break;
default:
- dev_err(dev->class_dev,
- "Board model: probe returned 0x%x (unknown, please report)\n",
- id);
- return NULL;
+ dev_err(dev->class_dev, "invalid probe id 0x%x\n", id);
+ return -ENODEV;
}
- dev_err(dev->class_dev,
- "Board model (probed, not recommended): %s series\n",
- das1800_boards[index].name);
-
- return &das1800_boards[index];
+ dev->board_ptr = board;
+ dev->board_name = board->name;
+ dev_warn(dev->class_dev,
+ "probed id 0x%0x: %s series (not recommended)\n",
+ id, board->name);
+ return 0;
}
static int das1800_attach(struct comedi_device *dev,
@@ -1287,7 +1175,9 @@ static int das1800_attach(struct comedi_device *dev,
struct das1800_private *devpriv;
struct comedi_subdevice *s;
unsigned int irq = it->options[1];
+ bool is_16bit;
int ret;
+ int i;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
@@ -1297,16 +1187,15 @@ static int das1800_attach(struct comedi_device *dev,
if (ret)
return ret;
- board = das1800_probe(dev);
- if (!board) {
- dev_err(dev->class_dev, "unable to determine board type\n");
- return -ENODEV;
- }
- dev->board_ptr = board;
- dev->board_name = board->name;
+ ret = das1800_probe(dev);
+ if (ret)
+ return ret;
+ board = dev->board_ptr;
- /* if it is an 'ao' board with fancy analog out then we need extra io ports */
- if (board->ao_ability == 2) {
+ is_16bit = board->id == DAS1800_ID_HR || board->id == DAS1800_ID_HR_DA;
+
+ /* waveform 'ao' boards have additional io ports */
+ if (board->id == DAS1800_ID_AO) {
unsigned long iobase2 = dev->iobase + IOBASE2;
ret = __comedi_request_region(dev, iobase2, DAS1800_SIZE);
@@ -1349,7 +1238,9 @@ static int das1800_attach(struct comedi_device *dev,
if (dev->irq & it->options[2])
das1800_init_dma(dev, it);
- devpriv->fifo_buf = kmalloc_array(FIFO_SIZE, sizeof(uint16_t), GFP_KERNEL);
+ devpriv->fifo_buf = kmalloc_array(FIFO_SIZE,
+ sizeof(*devpriv->fifo_buf),
+ GFP_KERNEL);
if (!devpriv->fifo_buf)
return -ENOMEM;
@@ -1362,70 +1253,94 @@ static int das1800_attach(struct comedi_device *dev,
if (ret)
return ret;
- /* analog input subdevice */
+ /*
+ * Analog Input subdevice
+ *
+ * The "hc" type boards have 64 analog input channels and a 64
+ * entry QRAM fifo.
+ *
+ * All the other board types have 16 on-board channels. Each channel
+ * can be expanded to 16 channels with the addition of an EXP-1800
+ * expansion board for a total of 256 channels. The QRAM fifo on
+ * these boards has 256 entries.
+ *
+ * From the datasheets it's not clear what the comedi channel to
+ * actual physical channel mapping is when EXP-1800 boards are used.
+ */
s = &dev->subdevices[0];
- s->type = COMEDI_SUBD_AI;
- s->subdev_flags = SDF_READABLE | SDF_DIFF | SDF_GROUND;
- if (board->common)
- s->subdev_flags |= SDF_COMMON;
- s->n_chan = board->qram_len;
- s->maxdata = (1 << board->resolution) - 1;
- s->range_table = board->range_ai;
- s->insn_read = das1800_ai_rinsn;
+ s->type = COMEDI_SUBD_AI;
+ s->subdev_flags = SDF_READABLE | SDF_DIFF | SDF_GROUND;
+ if (board->id != DAS1800_ID_HC)
+ s->subdev_flags |= SDF_COMMON;
+ s->n_chan = (board->id == DAS1800_ID_HC) ? 64 : 256;
+ s->maxdata = is_16bit ? 0xffff : 0x0fff;
+ s->range_table = board->is_01_series ? &das1801_ai_range
+ : &das1802_ai_range;
+ s->insn_read = das1800_ai_insn_read;
if (dev->irq) {
dev->read_subdev = s;
- s->subdev_flags |= SDF_CMD_READ;
- s->len_chanlist = s->n_chan;
- s->do_cmd = das1800_ai_do_cmd;
- s->do_cmdtest = das1800_ai_do_cmdtest;
- s->poll = das1800_ai_poll;
- s->cancel = das1800_cancel;
+ s->subdev_flags |= SDF_CMD_READ;
+ s->len_chanlist = s->n_chan;
+ s->do_cmd = das1800_ai_cmd;
+ s->do_cmdtest = das1800_ai_cmdtest;
+ s->poll = das1800_ai_poll;
+ s->cancel = das1800_ai_cancel;
+ s->munge = das1800_ai_munge;
}
- /* analog out */
+ /* Analog Output subdevice */
s = &dev->subdevices[1];
- if (board->ao_ability == 1) {
- s->type = COMEDI_SUBD_AO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = board->ao_n_chan;
- s->maxdata = (1 << board->resolution) - 1;
- s->range_table = &range_bipolar10;
- s->insn_write = das1800_ao_winsn;
+ if (board->id == DAS1800_ID_ST_DA || board->id == DAS1800_ID_HR_DA) {
+ s->type = COMEDI_SUBD_AO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = (board->id == DAS1800_ID_ST_DA) ? 4 : 2;
+ s->maxdata = is_16bit ? 0xffff : 0x0fff;
+ s->range_table = &range_bipolar10;
+ s->insn_write = das1800_ao_insn_write;
+
+ ret = comedi_alloc_subdev_readback(s);
+ if (ret)
+ return ret;
+
+ /* initialize all channels to 0V */
+ for (i = 0; i < s->n_chan; i++) {
+ /* spinlock is not necessary during the attach */
+ outb(DAC(i), dev->iobase + DAS1800_SELECT);
+ outw(0, dev->iobase + DAS1800_DAC);
+ }
+ } else if (board->id == DAS1800_ID_AO) {
+ /*
+ * 'ao' boards have waveform analog outputs that are not
+ * currently supported.
+ */
+ s->type = COMEDI_SUBD_UNUSED;
} else {
- s->type = COMEDI_SUBD_UNUSED;
+ s->type = COMEDI_SUBD_UNUSED;
}
- /* di */
+ /* Digital Input subdevice */
s = &dev->subdevices[2];
- s->type = COMEDI_SUBD_DI;
- s->subdev_flags = SDF_READABLE;
- s->n_chan = 4;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = das1800_di_rbits;
-
- /* do */
+ s->type = COMEDI_SUBD_DI;
+ s->subdev_flags = SDF_READABLE;
+ s->n_chan = 4;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = das1800_di_insn_bits;
+
+ /* Digital Output subdevice */
s = &dev->subdevices[3];
- s->type = COMEDI_SUBD_DO;
- s->subdev_flags = SDF_WRITABLE;
- s->n_chan = board->do_n_chan;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = das1800_do_wbits;
+ s->type = COMEDI_SUBD_DO;
+ s->subdev_flags = SDF_WRITABLE;
+ s->n_chan = (board->id == DAS1800_ID_HC) ? 8 : 4;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = das1800_do_insn_bits;
- das1800_cancel(dev, dev->read_subdev);
+ das1800_ai_cancel(dev, dev->read_subdev);
/* initialize digital out channels */
outb(0, dev->iobase + DAS1800_DIGITAL);
- /* initialize analog out channels */
- if (board->ao_ability == 1) {
- /* select 'update' dac channel for baseAddress + 0x0 */
- outb(DAC(board->ao_n_chan - 1),
- dev->iobase + DAS1800_SELECT);
- outw(devpriv->ao_update_bits, dev->iobase + DAS1800_DAC);
- }
-
return 0;
};
@@ -1454,5 +1369,5 @@ static struct comedi_driver das1800_driver = {
module_comedi_driver(das1800_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Comedi driver for DAS1800 compatible ISA boards");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c
index 40bf00984fa5..d5295bbdd28c 100644
--- a/drivers/staging/comedi/drivers/dt282x.c
+++ b/drivers/staging/comedi/drivers/dt282x.c
@@ -69,49 +69,61 @@
* Register map
*/
#define DT2821_ADCSR_REG 0x00
-#define DT2821_ADCSR_ADERR (1 << 15)
-#define DT2821_ADCSR_ADCLK (1 << 9)
-#define DT2821_ADCSR_MUXBUSY (1 << 8)
-#define DT2821_ADCSR_ADDONE (1 << 7)
-#define DT2821_ADCSR_IADDONE (1 << 6)
+#define DT2821_ADCSR_ADERR BIT(15)
+#define DT2821_ADCSR_ADCLK BIT(9)
+#define DT2821_ADCSR_MUXBUSY BIT(8)
+#define DT2821_ADCSR_ADDONE BIT(7)
+#define DT2821_ADCSR_IADDONE BIT(6)
#define DT2821_ADCSR_GS(x) (((x) & 0x3) << 4)
#define DT2821_ADCSR_CHAN(x) (((x) & 0xf) << 0)
#define DT2821_CHANCSR_REG 0x02
-#define DT2821_CHANCSR_LLE (1 << 15)
-#define DT2821_CHANCSR_PRESLA(x) (((x) & 0xf) >> 8)
+#define DT2821_CHANCSR_LLE BIT(15)
+#define DT2821_CHANCSR_TO_PRESLA(x) (((x) >> 8) & 0xf)
#define DT2821_CHANCSR_NUMB(x) ((((x) - 1) & 0xf) << 0)
#define DT2821_ADDAT_REG 0x04
#define DT2821_DACSR_REG 0x06
-#define DT2821_DACSR_DAERR (1 << 15)
+#define DT2821_DACSR_DAERR BIT(15)
#define DT2821_DACSR_YSEL(x) ((x) << 9)
-#define DT2821_DACSR_SSEL (1 << 8)
-#define DT2821_DACSR_DACRDY (1 << 7)
-#define DT2821_DACSR_IDARDY (1 << 6)
-#define DT2821_DACSR_DACLK (1 << 5)
-#define DT2821_DACSR_HBOE (1 << 1)
-#define DT2821_DACSR_LBOE (1 << 0)
+#define DT2821_DACSR_SSEL BIT(8)
+#define DT2821_DACSR_DACRDY BIT(7)
+#define DT2821_DACSR_IDARDY BIT(6)
+#define DT2821_DACSR_DACLK BIT(5)
+#define DT2821_DACSR_HBOE BIT(1)
+#define DT2821_DACSR_LBOE BIT(0)
#define DT2821_DADAT_REG 0x08
#define DT2821_DIODAT_REG 0x0a
#define DT2821_SUPCSR_REG 0x0c
-#define DT2821_SUPCSR_DMAD (1 << 15)
-#define DT2821_SUPCSR_ERRINTEN (1 << 14)
-#define DT2821_SUPCSR_CLRDMADNE (1 << 13)
-#define DT2821_SUPCSR_DDMA (1 << 12)
-#define DT2821_SUPCSR_DS_PIO (0 << 10)
-#define DT2821_SUPCSR_DS_AD_CLK (1 << 10)
-#define DT2821_SUPCSR_DS_DA_CLK (2 << 10)
-#define DT2821_SUPCSR_DS_AD_TRIG (3 << 10)
-#define DT2821_SUPCSR_BUFFB (1 << 9)
-#define DT2821_SUPCSR_SCDN (1 << 8)
-#define DT2821_SUPCSR_DACON (1 << 7)
-#define DT2821_SUPCSR_ADCINIT (1 << 6)
-#define DT2821_SUPCSR_DACINIT (1 << 5)
-#define DT2821_SUPCSR_PRLD (1 << 4)
-#define DT2821_SUPCSR_STRIG (1 << 3)
-#define DT2821_SUPCSR_XTRIG (1 << 2)
-#define DT2821_SUPCSR_XCLK (1 << 1)
-#define DT2821_SUPCSR_BDINIT (1 << 0)
+#define DT2821_SUPCSR_DMAD BIT(15)
+#define DT2821_SUPCSR_ERRINTEN BIT(14)
+#define DT2821_SUPCSR_CLRDMADNE BIT(13)
+#define DT2821_SUPCSR_DDMA BIT(12)
+#define DT2821_SUPCSR_DS(x) (((x) & 0x3) << 10)
+#define DT2821_SUPCSR_DS_PIO DT2821_SUPCSR_DS(0)
+#define DT2821_SUPCSR_DS_AD_CLK DT2821_SUPCSR_DS(1)
+#define DT2821_SUPCSR_DS_DA_CLK DT2821_SUPCSR_DS(2)
+#define DT2821_SUPCSR_DS_AD_TRIG DT2821_SUPCSR_DS(3)
+#define DT2821_SUPCSR_BUFFB BIT(9)
+#define DT2821_SUPCSR_SCDN BIT(8)
+#define DT2821_SUPCSR_DACON BIT(7)
+#define DT2821_SUPCSR_ADCINIT BIT(6)
+#define DT2821_SUPCSR_DACINIT BIT(5)
+#define DT2821_SUPCSR_PRLD BIT(4)
+#define DT2821_SUPCSR_STRIG BIT(3)
+#define DT2821_SUPCSR_XTRIG BIT(2)
+#define DT2821_SUPCSR_XCLK BIT(1)
+#define DT2821_SUPCSR_BDINIT BIT(0)
#define DT2821_TMRCTR_REG 0x0e
+#define DT2821_TMRCTR_PRESCALE(x) (((x) & 0xf) << 8)
+#define DT2821_TMRCTR_DIVIDER(x) ((255 - ((x) & 0xff)) << 0)
+
+/* Pacer Clock */
+#define DT2821_OSC_BASE 250 /* 4 MHz (in nanoseconds) */
+#define DT2821_PRESCALE(x) BIT(x)
+#define DT2821_PRESCALE_MAX 15
+#define DT2821_DIVIDER_MAX 255
+#define DT2821_OSC_MAX (DT2821_OSC_BASE * \
+ DT2821_PRESCALE(DT2821_PRESCALE_MAX) * \
+ DT2821_DIVIDER_MAX)
static const struct comedi_lrange range_dt282x_ai_lo_bipolar = {
4, {
@@ -364,10 +376,10 @@ static unsigned int dt282x_ns_to_timer(unsigned int *ns, unsigned int flags)
{
unsigned int prescale, base, divider;
- for (prescale = 0; prescale < 16; prescale++) {
- if (prescale == 1)
+ for (prescale = 0; prescale <= DT2821_PRESCALE_MAX; prescale++) {
+		if (prescale == 1)	/* 0 and 1 both divide by 1 */
continue;
- base = 250 * (1 << prescale);
+ base = DT2821_OSC_BASE * DT2821_PRESCALE(prescale);
switch (flags & CMDF_ROUND_MASK) {
case CMDF_ROUND_NEAREST:
default:
@@ -380,15 +392,17 @@ static unsigned int dt282x_ns_to_timer(unsigned int *ns, unsigned int flags)
divider = DIV_ROUND_UP(*ns, base);
break;
}
- if (divider < 256) {
- *ns = divider * base;
- return (prescale << 8) | (255 - divider);
- }
+ if (divider <= DT2821_DIVIDER_MAX)
+ break;
+ }
+ if (divider > DT2821_DIVIDER_MAX) {
+ prescale = DT2821_PRESCALE_MAX;
+ divider = DT2821_DIVIDER_MAX;
+ base = DT2821_OSC_BASE * DT2821_PRESCALE(prescale);
}
- base = 250 * (1 << 15);
- divider = 255;
*ns = divider * base;
- return (15 << 8) | (255 - divider);
+ return DT2821_TMRCTR_PRESCALE(prescale) |
+ DT2821_TMRCTR_DIVIDER(divider);
}
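
The reworked dt282x_ns_to_timer() can be exercised outside the kernel with a small model of the same search: base = 250 ns * 2^prescale (prescale values 0 and 1 both divide by 1), divider up to 255, and a TMRCTR value of (prescale << 8) | (255 - divider). With those limits the slowest period is 250 * 2^15 * 255 = 2,088,960,000 ns, roughly 2.09 s, which is what DT2821_OSC_MAX (the replacement for the old SLOWEST_TIMER constant) encodes. A sketch that handles only CMDF_ROUND_NEAREST, with illustrative names:

#include <stdio.h>

#define OSC_BASE_NS	250	/* 4 MHz oscillator period */
#define PRESCALE_MAX	15
#define DIVIDER_MAX	255

/* userspace model of dt282x_ns_to_timer(), round-to-nearest only */
static unsigned int ns_to_timer(unsigned int *ns)
{
	unsigned int prescale, base = OSC_BASE_NS, divider = DIVIDER_MAX + 1;

	for (prescale = 0; prescale <= PRESCALE_MAX; prescale++) {
		if (prescale == 1)		/* 0 and 1 both divide by 1 */
			continue;
		base = OSC_BASE_NS * (1U << prescale);
		divider = (*ns + base / 2) / base;	/* round to nearest */
		if (divider <= DIVIDER_MAX)
			break;
	}
	if (divider > DIVIDER_MAX) {		/* clamp to the slowest rate */
		prescale = PRESCALE_MAX;
		divider = DIVIDER_MAX;
		base = OSC_BASE_NS * (1U << prescale);
	}
	*ns = divider * base;			/* actual period achieved */
	return (prescale << 8) | (255 - divider);
}

int main(void)
{
	unsigned int ns = 100000;		/* request a 100 us period */
	unsigned int tmrctr = ns_to_timer(&ns);

	printf("actual %u ns, TMRCTR 0x%04x\n", ns, tmrctr);
	return 0;
}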
static void dt282x_munge(struct comedi_device *dev,
@@ -683,13 +697,8 @@ static int dt282x_ai_cmdtest(struct comedi_device *dev,
/* Step 3: check if arguments are trivially valid */
err |= comedi_check_trigger_arg_is(&cmd->start_arg, 0);
-
err |= comedi_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
-
- err |= comedi_check_trigger_arg_min(&cmd->convert_arg, 4000);
-
-#define SLOWEST_TIMER (250*(1<<15)*255)
- err |= comedi_check_trigger_arg_max(&cmd->convert_arg, SLOWEST_TIMER);
+ err |= comedi_check_trigger_arg_max(&cmd->convert_arg, DT2821_OSC_MAX);
err |= comedi_check_trigger_arg_min(&cmd->convert_arg, board->ai_speed);
err |= comedi_check_trigger_arg_is(&cmd->scan_end_arg,
cmd->chanlist_len);
@@ -1084,20 +1093,6 @@ static int dt282x_initialize(struct comedi_device *dev)
return 0;
}
-/*
- options:
- 0 i/o base
- 1 irq
- 2 dma1
- 3 dma2
- 4 0=single ended, 1=differential
- 5 ai 0=straight binary, 1=2's comp
- 6 ao0 0=straight binary, 1=2's comp
- 7 ao1 0=straight binary, 1=2's comp
- 8 ai 0=±10 V, 1=0-10 V, 2=±5 V, 3=0-5 V
- 9 ao0 0=±10 V, 1=0-10 V, 2=±5 V, 3=0-5 V, 4=±2.5 V
- 10 ao1 0=±10 V, 1=0-10 V, 2=±5 V, 3=0-5 V, 4=±2.5 V
- */
static int dt282x_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
const struct dt282x_board *board = dev->board_ptr;
diff --git a/drivers/staging/comedi/drivers/mite.c b/drivers/staging/comedi/drivers/mite.c
index 8f24702c3380..b1c0860135d0 100644
--- a/drivers/staging/comedi/drivers/mite.c
+++ b/drivers/staging/comedi/drivers/mite.c
@@ -46,355 +46,451 @@
#include <linux/module.h>
#include <linux/slab.h>
+#include <linux/log2.h>
#include "../comedi_pci.h"
#include "mite.h"
-#define TOP_OF_PAGE(x) ((x)|(~(PAGE_MASK)))
+/*
+ * Mite registers
+ */
+#define MITE_UNKNOWN_DMA_BURST_REG 0x28
+#define UNKNOWN_DMA_BURST_ENABLE_BITS 0x600
+
+#define MITE_PCI_CONFIG_OFFSET 0x300
+#define MITE_CSIGR 0x460 /* chip signature */
+#define CSIGR_TO_IOWINS(x) (((x) >> 29) & 0x7)
+#define CSIGR_TO_WINS(x) (((x) >> 24) & 0x1f)
+#define CSIGR_TO_WPDEP(x) (((x) >> 20) & 0x7)
+#define CSIGR_TO_DMAC(x) (((x) >> 16) & 0xf)
+#define CSIGR_TO_IMODE(x) (((x) >> 12) & 0x3) /* pci=0x3 */
+#define CSIGR_TO_MMODE(x) (((x) >> 8) & 0x3) /* minimite=1 */
+#define CSIGR_TO_TYPE(x) (((x) >> 4) & 0xf) /* mite=0, minimite=1 */
+#define CSIGR_TO_VER(x) (((x) >> 0) & 0xf)
+
+#define MITE_CHAN(x) (0x500 + 0x100 * (x))
+#define MITE_CHOR(x) (0x00 + MITE_CHAN(x)) /* channel operation */
+#define CHOR_DMARESET BIT(31)
+#define CHOR_SET_SEND_TC BIT(11)
+#define CHOR_CLR_SEND_TC BIT(10)
+#define CHOR_SET_LPAUSE BIT(9)
+#define CHOR_CLR_LPAUSE BIT(8)
+#define CHOR_CLRDONE BIT(7)
+#define CHOR_CLRRB BIT(6)
+#define CHOR_CLRLC BIT(5)
+#define CHOR_FRESET BIT(4)
+#define CHOR_ABORT BIT(3) /* stop without emptying fifo */
+#define CHOR_STOP BIT(2) /* stop after emptying fifo */
+#define CHOR_CONT BIT(1)
+#define CHOR_START BIT(0)
+#define MITE_CHCR(x) (0x04 + MITE_CHAN(x)) /* channel control */
+#define CHCR_SET_DMA_IE BIT(31)
+#define CHCR_CLR_DMA_IE BIT(30)
+#define CHCR_SET_LINKP_IE BIT(29)
+#define CHCR_CLR_LINKP_IE BIT(28)
+#define CHCR_SET_SAR_IE BIT(27)
+#define CHCR_CLR_SAR_IE BIT(26)
+#define CHCR_SET_DONE_IE BIT(25)
+#define CHCR_CLR_DONE_IE BIT(24)
+#define CHCR_SET_MRDY_IE BIT(23)
+#define CHCR_CLR_MRDY_IE BIT(22)
+#define CHCR_SET_DRDY_IE BIT(21)
+#define CHCR_CLR_DRDY_IE BIT(20)
+#define CHCR_SET_LC_IE BIT(19)
+#define CHCR_CLR_LC_IE BIT(18)
+#define CHCR_SET_CONT_RB_IE BIT(17)
+#define CHCR_CLR_CONT_RB_IE BIT(16)
+#define CHCR_FIFO(x) (((x) & 0x1) << 15)
+#define CHCR_FIFODIS CHCR_FIFO(1)
+#define CHCR_FIFO_ON CHCR_FIFO(0)
+#define CHCR_BURST(x) (((x) & 0x1) << 14)
+#define CHCR_BURSTEN CHCR_BURST(1)
+#define CHCR_NO_BURSTEN CHCR_BURST(0)
+#define CHCR_BYTE_SWAP_DEVICE BIT(6)
+#define CHCR_BYTE_SWAP_MEMORY BIT(4)
+#define CHCR_DIR(x) (((x) & 0x1) << 3)
+#define CHCR_DEV_TO_MEM CHCR_DIR(1)
+#define CHCR_MEM_TO_DEV CHCR_DIR(0)
+#define CHCR_MODE(x) (((x) & 0x7) << 0)
+#define CHCR_NORMAL CHCR_MODE(0)
+#define CHCR_CONTINUE CHCR_MODE(1)
+#define CHCR_RINGBUFF CHCR_MODE(2)
+#define CHCR_LINKSHORT CHCR_MODE(4)
+#define CHCR_LINKLONG CHCR_MODE(5)
+#define MITE_TCR(x) (0x08 + MITE_CHAN(x)) /* transfer count */
+#define MITE_MCR(x) (0x0c + MITE_CHAN(x)) /* memory config */
+#define MITE_MAR(x) (0x10 + MITE_CHAN(x)) /* memory address */
+#define MITE_DCR(x) (0x14 + MITE_CHAN(x)) /* device config */
+#define DCR_NORMAL BIT(29)
+#define MITE_DAR(x) (0x18 + MITE_CHAN(x)) /* device address */
+#define MITE_LKCR(x) (0x1c + MITE_CHAN(x)) /* link config */
+#define MITE_LKAR(x) (0x20 + MITE_CHAN(x)) /* link address */
+#define MITE_LLKAR(x) (0x24 + MITE_CHAN(x)) /* see tnt5002 manual */
+#define MITE_BAR(x) (0x28 + MITE_CHAN(x)) /* base address */
+#define MITE_BCR(x) (0x2c + MITE_CHAN(x)) /* base count */
+#define MITE_SAR(x) (0x30 + MITE_CHAN(x)) /* ? address */
+#define MITE_WSCR(x) (0x34 + MITE_CHAN(x)) /* ? */
+#define MITE_WSER(x) (0x38 + MITE_CHAN(x)) /* ? */
+#define MITE_CHSR(x) (0x3c + MITE_CHAN(x)) /* channel status */
+#define CHSR_INT BIT(31)
+#define CHSR_LPAUSES BIT(29)
+#define CHSR_SARS BIT(27)
+#define CHSR_DONE BIT(25)
+#define CHSR_MRDY BIT(23)
+#define CHSR_DRDY BIT(21)
+#define CHSR_LINKC BIT(19)
+#define CHSR_CONTS_RB BIT(17)
+#define CHSR_ERROR BIT(15)
+#define CHSR_SABORT BIT(14)
+#define CHSR_HABORT BIT(13)
+#define CHSR_STOPS BIT(12)
+#define CHSR_OPERR(x) (((x) & 0x3) << 10)
+#define CHSR_OPERR_MASK CHSR_OPERR(3)
+#define CHSR_OPERR_NOERROR CHSR_OPERR(0)
+#define CHSR_OPERR_FIFOERROR CHSR_OPERR(1)
+#define CHSR_OPERR_LINKERROR CHSR_OPERR(1) /* ??? */
+#define CHSR_XFERR BIT(9)
+#define CHSR_END BIT(8)
+#define CHSR_DRQ1 BIT(7)
+#define CHSR_DRQ0 BIT(6)
+#define CHSR_LERR(x) (((x) & 0x3) << 4)
+#define CHSR_LERR_MASK CHSR_LERR(3)
+#define CHSR_LBERR CHSR_LERR(1)
+#define CHSR_LRERR CHSR_LERR(2)
+#define CHSR_LOERR CHSR_LERR(3)
+#define CHSR_MERR(x) (((x) & 0x3) << 2)
+#define CHSR_MERR_MASK CHSR_MERR(3)
+#define CHSR_MBERR CHSR_MERR(1)
+#define CHSR_MRERR CHSR_MERR(2)
+#define CHSR_MOERR CHSR_MERR(3)
+#define CHSR_DERR(x) (((x) & 0x3) << 0)
+#define CHSR_DERR_MASK CHSR_DERR(3)
+#define CHSR_DBERR CHSR_DERR(1)
+#define CHSR_DRERR CHSR_DERR(2)
+#define CHSR_DOERR CHSR_DERR(3)
+#define MITE_FCR(x) (0x40 + MITE_CHAN(x)) /* fifo count */
+
+/* common bits for the memory/device/link config registers */
+#define CR_RL(x) (((x) & 0x7) << 21)
+#define CR_REQS(x) (((x) & 0x7) << 16)
+#define CR_REQS_MASK CR_REQS(7)
+#define CR_ASEQ(x) (((x) & 0x3) << 10)
+#define CR_ASEQDONT CR_ASEQ(0)
+#define CR_ASEQUP CR_ASEQ(1)
+#define CR_ASEQDOWN CR_ASEQ(2)
+#define CR_ASEQ_MASK CR_ASEQ(3)
+#define CR_PSIZE(x) (((x) & 0x3) << 8)
+#define CR_PSIZE8 CR_PSIZE(1)
+#define CR_PSIZE16 CR_PSIZE(2)
+#define CR_PSIZE32 CR_PSIZE(3)
+#define CR_PORT(x) (((x) & 0x3) << 6)
+#define CR_PORTCPU CR_PORT(0)
+#define CR_PORTIO CR_PORT(1)
+#define CR_PORTVXI CR_PORT(2)
+#define CR_PORTMXI CR_PORT(3)
+#define CR_AMDEVICE BIT(0)
+
+static unsigned int MITE_IODWBSR_1_WSIZE_bits(unsigned int size)
+{
+ return (ilog2(size) - 1) & 0x1f;
+}
-struct mite_struct *mite_alloc(struct pci_dev *pcidev)
+static unsigned int mite_retry_limit(unsigned int retry_limit)
{
- struct mite_struct *mite;
- unsigned int i;
+ unsigned int value = 0;
- mite = kzalloc(sizeof(*mite), GFP_KERNEL);
- if (mite) {
- spin_lock_init(&mite->lock);
- mite->pcidev = pcidev;
- for (i = 0; i < MAX_MITE_DMA_CHANNELS; ++i) {
- mite->channels[i].mite = mite;
- mite->channels[i].channel = i;
- mite->channels[i].done = 1;
- }
- }
- return mite;
+ if (retry_limit)
+ value = 1 + ilog2(retry_limit);
+ if (value > 0x7)
+ value = 0x7;
+ return CR_RL(value);
}
-EXPORT_SYMBOL_GPL(mite_alloc);
-static void dump_chip_signature(u32 csigr_bits)
+static unsigned int mite_drq_reqs(unsigned int drq_line)
{
- pr_info("version = %i, type = %i, mite mode = %i, interface mode = %i\n",
- mite_csigr_version(csigr_bits), mite_csigr_type(csigr_bits),
- mite_csigr_mmode(csigr_bits), mite_csigr_imode(csigr_bits));
- pr_info("num channels = %i, write post fifo depth = %i, wins = %i, iowins = %i\n",
- mite_csigr_dmac(csigr_bits), mite_csigr_wpdep(csigr_bits),
- mite_csigr_wins(csigr_bits), mite_csigr_iowins(csigr_bits));
+ /* This also works on m-series when using channels (drq_line) 4 or 5. */
+ return CR_REQS((drq_line & 0x3) | 0x4);
}
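
The window-size, retry-limit and DRQ helpers above all reduce to packing a small log2-derived value into a register field. A userspace sketch of the arithmetic, with a naive ilog2 standing in for the kernel's linux/log2.h helper (taken here to mean floor(log2(x))):

#include <stdio.h>

/* naive stand-in for the kernel's ilog2(): floor(log2(x)) for x > 0 */
static unsigned int ilog2u(unsigned int x)
{
	unsigned int r = 0;

	while (x >>= 1)
		r++;
	return r;
}

/* model of MITE_IODWBSR_1_WSIZE_bits(): encode a power-of-two window size */
static unsigned int wsize_bits(unsigned int size)
{
	return (ilog2u(size) - 1) & 0x1f;
}

/* model of mite_retry_limit(): 0 stays 0, otherwise 1 + ilog2(limit), capped at 7 */
static unsigned int retry_limit_field(unsigned int retry_limit)
{
	unsigned int value = 0;

	if (retry_limit)
		value = 1 + ilog2u(retry_limit);
	if (value > 0x7)
		value = 0x7;
	return value;		/* shifted into the CR_RL() position by the driver */
}

int main(void)
{
	printf("wsize_bits(0x1000) = %u\n", wsize_bits(0x1000));	/* 11 */
	printf("retry_limit_field(64) = %u\n", retry_limit_field(64));	/* 7 */
	return 0;
}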
-static unsigned mite_fifo_size(struct mite_struct *mite, unsigned channel)
+static unsigned int mite_fifo_size(struct mite *mite, unsigned int channel)
{
- unsigned fcr_bits = readl(mite->mite_io_addr + MITE_FCR(channel));
- unsigned empty_count = (fcr_bits >> 16) & 0xff;
- unsigned full_count = fcr_bits & 0xff;
+ unsigned int fcr_bits = readl(mite->mmio + MITE_FCR(channel));
+ unsigned int empty_count = (fcr_bits >> 16) & 0xff;
+ unsigned int full_count = fcr_bits & 0xff;
return empty_count + full_count;
}
-int mite_setup2(struct comedi_device *dev,
- struct mite_struct *mite, bool use_win1)
+static u32 mite_device_bytes_transferred(struct mite_channel *mite_chan)
{
- unsigned long length;
- int i;
- u32 csigr_bits;
- unsigned unknown_dma_burst_bits;
+ struct mite *mite = mite_chan->mite;
- pci_set_master(mite->pcidev);
+ return readl(mite->mmio + MITE_DAR(mite_chan->channel));
+}
- mite->mite_io_addr = pci_ioremap_bar(mite->pcidev, 0);
- if (!mite->mite_io_addr) {
- dev_err(dev->class_dev,
- "Failed to remap mite io memory address\n");
- return -ENOMEM;
- }
- mite->mite_phys_addr = pci_resource_start(mite->pcidev, 0);
+/**
+ * mite_bytes_in_transit() - Returns the number of unread bytes in the fifo.
+ * @mite_chan: MITE dma channel.
+ */
+u32 mite_bytes_in_transit(struct mite_channel *mite_chan)
+{
+ struct mite *mite = mite_chan->mite;
- dev->mmio = pci_ioremap_bar(mite->pcidev, 1);
- if (!dev->mmio) {
- dev_err(dev->class_dev,
- "Failed to remap daq io memory address\n");
- return -ENOMEM;
- }
- mite->daq_phys_addr = pci_resource_start(mite->pcidev, 1);
- length = pci_resource_len(mite->pcidev, 1);
+ return readl(mite->mmio + MITE_FCR(mite_chan->channel)) & 0xff;
+}
+EXPORT_SYMBOL_GPL(mite_bytes_in_transit);
- if (use_win1) {
- writel(0, mite->mite_io_addr + MITE_IODWBSR);
- dev_info(dev->class_dev,
- "using I/O Window Base Size register 1\n");
- writel(mite->daq_phys_addr | WENAB |
- MITE_IODWBSR_1_WSIZE_bits(length),
- mite->mite_io_addr + MITE_IODWBSR_1);
- writel(0, mite->mite_io_addr + MITE_IODWCR_1);
- } else {
- writel(mite->daq_phys_addr | WENAB,
- mite->mite_io_addr + MITE_IODWBSR);
- }
- /*
- * Make sure dma bursts work. I got this from running a bus analyzer
- * on a pxi-6281 and a pxi-6713. 6713 powered up with register value
- * of 0x61f and bursts worked. 6281 powered up with register value of
- * 0x1f and bursts didn't work. The NI windows driver reads the
- * register, then does a bitwise-or of 0x600 with it and writes it back.
- */
- unknown_dma_burst_bits =
- readl(mite->mite_io_addr + MITE_UNKNOWN_DMA_BURST_REG);
- unknown_dma_burst_bits |= UNKNOWN_DMA_BURST_ENABLE_BITS;
- writel(unknown_dma_burst_bits,
- mite->mite_io_addr + MITE_UNKNOWN_DMA_BURST_REG);
+/* returns lower bound for number of bytes transferred from device to memory */
+static u32 mite_bytes_written_to_memory_lb(struct mite_channel *mite_chan)
+{
+ u32 device_byte_count;
- csigr_bits = readl(mite->mite_io_addr + MITE_CSIGR);
- mite->num_channels = mite_csigr_dmac(csigr_bits);
- if (mite->num_channels > MAX_MITE_DMA_CHANNELS) {
- dev_warn(dev->class_dev,
- "mite: bug? chip claims to have %i dma channels. Setting to %i.\n",
- mite->num_channels, MAX_MITE_DMA_CHANNELS);
- mite->num_channels = MAX_MITE_DMA_CHANNELS;
- }
- dump_chip_signature(csigr_bits);
- for (i = 0; i < mite->num_channels; i++) {
- writel(CHOR_DMARESET, mite->mite_io_addr + MITE_CHOR(i));
- /* disable interrupts */
- writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE | CHCR_CLR_SAR_IE |
- CHCR_CLR_DONE_IE | CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
- CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
- mite->mite_io_addr + MITE_CHCR(i));
- }
- mite->fifo_size = mite_fifo_size(mite, 0);
- dev_info(dev->class_dev, "fifo size is %i.\n", mite->fifo_size);
- return 0;
+ device_byte_count = mite_device_bytes_transferred(mite_chan);
+ return device_byte_count - mite_bytes_in_transit(mite_chan);
}
-EXPORT_SYMBOL_GPL(mite_setup2);
-void mite_detach(struct mite_struct *mite)
+/* returns upper bound for number of bytes transferred from device to memory */
+static u32 mite_bytes_written_to_memory_ub(struct mite_channel *mite_chan)
{
- if (!mite)
- return;
-
- if (mite->mite_io_addr)
- iounmap(mite->mite_io_addr);
+ u32 in_transit_count;
- kfree(mite);
+ in_transit_count = mite_bytes_in_transit(mite_chan);
+ return mite_device_bytes_transferred(mite_chan) - in_transit_count;
}
-EXPORT_SYMBOL_GPL(mite_detach);
-struct mite_dma_descriptor_ring *mite_alloc_ring(struct mite_struct *mite)
+/* returns lower bound for number of bytes read from memory to device */
+static u32 mite_bytes_read_from_memory_lb(struct mite_channel *mite_chan)
{
- struct mite_dma_descriptor_ring *ring =
- kmalloc(sizeof(struct mite_dma_descriptor_ring), GFP_KERNEL);
+ u32 device_byte_count;
- if (!ring)
- return NULL;
- ring->hw_dev = get_device(&mite->pcidev->dev);
- if (!ring->hw_dev) {
- kfree(ring);
- return NULL;
- }
- ring->n_links = 0;
- ring->descriptors = NULL;
- ring->descriptors_dma_addr = 0;
- return ring;
-};
-EXPORT_SYMBOL_GPL(mite_alloc_ring);
+ device_byte_count = mite_device_bytes_transferred(mite_chan);
+ return device_byte_count + mite_bytes_in_transit(mite_chan);
+}
-void mite_free_ring(struct mite_dma_descriptor_ring *ring)
+/* returns upper bound for number of bytes read from memory to device */
+static u32 mite_bytes_read_from_memory_ub(struct mite_channel *mite_chan)
{
- if (ring) {
- if (ring->descriptors) {
- dma_free_coherent(ring->hw_dev,
- ring->n_links *
- sizeof(struct mite_dma_descriptor),
- ring->descriptors,
- ring->descriptors_dma_addr);
- }
- put_device(ring->hw_dev);
- kfree(ring);
- }
-};
-EXPORT_SYMBOL_GPL(mite_free_ring);
+ u32 in_transit_count;
+
+ in_transit_count = mite_bytes_in_transit(mite_chan);
+ return mite_device_bytes_transferred(mite_chan) + in_transit_count;
+}
-struct mite_channel *mite_request_channel_in_range(struct mite_struct *mite,
- struct
- mite_dma_descriptor_ring
- *ring, unsigned min_channel,
- unsigned max_channel)
+static void mite_sync_input_dma(struct mite_channel *mite_chan,
+ struct comedi_subdevice *s)
{
- int i;
- unsigned long flags;
- struct mite_channel *channel = NULL;
+ struct comedi_async *async = s->async;
+ int count;
+ unsigned int nbytes, old_alloc_count;
+
+ old_alloc_count = async->buf_write_alloc_count;
+ /* write alloc as much as we can */
+ comedi_buf_write_alloc(s, async->prealloc_bufsz);
+ nbytes = mite_bytes_written_to_memory_lb(mite_chan);
+ if ((int)(mite_bytes_written_to_memory_ub(mite_chan) -
+ old_alloc_count) > 0) {
+ dev_warn(s->device->class_dev,
+ "mite: DMA overwrite of free area\n");
+ async->events |= COMEDI_CB_OVERFLOW;
+ return;
+ }
+
+ count = nbytes - async->buf_write_count;
/*
- * spin lock so mite_release_channel can be called safely
- * from interrupts
+	 * it's possible count will be negative due to the conservative value
+ * returned by mite_bytes_written_to_memory_lb
*/
- spin_lock_irqsave(&mite->lock, flags);
- for (i = min_channel; i <= max_channel; ++i) {
- if (mite->channel_allocated[i] == 0) {
- mite->channel_allocated[i] = 1;
- channel = &mite->channels[i];
- channel->ring = ring;
- break;
- }
+ if (count > 0) {
+ comedi_buf_write_free(s, count);
+ comedi_inc_scan_progress(s, count);
+ async->events |= COMEDI_CB_BLOCK;
}
- spin_unlock_irqrestore(&mite->lock, flags);
- return channel;
}
-EXPORT_SYMBOL_GPL(mite_request_channel_in_range);
-void mite_release_channel(struct mite_channel *mite_chan)
+static void mite_sync_output_dma(struct mite_channel *mite_chan,
+ struct comedi_subdevice *s)
{
- struct mite_struct *mite = mite_chan->mite;
- unsigned long flags;
+ struct comedi_async *async = s->async;
+ struct comedi_cmd *cmd = &async->cmd;
+ u32 stop_count = cmd->stop_arg * comedi_bytes_per_scan(s);
+ unsigned int old_alloc_count = async->buf_read_alloc_count;
+ u32 nbytes_ub, nbytes_lb;
+ int count;
+ bool finite_regen = (cmd->stop_src == TRIG_NONE && stop_count != 0);
- /* spin lock to prevent races with mite_request_channel */
- spin_lock_irqsave(&mite->lock, flags);
- if (mite->channel_allocated[mite_chan->channel]) {
- mite_dma_disarm(mite_chan);
- mite_dma_reset(mite_chan);
+ /* read alloc as much as we can */
+ comedi_buf_read_alloc(s, async->prealloc_bufsz);
+ nbytes_lb = mite_bytes_read_from_memory_lb(mite_chan);
+ if (cmd->stop_src == TRIG_COUNT && (int)(nbytes_lb - stop_count) > 0)
+ nbytes_lb = stop_count;
+ nbytes_ub = mite_bytes_read_from_memory_ub(mite_chan);
+ if (cmd->stop_src == TRIG_COUNT && (int)(nbytes_ub - stop_count) > 0)
+ nbytes_ub = stop_count;
+
+ if ((!finite_regen || stop_count > old_alloc_count) &&
+ ((int)(nbytes_ub - old_alloc_count) > 0)) {
+ dev_warn(s->device->class_dev, "mite: DMA underrun\n");
+ async->events |= COMEDI_CB_OVERFLOW;
+ return;
+ }
+
+ if (finite_regen) {
/*
- * disable all channel's interrupts (do it after disarm/reset so
- * MITE_CHCR reg isn't changed while dma is still active!)
+ * This is a special case where we continuously output a finite
+ * buffer. In this case, we do not free any of the memory,
+ * hence we expect that old_alloc_count will reach a maximum of
+ * stop_count bytes.
*/
- writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE |
- CHCR_CLR_SAR_IE | CHCR_CLR_DONE_IE |
- CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
- CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
- mite->mite_io_addr + MITE_CHCR(mite_chan->channel));
- mite->channel_allocated[mite_chan->channel] = 0;
- mite_chan->ring = NULL;
- mmiowb();
+ return;
+ }
+
+ count = nbytes_lb - async->buf_read_count;
+ if (count > 0) {
+ comedi_buf_read_free(s, count);
+ async->events |= COMEDI_CB_BLOCK;
}
- spin_unlock_irqrestore(&mite->lock, flags);
}
-EXPORT_SYMBOL_GPL(mite_release_channel);
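
mite_sync_input_dma() and mite_sync_output_dma() above compare free-running byte counters with the (int)(a - b) > 0 idiom rather than a > b; the point of that idiom is that it stays correct when the 32-bit counters wrap. A standalone illustration with arbitrarily chosen counter values:

#include <stdio.h>

/* "a is ahead of b" for free-running 32-bit counters, tolerant of wraparound */
static int counter_after(unsigned int a, unsigned int b)
{
	return (int)(a - b) > 0;
}

int main(void)
{
	unsigned int b = 0xfffffff0u;	/* counter just before it wraps */
	unsigned int a = b + 0x40;	/* same counter 64 bytes later: 0x30 */

	printf("naive a > b:         %d\n", a > b);			/* 0, wrong */
	printf("counter_after(a, b): %d\n", counter_after(a, b));	/* 1, right */
	return 0;
}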
-void mite_dma_arm(struct mite_channel *mite_chan)
+/**
+ * mite_sync_dma() - Sync the MITE dma with the COMEDI async buffer.
+ * @mite_chan: MITE dma channel.
+ * @s: COMEDI subdevice.
+ */
+void mite_sync_dma(struct mite_channel *mite_chan, struct comedi_subdevice *s)
+{
+ if (mite_chan->dir == COMEDI_INPUT)
+ mite_sync_input_dma(mite_chan, s);
+ else
+ mite_sync_output_dma(mite_chan, s);
+}
+EXPORT_SYMBOL_GPL(mite_sync_dma);
+
+static unsigned int mite_get_status(struct mite_channel *mite_chan)
{
- struct mite_struct *mite = mite_chan->mite;
- int chor;
+ struct mite *mite = mite_chan->mite;
+ unsigned int status;
unsigned long flags;
- /*
- * memory barrier is intended to insure any twiddling with the buffer
- * is done before writing to the mite to arm dma transfer
- */
- smp_mb();
- /* arm */
- chor = CHOR_START;
spin_lock_irqsave(&mite->lock, flags);
- mite_chan->done = 0;
- writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
+ status = readl(mite->mmio + MITE_CHSR(mite_chan->channel));
+ if (status & CHSR_DONE) {
+ mite_chan->done = 1;
+ writel(CHOR_CLRDONE,
+ mite->mmio + MITE_CHOR(mite_chan->channel));
+ }
mmiowb();
spin_unlock_irqrestore(&mite->lock, flags);
- /* mite_dma_tcr(mite, channel); */
+ return status;
}
-EXPORT_SYMBOL_GPL(mite_dma_arm);
-/**************************************/
-
-int mite_buf_change(struct mite_dma_descriptor_ring *ring,
- struct comedi_subdevice *s)
+/**
+ * mite_ack_linkc() - Check and ack the LINKC interrupt.
+ * @mite_chan: MITE dma channel.
+ * @s: COMEDI subdevice.
+ * @sync: flag to force a mite_sync_dma().
+ *
+ * This will also ack the DONE interrupt if active.
+ */
+void mite_ack_linkc(struct mite_channel *mite_chan,
+ struct comedi_subdevice *s,
+ bool sync)
{
- struct comedi_async *async = s->async;
- unsigned int n_links;
+ struct mite *mite = mite_chan->mite;
+ unsigned int status;
- if (ring->descriptors) {
- dma_free_coherent(ring->hw_dev,
- ring->n_links *
- sizeof(struct mite_dma_descriptor),
- ring->descriptors,
- ring->descriptors_dma_addr);
+ status = mite_get_status(mite_chan);
+ if (status & CHSR_LINKC) {
+ writel(CHOR_CLRLC, mite->mmio + MITE_CHOR(mite_chan->channel));
+ sync = true;
}
- ring->descriptors = NULL;
- ring->descriptors_dma_addr = 0;
- ring->n_links = 0;
+ if (sync)
+ mite_sync_dma(mite_chan, s);
- if (async->prealloc_bufsz == 0)
- return 0;
-
- n_links = async->prealloc_bufsz >> PAGE_SHIFT;
-
- ring->descriptors =
- dma_alloc_coherent(ring->hw_dev,
- n_links * sizeof(struct mite_dma_descriptor),
- &ring->descriptors_dma_addr, GFP_KERNEL);
- if (!ring->descriptors) {
+ if (status & CHSR_XFERR) {
dev_err(s->device->class_dev,
- "mite: ring buffer allocation failed\n");
- return -ENOMEM;
+ "mite: transfer error %08x\n", status);
+ s->async->events |= COMEDI_CB_ERROR;
}
- ring->n_links = n_links;
-
- return mite_init_ring_descriptors(ring, s, n_links << PAGE_SHIFT);
}
-EXPORT_SYMBOL_GPL(mite_buf_change);
+EXPORT_SYMBOL_GPL(mite_ack_linkc);
-/*
- * initializes the ring buffer descriptors to provide correct DMA transfer links
- * to the exact amount of memory required. When the ring buffer is allocated in
- * mite_buf_change, the default is to initialize the ring to refer to the entire
- * DMA data buffer. A command may call this function later to re-initialize and
- * shorten the amount of memory that will be transferred.
+/**
+ * mite_done() - Check if a MITE dma transfer is complete.
+ * @mite_chan: MITE dma channel.
+ *
+ * This will also ack the DONE interrupt if active.
*/
-int mite_init_ring_descriptors(struct mite_dma_descriptor_ring *ring,
- struct comedi_subdevice *s,
- unsigned int nbytes)
+int mite_done(struct mite_channel *mite_chan)
{
- struct comedi_async *async = s->async;
- unsigned int n_full_links = nbytes >> PAGE_SHIFT;
- unsigned int remainder = nbytes % PAGE_SIZE;
- int i;
-
- dev_dbg(s->device->class_dev,
- "mite: init ring buffer to %u bytes\n", nbytes);
-
- if ((n_full_links + (remainder > 0 ? 1 : 0)) > ring->n_links) {
- dev_err(s->device->class_dev,
- "mite: ring buffer too small for requested init\n");
- return -ENOMEM;
- }
+ struct mite *mite = mite_chan->mite;
+ unsigned long flags;
+ int done;
- /* We set the descriptors for all full links. */
- for (i = 0; i < n_full_links; ++i) {
- ring->descriptors[i].count = cpu_to_le32(PAGE_SIZE);
- ring->descriptors[i].addr =
- cpu_to_le32(async->buf_map->page_list[i].dma_addr);
- ring->descriptors[i].next =
- cpu_to_le32(ring->descriptors_dma_addr +
- (i + 1) * sizeof(struct mite_dma_descriptor));
- }
+ mite_get_status(mite_chan);
+ spin_lock_irqsave(&mite->lock, flags);
+ done = mite_chan->done;
+ spin_unlock_irqrestore(&mite->lock, flags);
+ return done;
+}
+EXPORT_SYMBOL_GPL(mite_done);
- /* the last link is either a remainder or was a full link. */
- if (remainder > 0) {
- /* set the lesser count for the remainder link */
- ring->descriptors[i].count = cpu_to_le32(remainder);
- ring->descriptors[i].addr =
- cpu_to_le32(async->buf_map->page_list[i].dma_addr);
- /* increment i so that assignment below refs last link */
- ++i;
- }
+static void mite_dma_reset(struct mite_channel *mite_chan)
+{
+ writel(CHOR_DMARESET | CHOR_FRESET,
+ mite_chan->mite->mmio + MITE_CHOR(mite_chan->channel));
+}
- /* Assign the last link->next to point back to the head of the list. */
- ring->descriptors[i - 1].next = cpu_to_le32(ring->descriptors_dma_addr);
+/**
+ * mite_dma_arm() - Start a MITE dma transfer.
+ * @mite_chan: MITE dma channel.
+ */
+void mite_dma_arm(struct mite_channel *mite_chan)
+{
+ struct mite *mite = mite_chan->mite;
+ unsigned long flags;
/*
- * barrier is meant to insure that all the writes to the dma descriptors
- * have completed before the dma controller is commanded to read them
+	 * memory barrier is intended to ensure any twiddling with the buffer
+ * is done before writing to the mite to arm dma transfer
*/
- smp_wmb();
- return 0;
+ smp_mb();
+ spin_lock_irqsave(&mite->lock, flags);
+ mite_chan->done = 0;
+ /* arm */
+ writel(CHOR_START, mite->mmio + MITE_CHOR(mite_chan->channel));
+ mmiowb();
+ spin_unlock_irqrestore(&mite->lock, flags);
}
-EXPORT_SYMBOL_GPL(mite_init_ring_descriptors);
+EXPORT_SYMBOL_GPL(mite_dma_arm);
+
+/**
+ * mite_dma_disarm() - Stop a MITE dma transfer.
+ * @mite_chan: MITE dma channel.
+ */
+void mite_dma_disarm(struct mite_channel *mite_chan)
+{
+ struct mite *mite = mite_chan->mite;
+
+ /* disarm */
+ writel(CHOR_ABORT, mite->mmio + MITE_CHOR(mite_chan->channel));
+}
+EXPORT_SYMBOL_GPL(mite_dma_disarm);
+/**
+ * mite_prep_dma() - Prepare a MITE dma channel for transfers.
+ * @mite_chan: MITE dma channel.
+ * @num_device_bits: device transfer size (8, 16, or 32-bits).
+ * @num_memory_bits: memory transfer size (8, 16, or 32-bits).
+ */
void mite_prep_dma(struct mite_channel *mite_chan,
unsigned int num_device_bits, unsigned int num_memory_bits)
{
- unsigned int chor, chcr, mcr, dcr, lkcr;
- struct mite_struct *mite = mite_chan->mite;
+ struct mite *mite = mite_chan->mite;
+ unsigned int chcr, mcr, dcr, lkcr;
- /* reset DMA and FIFO */
- chor = CHOR_DMARESET | CHOR_FRESET;
- writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
+ mite_dma_reset(mite_chan);
/* short link chaining mode */
chcr = CHCR_SET_DMA_IE | CHCR_LINKSHORT | CHCR_SET_DONE_IE |
@@ -421,10 +517,10 @@ void mite_prep_dma(struct mite_channel *mite_chan,
if (mite_chan->dir == COMEDI_INPUT)
chcr |= CHCR_DEV_TO_MEM;
- writel(chcr, mite->mite_io_addr + MITE_CHCR(mite_chan->channel));
+ writel(chcr, mite->mmio + MITE_CHCR(mite_chan->channel));
/* to/from memory */
- mcr = CR_RL(64) | CR_ASEQUP;
+ mcr = mite_retry_limit(64) | CR_ASEQUP;
switch (num_memory_bits) {
case 8:
mcr |= CR_PSIZE8;
@@ -439,11 +535,11 @@ void mite_prep_dma(struct mite_channel *mite_chan,
pr_warn("bug! invalid mem bit width for dma transfer\n");
break;
}
- writel(mcr, mite->mite_io_addr + MITE_MCR(mite_chan->channel));
+ writel(mcr, mite->mmio + MITE_MCR(mite_chan->channel));
/* from/to device */
- dcr = CR_RL(64) | CR_ASEQUP;
- dcr |= CR_PORTIO | CR_AMDEVICE | CR_REQSDRQ(mite_chan->channel);
+ dcr = mite_retry_limit(64) | CR_ASEQUP;
+ dcr |= CR_PORTIO | CR_AMDEVICE | mite_drq_reqs(mite_chan->channel);
switch (num_device_bits) {
case 8:
dcr |= CR_PSIZE8;
@@ -458,223 +554,402 @@ void mite_prep_dma(struct mite_channel *mite_chan,
pr_warn("bug! invalid dev bit width for dma transfer\n");
break;
}
- writel(dcr, mite->mite_io_addr + MITE_DCR(mite_chan->channel));
+ writel(dcr, mite->mmio + MITE_DCR(mite_chan->channel));
/* reset the DAR */
- writel(0, mite->mite_io_addr + MITE_DAR(mite_chan->channel));
+ writel(0, mite->mmio + MITE_DAR(mite_chan->channel));
/* the link is 32bits */
- lkcr = CR_RL(64) | CR_ASEQUP | CR_PSIZE32;
- writel(lkcr, mite->mite_io_addr + MITE_LKCR(mite_chan->channel));
+ lkcr = mite_retry_limit(64) | CR_ASEQUP | CR_PSIZE32;
+ writel(lkcr, mite->mmio + MITE_LKCR(mite_chan->channel));
/* starting address for link chaining */
- writel(mite_chan->ring->descriptors_dma_addr,
- mite->mite_io_addr + MITE_LKAR(mite_chan->channel));
+ writel(mite_chan->ring->dma_addr,
+ mite->mmio + MITE_LKAR(mite_chan->channel));
}
EXPORT_SYMBOL_GPL(mite_prep_dma);
-static u32 mite_device_bytes_transferred(struct mite_channel *mite_chan)
+static struct mite_channel *__mite_request_channel(struct mite *mite,
+ struct mite_ring *ring,
+ unsigned int min_channel,
+ unsigned int max_channel)
{
- struct mite_struct *mite = mite_chan->mite;
+ struct mite_channel *mite_chan = NULL;
+ unsigned long flags;
+ int i;
- return readl(mite->mite_io_addr + MITE_DAR(mite_chan->channel));
+ /*
+ * spin lock so mite_release_channel can be called safely
+ * from interrupts
+ */
+ spin_lock_irqsave(&mite->lock, flags);
+ for (i = min_channel; i <= max_channel; ++i) {
+ mite_chan = &mite->channels[i];
+ if (!mite_chan->ring) {
+ mite_chan->ring = ring;
+ break;
+ }
+ mite_chan = NULL;
+ }
+ spin_unlock_irqrestore(&mite->lock, flags);
+ return mite_chan;
}
-u32 mite_bytes_in_transit(struct mite_channel *mite_chan)
+/**
+ * mite_request_channel_in_range() - Request a MITE dma channel.
+ * @mite: MITE device.
+ * @ring: MITE dma ring.
+ * @min_channel: minimum channel index to use.
+ * @max_channel: maximum channel index to use.
+ */
+struct mite_channel *mite_request_channel_in_range(struct mite *mite,
+ struct mite_ring *ring,
+ unsigned int min_channel,
+ unsigned int max_channel)
{
- struct mite_struct *mite = mite_chan->mite;
-
- return readl(mite->mite_io_addr +
- MITE_FCR(mite_chan->channel)) & 0x000000FF;
+ return __mite_request_channel(mite, ring, min_channel, max_channel);
}
-EXPORT_SYMBOL_GPL(mite_bytes_in_transit);
+EXPORT_SYMBOL_GPL(mite_request_channel_in_range);
-/* returns lower bound for number of bytes transferred from device to memory */
-u32 mite_bytes_written_to_memory_lb(struct mite_channel *mite_chan)
+/**
+ * mite_request_channel() - Request a MITE dma channel.
+ * @mite: MITE device.
+ * @ring: MITE dma ring.
+ */
+struct mite_channel *mite_request_channel(struct mite *mite,
+ struct mite_ring *ring)
{
- u32 device_byte_count;
-
- device_byte_count = mite_device_bytes_transferred(mite_chan);
- return device_byte_count - mite_bytes_in_transit(mite_chan);
+ return __mite_request_channel(mite, ring, 0, mite->num_channels - 1);
}
-EXPORT_SYMBOL_GPL(mite_bytes_written_to_memory_lb);
+EXPORT_SYMBOL_GPL(mite_request_channel);
-/* returns upper bound for number of bytes transferred from device to memory */
-u32 mite_bytes_written_to_memory_ub(struct mite_channel *mite_chan)
+/**
+ * mite_release_channel() - Release a MITE dma channel.
+ * @mite_chan: MITE dma channel.
+ */
+void mite_release_channel(struct mite_channel *mite_chan)
{
- u32 in_transit_count;
+ struct mite *mite = mite_chan->mite;
+ unsigned long flags;
- in_transit_count = mite_bytes_in_transit(mite_chan);
- return mite_device_bytes_transferred(mite_chan) - in_transit_count;
+ /* spin lock to prevent races with mite_request_channel */
+ spin_lock_irqsave(&mite->lock, flags);
+ if (mite_chan->ring) {
+ mite_dma_disarm(mite_chan);
+ mite_dma_reset(mite_chan);
+ /*
+ * disable all channel's interrupts (do it after disarm/reset so
+ * MITE_CHCR reg isn't changed while dma is still active!)
+ */
+ writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE |
+ CHCR_CLR_SAR_IE | CHCR_CLR_DONE_IE |
+ CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
+ CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
+ mite->mmio + MITE_CHCR(mite_chan->channel));
+ mite_chan->ring = NULL;
+ mmiowb();
+ }
+ spin_unlock_irqrestore(&mite->lock, flags);
}
-EXPORT_SYMBOL_GPL(mite_bytes_written_to_memory_ub);
+EXPORT_SYMBOL_GPL(mite_release_channel);
-/* returns lower bound for number of bytes read from memory to device */
-u32 mite_bytes_read_from_memory_lb(struct mite_channel *mite_chan)
+/**
+ * mite_init_ring_descriptors() - Initialize a MITE dma ring's descriptors.
+ * @ring: MITE dma ring.
+ * @s: COMEDI subdevice.
+ * @nbytes: the size of the dma ring (in bytes).
+ *
+ * Initializes the ring buffer descriptors to provide correct DMA transfer
+ * links to the exact amount of memory required. When the ring buffer is
+ * allocated by mite_buf_change(), the default is to initialize the ring
+ * to refer to the entire DMA data buffer. A command may call this function
+ * later to re-initialize and shorten the amount of memory that will be
+ * transferred.
+ */
+int mite_init_ring_descriptors(struct mite_ring *ring,
+ struct comedi_subdevice *s,
+ unsigned int nbytes)
{
- u32 device_byte_count;
+ struct comedi_async *async = s->async;
+ struct mite_dma_desc *desc = NULL;
+ unsigned int n_full_links = nbytes >> PAGE_SHIFT;
+ unsigned int remainder = nbytes % PAGE_SIZE;
+ int i;
- device_byte_count = mite_device_bytes_transferred(mite_chan);
- return device_byte_count + mite_bytes_in_transit(mite_chan);
-}
-EXPORT_SYMBOL_GPL(mite_bytes_read_from_memory_lb);
+ dev_dbg(s->device->class_dev,
+ "mite: init ring buffer to %u bytes\n", nbytes);
-/* returns upper bound for number of bytes read from memory to device */
-u32 mite_bytes_read_from_memory_ub(struct mite_channel *mite_chan)
-{
- u32 in_transit_count;
+ if ((n_full_links + (remainder > 0 ? 1 : 0)) > ring->n_links) {
+ dev_err(s->device->class_dev,
+ "mite: ring buffer too small for requested init\n");
+ return -ENOMEM;
+ }
- in_transit_count = mite_bytes_in_transit(mite_chan);
- return mite_device_bytes_transferred(mite_chan) + in_transit_count;
-}
-EXPORT_SYMBOL_GPL(mite_bytes_read_from_memory_ub);
+ /* We set the descriptors for all full links. */
+ for (i = 0; i < n_full_links; ++i) {
+ desc = &ring->descs[i];
+ desc->count = cpu_to_le32(PAGE_SIZE);
+ desc->addr = cpu_to_le32(async->buf_map->page_list[i].dma_addr);
+ desc->next = cpu_to_le32(ring->dma_addr +
+ (i + 1) * sizeof(*desc));
+ }
-unsigned mite_dma_tcr(struct mite_channel *mite_chan)
-{
- struct mite_struct *mite = mite_chan->mite;
+ /* the last link either carries the remainder or is the final full link */
+ if (remainder > 0) {
+ desc = &ring->descs[i];
+ /* set the lesser count for the remainder link */
+ desc->count = cpu_to_le32(remainder);
+ desc->addr = cpu_to_le32(async->buf_map->page_list[i].dma_addr);
+ }
+
+ /* Assign the last link->next to point back to the head of the list. */
+ desc->next = cpu_to_le32(ring->dma_addr);
- return readl(mite->mite_io_addr + MITE_TCR(mite_chan->channel));
+ /*
+ * barrier is meant to ensure that all the writes to the dma descriptors
+ * have completed before the dma controller is commanded to read them
+ */
+ smp_wmb();
+ return 0;
}
-EXPORT_SYMBOL_GPL(mite_dma_tcr);
+EXPORT_SYMBOL_GPL(mite_init_ring_descriptors);
-void mite_dma_disarm(struct mite_channel *mite_chan)
+static void mite_free_dma_descs(struct mite_ring *ring)
{
- struct mite_struct *mite = mite_chan->mite;
- unsigned chor;
+ struct mite_dma_desc *descs = ring->descs;
- /* disarm */
- chor = CHOR_ABORT;
- writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
+ if (descs) {
+ dma_free_coherent(ring->hw_dev,
+ ring->n_links * sizeof(*descs),
+ descs, ring->dma_addr);
+ ring->descs = NULL;
+ ring->dma_addr = 0;
+ ring->n_links = 0;
+ }
}
-EXPORT_SYMBOL_GPL(mite_dma_disarm);
-int mite_sync_input_dma(struct mite_channel *mite_chan,
- struct comedi_subdevice *s)
+/**
+ * mite_buf_change() - COMEDI subdevice (*buf_change) for a MITE dma ring.
+ * @ring: MITE dma ring.
+ * @s: COMEDI subdevice.
+ */
+int mite_buf_change(struct mite_ring *ring, struct comedi_subdevice *s)
{
struct comedi_async *async = s->async;
- int count;
- unsigned int nbytes, old_alloc_count;
+ struct mite_dma_desc *descs;
+ unsigned int n_links;
- old_alloc_count = async->buf_write_alloc_count;
- /* write alloc as much as we can */
- comedi_buf_write_alloc(s, async->prealloc_bufsz);
+ mite_free_dma_descs(ring);
- nbytes = mite_bytes_written_to_memory_lb(mite_chan);
- if ((int)(mite_bytes_written_to_memory_ub(mite_chan) -
- old_alloc_count) > 0) {
- dev_warn(s->device->class_dev,
- "mite: DMA overwrite of free area\n");
- async->events |= COMEDI_CB_OVERFLOW;
- return -1;
+ if (async->prealloc_bufsz == 0)
+ return 0;
+
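+ /* the descriptor ring gets one link per page of the preallocated buffer */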
+ n_links = async->prealloc_bufsz >> PAGE_SHIFT;
+
+ descs = dma_alloc_coherent(ring->hw_dev,
+ n_links * sizeof(*descs),
+ &ring->dma_addr, GFP_KERNEL);
+ if (!descs) {
+ dev_err(s->device->class_dev,
+ "mite: ring buffer allocation failed\n");
+ return -ENOMEM;
}
+ ring->descs = descs;
+ ring->n_links = n_links;
- count = nbytes - async->buf_write_count;
- /*
- * it's possible count will be negative due to conservative value
- * returned by mite_bytes_written_to_memory_lb
- */
- if (count <= 0)
- return 0;
+ return mite_init_ring_descriptors(ring, s, n_links << PAGE_SHIFT);
+}
+EXPORT_SYMBOL_GPL(mite_buf_change);
- comedi_buf_write_free(s, count);
- comedi_inc_scan_progress(s, count);
- async->events |= COMEDI_CB_BLOCK;
- return 0;
+/**
+ * mite_alloc_ring() - Allocate a MITE dma ring.
+ * @mite: MITE device.
+ */
+struct mite_ring *mite_alloc_ring(struct mite *mite)
+{
+ struct mite_ring *ring;
+
+ ring = kmalloc(sizeof(*ring), GFP_KERNEL);
+ if (!ring)
+ return NULL;
+ ring->hw_dev = get_device(&mite->pcidev->dev);
+ if (!ring->hw_dev) {
+ kfree(ring);
+ return NULL;
+ }
+ ring->n_links = 0;
+ ring->descs = NULL;
+ ring->dma_addr = 0;
+ return ring;
}
-EXPORT_SYMBOL_GPL(mite_sync_input_dma);
+EXPORT_SYMBOL_GPL(mite_alloc_ring);
-int mite_sync_output_dma(struct mite_channel *mite_chan,
- struct comedi_subdevice *s)
+/**
+ * mite_free_ring() - Free a MITE dma ring and its descriptors.
+ * @ring: MITE dma ring.
+ */
+void mite_free_ring(struct mite_ring *ring)
{
- struct comedi_async *async = s->async;
- struct comedi_cmd *cmd = &async->cmd;
- u32 stop_count = cmd->stop_arg * comedi_bytes_per_scan(s);
- unsigned int old_alloc_count = async->buf_read_alloc_count;
- u32 nbytes_ub, nbytes_lb;
- int count;
- bool finite_regen = (cmd->stop_src == TRIG_NONE && stop_count != 0);
+ if (ring) {
+ mite_free_dma_descs(ring);
+ put_device(ring->hw_dev);
+ kfree(ring);
+ }
+}
+EXPORT_SYMBOL_GPL(mite_free_ring);
- /* read alloc as much as we can */
- comedi_buf_read_alloc(s, async->prealloc_bufsz);
- nbytes_lb = mite_bytes_read_from_memory_lb(mite_chan);
- if (cmd->stop_src == TRIG_COUNT && (int)(nbytes_lb - stop_count) > 0)
- nbytes_lb = stop_count;
- nbytes_ub = mite_bytes_read_from_memory_ub(mite_chan);
- if (cmd->stop_src == TRIG_COUNT && (int)(nbytes_ub - stop_count) > 0)
- nbytes_ub = stop_count;
+static int mite_setup(struct comedi_device *dev, struct mite *mite,
+ bool use_win1)
+{
+ resource_size_t daq_phys_addr;
+ unsigned long length;
+ int i;
+ u32 csigr_bits;
+ unsigned int unknown_dma_burst_bits;
+ unsigned int wpdep;
- if ((!finite_regen || stop_count > old_alloc_count) &&
- ((int)(nbytes_ub - old_alloc_count) > 0)) {
- dev_warn(s->device->class_dev, "mite: DMA underrun\n");
- async->events |= COMEDI_CB_OVERFLOW;
- return -1;
+ pci_set_master(mite->pcidev);
+
+ mite->mmio = pci_ioremap_bar(mite->pcidev, 0);
+ if (!mite->mmio)
+ return -ENOMEM;
+
+ dev->mmio = pci_ioremap_bar(mite->pcidev, 1);
+ if (!dev->mmio)
+ return -ENOMEM;
+ daq_phys_addr = pci_resource_start(mite->pcidev, 1);
+ length = pci_resource_len(mite->pcidev, 1);
+
+ if (use_win1) {
+ writel(0, mite->mmio + MITE_IODWBSR);
+ dev_dbg(dev->class_dev,
+ "mite: using I/O Window Base Size register 1\n");
+ writel(daq_phys_addr | WENAB |
+ MITE_IODWBSR_1_WSIZE_bits(length),
+ mite->mmio + MITE_IODWBSR_1);
+ writel(0, mite->mmio + MITE_IODWCR_1);
+ } else {
+ writel(daq_phys_addr | WENAB, mite->mmio + MITE_IODWBSR);
}
+ /*
+ * Make sure dma bursts work. I got this from running a bus analyzer
+ * on a pxi-6281 and a pxi-6713. 6713 powered up with register value
+ * of 0x61f and bursts worked. 6281 powered up with register value of
+ * 0x1f and bursts didn't work. The NI Windows driver reads the
+ * register, then does a bitwise-or of 0x600 with it and writes it back.
+ *
+ * The bits 0x90180700 in MITE_UNKNOWN_DMA_BURST_REG can be
+ * written and read back. The bits 0x1f always read as 1.
+ * The rest always read as zero.
+ */
+ unknown_dma_burst_bits = readl(mite->mmio + MITE_UNKNOWN_DMA_BURST_REG);
+ unknown_dma_burst_bits |= UNKNOWN_DMA_BURST_ENABLE_BITS;
+ writel(unknown_dma_burst_bits, mite->mmio + MITE_UNKNOWN_DMA_BURST_REG);
- if (finite_regen) {
- /*
- * This is a special case where we continuously output a finite
- * buffer. In this case, we do not free any of the memory,
- * hence we expect that old_alloc_count will reach a maximum of
- * stop_count bytes.
- */
- return 0;
+ csigr_bits = readl(mite->mmio + MITE_CSIGR);
+ mite->num_channels = CSIGR_TO_DMAC(csigr_bits);
+ if (mite->num_channels > MAX_MITE_DMA_CHANNELS) {
+ dev_warn(dev->class_dev,
+ "mite: bug? chip claims to have %i dma channels. Setting to %i.\n",
+ mite->num_channels, MAX_MITE_DMA_CHANNELS);
+ mite->num_channels = MAX_MITE_DMA_CHANNELS;
}
- count = nbytes_lb - async->buf_read_count;
- if (count <= 0)
- return 0;
+ /* get the wpdep bits and convert them to the write post fifo depth */
+ wpdep = CSIGR_TO_WPDEP(csigr_bits);
+ if (wpdep)
+ wpdep = BIT(wpdep);
- if (count) {
- comedi_buf_read_free(s, count);
- async->events |= COMEDI_CB_BLOCK;
+ dev_dbg(dev->class_dev,
+ "mite: version = %i, type = %i, mite mode = %i, interface mode = %i\n",
+ CSIGR_TO_VER(csigr_bits), CSIGR_TO_TYPE(csigr_bits),
+ CSIGR_TO_MMODE(csigr_bits), CSIGR_TO_IMODE(csigr_bits));
+ dev_dbg(dev->class_dev,
+ "mite: num channels = %i, write post fifo depth = %i, wins = %i, iowins = %i\n",
+ CSIGR_TO_DMAC(csigr_bits), wpdep,
+ CSIGR_TO_WINS(csigr_bits), CSIGR_TO_IOWINS(csigr_bits));
+
+ for (i = 0; i < mite->num_channels; i++) {
+ writel(CHOR_DMARESET, mite->mmio + MITE_CHOR(i));
+ /* disable interrupts */
+ writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE | CHCR_CLR_SAR_IE |
+ CHCR_CLR_DONE_IE | CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
+ CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
+ mite->mmio + MITE_CHCR(i));
}
+ mite->fifo_size = mite_fifo_size(mite, 0);
+ dev_dbg(dev->class_dev, "mite: fifo size is %i.\n", mite->fifo_size);
return 0;
}
-EXPORT_SYMBOL_GPL(mite_sync_output_dma);
-unsigned mite_get_status(struct mite_channel *mite_chan)
+/**
+ * mite_attach() - Allocate and initialize a MITE device for a comedi driver.
+ * @dev: COMEDI device.
+ * @use_win1: flag to use I/O Window 1 instead of I/O Window 0.
+ *
+ * Called by a COMEDI driver's (*auto_attach).
+ *
+ * Returns a pointer to the MITE device on success, or NULL if the MITE cannot
+ * be allocated or remapped.
+ */
+struct mite *mite_attach(struct comedi_device *dev, bool use_win1)
{
- struct mite_struct *mite = mite_chan->mite;
- unsigned status;
- unsigned long flags;
+ struct pci_dev *pcidev = comedi_to_pci_dev(dev);
+ struct mite *mite;
+ unsigned int i;
+ int ret;
- spin_lock_irqsave(&mite->lock, flags);
- status = readl(mite->mite_io_addr + MITE_CHSR(mite_chan->channel));
- if (status & CHSR_DONE) {
- mite_chan->done = 1;
- writel(CHOR_CLRDONE,
- mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
+ mite = kzalloc(sizeof(*mite), GFP_KERNEL);
+ if (!mite)
+ return NULL;
+
+ spin_lock_init(&mite->lock);
+ mite->pcidev = pcidev;
+ for (i = 0; i < MAX_MITE_DMA_CHANNELS; ++i) {
+ mite->channels[i].mite = mite;
+ mite->channels[i].channel = i;
+ mite->channels[i].done = 1;
}
- mmiowb();
- spin_unlock_irqrestore(&mite->lock, flags);
- return status;
+
+ ret = mite_setup(dev, mite, use_win1);
+ if (ret) {
+ if (mite->mmio)
+ iounmap(mite->mmio);
+ kfree(mite);
+ return NULL;
+ }
+
+ return mite;
}
-EXPORT_SYMBOL_GPL(mite_get_status);
+EXPORT_SYMBOL_GPL(mite_attach);
-int mite_done(struct mite_channel *mite_chan)
+/**
+ * mite_detach() - Unmap and free a MITE device for a comedi driver.
+ * @mite: MITE device.
+ *
+ * Called by a COMEDI driver's (*detach).
+ */
+void mite_detach(struct mite *mite)
{
- struct mite_struct *mite = mite_chan->mite;
- unsigned long flags;
- int done;
+ if (!mite)
+ return;
- mite_get_status(mite_chan);
- spin_lock_irqsave(&mite->lock, flags);
- done = mite_chan->done;
- spin_unlock_irqrestore(&mite->lock, flags);
- return done;
+ if (mite->mmio)
+ iounmap(mite->mmio);
+
+ kfree(mite);
}
-EXPORT_SYMBOL_GPL(mite_done);
+EXPORT_SYMBOL_GPL(mite_detach);
static int __init mite_module_init(void)
{
return 0;
}
+module_init(mite_module_init);
static void __exit mite_module_exit(void)
{
}
-
-module_init(mite_module_init);
module_exit(mite_module_exit);
MODULE_AUTHOR("Comedi http://www.comedi.org");
diff --git a/drivers/staging/comedi/drivers/mite.h b/drivers/staging/comedi/drivers/mite.h
index 87534b07ec81..b6349aed97d0 100644
--- a/drivers/staging/comedi/drivers/mite.h
+++ b/drivers/staging/comedi/drivers/mite.h
@@ -19,8 +19,6 @@
#ifndef _MITE_H_
#define _MITE_H_
-#include <linux/io.h>
-#include <linux/log2.h>
#include <linux/spinlock.h>
#define MAX_MITE_DMA_CHANNELS 8
@@ -30,323 +28,74 @@ struct comedi_subdevice;
struct device;
struct pci_dev;
-struct mite_dma_descriptor {
+struct mite_dma_desc {
__le32 count;
__le32 addr;
__le32 next;
u32 dar;
};
-struct mite_dma_descriptor_ring {
+struct mite_ring {
struct device *hw_dev;
unsigned int n_links;
- struct mite_dma_descriptor *descriptors;
- dma_addr_t descriptors_dma_addr;
+ struct mite_dma_desc *descs;
+ dma_addr_t dma_addr;
};
struct mite_channel {
- struct mite_struct *mite;
- unsigned channel;
+ struct mite *mite;
+ unsigned int channel;
int dir;
int done;
- struct mite_dma_descriptor_ring *ring;
+ struct mite_ring *ring;
};
-struct mite_struct {
+struct mite {
struct pci_dev *pcidev;
- resource_size_t mite_phys_addr;
- void __iomem *mite_io_addr;
- resource_size_t daq_phys_addr;
+ void __iomem *mmio;
struct mite_channel channels[MAX_MITE_DMA_CHANNELS];
- short channel_allocated[MAX_MITE_DMA_CHANNELS];
int num_channels;
- unsigned fifo_size;
+ unsigned int fifo_size;
+ /* protects mite_channel from being released by the driver */
spinlock_t lock;
};
-struct mite_struct *mite_alloc(struct pci_dev *pcidev);
+u32 mite_bytes_in_transit(struct mite_channel *);
-int mite_setup2(struct comedi_device *, struct mite_struct *, bool use_win1);
+void mite_sync_dma(struct mite_channel *, struct comedi_subdevice *);
+void mite_ack_linkc(struct mite_channel *, struct comedi_subdevice *s,
+ bool sync);
+int mite_done(struct mite_channel *);
-static inline int mite_setup(struct comedi_device *dev,
- struct mite_struct *mite)
-{
- return mite_setup2(dev, mite, false);
-}
+void mite_dma_arm(struct mite_channel *);
+void mite_dma_disarm(struct mite_channel *);
-void mite_detach(struct mite_struct *mite);
-struct mite_dma_descriptor_ring *mite_alloc_ring(struct mite_struct *mite);
-void mite_free_ring(struct mite_dma_descriptor_ring *ring);
-struct mite_channel *
-mite_request_channel_in_range(struct mite_struct *mite,
- struct mite_dma_descriptor_ring *ring,
- unsigned min_channel, unsigned max_channel);
-static inline struct mite_channel *
-mite_request_channel(struct mite_struct *mite,
- struct mite_dma_descriptor_ring *ring)
-{
- return mite_request_channel_in_range(mite, ring, 0,
- mite->num_channels - 1);
-}
-
-void mite_release_channel(struct mite_channel *mite_chan);
-
-unsigned mite_dma_tcr(struct mite_channel *mite_chan);
-void mite_dma_arm(struct mite_channel *mite_chan);
-void mite_dma_disarm(struct mite_channel *mite_chan);
-int mite_sync_input_dma(struct mite_channel *mite_chan,
- struct comedi_subdevice *s);
-int mite_sync_output_dma(struct mite_channel *mite_chan,
- struct comedi_subdevice *s);
-u32 mite_bytes_written_to_memory_lb(struct mite_channel *mite_chan);
-u32 mite_bytes_written_to_memory_ub(struct mite_channel *mite_chan);
-u32 mite_bytes_read_from_memory_lb(struct mite_channel *mite_chan);
-u32 mite_bytes_read_from_memory_ub(struct mite_channel *mite_chan);
-u32 mite_bytes_in_transit(struct mite_channel *mite_chan);
-unsigned mite_get_status(struct mite_channel *mite_chan);
-int mite_done(struct mite_channel *mite_chan);
-
-void mite_prep_dma(struct mite_channel *mite_chan,
+void mite_prep_dma(struct mite_channel *,
unsigned int num_device_bits, unsigned int num_memory_bits);
-int mite_buf_change(struct mite_dma_descriptor_ring *ring,
- struct comedi_subdevice *s);
-int mite_init_ring_descriptors(struct mite_dma_descriptor_ring *ring,
- struct comedi_subdevice *s,
- unsigned int nbytes);
-
-enum mite_registers {
- /*
- * The bits 0x90180700 in MITE_UNKNOWN_DMA_BURST_REG can be
- * written and read back. The bits 0x1f always read as 1.
- * The rest always read as zero.
- */
- MITE_UNKNOWN_DMA_BURST_REG = 0x28,
- MITE_IODWBSR = 0xc0, /* IO Device Window Base Size Register */
- MITE_IODWBSR_1 = 0xc4, /* IO Device Window Base Size Register 1 */
- MITE_IODWCR_1 = 0xf4,
- MITE_PCI_CONFIG_OFFSET = 0x300,
- MITE_CSIGR = 0x460 /* chip signature */
-};
-
-#define MITE_CHAN(x) (0x500 + 0x100 * (x))
-#define MITE_CHOR(x) (0x00 + MITE_CHAN(x)) /* channel operation */
-#define MITE_CHCR(x) (0x04 + MITE_CHAN(x)) /* channel control */
-#define MITE_TCR(x) (0x08 + MITE_CHAN(x)) /* transfer count */
-#define MITE_MCR(x) (0x0c + MITE_CHAN(x)) /* memory configuration */
-#define MITE_MAR(x) (0x10 + MITE_CHAN(x)) /* memory address */
-#define MITE_DCR(x) (0x14 + MITE_CHAN(x)) /* device configuration */
-#define MITE_DAR(x) (0x18 + MITE_CHAN(x)) /* device address */
-#define MITE_LKCR(x) (0x1c + MITE_CHAN(x)) /* link configuration */
-#define MITE_LKAR(x) (0x20 + MITE_CHAN(x)) /* link address */
-#define MITE_LLKAR(x) (0x24 + MITE_CHAN(x)) /* see tnt5002 manual */
-#define MITE_BAR(x) (0x28 + MITE_CHAN(x)) /* base address */
-#define MITE_BCR(x) (0x2c + MITE_CHAN(x)) /* base count */
-#define MITE_SAR(x) (0x30 + MITE_CHAN(x)) /* ? address */
-#define MITE_WSCR(x) (0x34 + MITE_CHAN(x)) /* ? */
-#define MITE_WSER(x) (0x38 + MITE_CHAN(x)) /* ? */
-#define MITE_CHSR(x) (0x3c + MITE_CHAN(x)) /* channel status */
-#define MITE_FCR(x) (0x40 + MITE_CHAN(x)) /* fifo count */
-
-enum MITE_IODWBSR_bits {
- WENAB = 0x80, /* window enable */
-};
-
-static inline unsigned MITE_IODWBSR_1_WSIZE_bits(unsigned size)
-{
- unsigned order = 0;
-
- BUG_ON(size == 0);
- order = ilog2(size);
- BUG_ON(order < 1);
- return (order - 1) & 0x1f;
-}
-
-enum MITE_UNKNOWN_DMA_BURST_bits {
- UNKNOWN_DMA_BURST_ENABLE_BITS = 0x600
-};
-
-static inline int mite_csigr_version(u32 csigr_bits)
-{
- return csigr_bits & 0xf;
-};
-
-static inline int mite_csigr_type(u32 csigr_bits)
-{ /* original mite = 0, minimite = 1 */
- return (csigr_bits >> 4) & 0xf;
-};
-
-static inline int mite_csigr_mmode(u32 csigr_bits)
-{ /* mite mode, minimite = 1 */
- return (csigr_bits >> 8) & 0x3;
-};
-
-static inline int mite_csigr_imode(u32 csigr_bits)
-{ /* cpu port interface mode, pci = 0x3 */
- return (csigr_bits >> 12) & 0x3;
-};
-
-static inline int mite_csigr_dmac(u32 csigr_bits)
-{ /* number of dma channels */
- return (csigr_bits >> 16) & 0xf;
-};
-static inline int mite_csigr_wpdep(u32 csigr_bits)
-{ /* write post fifo depth */
- unsigned int wpdep_bits = (csigr_bits >> 20) & 0x7;
+struct mite_channel *mite_request_channel_in_range(struct mite *,
+ struct mite_ring *,
+ unsigned int min_channel,
+ unsigned int max_channel);
+struct mite_channel *mite_request_channel(struct mite *, struct mite_ring *);
+void mite_release_channel(struct mite_channel *);
- return (wpdep_bits) ? (1 << (wpdep_bits - 1)) : 0;
-}
-
-static inline int mite_csigr_wins(u32 csigr_bits)
-{
- return (csigr_bits >> 24) & 0x1f;
-};
-
-static inline int mite_csigr_iowins(u32 csigr_bits)
-{ /* number of io windows */
- return (csigr_bits >> 29) & 0x7;
-};
-
-enum MITE_MCR_bits {
- MCRPON = 0,
-};
-
-enum MITE_DCR_bits {
- DCR_NORMAL = (1 << 29),
- DCRPON = 0,
-};
-
-enum MITE_CHOR_bits {
- CHOR_DMARESET = (1 << 31),
- CHOR_SET_SEND_TC = (1 << 11),
- CHOR_CLR_SEND_TC = (1 << 10),
- CHOR_SET_LPAUSE = (1 << 9),
- CHOR_CLR_LPAUSE = (1 << 8),
- CHOR_CLRDONE = (1 << 7),
- CHOR_CLRRB = (1 << 6),
- CHOR_CLRLC = (1 << 5),
- CHOR_FRESET = (1 << 4),
- CHOR_ABORT = (1 << 3), /* stop without emptying fifo */
- CHOR_STOP = (1 << 2), /* stop after emptying fifo */
- CHOR_CONT = (1 << 1),
- CHOR_START = (1 << 0),
- CHOR_PON = (CHOR_CLR_SEND_TC | CHOR_CLR_LPAUSE),
-};
-
-enum MITE_CHCR_bits {
- CHCR_SET_DMA_IE = (1 << 31),
- CHCR_CLR_DMA_IE = (1 << 30),
- CHCR_SET_LINKP_IE = (1 << 29),
- CHCR_CLR_LINKP_IE = (1 << 28),
- CHCR_SET_SAR_IE = (1 << 27),
- CHCR_CLR_SAR_IE = (1 << 26),
- CHCR_SET_DONE_IE = (1 << 25),
- CHCR_CLR_DONE_IE = (1 << 24),
- CHCR_SET_MRDY_IE = (1 << 23),
- CHCR_CLR_MRDY_IE = (1 << 22),
- CHCR_SET_DRDY_IE = (1 << 21),
- CHCR_CLR_DRDY_IE = (1 << 20),
- CHCR_SET_LC_IE = (1 << 19),
- CHCR_CLR_LC_IE = (1 << 18),
- CHCR_SET_CONT_RB_IE = (1 << 17),
- CHCR_CLR_CONT_RB_IE = (1 << 16),
- CHCR_FIFODIS = (1 << 15),
- CHCR_FIFO_ON = 0,
- CHCR_BURSTEN = (1 << 14),
- CHCR_NO_BURSTEN = 0,
- CHCR_BYTE_SWAP_DEVICE = (1 << 6),
- CHCR_BYTE_SWAP_MEMORY = (1 << 4),
- CHCR_DIR = (1 << 3),
- CHCR_DEV_TO_MEM = CHCR_DIR,
- CHCR_MEM_TO_DEV = 0,
- CHCR_NORMAL = (0 << 0),
- CHCR_CONTINUE = (1 << 0),
- CHCR_RINGBUFF = (2 << 0),
- CHCR_LINKSHORT = (4 << 0),
- CHCR_LINKLONG = (5 << 0),
- CHCRPON =
- (CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE | CHCR_CLR_SAR_IE |
- CHCR_CLR_DONE_IE | CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
- CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE),
-};
-
-enum ConfigRegister_bits {
- CR_REQS_MASK = 0x7 << 16,
- CR_ASEQDONT = 0x0 << 10,
- CR_ASEQUP = 0x1 << 10,
- CR_ASEQDOWN = 0x2 << 10,
- CR_ASEQ_MASK = 0x3 << 10,
- CR_PSIZE8 = (1 << 8),
- CR_PSIZE16 = (2 << 8),
- CR_PSIZE32 = (3 << 8),
- CR_PORTCPU = (0 << 6),
- CR_PORTIO = (1 << 6),
- CR_PORTVXI = (2 << 6),
- CR_PORTMXI = (3 << 6),
- CR_AMDEVICE = (1 << 0),
-};
-
-static inline int CR_REQS(int source)
-{
- return (source & 0x7) << 16;
-};
-
-static inline int CR_REQSDRQ(unsigned drq_line)
-{
- /* This also works on m-series when using channels (drq_line) 4 or 5. */
- return CR_REQS((drq_line & 0x3) | 0x4);
-}
-
-static inline int CR_RL(unsigned int retry_limit)
-{
- int value = 0;
+int mite_init_ring_descriptors(struct mite_ring *, struct comedi_subdevice *,
+ unsigned int nbytes);
+int mite_buf_change(struct mite_ring *, struct comedi_subdevice *);
- if (retry_limit)
- value = 1 + ilog2(retry_limit);
- if (value > 0x7)
- value = 0x7;
- return (value & 0x7) << 21;
-}
+struct mite_ring *mite_alloc_ring(struct mite *);
+void mite_free_ring(struct mite_ring *);
-enum CHSR_bits {
- CHSR_INT = (1 << 31),
- CHSR_LPAUSES = (1 << 29),
- CHSR_SARS = (1 << 27),
- CHSR_DONE = (1 << 25),
- CHSR_MRDY = (1 << 23),
- CHSR_DRDY = (1 << 21),
- CHSR_LINKC = (1 << 19),
- CHSR_CONTS_RB = (1 << 17),
- CHSR_ERROR = (1 << 15),
- CHSR_SABORT = (1 << 14),
- CHSR_HABORT = (1 << 13),
- CHSR_STOPS = (1 << 12),
- CHSR_OPERR_mask = (3 << 10),
- CHSR_OPERR_NOERROR = (0 << 10),
- CHSR_OPERR_FIFOERROR = (1 << 10),
- CHSR_OPERR_LINKERROR = (1 << 10), /* ??? */
- CHSR_XFERR = (1 << 9),
- CHSR_END = (1 << 8),
- CHSR_DRQ1 = (1 << 7),
- CHSR_DRQ0 = (1 << 6),
- CHSR_LxERR_mask = (3 << 4),
- CHSR_LBERR = (1 << 4),
- CHSR_LRERR = (2 << 4),
- CHSR_LOERR = (3 << 4),
- CHSR_MxERR_mask = (3 << 2),
- CHSR_MBERR = (1 << 2),
- CHSR_MRERR = (2 << 2),
- CHSR_MOERR = (3 << 2),
- CHSR_DxERR_mask = (3 << 0),
- CHSR_DBERR = (1 << 0),
- CHSR_DRERR = (2 << 0),
- CHSR_DOERR = (3 << 0),
-};
+struct mite *mite_attach(struct comedi_device *, bool use_win1);
+void mite_detach(struct mite *);
-static inline void mite_dma_reset(struct mite_channel *mite_chan)
-{
- writel(CHOR_DMARESET | CHOR_FRESET,
- mite_chan->mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
-};
+/*
+ * Mite registers (used outside of the mite driver)
+ */
+#define MITE_IODWBSR 0xc0 /* IO Device Window Base Size */
+#define MITE_IODWBSR_1 0xc4 /* IO Device Window1 Base Size */
+#define WENAB BIT(7) /* window enable */
+#define MITE_IODWCR_1 0xf4
#endif
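
The header above still exports mite_init_ring_descriptors() and mite_buf_change(). As a worked illustration of the split they perform (again not part of the patch, and assuming only the usual kernel page-size macros): the ring gets one full-page link per whole page of the requested byte count, plus one shorter trailing link when the count is not page aligned, and the final link's ->next is pointed back at the ring head.

/* illustration only: how mite_init_ring_descriptors() splits nbytes */
static void example_ring_split(unsigned int nbytes)
{
	unsigned int n_full_links = nbytes >> PAGE_SHIFT;	/* whole pages */
	unsigned int remainder = nbytes % PAGE_SIZE;		/* short tail */

	/* e.g. with 4 KiB pages, 9216 bytes -> 2 full links + one 1024-byte link */
	pr_info("%u bytes -> %u full link(s), %u remainder byte(s)\n",
		nbytes, n_full_links, remainder);
}
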
diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c
index 46647c64f369..0dcb826a9f1f 100644
--- a/drivers/staging/comedi/drivers/ni_660x.c
+++ b/drivers/staging/comedi/drivers/ni_660x.c
@@ -1,17 +1,16 @@
/*
- comedi/drivers/ni_660x.c
- Hardware driver for NI 660x devices
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * Hardware driver for NI 660x devices
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
* Driver: ni_660x
@@ -42,91 +41,13 @@
#include "mite.h"
#include "ni_tio.h"
-enum ni_660x_constants {
- min_counter_pfi_chan = 8,
- max_dio_pfi_chan = 31,
- counters_per_chip = 4
-};
-
-#define NUM_PFI_CHANNELS 40
-/* really there are only up to 3 dma channels, but the register layout allows
-for 4 */
-#define MAX_DMA_CHANNEL 4
-
/* See Register-Level Programmer Manual page 3.1 */
enum ni_660x_register {
- NI660X_G0_INT_ACK,
- NI660X_G0_STATUS,
- NI660X_G1_INT_ACK,
- NI660X_G1_STATUS,
- NI660X_G01_STATUS,
- NI660X_G0_CMD,
- NI660X_STC_DIO_PARALLEL_INPUT,
- NI660X_G1_CMD,
- NI660X_G0_HW_SAVE,
- NI660X_G1_HW_SAVE,
+ /* see enum ni_gpct_register */
+ NI660X_STC_DIO_PARALLEL_INPUT = NITIO_NUM_REGS,
NI660X_STC_DIO_OUTPUT,
NI660X_STC_DIO_CONTROL,
- NI660X_G0_SW_SAVE,
- NI660X_G1_SW_SAVE,
- NI660X_G0_MODE,
- NI660X_G01_STATUS1,
- NI660X_G1_MODE,
NI660X_STC_DIO_SERIAL_INPUT,
- NI660X_G0_LOADA,
- NI660X_G01_STATUS2,
- NI660X_G0_LOADB,
- NI660X_G1_LOADA,
- NI660X_G1_LOADB,
- NI660X_G0_INPUT_SEL,
- NI660X_G1_INPUT_SEL,
- NI660X_G0_AUTO_INC,
- NI660X_G1_AUTO_INC,
- NI660X_G01_RESET,
- NI660X_G0_INT_ENA,
- NI660X_G1_INT_ENA,
- NI660X_G0_CNT_MODE,
- NI660X_G1_CNT_MODE,
- NI660X_G0_GATE2,
- NI660X_G1_GATE2,
- NI660X_G0_DMA_CFG,
- NI660X_G0_DMA_STATUS,
- NI660X_G1_DMA_CFG,
- NI660X_G1_DMA_STATUS,
- NI660X_G2_INT_ACK,
- NI660X_G2_STATUS,
- NI660X_G3_INT_ACK,
- NI660X_G3_STATUS,
- NI660X_G23_STATUS,
- NI660X_G2_CMD,
- NI660X_G3_CMD,
- NI660X_G2_HW_SAVE,
- NI660X_G3_HW_SAVE,
- NI660X_G2_SW_SAVE,
- NI660X_G3_SW_SAVE,
- NI660X_G2_MODE,
- NI660X_G23_STATUS1,
- NI660X_G3_MODE,
- NI660X_G2_LOADA,
- NI660X_G23_STATUS2,
- NI660X_G2_LOADB,
- NI660X_G3_LOADA,
- NI660X_G3_LOADB,
- NI660X_G2_INPUT_SEL,
- NI660X_G3_INPUT_SEL,
- NI660X_G2_AUTO_INC,
- NI660X_G3_AUTO_INC,
- NI660X_G23_RESET,
- NI660X_G2_INT_ENA,
- NI660X_G3_INT_ENA,
- NI660X_G2_CNT_MODE,
- NI660X_G3_CNT_MODE,
- NI660X_G3_GATE2,
- NI660X_G2_GATE2,
- NI660X_G2_DMA_CFG,
- NI660X_G2_DMA_STATUS,
- NI660X_G3_DMA_CFG,
- NI660X_G3_DMA_STATUS,
NI660X_DIO32_INPUT,
NI660X_DIO32_OUTPUT,
NI660X_CLK_CFG,
@@ -156,224 +77,134 @@ enum ni_660x_register {
NI660X_NUM_REGS,
};
-static inline unsigned IOConfigReg(unsigned pfi_channel)
-{
- unsigned reg = NI660X_IO_CFG_0_1 + pfi_channel / 2;
-
- BUG_ON(reg > NI660X_IO_CFG_38_39);
- return reg;
-}
-
-enum ni_660x_register_width {
- DATA_1B,
- DATA_2B,
- DATA_4B
-};
+#define NI660X_CLK_CFG_COUNTER_SWAP BIT(21)
-enum ni_660x_register_direction {
- NI_660x_READ,
- NI_660x_WRITE,
- NI_660x_READ_WRITE
-};
+#define NI660X_GLOBAL_INT_COUNTER0 BIT(8)
+#define NI660X_GLOBAL_INT_COUNTER1 BIT(9)
+#define NI660X_GLOBAL_INT_COUNTER2 BIT(10)
+#define NI660X_GLOBAL_INT_COUNTER3 BIT(11)
+#define NI660X_GLOBAL_INT_CASCADE BIT(29)
+#define NI660X_GLOBAL_INT_GLOBAL_POL BIT(30)
+#define NI660X_GLOBAL_INT_GLOBAL BIT(31)
-enum ni_660x_pfi_output_select {
- pfi_output_select_high_Z = 0,
- pfi_output_select_counter = 1,
- pfi_output_select_do = 2,
- num_pfi_output_selects
-};
+#define NI660X_DMA_CFG_SEL(_c, _s) (((_s) & 0x1f) << (8 * (_c)))
+#define NI660X_DMA_CFG_SEL_MASK(_c) NI660X_DMA_CFG_SEL((_c), 0x1f)
+#define NI660X_DMA_CFG_SEL_NONE(_c) NI660X_DMA_CFG_SEL((_c), 0x1f)
+#define NI660X_DMA_CFG_RESET(_c) NI660X_DMA_CFG_SEL((_c), 0x80)
-enum ni_660x_subdevices {
- NI_660X_DIO_SUBDEV = 1,
- NI_660X_GPCT_SUBDEV_0 = 2
-};
-static inline unsigned NI_660X_GPCT_SUBDEV(unsigned index)
-{
- return NI_660X_GPCT_SUBDEV_0 + index;
-}
+#define NI660X_IO_CFG(x) (NI660X_IO_CFG_0_1 + ((x) / 2))
+#define NI660X_IO_CFG_OUT_SEL(_c, _s) (((_s) & 0x3) << (((_c) % 2) ? 0 : 8))
+#define NI660X_IO_CFG_OUT_SEL_MASK(_c) NI660X_IO_CFG_OUT_SEL((_c), 0x3)
+#define NI660X_IO_CFG_IN_SEL(_c, _s) (((_s) & 0x7) << (((_c) % 2) ? 4 : 12))
+#define NI660X_IO_CFG_IN_SEL_MASK(_c) NI660X_IO_CFG_IN_SEL((_c), 0x7)
-struct NI_660xRegisterData {
- const char *name; /* Register Name */
+struct ni_660x_register_data {
int offset; /* Offset from base address from GPCT chip */
- enum ni_660x_register_direction direction;
- enum ni_660x_register_width size; /* 1 byte, 2 bytes, or 4 bytes */
-};
-
-static const struct NI_660xRegisterData registerData[NI660X_NUM_REGS] = {
- {"G0 Interrupt Acknowledge", 0x004, NI_660x_WRITE, DATA_2B},
- {"G0 Status Register", 0x004, NI_660x_READ, DATA_2B},
- {"G1 Interrupt Acknowledge", 0x006, NI_660x_WRITE, DATA_2B},
- {"G1 Status Register", 0x006, NI_660x_READ, DATA_2B},
- {"G01 Status Register ", 0x008, NI_660x_READ, DATA_2B},
- {"G0 Command Register", 0x00C, NI_660x_WRITE, DATA_2B},
- {"STC DIO Parallel Input", 0x00E, NI_660x_READ, DATA_2B},
- {"G1 Command Register", 0x00E, NI_660x_WRITE, DATA_2B},
- {"G0 HW Save Register", 0x010, NI_660x_READ, DATA_4B},
- {"G1 HW Save Register", 0x014, NI_660x_READ, DATA_4B},
- {"STC DIO Output", 0x014, NI_660x_WRITE, DATA_2B},
- {"STC DIO Control", 0x016, NI_660x_WRITE, DATA_2B},
- {"G0 SW Save Register", 0x018, NI_660x_READ, DATA_4B},
- {"G1 SW Save Register", 0x01C, NI_660x_READ, DATA_4B},
- {"G0 Mode Register", 0x034, NI_660x_WRITE, DATA_2B},
- {"G01 Joint Status 1 Register", 0x036, NI_660x_READ, DATA_2B},
- {"G1 Mode Register", 0x036, NI_660x_WRITE, DATA_2B},
- {"STC DIO Serial Input", 0x038, NI_660x_READ, DATA_2B},
- {"G0 Load A Register", 0x038, NI_660x_WRITE, DATA_4B},
- {"G01 Joint Status 2 Register", 0x03A, NI_660x_READ, DATA_2B},
- {"G0 Load B Register", 0x03C, NI_660x_WRITE, DATA_4B},
- {"G1 Load A Register", 0x040, NI_660x_WRITE, DATA_4B},
- {"G1 Load B Register", 0x044, NI_660x_WRITE, DATA_4B},
- {"G0 Input Select Register", 0x048, NI_660x_WRITE, DATA_2B},
- {"G1 Input Select Register", 0x04A, NI_660x_WRITE, DATA_2B},
- {"G0 Autoincrement Register", 0x088, NI_660x_WRITE, DATA_2B},
- {"G1 Autoincrement Register", 0x08A, NI_660x_WRITE, DATA_2B},
- {"G01 Joint Reset Register", 0x090, NI_660x_WRITE, DATA_2B},
- {"G0 Interrupt Enable", 0x092, NI_660x_WRITE, DATA_2B},
- {"G1 Interrupt Enable", 0x096, NI_660x_WRITE, DATA_2B},
- {"G0 Counting Mode Register", 0x0B0, NI_660x_WRITE, DATA_2B},
- {"G1 Counting Mode Register", 0x0B2, NI_660x_WRITE, DATA_2B},
- {"G0 Second Gate Register", 0x0B4, NI_660x_WRITE, DATA_2B},
- {"G1 Second Gate Register", 0x0B6, NI_660x_WRITE, DATA_2B},
- {"G0 DMA Config Register", 0x0B8, NI_660x_WRITE, DATA_2B},
- {"G0 DMA Status Register", 0x0B8, NI_660x_READ, DATA_2B},
- {"G1 DMA Config Register", 0x0BA, NI_660x_WRITE, DATA_2B},
- {"G1 DMA Status Register", 0x0BA, NI_660x_READ, DATA_2B},
- {"G2 Interrupt Acknowledge", 0x104, NI_660x_WRITE, DATA_2B},
- {"G2 Status Register", 0x104, NI_660x_READ, DATA_2B},
- {"G3 Interrupt Acknowledge", 0x106, NI_660x_WRITE, DATA_2B},
- {"G3 Status Register", 0x106, NI_660x_READ, DATA_2B},
- {"G23 Status Register", 0x108, NI_660x_READ, DATA_2B},
- {"G2 Command Register", 0x10C, NI_660x_WRITE, DATA_2B},
- {"G3 Command Register", 0x10E, NI_660x_WRITE, DATA_2B},
- {"G2 HW Save Register", 0x110, NI_660x_READ, DATA_4B},
- {"G3 HW Save Register", 0x114, NI_660x_READ, DATA_4B},
- {"G2 SW Save Register", 0x118, NI_660x_READ, DATA_4B},
- {"G3 SW Save Register", 0x11C, NI_660x_READ, DATA_4B},
- {"G2 Mode Register", 0x134, NI_660x_WRITE, DATA_2B},
- {"G23 Joint Status 1 Register", 0x136, NI_660x_READ, DATA_2B},
- {"G3 Mode Register", 0x136, NI_660x_WRITE, DATA_2B},
- {"G2 Load A Register", 0x138, NI_660x_WRITE, DATA_4B},
- {"G23 Joint Status 2 Register", 0x13A, NI_660x_READ, DATA_2B},
- {"G2 Load B Register", 0x13C, NI_660x_WRITE, DATA_4B},
- {"G3 Load A Register", 0x140, NI_660x_WRITE, DATA_4B},
- {"G3 Load B Register", 0x144, NI_660x_WRITE, DATA_4B},
- {"G2 Input Select Register", 0x148, NI_660x_WRITE, DATA_2B},
- {"G3 Input Select Register", 0x14A, NI_660x_WRITE, DATA_2B},
- {"G2 Autoincrement Register", 0x188, NI_660x_WRITE, DATA_2B},
- {"G3 Autoincrement Register", 0x18A, NI_660x_WRITE, DATA_2B},
- {"G23 Joint Reset Register", 0x190, NI_660x_WRITE, DATA_2B},
- {"G2 Interrupt Enable", 0x192, NI_660x_WRITE, DATA_2B},
- {"G3 Interrupt Enable", 0x196, NI_660x_WRITE, DATA_2B},
- {"G2 Counting Mode Register", 0x1B0, NI_660x_WRITE, DATA_2B},
- {"G3 Counting Mode Register", 0x1B2, NI_660x_WRITE, DATA_2B},
- {"G3 Second Gate Register", 0x1B6, NI_660x_WRITE, DATA_2B},
- {"G2 Second Gate Register", 0x1B4, NI_660x_WRITE, DATA_2B},
- {"G2 DMA Config Register", 0x1B8, NI_660x_WRITE, DATA_2B},
- {"G2 DMA Status Register", 0x1B8, NI_660x_READ, DATA_2B},
- {"G3 DMA Config Register", 0x1BA, NI_660x_WRITE, DATA_2B},
- {"G3 DMA Status Register", 0x1BA, NI_660x_READ, DATA_2B},
- {"32 bit Digital Input", 0x414, NI_660x_READ, DATA_4B},
- {"32 bit Digital Output", 0x510, NI_660x_WRITE, DATA_4B},
- {"Clock Config Register", 0x73C, NI_660x_WRITE, DATA_4B},
- {"Global Interrupt Status Register", 0x754, NI_660x_READ, DATA_4B},
- {"DMA Configuration Register", 0x76C, NI_660x_WRITE, DATA_4B},
- {"Global Interrupt Config Register", 0x770, NI_660x_WRITE, DATA_4B},
- {"IO Config Register 0-1", 0x77C, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 2-3", 0x77E, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 4-5", 0x780, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 6-7", 0x782, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 8-9", 0x784, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 10-11", 0x786, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 12-13", 0x788, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 14-15", 0x78A, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 16-17", 0x78C, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 18-19", 0x78E, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 20-21", 0x790, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 22-23", 0x792, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 24-25", 0x794, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 26-27", 0x796, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 28-29", 0x798, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 30-31", 0x79A, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 32-33", 0x79C, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 34-35", 0x79E, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 36-37", 0x7A0, NI_660x_READ_WRITE, DATA_2B},
- {"IO Config Register 38-39", 0x7A2, NI_660x_READ_WRITE, DATA_2B}
-};
-
-/* kind of ENABLE for the second counter */
-enum clock_config_register_bits {
- CounterSwap = 0x1 << 21
-};
-
-/* ioconfigreg */
-static inline unsigned ioconfig_bitshift(unsigned pfi_channel)
-{
- return (pfi_channel % 2) ? 0 : 8;
-}
-
-static inline unsigned pfi_output_select_mask(unsigned pfi_channel)
-{
- return 0x3 << ioconfig_bitshift(pfi_channel);
-}
-
-static inline unsigned pfi_output_select_bits(unsigned pfi_channel,
- unsigned output_select)
-{
- return (output_select & 0x3) << ioconfig_bitshift(pfi_channel);
-}
-
-static inline unsigned pfi_input_select_mask(unsigned pfi_channel)
-{
- return 0x7 << (4 + ioconfig_bitshift(pfi_channel));
-}
-
-static inline unsigned pfi_input_select_bits(unsigned pfi_channel,
- unsigned input_select)
-{
- return (input_select & 0x7) << (4 + ioconfig_bitshift(pfi_channel));
-}
-
-/* dma configuration register bits */
-static inline unsigned dma_select_mask(unsigned dma_channel)
-{
- BUG_ON(dma_channel >= MAX_DMA_CHANNEL);
- return 0x1f << (8 * dma_channel);
-}
-
-enum dma_selection {
- dma_selection_none = 0x1f,
-};
-
-static inline unsigned dma_select_bits(unsigned dma_channel, unsigned selection)
-{
- BUG_ON(dma_channel >= MAX_DMA_CHANNEL);
- return (selection << (8 * dma_channel)) & dma_select_mask(dma_channel);
-}
-
-static inline unsigned dma_reset_bit(unsigned dma_channel)
-{
- BUG_ON(dma_channel >= MAX_DMA_CHANNEL);
- return 0x80 << (8 * dma_channel);
-}
-
-enum global_interrupt_status_register_bits {
- Counter_0_Int_Bit = 0x100,
- Counter_1_Int_Bit = 0x200,
- Counter_2_Int_Bit = 0x400,
- Counter_3_Int_Bit = 0x800,
- Cascade_Int_Bit = 0x20000000,
- Global_Int_Bit = 0x80000000
+ char size; /* 2 or 4 bytes */
};
-enum global_interrupt_config_register_bits {
- Cascade_Int_Enable_Bit = 0x20000000,
- Global_Int_Polarity_Bit = 0x40000000,
- Global_Int_Enable_Bit = 0x80000000
+static const struct ni_660x_register_data ni_660x_reg_data[NI660X_NUM_REGS] = {
+ [NITIO_G0_INT_ACK] = { 0x004, 2 }, /* write */
+ [NITIO_G0_STATUS] = { 0x004, 2 }, /* read */
+ [NITIO_G1_INT_ACK] = { 0x006, 2 }, /* write */
+ [NITIO_G1_STATUS] = { 0x006, 2 }, /* read */
+ [NITIO_G01_STATUS] = { 0x008, 2 }, /* read */
+ [NITIO_G0_CMD] = { 0x00c, 2 }, /* write */
+ [NI660X_STC_DIO_PARALLEL_INPUT] = { 0x00e, 2 }, /* read */
+ [NITIO_G1_CMD] = { 0x00e, 2 }, /* write */
+ [NITIO_G0_HW_SAVE] = { 0x010, 4 }, /* read */
+ [NITIO_G1_HW_SAVE] = { 0x014, 4 }, /* read */
+ [NI660X_STC_DIO_OUTPUT] = { 0x014, 2 }, /* write */
+ [NI660X_STC_DIO_CONTROL] = { 0x016, 2 }, /* write */
+ [NITIO_G0_SW_SAVE] = { 0x018, 4 }, /* read */
+ [NITIO_G1_SW_SAVE] = { 0x01c, 4 }, /* read */
+ [NITIO_G0_MODE] = { 0x034, 2 }, /* write */
+ [NITIO_G01_STATUS1] = { 0x036, 2 }, /* read */
+ [NITIO_G1_MODE] = { 0x036, 2 }, /* write */
+ [NI660X_STC_DIO_SERIAL_INPUT] = { 0x038, 2 }, /* read */
+ [NITIO_G0_LOADA] = { 0x038, 4 }, /* write */
+ [NITIO_G01_STATUS2] = { 0x03a, 2 }, /* read */
+ [NITIO_G0_LOADB] = { 0x03c, 4 }, /* write */
+ [NITIO_G1_LOADA] = { 0x040, 4 }, /* write */
+ [NITIO_G1_LOADB] = { 0x044, 4 }, /* write */
+ [NITIO_G0_INPUT_SEL] = { 0x048, 2 }, /* write */
+ [NITIO_G1_INPUT_SEL] = { 0x04a, 2 }, /* write */
+ [NITIO_G0_AUTO_INC] = { 0x088, 2 }, /* write */
+ [NITIO_G1_AUTO_INC] = { 0x08a, 2 }, /* write */
+ [NITIO_G01_RESET] = { 0x090, 2 }, /* write */
+ [NITIO_G0_INT_ENA] = { 0x092, 2 }, /* write */
+ [NITIO_G1_INT_ENA] = { 0x096, 2 }, /* write */
+ [NITIO_G0_CNT_MODE] = { 0x0b0, 2 }, /* write */
+ [NITIO_G1_CNT_MODE] = { 0x0b2, 2 }, /* write */
+ [NITIO_G0_GATE2] = { 0x0b4, 2 }, /* write */
+ [NITIO_G1_GATE2] = { 0x0b6, 2 }, /* write */
+ [NITIO_G0_DMA_CFG] = { 0x0b8, 2 }, /* write */
+ [NITIO_G0_DMA_STATUS] = { 0x0b8, 2 }, /* read */
+ [NITIO_G1_DMA_CFG] = { 0x0ba, 2 }, /* write */
+ [NITIO_G1_DMA_STATUS] = { 0x0ba, 2 }, /* read */
+ [NITIO_G2_INT_ACK] = { 0x104, 2 }, /* write */
+ [NITIO_G2_STATUS] = { 0x104, 2 }, /* read */
+ [NITIO_G3_INT_ACK] = { 0x106, 2 }, /* write */
+ [NITIO_G3_STATUS] = { 0x106, 2 }, /* read */
+ [NITIO_G23_STATUS] = { 0x108, 2 }, /* read */
+ [NITIO_G2_CMD] = { 0x10c, 2 }, /* write */
+ [NITIO_G3_CMD] = { 0x10e, 2 }, /* write */
+ [NITIO_G2_HW_SAVE] = { 0x110, 4 }, /* read */
+ [NITIO_G3_HW_SAVE] = { 0x114, 4 }, /* read */
+ [NITIO_G2_SW_SAVE] = { 0x118, 4 }, /* read */
+ [NITIO_G3_SW_SAVE] = { 0x11c, 4 }, /* read */
+ [NITIO_G2_MODE] = { 0x134, 2 }, /* write */
+ [NITIO_G23_STATUS1] = { 0x136, 2 }, /* read */
+ [NITIO_G3_MODE] = { 0x136, 2 }, /* write */
+ [NITIO_G2_LOADA] = { 0x138, 4 }, /* write */
+ [NITIO_G23_STATUS2] = { 0x13a, 2 }, /* read */
+ [NITIO_G2_LOADB] = { 0x13c, 4 }, /* write */
+ [NITIO_G3_LOADA] = { 0x140, 4 }, /* write */
+ [NITIO_G3_LOADB] = { 0x144, 4 }, /* write */
+ [NITIO_G2_INPUT_SEL] = { 0x148, 2 }, /* write */
+ [NITIO_G3_INPUT_SEL] = { 0x14a, 2 }, /* write */
+ [NITIO_G2_AUTO_INC] = { 0x188, 2 }, /* write */
+ [NITIO_G3_AUTO_INC] = { 0x18a, 2 }, /* write */
+ [NITIO_G23_RESET] = { 0x190, 2 }, /* write */
+ [NITIO_G2_INT_ENA] = { 0x192, 2 }, /* write */
+ [NITIO_G3_INT_ENA] = { 0x196, 2 }, /* write */
+ [NITIO_G2_CNT_MODE] = { 0x1b0, 2 }, /* write */
+ [NITIO_G3_CNT_MODE] = { 0x1b2, 2 }, /* write */
+ [NITIO_G2_GATE2] = { 0x1b4, 2 }, /* write */
+ [NITIO_G3_GATE2] = { 0x1b6, 2 }, /* write */
+ [NITIO_G2_DMA_CFG] = { 0x1b8, 2 }, /* write */
+ [NITIO_G2_DMA_STATUS] = { 0x1b8, 2 }, /* read */
+ [NITIO_G3_DMA_CFG] = { 0x1ba, 2 }, /* write */
+ [NITIO_G3_DMA_STATUS] = { 0x1ba, 2 }, /* read */
+ [NI660X_DIO32_INPUT] = { 0x414, 4 }, /* read */
+ [NI660X_DIO32_OUTPUT] = { 0x510, 4 }, /* write */
+ [NI660X_CLK_CFG] = { 0x73c, 4 }, /* write */
+ [NI660X_GLOBAL_INT_STATUS] = { 0x754, 4 }, /* read */
+ [NI660X_DMA_CFG] = { 0x76c, 4 }, /* write */
+ [NI660X_GLOBAL_INT_CFG] = { 0x770, 4 }, /* write */
+ [NI660X_IO_CFG_0_1] = { 0x77c, 2 }, /* read/write */
+ [NI660X_IO_CFG_2_3] = { 0x77e, 2 }, /* read/write */
+ [NI660X_IO_CFG_4_5] = { 0x780, 2 }, /* read/write */
+ [NI660X_IO_CFG_6_7] = { 0x782, 2 }, /* read/write */
+ [NI660X_IO_CFG_8_9] = { 0x784, 2 }, /* read/write */
+ [NI660X_IO_CFG_10_11] = { 0x786, 2 }, /* read/write */
+ [NI660X_IO_CFG_12_13] = { 0x788, 2 }, /* read/write */
+ [NI660X_IO_CFG_14_15] = { 0x78a, 2 }, /* read/write */
+ [NI660X_IO_CFG_16_17] = { 0x78c, 2 }, /* read/write */
+ [NI660X_IO_CFG_18_19] = { 0x78e, 2 }, /* read/write */
+ [NI660X_IO_CFG_20_21] = { 0x790, 2 }, /* read/write */
+ [NI660X_IO_CFG_22_23] = { 0x792, 2 }, /* read/write */
+ [NI660X_IO_CFG_24_25] = { 0x794, 2 }, /* read/write */
+ [NI660X_IO_CFG_26_27] = { 0x796, 2 }, /* read/write */
+ [NI660X_IO_CFG_28_29] = { 0x798, 2 }, /* read/write */
+ [NI660X_IO_CFG_30_31] = { 0x79a, 2 }, /* read/write */
+ [NI660X_IO_CFG_32_33] = { 0x79c, 2 }, /* read/write */
+ [NI660X_IO_CFG_34_35] = { 0x79e, 2 }, /* read/write */
+ [NI660X_IO_CFG_36_37] = { 0x7a0, 2 }, /* read/write */
+ [NI660X_IO_CFG_38_39] = { 0x7a2, 2 } /* read/write */
};
-/* Offset of the GPCT chips from the base-address of the card */
-/* First chip is at base-address + 0x00, etc. */
-static const unsigned GPCT_OFFSET[2] = { 0x0, 0x800 };
+#define NI660X_CHIP_OFFSET 0x800
enum ni_660x_boardid {
BOARD_PCI6601,
@@ -385,7 +216,7 @@ enum ni_660x_boardid {
struct ni_660x_board {
const char *name;
- unsigned n_chips; /* total number of TIO chips */
+ unsigned int n_chips; /* total number of TIO chips */
};
static const struct ni_660x_board ni_660x_boards[] = {
@@ -411,280 +242,95 @@ static const struct ni_660x_board ni_660x_boards[] = {
},
};
-#define NI_660X_MAX_NUM_CHIPS 2
-#define NI_660X_MAX_NUM_COUNTERS (NI_660X_MAX_NUM_CHIPS * counters_per_chip)
+#define NI660X_NUM_PFI_CHANNELS 40
+
+/* there are only up to 3 dma channels, but the register layout allows for 4 */
+#define NI660X_MAX_DMA_CHANNEL 4
+
+#define NI660X_COUNTERS_PER_CHIP 4
+#define NI660X_MAX_CHIPS 2
+#define NI660X_MAX_COUNTERS (NI660X_MAX_CHIPS * \
+ NI660X_COUNTERS_PER_CHIP)
struct ni_660x_private {
- struct mite_struct *mite;
+ struct mite *mite;
struct ni_gpct_device *counter_dev;
- uint64_t pfi_direction_bits;
- struct mite_dma_descriptor_ring
- *mite_rings[NI_660X_MAX_NUM_CHIPS][counters_per_chip];
+ struct mite_ring *ring[NI660X_MAX_CHIPS][NI660X_COUNTERS_PER_CHIP];
+ /* protects mite channel request/release */
spinlock_t mite_channel_lock;
- /* interrupt_lock prevents races between interrupt and comedi_poll */
+ /* prevents races between interrupt and comedi_poll */
spinlock_t interrupt_lock;
- unsigned dma_configuration_soft_copies[NI_660X_MAX_NUM_CHIPS];
- spinlock_t soft_reg_copy_lock;
- unsigned short pfi_output_selects[NUM_PFI_CHANNELS];
+ unsigned int dma_cfg[NI660X_MAX_CHIPS];
+ unsigned int io_cfg[NI660X_NUM_PFI_CHANNELS];
+ u64 io_dir;
};
-static inline unsigned ni_660x_num_counters(struct comedi_device *dev)
-{
- const struct ni_660x_board *board = dev->board_ptr;
-
- return board->n_chips * counters_per_chip;
-}
-
-static enum ni_660x_register ni_gpct_to_660x_register(enum ni_gpct_register reg)
-{
- switch (reg) {
- case NITIO_G0_AUTO_INC:
- return NI660X_G0_AUTO_INC;
- case NITIO_G1_AUTO_INC:
- return NI660X_G1_AUTO_INC;
- case NITIO_G2_AUTO_INC:
- return NI660X_G2_AUTO_INC;
- case NITIO_G3_AUTO_INC:
- return NI660X_G3_AUTO_INC;
- case NITIO_G0_CMD:
- return NI660X_G0_CMD;
- case NITIO_G1_CMD:
- return NI660X_G1_CMD;
- case NITIO_G2_CMD:
- return NI660X_G2_CMD;
- case NITIO_G3_CMD:
- return NI660X_G3_CMD;
- case NITIO_G0_HW_SAVE:
- return NI660X_G0_HW_SAVE;
- case NITIO_G1_HW_SAVE:
- return NI660X_G1_HW_SAVE;
- case NITIO_G2_HW_SAVE:
- return NI660X_G2_HW_SAVE;
- case NITIO_G3_HW_SAVE:
- return NI660X_G3_HW_SAVE;
- case NITIO_G0_SW_SAVE:
- return NI660X_G0_SW_SAVE;
- case NITIO_G1_SW_SAVE:
- return NI660X_G1_SW_SAVE;
- case NITIO_G2_SW_SAVE:
- return NI660X_G2_SW_SAVE;
- case NITIO_G3_SW_SAVE:
- return NI660X_G3_SW_SAVE;
- case NITIO_G0_MODE:
- return NI660X_G0_MODE;
- case NITIO_G1_MODE:
- return NI660X_G1_MODE;
- case NITIO_G2_MODE:
- return NI660X_G2_MODE;
- case NITIO_G3_MODE:
- return NI660X_G3_MODE;
- case NITIO_G0_LOADA:
- return NI660X_G0_LOADA;
- case NITIO_G1_LOADA:
- return NI660X_G1_LOADA;
- case NITIO_G2_LOADA:
- return NI660X_G2_LOADA;
- case NITIO_G3_LOADA:
- return NI660X_G3_LOADA;
- case NITIO_G0_LOADB:
- return NI660X_G0_LOADB;
- case NITIO_G1_LOADB:
- return NI660X_G1_LOADB;
- case NITIO_G2_LOADB:
- return NI660X_G2_LOADB;
- case NITIO_G3_LOADB:
- return NI660X_G3_LOADB;
- case NITIO_G0_INPUT_SEL:
- return NI660X_G0_INPUT_SEL;
- case NITIO_G1_INPUT_SEL:
- return NI660X_G1_INPUT_SEL;
- case NITIO_G2_INPUT_SEL:
- return NI660X_G2_INPUT_SEL;
- case NITIO_G3_INPUT_SEL:
- return NI660X_G3_INPUT_SEL;
- case NITIO_G01_STATUS:
- return NI660X_G01_STATUS;
- case NITIO_G23_STATUS:
- return NI660X_G23_STATUS;
- case NITIO_G01_RESET:
- return NI660X_G01_RESET;
- case NITIO_G23_RESET:
- return NI660X_G23_RESET;
- case NITIO_G01_STATUS1:
- return NI660X_G01_STATUS1;
- case NITIO_G23_STATUS1:
- return NI660X_G23_STATUS1;
- case NITIO_G01_STATUS2:
- return NI660X_G01_STATUS2;
- case NITIO_G23_STATUS2:
- return NI660X_G23_STATUS2;
- case NITIO_G0_CNT_MODE:
- return NI660X_G0_CNT_MODE;
- case NITIO_G1_CNT_MODE:
- return NI660X_G1_CNT_MODE;
- case NITIO_G2_CNT_MODE:
- return NI660X_G2_CNT_MODE;
- case NITIO_G3_CNT_MODE:
- return NI660X_G3_CNT_MODE;
- case NITIO_G0_GATE2:
- return NI660X_G0_GATE2;
- case NITIO_G1_GATE2:
- return NI660X_G1_GATE2;
- case NITIO_G2_GATE2:
- return NI660X_G2_GATE2;
- case NITIO_G3_GATE2:
- return NI660X_G3_GATE2;
- case NITIO_G0_DMA_CFG:
- return NI660X_G0_DMA_CFG;
- case NITIO_G0_DMA_STATUS:
- return NI660X_G0_DMA_STATUS;
- case NITIO_G1_DMA_CFG:
- return NI660X_G1_DMA_CFG;
- case NITIO_G1_DMA_STATUS:
- return NI660X_G1_DMA_STATUS;
- case NITIO_G2_DMA_CFG:
- return NI660X_G2_DMA_CFG;
- case NITIO_G2_DMA_STATUS:
- return NI660X_G2_DMA_STATUS;
- case NITIO_G3_DMA_CFG:
- return NI660X_G3_DMA_CFG;
- case NITIO_G3_DMA_STATUS:
- return NI660X_G3_DMA_STATUS;
- case NITIO_G0_INT_ACK:
- return NI660X_G0_INT_ACK;
- case NITIO_G1_INT_ACK:
- return NI660X_G1_INT_ACK;
- case NITIO_G2_INT_ACK:
- return NI660X_G2_INT_ACK;
- case NITIO_G3_INT_ACK:
- return NI660X_G3_INT_ACK;
- case NITIO_G0_STATUS:
- return NI660X_G0_STATUS;
- case NITIO_G1_STATUS:
- return NI660X_G1_STATUS;
- case NITIO_G2_STATUS:
- return NI660X_G2_STATUS;
- case NITIO_G3_STATUS:
- return NI660X_G3_STATUS;
- case NITIO_G0_INT_ENA:
- return NI660X_G0_INT_ENA;
- case NITIO_G1_INT_ENA:
- return NI660X_G1_INT_ENA;
- case NITIO_G2_INT_ENA:
- return NI660X_G2_INT_ENA;
- case NITIO_G3_INT_ENA:
- return NI660X_G3_INT_ENA;
- default:
- BUG();
- return 0;
- }
-}
-
-static inline void ni_660x_write_register(struct comedi_device *dev,
- unsigned chip, unsigned bits,
- enum ni_660x_register reg)
+static void ni_660x_write(struct comedi_device *dev, unsigned int chip,
+ unsigned int bits, unsigned int reg)
{
- unsigned int addr = GPCT_OFFSET[chip] + registerData[reg].offset;
+ unsigned int addr = (chip * NI660X_CHIP_OFFSET) +
+ ni_660x_reg_data[reg].offset;
- switch (registerData[reg].size) {
- case DATA_2B:
+ if (ni_660x_reg_data[reg].size == 2)
writew(bits, dev->mmio + addr);
- break;
- case DATA_4B:
+ else
writel(bits, dev->mmio + addr);
- break;
- default:
- BUG();
- break;
- }
}
-static inline unsigned ni_660x_read_register(struct comedi_device *dev,
- unsigned chip,
- enum ni_660x_register reg)
+static unsigned int ni_660x_read(struct comedi_device *dev,
+ unsigned int chip, unsigned int reg)
{
- unsigned int addr = GPCT_OFFSET[chip] + registerData[reg].offset;
+ unsigned int addr = (chip * NI660X_CHIP_OFFSET) +
+ ni_660x_reg_data[reg].offset;
- switch (registerData[reg].size) {
- case DATA_2B:
+ if (ni_660x_reg_data[reg].size == 2)
return readw(dev->mmio + addr);
- case DATA_4B:
- return readl(dev->mmio + addr);
- default:
- BUG();
- break;
- }
- return 0;
+ return readl(dev->mmio + addr);
}
-static void ni_gpct_write_register(struct ni_gpct *counter, unsigned bits,
- enum ni_gpct_register reg)
+static void ni_660x_gpct_write(struct ni_gpct *counter, unsigned int bits,
+ enum ni_gpct_register reg)
{
struct comedi_device *dev = counter->counter_dev->dev;
- enum ni_660x_register ni_660x_register = ni_gpct_to_660x_register(reg);
- unsigned chip = counter->chip_index;
- ni_660x_write_register(dev, chip, bits, ni_660x_register);
+ ni_660x_write(dev, counter->chip_index, bits, reg);
}
-static unsigned ni_gpct_read_register(struct ni_gpct *counter,
+static unsigned int ni_660x_gpct_read(struct ni_gpct *counter,
enum ni_gpct_register reg)
{
struct comedi_device *dev = counter->counter_dev->dev;
- enum ni_660x_register ni_660x_register = ni_gpct_to_660x_register(reg);
- unsigned chip = counter->chip_index;
-
- return ni_660x_read_register(dev, chip, ni_660x_register);
-}
-
-static inline struct mite_dma_descriptor_ring *mite_ring(struct ni_660x_private
- *priv,
- struct ni_gpct
- *counter)
-{
- unsigned chip = counter->chip_index;
- return priv->mite_rings[chip][counter->counter_index];
+ return ni_660x_read(dev, counter->chip_index, reg);
}
static inline void ni_660x_set_dma_channel(struct comedi_device *dev,
- unsigned mite_channel,
+ unsigned int mite_channel,
struct ni_gpct *counter)
{
struct ni_660x_private *devpriv = dev->private;
- unsigned chip = counter->chip_index;
- unsigned long flags;
-
- spin_lock_irqsave(&devpriv->soft_reg_copy_lock, flags);
- devpriv->dma_configuration_soft_copies[chip] &=
- ~dma_select_mask(mite_channel);
- devpriv->dma_configuration_soft_copies[chip] |=
- dma_select_bits(mite_channel, counter->counter_index);
- ni_660x_write_register(dev, chip,
- devpriv->dma_configuration_soft_copies[chip] |
- dma_reset_bit(mite_channel), NI660X_DMA_CFG);
+ unsigned int chip = counter->chip_index;
+
+ devpriv->dma_cfg[chip] &= ~NI660X_DMA_CFG_SEL_MASK(mite_channel);
+ devpriv->dma_cfg[chip] |= NI660X_DMA_CFG_SEL(mite_channel,
+ counter->counter_index);
+ ni_660x_write(dev, chip, devpriv->dma_cfg[chip] |
+ NI660X_DMA_CFG_RESET(mite_channel),
+ NI660X_DMA_CFG);
mmiowb();
- spin_unlock_irqrestore(&devpriv->soft_reg_copy_lock, flags);
}
static inline void ni_660x_unset_dma_channel(struct comedi_device *dev,
- unsigned mite_channel,
+ unsigned int mite_channel,
struct ni_gpct *counter)
{
struct ni_660x_private *devpriv = dev->private;
- unsigned chip = counter->chip_index;
- unsigned long flags;
+ unsigned int chip = counter->chip_index;
- spin_lock_irqsave(&devpriv->soft_reg_copy_lock, flags);
- devpriv->dma_configuration_soft_copies[chip] &=
- ~dma_select_mask(mite_channel);
- devpriv->dma_configuration_soft_copies[chip] |=
- dma_select_bits(mite_channel, dma_selection_none);
- ni_660x_write_register(dev, chip,
- devpriv->dma_configuration_soft_copies[chip],
- NI660X_DMA_CFG);
+ devpriv->dma_cfg[chip] &= ~NI660X_DMA_CFG_SEL_MASK(mite_channel);
+ devpriv->dma_cfg[chip] |= NI660X_DMA_CFG_SEL_NONE(mite_channel);
+ ni_660x_write(dev, chip, devpriv->dma_cfg[chip], NI660X_DMA_CFG);
mmiowb();
- spin_unlock_irqrestore(&devpriv->soft_reg_copy_lock, flags);
}
static int ni_660x_request_mite_channel(struct comedi_device *dev,
@@ -692,13 +338,13 @@ static int ni_660x_request_mite_channel(struct comedi_device *dev,
enum comedi_io_direction direction)
{
struct ni_660x_private *devpriv = dev->private;
- unsigned long flags;
+ struct mite_ring *ring;
struct mite_channel *mite_chan;
+ unsigned long flags;
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- BUG_ON(counter->mite_chan);
- mite_chan = mite_request_channel(devpriv->mite,
- mite_ring(devpriv, counter));
+ ring = devpriv->ring[counter->chip_index][counter->counter_index];
+ mite_chan = mite_request_channel(devpriv->mite, ring);
if (!mite_chan) {
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
dev_err(dev->class_dev,
@@ -757,7 +403,7 @@ static int ni_660x_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
static void set_tio_counterswap(struct comedi_device *dev, int chip)
{
- unsigned bits = 0;
+ unsigned int bits = 0;
/*
* See P. 3.5 of the Register-Level Programming manual.
@@ -766,9 +412,9 @@ static void set_tio_counterswap(struct comedi_device *dev, int chip)
* first chip.
*/
if (chip)
- bits = CounterSwap;
+ bits = NI660X_CLK_CFG_COUNTER_SWAP;
- ni_660x_write_register(dev, chip, bits, NI660X_CLK_CFG);
+ ni_660x_write(dev, chip, bits, NI660X_CLK_CFG);
}
static void ni_660x_handle_gpct_interrupt(struct comedi_device *dev,
@@ -785,17 +431,20 @@ static irqreturn_t ni_660x_interrupt(int irq, void *d)
struct comedi_device *dev = d;
struct ni_660x_private *devpriv = dev->private;
struct comedi_subdevice *s;
- unsigned i;
+ unsigned int i;
unsigned long flags;
if (!dev->attached)
return IRQ_NONE;
+ /* make sure dev->attached is checked before doing anything else */
+ smp_mb();
+
/* lock to avoid race with comedi_poll */
spin_lock_irqsave(&devpriv->interrupt_lock, flags);
- smp_mb();
- for (i = 0; i < ni_660x_num_counters(dev); ++i) {
- s = &dev->subdevices[NI_660X_GPCT_SUBDEV(i)];
- ni_660x_handle_gpct_interrupt(dev, s);
+ for (i = 0; i < dev->n_subdevices; ++i) {
+ s = &dev->subdevices[i];
+ if (s->type == COMEDI_SUBD_COUNTER)
+ ni_660x_handle_gpct_interrupt(dev, s);
}
spin_unlock_irqrestore(&devpriv->interrupt_lock, flags);
return IRQ_HANDLED;
@@ -810,7 +459,7 @@ static int ni_660x_input_poll(struct comedi_device *dev,
/* lock to avoid race with comedi_poll */
spin_lock_irqsave(&devpriv->interrupt_lock, flags);
- mite_sync_input_dma(counter->mite_chan, s);
+ mite_sync_dma(counter->mite_chan, s);
spin_unlock_irqrestore(&devpriv->interrupt_lock, flags);
return comedi_buf_read_n_available(s);
}
@@ -820,9 +469,11 @@ static int ni_660x_buf_change(struct comedi_device *dev,
{
struct ni_660x_private *devpriv = dev->private;
struct ni_gpct *counter = s->private;
+ struct mite_ring *ring;
int ret;
- ret = mite_buf_change(mite_ring(devpriv, counter), s);
+ ring = devpriv->ring[counter->chip_index][counter->counter_index];
+ ret = mite_buf_change(ring, s);
if (ret < 0)
return ret;
@@ -832,7 +483,7 @@ static int ni_660x_buf_change(struct comedi_device *dev,
static int ni_660x_allocate_private(struct comedi_device *dev)
{
struct ni_660x_private *devpriv;
- unsigned i;
+ unsigned int i;
devpriv = comedi_alloc_devpriv(dev, sizeof(*devpriv));
if (!devpriv)
@@ -840,9 +491,8 @@ static int ni_660x_allocate_private(struct comedi_device *dev)
spin_lock_init(&devpriv->mite_channel_lock);
spin_lock_init(&devpriv->interrupt_lock);
- spin_lock_init(&devpriv->soft_reg_copy_lock);
- for (i = 0; i < NUM_PFI_CHANNELS; ++i)
- devpriv->pfi_output_selects[i] = pfi_output_select_counter;
+ for (i = 0; i < NI660X_NUM_PFI_CHANNELS; ++i)
+ devpriv->io_cfg[i] = NI_660X_PFI_OUTPUT_COUNTER;
return 0;
}
@@ -851,14 +501,13 @@ static int ni_660x_alloc_mite_rings(struct comedi_device *dev)
{
const struct ni_660x_board *board = dev->board_ptr;
struct ni_660x_private *devpriv = dev->private;
- unsigned i;
- unsigned j;
+ unsigned int i;
+ unsigned int j;
for (i = 0; i < board->n_chips; ++i) {
- for (j = 0; j < counters_per_chip; ++j) {
- devpriv->mite_rings[i][j] =
- mite_alloc_ring(devpriv->mite);
- if (!devpriv->mite_rings[i][j])
+ for (j = 0; j < NI660X_COUNTERS_PER_CHIP; ++j) {
+ devpriv->ring[i][j] = mite_alloc_ring(devpriv->mite);
+ if (!devpriv->ring[i][j])
return -ENOMEM;
}
}
@@ -869,120 +518,101 @@ static void ni_660x_free_mite_rings(struct comedi_device *dev)
{
const struct ni_660x_board *board = dev->board_ptr;
struct ni_660x_private *devpriv = dev->private;
- unsigned i;
- unsigned j;
+ unsigned int i;
+ unsigned int j;
for (i = 0; i < board->n_chips; ++i) {
- for (j = 0; j < counters_per_chip; ++j)
- mite_free_ring(devpriv->mite_rings[i][j]);
- }
-}
-
-static void init_tio_chip(struct comedi_device *dev, int chipset)
-{
- struct ni_660x_private *devpriv = dev->private;
- unsigned i;
-
- /* init dma configuration register */
- devpriv->dma_configuration_soft_copies[chipset] = 0;
- for (i = 0; i < MAX_DMA_CHANNEL; ++i) {
- devpriv->dma_configuration_soft_copies[chipset] |=
- dma_select_bits(i, dma_selection_none) & dma_select_mask(i);
+ for (j = 0; j < NI660X_COUNTERS_PER_CHIP; ++j)
+ mite_free_ring(devpriv->ring[i][j]);
}
- ni_660x_write_register(dev, chipset,
- devpriv->dma_configuration_soft_copies[chipset],
- NI660X_DMA_CFG);
- for (i = 0; i < NUM_PFI_CHANNELS; ++i)
- ni_660x_write_register(dev, chipset, 0, IOConfigReg(i));
}
static int ni_660x_dio_insn_bits(struct comedi_device *dev,
struct comedi_subdevice *s,
- struct comedi_insn *insn, unsigned int *data)
+ struct comedi_insn *insn,
+ unsigned int *data)
{
- unsigned base_bitfield_channel = CR_CHAN(insn->chanspec);
-
- /* Check if we have to write some bits */
- if (data[0]) {
- s->state &= ~(data[0] << base_bitfield_channel);
- s->state |= (data[0] & data[1]) << base_bitfield_channel;
- /* Write out the new digital output lines */
- ni_660x_write_register(dev, 0, s->state, NI660X_DIO32_OUTPUT);
+ unsigned int shift = CR_CHAN(insn->chanspec);
+ unsigned int mask = data[0] << shift;
+ unsigned int bits = data[1] << shift;
+
+ /*
+ * There are 40 channels in this subdevice but only 32 are usable
+ * as DIO. The shift adjusts the mask/bits to account for the base
+ * channel in insn->chanspec. The state update can then be handled
+ * normally for the 32 usable channels.
+ */
+ if (mask) {
+ s->state &= ~mask;
+ s->state |= (bits & mask);
+ ni_660x_write(dev, 0, s->state, NI660X_DIO32_OUTPUT);
}
- /* on return, data[1] contains the value of the digital
- * input and output lines. */
- data[1] = (ni_660x_read_register(dev, 0, NI660X_DIO32_INPUT) >>
- base_bitfield_channel);
+
+ /*
+ * Return the input channels, shifted back to account for the base
+ * channel.
+ */
+ data[1] = ni_660x_read(dev, 0, NI660X_DIO32_INPUT) >> shift;
return insn->n;
}
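
Editor's note: a minimal stand-alone sketch (not part of this patch) of the mask/bits convention the comment in ni_660x_dio_insn_bits() describes: data[0] is a write mask and data[1] the bit values, both relative to the base channel in insn->chanspec. The names dio_update, state and input below are illustrative only.

#include <stdio.h>

static unsigned int dio_update(unsigned int *state, unsigned int input,
			       unsigned int base_chan,
			       unsigned int write_mask, unsigned int bits)
{
	unsigned int mask = write_mask << base_chan;

	if (mask) {
		*state &= ~mask;                       /* clear the selected channels */
		*state |= (bits << base_chan) & mask;  /* set the requested ones */
	}
	/* return the readback, shifted so bit 0 is the base channel */
	return input >> base_chan;
}

int main(void)
{
	unsigned int state = 0;

	/* write 0b101 starting at channel 4; pretend the inputs read back 0xf0 */
	printf("readback=0x%x state=0x%x\n",
	       dio_update(&state, 0xf0, 4, 0x7, 0x5), state);
	return 0;
}
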
static void ni_660x_select_pfi_output(struct comedi_device *dev,
- unsigned pfi_channel,
- unsigned output_select)
+ unsigned int chan, unsigned int out_sel)
{
const struct ni_660x_board *board = dev->board_ptr;
- static const unsigned counter_4_7_first_pfi = 8;
- static const unsigned counter_4_7_last_pfi = 23;
- unsigned active_chipset = 0;
- unsigned idle_chipset = 0;
- unsigned active_bits;
- unsigned idle_bits;
+ unsigned int active_chip = 0;
+ unsigned int idle_chip = 0;
+ unsigned int bits;
if (board->n_chips > 1) {
- if (output_select == pfi_output_select_counter &&
- pfi_channel >= counter_4_7_first_pfi &&
- pfi_channel <= counter_4_7_last_pfi) {
- active_chipset = 1;
- idle_chipset = 0;
+ if (out_sel == NI_660X_PFI_OUTPUT_COUNTER &&
+ chan >= 8 && chan <= 23) {
+ /* counters 4-7 pfi channels */
+ active_chip = 1;
+ idle_chip = 0;
} else {
- active_chipset = 0;
- idle_chipset = 1;
+ /* counters 0-3 pfi channels */
+ active_chip = 0;
+ idle_chip = 1;
}
}
- if (idle_chipset != active_chipset) {
- idle_bits =
- ni_660x_read_register(dev, idle_chipset,
- IOConfigReg(pfi_channel));
- idle_bits &= ~pfi_output_select_mask(pfi_channel);
- idle_bits |=
- pfi_output_select_bits(pfi_channel,
- pfi_output_select_high_Z);
- ni_660x_write_register(dev, idle_chipset, idle_bits,
- IOConfigReg(pfi_channel));
+ if (idle_chip != active_chip) {
+ /* set the pfi channel to high-z on the inactive chip */
+ bits = ni_660x_read(dev, idle_chip, NI660X_IO_CFG(chan));
+ bits &= ~NI660X_IO_CFG_OUT_SEL_MASK(chan);
+ bits |= NI660X_IO_CFG_OUT_SEL(chan, 0); /* high-z */
+ ni_660x_write(dev, idle_chip, bits, NI660X_IO_CFG(chan));
}
- active_bits =
- ni_660x_read_register(dev, active_chipset,
- IOConfigReg(pfi_channel));
- active_bits &= ~pfi_output_select_mask(pfi_channel);
- active_bits |= pfi_output_select_bits(pfi_channel, output_select);
- ni_660x_write_register(dev, active_chipset, active_bits,
- IOConfigReg(pfi_channel));
+ /* set the pfi channel output on the active chip */
+ bits = ni_660x_read(dev, active_chip, NI660X_IO_CFG(chan));
+ bits &= ~NI660X_IO_CFG_OUT_SEL_MASK(chan);
+ bits |= NI660X_IO_CFG_OUT_SEL(chan, out_sel);
+ ni_660x_write(dev, active_chip, bits, NI660X_IO_CFG(chan));
}
-static int ni_660x_set_pfi_routing(struct comedi_device *dev, unsigned chan,
- unsigned source)
+static int ni_660x_set_pfi_routing(struct comedi_device *dev,
+ unsigned int chan, unsigned int source)
{
struct ni_660x_private *devpriv = dev->private;
- if (source > num_pfi_output_selects)
- return -EINVAL;
- if (source == pfi_output_select_high_Z)
- return -EINVAL;
- if (chan < min_counter_pfi_chan) {
- if (source == pfi_output_select_counter)
+ switch (source) {
+ case NI_660X_PFI_OUTPUT_COUNTER:
+ if (chan < 8)
return -EINVAL;
- } else if (chan > max_dio_pfi_chan) {
- if (source == pfi_output_select_do)
+ break;
+ case NI_660X_PFI_OUTPUT_DIO:
+ if (chan > 31)
return -EINVAL;
+ default:
+ return -EINVAL;
}
- devpriv->pfi_output_selects[chan] = source;
- if (devpriv->pfi_direction_bits & (((uint64_t) 1) << chan))
- ni_660x_select_pfi_output(dev, chan,
- devpriv->pfi_output_selects[chan]);
+ devpriv->io_cfg[chan] = source;
+ if (devpriv->io_dir & (1ULL << chan))
+ ni_660x_select_pfi_output(dev, chan, devpriv->io_cfg[chan]);
return 0;
}
@@ -993,25 +623,24 @@ static int ni_660x_dio_insn_config(struct comedi_device *dev,
{
struct ni_660x_private *devpriv = dev->private;
unsigned int chan = CR_CHAN(insn->chanspec);
- uint64_t bit = 1ULL << chan;
+ u64 bit = 1ULL << chan;
unsigned int val;
int ret;
switch (data[0]) {
case INSN_CONFIG_DIO_OUTPUT:
- devpriv->pfi_direction_bits |= bit;
- ni_660x_select_pfi_output(dev, chan,
- devpriv->pfi_output_selects[chan]);
+ devpriv->io_dir |= bit;
+ ni_660x_select_pfi_output(dev, chan, devpriv->io_cfg[chan]);
break;
case INSN_CONFIG_DIO_INPUT:
- devpriv->pfi_direction_bits &= ~bit;
- ni_660x_select_pfi_output(dev, chan, pfi_output_select_high_Z);
+ devpriv->io_dir &= ~bit;
+ ni_660x_select_pfi_output(dev, chan, 0); /* high-z */
break;
case INSN_CONFIG_DIO_QUERY:
- data[1] = (devpriv->pfi_direction_bits & bit) ? COMEDI_OUTPUT
- : COMEDI_INPUT;
+ data[1] = (devpriv->io_dir & bit) ? COMEDI_OUTPUT
+ : COMEDI_INPUT;
break;
case INSN_CONFIG_SET_ROUTING:
@@ -1021,14 +650,14 @@ static int ni_660x_dio_insn_config(struct comedi_device *dev,
break;
case INSN_CONFIG_GET_ROUTING:
- data[1] = devpriv->pfi_output_selects[chan];
+ data[1] = devpriv->io_cfg[chan];
break;
case INSN_CONFIG_FILTER:
- val = ni_660x_read_register(dev, 0, IOConfigReg(chan));
- val &= ~pfi_input_select_mask(chan);
- val |= pfi_input_select_bits(chan, data[1]);
- ni_660x_write_register(dev, 0, val, IOConfigReg(chan));
+ val = ni_660x_read(dev, 0, NI660X_IO_CFG(chan));
+ val &= ~NI660X_IO_CFG_IN_SEL_MASK(chan);
+ val |= NI660X_IO_CFG_IN_SEL(chan, data[1]);
+ ni_660x_write(dev, 0, val, NI660X_IO_CFG(chan));
break;
default:
@@ -1038,6 +667,33 @@ static int ni_660x_dio_insn_config(struct comedi_device *dev,
return insn->n;
}
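
Editor's note: for reference, a hedged user-space sketch of how these INSN_CONFIG_DIO_* requests are normally reached through comedilib. The device node and the DIO subdevice index (shown as 1) are assumptions for illustration only.

#include <stdio.h>
#include <comedilib.h>

int main(void)
{
	comedi_t *dev = comedi_open("/dev/comedi0");
	unsigned int subdev = 1;	/* PFI/DIO subdevice (assumed) */
	unsigned int chan = 3;
	unsigned int val;

	if (!dev)
		return 1;

	/* INSN_CONFIG_DIO_OUTPUT / _INPUT are issued by these helpers */
	comedi_dio_config(dev, subdev, chan, COMEDI_OUTPUT);
	comedi_dio_write(dev, subdev, chan, 1);

	comedi_dio_config(dev, subdev, chan, COMEDI_INPUT);
	comedi_dio_read(dev, subdev, chan, &val);
	printf("chan %u reads %u\n", chan, val);

	comedi_close(dev);
	return 0;
}
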
+static void ni_660x_init_tio_chips(struct comedi_device *dev,
+ unsigned int n_chips)
+{
+ struct ni_660x_private *devpriv = dev->private;
+ unsigned int chip;
+ unsigned int chan;
+
+ /*
+ * We use the ioconfig registers to control dio direction, so zero
+ * output enables in stc dio control reg.
+ */
+ ni_660x_write(dev, 0, 0, NI660X_STC_DIO_CONTROL);
+
+ for (chip = 0; chip < n_chips; ++chip) {
+ /* init dma configuration register */
+ devpriv->dma_cfg[chip] = 0;
+ for (chan = 0; chan < NI660X_MAX_DMA_CHANNEL; ++chan)
+ devpriv->dma_cfg[chip] |= NI660X_DMA_CFG_SEL_NONE(chan);
+ ni_660x_write(dev, chip, devpriv->dma_cfg[chip],
+ NI660X_DMA_CFG);
+
+ /* init ioconfig registers */
+ for (chan = 0; chan < NI660X_NUM_PFI_CHANNELS; ++chan)
+ ni_660x_write(dev, chip, 0, NI660X_IO_CFG(chan));
+ }
+}
+
static int ni_660x_auto_attach(struct comedi_device *dev,
unsigned long context)
{
@@ -1045,9 +701,12 @@ static int ni_660x_auto_attach(struct comedi_device *dev,
const struct ni_660x_board *board = NULL;
struct ni_660x_private *devpriv;
struct comedi_subdevice *s;
+ struct ni_gpct_device *gpct_dev;
+ unsigned int n_counters;
+ int subdev;
int ret;
- unsigned i;
- unsigned global_interrupt_config_bits;
+ unsigned int i;
+ unsigned int global_interrupt_config_bits;
if (context < ARRAY_SIZE(ni_660x_boards))
board = &ni_660x_boards[context];
@@ -1065,91 +724,147 @@ static int ni_660x_auto_attach(struct comedi_device *dev,
return ret;
devpriv = dev->private;
- devpriv->mite = mite_alloc(pcidev);
+ devpriv->mite = mite_attach(dev, true); /* use win1 */
if (!devpriv->mite)
return -ENOMEM;
- ret = mite_setup2(dev, devpriv->mite, true);
- if (ret < 0)
- return ret;
-
ret = ni_660x_alloc_mite_rings(dev);
if (ret < 0)
return ret;
- ret = comedi_alloc_subdevices(dev, 2 + NI_660X_MAX_NUM_COUNTERS);
+ ni_660x_init_tio_chips(dev, board->n_chips);
+
+ n_counters = board->n_chips * NI660X_COUNTERS_PER_CHIP;
+ gpct_dev = ni_gpct_device_construct(dev,
+ ni_660x_gpct_write,
+ ni_660x_gpct_read,
+ ni_gpct_variant_660x,
+ n_counters);
+ if (!gpct_dev)
+ return -ENOMEM;
+ devpriv->counter_dev = gpct_dev;
+
+ ret = comedi_alloc_subdevices(dev, 2 + NI660X_MAX_COUNTERS);
if (ret)
return ret;
- s = &dev->subdevices[0];
+ subdev = 0;
+
+ s = &dev->subdevices[subdev++];
 	/* Old GENERAL-PURPOSE COUNTER/TIMER (GPCT) subdevice, no longer used */
s->type = COMEDI_SUBD_UNUSED;
- s = &dev->subdevices[NI_660X_DIO_SUBDEV];
- /* DIGITAL I/O SUBDEVICE */
- s->type = COMEDI_SUBD_DIO;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
- s->n_chan = NUM_PFI_CHANNELS;
- s->maxdata = 1;
- s->range_table = &range_digital;
- s->insn_bits = ni_660x_dio_insn_bits;
- s->insn_config = ni_660x_dio_insn_config;
- /* we use the ioconfig registers to control dio direction, so zero
- output enables in stc dio control reg */
- ni_660x_write_register(dev, 0, 0, NI660X_STC_DIO_CONTROL);
-
- devpriv->counter_dev = ni_gpct_device_construct(dev,
- &ni_gpct_write_register,
- &ni_gpct_read_register,
- ni_gpct_variant_660x,
- ni_660x_num_counters
- (dev));
- if (!devpriv->counter_dev)
- return -ENOMEM;
- for (i = 0; i < NI_660X_MAX_NUM_COUNTERS; ++i) {
- s = &dev->subdevices[NI_660X_GPCT_SUBDEV(i)];
- if (i < ni_660x_num_counters(dev)) {
- s->type = COMEDI_SUBD_COUNTER;
- s->subdev_flags = SDF_READABLE | SDF_WRITABLE |
+ /*
+ * Digital I/O subdevice
+ *
+ * There are 40 channels but only the first 32 can be digital I/Os.
+ * The last 8 are dedicated to counters 0 and 1.
+ *
+ * Counter 0-3 signals are from the first TIO chip.
+ * Counter 4-7 signals are from the second TIO chip.
+ *
+ * Comedi External
+ * PFI Chan DIO Chan Counter Signal
+ * ------- -------- --------------
+ * 0 0
+ * 1 1
+ * 2 2
+ * 3 3
+ * 4 4
+ * 5 5
+ * 6 6
+ * 7 7
+ * 8 8 CTR 7 OUT
+ * 9 9 CTR 7 AUX
+ * 10 10 CTR 7 GATE
+ * 11 11 CTR 7 SOURCE
+ * 12 12 CTR 6 OUT
+ * 13 13 CTR 6 AUX
+ * 14 14 CTR 6 GATE
+ * 15 15 CTR 6 SOURCE
+ * 16 16 CTR 5 OUT
+ * 17 17 CTR 5 AUX
+ * 18 18 CTR 5 GATE
+ * 19 19 CTR 5 SOURCE
+ * 20 20 CTR 4 OUT
+ * 21 21 CTR 4 AUX
+ * 22 22 CTR 4 GATE
+ * 23 23 CTR 4 SOURCE
+ * 24 24 CTR 3 OUT
+ * 25 25 CTR 3 AUX
+ * 26 26 CTR 3 GATE
+ * 27 27 CTR 3 SOURCE
+ * 28 28 CTR 2 OUT
+ * 29 29 CTR 2 AUX
+ * 30 30 CTR 2 GATE
+ * 31 31 CTR 2 SOURCE
+ * 32 CTR 1 OUT
+ * 33 CTR 1 AUX
+ * 34 CTR 1 GATE
+ * 35 CTR 1 SOURCE
+ * 36 CTR 0 OUT
+ * 37 CTR 0 AUX
+ * 38 CTR 0 GATE
+ * 39 CTR 0 SOURCE
+ */
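
Editor's note: the table above is regular enough to compute. The following stand-alone helper is hypothetical, not driver code; it just reproduces the mapping from a PFI channel to its counter and signal name.

#include <stdio.h>

static const char * const pfi_signal[] = { "OUT", "AUX", "GATE", "SOURCE" };

static void pfi_chan_to_counter(unsigned int chan)
{
	if (chan < 8) {
		printf("PFI %u: plain DIO\n", chan);
		return;
	}
	/* chan 8..39: counters 7 down to 0, four signals per counter */
	printf("PFI %u: CTR %u %s\n", chan, 9 - chan / 4, pfi_signal[chan % 4]);
}

int main(void)
{
	unsigned int chan;

	for (chan = 0; chan < 40; chan++)
		pfi_chan_to_counter(chan);
	return 0;
}
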
+ s = &dev->subdevices[subdev++];
+ s->type = COMEDI_SUBD_DIO;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE;
+ s->n_chan = NI660X_NUM_PFI_CHANNELS;
+ s->maxdata = 1;
+ s->range_table = &range_digital;
+ s->insn_bits = ni_660x_dio_insn_bits;
+ s->insn_config = ni_660x_dio_insn_config;
+
+ /*
+ * Default the DIO channels as:
+ * chan 0-7: DIO inputs
+ * chan 8-39: counter signal inputs
+ */
+ for (i = 0; i < s->n_chan; ++i) {
+ unsigned int source = (i < 8) ? NI_660X_PFI_OUTPUT_DIO
+ : NI_660X_PFI_OUTPUT_COUNTER;
+
+ ni_660x_set_pfi_routing(dev, i, source);
+ ni_660x_select_pfi_output(dev, i, 0); /* high-z */
+ }
+
+ /* Counter subdevices (4 NI TIO General Purpose Counters per chip) */
+ for (i = 0; i < NI660X_MAX_COUNTERS; ++i) {
+ s = &dev->subdevices[subdev++];
+ if (i < n_counters) {
+ struct ni_gpct *counter = &gpct_dev->counters[i];
+
+ counter->chip_index = i / NI660X_COUNTERS_PER_CHIP;
+ counter->counter_index = i % NI660X_COUNTERS_PER_CHIP;
+
+ s->type = COMEDI_SUBD_COUNTER;
+ s->subdev_flags = SDF_READABLE | SDF_WRITABLE |
SDF_LSAMPL | SDF_CMD_READ;
- s->n_chan = 3;
- s->maxdata = 0xffffffff;
- s->insn_read = ni_tio_insn_read;
- s->insn_write = ni_tio_insn_write;
- s->insn_config = ni_tio_insn_config;
- s->do_cmd = &ni_660x_cmd;
- s->len_chanlist = 1;
- s->do_cmdtest = ni_tio_cmdtest;
- s->cancel = &ni_660x_cancel;
- s->poll = &ni_660x_input_poll;
+ s->n_chan = 3;
+ s->maxdata = 0xffffffff;
+ s->insn_read = ni_tio_insn_read;
+ s->insn_write = ni_tio_insn_write;
+ s->insn_config = ni_tio_insn_config;
+ s->len_chanlist = 1;
+ s->do_cmd = ni_660x_cmd;
+ s->do_cmdtest = ni_tio_cmdtest;
+ s->cancel = ni_660x_cancel;
+ s->poll = ni_660x_input_poll;
+ s->buf_change = ni_660x_buf_change;
s->async_dma_dir = DMA_BIDIRECTIONAL;
- s->buf_change = &ni_660x_buf_change;
- s->private = &devpriv->counter_dev->counters[i];
+ s->private = counter;
- devpriv->counter_dev->counters[i].chip_index =
- i / counters_per_chip;
- devpriv->counter_dev->counters[i].counter_index =
- i % counters_per_chip;
+ ni_tio_init_counter(counter);
} else {
- s->type = COMEDI_SUBD_UNUSED;
+ s->type = COMEDI_SUBD_UNUSED;
}
}
- for (i = 0; i < board->n_chips; ++i)
- init_tio_chip(dev, i);
-
- for (i = 0; i < ni_660x_num_counters(dev); ++i)
- ni_tio_init_counter(&devpriv->counter_dev->counters[i]);
-
- for (i = 0; i < NUM_PFI_CHANNELS; ++i) {
- if (i < min_counter_pfi_chan)
- ni_660x_set_pfi_routing(dev, i, pfi_output_select_do);
- else
- ni_660x_set_pfi_routing(dev, i,
- pfi_output_select_counter);
- ni_660x_select_pfi_output(dev, i, pfi_output_select_high_Z);
- }
- /* to be safe, set counterswap bits on tio chips after all the counter
- outputs have been set to high impedance mode */
+
+ /*
+ * To be safe, set counterswap bits on tio chips after all the counter
+ * outputs have been set to high impedance mode.
+ */
for (i = 0; i < board->n_chips; ++i)
set_tio_counterswap(dev, i);
@@ -1160,11 +875,11 @@ static int ni_660x_auto_attach(struct comedi_device *dev,
return ret;
}
dev->irq = pcidev->irq;
- global_interrupt_config_bits = Global_Int_Enable_Bit;
+ global_interrupt_config_bits = NI660X_GLOBAL_INT_GLOBAL;
if (board->n_chips > 1)
- global_interrupt_config_bits |= Cascade_Int_Enable_Bit;
- ni_660x_write_register(dev, 0, global_interrupt_config_bits,
- NI660X_GLOBAL_INT_CFG);
+ global_interrupt_config_bits |= NI660X_GLOBAL_INT_CASCADE;
+ ni_660x_write(dev, 0, global_interrupt_config_bits,
+ NI660X_GLOBAL_INT_CFG);
return 0;
}
@@ -1173,11 +888,12 @@ static void ni_660x_detach(struct comedi_device *dev)
{
struct ni_660x_private *devpriv = dev->private;
- if (dev->irq)
+ if (dev->irq) {
+ ni_660x_write(dev, 0, 0, NI660X_GLOBAL_INT_CFG);
free_irq(dev->irq, dev);
+ }
if (devpriv) {
- if (devpriv->counter_dev)
- ni_gpct_device_destroy(devpriv->counter_dev);
+ ni_gpct_device_destroy(devpriv->counter_dev);
ni_660x_free_mite_rings(dev);
mite_detach(devpriv->mite);
}
@@ -1218,5 +934,5 @@ static struct pci_driver ni_660x_pci_driver = {
module_comedi_pci_driver(ni_660x_driver, ni_660x_pci_driver);
MODULE_AUTHOR("Comedi http://www.comedi.org");
-MODULE_DESCRIPTION("Comedi low-level driver");
+MODULE_DESCRIPTION("Comedi driver for NI 660x counter/timer boards");
MODULE_LICENSE("GPL");
diff --git a/drivers/staging/comedi/drivers/ni_labpc.h b/drivers/staging/comedi/drivers/ni_labpc.h
index 83f878adbd53..be8d5cd3f7f0 100644
--- a/drivers/staging/comedi/drivers/ni_labpc.h
+++ b/drivers/staging/comedi/drivers/ni_labpc.h
@@ -1,27 +1,22 @@
/*
- ni_labpc.h
-
- Header for ni_labpc.c and ni_labpc_cs.c
-
- Copyright (C) 2003 Frank Mori Hess <fmhess@users.sourceforge.net>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * Header for ni_labpc ISA/PCMCIA/PCI drivers
+ *
+ * Copyright (C) 2003 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
#ifndef _NI_LABPC_H
#define _NI_LABPC_H
-#define EEPROM_SIZE 256 /* 256 byte eeprom */
-#define NUM_AO_CHAN 2 /* boards have two analog output channels */
-
enum transfer_type { fifo_not_empty_transfer, fifo_half_full_transfer,
isa_dma_transfer
};
diff --git a/drivers/staging/comedi/drivers/ni_labpc_common.c b/drivers/staging/comedi/drivers/ni_labpc_common.c
index 863afb28ee28..b0dfb8eed16d 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_common.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_common.c
@@ -84,8 +84,10 @@ static const struct comedi_lrange range_labpc_ao = {
}
};
-/* functions that do inb/outb and readb/writeb so we can use
- * function pointers to decide which to use */
+/*
+ * functions that do inb/outb and readb/writeb so we can use
+ * function pointers to decide which to use
+ */
static unsigned int labpc_inb(struct comedi_device *dev, unsigned long reg)
{
return inb(dev->iobase + reg);
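
Editor's note: a stand-alone sketch of the function-pointer dispatch the comment above describes. The struct and helper names below are illustrative; the driver itself stores the chosen port-I/O or memory-mapped helpers in its private data at attach time.

#include <stdio.h>

struct io_ops {
	unsigned int (*read_byte)(unsigned long reg);
	void (*write_byte)(unsigned int data, unsigned long reg);
};

static unsigned int fake_inb(unsigned long reg)
{
	printf("inb(0x%lx)\n", reg);
	return 0;
}

static void fake_outb(unsigned int data, unsigned long reg)
{
	printf("outb(0x%x, 0x%lx)\n", data, reg);
}

static unsigned int fake_readb(unsigned long reg)
{
	printf("readb(0x%lx)\n", reg);
	return 0;
}

static void fake_writeb(unsigned int data, unsigned long reg)
{
	printf("writeb(0x%x, 0x%lx)\n", data, reg);
}

int main(void)
{
	int use_mmio = 0;	/* stands in for "dev->mmio is set" */
	struct io_ops ops = {
		.read_byte  = use_mmio ? fake_readb : fake_inb,
		.write_byte = use_mmio ? fake_writeb : fake_outb,
	};

	/* the rest of the driver only ever calls through the pointers */
	ops.write_byte(0xa5, 0x00);
	ops.read_byte(0x00);
	return 0;
}
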
@@ -656,19 +658,24 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
/* figure out what method we will use to transfer data */
if (devpriv->dma &&
- /* dma unsafe at RT priority,
- * and too much setup time for CMDF_WAKE_EOS */
- (cmd->flags & (CMDF_WAKE_EOS | CMDF_PRIORITY)) == 0)
+ (cmd->flags & (CMDF_WAKE_EOS | CMDF_PRIORITY)) == 0) {
+ /*
+ * dma unsafe at RT priority,
+ * and too much setup time for CMDF_WAKE_EOS
+ */
xfer = isa_dma_transfer;
- else if (/* pc-plus has no fifo-half full interrupt */
- board->is_labpc1200 &&
- /* wake-end-of-scan should interrupt on fifo not empty */
- (cmd->flags & CMDF_WAKE_EOS) == 0 &&
- /* make sure we are taking more than just a few points */
- (cmd->stop_src != TRIG_COUNT || devpriv->count > 256))
+ } else if (board->is_labpc1200 &&
+ (cmd->flags & CMDF_WAKE_EOS) == 0 &&
+ (cmd->stop_src != TRIG_COUNT || devpriv->count > 256)) {
+ /*
+ * pc-plus has no fifo-half full interrupt
+ * wake-end-of-scan should interrupt on fifo not empty
+ * make sure we are taking more than just a few points
+ */
xfer = fifo_half_full_transfer;
- else
+ } else {
xfer = fifo_not_empty_transfer;
+ }
devpriv->current_transfer = xfer;
labpc_ai_set_chan_and_gain(dev, mode, chan, range, aref);
@@ -679,9 +686,11 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
/* manual says to set scan enable bit on second pass */
if (mode == MODE_MULT_CHAN_UP || mode == MODE_MULT_CHAN_DOWN) {
devpriv->cmd1 |= CMD1_SCANEN;
- /* need a brief delay before enabling scan, or scan
- * list will get screwed when you switch
- * between scan up to scan down mode - dunno why */
+ /*
+ * Need a brief delay before enabling scan, or scan
+ * list will get screwed when you switch between
+		 * scan-up and scan-down mode - dunno why.
+ */
udelay(1);
devpriv->write_byte(dev, devpriv->cmd1, CMD1_REG);
}
@@ -728,8 +737,10 @@ static int labpc_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->cmd4 = 0;
if (cmd->convert_src != TRIG_EXT)
devpriv->cmd4 |= CMD4_ECLKRCV;
- /* XXX should discard first scan when using interval scanning
- * since manual says it is not synced with scan clock */
+ /*
+ * XXX should discard first scan when using interval scanning
+ * since manual says it is not synced with scan clock.
+ */
if (!labpc_use_continuous_mode(cmd, mode)) {
devpriv->cmd4 |= CMD4_INTSCAN;
if (cmd->scan_begin_src == TRIG_EXT)
@@ -795,8 +806,10 @@ static int labpc_drain_fifo(struct comedi_device *dev)
return 0;
}
-/* makes sure all data acquired by board is transferred to comedi (used
- * when acquisition is terminated by stop_src == TRIG_EXT). */
+/*
+ * Makes sure all data acquired by board is transferred to comedi (used
+ * when acquisition is terminated by stop_src == TRIG_EXT).
+ */
static void labpc_drain_dregs(struct comedi_device *dev)
{
struct labpc_private *devpriv = dev->private;
@@ -907,9 +920,11 @@ static int labpc_ao_insn_write(struct comedi_device *dev,
channel = CR_CHAN(insn->chanspec);
- /* turn off pacing of analog output channel */
- /* note: hardware bug in daqcard-1200 means pacing cannot
- * be independently enabled/disabled for its the two channels */
+ /*
+ * Turn off pacing of analog output channel.
+ * NOTE: hardware bug in daqcard-1200 means pacing cannot
+	 * be independently enabled/disabled for its two channels.
+ */
spin_lock_irqsave(&dev->spinlock, flags);
devpriv->cmd2 &= ~CMD2_LDAC(channel);
devpriv->write_byte(dev, devpriv->cmd2, CMD2_REG);
@@ -1261,7 +1276,7 @@ int labpc_common_attach(struct comedi_device *dev,
if (board->has_ao) {
s->type = COMEDI_SUBD_AO;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_GROUND;
- s->n_chan = NUM_AO_CHAN;
+ s->n_chan = 2;
s->maxdata = 0x0fff;
s->range_table = &range_labpc_ao;
s->insn_write = labpc_ao_insn_write;
@@ -1307,12 +1322,12 @@ int labpc_common_attach(struct comedi_device *dev,
s->type = COMEDI_SUBD_UNUSED;
}
- /* EEPROM */
+ /* EEPROM (256 bytes) */
s = &dev->subdevices[4];
if (board->is_labpc1200) {
s->type = COMEDI_SUBD_MEMORY;
s->subdev_flags = SDF_READABLE | SDF_WRITABLE | SDF_INTERNAL;
- s->n_chan = EEPROM_SIZE;
+ s->n_chan = 256;
s->maxdata = 0xff;
s->insn_write = labpc_eeprom_insn_write;
diff --git a/drivers/staging/comedi/drivers/ni_labpc_cs.c b/drivers/staging/comedi/drivers/ni_labpc_cs.c
index a1c69ac075d5..3d4d0b9ad4e1 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_cs.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_cs.c
@@ -1,57 +1,50 @@
/*
- comedi/drivers/ni_labpc_cs.c
- Driver for National Instruments daqcard-1200 boards
- Copyright (C) 2001, 2002, 2003 Frank Mori Hess <fmhess@users.sourceforge.net>
-
- PCMCIA crap is adapted from dummy_cs.c 1.31 2001/08/24 12:13:13
- from the pcmcia package.
- The initial developer of the pcmcia dummy_cs.c code is David A. Hinds
- <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
- are Copyright (C) 1999 David A. Hinds.
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
-/*
-Driver: ni_labpc_cs
-Description: National Instruments Lab-PC (& compatibles)
-Author: Frank Mori Hess <fmhess@users.sourceforge.net>
-Devices: [National Instruments] DAQCard-1200 (daqcard-1200)
-Status: works
-
-Thanks go to Fredrik Lingvall for much testing and perseverance in
-helping to debug daqcard-1200 support.
-
-The 1200 series boards have onboard calibration dacs for correcting
-analog input/output offsets and gains. The proper settings for these
-caldacs are stored on the board's eeprom. To read the caldac values
-from the eeprom and store them into a file that can be then be used by
-comedilib, use the comedi_calibrate program.
-
-Configuration options:
- none
-
-The daqcard-1200 has quirky chanlist requirements
-when scanning multiple channels. Multiple channel scan
-sequence must start at highest channel, then decrement down to
-channel 0. Chanlists consisting of all one channel
-are also legal, and allow you to pace conversions in bursts.
-
-*/
+ * Driver for National Instruments daqcard-1200 boards
+ * Copyright (C) 2001, 2002, 2003 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * PCMCIA crap is adapted from dummy_cs.c 1.31 2001/08/24 12:13:13
+ * from the pcmcia package.
+ * The initial developer of the pcmcia dummy_cs.c code is David A. Hinds
+ * <dahinds@users.sourceforge.net>. Portions created by David A. Hinds
+ * are Copyright (C) 1999 David A. Hinds.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
-
-NI manuals:
-340988a (daqcard-1200)
-
-*/
+ * Driver: ni_labpc_cs
+ * Description: National Instruments Lab-PC (& compatibles)
+ * Author: Frank Mori Hess <fmhess@users.sourceforge.net>
+ * Devices: [National Instruments] DAQCard-1200 (daqcard-1200)
+ * Status: works
+ *
+ * Thanks go to Fredrik Lingvall for much testing and perseverance in
+ * helping to debug daqcard-1200 support.
+ *
+ * The 1200 series boards have onboard calibration dacs for correcting
+ * analog input/output offsets and gains. The proper settings for these
+ * caldacs are stored on the board's eeprom. To read the caldac values
+ * from the eeprom and store them into a file that can then be used by
+ * comedilib, use the comedi_calibrate program.
+ *
+ * Configuration options: none
+ *
+ * The daqcard-1200 has quirky chanlist requirements when scanning multiple
+ * channels. A multiple channel scan sequence must start at the highest channel,
+ * then decrement down to channel 0. Chanlists consisting of all one channel
+ * are also legal, and allow you to pace conversions in bursts.
+ *
+ * NI manuals:
+ * 340988a (daqcard-1200)
+ */
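
Editor's note: a user-space sketch of the decrementing chanlist described above, assuming a comedilib application. CR_PACK() and AREF_GROUND come from comedi.h; the range index passed in is whatever the application selected.

#include <comedilib.h>

static void build_daqcard1200_chanlist(unsigned int *chanlist,
				       unsigned int n_chan,
				       unsigned int range)
{
	unsigned int i;

	/* scan must start at the highest channel and step down to channel 0 */
	for (i = 0; i < n_chan; i++)
		chanlist[i] = CR_PACK(n_chan - 1 - i, range, AREF_GROUND);
}

A single-channel chanlist (n_chan repetitions of the same entry) is the other legal form mentioned above and needs no special ordering.
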
#include <linux/module.h>
diff --git a/drivers/staging/comedi/drivers/ni_labpc_pci.c b/drivers/staging/comedi/drivers/ni_labpc_pci.c
index 77d403801db5..cac089193121 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_pci.c
+++ b/drivers/staging/comedi/drivers/ni_labpc_pci.c
@@ -51,8 +51,8 @@ static const struct labpc_boardinfo labpc_pci_boards[] = {
};
/* ripped from mite.h and mite_setup2() to avoid mite dependency */
-#define MITE_IODWBSR 0xc0 /* IO Device Window Base Size Register */
-#define WENAB (1 << 7) /* window enable */
+#define MITE_IODWBSR 0xc0 /* IO Device Window Base Size Register */
+#define WENAB BIT(7) /* window enable */
static int labpc_pci_mite_init(struct pci_dev *pcidev)
{
diff --git a/drivers/staging/comedi/drivers/ni_labpc_regs.h b/drivers/staging/comedi/drivers/ni_labpc_regs.h
index 2a274a3e4e73..8c52179e36fc 100644
--- a/drivers/staging/comedi/drivers/ni_labpc_regs.h
+++ b/drivers/staging/comedi/drivers/ni_labpc_regs.h
@@ -9,32 +9,32 @@
* Register map (all registers are 8-bit)
*/
#define STAT1_REG 0x00 /* R: Status 1 reg */
-#define STAT1_DAVAIL (1 << 0)
-#define STAT1_OVERRUN (1 << 1)
-#define STAT1_OVERFLOW (1 << 2)
-#define STAT1_CNTINT (1 << 3)
-#define STAT1_GATA0 (1 << 5)
-#define STAT1_EXTGATA0 (1 << 6)
+#define STAT1_DAVAIL BIT(0)
+#define STAT1_OVERRUN BIT(1)
+#define STAT1_OVERFLOW BIT(2)
+#define STAT1_CNTINT BIT(3)
+#define STAT1_GATA0 BIT(5)
+#define STAT1_EXTGATA0 BIT(6)
#define CMD1_REG 0x00 /* W: Command 1 reg */
#define CMD1_MA(x) (((x) & 0x7) << 0)
-#define CMD1_TWOSCMP (1 << 3)
+#define CMD1_TWOSCMP BIT(3)
#define CMD1_GAIN(x) (((x) & 0x7) << 4)
-#define CMD1_SCANEN (1 << 7)
+#define CMD1_SCANEN BIT(7)
#define CMD2_REG 0x01 /* W: Command 2 reg */
-#define CMD2_PRETRIG (1 << 0)
-#define CMD2_HWTRIG (1 << 1)
-#define CMD2_SWTRIG (1 << 2)
-#define CMD2_TBSEL (1 << 3)
-#define CMD2_2SDAC0 (1 << 4)
-#define CMD2_2SDAC1 (1 << 5)
-#define CMD2_LDAC(x) (1 << (6 + (x)))
+#define CMD2_PRETRIG BIT(0)
+#define CMD2_HWTRIG BIT(1)
+#define CMD2_SWTRIG BIT(2)
+#define CMD2_TBSEL BIT(3)
+#define CMD2_2SDAC0 BIT(4)
+#define CMD2_2SDAC1 BIT(5)
+#define CMD2_LDAC(x) BIT(6 + ((x) & 0x1))
#define CMD3_REG 0x02 /* W: Command 3 reg */
-#define CMD3_DMAEN (1 << 0)
-#define CMD3_DIOINTEN (1 << 1)
-#define CMD3_DMATCINTEN (1 << 2)
-#define CMD3_CNTINTEN (1 << 3)
-#define CMD3_ERRINTEN (1 << 4)
-#define CMD3_FIFOINTEN (1 << 5)
+#define CMD3_DMAEN BIT(0)
+#define CMD3_DIOINTEN BIT(1)
+#define CMD3_DMATCINTEN BIT(2)
+#define CMD3_CNTINTEN BIT(3)
+#define CMD3_ERRINTEN BIT(4)
+#define CMD3_FIFOINTEN BIT(5)
#define ADC_START_CONVERT_REG 0x03 /* W: Start Convert reg */
#define DAC_LSB_REG(x) (0x04 + 2 * (x)) /* W: DAC0/1 LSB reg */
#define DAC_MSB_REG(x) (0x05 + 2 * (x)) /* W: DAC0/1 MSB reg */
@@ -43,32 +43,32 @@
#define DMATC_CLEAR_REG 0x0a /* W: DMA Interrupt Clear reg */
#define TIMER_CLEAR_REG 0x0c /* W: Timer Interrupt Clear reg */
#define CMD6_REG 0x0e /* W: Command 6 reg */
-#define CMD6_NRSE (1 << 0)
-#define CMD6_ADCUNI (1 << 1)
-#define CMD6_DACUNI(x) (1 << (2 + (x)))
-#define CMD6_HFINTEN (1 << 5)
-#define CMD6_DQINTEN (1 << 6)
-#define CMD6_SCANUP (1 << 7)
+#define CMD6_NRSE BIT(0)
+#define CMD6_ADCUNI BIT(1)
+#define CMD6_DACUNI(x) BIT(2 + ((x) & 0x1))
+#define CMD6_HFINTEN BIT(5)
+#define CMD6_DQINTEN BIT(6)
+#define CMD6_SCANUP BIT(7)
#define CMD4_REG 0x0f /* W: Command 3 reg */
-#define CMD4_INTSCAN (1 << 0)
-#define CMD4_EOIRCV (1 << 1)
-#define CMD4_ECLKDRV (1 << 2)
-#define CMD4_SEDIFF (1 << 3)
-#define CMD4_ECLKRCV (1 << 4)
+#define CMD4_INTSCAN BIT(0)
+#define CMD4_EOIRCV BIT(1)
+#define CMD4_ECLKDRV BIT(2)
+#define CMD4_SEDIFF BIT(3)
+#define CMD4_ECLKRCV BIT(4)
#define DIO_BASE_REG 0x10 /* R/W: 8255 DIO base reg */
#define COUNTER_A_BASE_REG 0x14 /* R/W: 8253 Counter A base reg */
#define COUNTER_B_BASE_REG 0x18 /* R/W: 8253 Counter B base reg */
#define CMD5_REG 0x1c /* W: Command 5 reg */
-#define CMD5_WRTPRT (1 << 2)
-#define CMD5_DITHEREN (1 << 3)
-#define CMD5_CALDACLD (1 << 4)
-#define CMD5_SCLK (1 << 5)
-#define CMD5_SDATA (1 << 6)
-#define CMD5_EEPROMCS (1 << 7)
+#define CMD5_WRTPRT BIT(2)
+#define CMD5_DITHEREN BIT(3)
+#define CMD5_CALDACLD BIT(4)
+#define CMD5_SCLK BIT(5)
+#define CMD5_SDATA BIT(6)
+#define CMD5_EEPROMCS BIT(7)
#define STAT2_REG 0x1d /* R: Status 2 reg */
-#define STAT2_PROMOUT (1 << 0)
-#define STAT2_OUTA1 (1 << 1)
-#define STAT2_FIFONHF (1 << 2)
+#define STAT2_PROMOUT BIT(0)
+#define STAT2_OUTA1 BIT(1)
+#define STAT2_FIFONHF BIT(2)
#define INTERVAL_COUNT_REG 0x1e /* W: Interval Counter Data reg */
#define INTERVAL_STROBE_REG 0x1f /* W: Interval Counter Strobe reg */
diff --git a/drivers/staging/comedi/drivers/ni_mio_c_common.c b/drivers/staging/comedi/drivers/ni_mio_c_common.c
deleted file mode 100644
index e69de29bb2d1..000000000000
--- a/drivers/staging/comedi/drivers/ni_mio_c_common.c
+++ /dev/null
diff --git a/drivers/staging/comedi/drivers/ni_mio_common.c b/drivers/staging/comedi/drivers/ni_mio_common.c
index dcaf7e89f299..8dabb19519a5 100644
--- a/drivers/staging/comedi/drivers/ni_mio_common.c
+++ b/drivers/staging/comedi/drivers/ni_mio_common.c
@@ -1,56 +1,53 @@
/*
- comedi/drivers/ni_mio_common.c
- Hardware driver for DAQ-STC based boards
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1997-2001 David A. Schleef <ds@schleef.org>
- Copyright (C) 2002-2006 Frank Mori Hess <fmhess@users.sourceforge.net>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * Hardware driver for DAQ-STC based boards
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1997-2001 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2002-2006 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
- This file is meant to be included by another file, e.g.,
- ni_atmio.c or ni_pcimio.c.
-
- Interrupt support originally added by Truxton Fulton
- <trux@truxton.com>
-
- References (from ftp://ftp.natinst.com/support/manuals):
-
- 340747b.pdf AT-MIO E series Register Level Programmer Manual
- 341079b.pdf PCI E Series RLPM
- 340934b.pdf DAQ-STC reference manual
- 67xx and 611x registers (from ftp://ftp.ni.com/support/daq/mhddk/documentation/)
- release_ni611x.pdf
- release_ni67xx.pdf
- Other possibly relevant info:
-
- 320517c.pdf User manual (obsolete)
- 320517f.pdf User manual (new)
- 320889a.pdf delete
- 320906c.pdf maximum signal ratings
- 321066a.pdf about 16x
- 321791a.pdf discontinuation of at-mio-16e-10 rev. c
- 321808a.pdf about at-mio-16e-10 rev P
- 321837a.pdf discontinuation of at-mio-16de-10 rev d
- 321838a.pdf about at-mio-16de-10 rev N
-
- ISSUES:
-
- - the interrupt routine needs to be cleaned up
-
- 2006-02-07: S-Series PCI-6143: Support has been added but is not
- fully tested as yet. Terry Barnaby, BEAM Ltd.
-*/
+ * This file is meant to be included by another file, e.g.,
+ * ni_atmio.c or ni_pcimio.c.
+ *
+ * Interrupt support originally added by Truxton Fulton <trux@truxton.com>
+ *
+ * References (ftp://ftp.natinst.com/support/manuals):
+ * 340747b.pdf AT-MIO E series Register Level Programmer Manual
+ * 341079b.pdf PCI E Series RLPM
+ * 340934b.pdf DAQ-STC reference manual
+ *
+ * 67xx and 611x registers (ftp://ftp.ni.com/support/daq/mhddk/documentation/)
+ * release_ni611x.pdf
+ * release_ni67xx.pdf
+ *
+ * Other possibly relevant info:
+ * 320517c.pdf User manual (obsolete)
+ * 320517f.pdf User manual (new)
+ * 320889a.pdf delete
+ * 320906c.pdf maximum signal ratings
+ * 321066a.pdf about 16x
+ * 321791a.pdf discontinuation of at-mio-16e-10 rev. c
+ * 321808a.pdf about at-mio-16e-10 rev P
+ * 321837a.pdf discontinuation of at-mio-16de-10 rev d
+ * 321838a.pdf about at-mio-16de-10 rev N
+ *
+ * ISSUES:
+ * - the interrupt routine needs to be cleaned up
+ *
+ * 2006-02-07: S-Series PCI-6143: Support has been added but is not
+ * fully tested as yet. Terry Barnaby, BEAM Ltd.
+ */
#include <linux/interrupt.h>
#include <linux/sched.h>
@@ -216,19 +213,8 @@ enum ni_common_subdevices {
NI_FREQ_OUT_SUBDEV,
NI_NUM_SUBDEVICES
};
-static inline unsigned NI_GPCT_SUBDEV(unsigned counter_index)
-{
- switch (counter_index) {
- case 0:
- return NI_GPCT0_SUBDEV;
- case 1:
- return NI_GPCT1_SUBDEV;
- default:
- break;
- }
- BUG();
- return NI_GPCT0_SUBDEV;
-}
+
+#define NI_GPCT_SUBDEV(x) (NI_GPCT0_SUBDEV + (x))
enum timebase_nanoseconds {
TIMEBASE_1_NS = 50,
@@ -242,7 +228,7 @@ enum timebase_nanoseconds {
static const int num_adc_stages_611x = 3;
-static void ni_writel(struct comedi_device *dev, uint32_t data, int reg)
+static void ni_writel(struct comedi_device *dev, unsigned int data, int reg)
{
if (dev->mmio)
writel(data, dev->mmio + reg);
@@ -250,7 +236,7 @@ static void ni_writel(struct comedi_device *dev, uint32_t data, int reg)
outl(data, dev->iobase + reg);
}
-static void ni_writew(struct comedi_device *dev, uint16_t data, int reg)
+static void ni_writew(struct comedi_device *dev, unsigned int data, int reg)
{
if (dev->mmio)
writew(data, dev->mmio + reg);
@@ -258,7 +244,7 @@ static void ni_writew(struct comedi_device *dev, uint16_t data, int reg)
outw(data, dev->iobase + reg);
}
-static void ni_writeb(struct comedi_device *dev, uint8_t data, int reg)
+static void ni_writeb(struct comedi_device *dev, unsigned int data, int reg)
{
if (dev->mmio)
writeb(data, dev->mmio + reg);
@@ -266,7 +252,7 @@ static void ni_writeb(struct comedi_device *dev, uint8_t data, int reg)
outb(data, dev->iobase + reg);
}
-static uint32_t ni_readl(struct comedi_device *dev, int reg)
+static unsigned int ni_readl(struct comedi_device *dev, int reg)
{
if (dev->mmio)
return readl(dev->mmio + reg);
@@ -274,7 +260,7 @@ static uint32_t ni_readl(struct comedi_device *dev, int reg)
return inl(dev->iobase + reg);
}
-static uint16_t ni_readw(struct comedi_device *dev, int reg)
+static unsigned int ni_readw(struct comedi_device *dev, int reg)
{
if (dev->mmio)
return readw(dev->mmio + reg);
@@ -282,7 +268,7 @@ static uint16_t ni_readw(struct comedi_device *dev, int reg)
return inw(dev->iobase + reg);
}
-static uint8_t ni_readb(struct comedi_device *dev, int reg)
+static unsigned int ni_readb(struct comedi_device *dev, int reg)
{
if (dev->mmio)
return readb(dev->mmio + reg);
@@ -457,7 +443,8 @@ static unsigned int m_series_stc_read(struct comedi_device *dev,
}
}
-static void ni_stc_writew(struct comedi_device *dev, uint16_t data, int reg)
+static void ni_stc_writew(struct comedi_device *dev,
+ unsigned int data, int reg)
{
struct ni_private *devpriv = dev->private;
unsigned long flags;
@@ -476,7 +463,8 @@ static void ni_stc_writew(struct comedi_device *dev, uint16_t data, int reg)
}
}
-static void ni_stc_writel(struct comedi_device *dev, uint32_t data, int reg)
+static void ni_stc_writel(struct comedi_device *dev,
+ unsigned int data, int reg)
{
struct ni_private *devpriv = dev->private;
@@ -488,11 +476,11 @@ static void ni_stc_writel(struct comedi_device *dev, uint32_t data, int reg)
}
}
-static uint16_t ni_stc_readw(struct comedi_device *dev, int reg)
+static unsigned int ni_stc_readw(struct comedi_device *dev, int reg)
{
struct ni_private *devpriv = dev->private;
unsigned long flags;
- uint16_t val;
+ unsigned int val;
if (devpriv->is_m_series) {
val = m_series_stc_read(dev, reg);
@@ -509,10 +497,10 @@ static uint16_t ni_stc_readw(struct comedi_device *dev, int reg)
return val;
}
-static uint32_t ni_stc_readl(struct comedi_device *dev, int reg)
+static unsigned int ni_stc_readl(struct comedi_device *dev, int reg)
{
struct ni_private *devpriv = dev->private;
- uint32_t val;
+ unsigned int val;
if (devpriv->is_m_series) {
val = m_series_stc_read(dev, reg);
@@ -524,7 +512,8 @@ static uint32_t ni_stc_readl(struct comedi_device *dev, int reg)
}
static inline void ni_set_bitfield(struct comedi_device *dev, int reg,
- unsigned bit_mask, unsigned bit_values)
+ unsigned int bit_mask,
+ unsigned int bit_values)
{
struct ni_private *devpriv = dev->private;
unsigned long flags;
@@ -556,6 +545,11 @@ static inline void ni_set_bitfield(struct comedi_device *dev, int reg,
devpriv->g0_g1_select_reg |= bit_values & bit_mask;
ni_writeb(dev, devpriv->g0_g1_select_reg, reg);
break;
+ case NI_M_CDIO_DMA_SEL_REG:
+ devpriv->cdio_dma_select_reg &= ~bit_mask;
+ devpriv->cdio_dma_select_reg |= bit_values & bit_mask;
+ ni_writeb(dev, devpriv->cdio_dma_select_reg, reg);
+ break;
default:
dev_err(dev->class_dev, "called with invalid register %d\n",
reg);
@@ -566,116 +560,35 @@ static inline void ni_set_bitfield(struct comedi_device *dev, int reg,
}
#ifdef PCIDMA
-/* DMA channel setup */
-static inline unsigned ni_stc_dma_channel_select_bitfield(unsigned channel)
-{
- if (channel < 4)
- return 1 << channel;
- if (channel == 4)
- return 0x3;
- if (channel == 5)
- return 0x5;
- BUG();
- return 0;
-}
-
-static inline void ni_set_ai_dma_channel(struct comedi_device *dev,
- unsigned channel)
-{
- unsigned bits = ni_stc_dma_channel_select_bitfield(channel);
-
- ni_set_bitfield(dev, NI_E_DMA_AI_AO_SEL_REG,
- NI_E_DMA_AI_SEL_MASK, NI_E_DMA_AI_SEL(bits));
-}
-
-static inline void ni_set_ai_dma_no_channel(struct comedi_device *dev)
-{
- ni_set_bitfield(dev, NI_E_DMA_AI_AO_SEL_REG, NI_E_DMA_AI_SEL_MASK, 0);
-}
-
-static inline void ni_set_ao_dma_channel(struct comedi_device *dev,
- unsigned channel)
-{
- unsigned bits = ni_stc_dma_channel_select_bitfield(channel);
-
- ni_set_bitfield(dev, NI_E_DMA_AI_AO_SEL_REG,
- NI_E_DMA_AO_SEL_MASK, NI_E_DMA_AO_SEL(bits));
-}
-
-static inline void ni_set_ao_dma_no_channel(struct comedi_device *dev)
-{
- ni_set_bitfield(dev, NI_E_DMA_AI_AO_SEL_REG, NI_E_DMA_AO_SEL_MASK, 0);
-}
-
-static inline void ni_set_gpct_dma_channel(struct comedi_device *dev,
- unsigned gpct_index,
- unsigned channel)
-{
- unsigned bits = ni_stc_dma_channel_select_bitfield(channel);
-
- ni_set_bitfield(dev, NI_E_DMA_G0_G1_SEL_REG,
- NI_E_DMA_G0_G1_SEL_MASK(gpct_index),
- NI_E_DMA_G0_G1_SEL(gpct_index, bits));
-}
-
-static inline void ni_set_gpct_dma_no_channel(struct comedi_device *dev,
- unsigned gpct_index)
-{
- ni_set_bitfield(dev, NI_E_DMA_G0_G1_SEL_REG,
- NI_E_DMA_G0_G1_SEL_MASK(gpct_index), 0);
-}
-
-static inline void ni_set_cdo_dma_channel(struct comedi_device *dev,
- unsigned mite_channel)
-{
- struct ni_private *devpriv = dev->private;
- unsigned long flags;
- unsigned bits;
-
- spin_lock_irqsave(&devpriv->soft_reg_copy_lock, flags);
- devpriv->cdio_dma_select_reg &= ~NI_M_CDIO_DMA_SEL_CDO_MASK;
- /*
- * XXX just guessing ni_stc_dma_channel_select_bitfield()
- * returns the right bits, under the assumption the cdio dma
- * selection works just like ai/ao/gpct.
- * Definitely works for dma channels 0 and 1.
- */
- bits = ni_stc_dma_channel_select_bitfield(mite_channel);
- devpriv->cdio_dma_select_reg |= NI_M_CDIO_DMA_SEL_CDO(bits);
- ni_writeb(dev, devpriv->cdio_dma_select_reg, NI_M_CDIO_DMA_SEL_REG);
- mmiowb();
- spin_unlock_irqrestore(&devpriv->soft_reg_copy_lock, flags);
-}
-
-static inline void ni_set_cdo_dma_no_channel(struct comedi_device *dev)
-{
- struct ni_private *devpriv = dev->private;
- unsigned long flags;
- spin_lock_irqsave(&devpriv->soft_reg_copy_lock, flags);
- devpriv->cdio_dma_select_reg &= ~NI_M_CDIO_DMA_SEL_CDO_MASK;
- ni_writeb(dev, devpriv->cdio_dma_select_reg, NI_M_CDIO_DMA_SEL_REG);
- mmiowb();
- spin_unlock_irqrestore(&devpriv->soft_reg_copy_lock, flags);
-}
+/* selects the MITE channel to use for DMA */
+#define NI_STC_DMA_CHAN_SEL(x) (((x) < 4) ? BIT(x) : \
+ ((x) == 4) ? 0x3 : \
+ ((x) == 5) ? 0x5 : 0x0)
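
Editor's note: a quick stand-alone check of what the nested ternary above evaluates to for each MITE channel; BIT() is spelled out locally so the snippet builds outside the kernel.

#include <stdio.h>

#define BIT(n)			(1U << (n))
#define NI_STC_DMA_CHAN_SEL(x)	(((x) < 4) ? BIT(x) : \
				 ((x) == 4) ? 0x3 : \
				 ((x) == 5) ? 0x5 : 0x0)

int main(void)
{
	unsigned int chan;

	/* channels 0-3 -> single bit, 4 -> 0x3, 5 -> 0x5, others -> 0 */
	for (chan = 0; chan < 7; chan++)
		printf("chan %u -> 0x%x\n", chan, NI_STC_DMA_CHAN_SEL(chan));
	return 0;
}
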
+/* DMA channel setup */
static int ni_request_ai_mite_channel(struct comedi_device *dev)
{
struct ni_private *devpriv = dev->private;
+ struct mite_channel *mite_chan;
unsigned long flags;
+ unsigned int bits;
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- BUG_ON(devpriv->ai_mite_chan);
- devpriv->ai_mite_chan =
- mite_request_channel(devpriv->mite, devpriv->ai_mite_ring);
- if (!devpriv->ai_mite_chan) {
+ mite_chan = mite_request_channel(devpriv->mite, devpriv->ai_mite_ring);
+ if (!mite_chan) {
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
dev_err(dev->class_dev,
"failed to reserve mite dma channel for analog input\n");
return -EBUSY;
}
- devpriv->ai_mite_chan->dir = COMEDI_INPUT;
- ni_set_ai_dma_channel(dev, devpriv->ai_mite_chan->channel);
+ mite_chan->dir = COMEDI_INPUT;
+ devpriv->ai_mite_chan = mite_chan;
+
+ bits = NI_STC_DMA_CHAN_SEL(mite_chan->channel);
+ ni_set_bitfield(dev, NI_E_DMA_AI_AO_SEL_REG,
+ NI_E_DMA_AI_SEL_MASK, NI_E_DMA_AI_SEL(bits));
+
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
return 0;
}
@@ -683,37 +596,42 @@ static int ni_request_ai_mite_channel(struct comedi_device *dev)
static int ni_request_ao_mite_channel(struct comedi_device *dev)
{
struct ni_private *devpriv = dev->private;
+ struct mite_channel *mite_chan;
unsigned long flags;
+ unsigned int bits;
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- BUG_ON(devpriv->ao_mite_chan);
- devpriv->ao_mite_chan =
- mite_request_channel(devpriv->mite, devpriv->ao_mite_ring);
- if (!devpriv->ao_mite_chan) {
+ mite_chan = mite_request_channel(devpriv->mite, devpriv->ao_mite_ring);
+ if (!mite_chan) {
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
 		dev_err(dev->class_dev,
 			"failed to reserve mite dma channel for analog output\n");
return -EBUSY;
}
- devpriv->ao_mite_chan->dir = COMEDI_OUTPUT;
- ni_set_ao_dma_channel(dev, devpriv->ao_mite_chan->channel);
+ mite_chan->dir = COMEDI_OUTPUT;
+ devpriv->ao_mite_chan = mite_chan;
+
+ bits = NI_STC_DMA_CHAN_SEL(mite_chan->channel);
+ ni_set_bitfield(dev, NI_E_DMA_AI_AO_SEL_REG,
+ NI_E_DMA_AO_SEL_MASK, NI_E_DMA_AO_SEL(bits));
+
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
return 0;
}
static int ni_request_gpct_mite_channel(struct comedi_device *dev,
- unsigned gpct_index,
+ unsigned int gpct_index,
enum comedi_io_direction direction)
{
struct ni_private *devpriv = dev->private;
- unsigned long flags;
+ struct ni_gpct *counter = &devpriv->counter_dev->counters[gpct_index];
struct mite_channel *mite_chan;
+ unsigned long flags;
+ unsigned int bits;
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- BUG_ON(devpriv->counter_dev->counters[gpct_index].mite_chan);
- mite_chan =
- mite_request_channel(devpriv->mite,
- devpriv->gpct_mite_ring[gpct_index]);
+ mite_chan = mite_request_channel(devpriv->mite,
+ devpriv->gpct_mite_ring[gpct_index]);
if (!mite_chan) {
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
dev_err(dev->class_dev,
@@ -721,37 +639,50 @@ static int ni_request_gpct_mite_channel(struct comedi_device *dev,
return -EBUSY;
}
mite_chan->dir = direction;
- ni_tio_set_mite_channel(&devpriv->counter_dev->counters[gpct_index],
- mite_chan);
- ni_set_gpct_dma_channel(dev, gpct_index, mite_chan->channel);
+ ni_tio_set_mite_channel(counter, mite_chan);
+
+ bits = NI_STC_DMA_CHAN_SEL(mite_chan->channel);
+ ni_set_bitfield(dev, NI_E_DMA_G0_G1_SEL_REG,
+ NI_E_DMA_G0_G1_SEL_MASK(gpct_index),
+ NI_E_DMA_G0_G1_SEL(gpct_index, bits));
+
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
return 0;
}
-#endif /* PCIDMA */
-
static int ni_request_cdo_mite_channel(struct comedi_device *dev)
{
-#ifdef PCIDMA
struct ni_private *devpriv = dev->private;
+ struct mite_channel *mite_chan;
unsigned long flags;
+ unsigned int bits;
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- BUG_ON(devpriv->cdo_mite_chan);
- devpriv->cdo_mite_chan =
- mite_request_channel(devpriv->mite, devpriv->cdo_mite_ring);
- if (!devpriv->cdo_mite_chan) {
+ mite_chan = mite_request_channel(devpriv->mite, devpriv->cdo_mite_ring);
+ if (!mite_chan) {
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
dev_err(dev->class_dev,
"failed to reserve mite dma channel for correlated digital output\n");
return -EBUSY;
}
- devpriv->cdo_mite_chan->dir = COMEDI_OUTPUT;
- ni_set_cdo_dma_channel(dev, devpriv->cdo_mite_chan->channel);
+ mite_chan->dir = COMEDI_OUTPUT;
+ devpriv->cdo_mite_chan = mite_chan;
+
+ /*
+ * XXX just guessing NI_STC_DMA_CHAN_SEL()
+ * returns the right bits, under the assumption the cdio dma
+ * selection works just like ai/ao/gpct.
+ * Definitely works for dma channels 0 and 1.
+ */
+ bits = NI_STC_DMA_CHAN_SEL(mite_chan->channel);
+ ni_set_bitfield(dev, NI_M_CDIO_DMA_SEL_REG,
+ NI_M_CDIO_DMA_SEL_CDO_MASK,
+ NI_M_CDIO_DMA_SEL_CDO(bits));
+
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
-#endif /* PCIDMA */
return 0;
}
+#endif /* PCIDMA */
static void ni_release_ai_mite_channel(struct comedi_device *dev)
{
@@ -761,7 +692,8 @@ static void ni_release_ai_mite_channel(struct comedi_device *dev)
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
if (devpriv->ai_mite_chan) {
- ni_set_ai_dma_no_channel(dev);
+ ni_set_bitfield(dev, NI_E_DMA_AI_AO_SEL_REG,
+ NI_E_DMA_AI_SEL_MASK, 0);
mite_release_channel(devpriv->ai_mite_chan);
devpriv->ai_mite_chan = NULL;
}
@@ -777,7 +709,8 @@ static void ni_release_ao_mite_channel(struct comedi_device *dev)
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
if (devpriv->ao_mite_chan) {
- ni_set_ao_dma_no_channel(dev);
+ ni_set_bitfield(dev, NI_E_DMA_AI_AO_SEL_REG,
+ NI_E_DMA_AO_SEL_MASK, 0);
mite_release_channel(devpriv->ao_mite_chan);
devpriv->ao_mite_chan = NULL;
}
@@ -787,7 +720,7 @@ static void ni_release_ao_mite_channel(struct comedi_device *dev)
#ifdef PCIDMA
static void ni_release_gpct_mite_channel(struct comedi_device *dev,
- unsigned gpct_index)
+ unsigned int gpct_index)
{
struct ni_private *devpriv = dev->private;
unsigned long flags;
@@ -797,7 +730,8 @@ static void ni_release_gpct_mite_channel(struct comedi_device *dev,
struct mite_channel *mite_chan =
devpriv->counter_dev->counters[gpct_index].mite_chan;
- ni_set_gpct_dma_no_channel(dev, gpct_index);
+ ni_set_bitfield(dev, NI_E_DMA_G0_G1_SEL_REG,
+ NI_E_DMA_G0_G1_SEL_MASK(gpct_index), 0);
ni_tio_set_mite_channel(&devpriv->
counter_dev->counters[gpct_index],
NULL);
@@ -805,30 +739,27 @@ static void ni_release_gpct_mite_channel(struct comedi_device *dev,
}
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
}
-#endif /* PCIDMA */
static void ni_release_cdo_mite_channel(struct comedi_device *dev)
{
-#ifdef PCIDMA
struct ni_private *devpriv = dev->private;
unsigned long flags;
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
if (devpriv->cdo_mite_chan) {
- ni_set_cdo_dma_no_channel(dev);
+ ni_set_bitfield(dev, NI_M_CDIO_DMA_SEL_REG,
+ NI_M_CDIO_DMA_SEL_CDO_MASK, 0);
mite_release_channel(devpriv->cdo_mite_chan);
devpriv->cdo_mite_chan = NULL;
}
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
-#endif /* PCIDMA */
}
-#ifdef PCIDMA
static void ni_e_series_enable_second_irq(struct comedi_device *dev,
- unsigned gpct_index, short enable)
+ unsigned int gpct_index, short enable)
{
struct ni_private *devpriv = dev->private;
- uint16_t val = 0;
+ unsigned int val = 0;
int reg;
if (devpriv->is_m_series || gpct_index > 1)
@@ -875,8 +806,10 @@ static void ni_clear_ai_fifo(struct comedi_device *dev)
ni_writeb(dev, 0, NI_M_STATIC_AI_CTRL_REG(0));
ni_writeb(dev, 1, NI_M_STATIC_AI_CTRL_REG(0));
#if 0
- /* the NI example code does 3 convert pulses for 625x boards,
- but that appears to be wrong in practice. */
+ /*
+ * The NI example code does 3 convert pulses for 625x
+		 * boards, but that appears to be wrong in practice.
+ */
ni_stc_writew(dev, NISTC_AI_CMD1_CONVERT_PULSE,
NISTC_AI_CMD1_REG);
ni_stc_writew(dev, NISTC_AI_CMD1_CONVERT_PULSE,
@@ -888,8 +821,8 @@ static void ni_clear_ai_fifo(struct comedi_device *dev)
}
}
-static inline void ni_ao_win_outw(struct comedi_device *dev, uint16_t data,
- int addr)
+static inline void ni_ao_win_outw(struct comedi_device *dev,
+ unsigned int data, int addr)
{
struct ni_private *devpriv = dev->private;
unsigned long flags;
@@ -900,8 +833,8 @@ static inline void ni_ao_win_outw(struct comedi_device *dev, uint16_t data,
spin_unlock_irqrestore(&devpriv->window_lock, flags);
}
-static inline void ni_ao_win_outl(struct comedi_device *dev, uint32_t data,
- int addr)
+static inline void ni_ao_win_outl(struct comedi_device *dev,
+ unsigned int data, int addr)
{
struct ni_private *devpriv = dev->private;
unsigned long flags;
@@ -925,20 +858,21 @@ static inline unsigned short ni_ao_win_inw(struct comedi_device *dev, int addr)
return data;
}
-/* ni_set_bits( ) allows different parts of the ni_mio_common driver to
-* share registers (such as Interrupt_A_Register) without interfering with
-* each other.
-*
-* NOTE: the switch/case statements are optimized out for a constant argument
-* so this is actually quite fast--- If you must wrap another function around this
-* make it inline to avoid a large speed penalty.
-*
-* value should only be 1 or 0.
-*/
+/*
+ * ni_set_bits( ) allows different parts of the ni_mio_common driver to
+ * share registers (such as Interrupt_A_Register) without interfering with
+ * each other.
+ *
+ * NOTE: the switch/case statements are optimized out for a constant argument
+ * so this is actually quite fast. If you must wrap another function around
+ * this, make it inline to avoid a large speed penalty.
+ *
+ * value should only be 1 or 0.
+ */
static inline void ni_set_bits(struct comedi_device *dev, int reg,
- unsigned bits, unsigned value)
+ unsigned int bits, unsigned int value)
{
- unsigned bit_values;
+ unsigned int bit_values;
if (value)
bit_values = bits;
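
Editor's note: a stand-alone sketch of the shared-register pattern the comment above describes - several callers update different bits of one write-only register through a cached soft copy. The names below are illustrative, not the driver's, and in the driver the update runs under a spinlock.

#include <stdio.h>

static unsigned int soft_copy;	/* cached value of a write-only register */

static void hw_write(unsigned int val)
{
	printf("write 0x%x\n", val);
}

static void set_bitfield(unsigned int mask, unsigned int values)
{
	soft_copy &= ~mask;
	soft_copy |= values & mask;
	hw_write(soft_copy);
}

static void set_bits(unsigned int bits, unsigned int value)
{
	set_bitfield(bits, value ? bits : 0);
}

int main(void)
{
	set_bits(0x01, 1);	/* one caller enables its interrupt source */
	set_bits(0x80, 1);	/* another caller enables a different one */
	set_bits(0x01, 0);	/* first source disabled without touching 0x80 */
	return 0;
}
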
@@ -956,7 +890,7 @@ static void ni_sync_ai_dma(struct comedi_device *dev)
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
if (devpriv->ai_mite_chan)
- mite_sync_input_dma(devpriv->ai_mite_chan, s);
+ mite_sync_dma(devpriv->ai_mite_chan, s);
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
}
@@ -972,9 +906,8 @@ static int ni_ai_drain_dma(struct comedi_device *dev)
if (devpriv->ai_mite_chan) {
for (i = 0; i < timeout; i++) {
if ((ni_stc_readw(dev, NISTC_AI_STATUS1_REG) &
- NISTC_AI_STATUS1_FIFO_E)
- && mite_bytes_in_transit(devpriv->ai_mite_chan) ==
- 0)
+ NISTC_AI_STATUS1_FIFO_E) &&
+ mite_bytes_in_transit(devpriv->ai_mite_chan) == 0)
break;
udelay(5);
}
@@ -994,19 +927,6 @@ static int ni_ai_drain_dma(struct comedi_device *dev)
return retval;
}
-static void mite_handle_b_linkc(struct mite_struct *mite,
- struct comedi_device *dev)
-{
- struct ni_private *devpriv = dev->private;
- struct comedi_subdevice *s = dev->write_subdev;
- unsigned long flags;
-
- spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- if (devpriv->ao_mite_chan)
- mite_sync_output_dma(devpriv->ao_mite_chan, s);
- spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
-}
-
static int ni_ao_wait_for_dma_load(struct comedi_device *dev)
{
static const int timeout = 10000;
@@ -1018,9 +938,11 @@ static int ni_ao_wait_for_dma_load(struct comedi_device *dev)
b_status = ni_stc_readw(dev, NISTC_AO_STATUS1_REG);
if (b_status & NISTC_AO_STATUS1_FIFO_HF)
break;
- /* if we poll too often, the pci bus activity seems
- to slow the dma transfer down */
- udelay(10);
+ /*
+ * If we poll too often, the pci bus activity seems
+ * to slow the dma transfer down.
+ */
+ usleep_range(10, 100);
}
if (i == timeout) {
dev_err(dev->class_dev, "timed out waiting for dma load\n");
@@ -1038,7 +960,7 @@ static void ni_ao_fifo_load(struct comedi_device *dev,
struct ni_private *devpriv = dev->private;
int i;
unsigned short d;
- u32 packed_data;
+ unsigned int packed_data;
for (i = 0; i < n; i++) {
comedi_buf_read_samples(s, &d, 1);
@@ -1128,7 +1050,7 @@ static void ni_ai_fifo_read(struct comedi_device *dev,
{
struct ni_private *devpriv = dev->private;
struct comedi_async *async = s->async;
- u32 dl;
+ unsigned int dl;
unsigned short data;
int i;
@@ -1148,7 +1070,10 @@ static void ni_ai_fifo_read(struct comedi_device *dev,
comedi_buf_write_samples(s, &data, 1);
}
} else if (devpriv->is_6143) {
- /* This just reads the FIFO assuming the data is present, no checks on the FIFO status are performed */
+ /*
+		 * This just reads the FIFO, assuming the data is present;
+		 * no checks on the FIFO status are performed.
+ */
for (i = 0; i < n / 2; i++) {
dl = ni_readl(dev, NI6143_AI_FIFO_DATA_REG);
@@ -1192,16 +1117,13 @@ static void ni_handle_fifo_half_full(struct comedi_device *dev)
}
#endif
-/*
- Empties the AI fifo
-*/
+/* Empties the AI fifo */
static void ni_handle_fifo_dregs(struct comedi_device *dev)
{
struct ni_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
- u32 dl;
+ unsigned int dl;
unsigned short data;
- unsigned short fifo_empty;
int i;
if (devpriv->is_611x) {
@@ -1237,15 +1159,16 @@ static void ni_handle_fifo_dregs(struct comedi_device *dev)
}
} else {
- fifo_empty = ni_stc_readw(dev, NISTC_AI_STATUS1_REG) &
- NISTC_AI_STATUS1_FIFO_E;
- while (fifo_empty == 0) {
+ unsigned short fe; /* fifo empty */
+
+ fe = ni_stc_readw(dev, NISTC_AI_STATUS1_REG) &
+ NISTC_AI_STATUS1_FIFO_E;
+ while (fe == 0) {
for (i = 0;
i < ARRAY_SIZE(devpriv->ai_fifo_buffer); i++) {
- fifo_empty = ni_stc_readw(dev,
- NISTC_AI_STATUS1_REG) &
- NISTC_AI_STATUS1_FIFO_E;
- if (fifo_empty)
+ fe = ni_stc_readw(dev, NISTC_AI_STATUS1_REG) &
+ NISTC_AI_STATUS1_FIFO_E;
+ if (fe)
break;
devpriv->ai_fifo_buffer[i] =
ni_readw(dev, NI_E_AI_FIFO_DATA_REG);
@@ -1260,7 +1183,7 @@ static void get_last_sample_611x(struct comedi_device *dev)
struct ni_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
unsigned short data;
- u32 dl;
+ unsigned int dl;
if (!devpriv->is_611x)
return;
@@ -1278,7 +1201,7 @@ static void get_last_sample_6143(struct comedi_device *dev)
struct ni_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
unsigned short data;
- u32 dl;
+ unsigned int dl;
if (!devpriv->is_6143)
return;
@@ -1365,42 +1288,23 @@ static void ack_a_interrupt(struct comedi_device *dev, unsigned short a_status)
ni_stc_writew(dev, ack, NISTC_INTA_ACK_REG);
}
-static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
- unsigned ai_mite_status)
+static void handle_a_interrupt(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned short status)
{
- struct comedi_subdevice *s = dev->read_subdev;
struct comedi_cmd *cmd = &s->async->cmd;
- /* 67xx boards don't have ai subdevice, but their gpct0 might generate an a interrupt */
- if (s->type == COMEDI_SUBD_UNUSED)
- return;
-
-#ifdef PCIDMA
- if (ai_mite_status & CHSR_LINKC)
- ni_sync_ai_dma(dev);
-
- if (ai_mite_status & ~(CHSR_INT | CHSR_LINKC | CHSR_DONE | CHSR_MRDY |
- CHSR_DRDY | CHSR_DRQ1 | CHSR_DRQ0 | CHSR_ERROR |
- CHSR_SABORT | CHSR_XFERR | CHSR_LxERR_mask)) {
- dev_err(dev->class_dev,
- "unknown mite interrupt (ai_mite_status=%08x)\n",
- ai_mite_status);
- s->async->events |= COMEDI_CB_ERROR;
- /* disable_irq(dev->irq); */
- }
-#endif
-
/* test for all uncommon interrupt events at the same time */
if (status & (NISTC_AI_STATUS1_ERR |
NISTC_AI_STATUS1_SC_TC | NISTC_AI_STATUS1_START1)) {
if (status == 0xffff) {
dev_err(dev->class_dev, "Card removed?\n");
- /* we probably aren't even running a command now,
- * so it's a good idea to be careful. */
- if (comedi_is_subdevice_running(s)) {
+ /*
+ * We probably aren't even running a command now,
+ * so it's a good idea to be careful.
+ */
+ if (comedi_is_subdevice_running(s))
s->async->events |= COMEDI_CB_ERROR;
- comedi_handle_events(dev, s);
- }
return;
}
if (status & NISTC_AI_STATUS1_ERR) {
@@ -1412,8 +1316,6 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
s->async->events |= COMEDI_CB_ERROR;
if (status & NISTC_AI_STATUS1_OVER)
s->async->events |= COMEDI_CB_OVERFLOW;
-
- comedi_handle_events(dev, s);
return;
}
if (status & NISTC_AI_STATUS1_SC_TC) {
@@ -1425,8 +1327,11 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
if (status & NISTC_AI_STATUS1_FIFO_HF) {
int i;
static const int timeout = 10;
- /* pcmcia cards (at least 6036) seem to stop producing interrupts if we
- *fail to get the fifo less than half full, so loop to be sure.*/
+ /*
+ * PCMCIA cards (at least 6036) seem to stop producing
+ * interrupts if we fail to get the fifo less than half
+ * full, so loop to be sure.
+ */
for (i = 0; i < timeout; ++i) {
ni_handle_fifo_half_full(dev);
if ((ni_stc_readw(dev, NISTC_AI_STATUS1_REG) &
@@ -1438,8 +1343,6 @@ static void handle_a_interrupt(struct comedi_device *dev, unsigned short status,
if (status & NISTC_AI_STATUS1_STOP)
ni_handle_eos(dev, s);
-
- comedi_handle_events(dev, s);
}
static void ack_b_interrupt(struct comedi_device *dev, unsigned short b_status)
@@ -1465,29 +1368,9 @@ static void ack_b_interrupt(struct comedi_device *dev, unsigned short b_status)
}
static void handle_b_interrupt(struct comedi_device *dev,
- unsigned short b_status, unsigned ao_mite_status)
+ struct comedi_subdevice *s,
+ unsigned short b_status)
{
- struct comedi_subdevice *s = dev->write_subdev;
- /* unsigned short ack=0; */
-
-#ifdef PCIDMA
- /* Currently, mite.c requires us to handle LINKC */
- if (ao_mite_status & CHSR_LINKC) {
- struct ni_private *devpriv = dev->private;
-
- mite_handle_b_linkc(devpriv->mite, dev);
- }
-
- if (ao_mite_status & ~(CHSR_INT | CHSR_LINKC | CHSR_DONE | CHSR_MRDY |
- CHSR_DRDY | CHSR_DRQ1 | CHSR_DRQ0 | CHSR_ERROR |
- CHSR_SABORT | CHSR_XFERR | CHSR_LxERR_mask)) {
- dev_err(dev->class_dev,
- "unknown mite interrupt (ao_mite_status=%08x)\n",
- ao_mite_status);
- s->async->events |= COMEDI_CB_ERROR;
- }
-#endif
-
if (b_status == 0xffff)
return;
if (b_status & NISTC_AO_STATUS1_OVERRUN) {
@@ -1515,8 +1398,6 @@ static void handle_b_interrupt(struct comedi_device *dev,
}
}
#endif
-
- comedi_handle_events(dev, s);
}
static void ni_ai_munge(struct comedi_device *dev, struct comedi_subdevice *s,
@@ -1606,8 +1487,11 @@ static int ni_ao_setup_MITE_dma(struct comedi_device *dev)
if (devpriv->is_611x || devpriv->is_6713) {
mite_prep_dma(devpriv->ao_mite_chan, 32, 32);
} else {
- /* doing 32 instead of 16 bit wide transfers from memory
- makes the mite do 32 bit pci transfers, doubling pci bandwidth. */
+ /*
+ * Doing 32 instead of 16 bit wide transfers from
+ * memory makes the mite do 32 bit pci transfers,
+ * doubling pci bandwidth.
+ */
mite_prep_dma(devpriv->ao_mite_chan, 16, 32);
}
mite_dma_arm(devpriv->ao_mite_chan);
@@ -1622,16 +1506,15 @@ static int ni_ao_setup_MITE_dma(struct comedi_device *dev)
#endif /* PCIDMA */
/*
- used for both cancel ioctl and board initialization
-
- this is pretty harsh for a cancel, but it works...
+ * used for both cancel ioctl and board initialization
+ *
+ * this is pretty harsh for a cancel, but it works...
*/
-
static int ni_ai_reset(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct ni_private *devpriv = dev->private;
- unsigned ai_personal;
- unsigned ai_out_ctrl;
+ unsigned int ai_personal;
+ unsigned int ai_out_ctrl;
ni_release_ai_mite_channel(dev);
/* ai configuration */
@@ -1736,12 +1619,12 @@ static void ni_m_series_load_channelgain_list(struct comedi_device *dev,
unsigned int chan, range, aref;
unsigned int i;
unsigned int dither;
- unsigned range_code;
+ unsigned int range_code;
ni_stc_writew(dev, 1, NISTC_CFG_MEM_CLR_REG);
if ((list[0] & CR_ALT_SOURCE)) {
- unsigned bypass_bits;
+ unsigned int bypass_bits;
chan = CR_CHAN(list[0]);
range = CR_RANGE(list[0]);
@@ -1760,7 +1643,7 @@ static void ni_m_series_load_channelgain_list(struct comedi_device *dev,
ni_writel(dev, 0, NI_M_CFG_BYPASS_FIFO_REG);
}
for (i = 0; i < n_chan; i++) {
- unsigned config_bits = 0;
+ unsigned int config_bits = 0;
chan = CR_CHAN(list[i]);
aref = CR_AREF(list[i]);
@@ -1842,8 +1725,8 @@ static void ni_load_channelgain_list(struct comedi_device *dev,
return;
}
if (n_chan == 1 && !devpriv->is_611x && !devpriv->is_6143) {
- if (devpriv->changain_state
- && devpriv->changain_spec == list[0]) {
+ if (devpriv->changain_state &&
+ devpriv->changain_spec == list[0]) {
/* ready to go. */
return;
}
@@ -1857,8 +1740,8 @@ static void ni_load_channelgain_list(struct comedi_device *dev,
/* Set up Calibration mode if required */
if (devpriv->is_6143) {
- if ((list[0] & CR_ALT_SOURCE)
- && !devpriv->ai_calib_source_enabled) {
+ if ((list[0] & CR_ALT_SOURCE) &&
+ !devpriv->ai_calib_source_enabled) {
/* Strobe Relay enable bit */
ni_writew(dev, devpriv->ai_calib_source |
NI6143_CALIB_CHAN_RELAY_ON,
@@ -1866,9 +1749,10 @@ static void ni_load_channelgain_list(struct comedi_device *dev,
ni_writew(dev, devpriv->ai_calib_source,
NI6143_CALIB_CHAN_REG);
devpriv->ai_calib_source_enabled = 1;
- msleep_interruptible(100); /* Allow relays to change */
- } else if (!(list[0] & CR_ALT_SOURCE)
- && devpriv->ai_calib_source_enabled) {
+ /* Allow relays to change */
+ msleep_interruptible(100);
+ } else if (!(list[0] & CR_ALT_SOURCE) &&
+ devpriv->ai_calib_source_enabled) {
/* Strobe Relay disable bit */
ni_writew(dev, devpriv->ai_calib_source |
NI6143_CALIB_CHAN_RELAY_OFF,
@@ -1876,7 +1760,8 @@ static void ni_load_channelgain_list(struct comedi_device *dev,
ni_writew(dev, devpriv->ai_calib_source,
NI6143_CALIB_CHAN_REG);
devpriv->ai_calib_source_enabled = 0;
- msleep_interruptible(100); /* Allow relays to change */
+ /* Allow relays to change */
+ msleep_interruptible(100);
}
}
@@ -1949,7 +1834,7 @@ static int ni_ai_insn_read(struct comedi_device *dev,
struct ni_private *devpriv = dev->private;
unsigned int mask = (s->maxdata + 1) >> 1;
int i, n;
- unsigned signbits;
+ unsigned int signbits;
unsigned int d;
unsigned long dl;
@@ -1997,7 +1882,11 @@ static int ni_ai_insn_read(struct comedi_device *dev,
ni_stc_writew(dev, NISTC_AI_CMD1_CONVERT_PULSE,
NISTC_AI_CMD1_REG);
- /* The 6143 has 32-bit FIFOs. You need to strobe a bit to move a single 16bit stranded sample into the FIFO */
+ /*
+ * The 6143 has 32-bit FIFOs. You need to strobe a
+ * bit to move a single 16bit stranded sample into
+ * the FIFO.
+ */
dl = 0;
for (i = 0; i < NI_TIMEOUT; i++) {
if (ni_readl(dev, NI6143_AI_FIFO_STATUS_REG) &
@@ -2035,7 +1924,8 @@ static int ni_ai_insn_read(struct comedi_device *dev,
data[n] = dl;
} else {
d = ni_readw(dev, NI_E_AI_FIFO_DATA_REG);
- d += signbits; /* subtle: needs to be short addition */
+ /* subtle: needs to be short addition */
+ d += signbits;
data[n] = d;
}
}
@@ -2043,8 +1933,8 @@ static int ni_ai_insn_read(struct comedi_device *dev,
return insn->n;
}
-static int ni_ns_to_timer(const struct comedi_device *dev, unsigned nanosec,
- unsigned int flags)
+static int ni_ns_to_timer(const struct comedi_device *dev,
+ unsigned int nanosec, unsigned int flags)
{
struct ni_private *devpriv = dev->private;
int divider;
@@ -2064,14 +1954,14 @@ static int ni_ns_to_timer(const struct comedi_device *dev, unsigned nanosec,
return divider - 1;
}
-static unsigned ni_timer_to_ns(const struct comedi_device *dev, int timer)
+static unsigned int ni_timer_to_ns(const struct comedi_device *dev, int timer)
{
struct ni_private *devpriv = dev->private;
return devpriv->clock_ns * (timer + 1);
}
-static void ni_cmd_set_mite_transfer(struct mite_dma_descriptor_ring *ring,
+static void ni_cmd_set_mite_transfer(struct mite_ring *ring,
struct comedi_subdevice *sdev,
const struct comedi_cmd *cmd,
unsigned int max_count) {
@@ -2102,8 +1992,8 @@ static void ni_cmd_set_mite_transfer(struct mite_dma_descriptor_ring *ring,
#endif
}
-static unsigned ni_min_ai_scan_period_ns(struct comedi_device *dev,
- unsigned num_channels)
+static unsigned int ni_min_ai_scan_period_ns(struct comedi_device *dev,
+ unsigned int num_channels)
{
const struct ni_board_struct *board = dev->board_ptr;
struct ni_private *devpriv = dev->private;
@@ -2294,7 +2184,7 @@ static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
int start_stop_select = 0;
unsigned int stop_count;
int interrupt_a_enable = 0;
- unsigned ai_trig;
+ unsigned int ai_trig;
if (dev->irq == 0) {
dev_err(dev->class_dev, "cannot run command without an irq\n");
@@ -2307,8 +2197,10 @@ static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
/* start configuration */
ni_stc_writew(dev, NISTC_RESET_AI_CFG_START, NISTC_RESET_REG);
- /* disable analog triggering for now, since it
- * interferes with the use of pfi0 */
+ /*
+ * Disable analog triggering for now, since it interferes
+ * with the use of pfi0.
+ */
devpriv->an_trig_etc_reg &= ~NISTC_ATRIG_ETC_ENA;
ni_stc_writew(dev, devpriv->an_trig_etc_reg, NISTC_ATRIG_ETC_REG);
@@ -2369,7 +2261,10 @@ static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
if (stop_count == 0) {
devpriv->ai_cmd2 |= NISTC_AI_CMD2_END_ON_EOS;
interrupt_a_enable |= NISTC_INTA_ENA_AI_STOP;
- /* this is required to get the last sample for chanlist_len > 1, not sure why */
+ /*
+ * This is required to get the last sample for
+ * chanlist_len > 1, not sure why.
+ */
if (cmd->chanlist_len > 1)
start_stop_select |= NISTC_AI_STOP_POLARITY |
NISTC_AI_STOP_EDGE;
@@ -2489,7 +2384,7 @@ static int ni_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
switch (devpriv->aimode) {
case AIMODE_HALF_FULL:
- /*generate FIFO interrupts and DMA requests on half-full */
+ /* FIFO interrupts and DMA requests on half-full */
#ifdef PCIDMA
ni_stc_writew(dev, NISTC_AI_MODE3_FIFO_MODE_HF_E,
NISTC_AI_MODE3_REG);
@@ -2880,9 +2775,11 @@ static int ni_ao_inttrig(struct comedi_device *dev,
if (trig_num != cmd->start_arg)
return -EINVAL;
- /* Null trig at beginning prevent ao start trigger from executing more than
- once per command (and doing things like trying to allocate the ao dma channel
- multiple times) */
+ /*
+	 * Null trig at beginning prevents ao start trigger from executing more
+ * than once per command (and doing things like trying to allocate the
+ * ao dma channel multiple times).
+ */
s->async->inttrig = NULL;
ni_set_bits(dev, NISTC_INTB_ENA_REG,
@@ -2951,7 +2848,7 @@ static void ni_ao_cmd_personalize(struct comedi_device *dev,
const struct comedi_cmd *cmd)
{
const struct ni_board_struct *board = dev->board_ptr;
- unsigned bits;
+ unsigned int bits;
ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG);
@@ -2999,6 +2896,7 @@ static void ni_ao_cmd_set_trigger(struct comedi_device *dev,
const struct comedi_cmd *cmd)
{
struct ni_private *devpriv = dev->private;
+ unsigned int trigsel;
ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG);
@@ -3012,39 +2910,20 @@ static void ni_ao_cmd_set_trigger(struct comedi_device *dev,
}
ni_stc_writew(dev, devpriv->ao_mode1, NISTC_AO_MODE1_REG);
- {
- unsigned int trigsel = devpriv->ao_trigger_select;
-
- switch (cmd->start_src) {
- case TRIG_INT:
- case TRIG_NOW:
- trigsel &= ~(NISTC_AO_TRIG_START1_POLARITY |
- NISTC_AO_TRIG_START1_SEL_MASK);
- trigsel |= NISTC_AO_TRIG_START1_EDGE |
- NISTC_AO_TRIG_START1_SYNC;
- break;
- case TRIG_EXT:
- trigsel = NISTC_AO_TRIG_START1_SEL(
- CR_CHAN(cmd->start_arg) + 1);
- if (cmd->start_arg & CR_INVERT)
- /*
- * 0=active high, 1=active low.
- * see daq-stc 3-24 (p186)
- */
- trigsel |= NISTC_AO_TRIG_START1_POLARITY;
- if (cmd->start_arg & CR_EDGE)
- /* 0=edge detection disabled, 1=enabled */
- trigsel |= NISTC_AO_TRIG_START1_EDGE;
- break;
- default:
- BUG();
- break;
- }
-
- devpriv->ao_trigger_select = trigsel;
- ni_stc_writew(dev, devpriv->ao_trigger_select,
- NISTC_AO_TRIG_SEL_REG);
+ if (cmd->start_src == TRIG_INT) {
+ trigsel = NISTC_AO_TRIG_START1_EDGE |
+ NISTC_AO_TRIG_START1_SYNC;
+ } else { /* TRIG_EXT */
+ trigsel = NISTC_AO_TRIG_START1_SEL(CR_CHAN(cmd->start_arg) + 1);
+ /* 0=active high, 1=active low. see daq-stc 3-24 (p186) */
+ if (cmd->start_arg & CR_INVERT)
+ trigsel |= NISTC_AO_TRIG_START1_POLARITY;
+ /* 0=edge detection disabled, 1=enabled */
+ if (cmd->start_arg & CR_EDGE)
+ trigsel |= NISTC_AO_TRIG_START1_EDGE;
}
+ ni_stc_writew(dev, trigsel, NISTC_AO_TRIG_SEL_REG);
+
/* AO_Delayed_START1 = 0, we do not support delayed start...yet */
/* sync */
@@ -3149,8 +3028,9 @@ static void ni_ao_cmd_set_update(struct comedi_device *dev,
NISTC_AO_MODE1_UPDATE_SRC_POLARITY
);
- switch (cmd->scan_begin_src) {
- case TRIG_TIMER:
+ if (cmd->scan_begin_src == TRIG_TIMER) {
+ unsigned int trigvar;
+
devpriv->ao_cmd2 &= ~NISTC_AO_CMD2_BC_GATE_ENA;
/*
@@ -3181,34 +3061,25 @@ static void ni_ao_cmd_set_update(struct comedi_device *dev,
* eseries/ni67xx and tMSeries.h for mseries.
*/
- {
- unsigned trigvar = ni_ns_to_timer(dev,
- cmd->scan_begin_arg,
- CMDF_ROUND_NEAREST);
+ trigvar = ni_ns_to_timer(dev, cmd->scan_begin_arg,
+ CMDF_ROUND_NEAREST);
- /*
- * Wait N TB3 ticks after the start trigger before
- * clocking(N must be >=2).
- */
- /* following line: 2-1 per STC */
- ni_stc_writel(dev, 1, NISTC_AO_UI_LOADA_REG);
- ni_stc_writew(dev, NISTC_AO_CMD1_UI_LOAD,
- NISTC_AO_CMD1_REG);
- /* following line: N-1 per STC */
- ni_stc_writel(dev, trigvar - 1, NISTC_AO_UI_LOADA_REG);
- }
- break;
- case TRIG_EXT:
+ /*
+ * Wait N TB3 ticks after the start trigger before
+ * clocking (N must be >=2).
+ */
+ /* following line: 2-1 per STC */
+ ni_stc_writel(dev, 1, NISTC_AO_UI_LOADA_REG);
+ ni_stc_writew(dev, NISTC_AO_CMD1_UI_LOAD, NISTC_AO_CMD1_REG);
+ /* following line: N-1 per STC */
+ ni_stc_writel(dev, trigvar - 1, NISTC_AO_UI_LOADA_REG);
+ } else { /* TRIG_EXT */
/* FIXME: assert scan_begin_arg != 0, ret failure otherwise */
devpriv->ao_cmd2 |= NISTC_AO_CMD2_BC_GATE_ENA;
devpriv->ao_mode1 |= NISTC_AO_MODE1_UPDATE_SRC(
CR_CHAN(cmd->scan_begin_arg));
if (cmd->scan_begin_arg & CR_INVERT)
devpriv->ao_mode1 |= NISTC_AO_MODE1_UPDATE_SRC_POLARITY;
- break;
- default:
- BUG();
- break;
}
ni_stc_writew(dev, devpriv->ao_cmd2, NISTC_AO_CMD2_REG);
@@ -3231,7 +3102,7 @@ static void ni_ao_cmd_set_channels(struct comedi_device *dev,
{
struct ni_private *devpriv = dev->private;
const struct comedi_cmd *cmd = &s->async->cmd;
- unsigned bits = 0;
+ unsigned int bits = 0;
ni_stc_writew(dev, NISTC_RESET_AO_CFG_START, NISTC_RESET_REG);
@@ -3474,7 +3345,6 @@ static int ni_ao_reset(struct comedi_device *dev, struct comedi_subdevice *s)
devpriv->ao_mode3 = NISTC_AO_MODE3_LAST_GATE_DISABLE;
else
devpriv->ao_mode3 = 0;
- devpriv->ao_trigger_select = 0;
ni_stc_writew(dev, 0, NISTC_AO_PERSONAL_REG);
ni_stc_writew(dev, 0, NISTC_AO_CMD1_REG);
@@ -3550,6 +3420,7 @@ static int ni_dio_insn_bits(struct comedi_device *dev,
return insn->n;
}
+#ifdef PCIDMA
static int ni_m_series_dio_insn_config(struct comedi_device *dev,
struct comedi_subdevice *s,
struct comedi_insn *insn,
@@ -3652,13 +3523,11 @@ static int ni_cdo_inttrig(struct comedi_device *dev,
unsigned int trig_num)
{
struct comedi_cmd *cmd = &s->async->cmd;
- const unsigned timeout = 1000;
+ const unsigned int timeout = 1000;
int retval = 0;
- unsigned i;
-#ifdef PCIDMA
+ unsigned int i;
struct ni_private *devpriv = dev->private;
unsigned long flags;
-#endif
if (trig_num != cmd->start_arg)
return -EINVAL;
@@ -3668,7 +3537,6 @@ static int ni_cdo_inttrig(struct comedi_device *dev,
/* read alloc the entire buffer */
comedi_buf_read_alloc(s, s->async->prealloc_bufsz);
-#ifdef PCIDMA
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
if (devpriv->cdo_mite_chan) {
mite_prep_dma(devpriv->cdo_mite_chan, 32, 32);
@@ -3680,7 +3548,7 @@ static int ni_cdo_inttrig(struct comedi_device *dev,
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
if (retval < 0)
return retval;
-#endif
+
/*
* XXX not sure what interrupt C group does
* wait for dma to fill output fifo
@@ -3690,7 +3558,7 @@ static int ni_cdo_inttrig(struct comedi_device *dev,
if (ni_readl(dev, NI_M_CDIO_STATUS_REG) &
NI_M_CDIO_STATUS_CDO_FIFO_FULL)
break;
- udelay(10);
+ usleep_range(10, 100);
}
if (i == timeout) {
dev_err(dev->class_dev, "dma failed to fill cdo fifo!\n");
@@ -3708,7 +3576,7 @@ static int ni_cdio_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
struct ni_private *devpriv = dev->private;
const struct comedi_cmd *cmd = &s->async->cmd;
- unsigned cdo_mode_bits;
+ unsigned int cdo_mode_bits;
int retval;
ni_writel(dev, NI_M_CDO_CMD_RESET, NI_M_CDIO_CMD_REG);
@@ -3759,28 +3627,14 @@ static int ni_cdio_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
static void handle_cdio_interrupt(struct comedi_device *dev)
{
struct ni_private *devpriv = dev->private;
- unsigned cdio_status;
+ unsigned int cdio_status;
struct comedi_subdevice *s = &dev->subdevices[NI_DIO_SUBDEV];
-#ifdef PCIDMA
unsigned long flags;
-#endif
- if (!devpriv->is_m_series)
- return;
-#ifdef PCIDMA
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
- if (devpriv->cdo_mite_chan) {
- unsigned cdo_mite_status =
- mite_get_status(devpriv->cdo_mite_chan);
- if (cdo_mite_status & CHSR_LINKC) {
- writel(CHOR_CLRLC,
- devpriv->mite->mite_io_addr +
- MITE_CHOR(devpriv->cdo_mite_chan->channel));
- }
- mite_sync_output_dma(devpriv->cdo_mite_chan, s);
- }
+ if (devpriv->cdo_mite_chan)
+ mite_ack_linkc(devpriv->cdo_mite_chan, s, true);
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags);
-#endif
cdio_status = ni_readl(dev, NI_M_CDIO_STATUS_REG);
if (cdio_status & NI_M_CDIO_STATUS_CDO_ERROR) {
@@ -3796,6 +3650,7 @@ static void handle_cdio_interrupt(struct comedi_device *dev)
}
comedi_handle_events(dev, s);
}
+#endif /* PCIDMA */
static int ni_serial_hw_readwrite8(struct comedi_device *dev,
struct comedi_subdevice *s,
@@ -3813,7 +3668,7 @@ static int ni_serial_hw_readwrite8(struct comedi_device *dev,
status1 = ni_stc_readw(dev, NISTC_STATUS1_REG);
if (status1 & NISTC_STATUS1_SERIO_IN_PROG) {
err = -EBUSY;
- goto Error;
+ goto error;
}
devpriv->dio_control |= NISTC_DIO_CTRL_HW_SER_START;
@@ -3829,7 +3684,7 @@ static int ni_serial_hw_readwrite8(struct comedi_device *dev,
dev_err(dev->class_dev,
"SPI serial I/O didn't finish in time!\n");
err = -ETIME;
- goto Error;
+ goto error;
}
}
@@ -3842,7 +3697,7 @@ static int ni_serial_hw_readwrite8(struct comedi_device *dev,
if (data_in)
*data_in = ni_stc_readw(dev, NISTC_DIO_SERIAL_IN_REG);
-Error:
+error:
ni_stc_writew(dev, devpriv->dio_control, NISTC_DIO_CTRL_REG);
return err;
@@ -3860,16 +3715,20 @@ static int ni_serial_sw_readwrite8(struct comedi_device *dev,
udelay((devpriv->serial_interval_ns + 999) / 1000);
for (mask = 0x80; mask; mask >>= 1) {
- /* Output current bit; note that we cannot touch s->state
- because it is a per-subdevice field, and serial is
- a separate subdevice from DIO. */
+ /*
+ * Output current bit; note that we cannot touch s->state
+ * because it is a per-subdevice field, and serial is
+ * a separate subdevice from DIO.
+ */
devpriv->dio_output &= ~NISTC_DIO_SDOUT;
if (data_out & mask)
devpriv->dio_output |= NISTC_DIO_SDOUT;
ni_stc_writew(dev, devpriv->dio_output, NISTC_DIO_OUT_REG);
- /* Assert SDCLK (active low, inverted), wait for half of
- the delay, deassert SDCLK, and wait for the other half. */
+ /*
+ * Assert SDCLK (active low, inverted), wait for half of
+ * the delay, deassert SDCLK, and wait for the other half.
+ */
devpriv->dio_control |= NISTC_DIO_SDCLK;
ni_stc_writew(dev, devpriv->dio_control, NISTC_DIO_CTRL_REG);
@@ -3897,7 +3756,7 @@ static int ni_serial_insn_config(struct comedi_device *dev,
unsigned int *data)
{
struct ni_private *devpriv = dev->private;
- unsigned clk_fout = devpriv->clock_and_fout;
+ unsigned int clk_fout = devpriv->clock_and_fout;
int err = insn->n;
unsigned char byte_out, byte_in = 0;
@@ -3916,8 +3775,10 @@ static int ni_serial_insn_config(struct comedi_device *dev,
data[1] = SERIAL_DISABLED;
devpriv->serial_interval_ns = data[1];
} else if (data[1] <= SERIAL_600NS) {
- /* Warning: this clock speed is too fast to reliably
- control SCXI. */
+ /*
+ * Warning: this clock speed is too fast to reliably
+ * control SCXI.
+ */
devpriv->dio_control &= ~NISTC_DIO_CTRL_HW_SER_TIMEBASE;
clk_fout |= NISTC_CLK_FOUT_SLOW_TIMEBASE;
clk_fout &= ~NISTC_CLK_FOUT_DIO_SER_OUT_DIV2;
@@ -3933,10 +3794,12 @@ static int ni_serial_insn_config(struct comedi_device *dev,
devpriv->dio_control |= NISTC_DIO_CTRL_HW_SER_TIMEBASE;
clk_fout |= NISTC_CLK_FOUT_SLOW_TIMEBASE |
NISTC_CLK_FOUT_DIO_SER_OUT_DIV2;
- /* Note: NISTC_CLK_FOUT_DIO_SER_OUT_DIV2 only affects
- 600ns/1.2us. If you turn divide_by_2 off with the
- slow clock, you will still get 10us, except then
- all your delays are wrong. */
+ /*
+ * Note: NISTC_CLK_FOUT_DIO_SER_OUT_DIV2 only affects
+ * 600ns/1.2us. If you turn divide_by_2 off with the
+ * slow clock, you will still get 10us, except then
+ * all your delays are wrong.
+ */
data[1] = SERIAL_10US;
devpriv->serial_interval_ns = data[1];
} else {
@@ -4046,15 +3909,11 @@ static unsigned int ni_gpct_to_stc_register(struct comedi_device *dev,
return regmap->mio_reg;
}
-static void ni_gpct_write_register(struct ni_gpct *counter, unsigned bits,
+static void ni_gpct_write_register(struct ni_gpct *counter, unsigned int bits,
enum ni_gpct_register reg)
{
struct comedi_device *dev = counter->counter_dev->dev;
unsigned int stc_register = ni_gpct_to_stc_register(dev, reg);
- static const unsigned gpct_interrupt_a_enable_mask =
- NISTC_INTA_ENA_G0_GATE | NISTC_INTA_ENA_G0_TC;
- static const unsigned gpct_interrupt_b_enable_mask =
- NISTC_INTB_ENA_G1_GATE | NISTC_INTB_ENA_G1_TC;
if (stc_register == 0)
return;
@@ -4082,25 +3941,22 @@ static void ni_gpct_write_register(struct ni_gpct *counter, unsigned bits,
/* 16 bit registers */
case NITIO_G0_INT_ENA:
- BUG_ON(bits & ~gpct_interrupt_a_enable_mask);
ni_set_bitfield(dev, stc_register,
- gpct_interrupt_a_enable_mask, bits);
+ NISTC_INTA_ENA_G0_GATE | NISTC_INTA_ENA_G0_TC,
+ bits);
break;
case NITIO_G1_INT_ENA:
- BUG_ON(bits & ~gpct_interrupt_b_enable_mask);
ni_set_bitfield(dev, stc_register,
- gpct_interrupt_b_enable_mask, bits);
+ NISTC_INTB_ENA_G1_GATE | NISTC_INTB_ENA_G1_TC,
+ bits);
break;
- case NITIO_G01_RESET:
- BUG_ON(bits & ~(NISTC_RESET_G0 | NISTC_RESET_G1));
- /* fall-through */
default:
ni_stc_writew(dev, bits, stc_register);
}
}
-static unsigned ni_gpct_read_register(struct ni_gpct *counter,
- enum ni_gpct_register reg)
+static unsigned int ni_gpct_read_register(struct ni_gpct *counter,
+ enum ni_gpct_register reg)
{
struct comedi_device *dev = counter->counter_dev->dev;
unsigned int stc_register = ni_gpct_to_stc_register(dev, reg);
@@ -4227,7 +4083,7 @@ static int ni_m_series_pwm_config(struct comedi_device *dev,
unsigned int *data)
{
struct ni_private *devpriv = dev->private;
- unsigned up_count, down_count;
+ unsigned int up_count, down_count;
switch (data[0]) {
case INSN_CONFIG_PWM_OUTPUT:
@@ -4287,7 +4143,7 @@ static int ni_6143_pwm_config(struct comedi_device *dev,
unsigned int *data)
{
struct ni_private *devpriv = dev->private;
- unsigned up_count, down_count;
+ unsigned int up_count, down_count;
switch (data[0]) {
case INSN_CONFIG_PWM_OUTPUT:
@@ -4343,13 +4199,13 @@ static int ni_6143_pwm_config(struct comedi_device *dev,
static int pack_mb88341(int addr, int val, int *bitstring)
{
/*
- Fujitsu MB 88341
- Note that address bits are reversed. Thanks to
- Ingo Keen for noticing this.
-
- Note also that the 88341 expects address values from
- 1-12, whereas we use channel numbers 0-11. The NI
- docs use 1-12, also, so be careful here.
+ * Fujitsu MB 88341
+ * Note that address bits are reversed. Thanks to
+ * Ingo Keen for noticing this.
+ *
+ * Note also that the 88341 expects address values from
+ * 1-12, whereas we use channel numbers 0-11. The NI
+ * docs use 1-12, also, so be careful here.
*/
addr++;
*bitstring = ((addr & 0x1) << 11) |
@@ -4495,12 +4351,12 @@ static void caldac_setup(struct comedi_device *dev, struct comedi_subdevice *s)
s->n_chan = n_chans;
if (diffbits) {
- unsigned int *maxdata_list;
+ unsigned int *maxdata_list = devpriv->caldac_maxdata_list;
if (n_chans > MAX_N_CALDACS)
dev_err(dev->class_dev,
"BUG! MAX_N_CALDACS too small\n");
- s->maxdata_list = maxdata_list = devpriv->caldac_maxdata_list;
+ s->maxdata_list = maxdata_list;
chan = 0;
for (i = 0; i < n_dacs; i++) {
type = board->caldac[i];
@@ -4574,8 +4430,8 @@ static int ni_m_series_eeprom_insn_read(struct comedi_device *dev,
return 1;
}
-static unsigned ni_old_get_pfi_routing(struct comedi_device *dev,
- unsigned chan)
+static unsigned int ni_old_get_pfi_routing(struct comedi_device *dev,
+ unsigned int chan)
{
/* pre-m-series boards have fixed signals on pfi pins */
switch (chan) {
@@ -4607,7 +4463,7 @@ static unsigned ni_old_get_pfi_routing(struct comedi_device *dev,
}
static int ni_old_set_pfi_routing(struct comedi_device *dev,
- unsigned chan, unsigned source)
+ unsigned int chan, unsigned int source)
{
/* pre-m-series boards have fixed signals on pfi pins */
if (source != ni_old_get_pfi_routing(dev, chan))
@@ -4615,21 +4471,21 @@ static int ni_old_set_pfi_routing(struct comedi_device *dev,
return 2;
}
-static unsigned ni_m_series_get_pfi_routing(struct comedi_device *dev,
- unsigned chan)
+static unsigned int ni_m_series_get_pfi_routing(struct comedi_device *dev,
+ unsigned int chan)
{
struct ni_private *devpriv = dev->private;
- const unsigned array_offset = chan / 3;
+ const unsigned int array_offset = chan / 3;
return NI_M_PFI_OUT_SEL_TO_SRC(chan,
devpriv->pfi_output_select_reg[array_offset]);
}
static int ni_m_series_set_pfi_routing(struct comedi_device *dev,
- unsigned chan, unsigned source)
+ unsigned int chan, unsigned int source)
{
struct ni_private *devpriv = dev->private;
- unsigned index = chan / 3;
+ unsigned int index = chan / 3;
unsigned short val = devpriv->pfi_output_select_reg[index];
if ((source & 0x1f) != source)
@@ -4643,7 +4499,8 @@ static int ni_m_series_set_pfi_routing(struct comedi_device *dev,
return 2;
}
-static unsigned ni_get_pfi_routing(struct comedi_device *dev, unsigned chan)
+static unsigned int ni_get_pfi_routing(struct comedi_device *dev,
+ unsigned int chan)
{
struct ni_private *devpriv = dev->private;
@@ -4652,8 +4509,8 @@ static unsigned ni_get_pfi_routing(struct comedi_device *dev, unsigned chan)
: ni_old_get_pfi_routing(dev, chan);
}
-static int ni_set_pfi_routing(struct comedi_device *dev, unsigned chan,
- unsigned source)
+static int ni_set_pfi_routing(struct comedi_device *dev,
+ unsigned int chan, unsigned int source)
{
struct ni_private *devpriv = dev->private;
@@ -4663,11 +4520,11 @@ static int ni_set_pfi_routing(struct comedi_device *dev, unsigned chan,
}
static int ni_config_filter(struct comedi_device *dev,
- unsigned pfi_channel,
+ unsigned int pfi_channel,
enum ni_pfi_filter_select filter)
{
struct ni_private *devpriv = dev->private;
- unsigned bits;
+ unsigned int bits;
if (!devpriv->is_m_series)
return -ENOTSUPP;
@@ -4818,9 +4675,12 @@ static int cs5529_ai_insn_read(struct comedi_device *dev,
unsigned int channel_select;
const unsigned int INTERNAL_REF = 0x1000;
- /* Set calibration adc source. Docs lie, reference select bits 8 to 11
+ /*
+ * Set calibration adc source. Docs lie, reference select bits 8 to 11
	 * do nothing. bit 12 seems to choose internal reference voltage, bit
- * 13 causes the adc input to go overrange (maybe reads external reference?) */
+ * 13 causes the adc input to go overrange (maybe reads external
+ * reference?)
+ */
if (insn->chanspec & CR_ALT_SOURCE)
channel_select = INTERNAL_REF;
else
@@ -4875,27 +4735,28 @@ static int init_cs5529(struct comedi_device *dev)
* Find best multiplier/divider to try and get the PLL running at 80 MHz
* given an arbitrary frequency input clock.
*/
-static int ni_mseries_get_pll_parameters(unsigned reference_period_ns,
- unsigned *freq_divider,
- unsigned *freq_multiplier,
- unsigned *actual_period_ns)
-{
- unsigned div;
- unsigned best_div = 1;
- unsigned mult;
- unsigned best_mult = 1;
- static const unsigned pico_per_nano = 1000;
-
- const unsigned reference_picosec = reference_period_ns * pico_per_nano;
- /* m-series wants the phased-locked loop to output 80MHz, which is divided by 4 to
- * 20 MHz for most timing clocks */
- static const unsigned target_picosec = 12500;
- static const unsigned fudge_factor_80_to_20Mhz = 4;
+static int ni_mseries_get_pll_parameters(unsigned int reference_period_ns,
+ unsigned int *freq_divider,
+ unsigned int *freq_multiplier,
+ unsigned int *actual_period_ns)
+{
+ unsigned int div;
+ unsigned int best_div = 1;
+ unsigned int mult;
+ unsigned int best_mult = 1;
+ static const unsigned int pico_per_nano = 1000;
+ const unsigned int reference_picosec = reference_period_ns *
+ pico_per_nano;
+ /*
+	 * m-series wants the phase-locked loop to output 80 MHz, which is
+ * divided by 4 to 20 MHz for most timing clocks
+ */
+ static const unsigned int target_picosec = 12500;
int best_period_picosec = 0;
for (div = 1; div <= NI_M_PLL_MAX_DIVISOR; ++div) {
for (mult = 1; mult <= NI_M_PLL_MAX_MULTIPLIER; ++mult) {
- unsigned new_period_ps =
+ unsigned int new_period_ps =
(reference_picosec * div) / mult;
if (abs(new_period_ps - target_picosec) <
abs(best_period_picosec - target_picosec)) {
@@ -4910,29 +4771,33 @@ static int ni_mseries_get_pll_parameters(unsigned reference_period_ns,
*freq_divider = best_div;
*freq_multiplier = best_mult;
- *actual_period_ns = DIV_ROUND_CLOSEST(best_period_picosec *
- fudge_factor_80_to_20Mhz,
+ /* return the actual period (* fudge factor for 80 to 20 MHz) */
+ *actual_period_ns = DIV_ROUND_CLOSEST(best_period_picosec * 4,
pico_per_nano);
return 0;
}
static int ni_mseries_set_pll_master_clock(struct comedi_device *dev,
- unsigned source, unsigned period_ns)
+ unsigned int source,
+ unsigned int period_ns)
{
struct ni_private *devpriv = dev->private;
- static const unsigned min_period_ns = 50;
- static const unsigned max_period_ns = 1000;
- static const unsigned timeout = 1000;
- unsigned pll_control_bits;
- unsigned freq_divider;
- unsigned freq_multiplier;
- unsigned rtsi;
- unsigned i;
+ static const unsigned int min_period_ns = 50;
+ static const unsigned int max_period_ns = 1000;
+ static const unsigned int timeout = 1000;
+ unsigned int pll_control_bits;
+ unsigned int freq_divider;
+ unsigned int freq_multiplier;
+ unsigned int rtsi;
+ unsigned int i;
int retval;
if (source == NI_MIO_PLL_PXI10_CLOCK)
period_ns = 100;
- /* these limits are somewhat arbitrary, but NI advertises 1 to 20MHz range so we'll use that */
+ /*
+ * These limits are somewhat arbitrary, but NI advertises 1 to 20MHz
+ * range so we'll use that.
+ */
if (period_ns < min_period_ns || period_ns > max_period_ns) {
dev_err(dev->class_dev,
"%s: you must specify an input clock frequency between %i and %i nanosec for the phased-lock loop\n",
@@ -4982,7 +4847,7 @@ static int ni_mseries_set_pll_master_clock(struct comedi_device *dev,
ni_writew(dev, pll_control_bits, NI_M_PLL_CTRL_REG);
devpriv->clock_source = source;
- /* it seems to typically take a few hundred microseconds for PLL to lock */
+ /* it takes a few hundred microseconds for PLL to lock */
for (i = 0; i < timeout; ++i) {
if (ni_readw(dev, NI_M_PLL_STATUS_REG) & NI_M_PLL_STATUS_LOCKED)
break;
@@ -4998,7 +4863,7 @@ static int ni_mseries_set_pll_master_clock(struct comedi_device *dev,
}
static int ni_set_master_clock(struct comedi_device *dev,
- unsigned source, unsigned period_ns)
+ unsigned int source, unsigned int period_ns)
{
struct ni_private *devpriv = dev->private;
@@ -5043,7 +4908,7 @@ static int ni_set_master_clock(struct comedi_device *dev,
}
static int ni_valid_rtsi_output_source(struct comedi_device *dev,
- unsigned chan, unsigned source)
+ unsigned int chan, unsigned int source)
{
struct ni_private *devpriv = dev->private;
@@ -5078,7 +4943,7 @@ static int ni_valid_rtsi_output_source(struct comedi_device *dev,
}
static int ni_set_rtsi_routing(struct comedi_device *dev,
- unsigned chan, unsigned src)
+ unsigned int chan, unsigned int src)
{
struct ni_private *devpriv = dev->private;
@@ -5098,7 +4963,8 @@ static int ni_set_rtsi_routing(struct comedi_device *dev,
return 2;
}
-static unsigned ni_get_rtsi_routing(struct comedi_device *dev, unsigned chan)
+static unsigned int ni_get_rtsi_routing(struct comedi_device *dev,
+ unsigned int chan)
{
struct ni_private *devpriv = dev->private;
@@ -5262,10 +5128,10 @@ static int ni_gpct_cancel(struct comedi_device *dev, struct comedi_subdevice *s)
static irqreturn_t ni_E_interrupt(int irq, void *d)
{
struct comedi_device *dev = d;
+ struct comedi_subdevice *s_ai = dev->read_subdev;
+ struct comedi_subdevice *s_ao = dev->write_subdev;
unsigned short a_status;
unsigned short b_status;
- unsigned int ai_mite_status = 0;
- unsigned int ao_mite_status = 0;
unsigned long flags;
#ifdef PCIDMA
struct ni_private *devpriv = dev->private;
@@ -5273,7 +5139,7 @@ static irqreturn_t ni_E_interrupt(int irq, void *d)
if (!dev->attached)
return IRQ_NONE;
- smp_mb(); /* make sure dev->attached is checked before handler does anything else. */
+ smp_mb(); /* make sure dev->attached is checked */
/* lock to avoid race with comedi_poll */
spin_lock_irqsave(&dev->spinlock, flags);
@@ -5284,34 +5150,33 @@ static irqreturn_t ni_E_interrupt(int irq, void *d)
unsigned long flags_too;
spin_lock_irqsave(&devpriv->mite_channel_lock, flags_too);
- if (devpriv->ai_mite_chan) {
- ai_mite_status = mite_get_status(devpriv->ai_mite_chan);
- if (ai_mite_status & CHSR_LINKC)
- writel(CHOR_CLRLC,
- devpriv->mite->mite_io_addr +
- MITE_CHOR(devpriv->
- ai_mite_chan->channel));
- }
- if (devpriv->ao_mite_chan) {
- ao_mite_status = mite_get_status(devpriv->ao_mite_chan);
- if (ao_mite_status & CHSR_LINKC)
- writel(CHOR_CLRLC,
- devpriv->mite->mite_io_addr +
- MITE_CHOR(devpriv->
- ao_mite_chan->channel));
- }
+ if (s_ai && devpriv->ai_mite_chan)
+ mite_ack_linkc(devpriv->ai_mite_chan, s_ai, false);
+ if (s_ao && devpriv->ao_mite_chan)
+ mite_ack_linkc(devpriv->ao_mite_chan, s_ao, false);
spin_unlock_irqrestore(&devpriv->mite_channel_lock, flags_too);
}
#endif
ack_a_interrupt(dev, a_status);
ack_b_interrupt(dev, b_status);
- if ((a_status & NISTC_AI_STATUS1_INTA) || (ai_mite_status & CHSR_INT))
- handle_a_interrupt(dev, a_status, ai_mite_status);
- if ((b_status & NISTC_AO_STATUS1_INTB) || (ao_mite_status & CHSR_INT))
- handle_b_interrupt(dev, b_status, ao_mite_status);
+ if (s_ai) {
+ if (a_status & NISTC_AI_STATUS1_INTA)
+ handle_a_interrupt(dev, s_ai, a_status);
+ /* handle any interrupt or dma events */
+ comedi_handle_events(dev, s_ai);
+ }
+ if (s_ao) {
+ if (b_status & NISTC_AO_STATUS1_INTB)
+ handle_b_interrupt(dev, s_ao, b_status);
+ /* handle any interrupt or dma events */
+ comedi_handle_events(dev, s_ao);
+ }
handle_gpct_interrupt(dev, 0);
handle_gpct_interrupt(dev, 1);
- handle_cdio_interrupt(dev);
+#ifdef PCIDMA
+ if (devpriv->is_m_series)
+ handle_cdio_interrupt(dev);
+#endif
spin_unlock_irqrestore(&dev->spinlock, flags);
return IRQ_HANDLED;
@@ -5333,7 +5198,7 @@ static int ni_alloc_private(struct comedi_device *dev)
}
static int ni_E_init(struct comedi_device *dev,
- unsigned interrupt_pin, unsigned irq_polarity)
+ unsigned int interrupt_pin, unsigned int irq_polarity)
{
const struct ni_board_struct *board = dev->board_ptr;
struct ni_private *devpriv = dev->private;
@@ -5450,6 +5315,7 @@ static int ni_E_init(struct comedi_device *dev,
s->maxdata = 1;
s->range_table = &range_digital;
if (devpriv->is_m_series) {
+#ifdef PCIDMA
s->subdev_flags |= SDF_LSAMPL;
s->insn_bits = ni_m_series_dio_insn_bits;
s->insn_config = ni_m_series_dio_insn_config;
@@ -5469,6 +5335,7 @@ static int ni_E_init(struct comedi_device *dev,
NI_M_CDI_CMD_RESET,
NI_M_CDIO_CMD_REG);
ni_writel(dev, s->io_bits, NI_M_DIO_DIR_REG);
+#endif /* PCIDMA */
} else {
s->insn_bits = ni_dio_insn_bits;
s->insn_config = ni_dio_insn_config;
@@ -5675,8 +5542,6 @@ static void mio_common_detach(struct comedi_device *dev)
{
struct ni_private *devpriv = dev->private;
- if (devpriv) {
- if (devpriv->counter_dev)
- ni_gpct_device_destroy(devpriv->counter_dev);
- }
+ if (devpriv)
+ ni_gpct_device_destroy(devpriv->counter_dev);
}
diff --git a/drivers/staging/comedi/drivers/ni_pcidio.c b/drivers/staging/comedi/drivers/ni_pcidio.c
index 7112c3fec8bb..02a532990979 100644
--- a/drivers/staging/comedi/drivers/ni_pcidio.c
+++ b/drivers/staging/comedi/drivers/ni_pcidio.c
@@ -284,12 +284,12 @@ static const struct nidio_board nidio_boards[] = {
};
struct nidio96_private {
- struct mite_struct *mite;
+ struct mite *mite;
int boardtype;
int dio;
unsigned short OpModeBits;
struct mite_channel *di_mite_chan;
- struct mite_dma_descriptor_ring *di_mite_ring;
+ struct mite_ring *di_mite_ring;
spinlock_t mite_channel_lock;
};
@@ -324,8 +324,6 @@ static void ni_pcidio_release_di_mite_channel(struct comedi_device *dev)
spin_lock_irqsave(&devpriv->mite_channel_lock, flags);
if (devpriv->di_mite_chan) {
- mite_dma_disarm(devpriv->di_mite_chan);
- mite_dma_reset(devpriv->di_mite_chan);
mite_release_channel(devpriv->di_mite_chan);
devpriv->di_mite_chan = NULL;
writeb(primary_DMAChannel_bits(0) |
@@ -370,7 +368,7 @@ static int ni_pcidio_poll(struct comedi_device *dev, struct comedi_subdevice *s)
spin_lock_irqsave(&dev->spinlock, irq_flags);
spin_lock(&devpriv->mite_channel_lock);
if (devpriv->di_mite_chan)
- mite_sync_input_dma(devpriv->di_mite_chan, s);
+ mite_sync_dma(devpriv->di_mite_chan, s);
spin_unlock(&devpriv->mite_channel_lock);
count = comedi_buf_n_bytes_ready(s);
spin_unlock_irqrestore(&dev->spinlock, irq_flags);
@@ -383,12 +381,10 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
struct nidio96_private *devpriv = dev->private;
struct comedi_subdevice *s = dev->read_subdev;
struct comedi_async *async = s->async;
- struct mite_struct *mite = devpriv->mite;
unsigned int auxdata;
int flags;
int status;
int work = 0;
- unsigned int m_status = 0;
/* interrupcions parasites */
if (!dev->attached) {
@@ -403,24 +399,9 @@ static irqreturn_t nidio_interrupt(int irq, void *d)
flags = readb(dev->mmio + Group_1_Flags);
spin_lock(&devpriv->mite_channel_lock);
- if (devpriv->di_mite_chan)
- m_status = mite_get_status(devpriv->di_mite_chan);
-
- if (m_status & CHSR_INT) {
- if (m_status & CHSR_LINKC) {
- writel(CHOR_CLRLC,
- mite->mite_io_addr +
- MITE_CHOR(devpriv->di_mite_chan->channel));
- mite_sync_input_dma(devpriv->di_mite_chan, s);
- /* XXX need to byteswap */
- }
- if (m_status & ~(CHSR_INT | CHSR_LINKC | CHSR_DONE | CHSR_DRDY |
- CHSR_DRQ1 | CHSR_MRDY)) {
- dev_dbg(dev->class_dev,
- "unknown mite interrupt, disabling IRQ\n");
- async->events |= COMEDI_CB_ERROR;
- disable_irq(dev->irq);
- }
+ if (devpriv->di_mite_chan) {
+ mite_ack_linkc(devpriv->di_mite_chan, s, false);
+ /* XXX need to byteswap sync'ed dma */
}
spin_unlock(&devpriv->mite_channel_lock);
@@ -916,14 +897,10 @@ static int nidio_auto_attach(struct comedi_device *dev,
spin_lock_init(&devpriv->mite_channel_lock);
- devpriv->mite = mite_alloc(pcidev);
+ devpriv->mite = mite_attach(dev, false); /* use win0 */
if (!devpriv->mite)
return -ENOMEM;
- ret = mite_setup(dev, devpriv->mite);
- if (ret < 0)
- return ret;
-
devpriv->di_mite_ring = mite_alloc_ring(devpriv->mite);
if (!devpriv->di_mite_ring)
return -ENOMEM;
diff --git a/drivers/staging/comedi/drivers/ni_pcimio.c b/drivers/staging/comedi/drivers/ni_pcimio.c
index 231e37d6b7c6..344aa343e5e1 100644
--- a/drivers/staging/comedi/drivers/ni_pcimio.c
+++ b/drivers/staging/comedi/drivers/ni_pcimio.c
@@ -1061,6 +1061,8 @@ static int pcimio_dio_change(struct comedi_device *dev,
static void m_series_init_eeprom_buffer(struct comedi_device *dev)
{
struct ni_private *devpriv = dev->private;
+ struct mite *mite = devpriv->mite;
+ resource_size_t daq_phys_addr;
static const int Start_Cal_EEPROM = 0x400;
static const unsigned window_size = 10;
static const int serial_number_eeprom_offset = 0x4;
@@ -1070,15 +1072,17 @@ static void m_series_init_eeprom_buffer(struct comedi_device *dev)
unsigned old_iodwcr1_bits;
int i;
- old_iodwbsr_bits = readl(devpriv->mite->mite_io_addr + MITE_IODWBSR);
- old_iodwbsr1_bits = readl(devpriv->mite->mite_io_addr + MITE_IODWBSR_1);
- old_iodwcr1_bits = readl(devpriv->mite->mite_io_addr + MITE_IODWCR_1);
- writel(0x0, devpriv->mite->mite_io_addr + MITE_IODWBSR);
- writel(((0x80 | window_size) | devpriv->mite->daq_phys_addr),
- devpriv->mite->mite_io_addr + MITE_IODWBSR_1);
- writel(0x1 | old_iodwcr1_bits,
- devpriv->mite->mite_io_addr + MITE_IODWCR_1);
- writel(0xf, devpriv->mite->mite_io_addr + 0x30);
+ /* IO Window 1 needs to be temporarily mapped to read the eeprom */
+ daq_phys_addr = pci_resource_start(mite->pcidev, 1);
+
+ old_iodwbsr_bits = readl(mite->mmio + MITE_IODWBSR);
+ old_iodwbsr1_bits = readl(mite->mmio + MITE_IODWBSR_1);
+ old_iodwcr1_bits = readl(mite->mmio + MITE_IODWCR_1);
+ writel(0x0, mite->mmio + MITE_IODWBSR);
+ writel(((0x80 | window_size) | daq_phys_addr),
+ mite->mmio + MITE_IODWBSR_1);
+ writel(0x1 | old_iodwcr1_bits, mite->mmio + MITE_IODWCR_1);
+ writel(0xf, mite->mmio + 0x30);
BUG_ON(serial_number_eeprom_length > sizeof(devpriv->serial_number));
for (i = 0; i < serial_number_eeprom_length; ++i) {
@@ -1090,10 +1094,10 @@ static void m_series_init_eeprom_buffer(struct comedi_device *dev)
for (i = 0; i < M_SERIES_EEPROM_SIZE; ++i)
devpriv->eeprom_buffer[i] = ni_readb(dev, Start_Cal_EEPROM + i);
- writel(old_iodwbsr1_bits, devpriv->mite->mite_io_addr + MITE_IODWBSR_1);
- writel(old_iodwbsr_bits, devpriv->mite->mite_io_addr + MITE_IODWBSR);
- writel(old_iodwcr1_bits, devpriv->mite->mite_io_addr + MITE_IODWCR_1);
- writel(0x0, devpriv->mite->mite_io_addr + 0x30);
+ writel(old_iodwbsr1_bits, mite->mmio + MITE_IODWBSR_1);
+ writel(old_iodwbsr_bits, mite->mmio + MITE_IODWBSR);
+ writel(old_iodwcr1_bits, mite->mmio + MITE_IODWCR_1);
+ writel(0x0, mite->mmio + 0x30);
}
static void init_6143(struct comedi_device *dev)
@@ -1168,7 +1172,7 @@ static int pcimio_auto_attach(struct comedi_device *dev,
return ret;
devpriv = dev->private;
- devpriv->mite = mite_alloc(pcidev);
+ devpriv->mite = mite_attach(dev, false); /* use win0 */
if (!devpriv->mite)
return -ENOMEM;
@@ -1193,10 +1197,6 @@ static int pcimio_auto_attach(struct comedi_device *dev,
if (board->reg_type == ni_reg_6713)
devpriv->is_6713 = 1;
- ret = mite_setup(dev, devpriv->mite);
- if (ret < 0)
- return ret;
-
devpriv->ai_mite_ring = mite_alloc_ring(devpriv->mite);
if (!devpriv->ai_mite_ring)
return -ENOMEM;
diff --git a/drivers/staging/comedi/drivers/ni_stc.h b/drivers/staging/comedi/drivers/ni_stc.h
index 1d5af25b92a8..1966519cb6e5 100644
--- a/drivers/staging/comedi/drivers/ni_stc.h
+++ b/drivers/staging/comedi/drivers/ni_stc.h
@@ -1,24 +1,23 @@
/*
- module/ni_stc.h
- Register descriptions for NI DAQ-STC chip
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 1998-9 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * Register descriptions for NI DAQ-STC chip
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 1998-9 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
- References:
- DAQ-STC Technical Reference Manual
+ * References:
+ * DAQ-STC Technical Reference Manual
*/
#ifndef _COMEDI_NI_STC_H
@@ -958,7 +957,7 @@ struct ni_board_struct {
unsigned int ao_maxdata;
int ao_fifo_depth;
const struct comedi_lrange *ao_range_table;
- unsigned ao_speed;
+ unsigned int ao_speed;
int reg_type;
unsigned int has_8255:1;
@@ -1002,12 +1001,11 @@ struct ni_private {
unsigned short ao_mode3;
unsigned short ao_cmd1;
unsigned short ao_cmd2;
- unsigned short ao_trigger_select;
struct ni_gpct_device *counter_dev;
unsigned short an_trig_etc_reg;
- unsigned ai_offset[512];
+ unsigned int ai_offset[512];
unsigned long serial_interval_ns;
unsigned char serial_hw_mode;
@@ -1025,24 +1023,24 @@ struct ni_private {
unsigned short g0_g1_select_reg;
unsigned short cdio_dma_select_reg;
- unsigned clock_ns;
- unsigned clock_source;
+ unsigned int clock_ns;
+ unsigned int clock_source;
unsigned short pwm_up_count;
unsigned short pwm_down_count;
unsigned short ai_fifo_buffer[0x2000];
- uint8_t eeprom_buffer[M_SERIES_EEPROM_SIZE];
+ u8 eeprom_buffer[M_SERIES_EEPROM_SIZE];
__be32 serial_number;
- struct mite_struct *mite;
+ struct mite *mite;
struct mite_channel *ai_mite_chan;
struct mite_channel *ao_mite_chan;
struct mite_channel *cdo_mite_chan;
- struct mite_dma_descriptor_ring *ai_mite_ring;
- struct mite_dma_descriptor_ring *ao_mite_ring;
- struct mite_dma_descriptor_ring *cdo_mite_ring;
- struct mite_dma_descriptor_ring *gpct_mite_ring[NUM_GPCT];
+ struct mite_ring *ai_mite_ring;
+ struct mite_ring *ao_mite_ring;
+ struct mite_ring *cdo_mite_ring;
+ struct mite_ring *gpct_mite_ring[NUM_GPCT];
/* ni_pcimio board type flags (based on the boardinfo reg_type) */
unsigned int is_m_series:1;
diff --git a/drivers/staging/comedi/drivers/ni_tio.c b/drivers/staging/comedi/drivers/ni_tio.c
index b74e44ec521a..7043eb0543f6 100644
--- a/drivers/staging/comedi/drivers/ni_tio.c
+++ b/drivers/staging/comedi/drivers/ni_tio.c
@@ -1,19 +1,18 @@
/*
- comedi/drivers/ni_tio.c
- Support for NI general purpose counters
-
- Copyright (C) 2006 Frank Mori Hess <fmhess@users.sourceforge.net>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * Support for NI general purpose counters
+ *
+ * Copyright (C) 2006 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
* Module: ni_tio
@@ -36,13 +35,10 @@
* DAQ 660x Register-Level Programmer Manual (NI 370505A-01)
* DAQ 6601/6602 User Manual (NI 322137B-01)
* 340934b.pdf DAQ-STC reference manual
+ *
+ * TODO: Support use of both banks X and Y
*/
-/*
-TODO:
- Support use of both banks X and Y
-*/
-
#include <linux/module.h>
#include <linux/slab.h>
@@ -115,20 +111,7 @@ TODO:
#define NI_660X_LOGIC_LOW_GATE2_SEL 0x1f
#define NI_660X_MAX_UP_DOWN_PIN 7
-static inline unsigned GI_ALT_SYNC(enum ni_gpct_variant variant)
-{
- switch (variant) {
- case ni_gpct_variant_e_series:
- default:
- return 0;
- case ni_gpct_variant_m_series:
- return GI_M_ALT_SYNC;
- case ni_gpct_variant_660x:
- return GI_660X_ALT_SYNC;
- }
-}
-
-static inline unsigned GI_PRESCALE_X2(enum ni_gpct_variant variant)
+static inline unsigned int GI_PRESCALE_X2(enum ni_gpct_variant variant)
{
switch (variant) {
case ni_gpct_variant_e_series:
@@ -141,7 +124,7 @@ static inline unsigned GI_PRESCALE_X2(enum ni_gpct_variant variant)
}
}
-static inline unsigned GI_PRESCALE_X8(enum ni_gpct_variant variant)
+static inline unsigned int GI_PRESCALE_X8(enum ni_gpct_variant variant)
{
switch (variant) {
case ni_gpct_variant_e_series:
@@ -154,19 +137,6 @@ static inline unsigned GI_PRESCALE_X8(enum ni_gpct_variant variant)
}
}
-static inline unsigned GI_HW_ARM_SEL_MASK(enum ni_gpct_variant variant)
-{
- switch (variant) {
- case ni_gpct_variant_e_series:
- default:
- return 0;
- case ni_gpct_variant_m_series:
- return GI_M_HW_ARM_SEL_MASK;
- case ni_gpct_variant_660x:
- return GI_660X_HW_ARM_SEL_MASK;
- }
-}
-
static bool ni_tio_has_gate2_registers(const struct ni_gpct_device *counter_dev)
{
switch (counter_dev->variant) {
@@ -179,17 +149,45 @@ static bool ni_tio_has_gate2_registers(const struct ni_gpct_device *counter_dev)
}
}
+/**
+ * ni_tio_write() - Write a TIO register using the driver provided callback.
+ * @counter: struct ni_gpct counter.
+ * @value: the value to write
+ * @reg: the register to write.
+ */
+void ni_tio_write(struct ni_gpct *counter, unsigned int value,
+ enum ni_gpct_register reg)
+{
+ if (reg < NITIO_NUM_REGS)
+ counter->counter_dev->write(counter, value, reg);
+}
+EXPORT_SYMBOL_GPL(ni_tio_write);
+
+/**
+ * ni_tio_read() - Read a TIO register using the driver provided callback.
+ * @counter: struct ni_gpct counter.
+ * @reg: the register to read.
+ */
+unsigned int ni_tio_read(struct ni_gpct *counter, enum ni_gpct_register reg)
+{
+ if (reg < NITIO_NUM_REGS)
+ return counter->counter_dev->read(counter, reg);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(ni_tio_read);
+
static void ni_tio_reset_count_and_disarm(struct ni_gpct *counter)
{
- unsigned cidx = counter->counter_index;
+ unsigned int cidx = counter->counter_index;
- write_register(counter, GI_RESET(cidx), NITIO_RESET_REG(cidx));
+ ni_tio_write(counter, GI_RESET(cidx), NITIO_RESET_REG(cidx));
}
-static uint64_t ni_tio_clock_period_ps(const struct ni_gpct *counter,
- unsigned generic_clock_source)
+static int ni_tio_clock_period_ps(const struct ni_gpct *counter,
+ unsigned int generic_clock_source,
+ u64 *period_ps)
{
- uint64_t clock_period_ps;
+ u64 clock_period_ps;
switch (generic_clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK) {
case NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS:
@@ -222,19 +220,80 @@ static uint64_t ni_tio_clock_period_ps(const struct ni_gpct *counter,
clock_period_ps *= 8;
break;
default:
- BUG();
- break;
+ return -EINVAL;
}
- return clock_period_ps;
+ *period_ps = clock_period_ps;
+ return 0;
}
-static unsigned ni_tio_clock_src_modifiers(const struct ni_gpct *counter)
+static void ni_tio_set_bits_transient(struct ni_gpct *counter,
+ enum ni_gpct_register reg,
+ unsigned int mask, unsigned int value,
+ unsigned int transient)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- const unsigned counting_mode_bits =
+ unsigned long flags;
+
+ if (reg < NITIO_NUM_REGS) {
+ spin_lock_irqsave(&counter_dev->regs_lock, flags);
+ counter_dev->regs[reg] &= ~mask;
+ counter_dev->regs[reg] |= (value & mask);
+ ni_tio_write(counter, counter_dev->regs[reg] | transient, reg);
+ mmiowb();
+ spin_unlock_irqrestore(&counter_dev->regs_lock, flags);
+ }
+}
+
+/**
+ * ni_tio_set_bits() - Safely write a counter register.
+ * @counter: struct ni_gpct counter.
+ * @reg: the register to write.
+ * @mask: the bits to change.
+ * @value: the new bits value.
+ *
+ * Used to write to, and update the software copy of, a register whose bits may
+ * be twiddled in interrupt context, or whose software copy may be read in
+ * interrupt context.
+ */
+void ni_tio_set_bits(struct ni_gpct *counter, enum ni_gpct_register reg,
+ unsigned int mask, unsigned int value)
+{
+ ni_tio_set_bits_transient(counter, reg, mask, value, 0x0);
+}
+EXPORT_SYMBOL_GPL(ni_tio_set_bits);
+
+/**
+ * ni_tio_get_soft_copy() - Safely read the software copy of a counter register.
+ * @counter: struct ni_gpct counter.
+ * @reg: the register to read.
+ *
+ * Used to get the software copy of a register whose bits might be modified
+ * in interrupt context, or whose software copy might need to be read in
+ * interrupt context.
+ */
+unsigned int ni_tio_get_soft_copy(const struct ni_gpct *counter,
+ enum ni_gpct_register reg)
+{
+ struct ni_gpct_device *counter_dev = counter->counter_dev;
+ unsigned int value = 0;
+ unsigned long flags;
+
+ if (reg < NITIO_NUM_REGS) {
+ spin_lock_irqsave(&counter_dev->regs_lock, flags);
+ value = counter_dev->regs[reg];
+ spin_unlock_irqrestore(&counter_dev->regs_lock, flags);
+ }
+ return value;
+}
+EXPORT_SYMBOL_GPL(ni_tio_get_soft_copy);
+
+static unsigned int ni_tio_clock_src_modifiers(const struct ni_gpct *counter)
+{
+ struct ni_gpct_device *counter_dev = counter->counter_dev;
+ unsigned int cidx = counter->counter_index;
+ unsigned int counting_mode_bits =
ni_tio_get_soft_copy(counter, NITIO_CNT_MODE_REG(cidx));
- unsigned bits = 0;
+ unsigned int bits = 0;
if (ni_tio_get_soft_copy(counter, NITIO_INPUT_SEL_REG(cidx)) &
GI_SRC_POL_INVERT)
@@ -246,14 +305,15 @@ static unsigned ni_tio_clock_src_modifiers(const struct ni_gpct *counter)
return bits;
}
-static unsigned ni_m_series_clock_src_select(const struct ni_gpct *counter)
+static int ni_m_series_clock_src_select(const struct ni_gpct *counter,
+ unsigned int *clk_src)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- const unsigned second_gate_reg = NITIO_GATE2_REG(cidx);
- unsigned clock_source = 0;
- unsigned src;
- unsigned i;
+ unsigned int cidx = counter->counter_index;
+ unsigned int second_gate_reg = NITIO_GATE2_REG(cidx);
+ unsigned int clock_source = 0;
+ unsigned int src;
+ unsigned int i;
src = GI_BITS_TO_SRC(ni_tio_get_soft_copy(counter,
NITIO_INPUT_SEL_REG(cidx)));
@@ -304,19 +364,20 @@ static unsigned ni_m_series_clock_src_select(const struct ni_gpct *counter)
}
if (i <= NI_M_MAX_PFI_CHAN)
break;
- BUG();
- break;
+ return -EINVAL;
}
clock_source |= ni_tio_clock_src_modifiers(counter);
- return clock_source;
+ *clk_src = clock_source;
+ return 0;
}
-static unsigned ni_660x_clock_src_select(const struct ni_gpct *counter)
+static int ni_660x_clock_src_select(const struct ni_gpct *counter,
+ unsigned int *clk_src)
{
- unsigned clock_source = 0;
- unsigned cidx = counter->counter_index;
- unsigned src;
- unsigned i;
+ unsigned int clock_source = 0;
+ unsigned int cidx = counter->counter_index;
+ unsigned int src;
+ unsigned int i;
src = GI_BITS_TO_SRC(ni_tio_get_soft_copy(counter,
NITIO_INPUT_SEL_REG(cidx)));
@@ -361,78 +422,88 @@ static unsigned ni_660x_clock_src_select(const struct ni_gpct *counter)
}
if (i <= NI_660X_MAX_SRC_PIN)
break;
- BUG();
- break;
+ return -EINVAL;
}
clock_source |= ni_tio_clock_src_modifiers(counter);
- return clock_source;
+ *clk_src = clock_source;
+ return 0;
}
-static unsigned ni_tio_generic_clock_src_select(const struct ni_gpct *counter)
+static int ni_tio_generic_clock_src_select(const struct ni_gpct *counter,
+ unsigned int *clk_src)
{
switch (counter->counter_dev->variant) {
case ni_gpct_variant_e_series:
case ni_gpct_variant_m_series:
default:
- return ni_m_series_clock_src_select(counter);
+ return ni_m_series_clock_src_select(counter, clk_src);
case ni_gpct_variant_660x:
- return ni_660x_clock_src_select(counter);
+ return ni_660x_clock_src_select(counter, clk_src);
}
}
-static void ni_tio_set_sync_mode(struct ni_gpct *counter, int force_alt_sync)
+static void ni_tio_set_sync_mode(struct ni_gpct *counter)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- const unsigned counting_mode_reg = NITIO_CNT_MODE_REG(cidx);
- static const uint64_t min_normal_sync_period_ps = 25000;
- unsigned mode;
- uint64_t clock_period_ps;
-
- if (ni_tio_counting_mode_registers_present(counter_dev) == 0)
+ unsigned int cidx = counter->counter_index;
+ static const u64 min_normal_sync_period_ps = 25000;
+ unsigned int mask = 0;
+ unsigned int bits = 0;
+ unsigned int reg;
+ unsigned int mode;
+ unsigned int clk_src;
+ u64 ps;
+ bool force_alt_sync;
+
+ /* only m series and 660x variants have counting mode registers */
+ switch (counter_dev->variant) {
+ case ni_gpct_variant_e_series:
+ default:
return;
+ case ni_gpct_variant_m_series:
+ mask = GI_M_ALT_SYNC;
+ break;
+ case ni_gpct_variant_660x:
+ mask = GI_660X_ALT_SYNC;
+ break;
+ }
- mode = ni_tio_get_soft_copy(counter, counting_mode_reg);
+ reg = NITIO_CNT_MODE_REG(cidx);
+ mode = ni_tio_get_soft_copy(counter, reg);
switch (mode & GI_CNT_MODE_MASK) {
case GI_CNT_MODE_QUADX1:
case GI_CNT_MODE_QUADX2:
case GI_CNT_MODE_QUADX4:
case GI_CNT_MODE_SYNC_SRC:
- force_alt_sync = 1;
+ force_alt_sync = true;
break;
default:
+ force_alt_sync = false;
break;
}
- clock_period_ps = ni_tio_clock_period_ps(counter,
- ni_tio_generic_clock_src_select(counter));
+ ni_tio_generic_clock_src_select(counter, &clk_src);
+ ni_tio_clock_period_ps(counter, clk_src, &ps);
/*
* It's not clear what we should do if clock_period is unknown, so we
- * are not using the alt sync bit in that case, but allow the caller
- * to decide by using the force_alt_sync parameter.
+ * are not using the alt sync bit in that case.
*/
- if (force_alt_sync ||
- (clock_period_ps && clock_period_ps < min_normal_sync_period_ps)) {
- ni_tio_set_bits(counter, counting_mode_reg,
- GI_ALT_SYNC(counter_dev->variant),
- GI_ALT_SYNC(counter_dev->variant));
- } else {
- ni_tio_set_bits(counter, counting_mode_reg,
- GI_ALT_SYNC(counter_dev->variant),
- 0x0);
- }
+ if (force_alt_sync || (ps && ps < min_normal_sync_period_ps))
+ bits = mask;
+
+ ni_tio_set_bits(counter, reg, mask, bits);
}
-static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned mode)
+static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned int mode)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- unsigned mode_reg_mask;
- unsigned mode_reg_values;
- unsigned input_select_bits = 0;
+ unsigned int cidx = counter->counter_index;
+ unsigned int mode_reg_mask;
+ unsigned int mode_reg_values;
+ unsigned int input_select_bits = 0;
/* these bits map directly on to the mode register */
- static const unsigned mode_reg_direct_mask =
+ static const unsigned int mode_reg_direct_mask =
NI_GPCT_GATE_ON_BOTH_EDGES_BIT | NI_GPCT_EDGE_GATE_MODE_MASK |
NI_GPCT_STOP_MODE_MASK | NI_GPCT_OUTPUT_MODE_MASK |
NI_GPCT_HARDWARE_DISARM_MASK | NI_GPCT_LOADING_ON_TC_BIT |
@@ -458,7 +529,7 @@ static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned mode)
mode_reg_mask, mode_reg_values);
if (ni_tio_counting_mode_registers_present(counter_dev)) {
- unsigned bits = 0;
+ unsigned int bits = 0;
bits |= GI_CNT_MODE(mode >> NI_GPCT_COUNTING_MODE_SHIFT);
bits |= GI_INDEX_PHASE((mode >> NI_GPCT_INDEX_PHASE_BITSHIFT));
@@ -467,7 +538,7 @@ static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned mode)
ni_tio_set_bits(counter, NITIO_CNT_MODE_REG(cidx),
GI_CNT_MODE_MASK | GI_INDEX_PHASE_MASK |
GI_INDEX_MODE, bits);
- ni_tio_set_sync_mode(counter, 0);
+ ni_tio_set_sync_mode(counter);
}
ni_tio_set_bits(counter, NITIO_CMD_REG(cidx), GI_CNT_DIR_MASK,
@@ -484,65 +555,68 @@ static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned mode)
return 0;
}
-int ni_tio_arm(struct ni_gpct *counter, int arm, unsigned start_trigger)
+int ni_tio_arm(struct ni_gpct *counter, bool arm, unsigned int start_trigger)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- unsigned command_transient_bits = 0;
+ unsigned int cidx = counter->counter_index;
+ unsigned int transient_bits = 0;
if (arm) {
+ unsigned int mask = 0;
+ unsigned int bits = 0;
+
+ /* only m series and 660x have counting mode registers */
+ switch (counter_dev->variant) {
+ case ni_gpct_variant_e_series:
+ default:
+ break;
+ case ni_gpct_variant_m_series:
+ mask = GI_M_HW_ARM_SEL_MASK;
+ break;
+ case ni_gpct_variant_660x:
+ mask = GI_660X_HW_ARM_SEL_MASK;
+ break;
+ }
+
switch (start_trigger) {
case NI_GPCT_ARM_IMMEDIATE:
- command_transient_bits |= GI_ARM;
+ transient_bits |= GI_ARM;
break;
case NI_GPCT_ARM_PAIRED_IMMEDIATE:
- command_transient_bits |= GI_ARM | GI_ARM_COPY;
+ transient_bits |= GI_ARM | GI_ARM_COPY;
break;
default:
+			/*
+			 * for m series and 660x, pass through the least
+			 * significant bits so we can figure out which
+			 * select was used later
+			 */
+ if (mask && (start_trigger & NI_GPCT_ARM_UNKNOWN)) {
+ bits |= GI_HW_ARM_ENA |
+ (GI_HW_ARM_SEL(start_trigger) & mask);
+ } else {
+ return -EINVAL;
+ }
break;
}
- if (ni_tio_counting_mode_registers_present(counter_dev)) {
- unsigned bits = 0;
- unsigned sel_mask;
- sel_mask = GI_HW_ARM_SEL_MASK(counter_dev->variant);
-
- switch (start_trigger) {
- case NI_GPCT_ARM_IMMEDIATE:
- case NI_GPCT_ARM_PAIRED_IMMEDIATE:
- break;
- default:
- if (start_trigger & NI_GPCT_ARM_UNKNOWN) {
- /*
- * pass-through the least significant
- * bits so we can figure out what
- * select later
- */
- bits |= GI_HW_ARM_ENA |
- (GI_HW_ARM_SEL(start_trigger) &
- sel_mask);
- } else {
- return -EINVAL;
- }
- break;
- }
+ if (mask)
ni_tio_set_bits(counter, NITIO_CNT_MODE_REG(cidx),
- GI_HW_ARM_ENA | sel_mask, bits);
- }
+ GI_HW_ARM_ENA | mask, bits);
} else {
- command_transient_bits |= GI_DISARM;
+ transient_bits |= GI_DISARM;
}
ni_tio_set_bits_transient(counter, NITIO_CMD_REG(cidx),
- 0, 0, command_transient_bits);
+ 0, 0, transient_bits);
return 0;
}
EXPORT_SYMBOL_GPL(ni_tio_arm);
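A minimal caller sketch (not part of the patch), consistent with the ni_tiocmd.c call sites later in this diff: ni_tio_arm() now takes a bool and reports unsupported start triggers with a negative errno instead of calling BUG(), so callers should propagate the return value.

static int ni_tio_example_arm_now(struct ni_gpct *counter)
{
	int ret;

	/* arm immediately; -EINVAL is returned for a bad start trigger */
	ret = ni_tio_arm(counter, true, NI_GPCT_ARM_IMMEDIATE);
	if (ret)
		return ret;
	return 0;
}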
-static unsigned ni_660x_clk_src(unsigned int clock_source)
+static int ni_660x_clk_src(unsigned int clock_source, unsigned int *bits)
{
- unsigned clk_src = clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK;
- unsigned ni_660x_clock;
- unsigned i;
+ unsigned int clk_src = clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK;
+ unsigned int ni_660x_clock;
+ unsigned int i;
switch (clk_src) {
case NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS:
@@ -583,18 +657,17 @@ static unsigned ni_660x_clk_src(unsigned int clock_source)
}
if (i <= NI_660X_MAX_SRC_PIN)
break;
- ni_660x_clock = 0;
- BUG();
- break;
+ return -EINVAL;
}
- return GI_SRC_SEL(ni_660x_clock);
+ *bits = GI_SRC_SEL(ni_660x_clock);
+ return 0;
}
-static unsigned ni_m_clk_src(unsigned int clock_source)
+static int ni_m_clk_src(unsigned int clock_source, unsigned int *bits)
{
- unsigned clk_src = clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK;
- unsigned ni_m_series_clock;
- unsigned i;
+ unsigned int clk_src = clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK;
+ unsigned int ni_m_series_clock;
+ unsigned int i;
switch (clk_src) {
case NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS:
@@ -641,21 +714,18 @@ static unsigned ni_m_clk_src(unsigned int clock_source)
}
if (i <= NI_M_MAX_PFI_CHAN)
break;
- pr_err("invalid clock source 0x%lx\n",
- (unsigned long)clock_source);
- BUG();
- ni_m_series_clock = 0;
- break;
+ return -EINVAL;
}
- return GI_SRC_SEL(ni_m_series_clock);
+ *bits = GI_SRC_SEL(ni_m_series_clock);
+ return 0;
};
static void ni_tio_set_source_subselect(struct ni_gpct *counter,
unsigned int clock_source)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- const unsigned second_gate_reg = NITIO_GATE2_REG(cidx);
+ unsigned int cidx = counter->counter_index;
+ unsigned int second_gate_reg = NITIO_GATE2_REG(cidx);
if (counter_dev->variant != ni_gpct_variant_m_series)
return;
@@ -674,8 +744,8 @@ static void ni_tio_set_source_subselect(struct ni_gpct *counter,
default:
return;
}
- write_register(counter, counter_dev->regs[second_gate_reg],
- second_gate_reg);
+ ni_tio_write(counter, counter_dev->regs[second_gate_reg],
+ second_gate_reg);
}
static int ni_tio_set_clock_src(struct ni_gpct *counter,
@@ -683,20 +753,28 @@ static int ni_tio_set_clock_src(struct ni_gpct *counter,
unsigned int period_ns)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- unsigned bits = 0;
+ unsigned int cidx = counter->counter_index;
+ unsigned int bits = 0;
+ int ret;
- /* FIXME: validate clock source */
switch (counter_dev->variant) {
case ni_gpct_variant_660x:
- bits |= ni_660x_clk_src(clock_source);
+ ret = ni_660x_clk_src(clock_source, &bits);
break;
case ni_gpct_variant_e_series:
case ni_gpct_variant_m_series:
default:
- bits |= ni_m_clk_src(clock_source);
+ ret = ni_m_clk_src(clock_source, &bits);
break;
}
+ if (ret) {
+ struct comedi_device *dev = counter_dev->dev;
+
+ dev_err(dev->class_dev, "invalid clock source 0x%x\n",
+ clock_source);
+ return ret;
+ }
+
if (clock_source & NI_GPCT_INVERT_CLOCK_SRC_BIT)
bits |= GI_SRC_POL_INVERT;
ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx),
@@ -722,28 +800,34 @@ static int ni_tio_set_clock_src(struct ni_gpct *counter,
GI_PRESCALE_X8(counter_dev->variant), bits);
}
counter->clock_period_ps = period_ns * 1000;
- ni_tio_set_sync_mode(counter, 0);
+ ni_tio_set_sync_mode(counter);
return 0;
}
-static void ni_tio_get_clock_src(struct ni_gpct *counter,
- unsigned int *clock_source,
- unsigned int *period_ns)
+static int ni_tio_get_clock_src(struct ni_gpct *counter,
+ unsigned int *clock_source,
+ unsigned int *period_ns)
{
- uint64_t temp64;
-
- *clock_source = ni_tio_generic_clock_src_select(counter);
- temp64 = ni_tio_clock_period_ps(counter, *clock_source);
+ u64 temp64;
+ int ret;
+
+ ret = ni_tio_generic_clock_src_select(counter, clock_source);
+ if (ret)
+ return ret;
+ ret = ni_tio_clock_period_ps(counter, *clock_source, &temp64);
+ if (ret)
+ return ret;
do_div(temp64, 1000); /* ps to ns */
*period_ns = temp64;
+ return 0;
}
static int ni_660x_set_gate(struct ni_gpct *counter, unsigned int gate_source)
{
unsigned int chan = CR_CHAN(gate_source);
- unsigned cidx = counter->counter_index;
- unsigned gate_sel;
- unsigned i;
+ unsigned int cidx = counter->counter_index;
+ unsigned int gate_sel;
+ unsigned int i;
switch (chan) {
case NI_GPCT_NEXT_SOURCE_GATE_SELECT:
@@ -782,9 +866,9 @@ static int ni_660x_set_gate(struct ni_gpct *counter, unsigned int gate_source)
static int ni_m_set_gate(struct ni_gpct *counter, unsigned int gate_source)
{
unsigned int chan = CR_CHAN(gate_source);
- unsigned cidx = counter->counter_index;
- unsigned gate_sel;
- unsigned i;
+ unsigned int cidx = counter->counter_index;
+ unsigned int gate_sel;
+ unsigned int i;
switch (chan) {
case NI_GPCT_TIMESTAMP_MUX_GATE_SELECT:
@@ -824,11 +908,11 @@ static int ni_m_set_gate(struct ni_gpct *counter, unsigned int gate_source)
static int ni_660x_set_gate2(struct ni_gpct *counter, unsigned int gate_source)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
+ unsigned int cidx = counter->counter_index;
unsigned int chan = CR_CHAN(gate_source);
- unsigned gate2_reg = NITIO_GATE2_REG(cidx);
- unsigned gate2_sel;
- unsigned i;
+ unsigned int gate2_reg = NITIO_GATE2_REG(cidx);
+ unsigned int gate2_sel;
+ unsigned int i;
switch (chan) {
case NI_GPCT_SOURCE_PIN_i_GATE_SELECT:
@@ -863,17 +947,17 @@ static int ni_660x_set_gate2(struct ni_gpct *counter, unsigned int gate_source)
counter_dev->regs[gate2_reg] |= GI_GATE2_MODE;
counter_dev->regs[gate2_reg] &= ~GI_GATE2_SEL_MASK;
counter_dev->regs[gate2_reg] |= GI_GATE2_SEL(gate2_sel);
- write_register(counter, counter_dev->regs[gate2_reg], gate2_reg);
+ ni_tio_write(counter, counter_dev->regs[gate2_reg], gate2_reg);
return 0;
}
static int ni_m_set_gate2(struct ni_gpct *counter, unsigned int gate_source)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
+ unsigned int cidx = counter->counter_index;
unsigned int chan = CR_CHAN(gate_source);
- unsigned gate2_reg = NITIO_GATE2_REG(cidx);
- unsigned gate2_sel;
+ unsigned int gate2_reg = NITIO_GATE2_REG(cidx);
+ unsigned int gate2_sel;
/*
* FIXME: We don't know what the m-series second gate codes are,
@@ -887,20 +971,20 @@ static int ni_m_set_gate2(struct ni_gpct *counter, unsigned int gate_source)
counter_dev->regs[gate2_reg] |= GI_GATE2_MODE;
counter_dev->regs[gate2_reg] &= ~GI_GATE2_SEL_MASK;
counter_dev->regs[gate2_reg] |= GI_GATE2_SEL(gate2_sel);
- write_register(counter, counter_dev->regs[gate2_reg], gate2_reg);
+ ni_tio_write(counter, counter_dev->regs[gate2_reg], gate2_reg);
return 0;
}
-int ni_tio_set_gate_src(struct ni_gpct *counter, unsigned gate_index,
- unsigned int gate_source)
+int ni_tio_set_gate_src(struct ni_gpct *counter,
+ unsigned int gate, unsigned int src)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- unsigned int chan = CR_CHAN(gate_source);
- unsigned gate2_reg = NITIO_GATE2_REG(cidx);
- unsigned mode = 0;
+ unsigned int cidx = counter->counter_index;
+ unsigned int chan = CR_CHAN(src);
+ unsigned int gate2_reg = NITIO_GATE2_REG(cidx);
+ unsigned int mode = 0;
- switch (gate_index) {
+ switch (gate) {
case 0:
if (chan == NI_GPCT_DISABLED_GATE_SELECT) {
ni_tio_set_bits(counter, NITIO_MODE_REG(cidx),
@@ -908,9 +992,9 @@ int ni_tio_set_gate_src(struct ni_gpct *counter, unsigned gate_index,
GI_GATING_DISABLED);
return 0;
}
- if (gate_source & CR_INVERT)
+ if (src & CR_INVERT)
mode |= GI_GATE_POL_INVERT;
- if (gate_source & CR_EDGE)
+ if (src & CR_EDGE)
mode |= GI_RISING_EDGE_GATING;
else
mode |= GI_LEVEL_GATING;
@@ -921,9 +1005,9 @@ int ni_tio_set_gate_src(struct ni_gpct *counter, unsigned gate_index,
case ni_gpct_variant_e_series:
case ni_gpct_variant_m_series:
default:
- return ni_m_set_gate(counter, gate_source);
+ return ni_m_set_gate(counter, src);
case ni_gpct_variant_660x:
- return ni_660x_set_gate(counter, gate_source);
+ return ni_660x_set_gate(counter, src);
}
break;
case 1:
@@ -932,22 +1016,21 @@ int ni_tio_set_gate_src(struct ni_gpct *counter, unsigned gate_index,
if (chan == NI_GPCT_DISABLED_GATE_SELECT) {
counter_dev->regs[gate2_reg] &= ~GI_GATE2_MODE;
- write_register(counter, counter_dev->regs[gate2_reg],
- gate2_reg);
+ ni_tio_write(counter, counter_dev->regs[gate2_reg],
+ gate2_reg);
return 0;
}
- if (gate_source & CR_INVERT)
+ if (src & CR_INVERT)
counter_dev->regs[gate2_reg] |= GI_GATE2_POL_INVERT;
else
counter_dev->regs[gate2_reg] &= ~GI_GATE2_POL_INVERT;
switch (counter_dev->variant) {
case ni_gpct_variant_m_series:
- return ni_m_set_gate2(counter, gate_source);
+ return ni_m_set_gate2(counter, src);
case ni_gpct_variant_660x:
- return ni_660x_set_gate2(counter, gate_source);
+ return ni_660x_set_gate2(counter, src);
default:
- BUG();
- break;
+ return -EINVAL;
}
break;
default:
@@ -957,11 +1040,11 @@ int ni_tio_set_gate_src(struct ni_gpct *counter, unsigned gate_index,
}
EXPORT_SYMBOL_GPL(ni_tio_set_gate_src);
-static int ni_tio_set_other_src(struct ni_gpct *counter, unsigned index,
+static int ni_tio_set_other_src(struct ni_gpct *counter, unsigned int index,
unsigned int source)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
+ unsigned int cidx = counter->counter_index;
unsigned int abz_reg, shift, mask;
if (counter_dev->variant != ni_gpct_variant_m_series)
@@ -987,175 +1070,221 @@ static int ni_tio_set_other_src(struct ni_gpct *counter, unsigned index,
counter_dev->regs[abz_reg] &= ~mask;
counter_dev->regs[abz_reg] |= (source << shift) & mask;
- write_register(counter, counter_dev->regs[abz_reg], abz_reg);
+ ni_tio_write(counter, counter_dev->regs[abz_reg], abz_reg);
return 0;
}
-static unsigned ni_660x_gate_to_generic_gate(unsigned gate)
+static int ni_660x_gate_to_generic_gate(unsigned int gate, unsigned int *src)
{
- unsigned i;
+ unsigned int source;
+ unsigned int i;
switch (gate) {
case NI_660X_SRC_PIN_I_GATE_SEL:
- return NI_GPCT_SOURCE_PIN_i_GATE_SELECT;
+ source = NI_GPCT_SOURCE_PIN_i_GATE_SELECT;
+ break;
case NI_660X_GATE_PIN_I_GATE_SEL:
- return NI_GPCT_GATE_PIN_i_GATE_SELECT;
+ source = NI_GPCT_GATE_PIN_i_GATE_SELECT;
+ break;
case NI_660X_NEXT_SRC_GATE_SEL:
- return NI_GPCT_NEXT_SOURCE_GATE_SELECT;
+ source = NI_GPCT_NEXT_SOURCE_GATE_SELECT;
+ break;
case NI_660X_NEXT_OUT_GATE_SEL:
- return NI_GPCT_NEXT_OUT_GATE_SELECT;
+ source = NI_GPCT_NEXT_OUT_GATE_SELECT;
+ break;
case NI_660X_LOGIC_LOW_GATE_SEL:
- return NI_GPCT_LOGIC_LOW_GATE_SELECT;
+ source = NI_GPCT_LOGIC_LOW_GATE_SELECT;
+ break;
default:
for (i = 0; i <= NI_660X_MAX_RTSI_CHAN; ++i) {
- if (gate == NI_660X_RTSI_GATE_SEL(i))
- return NI_GPCT_RTSI_GATE_SELECT(i);
+ if (gate == NI_660X_RTSI_GATE_SEL(i)) {
+ source = NI_GPCT_RTSI_GATE_SELECT(i);
+ break;
+ }
}
+ if (i <= NI_660X_MAX_RTSI_CHAN)
+ break;
for (i = 0; i <= NI_660X_MAX_GATE_PIN; ++i) {
- if (gate == NI_660X_PIN_GATE_SEL(i))
- return NI_GPCT_GATE_PIN_GATE_SELECT(i);
+ if (gate == NI_660X_PIN_GATE_SEL(i)) {
+ source = NI_GPCT_GATE_PIN_GATE_SELECT(i);
+ break;
+ }
}
- BUG();
- break;
+ if (i <= NI_660X_MAX_GATE_PIN)
+ break;
+ return -EINVAL;
}
+ *src = source;
return 0;
};
-static unsigned ni_m_gate_to_generic_gate(unsigned gate)
+static int ni_m_gate_to_generic_gate(unsigned int gate, unsigned int *src)
{
- unsigned i;
+ unsigned int source;
+ unsigned int i;
switch (gate) {
case NI_M_TIMESTAMP_MUX_GATE_SEL:
- return NI_GPCT_TIMESTAMP_MUX_GATE_SELECT;
+ source = NI_GPCT_TIMESTAMP_MUX_GATE_SELECT;
+ break;
case NI_M_AI_START2_GATE_SEL:
- return NI_GPCT_AI_START2_GATE_SELECT;
+ source = NI_GPCT_AI_START2_GATE_SELECT;
+ break;
case NI_M_PXI_STAR_TRIGGER_GATE_SEL:
- return NI_GPCT_PXI_STAR_TRIGGER_GATE_SELECT;
+ source = NI_GPCT_PXI_STAR_TRIGGER_GATE_SELECT;
+ break;
case NI_M_NEXT_OUT_GATE_SEL:
- return NI_GPCT_NEXT_OUT_GATE_SELECT;
+ source = NI_GPCT_NEXT_OUT_GATE_SELECT;
+ break;
case NI_M_AI_START1_GATE_SEL:
- return NI_GPCT_AI_START1_GATE_SELECT;
+ source = NI_GPCT_AI_START1_GATE_SELECT;
+ break;
case NI_M_NEXT_SRC_GATE_SEL:
- return NI_GPCT_NEXT_SOURCE_GATE_SELECT;
+ source = NI_GPCT_NEXT_SOURCE_GATE_SELECT;
+ break;
case NI_M_ANALOG_TRIG_OUT_GATE_SEL:
- return NI_GPCT_ANALOG_TRIGGER_OUT_GATE_SELECT;
+ source = NI_GPCT_ANALOG_TRIGGER_OUT_GATE_SELECT;
+ break;
case NI_M_LOGIC_LOW_GATE_SEL:
- return NI_GPCT_LOGIC_LOW_GATE_SELECT;
+ source = NI_GPCT_LOGIC_LOW_GATE_SELECT;
+ break;
default:
for (i = 0; i <= NI_M_MAX_RTSI_CHAN; ++i) {
- if (gate == NI_M_RTSI_GATE_SEL(i))
- return NI_GPCT_RTSI_GATE_SELECT(i);
+ if (gate == NI_M_RTSI_GATE_SEL(i)) {
+ source = NI_GPCT_RTSI_GATE_SELECT(i);
+ break;
+ }
}
+ if (i <= NI_M_MAX_RTSI_CHAN)
+ break;
for (i = 0; i <= NI_M_MAX_PFI_CHAN; ++i) {
- if (gate == NI_M_PFI_GATE_SEL(i))
- return NI_GPCT_PFI_GATE_SELECT(i);
+ if (gate == NI_M_PFI_GATE_SEL(i)) {
+ source = NI_GPCT_PFI_GATE_SELECT(i);
+ break;
+ }
}
- BUG();
- break;
+ if (i <= NI_M_MAX_PFI_CHAN)
+ break;
+ return -EINVAL;
}
+ *src = source;
return 0;
};
-static unsigned ni_660x_gate2_to_generic_gate(unsigned gate)
+static int ni_660x_gate2_to_generic_gate(unsigned int gate, unsigned int *src)
{
- unsigned i;
+ unsigned int source;
+ unsigned int i;
switch (gate) {
case NI_660X_SRC_PIN_I_GATE2_SEL:
- return NI_GPCT_SOURCE_PIN_i_GATE_SELECT;
+ source = NI_GPCT_SOURCE_PIN_i_GATE_SELECT;
+ break;
case NI_660X_UD_PIN_I_GATE2_SEL:
- return NI_GPCT_UP_DOWN_PIN_i_GATE_SELECT;
+ source = NI_GPCT_UP_DOWN_PIN_i_GATE_SELECT;
+ break;
case NI_660X_NEXT_SRC_GATE2_SEL:
- return NI_GPCT_NEXT_SOURCE_GATE_SELECT;
+ source = NI_GPCT_NEXT_SOURCE_GATE_SELECT;
+ break;
case NI_660X_NEXT_OUT_GATE2_SEL:
- return NI_GPCT_NEXT_OUT_GATE_SELECT;
+ source = NI_GPCT_NEXT_OUT_GATE_SELECT;
+ break;
case NI_660X_SELECTED_GATE2_SEL:
- return NI_GPCT_SELECTED_GATE_GATE_SELECT;
+ source = NI_GPCT_SELECTED_GATE_GATE_SELECT;
+ break;
case NI_660X_LOGIC_LOW_GATE2_SEL:
- return NI_GPCT_LOGIC_LOW_GATE_SELECT;
+ source = NI_GPCT_LOGIC_LOW_GATE_SELECT;
+ break;
default:
for (i = 0; i <= NI_660X_MAX_RTSI_CHAN; ++i) {
- if (gate == NI_660X_RTSI_GATE2_SEL(i))
- return NI_GPCT_RTSI_GATE_SELECT(i);
+ if (gate == NI_660X_RTSI_GATE2_SEL(i)) {
+ source = NI_GPCT_RTSI_GATE_SELECT(i);
+ break;
+ }
}
+ if (i <= NI_660X_MAX_RTSI_CHAN)
+ break;
for (i = 0; i <= NI_660X_MAX_UP_DOWN_PIN; ++i) {
- if (gate == NI_660X_UD_PIN_GATE2_SEL(i))
- return NI_GPCT_UP_DOWN_PIN_GATE_SELECT(i);
+ if (gate == NI_660X_UD_PIN_GATE2_SEL(i)) {
+ source = NI_GPCT_UP_DOWN_PIN_GATE_SELECT(i);
+ break;
+ }
}
- BUG();
- break;
+ if (i <= NI_660X_MAX_UP_DOWN_PIN)
+ break;
+ return -EINVAL;
}
+ *src = source;
return 0;
};
-static unsigned ni_m_gate2_to_generic_gate(unsigned gate)
+static int ni_m_gate2_to_generic_gate(unsigned int gate, unsigned int *src)
{
/*
* FIXME: the second gate sources for the m series are undocumented,
* so we just return the raw bits for now.
*/
- switch (gate) {
- default:
- return gate;
- }
+ *src = gate;
return 0;
};
-static int ni_tio_get_gate_src(struct ni_gpct *counter, unsigned gate_index,
+static int ni_tio_get_gate_src(struct ni_gpct *counter, unsigned int gate_index,
unsigned int *gate_source)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- unsigned mode = ni_tio_get_soft_copy(counter, NITIO_MODE_REG(cidx));
- unsigned gate2_reg = NITIO_GATE2_REG(cidx);
- unsigned gate;
+ unsigned int cidx = counter->counter_index;
+ unsigned int mode;
+ unsigned int reg;
+ unsigned int gate;
+ int ret;
+
+ mode = ni_tio_get_soft_copy(counter, NITIO_MODE_REG(cidx));
+ if (((mode & GI_GATING_MODE_MASK) == GI_GATING_DISABLED) ||
+ (gate_index == 1 &&
+ !(counter_dev->regs[NITIO_GATE2_REG(cidx)] & GI_GATE2_MODE))) {
+ *gate_source = NI_GPCT_DISABLED_GATE_SELECT;
+ return 0;
+ }
switch (gate_index) {
case 0:
- if ((mode & GI_GATING_MODE_MASK) == GI_GATING_DISABLED) {
- *gate_source = NI_GPCT_DISABLED_GATE_SELECT;
- return 0;
- }
-
- gate = GI_BITS_TO_GATE(ni_tio_get_soft_copy(counter,
- NITIO_INPUT_SEL_REG(cidx)));
+ reg = NITIO_INPUT_SEL_REG(cidx);
+ gate = GI_BITS_TO_GATE(ni_tio_get_soft_copy(counter, reg));
switch (counter_dev->variant) {
case ni_gpct_variant_e_series:
case ni_gpct_variant_m_series:
default:
- *gate_source = ni_m_gate_to_generic_gate(gate);
+ ret = ni_m_gate_to_generic_gate(gate, gate_source);
break;
case ni_gpct_variant_660x:
- *gate_source = ni_660x_gate_to_generic_gate(gate);
+ ret = ni_660x_gate_to_generic_gate(gate, gate_source);
break;
}
+ if (ret)
+ return ret;
if (mode & GI_GATE_POL_INVERT)
*gate_source |= CR_INVERT;
if ((mode & GI_GATING_MODE_MASK) != GI_LEVEL_GATING)
*gate_source |= CR_EDGE;
break;
case 1:
- if ((mode & GI_GATING_MODE_MASK) == GI_GATING_DISABLED ||
- !(counter_dev->regs[gate2_reg] & GI_GATE2_MODE)) {
- *gate_source = NI_GPCT_DISABLED_GATE_SELECT;
- return 0;
- }
-
- gate = GI_BITS_TO_GATE2(counter_dev->regs[gate2_reg]);
+ reg = NITIO_GATE2_REG(cidx);
+ gate = GI_BITS_TO_GATE2(counter_dev->regs[reg]);
switch (counter_dev->variant) {
case ni_gpct_variant_e_series:
case ni_gpct_variant_m_series:
default:
- *gate_source = ni_m_gate2_to_generic_gate(gate);
+ ret = ni_m_gate2_to_generic_gate(gate, gate_source);
break;
case ni_gpct_variant_660x:
- *gate_source = ni_660x_gate2_to_generic_gate(gate);
+ ret = ni_660x_gate2_to_generic_gate(gate, gate_source);
break;
}
- if (counter_dev->regs[gate2_reg] & GI_GATE2_POL_INVERT)
+ if (ret)
+ return ret;
+ if (counter_dev->regs[reg] & GI_GATE2_POL_INVERT)
*gate_source |= CR_INVERT;
/* second gate can't have edge/level mode set independently */
if ((mode & GI_GATING_MODE_MASK) != GI_LEVEL_GATING)
@@ -1173,45 +1302,52 @@ int ni_tio_insn_config(struct comedi_device *dev,
unsigned int *data)
{
struct ni_gpct *counter = s->private;
- unsigned cidx = counter->counter_index;
- unsigned status;
+ unsigned int cidx = counter->counter_index;
+ unsigned int status;
+ int ret = 0;
switch (data[0]) {
case INSN_CONFIG_SET_COUNTER_MODE:
- return ni_tio_set_counter_mode(counter, data[1]);
+ ret = ni_tio_set_counter_mode(counter, data[1]);
+ break;
case INSN_CONFIG_ARM:
- return ni_tio_arm(counter, 1, data[1]);
+ ret = ni_tio_arm(counter, true, data[1]);
+ break;
case INSN_CONFIG_DISARM:
- ni_tio_arm(counter, 0, 0);
- return 0;
+ ret = ni_tio_arm(counter, false, 0);
+ break;
case INSN_CONFIG_GET_COUNTER_STATUS:
data[1] = 0;
- status = read_register(counter, NITIO_SHARED_STATUS_REG(cidx));
+ status = ni_tio_read(counter, NITIO_SHARED_STATUS_REG(cidx));
if (status & GI_ARMED(cidx)) {
data[1] |= COMEDI_COUNTER_ARMED;
if (status & GI_COUNTING(cidx))
data[1] |= COMEDI_COUNTER_COUNTING;
}
data[2] = COMEDI_COUNTER_ARMED | COMEDI_COUNTER_COUNTING;
- return 0;
+ break;
case INSN_CONFIG_SET_CLOCK_SRC:
- return ni_tio_set_clock_src(counter, data[1], data[2]);
+ ret = ni_tio_set_clock_src(counter, data[1], data[2]);
+ break;
case INSN_CONFIG_GET_CLOCK_SRC:
- ni_tio_get_clock_src(counter, &data[1], &data[2]);
- return 0;
+ ret = ni_tio_get_clock_src(counter, &data[1], &data[2]);
+ break;
case INSN_CONFIG_SET_GATE_SRC:
- return ni_tio_set_gate_src(counter, data[1], data[2]);
+ ret = ni_tio_set_gate_src(counter, data[1], data[2]);
+ break;
case INSN_CONFIG_GET_GATE_SRC:
- return ni_tio_get_gate_src(counter, data[1], &data[2]);
+ ret = ni_tio_get_gate_src(counter, data[1], &data[2]);
+ break;
case INSN_CONFIG_SET_OTHER_SRC:
- return ni_tio_set_other_src(counter, data[1], data[2]);
+ ret = ni_tio_set_other_src(counter, data[1], data[2]);
+ break;
case INSN_CONFIG_RESET:
ni_tio_reset_count_and_disarm(counter);
- return 0;
- default:
break;
+ default:
+ return -EINVAL;
}
- return -EINVAL;
+ return ret ? ret : insn->n;
}
EXPORT_SYMBOL_GPL(ni_tio_insn_config);
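As a usage note (illustrative, not part of the patch): the handler above keeps the usual comedi convention of returning insn->n on success and a negative errno on failure, with data[0] selecting the operation. A hypothetical caller arming the counter would fill the instruction data like this:

static void ni_tio_example_fill_arm_insn(unsigned int *data)
{
	data[0] = INSN_CONFIG_ARM;		/* operation selector */
	data[1] = NI_GPCT_ARM_IMMEDIATE;	/* start trigger passed to ni_tio_arm() */
}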
@@ -1219,7 +1355,7 @@ static unsigned int ni_tio_read_sw_save_reg(struct comedi_device *dev,
struct comedi_subdevice *s)
{
struct ni_gpct *counter = s->private;
- unsigned cidx = counter->counter_index;
+ unsigned int cidx = counter->counter_index;
unsigned int val;
ni_tio_set_bits(counter, NITIO_CMD_REG(cidx), GI_SAVE_TRACE, 0);
@@ -1235,9 +1371,9 @@ static unsigned int ni_tio_read_sw_save_reg(struct comedi_device *dev,
* will be correct since the count value will definitely have latched
* by then.
*/
- val = read_register(counter, NITIO_SW_SAVE_REG(cidx));
- if (val != read_register(counter, NITIO_SW_SAVE_REG(cidx)))
- val = read_register(counter, NITIO_SW_SAVE_REG(cidx));
+ val = ni_tio_read(counter, NITIO_SW_SAVE_REG(cidx));
+ if (val != ni_tio_read(counter, NITIO_SW_SAVE_REG(cidx)))
+ val = ni_tio_read(counter, NITIO_SW_SAVE_REG(cidx));
return val;
}
@@ -1250,7 +1386,7 @@ int ni_tio_insn_read(struct comedi_device *dev,
struct ni_gpct *counter = s->private;
struct ni_gpct_device *counter_dev = counter->counter_dev;
unsigned int channel = CR_CHAN(insn->chanspec);
- unsigned cidx = counter->counter_index;
+ unsigned int cidx = counter->counter_index;
int i;
for (i = 0; i < insn->n; i++) {
@@ -1270,11 +1406,10 @@ int ni_tio_insn_read(struct comedi_device *dev,
}
EXPORT_SYMBOL_GPL(ni_tio_insn_read);
-static unsigned ni_tio_next_load_register(struct ni_gpct *counter)
+static unsigned int ni_tio_next_load_register(struct ni_gpct *counter)
{
- unsigned cidx = counter->counter_index;
- const unsigned bits =
- read_register(counter, NITIO_SHARED_STATUS_REG(cidx));
+ unsigned int cidx = counter->counter_index;
+ unsigned int bits = ni_tio_read(counter, NITIO_SHARED_STATUS_REG(cidx));
return (bits & GI_NEXT_LOAD_SRC(cidx))
? NITIO_LOADB_REG(cidx)
@@ -1288,9 +1423,9 @@ int ni_tio_insn_write(struct comedi_device *dev,
{
struct ni_gpct *counter = s->private;
struct ni_gpct_device *counter_dev = counter->counter_dev;
- const unsigned channel = CR_CHAN(insn->chanspec);
- unsigned cidx = counter->counter_index;
- unsigned load_reg;
+ unsigned int channel = CR_CHAN(insn->chanspec);
+ unsigned int cidx = counter->counter_index;
+ unsigned int load_reg;
if (insn->n < 1)
return 0;
@@ -1306,19 +1441,19 @@ int ni_tio_insn_write(struct comedi_device *dev,
* load register is already selected.
*/
load_reg = ni_tio_next_load_register(counter);
- write_register(counter, data[0], load_reg);
+ ni_tio_write(counter, data[0], load_reg);
ni_tio_set_bits_transient(counter, NITIO_CMD_REG(cidx),
0, 0, GI_LOAD);
/* restore load reg */
- write_register(counter, counter_dev->regs[load_reg], load_reg);
+ ni_tio_write(counter, counter_dev->regs[load_reg], load_reg);
break;
case 1:
counter_dev->regs[NITIO_LOADA_REG(cidx)] = data[0];
- write_register(counter, data[0], NITIO_LOADA_REG(cidx));
+ ni_tio_write(counter, data[0], NITIO_LOADA_REG(cidx));
break;
case 2:
counter_dev->regs[NITIO_LOADB_REG(cidx)] = data[0];
- write_register(counter, data[0], NITIO_LOADB_REG(cidx));
+ ni_tio_write(counter, data[0], NITIO_LOADB_REG(cidx));
break;
default:
return -EINVAL;
@@ -1330,13 +1465,13 @@ EXPORT_SYMBOL_GPL(ni_tio_insn_write);
void ni_tio_init_counter(struct ni_gpct *counter)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
+ unsigned int cidx = counter->counter_index;
ni_tio_reset_count_and_disarm(counter);
/* initialize counter registers */
counter_dev->regs[NITIO_AUTO_INC_REG(cidx)] = 0x0;
- write_register(counter, 0x0, NITIO_AUTO_INC_REG(cidx));
+ ni_tio_write(counter, 0x0, NITIO_AUTO_INC_REG(cidx));
ni_tio_set_bits(counter, NITIO_CMD_REG(cidx),
~0, GI_SYNC_GATE);
@@ -1344,10 +1479,10 @@ void ni_tio_init_counter(struct ni_gpct *counter)
ni_tio_set_bits(counter, NITIO_MODE_REG(cidx), ~0, 0);
counter_dev->regs[NITIO_LOADA_REG(cidx)] = 0x0;
- write_register(counter, 0x0, NITIO_LOADA_REG(cidx));
+ ni_tio_write(counter, 0x0, NITIO_LOADA_REG(cidx));
counter_dev->regs[NITIO_LOADB_REG(cidx)] = 0x0;
- write_register(counter, 0x0, NITIO_LOADB_REG(cidx));
+ ni_tio_write(counter, 0x0, NITIO_LOADB_REG(cidx));
ni_tio_set_bits(counter, NITIO_INPUT_SEL_REG(cidx), ~0, 0);
@@ -1356,7 +1491,7 @@ void ni_tio_init_counter(struct ni_gpct *counter)
if (ni_tio_has_gate2_registers(counter_dev)) {
counter_dev->regs[NITIO_GATE2_REG(cidx)] = 0x0;
- write_register(counter, 0x0, NITIO_GATE2_REG(cidx));
+ ni_tio_write(counter, 0x0, NITIO_GATE2_REG(cidx));
}
ni_tio_set_bits(counter, NITIO_DMA_CFG_REG(cidx), ~0, 0x0);
@@ -1367,17 +1502,17 @@ EXPORT_SYMBOL_GPL(ni_tio_init_counter);
struct ni_gpct_device *
ni_gpct_device_construct(struct comedi_device *dev,
- void (*write_register)(struct ni_gpct *counter,
- unsigned bits,
- enum ni_gpct_register reg),
- unsigned (*read_register)(struct ni_gpct *counter,
- enum ni_gpct_register reg),
+ void (*write)(struct ni_gpct *counter,
+ unsigned int value,
+ enum ni_gpct_register reg),
+ unsigned int (*read)(struct ni_gpct *counter,
+ enum ni_gpct_register reg),
enum ni_gpct_variant variant,
- unsigned num_counters)
+ unsigned int num_counters)
{
struct ni_gpct_device *counter_dev;
struct ni_gpct *counter;
- unsigned i;
+ unsigned int i;
if (num_counters == 0)
return NULL;
@@ -1387,8 +1522,8 @@ ni_gpct_device_construct(struct comedi_device *dev,
return NULL;
counter_dev->dev = dev;
- counter_dev->write_register = write_register;
- counter_dev->read_register = read_register;
+ counter_dev->write = write;
+ counter_dev->read = read;
counter_dev->variant = variant;
spin_lock_init(&counter_dev->regs_lock);
@@ -1413,7 +1548,7 @@ EXPORT_SYMBOL_GPL(ni_gpct_device_construct);
void ni_gpct_device_destroy(struct ni_gpct_device *counter_dev)
{
- if (!counter_dev->counters)
+ if (!counter_dev)
return;
kfree(counter_dev->counters);
kfree(counter_dev);
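A hedged sketch of a board driver using the reworked constructor (not part of the patch): the callbacks are now named write/read and ni_gpct_device_destroy() tolerates a NULL pointer. The my_gpct_*()/my_board_*() names are hypothetical; a real driver supplies its own register accessors.

static void my_gpct_write(struct ni_gpct *counter, unsigned int value,
			  enum ni_gpct_register reg)
{
	/* device-specific register write would go here */
}

static unsigned int my_gpct_read(struct ni_gpct *counter,
				 enum ni_gpct_register reg)
{
	/* device-specific register read would go here */
	return 0;
}

static int my_board_attach_counters(struct comedi_device *dev)
{
	struct ni_gpct_device *gpct_dev;

	gpct_dev = ni_gpct_device_construct(dev, my_gpct_write, my_gpct_read,
					    ni_gpct_variant_m_series, 4);
	if (!gpct_dev)
		return -ENOMEM;

	/*
	 * ... set up comedi subdevices around gpct_dev->counters[] ...
	 * The detach path calls ni_gpct_device_destroy(gpct_dev), which
	 * is now safe even when the pointer is NULL.
	 */
	return 0;
}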
diff --git a/drivers/staging/comedi/drivers/ni_tio.h b/drivers/staging/comedi/drivers/ni_tio.h
index 25aedd0e5867..4978358f9b13 100644
--- a/drivers/staging/comedi/drivers/ni_tio.h
+++ b/drivers/staging/comedi/drivers/ni_tio.h
@@ -1,29 +1,24 @@
/*
- drivers/ni_tio.h
- Header file for NI general purpose counter support code (ni_tio.c)
-
- COMEDI - Linux Control and Measurement Device Interface
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * Header file for NI general purpose counter support code (ni_tio.c)
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
#ifndef _COMEDI_NI_TIO_H
#define _COMEDI_NI_TIO_H
#include "../comedidev.h"
-/* forward declarations */
-struct mite_struct;
-struct ni_gpct_device;
-
enum ni_gpct_register {
NITIO_G0_AUTO_INC,
NITIO_G1_AUTO_INC,
@@ -106,35 +101,34 @@ enum ni_gpct_variant {
struct ni_gpct {
struct ni_gpct_device *counter_dev;
- unsigned counter_index;
- unsigned chip_index;
- uint64_t clock_period_ps; /* clock period in picoseconds */
+ unsigned int counter_index;
+ unsigned int chip_index;
+ u64 clock_period_ps; /* clock period in picoseconds */
struct mite_channel *mite_chan;
- spinlock_t lock;
+ spinlock_t lock; /* protects 'mite_chan' */
};
struct ni_gpct_device {
struct comedi_device *dev;
- void (*write_register)(struct ni_gpct *counter, unsigned bits,
- enum ni_gpct_register reg);
- unsigned (*read_register)(struct ni_gpct *counter,
- enum ni_gpct_register reg);
+ void (*write)(struct ni_gpct *, unsigned int value,
+ enum ni_gpct_register);
+ unsigned int (*read)(struct ni_gpct *, enum ni_gpct_register);
enum ni_gpct_variant variant;
struct ni_gpct *counters;
- unsigned num_counters;
- unsigned regs[NITIO_NUM_REGS];
- spinlock_t regs_lock;
+ unsigned int num_counters;
+ unsigned int regs[NITIO_NUM_REGS];
+ spinlock_t regs_lock; /* protects 'regs' */
};
struct ni_gpct_device *
ni_gpct_device_construct(struct comedi_device *,
- void (*write_register)(struct ni_gpct *,
- unsigned bits,
- enum ni_gpct_register),
- unsigned (*read_register)(struct ni_gpct *,
- enum ni_gpct_register),
+ void (*write)(struct ni_gpct *,
+ unsigned int value,
+ enum ni_gpct_register),
+ unsigned int (*read)(struct ni_gpct *,
+ enum ni_gpct_register),
enum ni_gpct_variant,
- unsigned num_counters);
+ unsigned int num_counters);
void ni_gpct_device_destroy(struct ni_gpct_device *);
void ni_tio_init_counter(struct ni_gpct *);
int ni_tio_insn_read(struct comedi_device *, struct comedi_subdevice *,
diff --git a/drivers/staging/comedi/drivers/ni_tio_internal.h b/drivers/staging/comedi/drivers/ni_tio_internal.h
index 2bceae493e23..b15b10833c42 100644
--- a/drivers/staging/comedi/drivers/ni_tio_internal.h
+++ b/drivers/staging/comedi/drivers/ni_tio_internal.h
@@ -1,20 +1,19 @@
/*
- drivers/ni_tio_internal.h
- Header file for NI general purpose counter support code (ni_tio.c and
- ni_tiocmd.c)
-
- COMEDI - Linux Control and Measurement Device Interface
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * Header file for NI general purpose counter support code (ni_tio.c and
+ * ni_tiocmd.c)
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
#ifndef _COMEDI_NI_TIO_INTERNAL_H
#define _COMEDI_NI_TIO_INTERNAL_H
@@ -24,68 +23,73 @@
#define NITIO_AUTO_INC_REG(x) (NITIO_G0_AUTO_INC + (x))
#define GI_AUTO_INC_MASK 0xff
#define NITIO_CMD_REG(x) (NITIO_G0_CMD + (x))
-#define GI_ARM (1 << 0)
-#define GI_SAVE_TRACE (1 << 1)
-#define GI_LOAD (1 << 2)
-#define GI_DISARM (1 << 4)
+#define GI_ARM BIT(0)
+#define GI_SAVE_TRACE BIT(1)
+#define GI_LOAD BIT(2)
+#define GI_DISARM BIT(4)
#define GI_CNT_DIR(x) (((x) & 0x3) << 5)
-#define GI_CNT_DIR_MASK (3 << 5)
-#define GI_WRITE_SWITCH (1 << 7)
-#define GI_SYNC_GATE (1 << 8)
-#define GI_LITTLE_BIG_ENDIAN (1 << 9)
-#define GI_BANK_SWITCH_START (1 << 10)
-#define GI_BANK_SWITCH_MODE (1 << 11)
-#define GI_BANK_SWITCH_ENABLE (1 << 12)
-#define GI_ARM_COPY (1 << 13)
-#define GI_SAVE_TRACE_COPY (1 << 14)
-#define GI_DISARM_COPY (1 << 15)
+#define GI_CNT_DIR_MASK GI_CNT_DIR(3)
+#define GI_WRITE_SWITCH BIT(7)
+#define GI_SYNC_GATE BIT(8)
+#define GI_LITTLE_BIG_ENDIAN BIT(9)
+#define GI_BANK_SWITCH_START BIT(10)
+#define GI_BANK_SWITCH_MODE BIT(11)
+#define GI_BANK_SWITCH_ENABLE BIT(12)
+#define GI_ARM_COPY BIT(13)
+#define GI_SAVE_TRACE_COPY BIT(14)
+#define GI_DISARM_COPY BIT(15)
#define NITIO_HW_SAVE_REG(x) (NITIO_G0_HW_SAVE + (x))
#define NITIO_SW_SAVE_REG(x) (NITIO_G0_SW_SAVE + (x))
#define NITIO_MODE_REG(x) (NITIO_G0_MODE + (x))
-#define GI_GATING_DISABLED (0 << 0)
-#define GI_LEVEL_GATING (1 << 0)
-#define GI_RISING_EDGE_GATING (2 << 0)
-#define GI_FALLING_EDGE_GATING (3 << 0)
-#define GI_GATING_MODE_MASK (3 << 0)
-#define GI_GATE_ON_BOTH_EDGES (1 << 2)
-#define GI_EDGE_GATE_STARTS_STOPS (0 << 3)
-#define GI_EDGE_GATE_STOPS_STARTS (1 << 3)
-#define GI_EDGE_GATE_STARTS (2 << 3)
-#define GI_EDGE_GATE_NO_STARTS_OR_STOPS (3 << 3)
-#define GI_EDGE_GATE_MODE_MASK (3 << 3)
-#define GI_STOP_ON_GATE (0 << 5)
-#define GI_STOP_ON_GATE_OR_TC (1 << 5)
-#define GI_STOP_ON_GATE_OR_SECOND_TC (2 << 5)
-#define GI_STOP_MODE_MASK (3 << 5)
-#define GI_LOAD_SRC_SEL (1 << 7)
-#define GI_OUTPUT_TC_PULSE (1 << 8)
-#define GI_OUTPUT_TC_TOGGLE (2 << 8)
-#define GI_OUTPUT_TC_OR_GATE_TOGGLE (3 << 8)
-#define GI_OUTPUT_MODE_MASK (3 << 8)
-#define GI_NO_HARDWARE_DISARM (0 << 10)
-#define GI_DISARM_AT_TC (1 << 10)
-#define GI_DISARM_AT_GATE (2 << 10)
-#define GI_DISARM_AT_TC_OR_GATE (3 << 10)
-#define GI_COUNTING_ONCE_MASK (3 << 10)
-#define GI_LOADING_ON_TC (1 << 12)
-#define GI_GATE_POL_INVERT (1 << 13)
-#define GI_LOADING_ON_GATE (1 << 14)
-#define GI_RELOAD_SRC_SWITCHING (1 << 15)
+#define GI_GATING_MODE(x) (((x) & 0x3) << 0)
+#define GI_GATING_DISABLED GI_GATING_MODE(0)
+#define GI_LEVEL_GATING GI_GATING_MODE(1)
+#define GI_RISING_EDGE_GATING GI_GATING_MODE(2)
+#define GI_FALLING_EDGE_GATING GI_GATING_MODE(3)
+#define GI_GATING_MODE_MASK GI_GATING_MODE(3)
+#define GI_GATE_ON_BOTH_EDGES BIT(2)
+#define GI_EDGE_GATE_MODE(x) (((x) & 0x3) << 3)
+#define GI_EDGE_GATE_STARTS_STOPS GI_EDGE_GATE_MODE(0)
+#define GI_EDGE_GATE_STOPS_STARTS GI_EDGE_GATE_MODE(1)
+#define GI_EDGE_GATE_STARTS GI_EDGE_GATE_MODE(2)
+#define GI_EDGE_GATE_NO_STARTS_OR_STOPS GI_EDGE_GATE_MODE(3)
+#define GI_EDGE_GATE_MODE_MASK GI_EDGE_GATE_MODE(3)
+#define GI_STOP_MODE(x) (((x) & 0x3) << 5)
+#define GI_STOP_ON_GATE GI_STOP_MODE(0)
+#define GI_STOP_ON_GATE_OR_TC GI_STOP_MODE(1)
+#define GI_STOP_ON_GATE_OR_SECOND_TC GI_STOP_MODE(2)
+#define GI_STOP_MODE_MASK GI_STOP_MODE(3)
+#define GI_LOAD_SRC_SEL BIT(7)
+#define GI_OUTPUT_MODE(x) (((x) & 0x3) << 8)
+#define GI_OUTPUT_TC_PULSE GI_OUTPUT_MODE(1)
+#define GI_OUTPUT_TC_TOGGLE GI_OUTPUT_MODE(2)
+#define GI_OUTPUT_TC_OR_GATE_TOGGLE GI_OUTPUT_MODE(3)
+#define GI_OUTPUT_MODE_MASK GI_OUTPUT_MODE(3)
+#define GI_COUNTING_ONCE(x) (((x) & 0x3) << 10)
+#define GI_NO_HARDWARE_DISARM GI_COUNTING_ONCE(0)
+#define GI_DISARM_AT_TC GI_COUNTING_ONCE(1)
+#define GI_DISARM_AT_GATE GI_COUNTING_ONCE(2)
+#define GI_DISARM_AT_TC_OR_GATE GI_COUNTING_ONCE(3)
+#define GI_COUNTING_ONCE_MASK GI_COUNTING_ONCE(3)
+#define GI_LOADING_ON_TC BIT(12)
+#define GI_GATE_POL_INVERT BIT(13)
+#define GI_LOADING_ON_GATE BIT(14)
+#define GI_RELOAD_SRC_SWITCHING BIT(15)
#define NITIO_LOADA_REG(x) (NITIO_G0_LOADA + (x))
#define NITIO_LOADB_REG(x) (NITIO_G0_LOADB + (x))
#define NITIO_INPUT_SEL_REG(x) (NITIO_G0_INPUT_SEL + (x))
-#define GI_READ_ACKS_IRQ (1 << 0)
-#define GI_WRITE_ACKS_IRQ (1 << 1)
+#define GI_READ_ACKS_IRQ BIT(0)
+#define GI_WRITE_ACKS_IRQ BIT(1)
#define GI_BITS_TO_SRC(x) (((x) >> 2) & 0x1f)
#define GI_SRC_SEL(x) (((x) & 0x1f) << 2)
-#define GI_SRC_SEL_MASK (0x1f << 2)
+#define GI_SRC_SEL_MASK GI_SRC_SEL(0x1f)
#define GI_BITS_TO_GATE(x) (((x) >> 7) & 0x1f)
#define GI_GATE_SEL(x) (((x) & 0x1f) << 7)
-#define GI_GATE_SEL_MASK (0x1f << 7)
-#define GI_GATE_SEL_LOAD_SRC (1 << 12)
-#define GI_OR_GATE (1 << 13)
-#define GI_OUTPUT_POL_INVERT (1 << 14)
-#define GI_SRC_POL_INVERT (1 << 15)
+#define GI_GATE_SEL_MASK GI_GATE_SEL(0x1f)
+#define GI_GATE_SEL_LOAD_SRC BIT(12)
+#define GI_OR_GATE BIT(13)
+#define GI_OUTPUT_POL_INVERT BIT(14)
+#define GI_SRC_POL_INVERT BIT(15)
#define NITIO_CNT_MODE_REG(x) (NITIO_G0_CNT_MODE + (x))
#define GI_CNT_MODE(x) (((x) & 0x7) << 0)
#define GI_CNT_MODE_NORMAL GI_CNT_MODE(0)
@@ -94,152 +98,84 @@
#define GI_CNT_MODE_QUADX4 GI_CNT_MODE(3)
#define GI_CNT_MODE_TWO_PULSE GI_CNT_MODE(4)
#define GI_CNT_MODE_SYNC_SRC GI_CNT_MODE(6)
-#define GI_CNT_MODE_MASK (7 << 0)
-#define GI_INDEX_MODE (1 << 4)
+#define GI_CNT_MODE_MASK GI_CNT_MODE(7)
+#define GI_INDEX_MODE BIT(4)
#define GI_INDEX_PHASE(x) (((x) & 0x3) << 5)
-#define GI_INDEX_PHASE_MASK (3 << 5)
-#define GI_HW_ARM_ENA (1 << 7)
+#define GI_INDEX_PHASE_MASK GI_INDEX_PHASE(3)
+#define GI_HW_ARM_ENA BIT(7)
#define GI_HW_ARM_SEL(x) ((x) << 8)
-#define GI_660X_HW_ARM_SEL_MASK (0x7 << 8)
-#define GI_M_HW_ARM_SEL_MASK (0x1f << 8)
-#define GI_660X_PRESCALE_X8 (1 << 12)
-#define GI_M_PRESCALE_X8 (1 << 13)
-#define GI_660X_ALT_SYNC (1 << 13)
-#define GI_M_ALT_SYNC (1 << 14)
-#define GI_660X_PRESCALE_X2 (1 << 14)
-#define GI_M_PRESCALE_X2 (1 << 15)
+#define GI_660X_HW_ARM_SEL_MASK GI_HW_ARM_SEL(0x7)
+#define GI_M_HW_ARM_SEL_MASK GI_HW_ARM_SEL(0x1f)
+#define GI_660X_PRESCALE_X8 BIT(12)
+#define GI_M_PRESCALE_X8 BIT(13)
+#define GI_660X_ALT_SYNC BIT(13)
+#define GI_M_ALT_SYNC BIT(14)
+#define GI_660X_PRESCALE_X2 BIT(14)
+#define GI_M_PRESCALE_X2 BIT(15)
#define NITIO_GATE2_REG(x) (NITIO_G0_GATE2 + (x))
-#define GI_GATE2_MODE (1 << 0)
+#define GI_GATE2_MODE BIT(0)
#define GI_BITS_TO_GATE2(x) (((x) >> 7) & 0x1f)
#define GI_GATE2_SEL(x) (((x) & 0x1f) << 7)
-#define GI_GATE2_SEL_MASK (0x1f << 7)
-#define GI_GATE2_POL_INVERT (1 << 13)
-#define GI_GATE2_SUBSEL (1 << 14)
-#define GI_SRC_SUBSEL (1 << 15)
+#define GI_GATE2_SEL_MASK GI_GATE2_SEL(0x1f)
+#define GI_GATE2_POL_INVERT BIT(13)
+#define GI_GATE2_SUBSEL BIT(14)
+#define GI_SRC_SUBSEL BIT(15)
#define NITIO_SHARED_STATUS_REG(x) (NITIO_G01_STATUS + ((x) / 2))
-#define GI_SAVE(x) (((x) % 2) ? (1 << 1) : (1 << 0))
-#define GI_COUNTING(x) (((x) % 2) ? (1 << 3) : (1 << 2))
-#define GI_NEXT_LOAD_SRC(x) (((x) % 2) ? (1 << 5) : (1 << 4))
-#define GI_STALE_DATA(x) (((x) % 2) ? (1 << 7) : (1 << 6))
-#define GI_ARMED(x) (((x) % 2) ? (1 << 9) : (1 << 8))
-#define GI_NO_LOAD_BETWEEN_GATES(x) (((x) % 2) ? (1 << 11) : (1 << 10))
-#define GI_TC_ERROR(x) (((x) % 2) ? (1 << 13) : (1 << 12))
-#define GI_GATE_ERROR(x) (((x) % 2) ? (1 << 15) : (1 << 14))
+#define GI_SAVE(x) (((x) % 2) ? BIT(1) : BIT(0))
+#define GI_COUNTING(x) (((x) % 2) ? BIT(3) : BIT(2))
+#define GI_NEXT_LOAD_SRC(x) (((x) % 2) ? BIT(5) : BIT(4))
+#define GI_STALE_DATA(x) (((x) % 2) ? BIT(7) : BIT(6))
+#define GI_ARMED(x) (((x) % 2) ? BIT(9) : BIT(8))
+#define GI_NO_LOAD_BETWEEN_GATES(x) (((x) % 2) ? BIT(11) : BIT(10))
+#define GI_TC_ERROR(x) (((x) % 2) ? BIT(13) : BIT(12))
+#define GI_GATE_ERROR(x) (((x) % 2) ? BIT(15) : BIT(14))
#define NITIO_RESET_REG(x) (NITIO_G01_RESET + ((x) / 2))
-#define GI_RESET(x) (1 << (2 + ((x) % 2)))
+#define GI_RESET(x) BIT(2 + ((x) % 2))
#define NITIO_STATUS1_REG(x) (NITIO_G01_STATUS1 + ((x) / 2))
#define NITIO_STATUS2_REG(x) (NITIO_G01_STATUS2 + ((x) / 2))
-#define GI_OUTPUT(x) (((x) % 2) ? (1 << 1) : (1 << 0))
-#define GI_HW_SAVE(x) (((x) % 2) ? (1 << 13) : (1 << 12))
-#define GI_PERMANENT_STALE(x) (((x) % 2) ? (1 << 15) : (1 << 14))
+#define GI_OUTPUT(x) (((x) % 2) ? BIT(1) : BIT(0))
+#define GI_HW_SAVE(x) (((x) % 2) ? BIT(13) : BIT(12))
+#define GI_PERMANENT_STALE(x) (((x) % 2) ? BIT(15) : BIT(14))
#define NITIO_DMA_CFG_REG(x) (NITIO_G0_DMA_CFG + (x))
-#define GI_DMA_ENABLE (1 << 0)
-#define GI_DMA_WRITE (1 << 1)
-#define GI_DMA_INT_ENA (1 << 2)
-#define GI_DMA_RESET (1 << 3)
-#define GI_DMA_BANKSW_ERROR (1 << 4)
+#define GI_DMA_ENABLE BIT(0)
+#define GI_DMA_WRITE BIT(1)
+#define GI_DMA_INT_ENA BIT(2)
+#define GI_DMA_RESET BIT(3)
+#define GI_DMA_BANKSW_ERROR BIT(4)
#define NITIO_DMA_STATUS_REG(x) (NITIO_G0_DMA_STATUS + (x))
-#define GI_DMA_READBANK (1 << 13)
-#define GI_DRQ_ERROR (1 << 14)
-#define GI_DRQ_STATUS (1 << 15)
+#define GI_DMA_READBANK BIT(13)
+#define GI_DRQ_ERROR BIT(14)
+#define GI_DRQ_STATUS BIT(15)
#define NITIO_ABZ_REG(x) (NITIO_G0_ABZ + (x))
#define NITIO_INT_ACK_REG(x) (NITIO_G0_INT_ACK + (x))
-#define GI_GATE_ERROR_CONFIRM(x) (((x) % 2) ? (1 << 1) : (1 << 5))
-#define GI_TC_ERROR_CONFIRM(x) (((x) % 2) ? (1 << 2) : (1 << 6))
-#define GI_TC_INTERRUPT_ACK (1 << 14)
-#define GI_GATE_INTERRUPT_ACK (1 << 15)
+#define GI_GATE_ERROR_CONFIRM(x) (((x) % 2) ? BIT(1) : BIT(5))
+#define GI_TC_ERROR_CONFIRM(x) (((x) % 2) ? BIT(2) : BIT(6))
+#define GI_TC_INTERRUPT_ACK BIT(14)
+#define GI_GATE_INTERRUPT_ACK BIT(15)
#define NITIO_STATUS_REG(x) (NITIO_G0_STATUS + (x))
-#define GI_GATE_INTERRUPT (1 << 2)
-#define GI_TC (1 << 3)
-#define GI_INTERRUPT (1 << 15)
+#define GI_GATE_INTERRUPT BIT(2)
+#define GI_TC BIT(3)
+#define GI_INTERRUPT BIT(15)
#define NITIO_INT_ENA_REG(x) (NITIO_G0_INT_ENA + (x))
-#define GI_TC_INTERRUPT_ENABLE(x) (((x) % 2) ? (1 << 9) : (1 << 6))
-#define GI_GATE_INTERRUPT_ENABLE(x) (((x) % 2) ? (1 << 10) : (1 << 8))
-
-static inline void write_register(struct ni_gpct *counter, unsigned bits,
- enum ni_gpct_register reg)
-{
- BUG_ON(reg >= NITIO_NUM_REGS);
- counter->counter_dev->write_register(counter, bits, reg);
-}
-
-static inline unsigned read_register(struct ni_gpct *counter,
- enum ni_gpct_register reg)
-{
- BUG_ON(reg >= NITIO_NUM_REGS);
- return counter->counter_dev->read_register(counter, reg);
-}
+#define GI_TC_INTERRUPT_ENABLE(x) (((x) % 2) ? BIT(9) : BIT(6))
+#define GI_GATE_INTERRUPT_ENABLE(x) (((x) % 2) ? BIT(10) : BIT(8))
-static inline int ni_tio_counting_mode_registers_present(const struct
- ni_gpct_device
- *counter_dev)
-{
- switch (counter_dev->variant) {
- case ni_gpct_variant_e_series:
- return 0;
- case ni_gpct_variant_m_series:
- case ni_gpct_variant_660x:
- return 1;
- default:
- BUG();
- break;
- }
- return 0;
-}
-
-static inline void ni_tio_set_bits_transient(struct ni_gpct *counter,
- enum ni_gpct_register
- register_index, unsigned bit_mask,
- unsigned bit_values,
- unsigned transient_bit_values)
-{
- struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned long flags;
-
- BUG_ON(register_index >= NITIO_NUM_REGS);
- spin_lock_irqsave(&counter_dev->regs_lock, flags);
- counter_dev->regs[register_index] &= ~bit_mask;
- counter_dev->regs[register_index] |= (bit_values & bit_mask);
- write_register(counter,
- counter_dev->regs[register_index] | transient_bit_values,
- register_index);
- mmiowb();
- spin_unlock_irqrestore(&counter_dev->regs_lock, flags);
-}
+void ni_tio_write(struct ni_gpct *, unsigned int value, enum ni_gpct_register);
+unsigned int ni_tio_read(struct ni_gpct *, enum ni_gpct_register);
-/* ni_tio_set_bits( ) is for safely writing to registers whose bits may be
- * twiddled in interrupt context, or whose software copy may be read in
- * interrupt context.
- */
-static inline void ni_tio_set_bits(struct ni_gpct *counter,
- enum ni_gpct_register register_index,
- unsigned bit_mask, unsigned bit_values)
+static inline bool
+ni_tio_counting_mode_registers_present(const struct ni_gpct_device *counter_dev)
{
- ni_tio_set_bits_transient(counter, register_index, bit_mask, bit_values,
- 0x0);
+ /* m series and 660x variants have counting mode registers */
+ return counter_dev->variant != ni_gpct_variant_e_series;
}
-/* ni_tio_get_soft_copy( ) is for safely reading the software copy of a register
-whose bits might be modified in interrupt context, or whose software copy
-might need to be read in interrupt context.
-*/
-static inline unsigned ni_tio_get_soft_copy(const struct ni_gpct *counter,
- enum ni_gpct_register
- register_index)
-{
- struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned long flags;
- unsigned value;
-
- BUG_ON(register_index >= NITIO_NUM_REGS);
- spin_lock_irqsave(&counter_dev->regs_lock, flags);
- value = counter_dev->regs[register_index];
- spin_unlock_irqrestore(&counter_dev->regs_lock, flags);
- return value;
-}
+void ni_tio_set_bits(struct ni_gpct *, enum ni_gpct_register reg,
+ unsigned int mask, unsigned int value);
+unsigned int ni_tio_get_soft_copy(const struct ni_gpct *,
+ enum ni_gpct_register reg);
-int ni_tio_arm(struct ni_gpct *counter, int arm, unsigned start_trigger);
-int ni_tio_set_gate_src(struct ni_gpct *counter, unsigned gate_index,
- unsigned int gate_source);
+int ni_tio_arm(struct ni_gpct *, bool arm, unsigned int start_trigger);
+int ni_tio_set_gate_src(struct ni_gpct *, unsigned int gate, unsigned int src);
#endif /* _COMEDI_NI_TIO_INTERNAL_H */
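The inline write_register()/read_register() helpers removed above are replaced by the ni_tio_write()/ni_tio_read() declarations; their definitions are not visible in this excerpt and presumably live in ni_tio.c. Under that assumption, a sketch consistent with the renamed counter_dev->write/read callbacks and the graceful bounds check used by ni_tio_get_soft_copy() could look like:

void ni_tio_write(struct ni_gpct *counter, unsigned int value,
		  enum ni_gpct_register reg)
{
	if (reg < NITIO_NUM_REGS)
		counter->counter_dev->write(counter, value, reg);
}

unsigned int ni_tio_read(struct ni_gpct *counter, enum ni_gpct_register reg)
{
	if (reg < NITIO_NUM_REGS)
		return counter->counter_dev->read(counter, reg);
	return 0;
}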
diff --git a/drivers/staging/comedi/drivers/ni_tiocmd.c b/drivers/staging/comedi/drivers/ni_tiocmd.c
index 823e47910004..9007c57544bf 100644
--- a/drivers/staging/comedi/drivers/ni_tiocmd.c
+++ b/drivers/staging/comedi/drivers/ni_tiocmd.c
@@ -1,19 +1,18 @@
/*
- comedi/drivers/ni_tiocmd.c
- Command support for NI general purpose counters
-
- Copyright (C) 2006 Frank Mori Hess <fmhess@users.sourceforge.net>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * Command support for NI general purpose counters
+ *
+ * Copyright (C) 2006 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
/*
* Module: ni_tiocmd
@@ -36,13 +35,10 @@
* DAQ 660x Register-Level Programmer Manual (NI 370505A-01)
* DAQ 6601/6602 User Manual (NI 322137B-01)
* 340934b.pdf DAQ-STC reference manual
+ *
+ * TODO: Support use of both banks X and Y
*/
-/*
-TODO:
- Support use of both banks X and Y
-*/
-
#include <linux/module.h>
#include "ni_tio_internal.h"
#include "mite.h"
@@ -51,9 +47,9 @@ static void ni_tio_configure_dma(struct ni_gpct *counter,
bool enable, bool read)
{
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
- unsigned mask;
- unsigned bits;
+ unsigned int cidx = counter->counter_index;
+ unsigned int mask;
+ unsigned int bits;
mask = GI_READ_ACKS_IRQ | GI_WRITE_ACKS_IRQ;
bits = 0;
@@ -103,7 +99,7 @@ static int ni_tio_input_inttrig(struct comedi_device *dev,
spin_unlock_irqrestore(&counter->lock, flags);
if (ret < 0)
return ret;
- ret = ni_tio_arm(counter, 1, NI_GPCT_ARM_IMMEDIATE);
+ ret = ni_tio_arm(counter, true, NI_GPCT_ARM_IMMEDIATE);
s->async->inttrig = NULL;
return ret;
@@ -113,7 +109,7 @@ static int ni_tio_input_cmd(struct comedi_subdevice *s)
{
struct ni_gpct *counter = s->private;
struct ni_gpct_device *counter_dev = counter->counter_dev;
- unsigned cidx = counter->counter_index;
+ unsigned int cidx = counter->counter_index;
struct comedi_async *async = s->async;
struct comedi_cmd *cmd = &async->cmd;
int ret = 0;
@@ -129,9 +125,6 @@ static int ni_tio_input_cmd(struct comedi_subdevice *s)
case ni_gpct_variant_e_series:
mite_prep_dma(counter->mite_chan, 16, 32);
break;
- default:
- BUG();
- break;
}
ni_tio_set_bits(counter, NITIO_CMD_REG(cidx), GI_SAVE_TRACE, 0);
ni_tio_configure_dma(counter, true, true);
@@ -143,9 +136,9 @@ static int ni_tio_input_cmd(struct comedi_subdevice *s)
mite_dma_arm(counter->mite_chan);
if (cmd->start_src == TRIG_NOW)
- ret = ni_tio_arm(counter, 1, NI_GPCT_ARM_IMMEDIATE);
+ ret = ni_tio_arm(counter, true, NI_GPCT_ARM_IMMEDIATE);
else if (cmd->start_src == TRIG_EXT)
- ret = ni_tio_arm(counter, 1, cmd->start_arg);
+ ret = ni_tio_arm(counter, true, cmd->start_arg);
}
return ret;
}
@@ -163,9 +156,9 @@ static int ni_tio_cmd_setup(struct comedi_subdevice *s)
{
struct comedi_cmd *cmd = &s->async->cmd;
struct ni_gpct *counter = s->private;
- unsigned cidx = counter->counter_index;
+ unsigned int cidx = counter->counter_index;
int set_gate_source = 0;
- unsigned gate_source;
+ unsigned int gate_source;
int retval = 0;
if (cmd->scan_begin_src == TRIG_EXT) {
@@ -289,10 +282,10 @@ EXPORT_SYMBOL_GPL(ni_tio_cmdtest);
int ni_tio_cancel(struct ni_gpct *counter)
{
- unsigned cidx = counter->counter_index;
+ unsigned int cidx = counter->counter_index;
unsigned long flags;
- ni_tio_arm(counter, 0, 0);
+ ni_tio_arm(counter, false, 0);
spin_lock_irqsave(&counter->lock, flags);
if (counter->mite_chan)
mite_dma_disarm(counter->mite_chan);
@@ -305,9 +298,6 @@ int ni_tio_cancel(struct ni_gpct *counter)
}
EXPORT_SYMBOL_GPL(ni_tio_cancel);
- /* During buffered input counter operation for e-series, the gate
- interrupt is acked automatically by the dma controller, due to the
- Gi_Read/Write_Acknowledges_IRQ bits in the input select register. */
static int should_ack_gate(struct ni_gpct *counter)
{
unsigned long flags;
@@ -315,12 +305,19 @@ static int should_ack_gate(struct ni_gpct *counter)
switch (counter->counter_dev->variant) {
case ni_gpct_variant_m_series:
- /* not sure if 660x really supports gate
- interrupts (the bits are not listed
- in register-level manual) */
case ni_gpct_variant_660x:
+		/*
+		 * not sure if 660x really supports gate interrupts
+		 * (the bits are not listed in the register-level manual)
+		 */
return 1;
case ni_gpct_variant_e_series:
+ /*
+ * During buffered input counter operation for e-series,
+ * the gate interrupt is acked automatically by the dma
+ * controller, due to the Gi_Read/Write_Acknowledges_IRQ
+ * bits in the input select register.
+ */
spin_lock_irqsave(&counter->lock, flags);
{
if (!counter->mite_chan ||
@@ -338,15 +335,14 @@ static int should_ack_gate(struct ni_gpct *counter)
static void ni_tio_acknowledge_and_confirm(struct ni_gpct *counter,
int *gate_error,
int *tc_error,
- int *perm_stale_data,
- int *stale_data)
+ int *perm_stale_data)
{
- unsigned cidx = counter->counter_index;
- const unsigned short gxx_status = read_register(counter,
+ unsigned int cidx = counter->counter_index;
+ const unsigned short gxx_status = ni_tio_read(counter,
NITIO_SHARED_STATUS_REG(cidx));
- const unsigned short gi_status = read_register(counter,
+ const unsigned short gi_status = ni_tio_read(counter,
NITIO_STATUS_REG(cidx));
- unsigned ack = 0;
+ unsigned int ack = 0;
if (gate_error)
*gate_error = 0;
@@ -354,15 +350,15 @@ static void ni_tio_acknowledge_and_confirm(struct ni_gpct *counter,
*tc_error = 0;
if (perm_stale_data)
*perm_stale_data = 0;
- if (stale_data)
- *stale_data = 0;
if (gxx_status & GI_GATE_ERROR(cidx)) {
ack |= GI_GATE_ERROR_CONFIRM(cidx);
if (gate_error) {
- /*660x don't support automatic acknowledgment
- of gate interrupt via dma read/write
- and report bogus gate errors */
+			/*
+			 * 660x boards don't support automatic
+			 * acknowledgment of the gate interrupt via dma
+			 * read/write and report bogus gate errors
+			 */
if (counter->counter_dev->variant !=
ni_gpct_variant_660x)
*gate_error = 1;
@@ -380,14 +376,10 @@ static void ni_tio_acknowledge_and_confirm(struct ni_gpct *counter,
ack |= GI_GATE_INTERRUPT_ACK;
}
if (ack)
- write_register(counter, ack, NITIO_INT_ACK_REG(cidx));
+ ni_tio_write(counter, ack, NITIO_INT_ACK_REG(cidx));
if (ni_tio_get_soft_copy(counter, NITIO_MODE_REG(cidx)) &
GI_LOADING_ON_GATE) {
- if (gxx_status & GI_STALE_DATA(cidx)) {
- if (stale_data)
- *stale_data = 1;
- }
- if (read_register(counter, NITIO_STATUS2_REG(cidx)) &
+ if (ni_tio_read(counter, NITIO_STATUS2_REG(cidx)) &
GI_PERMANENT_STALE(cidx)) {
dev_info(counter->counter_dev->dev->class_dev,
"%s: Gi_Permanent_Stale_Data detected.\n",
@@ -400,22 +392,21 @@ static void ni_tio_acknowledge_and_confirm(struct ni_gpct *counter,
void ni_tio_acknowledge(struct ni_gpct *counter)
{
- ni_tio_acknowledge_and_confirm(counter, NULL, NULL, NULL, NULL);
+ ni_tio_acknowledge_and_confirm(counter, NULL, NULL, NULL);
}
EXPORT_SYMBOL_GPL(ni_tio_acknowledge);
void ni_tio_handle_interrupt(struct ni_gpct *counter,
struct comedi_subdevice *s)
{
- unsigned cidx = counter->counter_index;
- unsigned gpct_mite_status;
+ unsigned int cidx = counter->counter_index;
unsigned long flags;
int gate_error;
int tc_error;
int perm_stale_data;
ni_tio_acknowledge_and_confirm(counter, &gate_error, &tc_error,
- &perm_stale_data, NULL);
+ &perm_stale_data);
if (gate_error) {
dev_notice(counter->counter_dev->dev->class_dev,
"%s: Gi_Gate_Error detected.\n", __func__);
@@ -426,7 +417,7 @@ void ni_tio_handle_interrupt(struct ni_gpct *counter,
switch (counter->counter_dev->variant) {
case ni_gpct_variant_m_series:
case ni_gpct_variant_660x:
- if (read_register(counter, NITIO_DMA_STATUS_REG(cidx)) &
+ if (ni_tio_read(counter, NITIO_DMA_STATUS_REG(cidx)) &
GI_DRQ_ERROR) {
dev_notice(counter->counter_dev->dev->class_dev,
"%s: Gi_DRQ_Error detected.\n", __func__);
@@ -437,16 +428,8 @@ void ni_tio_handle_interrupt(struct ni_gpct *counter,
break;
}
spin_lock_irqsave(&counter->lock, flags);
- if (!counter->mite_chan) {
- spin_unlock_irqrestore(&counter->lock, flags);
- return;
- }
- gpct_mite_status = mite_get_status(counter->mite_chan);
- if (gpct_mite_status & CHSR_LINKC)
- writel(CHOR_CLRLC,
- counter->mite_chan->mite->mite_io_addr +
- MITE_CHOR(counter->mite_chan->channel));
- mite_sync_input_dma(counter->mite_chan, s);
+ if (counter->mite_chan)
+ mite_ack_linkc(counter->mite_chan, s, true);
spin_unlock_irqrestore(&counter->lock, flags);
}
EXPORT_SYMBOL_GPL(ni_tio_handle_interrupt);
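
The ni_tio_acknowledge_and_confirm() changes above keep the driver's existing pattern: read the shared and per-counter status registers, OR the matching *_CONFIRM/*_ACK bits into one ack word, and issue a single write to the interrupt-acknowledge register. Below is a minimal userspace sketch of that accumulate-then-write idea; the register name and bit values are invented for illustration and are not the real NI TIO encoding.

#include <stdint.h>
#include <stdio.h>

/* Illustrative status/ack bits only; not the real NI TIO encoding. */
#define ST_GATE_ERROR   (1u << 0)
#define ST_TC_ERROR     (1u << 1)
#define ST_GATE_IRQ     (1u << 2)
#define ACK_GATE_ERROR  (1u << 4)
#define ACK_TC_ERROR    (1u << 5)
#define ACK_GATE_IRQ    (1u << 6)

/* Stand-in for a device register write. */
static void reg_write(const char *reg, uint32_t val)
{
	printf("write %s <- 0x%x\n", reg, (unsigned int)val);
}

static void acknowledge(uint32_t status, int *gate_error, int *tc_error)
{
	uint32_t ack = 0;

	if (gate_error)
		*gate_error = 0;
	if (tc_error)
		*tc_error = 0;

	if (status & ST_GATE_ERROR) {
		ack |= ACK_GATE_ERROR;
		if (gate_error)
			*gate_error = 1;
	}
	if (status & ST_TC_ERROR) {
		ack |= ACK_TC_ERROR;
		if (tc_error)
			*tc_error = 1;
	}
	if (status & ST_GATE_IRQ)
		ack |= ACK_GATE_IRQ;

	if (ack)	/* one write clears everything that was seen */
		reg_write("INT_ACK", ack);
}

int main(void)
{
	int gate_error, tc_error;

	acknowledge(ST_GATE_ERROR | ST_GATE_IRQ, &gate_error, &tc_error);
	printf("gate_error=%d tc_error=%d\n", gate_error, tc_error);
	return 0;
}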
diff --git a/drivers/staging/comedi/drivers/plx9052.h b/drivers/staging/comedi/drivers/plx9052.h
index fbcf25069807..2892e6528967 100644
--- a/drivers/staging/comedi/drivers/plx9052.h
+++ b/drivers/staging/comedi/drivers/plx9052.h
@@ -1,22 +1,21 @@
/*
- comedi/drivers/plx9052.h
- Definitions for the PLX-9052 PCI interface chip
-
- Copyright (C) 2002 MEV Ltd. <http://www.mev.co.uk/>
-
- COMEDI - Linux Control and Measurement Device Interface
- Copyright (C) 2000 David A. Schleef <ds@schleef.org>
-
- This program is free software; you can redistribute it and/or modify
- it under the terms of the GNU General Public License as published by
- the Free Software Foundation; either version 2 of the License, or
- (at your option) any later version.
-
- This program is distributed in the hope that it will be useful,
- but WITHOUT ANY WARRANTY; without even the implied warranty of
- MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- GNU General Public License for more details.
-*/
+ * Definitions for the PLX-9052 PCI interface chip
+ *
+ * Copyright (C) 2002 MEV Ltd. <http://www.mev.co.uk/>
+ *
+ * COMEDI - Linux Control and Measurement Device Interface
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
#ifndef _PLX9052_H_
#define _PLX9052_H_
@@ -25,55 +24,56 @@
* INTCSR - Interrupt Control/Status register
*/
#define PLX9052_INTCSR 0x4c
-#define PLX9052_INTCSR_LI1ENAB (1 << 0) /* LI1 enabled */
-#define PLX9052_INTCSR_LI1POL (1 << 1) /* LI1 active high */
-#define PLX9052_INTCSR_LI1STAT (1 << 2) /* LI1 active */
-#define PLX9052_INTCSR_LI2ENAB (1 << 3) /* LI2 enabled */
-#define PLX9052_INTCSR_LI2POL (1 << 4) /* LI2 active high */
-#define PLX9052_INTCSR_LI2STAT (1 << 5) /* LI2 active */
-#define PLX9052_INTCSR_PCIENAB (1 << 6) /* PCIINT enabled */
-#define PLX9052_INTCSR_SOFTINT (1 << 7) /* generate soft int */
-#define PLX9052_INTCSR_LI1SEL (1 << 8) /* LI1 edge */
-#define PLX9052_INTCSR_LI2SEL (1 << 9) /* LI2 edge */
-#define PLX9052_INTCSR_LI1CLRINT (1 << 10) /* LI1 clear int */
-#define PLX9052_INTCSR_LI2CLRINT (1 << 11) /* LI2 clear int */
-#define PLX9052_INTCSR_ISAMODE (1 << 12) /* ISA interface mode */
+#define PLX9052_INTCSR_LI1ENAB BIT(0) /* LI1 enabled */
+#define PLX9052_INTCSR_LI1POL BIT(1) /* LI1 active high */
+#define PLX9052_INTCSR_LI1STAT BIT(2) /* LI1 active */
+#define PLX9052_INTCSR_LI2ENAB BIT(3) /* LI2 enabled */
+#define PLX9052_INTCSR_LI2POL BIT(4) /* LI2 active high */
+#define PLX9052_INTCSR_LI2STAT BIT(5) /* LI2 active */
+#define PLX9052_INTCSR_PCIENAB BIT(6) /* PCIINT enabled */
+#define PLX9052_INTCSR_SOFTINT BIT(7) /* generate soft int */
+#define PLX9052_INTCSR_LI1SEL BIT(8) /* LI1 edge */
+#define PLX9052_INTCSR_LI2SEL BIT(9) /* LI2 edge */
+#define PLX9052_INTCSR_LI1CLRINT BIT(10) /* LI1 clear int */
+#define PLX9052_INTCSR_LI2CLRINT BIT(11) /* LI2 clear int */
+#define PLX9052_INTCSR_ISAMODE BIT(12) /* ISA interface mode */
/*
* CNTRL - User I/O, Direct Slave Response, Serial EEPROM, and
* Initialization Control register
*/
#define PLX9052_CNTRL 0x50
-#define PLX9052_CNTRL_WAITO (1 << 0) /* UIO0 or WAITO# select */
-#define PLX9052_CNTRL_UIO0_DIR (1 << 1) /* UIO0 direction */
-#define PLX9052_CNTRL_UIO0_DATA (1 << 2) /* UIO0 data */
-#define PLX9052_CNTRL_LLOCKO (1 << 3) /* UIO1 or LLOCKo# select */
-#define PLX9052_CNTRL_UIO1_DIR (1 << 4) /* UIO1 direction */
-#define PLX9052_CNTRL_UIO1_DATA (1 << 5) /* UIO1 data */
-#define PLX9052_CNTRL_CS2 (1 << 6) /* UIO2 or CS2# select */
-#define PLX9052_CNTRL_UIO2_DIR (1 << 7) /* UIO2 direction */
-#define PLX9052_CNTRL_UIO2_DATA (1 << 8) /* UIO2 data */
-#define PLX9052_CNTRL_CS3 (1 << 9) /* UIO3 or CS3# select */
-#define PLX9052_CNTRL_UIO3_DIR (1 << 10) /* UIO3 direction */
-#define PLX9052_CNTRL_UIO3_DATA (1 << 11) /* UIO3 data */
-#define PLX9052_CNTRL_PCIBAR01 (0 << 12) /* bar 0 (mem) and 1 (I/O) */
-#define PLX9052_CNTRL_PCIBAR0 (1 << 12) /* bar 0 (mem) only */
-#define PLX9052_CNTRL_PCIBAR1 (2 << 12) /* bar 1 (I/O) only */
-#define PLX9052_CNTRL_PCI2_1_FEATURES (1 << 14) /* PCI r2.1 features enabled */
-#define PLX9052_CNTRL_PCI_R_W_FLUSH (1 << 15) /* read w/write flush mode */
-#define PLX9052_CNTRL_PCI_R_NO_FLUSH (1 << 16) /* read no flush mode */
-#define PLX9052_CNTRL_PCI_R_NO_WRITE (1 << 17) /* read no write mode */
-#define PLX9052_CNTRL_PCI_W_RELEASE (1 << 18) /* write release bus mode */
-#define PLX9052_CNTRL_RETRY_CLKS(x) (((x) & 0xf) << 19) /* slave retry clks */
-#define PLX9052_CNTRL_LOCK_ENAB (1 << 23) /* slave LOCK# enable */
+#define PLX9052_CNTRL_WAITO BIT(0) /* UIO0 or WAITO# select */
+#define PLX9052_CNTRL_UIO0_DIR BIT(1) /* UIO0 direction */
+#define PLX9052_CNTRL_UIO0_DATA BIT(2) /* UIO0 data */
+#define PLX9052_CNTRL_LLOCKO BIT(3) /* UIO1 or LLOCKo# select */
+#define PLX9052_CNTRL_UIO1_DIR BIT(4) /* UIO1 direction */
+#define PLX9052_CNTRL_UIO1_DATA BIT(5) /* UIO1 data */
+#define PLX9052_CNTRL_CS2 BIT(6) /* UIO2 or CS2# select */
+#define PLX9052_CNTRL_UIO2_DIR BIT(7) /* UIO2 direction */
+#define PLX9052_CNTRL_UIO2_DATA BIT(8) /* UIO2 data */
+#define PLX9052_CNTRL_CS3 BIT(9) /* UIO3 or CS3# select */
+#define PLX9052_CNTRL_UIO3_DIR BIT(10) /* UIO3 direction */
+#define PLX9052_CNTRL_UIO3_DATA BIT(11) /* UIO3 data */
+#define PLX9052_CNTRL_PCIBAR(x) (((x) & 0x3) << 12)
+#define PLX9052_CNTRL_PCIBAR01 PLX9052_CNTRL_PCIBAR(0) /* mem and IO */
+#define PLX9052_CNTRL_PCIBAR0 PLX9052_CNTRL_PCIBAR(1) /* mem only */
+#define PLX9052_CNTRL_PCIBAR1 PLX9052_CNTRL_PCIBAR(2) /* IO only */
+#define PLX9052_CNTRL_PCI2_1_FEATURES BIT(14) /* PCI v2.1 features enabled */
+#define PLX9052_CNTRL_PCI_R_W_FLUSH BIT(15) /* read w/write flush mode */
+#define PLX9052_CNTRL_PCI_R_NO_FLUSH BIT(16) /* read no flush mode */
+#define PLX9052_CNTRL_PCI_R_NO_WRITE BIT(17) /* read no write mode */
+#define PLX9052_CNTRL_PCI_W_RELEASE BIT(18) /* write release bus mode */
+#define PLX9052_CNTRL_RETRY_CLKS(x) (((x) & 0xf) << 19) /* retry clks */
+#define PLX9052_CNTRL_LOCK_ENAB BIT(23) /* slave LOCK# enable */
#define PLX9052_CNTRL_EEPROM_MASK (0x1f << 24) /* EEPROM bits */
-#define PLX9052_CNTRL_EEPROM_CLK (1 << 24) /* EEPROM clock */
-#define PLX9052_CNTRL_EEPROM_CS (1 << 25) /* EEPROM chip select */
-#define PLX9052_CNTRL_EEPROM_DOUT (1 << 26) /* EEPROM write bit */
-#define PLX9052_CNTRL_EEPROM_DIN (1 << 27) /* EEPROM read bit */
-#define PLX9052_CNTRL_EEPROM_PRESENT (1 << 28) /* EEPROM present */
-#define PLX9052_CNTRL_RELOAD_CFG (1 << 29) /* reload configuration */
-#define PLX9052_CNTRL_PCI_RESET (1 << 30) /* PCI adapter reset */
-#define PLX9052_CNTRL_MASK_REV (1 << 31) /* mask revision */
+#define PLX9052_CNTRL_EEPROM_CLK BIT(24) /* EEPROM clock */
+#define PLX9052_CNTRL_EEPROM_CS BIT(25) /* EEPROM chip select */
+#define PLX9052_CNTRL_EEPROM_DOUT BIT(26) /* EEPROM write bit */
+#define PLX9052_CNTRL_EEPROM_DIN BIT(27) /* EEPROM read bit */
+#define PLX9052_CNTRL_EEPROM_PRESENT BIT(28) /* EEPROM present */
+#define PLX9052_CNTRL_RELOAD_CFG BIT(29) /* reload configuration */
+#define PLX9052_CNTRL_PCI_RESET BIT(30) /* PCI adapter reset */
+#define PLX9052_CNTRL_MASK_REV BIT(31) /* mask revision */
#endif /* _PLX9052_H_ */
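
The plx9052.h rework above replaces open-coded (1 << n) constants with the kernel's BIT() macro and turns the two-bit PCIBAR field into a parameterized helper. A small standalone C sketch of the same idiom follows; BIT() is redefined locally only so the example builds outside the kernel (in-tree it comes from linux/bitops.h), and the register values printed are purely illustrative.

#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1UL << (n))	/* kernel version lives in linux/bitops.h */

#define CNTRL_WAITO		BIT(0)
#define CNTRL_LOCK_ENAB		BIT(23)
#define CNTRL_PCIBAR(x)		(((x) & 0x3) << 12)	/* 2-bit field at bits 13:12 */
#define CNTRL_PCIBAR_MEM_IO	CNTRL_PCIBAR(0)
#define CNTRL_PCIBAR_MEM	CNTRL_PCIBAR(1)
#define CNTRL_PCIBAR_IO		CNTRL_PCIBAR(2)
#define CNTRL_PCIBAR_MASK	CNTRL_PCIBAR(3)

int main(void)
{
	/* Compose a control word from single bits plus one masked field. */
	uint32_t cntrl = CNTRL_WAITO | CNTRL_PCIBAR_MEM | CNTRL_LOCK_ENAB;

	/* Decode the field back out with the mask built from the same helper. */
	printf("cntrl=0x%08x, bar mode=%u\n", (unsigned int)cntrl,
	       (unsigned int)((cntrl & CNTRL_PCIBAR_MASK) >> 12));
	return 0;
}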
diff --git a/drivers/staging/comedi/drivers/plx9080.h b/drivers/staging/comedi/drivers/plx9080.h
index f5cd6d5004bd..8d1aee00b19f 100644
--- a/drivers/staging/comedi/drivers/plx9080.h
+++ b/drivers/staging/comedi/drivers/plx9080.h
@@ -88,7 +88,7 @@ enum marb_bits {
/* direct slave LLOCKo# enable */
MARB_DS_LLOCK_ENABLE = 0x00400000,
MARB_PCI_REQUEST_MODE = 0x00800000,
- MARB_PCIv21_MODE = 0x01000000, /* pci specification v2.1 mode */
+ MARB_PCIV21_MODE = 0x01000000, /* pci specification v2.1 mode */
MARB_PCI_READ_NO_WRITE_MODE = 0x02000000,
MARB_PCI_READ_WITH_WRITE_FLUSH_MODE = 0x04000000,
/* gate local bus latency timer with BREQ */
diff --git a/drivers/staging/comedi/drivers/z8536.h b/drivers/staging/comedi/drivers/z8536.h
index 7be53109cc8d..47eadbf4dcc0 100644
--- a/drivers/staging/comedi/drivers/z8536.h
+++ b/drivers/staging/comedi/drivers/z8536.h
@@ -24,11 +24,12 @@
#define Z8536_CFG_CTRL_PCE_CT3E BIT(4) /* Port C & C/T 3 Enable */
#define Z8536_CFG_CTRL_PLC BIT(3) /* Port A/B Link Control */
#define Z8536_CFG_CTRL_PAE BIT(2) /* Port A Enable */
-#define Z8536_CFG_CTRL_LC_INDEP (0 << 0)/* C/Ts Independent */
-#define Z8536_CFG_CTRL_LC_GATE (1 << 0)/* C/T 1 Out Gates C/T 2 */
-#define Z8536_CFG_CTRL_LC_TRIG (2 << 0)/* C/T 1 Out Triggers C/T 2 */
-#define Z8536_CFG_CTRL_LC_CLK (3 << 0)/* C/T 1 Out Clocks C/T 2 */
-#define Z8536_CFG_CTRL_LC_MASK (3 << 0)/* C/T Link Control mask */
+#define Z8536_CFG_CTRL_LC(x) (((x) & 0x3) << 0) /* Link Control */
+#define Z8536_CFG_CTRL_LC_INDEP Z8536_CFG_CTRL_LC(0)/* Independent */
+#define Z8536_CFG_CTRL_LC_GATE Z8536_CFG_CTRL_LC(1)/* 1 Gates 2 */
+#define Z8536_CFG_CTRL_LC_TRIG Z8536_CFG_CTRL_LC(2)/* 1 Triggers 2 */
+#define Z8536_CFG_CTRL_LC_CLK Z8536_CFG_CTRL_LC(3)/* 1 Clocks 2 */
+#define Z8536_CFG_CTRL_LC_MASK Z8536_CFG_CTRL_LC(3)
/* Interrupt Vector registers */
#define Z8536_PA_INT_VECT_REG 0x02
@@ -43,15 +44,16 @@
#define Z8536_CT2_CMDSTAT_REG 0x0b
#define Z8536_CT3_CMDSTAT_REG 0x0c
#define Z8536_CT_CMDSTAT_REG(x) (0x0a + (x))
-#define Z8536_CMD_NULL (0 << 5)/* Null Code */
-#define Z8536_CMD_CLR_IP_IUS (1 << 5)/* Clear IP & IUS */
-#define Z8536_CMD_SET_IUS (2 << 5)/* Set IUS */
-#define Z8536_CMD_CLR_IUS (3 << 5)/* Clear IUS */
-#define Z8536_CMD_SET_IP (4 << 5)/* Set IP */
-#define Z8536_CMD_CLR_IP (5 << 5)/* Clear IP */
-#define Z8536_CMD_SET_IE (6 << 5)/* Set IE */
-#define Z8536_CMD_CLR_IE (7 << 5)/* Clear IE */
-#define Z8536_CMD_MASK (7 << 5)
+#define Z8536_CMD(x) (((x) & 0x7) << 5)
+#define Z8536_CMD_NULL Z8536_CMD(0) /* Null Code */
+#define Z8536_CMD_CLR_IP_IUS Z8536_CMD(1) /* Clear IP & IUS */
+#define Z8536_CMD_SET_IUS Z8536_CMD(2) /* Set IUS */
+#define Z8536_CMD_CLR_IUS Z8536_CMD(3) /* Clear IUS */
+#define Z8536_CMD_SET_IP Z8536_CMD(4) /* Set IP */
+#define Z8536_CMD_CLR_IP Z8536_CMD(5) /* Clear IP */
+#define Z8536_CMD_SET_IE Z8536_CMD(6) /* Set IE */
+#define Z8536_CMD_CLR_IE Z8536_CMD(7) /* Clear IE */
+#define Z8536_CMD_MASK Z8536_CMD(7)
#define Z8536_STAT_IUS BIT(7) /* Interrupt Under Service */
#define Z8536_STAT_IE BIT(6) /* Interrupt Enable */
@@ -105,46 +107,51 @@
#define Z8536_CT_MODE_ETE BIT(4) /* External Trigger Enable */
#define Z8536_CT_MODE_EGE BIT(3) /* External Gate Enable */
#define Z8536_CT_MODE_REB BIT(2) /* Retrigger Enable Bit */
-#define Z8536_CT_MODE_DCS_PULSE (0 << 0)/* Duty Cycle - Pulse */
-#define Z8536_CT_MODE_DCS_ONESHOT (1 << 0)/* Duty Cycle - One-Shot */
-#define Z8536_CT_MODE_DCS_SQRWAVE (2 << 0)/* Duty Cycle - Square Wave */
-#define Z8536_CT_MODE_DCS_DO_NOT_USE (3 << 0)/* Duty Cycle - Do Not Use */
-#define Z8536_CT_MODE_DCS_MASK (3 << 0)/* Duty Cycle mask */
+#define Z8536_CT_MODE_DCS(x) (((x) & 0x3) << 0) /* Duty Cycle */
+#define Z8536_CT_MODE_DCS_PULSE Z8536_CT_MODE_DCS(0) /* Pulse */
+#define Z8536_CT_MODE_DCS_ONESHOT Z8536_CT_MODE_DCS(1) /* One-Shot */
+#define Z8536_CT_MODE_DCS_SQRWAVE Z8536_CT_MODE_DCS(2) /* Square Wave */
+#define Z8536_CT_MODE_DCS_DO_NOT_USE Z8536_CT_MODE_DCS(3) /* Do Not Use */
+#define Z8536_CT_MODE_DCS_MASK Z8536_CT_MODE_DCS(3)
/* Port A/B Mode Specification registers */
#define Z8536_PA_MODE_REG 0x20
#define Z8536_PB_MODE_REG 0x28
-#define Z8536_PAB_MODE_PTS_BIT (0 << 6)/* Bit Port */
-#define Z8536_PAB_MODE_PTS_INPUT (1 << 6)/* Input Port */
-#define Z8536_PAB_MODE_PTS_OUTPUT (2 << 6)/* Output Port */
-#define Z8536_PAB_MODE_PTS_BIDIR (3 << 6)/* Bidirectional Port */
-#define Z8536_PAB_MODE_PTS_MASK (3 << 6)/* Port Type Select mask */
+#define Z8536_PAB_MODE_PTS(x) (((x) & 0x3) << 6) /* Port type */
+#define Z8536_PAB_MODE_PTS_BIT Z8536_PAB_MODE_PTS(0)/* Bit */
+#define Z8536_PAB_MODE_PTS_INPUT Z8536_PAB_MODE_PTS(1)/* Input */
+#define Z8536_PAB_MODE_PTS_OUTPUT Z8536_PAB_MODE_PTS(2)/* Output */
+#define Z8536_PAB_MODE_PTS_BIDIR Z8536_PAB_MODE_PTS(3)/* Bidir */
+#define Z8536_PAB_MODE_PTS_MASK Z8536_PAB_MODE_PTS(3)
#define Z8536_PAB_MODE_ITB BIT(5) /* Interrupt on Two Bytes */
#define Z8536_PAB_MODE_SB BIT(4) /* Single Buffered mode */
#define Z8536_PAB_MODE_IMO BIT(3) /* Interrupt on Match Only */
-#define Z8536_PAB_MODE_PMS_DISABLE (0 << 1)/* Disable Pattern Match */
-#define Z8536_PAB_MODE_PMS_AND (1 << 1)/* "AND" mode */
-#define Z8536_PAB_MODE_PMS_OR (2 << 1)/* "OR" mode */
-#define Z8536_PAB_MODE_PMS_OR_PEV (3 << 1)/* "OR-Priority" mode */
-#define Z8536_PAB_MODE_PMS_MASK (3 << 1)/* Pattern Mode mask */
+#define Z8536_PAB_MODE_PMS(x) (((x) & 0x3) << 1) /* Pattern Mode */
+#define Z8536_PAB_MODE_PMS_DISABLE Z8536_PAB_MODE_PMS(0)/* Disabled */
+#define Z8536_PAB_MODE_PMS_AND Z8536_PAB_MODE_PMS(1)/* "AND" */
+#define Z8536_PAB_MODE_PMS_OR Z8536_PAB_MODE_PMS(2)/* "OR" */
+#define Z8536_PAB_MODE_PMS_OR_PEV Z8536_PAB_MODE_PMS(3)/* "OR-Priority" */
+#define Z8536_PAB_MODE_PMS_MASK Z8536_PAB_MODE_PMS(3)
#define Z8536_PAB_MODE_LPM BIT(0) /* Latch on Pattern Match */
#define Z8536_PAB_MODE_DTE BIT(0) /* Deskew Timer Enabled */
/* Port A/B Handshake Specification registers */
#define Z8536_PA_HANDSHAKE_REG 0x21
#define Z8536_PB_HANDSHAKE_REG 0x29
-#define Z8536_PAB_HANDSHAKE_HST_INTER (0 << 6)/* Interlocked Handshake */
-#define Z8536_PAB_HANDSHAKE_HST_STROBED (1 << 6)/* Strobed Handshake */
-#define Z8536_PAB_HANDSHAKE_HST_PULSED (2 << 6)/* Pulsed Handshake */
-#define Z8536_PAB_HANDSHAKE_HST_3WIRE (3 << 6)/* Three-Wire Handshake */
-#define Z8536_PAB_HANDSHAKE_HST_MASK (3 << 6)/* Handshake Type mask */
-#define Z8536_PAB_HANDSHAKE_RWS_DISABLE (0 << 3)/* Req/Wait Disabled */
-#define Z8536_PAB_HANDSHAKE_RWS_OUTWAIT (1 << 3)/* Output Wait */
-#define Z8536_PAB_HANDSHAKE_RWS_INWAIT (3 << 3)/* Input Wait */
-#define Z8536_PAB_HANDSHAKE_RWS_SPREQ (4 << 3)/* Special Request */
-#define Z8536_PAB_HANDSHAKE_RWS_OUTREQ (5 << 4)/* Output Request */
-#define Z8536_PAB_HANDSHAKE_RWS_INREQ (7 << 3)/* Input Request */
-#define Z8536_PAB_HANDSHAKE_RWS_MASK (7 << 3)/* Req/Wait mask */
+#define Z8536_PAB_HANDSHAKE_HST(x) (((x) & 0x3) << 6) /* Handshake Type */
+#define Z8536_PAB_HANDSHAKE_HST_INTER Z8536_PAB_HANDSHAKE_HST(0)/*Interlock*/
+#define Z8536_PAB_HANDSHAKE_HST_STROBED Z8536_PAB_HANDSHAKE_HST(1)/* Strobed */
+#define Z8536_PAB_HANDSHAKE_HST_PULSED Z8536_PAB_HANDSHAKE_HST(2)/* Pulsed */
+#define Z8536_PAB_HANDSHAKE_HST_3WIRE Z8536_PAB_HANDSHAKE_HST(3)/* 3-Wire */
+#define Z8536_PAB_HANDSHAKE_HST_MASK Z8536_PAB_HANDSHAKE_HST(3)
+#define Z8536_PAB_HANDSHAKE_RWS(x) (((x) & 0x7) << 3) /* Req/Wait */
+#define Z8536_PAB_HANDSHAKE_RWS_DISABLE Z8536_PAB_HANDSHAKE_RWS(0)/* Disabled */
+#define Z8536_PAB_HANDSHAKE_RWS_OUTWAIT Z8536_PAB_HANDSHAKE_RWS(1)/* Out Wait */
+#define Z8536_PAB_HANDSHAKE_RWS_INWAIT Z8536_PAB_HANDSHAKE_RWS(3)/* In Wait */
+#define Z8536_PAB_HANDSHAKE_RWS_SPREQ Z8536_PAB_HANDSHAKE_RWS(4)/* Special */
+#define Z8536_PAB_HANDSHAKE_RWS_OUTREQ Z8536_PAB_HANDSHAKE_RWS(5)/* Out Req */
+#define Z8536_PAB_HANDSHAKE_RWS_INREQ Z8536_PAB_HANDSHAKE_RWS(7)/* In Req */
+#define Z8536_PAB_HANDSHAKE_RWS_MASK Z8536_PAB_HANDSHAKE_RWS(7)
#define Z8536_PAB_HANDSHAKE_DESKEW(x) ((x) << 0)/* Deskew Time */
#define Z8536_PAB_HANDSHAKE_DESKEW_MASK (3 << 0)/* Deskew Time mask */
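
The z8536.h changes follow the same scheme: each multi-bit field gets one FIELD(x) macro that masks and shifts the value, and the old *_MASK constant becomes FIELD(all-ones), so composing and decoding a register stay symmetric. A hedged, self-contained sketch of that read-modify-write usage is below; the command codes mirror the Z8536_CMD defines above, but the "register" here is just a local variable.

#include <stdio.h>

#define CMD(x)		(((x) & 0x7) << 5)	/* 3-bit command field at bits 7:5 */
#define CMD_NULL	CMD(0)
#define CMD_CLR_IP_IUS	CMD(1)
#define CMD_SET_IUS	CMD(2)
#define CMD_MASK	CMD(7)

int main(void)
{
	unsigned char reg = 0x1f;	/* pretend the low bits hold other state */

	/* Read-modify-write: clear the old command, insert the new one. */
	reg = (reg & ~CMD_MASK) | CMD_SET_IUS;
	printf("reg=0x%02x\n", reg);

	/* Decoding uses the same mask, so the two directions cannot drift apart. */
	if ((reg & CMD_MASK) == CMD_SET_IUS)
		printf("command field reads back as SET_IUS\n");
	return 0;
}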
diff --git a/drivers/staging/dgnc/dgnc_cls.c b/drivers/staging/dgnc/dgnc_cls.c
index 0ff3139e52b6..46c050cc7dbe 100644
--- a/drivers/staging/dgnc/dgnc_cls.c
+++ b/drivers/staging/dgnc/dgnc_cls.c
@@ -1168,7 +1168,7 @@ static void cls_uart_init(struct channel_t *ch)
/* Clear out UART and FIFO */
readb(&ch->ch_cls_uart->txrx);
- writeb((UART_FCR_ENABLE_FIFO|UART_FCR_CLEAR_RCVR|UART_FCR_CLEAR_XMIT),
+ writeb(UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT,
&ch->ch_cls_uart->isr_fcr);
udelay(10);
diff --git a/drivers/staging/dgnc/dgnc_driver.c b/drivers/staging/dgnc/dgnc_driver.c
index 4eb410e09609..af2e835efa1b 100644
--- a/drivers/staging/dgnc/dgnc_driver.c
+++ b/drivers/staging/dgnc/dgnc_driver.c
@@ -48,7 +48,7 @@ static void dgnc_do_remap(struct dgnc_board *brd);
/*
* File operations permitted on Control/Management major.
*/
-static const struct file_operations dgnc_BoardFops = {
+static const struct file_operations dgnc_board_fops = {
.owner = THIS_MODULE,
.unlocked_ioctl = dgnc_mgmt_ioctl,
.open = dgnc_mgmt_open,
@@ -58,11 +58,11 @@ static const struct file_operations dgnc_BoardFops = {
/*
* Globals
*/
-uint dgnc_NumBoards;
-struct dgnc_board *dgnc_Board[MAXBOARDS];
+uint dgnc_num_boards;
+struct dgnc_board *dgnc_board[MAXBOARDS];
DEFINE_SPINLOCK(dgnc_global_lock);
DEFINE_SPINLOCK(dgnc_poll_lock); /* Poll scheduling lock */
-uint dgnc_Major;
+uint dgnc_major;
int dgnc_poll_tick = 20; /* Poll interval - 20 ms */
/*
@@ -92,7 +92,7 @@ struct board_id {
unsigned int is_pci_express;
};
-static struct board_id dgnc_Ids[] = {
+static struct board_id dgnc_ids[] = {
{ PCI_DEVICE_CLASSIC_4_PCI_NAME, 4, 0 },
{ PCI_DEVICE_CLASSIC_4_422_PCI_NAME, 4, 0 },
{ PCI_DEVICE_CLASSIC_8_PCI_NAME, 8, 0 },
@@ -140,14 +140,14 @@ static void cleanup(bool sysfiles)
if (sysfiles)
dgnc_remove_driver_sysfiles(&dgnc_driver);
- device_destroy(dgnc_class, MKDEV(dgnc_Major, 0));
+ device_destroy(dgnc_class, MKDEV(dgnc_major, 0));
class_destroy(dgnc_class);
- unregister_chrdev(dgnc_Major, "dgnc");
+ unregister_chrdev(dgnc_major, "dgnc");
- for (i = 0; i < dgnc_NumBoards; ++i) {
- dgnc_remove_ports_sysfiles(dgnc_Board[i]);
- dgnc_tty_uninit(dgnc_Board[i]);
- dgnc_cleanup_board(dgnc_Board[i]);
+ for (i = 0; i < dgnc_num_boards; ++i) {
+ dgnc_remove_ports_sysfiles(dgnc_board[i]);
+ dgnc_tty_uninit(dgnc_board[i]);
+ dgnc_cleanup_board(dgnc_board[i]);
}
dgnc_tty_post_uninit();
@@ -217,12 +217,12 @@ static int dgnc_start(void)
*
* Register management/dpa devices
*/
- rc = register_chrdev(0, "dgnc", &dgnc_BoardFops);
+ rc = register_chrdev(0, "dgnc", &dgnc_board_fops);
if (rc < 0) {
pr_err(DRVSTR ": Can't register dgnc driver device (%d)\n", rc);
return rc;
}
- dgnc_Major = rc;
+ dgnc_major = rc;
dgnc_class = class_create(THIS_MODULE, "dgnc_mgmt");
if (IS_ERR(dgnc_class)) {
@@ -232,7 +232,7 @@ static int dgnc_start(void)
}
dev = device_create(dgnc_class, NULL,
- MKDEV(dgnc_Major, 0),
+ MKDEV(dgnc_major, 0),
NULL, "dgnc_mgmt");
if (IS_ERR(dev)) {
rc = PTR_ERR(dev);
@@ -262,11 +262,11 @@ static int dgnc_start(void)
return 0;
failed_tty:
- device_destroy(dgnc_class, MKDEV(dgnc_Major, 0));
+ device_destroy(dgnc_class, MKDEV(dgnc_major, 0));
failed_device:
class_destroy(dgnc_class);
failed_class:
- unregister_chrdev(dgnc_Major, "dgnc");
+ unregister_chrdev(dgnc_major, "dgnc");
return rc;
}
@@ -283,7 +283,7 @@ static int dgnc_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
rc = dgnc_found_board(pdev, ent->driver_data);
if (rc == 0)
- dgnc_NumBoards++;
+ dgnc_num_boards++;
return rc;
}
@@ -346,7 +346,7 @@ static void dgnc_cleanup_board(struct dgnc_board *brd)
}
}
- dgnc_Board[brd->boardnum] = NULL;
+ dgnc_board[brd->boardnum] = NULL;
kfree(brd);
}
@@ -365,8 +365,8 @@ static int dgnc_found_board(struct pci_dev *pdev, int id)
unsigned long flags;
/* get the board structure and prep it */
- dgnc_Board[dgnc_NumBoards] = kzalloc(sizeof(*brd), GFP_KERNEL);
- brd = dgnc_Board[dgnc_NumBoards];
+ dgnc_board[dgnc_num_boards] = kzalloc(sizeof(*brd), GFP_KERNEL);
+ brd = dgnc_board[dgnc_num_boards];
if (!brd)
return -ENOMEM;
@@ -382,15 +382,15 @@ static int dgnc_found_board(struct pci_dev *pdev, int id)
/* store the info for the board we've found */
brd->magic = DGNC_BOARD_MAGIC;
- brd->boardnum = dgnc_NumBoards;
+ brd->boardnum = dgnc_num_boards;
brd->vendor = dgnc_pci_tbl[id].vendor;
brd->device = dgnc_pci_tbl[id].device;
brd->pdev = pdev;
brd->pci_bus = pdev->bus->number;
brd->pci_slot = PCI_SLOT(pdev->devfn);
- brd->name = dgnc_Ids[id].name;
- brd->maxports = dgnc_Ids[id].maxports;
- if (dgnc_Ids[i].is_pci_express)
+ brd->name = dgnc_ids[id].name;
+ brd->maxports = dgnc_ids[id].maxports;
+ if (dgnc_ids[id].is_pci_express)
brd->bd_flags |= BD_IS_PCI_EXPRESS;
brd->dpastatus = BD_NOFEP;
init_waitqueue_head(&brd->state_wait);
@@ -642,8 +642,8 @@ static void dgnc_poll_handler(ulong dummy)
unsigned long new_time;
/* Go thru each board, kicking off a tasklet for each if needed */
- for (i = 0; i < dgnc_NumBoards; i++) {
- brd = dgnc_Board[i];
+ for (i = 0; i < dgnc_num_boards; i++) {
+ brd = dgnc_board[i];
spin_lock_irqsave(&brd->bd_lock, flags);
diff --git a/drivers/staging/dgnc/dgnc_driver.h b/drivers/staging/dgnc/dgnc_driver.h
index e4be81b66041..95ec729fae38 100644
--- a/drivers/staging/dgnc/dgnc_driver.h
+++ b/drivers/staging/dgnc/dgnc_driver.h
@@ -202,18 +202,13 @@ struct dgnc_board {
* to our channels.
*/
- struct tty_driver SerialDriver;
- char SerialName[200];
- struct tty_driver PrintDriver;
- char PrintName[200];
+ struct tty_driver *serial_driver;
+ char serial_name[200];
+ struct tty_driver *print_driver;
+ char print_name[200];
- bool dgnc_Major_Serial_Registered;
- bool dgnc_Major_TransparentPrint_Registered;
-
- uint dgnc_Serial_Major;
- uint dgnc_TransparentPrint_Major;
-
- uint TtyRefCnt;
+ bool dgnc_major_serial_registered;
+ bool dgnc_major_transparent_print_registered;
u16 dpatype; /* The board "type",
* as defined by DPA
@@ -399,12 +394,12 @@ struct channel_t {
/*
* Our Global Variables.
*/
-extern uint dgnc_Major; /* Our driver/mgmt major */
+extern uint dgnc_major; /* Our driver/mgmt major */
extern int dgnc_poll_tick; /* Poll interval - 20 ms */
extern spinlock_t dgnc_global_lock; /* Driver global spinlock */
extern spinlock_t dgnc_poll_lock; /* Poll scheduling lock */
-extern uint dgnc_NumBoards; /* Total number of boards */
-extern struct dgnc_board *dgnc_Board[MAXBOARDS]; /* Array of board
+extern uint dgnc_num_boards; /* Total number of boards */
+extern struct dgnc_board *dgnc_board[MAXBOARDS]; /* Array of board
* structs
*/
diff --git a/drivers/staging/dgnc/dgnc_mgmt.c b/drivers/staging/dgnc/dgnc_mgmt.c
index ba29a8d913f2..683c098391d9 100644
--- a/drivers/staging/dgnc/dgnc_mgmt.c
+++ b/drivers/staging/dgnc/dgnc_mgmt.c
@@ -111,7 +111,7 @@ long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
spin_lock_irqsave(&dgnc_global_lock, flags);
memset(&ddi, 0, sizeof(ddi));
- ddi.dinfo_nboards = dgnc_NumBoards;
+ ddi.dinfo_nboards = dgnc_num_boards;
sprintf(ddi.dinfo_version, "%s", DG_PART);
spin_unlock_irqrestore(&dgnc_global_lock, flags);
@@ -131,27 +131,27 @@ long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (copy_from_user(&brd, uarg, sizeof(int)))
return -EFAULT;
- if (brd < 0 || brd >= dgnc_NumBoards)
+ if (brd < 0 || brd >= dgnc_num_boards)
return -ENODEV;
memset(&di, 0, sizeof(di));
di.info_bdnum = brd;
- spin_lock_irqsave(&dgnc_Board[brd]->bd_lock, flags);
+ spin_lock_irqsave(&dgnc_board[brd]->bd_lock, flags);
- di.info_bdtype = dgnc_Board[brd]->dpatype;
- di.info_bdstate = dgnc_Board[brd]->dpastatus;
+ di.info_bdtype = dgnc_board[brd]->dpatype;
+ di.info_bdstate = dgnc_board[brd]->dpastatus;
di.info_ioport = 0;
- di.info_physaddr = (ulong)dgnc_Board[brd]->membase;
- di.info_physsize = (ulong)dgnc_Board[brd]->membase
- - dgnc_Board[brd]->membase_end;
- if (dgnc_Board[brd]->state != BOARD_FAILED)
- di.info_nports = dgnc_Board[brd]->nasync;
+ di.info_physaddr = (ulong)dgnc_board[brd]->membase;
+ di.info_physsize = (ulong)dgnc_board[brd]->membase
+ - dgnc_board[brd]->membase_end;
+ if (dgnc_board[brd]->state != BOARD_FAILED)
+ di.info_nports = dgnc_board[brd]->nasync;
else
di.info_nports = 0;
- spin_unlock_irqrestore(&dgnc_Board[brd]->bd_lock, flags);
+ spin_unlock_irqrestore(&dgnc_board[brd]->bd_lock, flags);
if (copy_to_user(uarg, &di, sizeof(di)))
return -EFAULT;
@@ -174,14 +174,14 @@ long dgnc_mgmt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
channel = ni.channel;
/* Verify boundaries on board */
- if (board >= dgnc_NumBoards)
+ if (board >= dgnc_num_boards)
return -ENODEV;
/* Verify boundaries on channel */
- if (channel >= dgnc_Board[board]->nasync)
+ if (channel >= dgnc_board[board]->nasync)
return -ENODEV;
- ch = dgnc_Board[board]->channels[channel];
+ ch = dgnc_board[board]->channels[channel];
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
return -ENODEV;
diff --git a/drivers/staging/dgnc/dgnc_neo.c b/drivers/staging/dgnc/dgnc_neo.c
index 31ac437cb4a4..ba57e9546f72 100644
--- a/drivers/staging/dgnc/dgnc_neo.c
+++ b/drivers/staging/dgnc/dgnc_neo.c
@@ -77,8 +77,6 @@ struct board_ops dgnc_neo_ops = {
.send_immediate_char = neo_send_immediate_char
};
-static uint dgnc_offset_table[8] = { 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80 };
-
/*
* This function allows calls to ensure that all outstanding
* PCI writes have been completed, by doing a PCI read against
@@ -116,7 +114,8 @@ static inline void neo_set_cts_flow_control(struct channel_t *ch)
writeb(efr, &ch->ch_neo_uart->efr);
/* Turn on table D, with 8 char hi/low watermarks */
- writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_4DELAY), &ch->ch_neo_uart->fctr);
+ writeb(UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_4DELAY,
+ &ch->ch_neo_uart->fctr);
/* Feed the UART our trigger levels */
writeb(8, &ch->ch_neo_uart->tfifo);
@@ -150,7 +149,8 @@ static inline void neo_set_rts_flow_control(struct channel_t *ch)
/* Turn on UART enhanced bits */
writeb(efr, &ch->ch_neo_uart->efr);
- writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_4DELAY), &ch->ch_neo_uart->fctr);
+ writeb(UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_4DELAY,
+ &ch->ch_neo_uart->fctr);
ch->ch_r_watermark = 4;
writeb(32, &ch->ch_neo_uart->rfifo);
@@ -187,7 +187,8 @@ static inline void neo_set_ixon_flow_control(struct channel_t *ch)
/* Turn on UART enhanced bits */
writeb(efr, &ch->ch_neo_uart->efr);
- writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr);
+ writeb(UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY,
+ &ch->ch_neo_uart->fctr);
ch->ch_r_watermark = 4;
writeb(32, &ch->ch_neo_uart->rfifo);
@@ -225,7 +226,8 @@ static inline void neo_set_ixoff_flow_control(struct channel_t *ch)
writeb(efr, &ch->ch_neo_uart->efr);
/* Turn on table D, with 8 char hi/low watermarks */
- writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr);
+ writeb(UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY,
+ &ch->ch_neo_uart->fctr);
writeb(8, &ch->ch_neo_uart->tfifo);
ch->ch_t_tlevel = 8;
@@ -265,7 +267,8 @@ static inline void neo_set_no_input_flow_control(struct channel_t *ch)
writeb(efr, &ch->ch_neo_uart->efr);
/* Turn on table D, with 8 char hi/low watermarks */
- writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr);
+ writeb(UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY,
+ &ch->ch_neo_uart->fctr);
ch->ch_r_watermark = 0;
@@ -302,7 +305,8 @@ static inline void neo_set_no_output_flow_control(struct channel_t *ch)
writeb(efr, &ch->ch_neo_uart->efr);
/* Turn on table D, with 8 char hi/low watermarks */
- writeb((UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY), &ch->ch_neo_uart->fctr);
+ writeb(UART_17158_FCTR_TRGD | UART_17158_FCTR_RTS_8DELAY,
+ &ch->ch_neo_uart->fctr);
ch->ch_r_watermark = 0;
@@ -321,7 +325,8 @@ static inline void neo_set_no_output_flow_control(struct channel_t *ch)
static inline void neo_set_new_start_stop_chars(struct channel_t *ch)
{
/* if hardware flow control is set, then skip this whole thing */
- if (ch->ch_digi.digi_flags & (CTSPACE | RTSPACE) || ch->ch_c_cflag & CRTSCTS)
+ if (ch->ch_digi.digi_flags & (CTSPACE | RTSPACE) ||
+ ch->ch_c_cflag & CRTSCTS)
return;
/* Tell UART what start/stop chars it should be looking for */
@@ -351,8 +356,8 @@ static inline void neo_clear_break(struct channel_t *ch, int force)
/* Turn break off, and unset some variables */
if (ch->ch_flags & CH_BREAK_SENDING) {
- if (time_after_eq(jiffies, ch->ch_stop_sending_break)
- || force) {
+ if (force ||
+ time_after_eq(jiffies, ch->ch_stop_sending_break)) {
unsigned char temp = readb(&ch->ch_neo_uart->lcr);
writeb((temp & ~UART_LCR_SBC), &ch->ch_neo_uart->lcr);
@@ -374,14 +379,8 @@ static inline void neo_parse_isr(struct dgnc_board *brd, uint port)
unsigned char cause;
unsigned long flags;
- if (!brd || brd->magic != DGNC_BOARD_MAGIC)
- return;
-
- if (port >= brd->maxports)
- return;
-
ch = brd->channels[port];
- if (ch->magic != DGNC_CHANNEL_MAGIC)
+ if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
return;
/* Here we try to figure out what caused the interrupt to happen */
@@ -393,7 +392,8 @@ static inline void neo_parse_isr(struct dgnc_board *brd, uint port)
break;
/*
- * Yank off the upper 2 bits, which just show that the FIFO's are enabled.
+ * Yank off the upper 2 bits,
+ * which just show that the FIFO's are enabled.
*/
isr &= ~(UART_17158_IIR_FIFO_ENABLED);
@@ -666,7 +666,8 @@ static void neo_param(struct tty_struct *tty)
};
/* Only use the TXPrint baud rate if the terminal unit is NOT open */
- if (!(ch->ch_tun.un_flags & UN_ISOPEN) && (un->un_type == DGNC_PRINT))
+ if (!(ch->ch_tun.un_flags & UN_ISOPEN) &&
+ (un->un_type == DGNC_PRINT))
baud = C_BAUD(ch->ch_pun.un_tty) & 0xff;
else
baud = C_BAUD(ch->ch_tun.un_tty) & 0xff;
@@ -679,7 +680,8 @@ static void neo_param(struct tty_struct *tty)
jindex = baud;
- if ((iindex >= 0) && (iindex < 4) && (jindex >= 0) && (jindex < 16))
+ if ((iindex >= 0) && (iindex < 4) &&
+ (jindex >= 0) && (jindex < 16))
baud = bauds[iindex][jindex];
else
baud = 0;
@@ -787,7 +789,8 @@ static void neo_param(struct tty_struct *tty)
neo_set_cts_flow_control(ch);
} else if (ch->ch_c_iflag & IXON) {
/* If start/stop is set to disable, then we should disable flow control */
- if ((ch->ch_startc == _POSIX_VDISABLE) || (ch->ch_stopc == _POSIX_VDISABLE))
+ if ((ch->ch_startc == _POSIX_VDISABLE) ||
+ (ch->ch_stopc == _POSIX_VDISABLE))
neo_set_no_output_flow_control(ch);
else
neo_set_ixon_flow_control(ch);
@@ -799,7 +802,8 @@ static void neo_param(struct tty_struct *tty)
neo_set_rts_flow_control(ch);
} else if (ch->ch_c_iflag & IXOFF) {
/* If start/stop is set to disable, then we should disable flow control */
- if ((ch->ch_startc == _POSIX_VDISABLE) || (ch->ch_stopc == _POSIX_VDISABLE))
+ if ((ch->ch_startc == _POSIX_VDISABLE) ||
+ (ch->ch_stopc == _POSIX_VDISABLE))
neo_set_no_input_flow_control(ch);
else
neo_set_ixoff_flow_control(ch);
@@ -910,9 +914,7 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
struct dgnc_board *brd = voidbrd;
struct channel_t *ch;
int port = 0;
- int type = 0;
- int current_port;
- u32 tmp;
+ int type;
u32 uart_poll;
unsigned long flags;
unsigned long flags2;
@@ -947,29 +949,12 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
/* At this point, we have at least SOMETHING to service, dig further... */
- current_port = 0;
-
/* Loop on each port */
while ((uart_poll & 0xff) != 0) {
- tmp = uart_poll;
-
- /* Check current port to see if it has interrupt pending */
- if ((tmp & dgnc_offset_table[current_port]) != 0) {
- port = current_port;
- type = tmp >> (8 + (port * 3));
- type &= 0x7;
- } else {
- current_port++;
- continue;
- }
+ type = uart_poll >> (8 + (port * 3));
+ type &= 0x7;
- /* Remove this port + type from uart_poll */
- uart_poll &= ~(dgnc_offset_table[port]);
-
- if (!type) {
- /* If no type, just ignore it, and move onto next port */
- continue;
- }
+ uart_poll &= ~(0x01 << port);
/* Switch on type of interrupt we have */
switch (type) {
@@ -981,7 +966,7 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
/* Verify the port is in range. */
if (port >= brd->nasync)
- continue;
+ break;
ch = brd->channels[port];
neo_copy_data_from_uart_to_queue(ch);
@@ -991,14 +976,14 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
dgnc_check_queue_flow_control(ch);
spin_unlock_irqrestore(&ch->ch_lock, flags2);
- continue;
+ break;
case UART_17158_RX_LINE_STATUS:
/*
* RXRDY and RX LINE Status (logic OR of LSR[4:1])
*/
neo_parse_lsr(brd, port);
- continue;
+ break;
case UART_17158_TXRDY:
/*
@@ -1014,14 +999,14 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
* it should be, I was getting things like RXDY too. Weird.
*/
neo_parse_isr(brd, port);
- continue;
+ break;
case UART_17158_MSR:
/*
* MSR or flow control was seen.
*/
neo_parse_isr(brd, port);
- continue;
+ break;
default:
/*
@@ -1030,8 +1015,10 @@ static irqreturn_t neo_intr(int irq, void *voidbrd)
* these once and awhile.
* Its harmless, just ignore it and move on.
*/
- continue;
+ break;
}
+
+ port++;
}
/*
@@ -1172,7 +1159,8 @@ static void neo_copy_data_from_uart_to_queue(struct channel_t *ch)
linestatus = 0;
/* Copy data from uart to the queue */
- memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, n);
+ memcpy_fromio(ch->ch_rqueue + head,
+ &ch->ch_neo_uart->txrxburst, n);
/*
* Since RX_FIFO_DATA_ERROR was 0, we are guaranteed
@@ -1225,7 +1213,8 @@ static void neo_copy_data_from_uart_to_queue(struct channel_t *ch)
* we don't miss our TX FIFO emptys.
*/
if (linestatus & (UART_LSR_THRE | UART_17158_TX_AND_FIFO_CLR)) {
- linestatus &= ~(UART_LSR_THRE | UART_17158_TX_AND_FIFO_CLR);
+ linestatus &= ~(UART_LSR_THRE |
+ UART_17158_TX_AND_FIFO_CLR);
ch->ch_flags |= (CH_TX_FIFO_EMPTY | CH_TX_FIFO_LWM);
}
@@ -1255,7 +1244,8 @@ static void neo_copy_data_from_uart_to_queue(struct channel_t *ch)
qleft++;
}
- memcpy_fromio(ch->ch_rqueue + head, &ch->ch_neo_uart->txrxburst, 1);
+ memcpy_fromio(ch->ch_rqueue + head,
+ &ch->ch_neo_uart->txrxburst, 1);
ch->ch_equeue[head] = (unsigned char)linestatus;
/* Ditch any remaining linestatus value. */
@@ -1328,7 +1318,8 @@ static void neo_flush_uart_write(struct channel_t *ch)
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
return;
- writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_XMIT), &ch->ch_neo_uart->isr_fcr);
+ writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_XMIT),
+ &ch->ch_neo_uart->isr_fcr);
neo_pci_posting_flush(ch->ch_bd);
for (i = 0; i < 10; i++) {
@@ -1356,7 +1347,8 @@ static void neo_flush_uart_read(struct channel_t *ch)
if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
return;
- writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR), &ch->ch_neo_uart->isr_fcr);
+ writeb(UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR,
+ &ch->ch_neo_uart->isr_fcr);
neo_pci_posting_flush(ch->ch_bd);
for (i = 0; i < 10; i++) {
@@ -1427,7 +1419,8 @@ static void neo_copy_data_from_queue_to_uart(struct channel_t *ch)
ch->ch_tun.un_flags |= (UN_EMPTY);
}
- writeb(ch->ch_wqueue[ch->ch_w_tail], &ch->ch_neo_uart->txrx);
+ writeb(ch->ch_wqueue[ch->ch_w_tail],
+ &ch->ch_neo_uart->txrx);
ch->ch_w_tail++;
ch->ch_w_tail &= WQUEUEMASK;
ch->ch_txcount++;
@@ -1494,7 +1487,8 @@ static void neo_copy_data_from_queue_to_uart(struct channel_t *ch)
ch->ch_tun.un_flags |= (UN_EMPTY);
}
- memcpy_toio(&ch->ch_neo_uart->txrxburst, ch->ch_wqueue + tail, s);
+ memcpy_toio(&ch->ch_neo_uart->txrxburst,
+ ch->ch_wqueue + tail, s);
/* Add and flip queue if needed */
tail = (tail + s) & WQUEUEMASK;
@@ -1628,7 +1622,8 @@ static void neo_uart_init(struct channel_t *ch)
/* Clear out UART and FIFO */
readb(&ch->ch_neo_uart->txrx);
- writeb((UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT), &ch->ch_neo_uart->isr_fcr);
+ writeb(UART_FCR_ENABLE_FIFO | UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT,
+ &ch->ch_neo_uart->isr_fcr);
readb(&ch->ch_neo_uart->lsr);
readb(&ch->ch_neo_uart->msr);
@@ -1725,7 +1720,8 @@ static void neo_send_immediate_char(struct channel_t *ch, unsigned char c)
neo_pci_posting_flush(ch->ch_bd);
}
-static unsigned int neo_read_eeprom(unsigned char __iomem *base, unsigned int address)
+static unsigned int neo_read_eeprom(unsigned char __iomem *base,
+ unsigned int address)
{
unsigned int enable;
unsigned int bits;
@@ -1783,10 +1779,15 @@ static void neo_vpd(struct dgnc_board *brd)
brd->vpd[(i * 2) + 1] = (a >> 8) & 0xff;
}
- if (((brd->vpd[0x08] != 0x82) /* long resource name tag */
- && (brd->vpd[0x10] != 0x82)) /* long resource name tag (PCI-66 files)*/
- || (brd->vpd[0x7F] != 0x78)) { /* small resource end tag */
-
+ /*
+ * brd->vpd stores resource name tags at the offsets below:
+ * 0x08 : long resource name tag
+ * 0x10 : long resource name tag (PCI-66 files)
+ * 0x7F : small resource end tag
+ */
+ if (((brd->vpd[0x08] != 0x82) &&
+ (brd->vpd[0x10] != 0x82)) ||
+ (brd->vpd[0x7F] != 0x78)) {
memset(brd->vpd, '\0', NEO_VPD_IMAGESIZE);
} else {
/* Search for the serial number */
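
The neo_intr() rewrite above drops the dgnc_offset_table[] lookup and simply walks the ports in order, pulling each port's 3-bit interrupt type out of the poll word and clearing that port's pending bit. A standalone decode loop in the same spirit is sketched below; the layout (pending bits in the low byte, 3-bit type fields starting at bit 8) is taken from the driver code shown above, and the sample value is made up.

#include <stdint.h>
#include <stdio.h>

/* Decode a poll word: bit 'port' in the low byte means the port has work,
 * and bits (8 + port*3) .. (10 + port*3) encode a 3-bit interrupt type. */
static void decode_uart_poll(uint32_t uart_poll)
{
	unsigned int port = 0;

	while ((uart_poll & 0xff) != 0) {
		unsigned int type = (uart_poll >> (8 + (port * 3))) & 0x7;

		uart_poll &= ~(1u << port);	/* retire this port's pending bit */

		if (type)
			printf("port %u: interrupt type %u\n", port, type);

		port++;
	}
}

int main(void)
{
	/* Ports 0 and 2 pending, with types 1 and 4 in their 3-bit slots. */
	decode_uart_poll(0x05 | (1u << 8) | (4u << 14));
	return 0;
}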
diff --git a/drivers/staging/dgnc/dgnc_sysfs.c b/drivers/staging/dgnc/dgnc_sysfs.c
index 74a072599126..b8d41c5617e2 100644
--- a/drivers/staging/dgnc/dgnc_sysfs.c
+++ b/drivers/staging/dgnc/dgnc_sysfs.c
@@ -33,7 +33,7 @@ static DRIVER_ATTR(version, S_IRUSR, dgnc_driver_version_show, NULL);
static ssize_t dgnc_driver_boards_show(struct device_driver *ddp, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%d\n", dgnc_NumBoards);
+ return snprintf(buf, PAGE_SIZE, "%d\n", dgnc_num_boards);
}
static DRIVER_ATTR(boards, S_IRUSR, dgnc_driver_boards_show, NULL);
@@ -189,19 +189,21 @@ static ssize_t dgnc_ports_msignals_show(struct device *p,
DGNC_VERIFY_BOARD(p, bd);
for (i = 0; i < bd->nasync; i++) {
- if (bd->channels[i]->ch_open_count) {
+ struct channel_t *ch = bd->channels[i];
+
+ if (ch->ch_open_count) {
count += snprintf(buf + count, PAGE_SIZE - count,
"%d %s %s %s %s %s %s\n",
- bd->channels[i]->ch_portnum,
- (bd->channels[i]->ch_mostat & UART_MCR_RTS) ? "RTS" : "",
- (bd->channels[i]->ch_mistat & UART_MSR_CTS) ? "CTS" : "",
- (bd->channels[i]->ch_mostat & UART_MCR_DTR) ? "DTR" : "",
- (bd->channels[i]->ch_mistat & UART_MSR_DSR) ? "DSR" : "",
- (bd->channels[i]->ch_mistat & UART_MSR_DCD) ? "DCD" : "",
- (bd->channels[i]->ch_mistat & UART_MSR_RI) ? "RI" : "");
+ ch->ch_portnum,
+ (ch->ch_mostat & UART_MCR_RTS) ? "RTS" : "",
+ (ch->ch_mistat & UART_MSR_CTS) ? "CTS" : "",
+ (ch->ch_mostat & UART_MCR_DTR) ? "DTR" : "",
+ (ch->ch_mistat & UART_MSR_DSR) ? "DSR" : "",
+ (ch->ch_mistat & UART_MSR_DCD) ? "DCD" : "",
+ (ch->ch_mistat & UART_MSR_RI) ? "RI" : "");
} else {
count += snprintf(buf + count, PAGE_SIZE - count,
- "%d\n", bd->channels[i]->ch_portnum);
+ "%d\n", ch->ch_portnum);
}
}
return count;
diff --git a/drivers/staging/dgnc/dgnc_tty.c b/drivers/staging/dgnc/dgnc_tty.c
index 5c221593a0c6..4eeecc992a02 100644
--- a/drivers/staging/dgnc/dgnc_tty.c
+++ b/drivers/staging/dgnc/dgnc_tty.c
@@ -176,57 +176,42 @@ int dgnc_tty_preinit(void)
*/
int dgnc_tty_register(struct dgnc_board *brd)
{
- int rc = 0;
-
- brd->SerialDriver.magic = TTY_DRIVER_MAGIC;
+ int rc;
- snprintf(brd->SerialName, MAXTTYNAMELEN, "tty_dgnc_%d_", brd->boardnum);
+ brd->serial_driver = tty_alloc_driver(brd->maxports,
+ TTY_DRIVER_REAL_RAW |
+ TTY_DRIVER_DYNAMIC_DEV |
+ TTY_DRIVER_HARDWARE_BREAK);
- brd->SerialDriver.name = brd->SerialName;
- brd->SerialDriver.name_base = 0;
- brd->SerialDriver.major = 0;
- brd->SerialDriver.minor_start = 0;
- brd->SerialDriver.num = brd->maxports;
- brd->SerialDriver.type = TTY_DRIVER_TYPE_SERIAL;
- brd->SerialDriver.subtype = SERIAL_TYPE_NORMAL;
- brd->SerialDriver.init_termios = DgncDefaultTermios;
- brd->SerialDriver.driver_name = DRVSTR;
- brd->SerialDriver.flags = (TTY_DRIVER_REAL_RAW |
- TTY_DRIVER_DYNAMIC_DEV |
- TTY_DRIVER_HARDWARE_BREAK);
+ if (IS_ERR(brd->serial_driver))
+ return PTR_ERR(brd->serial_driver);
- /*
- * The kernel wants space to store pointers to
- * tty_struct's and termios's.
- */
- brd->SerialDriver.ttys = kcalloc(brd->maxports,
- sizeof(*brd->SerialDriver.ttys),
- GFP_KERNEL);
- if (!brd->SerialDriver.ttys)
- return -ENOMEM;
+ snprintf(brd->serial_name, MAXTTYNAMELEN, "tty_dgnc_%d_", brd->boardnum);
- kref_init(&brd->SerialDriver.kref);
- brd->SerialDriver.termios = kcalloc(brd->maxports,
- sizeof(*brd->SerialDriver.termios),
- GFP_KERNEL);
- if (!brd->SerialDriver.termios)
- return -ENOMEM;
+ brd->serial_driver->name = brd->serial_name;
+ brd->serial_driver->name_base = 0;
+ brd->serial_driver->major = 0;
+ brd->serial_driver->minor_start = 0;
+ brd->serial_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ brd->serial_driver->subtype = SERIAL_TYPE_NORMAL;
+ brd->serial_driver->init_termios = DgncDefaultTermios;
+ brd->serial_driver->driver_name = DRVSTR;
/*
* Entry points for driver. Called by the kernel from
* tty_io.c and n_tty.c.
*/
- tty_set_operations(&brd->SerialDriver, &dgnc_tty_ops);
+ tty_set_operations(brd->serial_driver, &dgnc_tty_ops);
- if (!brd->dgnc_Major_Serial_Registered) {
+ if (!brd->dgnc_major_serial_registered) {
/* Register tty devices */
- rc = tty_register_driver(&brd->SerialDriver);
+ rc = tty_register_driver(brd->serial_driver);
if (rc < 0) {
dev_dbg(&brd->pdev->dev,
"Can't register tty device (%d)\n", rc);
- return rc;
+ goto free_serial_driver;
}
- brd->dgnc_Major_Serial_Registered = true;
+ brd->dgnc_major_serial_registered = true;
}
/*
@@ -234,60 +219,55 @@ int dgnc_tty_register(struct dgnc_board *brd)
* again, separately so we don't get the LD confused about what major
* we are when we get into the dgnc_tty_open() routine.
*/
- brd->PrintDriver.magic = TTY_DRIVER_MAGIC;
- snprintf(brd->PrintName, MAXTTYNAMELEN, "pr_dgnc_%d_", brd->boardnum);
-
- brd->PrintDriver.name = brd->PrintName;
- brd->PrintDriver.name_base = 0;
- brd->PrintDriver.major = brd->SerialDriver.major;
- brd->PrintDriver.minor_start = 0x80;
- brd->PrintDriver.num = brd->maxports;
- brd->PrintDriver.type = TTY_DRIVER_TYPE_SERIAL;
- brd->PrintDriver.subtype = SERIAL_TYPE_NORMAL;
- brd->PrintDriver.init_termios = DgncDefaultTermios;
- brd->PrintDriver.driver_name = DRVSTR;
- brd->PrintDriver.flags = (TTY_DRIVER_REAL_RAW |
- TTY_DRIVER_DYNAMIC_DEV |
- TTY_DRIVER_HARDWARE_BREAK);
+ brd->print_driver = tty_alloc_driver(brd->maxports,
+ TTY_DRIVER_REAL_RAW |
+ TTY_DRIVER_DYNAMIC_DEV |
+ TTY_DRIVER_HARDWARE_BREAK);
+
+ if (IS_ERR(brd->print_driver)) {
+ rc = PTR_ERR(brd->print_driver);
+ goto unregister_serial_driver;
+ }
- /*
- * The kernel wants space to store pointers to
- * tty_struct's and termios's. Must be separated from
- * the Serial Driver so we don't get confused
- */
- brd->PrintDriver.ttys = kcalloc(brd->maxports,
- sizeof(*brd->PrintDriver.ttys),
- GFP_KERNEL);
- if (!brd->PrintDriver.ttys)
- return -ENOMEM;
- kref_init(&brd->PrintDriver.kref);
- brd->PrintDriver.termios = kcalloc(brd->maxports,
- sizeof(*brd->PrintDriver.termios),
- GFP_KERNEL);
- if (!brd->PrintDriver.termios)
- return -ENOMEM;
+ snprintf(brd->print_name, MAXTTYNAMELEN, "pr_dgnc_%d_", brd->boardnum);
+
+ brd->print_driver->name = brd->print_name;
+ brd->print_driver->name_base = 0;
+ brd->print_driver->major = brd->serial_driver->major;
+ brd->print_driver->minor_start = 0x80;
+ brd->print_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ brd->print_driver->subtype = SERIAL_TYPE_NORMAL;
+ brd->print_driver->init_termios = DgncDefaultTermios;
+ brd->print_driver->driver_name = DRVSTR;
/*
* Entry points for driver. Called by the kernel from
* tty_io.c and n_tty.c.
*/
- tty_set_operations(&brd->PrintDriver, &dgnc_tty_ops);
+ tty_set_operations(brd->print_driver, &dgnc_tty_ops);
- if (!brd->dgnc_Major_TransparentPrint_Registered) {
+ if (!brd->dgnc_major_transparent_print_registered) {
/* Register Transparent Print devices */
- rc = tty_register_driver(&brd->PrintDriver);
+ rc = tty_register_driver(brd->print_driver);
if (rc < 0) {
dev_dbg(&brd->pdev->dev,
"Can't register Transparent Print device(%d)\n",
rc);
- return rc;
+ goto free_print_driver;
}
- brd->dgnc_Major_TransparentPrint_Registered = true;
+ brd->dgnc_major_transparent_print_registered = true;
}
- dgnc_BoardsByMajor[brd->SerialDriver.major] = brd;
- brd->dgnc_Serial_Major = brd->SerialDriver.major;
- brd->dgnc_TransparentPrint_Major = brd->PrintDriver.major;
+ dgnc_BoardsByMajor[brd->serial_driver->major] = brd;
+
+ return 0;
+
+free_print_driver:
+ put_tty_driver(brd->print_driver);
+unregister_serial_driver:
+ tty_unregister_driver(brd->serial_driver);
+free_serial_driver:
+ put_tty_driver(brd->serial_driver);
return rc;
}
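
dgnc_tty_register() above now allocates both tty drivers with tty_alloc_driver() and unwinds with a chain of goto labels instead of returning early and leaking. The sketch below shows the same unwind shape with plain allocations so it runs anywhere; the struct and the malloc() steps are invented for the example and stand in for tty_alloc_driver(), tty_register_driver() and put_tty_driver().

#include <stdlib.h>
#include <stdio.h>

struct board {
	char *serial_driver;
	char *print_driver;
};

/* Acquire resources in order; each later failure releases everything taken
 * so far by falling through the label chain in reverse order. */
static int board_register(struct board *brd, int fail_final_step)
{
	int rc;

	brd->serial_driver = malloc(64);
	if (!brd->serial_driver)
		return -1;

	brd->print_driver = malloc(64);
	if (!brd->print_driver) {
		rc = -1;
		goto free_serial_driver;
	}

	if (fail_final_step) {	/* e.g. the second tty_register_driver() failing */
		rc = -1;
		goto free_print_driver;
	}

	return 0;	/* a matching board_unregister() would free these later */

free_print_driver:
	free(brd->print_driver);
free_serial_driver:
	free(brd->serial_driver);
	return rc;
}

int main(void)
{
	struct board brd;

	if (board_register(&brd, 1) != 0)
		puts("registration failed, everything released");
	return 0;
}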
@@ -364,12 +344,12 @@ int dgnc_tty_init(struct dgnc_board *brd)
{
struct device *classp;
- classp = tty_register_device(&brd->SerialDriver, i,
+ classp = tty_register_device(brd->serial_driver, i,
&ch->ch_bd->pdev->dev);
ch->ch_tun.un_sysfs = classp;
dgnc_create_tty_sysfs(&ch->ch_tun, classp);
- classp = tty_register_device(&brd->PrintDriver, i,
+ classp = tty_register_device(brd->print_driver, i,
&ch->ch_bd->pdev->dev);
ch->ch_pun.un_sysfs = classp;
dgnc_create_tty_sysfs(&ch->ch_pun, classp);
@@ -407,40 +387,32 @@ void dgnc_tty_uninit(struct dgnc_board *brd)
{
int i = 0;
- if (brd->dgnc_Major_Serial_Registered) {
- dgnc_BoardsByMajor[brd->SerialDriver.major] = NULL;
- brd->dgnc_Serial_Major = 0;
+ if (brd->dgnc_major_serial_registered) {
+ dgnc_BoardsByMajor[brd->serial_driver->major] = NULL;
for (i = 0; i < brd->nasync; i++) {
if (brd->channels[i])
dgnc_remove_tty_sysfs(brd->channels[i]->
ch_tun.un_sysfs);
- tty_unregister_device(&brd->SerialDriver, i);
+ tty_unregister_device(brd->serial_driver, i);
}
- tty_unregister_driver(&brd->SerialDriver);
- brd->dgnc_Major_Serial_Registered = false;
+ tty_unregister_driver(brd->serial_driver);
+ brd->dgnc_major_serial_registered = false;
}
- if (brd->dgnc_Major_TransparentPrint_Registered) {
- dgnc_BoardsByMajor[brd->PrintDriver.major] = NULL;
- brd->dgnc_TransparentPrint_Major = 0;
+ if (brd->dgnc_major_transparent_print_registered) {
+ dgnc_BoardsByMajor[brd->print_driver->major] = NULL;
for (i = 0; i < brd->nasync; i++) {
if (brd->channels[i])
dgnc_remove_tty_sysfs(brd->channels[i]->
ch_pun.un_sysfs);
- tty_unregister_device(&brd->PrintDriver, i);
+ tty_unregister_device(brd->print_driver, i);
}
- tty_unregister_driver(&brd->PrintDriver);
- brd->dgnc_Major_TransparentPrint_Registered = false;
+ tty_unregister_driver(brd->print_driver);
+ brd->dgnc_major_transparent_print_registered = false;
}
- kfree(brd->SerialDriver.ttys);
- brd->SerialDriver.ttys = NULL;
- kfree(brd->SerialDriver.termios);
- brd->SerialDriver.termios = NULL;
- kfree(brd->PrintDriver.ttys);
- brd->PrintDriver.ttys = NULL;
- kfree(brd->PrintDriver.termios);
- brd->PrintDriver.termios = NULL;
+ put_tty_driver(brd->serial_driver);
+ put_tty_driver(brd->print_driver);
}
/*
@@ -606,6 +578,8 @@ void dgnc_input(struct channel_t *ch)
* or the amount of data the card actually has pending...
*/
while (n) {
+ unsigned char *err_pos = ch->ch_equeue + tail;
+
s = ((head >= tail) ? head : RQUEUESIZE) - tail;
s = min(s, n);
@@ -620,29 +594,20 @@ void dgnc_input(struct channel_t *ch)
*/
if (I_PARMRK(tp) || I_BRKINT(tp) || I_INPCK(tp)) {
for (i = 0; i < s; i++) {
- if (*(ch->ch_equeue + tail + i) & UART_LSR_BI)
- tty_insert_flip_char(tp->port,
- *(ch->ch_rqueue + tail + i),
- TTY_BREAK);
- else if (*(ch->ch_equeue + tail + i) &
- UART_LSR_PE)
- tty_insert_flip_char(tp->port,
- *(ch->ch_rqueue + tail + i),
- TTY_PARITY);
- else if (*(ch->ch_equeue + tail + i) &
- UART_LSR_FE)
- tty_insert_flip_char(tp->port,
- *(ch->ch_rqueue + tail + i),
- TTY_FRAME);
- else
- tty_insert_flip_char(tp->port,
- *(ch->ch_rqueue + tail + i),
- TTY_NORMAL);
+ unsigned char c = *(ch->ch_rqueue + tail + i);
+ char flag = TTY_NORMAL;
+
+ if (*(err_pos + i) & UART_LSR_BI)
+ flag = TTY_BREAK;
+ else if (*(err_pos + i) & UART_LSR_PE)
+ flag = TTY_PARITY;
+ else if (*(err_pos + i) & UART_LSR_FE)
+ flag = TTY_FRAME;
+
+ tty_insert_flip_char(tp->port, c, flag);
}
} else {
- tty_insert_flip_string(tp->port,
- ch->ch_rqueue + tail,
- s);
+ tty_insert_flip_string(tp->port, ch->ch_rqueue + tail, s);
}
tail += s;
@@ -1117,6 +1082,14 @@ static int dgnc_tty_open(struct tty_struct *tty, struct file *file)
if (!ch->ch_wqueue)
ch->ch_wqueue = kzalloc(WQUEUESIZE, GFP_KERNEL);
+ if (!ch->ch_rqueue || !ch->ch_equeue || !ch->ch_wqueue) {
+ kfree(ch->ch_rqueue);
+ kfree(ch->ch_equeue);
+ kfree(ch->ch_wqueue);
+ ch->ch_rqueue = NULL;
+ ch->ch_equeue = NULL;
+ ch->ch_wqueue = NULL;
+
+ return -ENOMEM;
+ }
+
spin_lock_irqsave(&ch->ch_lock, flags);
ch->ch_flags &= ~(CH_OPENING);
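
The open path above allocates the three channel queues lazily (only when the pointer is still NULL) and, if any allocation fails, frees the whole group before returning -ENOMEM. Because the pointers are checked again on the next open, the failure path must also reset them, which is what the sketch below demonstrates with ordinary malloc()/calloc(); the queue names are reused from the driver purely for illustration.

#include <stdlib.h>
#include <stdio.h>

struct channel {
	unsigned char *rqueue;
	unsigned char *equeue;
	unsigned char *wqueue;
};

/* Lazily allocate the queues; on any failure, free and reset the group so a
 * later call can safely retry instead of reusing freed memory. */
static int channel_open(struct channel *ch)
{
	if (!ch->rqueue)
		ch->rqueue = calloc(1, 8192);
	if (!ch->equeue)
		ch->equeue = calloc(1, 8192);
	if (!ch->wqueue)
		ch->wqueue = calloc(1, 8192);

	if (!ch->rqueue || !ch->equeue || !ch->wqueue) {
		free(ch->rqueue);
		free(ch->equeue);
		free(ch->wqueue);
		ch->rqueue = NULL;
		ch->equeue = NULL;
		ch->wqueue = NULL;
		return -1;
	}
	return 0;
}

int main(void)
{
	struct channel ch = { 0 };

	printf("open %s\n", channel_open(&ch) ? "failed" : "succeeded");
	free(ch.rqueue);
	free(ch.equeue);
	free(ch.wqueue);
	return 0;
}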
@@ -1539,19 +1512,8 @@ static int dgnc_tty_chars_in_buffer(struct tty_struct *tty)
*/
static int dgnc_maxcps_room(struct tty_struct *tty, int bytes_available)
{
- struct channel_t *ch = NULL;
- struct un_t *un = NULL;
-
- if (!tty)
- return bytes_available;
-
- un = tty->driver_data;
- if (!un || un->magic != DGNC_UNIT_MAGIC)
- return bytes_available;
-
- ch = un->un_ch;
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return bytes_available;
+ struct un_t *un = tty->driver_data;
+ struct channel_t *ch = un->un_ch;
/*
* If its not the Transparent print device, return
@@ -2058,17 +2020,7 @@ static inline int dgnc_get_mstat(struct channel_t *ch)
static int dgnc_get_modem_info(struct channel_t *ch,
unsigned int __user *value)
{
- int result;
-
- if (!ch || ch->magic != DGNC_CHANNEL_MAGIC)
- return -ENXIO;
-
- result = dgnc_get_mstat(ch);
-
- if (result < 0)
- return -ENXIO;
-
- return put_user(result, value);
+ return put_user(dgnc_get_mstat(ch), value);
}
/*
@@ -2529,6 +2481,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
unsigned long arg)
{
struct dgnc_board *bd;
+ struct board_ops *ch_bd_ops;
struct channel_t *ch;
struct un_t *un;
int rc;
@@ -2550,6 +2503,8 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
if (!bd || bd->magic != DGNC_BOARD_MAGIC)
return -ENODEV;
+ ch_bd_ops = bd->bd_ops;
+
spin_lock_irqsave(&ch->ch_lock, flags);
if (un->un_open_count <= 0) {
@@ -2574,7 +2529,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
if (rc)
return rc;
- rc = ch->ch_bd->bd_ops->drain(tty, 0);
+ rc = ch_bd_ops->drain(tty, 0);
if (rc)
return -EINTR;
@@ -2582,7 +2537,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
spin_lock_irqsave(&ch->ch_lock, flags);
if (((cmd == TCSBRK) && (!arg)) || (cmd == TCSBRKP))
- ch->ch_bd->bd_ops->send_break(ch, 250);
+ ch_bd_ops->send_break(ch, 250);
spin_unlock_irqrestore(&ch->ch_lock, flags);
@@ -2599,13 +2554,13 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
if (rc)
return rc;
- rc = ch->ch_bd->bd_ops->drain(tty, 0);
+ rc = ch_bd_ops->drain(tty, 0);
if (rc)
return -EINTR;
spin_lock_irqsave(&ch->ch_lock, flags);
- ch->ch_bd->bd_ops->send_break(ch, 250);
+ ch_bd_ops->send_break(ch, 250);
spin_unlock_irqrestore(&ch->ch_lock, flags);
@@ -2617,13 +2572,13 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
if (rc)
return rc;
- rc = ch->ch_bd->bd_ops->drain(tty, 0);
+ rc = ch_bd_ops->drain(tty, 0);
if (rc)
return -EINTR;
spin_lock_irqsave(&ch->ch_lock, flags);
- ch->ch_bd->bd_ops->send_break(ch, 250);
+ ch_bd_ops->send_break(ch, 250);
spin_unlock_irqrestore(&ch->ch_lock, flags);
@@ -2652,7 +2607,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
spin_lock_irqsave(&ch->ch_lock, flags);
tty->termios.c_cflag = ((tty->termios.c_cflag & ~CLOCAL) |
(arg ? CLOCAL : 0));
- ch->ch_bd->bd_ops->param(tty);
+ ch_bd_ops->param(tty);
spin_unlock_irqrestore(&ch->ch_lock, flags);
return 0;
@@ -2689,7 +2644,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
if ((arg == TCIFLUSH) || (arg == TCIOFLUSH)) {
ch->ch_r_head = ch->ch_r_tail;
- ch->ch_bd->bd_ops->flush_uart_read(ch);
+ ch_bd_ops->flush_uart_read(ch);
/* Force queue flow control to be released, if needed */
dgnc_check_queue_flow_control(ch);
}
@@ -2697,9 +2652,9 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
if ((arg == TCOFLUSH) || (arg == TCIOFLUSH)) {
if (!(un->un_type == DGNC_PRINT)) {
ch->ch_w_head = ch->ch_w_tail;
- ch->ch_bd->bd_ops->flush_uart_write(ch);
+ ch_bd_ops->flush_uart_write(ch);
- if (ch->ch_tun.un_flags & (UN_LOW|UN_EMPTY)) {
+ if (ch->ch_tun.un_flags & (UN_LOW | UN_EMPTY)) {
ch->ch_tun.un_flags &=
~(UN_LOW | UN_EMPTY);
wake_up_interruptible(&ch->ch_tun.un_flags_wait);
@@ -2731,14 +2686,14 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
/* flush rx */
ch->ch_flags &= ~CH_STOP;
ch->ch_r_head = ch->ch_r_tail;
- ch->ch_bd->bd_ops->flush_uart_read(ch);
+ ch_bd_ops->flush_uart_read(ch);
/* Force queue flow control to be released, if needed */
dgnc_check_queue_flow_control(ch);
}
/* now wait for all the output to drain */
spin_unlock_irqrestore(&ch->ch_lock, flags);
- rc = ch->ch_bd->bd_ops->drain(tty, 0);
+ rc = ch_bd_ops->drain(tty, 0);
if (rc)
return -EINTR;
@@ -2748,7 +2703,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
case TCSETAW:
spin_unlock_irqrestore(&ch->ch_lock, flags);
- rc = ch->ch_bd->bd_ops->drain(tty, 0);
+ rc = ch_bd_ops->drain(tty, 0);
if (rc)
return -EINTR;
@@ -2771,7 +2726,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
/* set information for ditty */
if (cmd == (DIGI_SETAW)) {
spin_unlock_irqrestore(&ch->ch_lock, flags);
- rc = ch->ch_bd->bd_ops->drain(tty, 0);
+ rc = ch_bd_ops->drain(tty, 0);
if (rc)
return -EINTR;
@@ -2804,7 +2759,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
else
ch->ch_flags &= ~(CH_LOOPBACK);
- ch->ch_bd->bd_ops->param(tty);
+ ch_bd_ops->param(tty);
spin_unlock_irqrestore(&ch->ch_lock, flags);
return 0;
}
@@ -2824,7 +2779,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
return rc;
spin_lock_irqsave(&ch->ch_lock, flags);
dgnc_set_custom_speed(ch, new_rate);
- ch->ch_bd->bd_ops->param(tty);
+ ch_bd_ops->param(tty);
spin_unlock_irqrestore(&ch->ch_lock, flags);
return 0;
}
@@ -2845,7 +2800,7 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
if (rc)
return rc;
spin_lock_irqsave(&ch->ch_lock, flags);
- ch->ch_bd->bd_ops->send_immediate_char(ch, c);
+ ch_bd_ops->send_immediate_char(ch, c);
spin_unlock_irqrestore(&ch->ch_lock, flags);
return 0;
}
@@ -2933,13 +2888,13 @@ static int dgnc_tty_ioctl(struct tty_struct *tty, unsigned int cmd,
/*
* Is the UART empty? Add that value to whats in our TX queue.
*/
- count = buf.txbuf + ch->ch_bd->bd_ops->get_uart_bytes_left(ch);
+ count = buf.txbuf + ch_bd_ops->get_uart_bytes_left(ch);
/*
* Figure out how much data the RealPort Server believes should
* be in our TX queue.
*/
- tdist = (buf.tIn - buf.tOut) & 0xffff;
+ tdist = (buf.tx_in - buf.tx_out) & 0xffff;
/*
* If we have more data than the RealPort Server believes we
diff --git a/drivers/staging/dgnc/digi.h b/drivers/staging/dgnc/digi.h
index 523a2d34f747..5b983e6f5ee2 100644
--- a/drivers/staging/dgnc/digi.h
+++ b/drivers/staging/dgnc/digi.h
@@ -109,8 +109,8 @@ struct digi_info {
struct digi_getbuffer /* Struct for holding buffer use counts */
{
- unsigned long tIn;
- unsigned long tOut;
+ unsigned long tx_in;
+ unsigned long tx_out;
unsigned long rxbuf;
unsigned long txbuf;
unsigned long txdone;
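The tx_in/tx_out rename above feeds the tdist computation in dgnc_tty_ioctl(); the standalone sketch below (illustrative only, not part of the patch) shows why the & 0xffff mask keeps that distance correct even when the 16-bit producer index has wrapped:

/* Wrap-safe distance between two 16-bit ring indices, as used for tdist. */
static unsigned long tx_queue_distance(unsigned long tx_in, unsigned long tx_out)
{
	return (tx_in - tx_out) & 0xffff;
}

/*
 * Example: tx_queue_distance(0x0005, 0xfffe) == 7; the producer index has
 * wrapped past 0xffff, yet the masked subtraction still yields the small
 * positive distance the RealPort accounting expects.
 */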
diff --git a/drivers/staging/emxx_udc/emxx_udc.c b/drivers/staging/emxx_udc/emxx_udc.c
index e8cacaecf9ad..3bd91758b2da 100644
--- a/drivers/staging/emxx_udc/emxx_udc.c
+++ b/drivers/staging/emxx_udc/emxx_udc.c
@@ -418,9 +418,9 @@ static void _nbu2ss_ep_dma_abort(struct nbu2ss_udc *udc, struct nbu2ss_ep *ep)
{
struct fc_regs *preg = udc->p_regs;
- _nbu2ss_bitclr(&preg->EP_DCR[ep->epnum-1].EP_DCR1, DCR1_EPn_REQEN);
+ _nbu2ss_bitclr(&preg->EP_DCR[ep->epnum - 1].EP_DCR1, DCR1_EPn_REQEN);
mdelay(DMA_DISABLE_TIME); /* DCR1_EPn_REQEN Clear */
- _nbu2ss_bitclr(&preg->EP_REGS[ep->epnum-1].EP_DMA_CTRL, EPn_DMA_EN);
+ _nbu2ss_bitclr(&preg->EP_REGS[ep->epnum - 1].EP_DMA_CTRL, EPn_DMA_EN);
}
/*-------------------------------------------------------------------------*/
@@ -909,7 +909,7 @@ static int _nbu2ss_epn_out_pio(
/* Copy of every four bytes */
for (i = 0; i < iWordLength; i++) {
pBuf32->dw =
- _nbu2ss_readl(&preg->EP_REGS[ep->epnum-1].EP_READ);
+ _nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_READ);
pBuf32++;
}
result = iWordLength * sizeof(u32);
@@ -919,7 +919,7 @@ static int _nbu2ss_epn_out_pio(
if (data > 0) {
/*---------------------------------------------------------*/
/* Copy of fraction byte */
- Temp32.dw = _nbu2ss_readl(&preg->EP_REGS[ep->epnum-1].EP_READ);
+ Temp32.dw = _nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_READ);
for (i = 0 ; i < data ; i++)
pBuf32->byte.DATA[i] = Temp32.byte.DATA[i];
result += data;
@@ -1128,7 +1128,7 @@ static int _nbu2ss_epn_in_pio(
if (iWordLength > 0) {
for (i = 0; i < iWordLength; i++) {
_nbu2ss_writel(
- &preg->EP_REGS[ep->epnum-1].EP_WRITE
+ &preg->EP_REGS[ep->epnum - 1].EP_WRITE
, pBuf32->dw
);
@@ -1290,7 +1290,7 @@ static void _nbu2ss_restert_transfer(struct nbu2ss_ep *ep)
if (ep->epnum > 0) {
length = _nbu2ss_readl(
- &ep->udc->p_regs->EP_REGS[ep->epnum-1].EP_LEN_DCNT);
+ &ep->udc->p_regs->EP_REGS[ep->epnum - 1].EP_LEN_DCNT);
length &= EPn_LDATA;
if (length < ep->ep.maxpacket)
@@ -1463,7 +1463,7 @@ static int _nbu2ss_get_ep_stall(struct nbu2ss_udc *udc, u8 ep_adrs)
bit_data = EP0_STL;
} else {
- data = _nbu2ss_readl(&preg->EP_REGS[epnum-1].EP_CONTROL);
+ data = _nbu2ss_readl(&preg->EP_REGS[epnum - 1].EP_CONTROL);
if ((data & EPn_EN) == 0)
return -1;
@@ -1558,7 +1558,7 @@ static void _nbu2ss_epn_set_stall(
; limit_cnt++) {
regdata = _nbu2ss_readl(
- &preg->EP_REGS[ep->epnum-1].EP_STATUS);
+ &preg->EP_REGS[ep->epnum - 1].EP_STATUS);
if ((regdata & EPn_IN_DATA) == 0)
break;
@@ -1983,7 +1983,7 @@ static inline void _nbu2ss_epn_in_int(
if (req->zero && ((req->req.actual % ep->ep.maxpacket) == 0)) {
status =
- _nbu2ss_readl(&preg->EP_REGS[ep->epnum-1].EP_STATUS);
+ _nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_STATUS);
if ((status & EPn_IN_FULL) == 0) {
/*-----------------------------------------*/
@@ -2894,7 +2894,7 @@ static int nbu2ss_ep_fifo_status(struct usb_ep *_ep)
data = _nbu2ss_readl(&preg->EP0_LENGTH) & EP0_LDATA;
} else {
- data = _nbu2ss_readl(&preg->EP_REGS[ep->epnum-1].EP_LEN_DCNT)
+ data = _nbu2ss_readl(&preg->EP_REGS[ep->epnum - 1].EP_LEN_DCNT)
& EPn_LDATA;
}
@@ -3051,7 +3051,7 @@ static int nbu2ss_gad_vbus_session(struct usb_gadget *pgadget, int is_active)
}
/*-------------------------------------------------------------------------*/
-static int nbu2ss_gad_vbus_draw(struct usb_gadget *pgadget, unsigned mA)
+static int nbu2ss_gad_vbus_draw(struct usb_gadget *pgadget, unsigned int mA)
{
struct nbu2ss_udc *udc;
unsigned long flags;
@@ -3101,7 +3101,7 @@ static int nbu2ss_gad_pullup(struct usb_gadget *pgadget, int is_on)
/*-------------------------------------------------------------------------*/
static int nbu2ss_gad_ioctl(
struct usb_gadget *pgadget,
- unsigned code,
+ unsigned int code,
unsigned long param)
{
return 0;
diff --git a/drivers/staging/emxx_udc/emxx_udc.h b/drivers/staging/emxx_udc/emxx_udc.h
index 4a2cc38de7b3..39769e3a801c 100644
--- a/drivers/staging/emxx_udc/emxx_udc.h
+++ b/drivers/staging/emxx_udc/emxx_udc.h
@@ -97,7 +97,7 @@
#define BIT30 0x40000000
#define BIT31 0x80000000
-#define TEST_FORCE_ENABLE (BIT18+BIT16)
+#define TEST_FORCE_ENABLE (BIT18 + BIT16)
#define INT_SEL BIT10
#define CONSTFS BIT09
@@ -125,15 +125,15 @@
/*------- (0x0008) USB Address Register */
#define USB_ADDR 0x007F0000
#define SOF_STATUS BIT15
-#define UFRAME (BIT14+BIT13+BIT12)
+#define UFRAME (BIT14 + BIT13 + BIT12)
#define FRAME 0x000007FF
#define USB_ADRS_SHIFT 16
/*------- (0x000C) UTMI Characteristic 1 Register */
-#define SQUSET (BIT07+BIT06+BIT05+BIT04)
+#define SQUSET (BIT07 + BIT06 + BIT05 + BIT04)
-#define USB_SQUSET (BIT06+BIT05+BIT04)
+#define USB_SQUSET (BIT06 + BIT05 + BIT04)
/*------- (0x0010) TEST Control Register */
#define FORCEHS BIT02
@@ -196,7 +196,7 @@
#define RSUM_EN BIT01
#define USB_INT_EN_BIT \
- (EP0_EN|SPEED_MODE_EN|USB_RST_EN|SPND_EN|RSUM_EN)
+ (EP0_EN | SPEED_MODE_EN | USB_RST_EN | SPND_EN | RSUM_EN)
/*------- (0x0028) EP0 Control Register */
#define EP0_STGSEL BIT18
@@ -205,9 +205,9 @@
#define EP0_PIDCLR BIT09
#define EP0_BCLR BIT08
#define EP0_DEND BIT07
-#define EP0_DW (BIT06+BIT05)
+#define EP0_DW (BIT06 + BIT05)
#define EP0_DW4 0
-#define EP0_DW3 (BIT06+BIT05)
+#define EP0_DW3 (BIT06 + BIT05)
#define EP0_DW2 BIT06
#define EP0_DW1 BIT05
@@ -238,7 +238,7 @@
#define STG_START_INT BIT01
#define SETUP_INT BIT00
-#define EP0_STATUS_RW_BIT (BIT16|BIT15|BIT11|0xFF)
+#define EP0_STATUS_RW_BIT (BIT16 | BIT15 | BIT11 | 0xFF)
/*------- (0x0030) EP0 Interrupt Enable Register */
#define EP0_PERR_NAK_EN BIT16
@@ -256,7 +256,7 @@
#define SETUP_EN BIT00
#define EP0_INT_EN_BIT \
- (EP0_OUT_OR_EN|EP0_OUT_EN|EP0_IN_EN|STG_END_EN|SETUP_EN)
+ (EP0_OUT_OR_EN | EP0_OUT_EN | EP0_IN_EN | STG_END_EN | SETUP_EN)
/*------- (0x0034) EP0 Length Register */
#define EP0_LDATA 0x0000007F
@@ -270,7 +270,7 @@
#define EPn_BUF_SINGLE BIT30
#define EPn_DIR0 BIT26
-#define EPn_MODE (BIT25+BIT24)
+#define EPn_MODE (BIT25 + BIT24)
#define EPn_BULK 0
#define EPn_INTERRUPT BIT24
#define EPn_ISO BIT25
@@ -283,9 +283,9 @@
#define EPn_BCLR BIT09
#define EPn_CBCLR BIT08
#define EPn_DEND BIT07
-#define EPn_DW (BIT06+BIT05)
+#define EPn_DW (BIT06 + BIT05)
#define EPn_DW4 0
-#define EPn_DW3 (BIT06+BIT05)
+#define EPn_DW3 (BIT06 + BIT05)
#define EPn_DW2 BIT06
#define EPn_DW1 BIT05
@@ -324,7 +324,7 @@
#define EPn_IN_EMPTY BIT00 /* R */
#define EPn_INT_EN \
- (EPn_OUT_END_INT|EPn_OUT_INT|EPn_IN_END_INT|EPn_IN_INT)
+ (EPn_OUT_END_INT | EPn_OUT_INT | EPn_IN_END_INT | EPn_IN_INT)
/*------- (0x0048:) EPn Interrupt Enable Register */
#define EPn_OUT_END_EN BIT23 /* RW */
@@ -368,7 +368,7 @@
#define ARBITER_CTR BIT31 /* RW */
#define MCYCLE_RST BIT12 /* RW */
-#define ENDIAN_CTR (BIT09+BIT08) /* RW */
+#define ENDIAN_CTR (BIT09 + BIT08) /* RW */
#define ENDIAN_BYTE_SWAP BIT09
#define ENDIAN_HALF_WORD_SWAP ENDIAN_CTR
@@ -376,7 +376,7 @@
#define HTRANS_MODE BIT04 /* RW */
#define WBURST_TYPE BIT02 /* RW */
-#define BURST_TYPE (BIT01+BIT00) /* RW */
+#define BURST_TYPE (BIT01 + BIT00) /* RW */
#define BURST_MAX_16 0
#define BURST_MAX_8 BIT00
#define BURST_MAX_4 BIT01
@@ -412,7 +412,7 @@
#define EPC_RST BIT00 /* RW */
/*------- (0x1014) USBF_EPTEST Register */
-#define LINESTATE (BIT09+BIT08) /* R */
+#define LINESTATE (BIT09 + BIT08) /* R */
#define DM_LEVEL BIT09 /* R */
#define DP_LEVEL BIT08 /* R */
@@ -485,7 +485,7 @@ struct fc_regs {
struct ep_regs EP_REGS[REG_EP_NUM]; /* Endpoint Register */
- u8 Reserved220[0x1000-0x220]; /* (0x0220:0x0FFF) Reserved */
+ u8 Reserved220[0x1000 - 0x220]; /* (0x0220:0x0FFF) Reserved */
u32 AHBSCTR; /* (0x1000) AHBSCTR */
u32 AHBMCTR; /* (0x1004) AHBMCTR */
@@ -494,16 +494,16 @@ struct fc_regs {
u32 EPCTR; /* (0x1010) EPCTR */
u32 USBF_EPTEST; /* (0x1014) USBF_EPTEST */
- u8 Reserved1018[0x20-0x18]; /* (0x1018:0x101F) Reserved */
+ u8 Reserved1018[0x20 - 0x18]; /* (0x1018:0x101F) Reserved */
u32 USBSSVER; /* (0x1020) USBSSVER */
u32 USBSSCONF; /* (0x1024) USBSSCONF */
- u8 Reserved1028[0x110-0x28]; /* (0x1028:0x110F) Reserved */
+ u8 Reserved1028[0x110 - 0x28]; /* (0x1028:0x110F) Reserved */
struct ep_dcr EP_DCR[REG_EP_NUM]; /* */
- u8 Reserved1200[0x1000-0x200]; /* Reserved */
+ u8 Reserved1200[0x1000 - 0x200]; /* Reserved */
} __aligned(32);
#define EP0_PACKETSIZE 64
diff --git a/drivers/staging/fbtft/fb_agm1264k-fl.c b/drivers/staging/fbtft/fb_agm1264k-fl.c
index ba9fc444b848..82b46cd27ca7 100644
--- a/drivers/staging/fbtft/fb_agm1264k-fl.c
+++ b/drivers/staging/fbtft/fb_agm1264k-fl.c
@@ -414,7 +414,7 @@ static int write(struct fbtft_par *par, void *buf, size_t len)
while (len--) {
u8 i, data;
- data = *(u8 *) buf++;
+ data = *(u8 *)buf++;
/* set data bus */
for (i = 0; i < 8; ++i)
diff --git a/drivers/staging/fbtft/fbtft-io.c b/drivers/staging/fbtft/fbtft-io.c
index a6f091fb975c..4dcea2e0b3ae 100644
--- a/drivers/staging/fbtft/fbtft-io.c
+++ b/drivers/staging/fbtft/fbtft-io.c
@@ -141,7 +141,7 @@ int fbtft_write_gpio8_wr(struct fbtft_par *par, void *buf, size_t len)
"%s(len=%d): ", __func__, len);
while (len--) {
- data = *(u8 *) buf;
+ data = *(u8 *)buf;
/* Start writing by pulling down /WR */
gpio_set_value(par->gpio.wr, 0);
@@ -170,7 +170,7 @@ int fbtft_write_gpio8_wr(struct fbtft_par *par, void *buf, size_t len)
gpio_set_value(par->gpio.wr, 1);
#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
- prev_data = *(u8 *) buf;
+ prev_data = *(u8 *)buf;
#endif
buf++;
}
@@ -191,7 +191,7 @@ int fbtft_write_gpio16_wr(struct fbtft_par *par, void *buf, size_t len)
"%s(len=%d): ", __func__, len);
while (len) {
- data = *(u16 *) buf;
+ data = *(u16 *)buf;
/* Start writing by pulling down /WR */
gpio_set_value(par->gpio.wr, 0);
@@ -220,7 +220,7 @@ int fbtft_write_gpio16_wr(struct fbtft_par *par, void *buf, size_t len)
gpio_set_value(par->gpio.wr, 1);
#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
- prev_data = *(u16 *) buf;
+ prev_data = *(u16 *)buf;
#endif
buf += 2;
len -= 2;
diff --git a/drivers/staging/fbtft/fbtft_device.c b/drivers/staging/fbtft/fbtft_device.c
index 241d7c6bebde..e4a355aefb25 100644
--- a/drivers/staging/fbtft/fbtft_device.c
+++ b/drivers/staging/fbtft/fbtft_device.c
@@ -1254,7 +1254,7 @@ static int write_gpio16_wr_slow(struct fbtft_par *par, void *buf, size_t len)
"%s(len=%d): ", __func__, len);
while (len) {
- data = *(u16 *) buf;
+ data = *(u16 *)buf;
/* Start writing by pulling down /WR */
gpio_set_value(par->gpio.wr, 0);
@@ -1283,7 +1283,7 @@ static int write_gpio16_wr_slow(struct fbtft_par *par, void *buf, size_t len)
gpio_set_value(par->gpio.wr, 1);
#ifndef DO_NOT_OPTIMIZE_FBTFT_WRITE_GPIO
- prev_data = *(u16 *) buf;
+ prev_data = *(u16 *)buf;
#endif
buf += 2;
len -= 2;
@@ -1436,7 +1436,7 @@ static int __init fbtft_device_init(void)
}
strncpy(fbtft_device_param_gpios[i].name, p_name,
FBTFT_GPIO_NAME_SIZE - 1);
- fbtft_device_param_gpios[i++].gpio = (int) val;
+ fbtft_device_param_gpios[i++].gpio = (int)val;
if (i == MAX_GPIOS) {
pr_err("gpios parameter: exceeded max array size: %d\n",
MAX_GPIOS);
diff --git a/drivers/staging/fsl-mc/README.txt b/drivers/staging/fsl-mc/README.txt
index 8214102f104b..179536a9b7a1 100644
--- a/drivers/staging/fsl-mc/README.txt
+++ b/drivers/staging/fsl-mc/README.txt
@@ -11,11 +11,11 @@ Contents summary
-Overview of DPAA2 objects
-DPAA2 Linux driver architecture overview
-bus driver
- -dprc driver
+ -DPRC driver
-allocator
- -dpio driver
+ -DPIO driver
-Ethernet
- -mac
+ -MAC
DPAA2 Overview
--------------
@@ -37,6 +37,9 @@ interfaces, an L2 switch, or accelerator instances.
The MC provides memory-mapped I/O command interfaces (MC portals)
which DPAA2 software drivers use to operate on DPAA2 objects:
+The diagram below shows an overview of the DPAA2 resource management
+architecture:
+
+--------------------------------------+
| OS |
| DPAA2 drivers |
@@ -77,13 +80,13 @@ DPIO objects.
Overview of DPAA2 Objects
-------------------------
-The section provides a brief overview of some key objects
-in the DPAA2 hardware. A simple scenario is described illustrating
-the objects involved in creating a network interfaces.
+The section provides a brief overview of some key DPAA2 objects.
+A simple scenario is described illustrating the objects involved
+in creating a network interface.
-DPRC (Datapath Resource Container)
- A DPRC is an container object that holds all the other
+ A DPRC is a container object that holds all the other
types of DPAA2 objects. In the example diagram below there
are 8 objects of 5 types (DPMCP, DPIO, DPBP, DPNI, and DPMAC)
in the container.
@@ -101,23 +104,23 @@ the objects involved in creating a network interfaces.
| |
+---------------------------------------------------------+
- From the point of view of an OS, a DPRC is bus-like. Like
- a plug-and-play bus, such as PCI, DPRC commands can be used to
- enumerate the contents of the DPRC, discover the hardware
- objects present (including mappable regions and interrupts).
+ From the point of view of an OS, a DPRC behaves similarly to a plug and
+ play bus, like PCI. DPRC commands can be used to enumerate the contents
+ of the DPRC, discover the hardware objects present (including mappable
+ regions and interrupts).
- dprc.1 (bus)
+ DPRC.1 (bus)
|
+--+--------+-------+-------+-------+
| | | | |
- dpmcp.1 dpio.1 dpbp.1 dpni.1 dpmac.1
- dpmcp.2 dpio.2
- dpmcp.3
+ DPMCP.1 DPIO.1 DPBP.1 DPNI.1 DPMAC.1
+ DPMCP.2 DPIO.2
+ DPMCP.3
Hardware objects can be created and destroyed dynamically, providing
the ability to hot plug/unplug objects in and out of the DPRC.
- A DPRC has a mappable mmio region (an MC portal) that can be used
+ A DPRC has a mappable MMIO region (an MC portal) that can be used
to send MC commands. It has an interrupt for status events (like
hotplug).
@@ -137,10 +140,11 @@ the objects involved in creating a network interfaces.
A typical Ethernet NIC is monolithic-- the NIC device contains TX/RX
queuing mechanisms, configuration mechanisms, buffer management,
physical ports, and interrupts. DPAA2 uses a more granular approach
- utilizing multiple hardware objects. Each object has specialized
- functions, and are used together by software to provide Ethernet network
- interface functionality. This approach provides efficient use of finite
- hardware resources, flexibility, and performance advantages.
+ utilizing multiple hardware objects. Each object provides specialized
+ functions. Groups of these objects are used by software to provide
+ Ethernet network interface functionality. This approach provides
+ efficient use of finite hardware resources, flexibility, and
+ performance advantages.
The diagram below shows the objects needed for a simple
network interface configuration on a system with 2 CPUs.
@@ -168,46 +172,52 @@ the objects involved in creating a network interfaces.
Below the objects are described. For each object a brief description
is provided along with a summary of the kinds of operations the object
- supports and a summary of key resources of the object (mmio regions
- and irqs).
+ supports and a summary of key resources of the object (MMIO regions
+ and IRQs).
-DPMAC (Datapath Ethernet MAC): represents an Ethernet MAC, a
hardware device that connects to an Ethernet PHY and allows
physical transmission and reception of Ethernet frames.
- -mmio regions: none
- -irqs: dpni link change
+ -MMIO regions: none
+ -IRQs: DPNI link change
-commands: set link up/down, link config, get stats,
- irq config, enable, reset
+ IRQ config, enable, reset
-DPNI (Datapath Network Interface): contains TX/RX queues,
- network interface configuration, and rx buffer pool configuration
- mechanisms.
- -mmio regions: none
- -irqs: link state
+ network interface configuration, and RX buffer pool configuration
+ mechanisms. The TX/RX queues are in memory and are identified by
+ queue number.
+ -MMIO regions: none
+ -IRQs: link state
-commands: port config, offload config, queue config,
- parse/classify config, irq config, enable, reset
+ parse/classify config, IRQ config, enable, reset
-DPIO (Datapath I/O): provides interfaces to enqueue and dequeue
- packets and do hardware buffer pool management operations. For
- optimum performance there is typically DPIO per CPU. This allows
- each CPU to perform simultaneous enqueue/dequeue operations.
- -mmio regions: queue operations, buffer mgmt
- -irqs: data availability, congestion notification, buffer
+ packets and do hardware buffer pool management operations. The DPAA2
+ architecture separates the mechanism to access queues (the DPIO object)
+ from the queues themselves. The DPIO provides an MMIO interface to
+ enqueue/dequeue packets. To enqueue something, a descriptor is written
+ to the DPIO MMIO region, which includes the target queue number.
+ There will typically be one DPIO assigned to each CPU. This allows all
+ CPUs to simultaneously perform enqueue/dequeue operations. DPIOs are
+ expected to be shared by different DPAA2 drivers.
+ -MMIO regions: queue operations, buffer management
+ -IRQs: data availability, congestion notification, buffer
pool depletion
- -commands: irq config, enable, reset
+ -commands: IRQ config, enable, reset
-DPBP (Datapath Buffer Pool): represents a hardware buffer
pool.
- -mmio regions: none
- -irqs: none
+ -MMIO regions: none
+ -IRQs: none
-commands: enable, reset
-DPMCP (Datapath MC Portal): provides an MC command portal.
Used by drivers to send commands to the MC to manage
objects.
- -mmio regions: MC command portal
- -irqs: command completion
- -commands: irq config, enable, reset
+ -MMIO regions: MC command portal
+ -IRQs: command completion
+ -commands: IRQ config, enable, reset
Object Connections
------------------
@@ -268,22 +278,22 @@ of each driver follows.
| Stack |
+------------+ +------------+
| Allocator |. . . . . . . | Ethernet |
- |(dpmcp,dpbp)| | (dpni) |
+ |(DPMCP,DPBP)| | (DPNI) |
+-.----------+ +---+---+----+
. . ^ |
. . <data avail, | |<enqueue,
. . tx confirm> | | dequeue>
+-------------+ . | |
| DPRC driver | . +---+---V----+ +---------+
- | (dprc) | . . . . . .| DPIO driver| | MAC |
- +----------+--+ | (dpio) | | (dpmac) |
+ | (DPRC) | . . . . . .| DPIO driver| | MAC |
+ +----------+--+ | (DPIO) | | (DPMAC) |
| +------+-----+ +-----+---+
|<dev add/remove> | |
| | |
+----+--------------+ | +--+---+
- | mc-bus driver | | | PHY |
+ | MC-bus driver | | | PHY |
| | | |driver|
- | /fsl-mc@80c000000 | | +--+---+
+ | /soc/fsl-mc | | +--+---+
+-------------------+ | |
| |
================================ HARDWARE =========|=================|======
@@ -298,25 +308,27 @@ of each driver follows.
A brief description of each driver is provided below.
- mc-bus driver
+ MC-bus driver
-------------
- The mc-bus driver is a platform driver and is probed from an
- "/fsl-mc@xxxx" node in the device tree passed in by boot firmware.
- It is responsible for bootstrapping the DPAA2 kernel infrastructure.
+ The MC-bus driver is a platform driver and is probed from a
+ node in the device tree (compatible "fsl,qoriq-mc") passed in by boot
+ firmware. It is responsible for bootstrapping the DPAA2 kernel
+ infrastructure.
Key functions include:
-registering a new bus type named "fsl-mc" with the kernel,
and implementing bus call-backs (e.g. match/uevent/dev_groups)
- -implemeting APIs for DPAA2 driver registration and for device
+ -implementing APIs for DPAA2 driver registration and for device
add/remove
- -creates an MSI irq domain
- -do a device add of the 'root' DPRC device, which is needed
- to bootstrap things
+ -creating an MSI IRQ domain
+ -doing a 'device add' to expose the 'root' DPRC, in turn triggering
+ a bind of the root DPRC to the DPRC driver
DPRC driver
-----------
- The dprc-driver is bound DPRC objects and does runtime management
+ The DPRC driver is bound to DPRC objects and does runtime management
of a bus instance. It performs the initial bus scan of the DPRC
- and handles interrupts for container events such as hot plug.
+ and handles interrupts for container events such as hot plug by
+ re-scanning the DPRC.
Allocator
----------
@@ -334,14 +346,20 @@ A brief description of each driver is provided below.
DPIO driver
-----------
The DPIO driver is bound to DPIO objects and provides services that allow
- other drivers such as the Ethernet driver to receive and transmit data.
+ other drivers such as the Ethernet driver to enqueue and dequeue data for
+ their respective objects.
Key services include:
-data availability notifications
-hardware queuing operations (enqueue and dequeue of data)
-hardware buffer pool management
+ To transmit a packet, the Ethernet driver puts data on a queue and
+ invokes a DPIO API. For receive, the Ethernet driver registers
+ a data availability notification callback. To dequeue a packet,
+ a DPIO API is used.
+
There is typically one DPIO object per physical CPU for optimum
- performance, allowing each CPU to simultaneously enqueue
+ performance, allowing different CPUs to simultaneously enqueue
and dequeue data.
The DPIO driver operates on behalf of all DPAA2 drivers
@@ -362,3 +380,7 @@ A brief description of each driver is provided below.
by the appropriate PHY driver via an mdio bus. The MAC driver
plays a role of being a proxy between the PHY driver and the
MC. It does this proxy via the MC commands to a DPMAC object.
+ If the PHY driver signals a link change, the MAC driver notifies
+ the MC via a DPMAC command. If a network interface is brought
+ up or down, the MC notifies the DPMAC driver via an interrupt and
+ the driver can take appropriate action.
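To make the bus-scan description above concrete, a minimal hypothetical helper (not part of the patch) could walk a DPRC with the command wrappers this series touches; the dprc_open()/dprc_close() calls mirror their use in the hunks below, while the dprc_get_obj_count()/dprc_get_obj() signatures are assumed from the staging dprc.h, with error handling abbreviated:

static int example_scan_dprc(struct fsl_mc_io *mc_io, int container_id)
{
	struct dprc_obj_desc obj_desc;
	u16 dprc_handle;
	int obj_count, i, error;

	/* Open the container; the returned token is used for all commands. */
	error = dprc_open(mc_io, 0, container_id, &dprc_handle);
	if (error < 0)
		return error;

	/* Ask the MC how many child objects the container currently holds. */
	error = dprc_get_obj_count(mc_io, 0, dprc_handle, &obj_count);
	if (error < 0)
		goto out_close;

	/* Enumerate the objects; type, id, IRQ and region counts describe each. */
	for (i = 0; i < obj_count; i++) {
		error = dprc_get_obj(mc_io, 0, dprc_handle, i, &obj_desc);
		if (error < 0)
			goto out_close;

		pr_info("found %s.%d (irqs=%d regions=%d)\n",
			obj_desc.type, obj_desc.id,
			obj_desc.irq_count, obj_desc.region_count);
	}

out_close:
	dprc_close(mc_io, 0, dprc_handle);
	return error;
}

This is essentially what the DPRC driver's initial scan, and its re-scan on container interrupts, boil down to.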
diff --git a/drivers/staging/fsl-mc/TODO b/drivers/staging/fsl-mc/TODO
index 389436891b93..54a8bc69222e 100644
--- a/drivers/staging/fsl-mc/TODO
+++ b/drivers/staging/fsl-mc/TODO
@@ -1,21 +1,8 @@
-* Decide if multiple root fsl-mc buses will be supported per Linux instance,
- and if so add support for this.
-
* Add at least one device driver for a DPAA2 object (child device of the
fsl-mc bus). Most likely candidate for this is adding DPAA2 Ethernet
driver support, which depends on drivers for several objects: DPNI,
DPIO, DPMAC. Other pre-requisites include:
- * interrupt support. for meaningful driver support we need
- interrupts, and thus need message interrupt support by the bus
- driver.
- -Note: this has dependencies on generic MSI support work
- in process upstream, see [1] and [2].
-
- * Management Complex (MC) command serialization. locking mechanisms
- are needed by drivers to serialize commands sent to the MC, including
- from atomic context.
-
* MC firmware uprev. The MC firmware upon which the fsl-mc
bus driver and DPAA2 object drivers are based is continuing
to evolve, so minor updates are needed to keep in sync with binary
diff --git a/drivers/staging/fsl-mc/bus/dpbp.c b/drivers/staging/fsl-mc/bus/dpbp.c
index 2d97173f8e91..c31fe1bca191 100644
--- a/drivers/staging/fsl-mc/bus/dpbp.c
+++ b/drivers/staging/fsl-mc/bus/dpbp.c
@@ -293,7 +293,7 @@ int dpbp_set_irq(struct fsl_mc_io *mc_io,
cmd.params[0] |= mc_enc(0, 8, irq_index);
cmd.params[0] |= mc_enc(32, 32, irq_cfg->val);
cmd.params[1] |= mc_enc(0, 64, irq_cfg->addr);
- cmd.params[2] |= mc_enc(0, 32, irq_cfg->user_irq_id);
+ cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -334,7 +334,7 @@ int dpbp_get_irq(struct fsl_mc_io *mc_io,
/* retrieve response parameters */
irq_cfg->val = (u32)mc_dec(cmd.params[0], 0, 32);
irq_cfg->addr = (u64)mc_dec(cmd.params[1], 0, 64);
- irq_cfg->user_irq_id = (int)mc_dec(cmd.params[2], 0, 32);
+ irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32);
*type = (int)mc_dec(cmd.params[2], 32, 32);
return 0;
}
@@ -502,6 +502,7 @@ int dpbp_get_irq_status(struct fsl_mc_io *mc_io,
/* prepare command */
cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_STATUS,
cmd_flags, token);
+ cmd.params[0] |= mc_enc(0, 32, *status);
cmd.params[0] |= mc_enc(32, 8, irq_index);
/* send command to mc*/
@@ -580,3 +581,75 @@ int dpbp_get_attributes(struct fsl_mc_io *mc_io,
return 0;
}
EXPORT_SYMBOL(dpbp_get_attributes);
+
+/**
+ * dpbp_set_notifications() - Set notifications towards software
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ * @cfg: notifications configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_set_notifications(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpbp_notification_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_NOTIFICATIONS,
+ cmd_flags,
+ token);
+
+ cmd.params[0] |= mc_enc(0, 32, cfg->depletion_entry);
+ cmd.params[0] |= mc_enc(32, 32, cfg->depletion_exit);
+ cmd.params[1] |= mc_enc(0, 32, cfg->surplus_entry);
+ cmd.params[1] |= mc_enc(32, 32, cfg->surplus_exit);
+ cmd.params[2] |= mc_enc(0, 16, cfg->options);
+ cmd.params[3] |= mc_enc(0, 64, cfg->message_ctx);
+ cmd.params[4] |= mc_enc(0, 64, cfg->message_iova);
+
+ /* send command to mc*/
+ return mc_send_command(mc_io, &cmd);
+}
+
+/**
+ * dpbp_get_notifications() - Get the notifications configuration
+ * @mc_io: Pointer to MC portal's I/O object
+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
+ * @token: Token of DPBP object
+ * @cfg: notifications configuration
+ *
+ * Return: '0' on Success; Error code otherwise.
+ */
+int dpbp_get_notifications(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpbp_notification_cfg *cfg)
+{
+ struct mc_command cmd = { 0 };
+ int err;
+
+ /* prepare command */
+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_NOTIFICATIONS,
+ cmd_flags,
+ token);
+
+ /* send command to mc*/
+ err = mc_send_command(mc_io, &cmd);
+ if (err)
+ return err;
+
+ /* retrieve response parameters */
+ cfg->depletion_entry = (u32)mc_dec(cmd.params[0], 0, 32);
+ cfg->depletion_exit = (u32)mc_dec(cmd.params[0], 32, 32);
+ cfg->surplus_entry = (u32)mc_dec(cmd.params[1], 0, 32);
+ cfg->surplus_exit = (u32)mc_dec(cmd.params[1], 32, 32);
+ cfg->options = (u16)mc_dec(cmd.params[2], 0, 16);
+ cfg->message_ctx = (u64)mc_dec(cmd.params[3], 0, 64);
+ cfg->message_iova = (u64)mc_dec(cmd.params[4], 0, 64);
+
+ return 0;
+}
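For context, a hypothetical consumer of the new calls might enable depletion notifications roughly as follows; the thresholds are illustrative and the caller is assumed to have set up a 16-byte aligned, DMA-able BPSCN message area whose IOVA is passed in:

static int example_enable_depletion_notifications(struct fsl_mc_io *mc_io,
						  u16 dpbp_token,
						  u64 bpscn_iova, u64 ctx)
{
	struct dpbp_notification_cfg cfg = { 0 };

	cfg.depletion_entry = 64;	/* pool is "depleted" below this threshold */
	cfg.depletion_exit = 128;	/* leaves "depleted" at or above this */
	cfg.surplus_entry = 0;		/* 0 disables surplus notifications */
	cfg.surplus_exit = 0;
	cfg.message_iova = bpscn_iova;	/* 16B-aligned, DMA-able memory */
	cfg.message_ctx = ctx;		/* echoed back in the BPSCN message */
	cfg.options = DPBP_NOTIF_OPT_COHERENT_WRITE;

	return dpbp_set_notifications(mc_io, 0, dpbp_token, &cfg);
}

dpbp_get_notifications() can then be used to read the same configuration back, exactly as decoded above.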
diff --git a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
index a87e9f84fa42..c9b52dd7ba31 100644
--- a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h
@@ -32,9 +32,9 @@
#ifndef _FSL_DPMCP_CMD_H
#define _FSL_DPMCP_CMD_H
-/* DPMCP Version */
-#define DPMCP_VER_MAJOR 2
-#define DPMCP_VER_MINOR 1
+/* Minimal supported DPMCP Version */
+#define DPMCP_MIN_VER_MAJOR 3
+#define DPMCP_MIN_VER_MINOR 0
/* Command IDs */
#define DPMCP_CMDID_CLOSE 0x800
@@ -52,6 +52,5 @@
#define DPMCP_CMDID_SET_IRQ_MASK 0x014
#define DPMCP_CMDID_GET_IRQ_MASK 0x015
#define DPMCP_CMDID_GET_IRQ_STATUS 0x016
-#define DPMCP_CMDID_CLEAR_IRQ_STATUS 0x017
#endif /* _FSL_DPMCP_CMD_H */
diff --git a/drivers/staging/fsl-mc/bus/dpmcp.c b/drivers/staging/fsl-mc/bus/dpmcp.c
index b0248f574619..fd6dd4e07b87 100644
--- a/drivers/staging/fsl-mc/bus/dpmcp.c
+++ b/drivers/staging/fsl-mc/bus/dpmcp.c
@@ -213,7 +213,7 @@ int dpmcp_set_irq(struct fsl_mc_io *mc_io,
cmd.params[0] |= mc_enc(0, 8, irq_index);
cmd.params[0] |= mc_enc(32, 32, irq_cfg->val);
cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr);
- cmd.params[2] |= mc_enc(0, 32, irq_cfg->user_irq_id);
+ cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -254,7 +254,7 @@ int dpmcp_get_irq(struct fsl_mc_io *mc_io,
/* retrieve response parameters */
irq_cfg->val = (u32)mc_dec(cmd.params[0], 0, 32);
irq_cfg->paddr = (u64)mc_dec(cmd.params[1], 0, 64);
- irq_cfg->user_irq_id = (int)mc_dec(cmd.params[2], 0, 32);
+ irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32);
*type = (int)mc_dec(cmd.params[2], 32, 32);
return 0;
}
@@ -435,37 +435,6 @@ int dpmcp_get_irq_status(struct fsl_mc_io *mc_io,
}
/**
- * dpmcp_clear_irq_status() - Clear a pending interrupt's status
- *
- * @mc_io: Pointer to MC portal's I/O object
- * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_'
- * @token: Token of DPMCP object
- * @irq_index: The interrupt index to configure
- * @status: Bits to clear (W1C) - one bit per cause:
- * 0 = don't change
- * 1 = clear status bit
- *
- * Return: '0' on Success; Error code otherwise.
- */
-int dpmcp_clear_irq_status(struct fsl_mc_io *mc_io,
- u32 cmd_flags,
- u16 token,
- u8 irq_index,
- u32 status)
-{
- struct mc_command cmd = { 0 };
-
- /* prepare command */
- cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CLEAR_IRQ_STATUS,
- cmd_flags, token);
- cmd.params[0] |= mc_enc(0, 32, status);
- cmd.params[0] |= mc_enc(32, 8, irq_index);
-
- /* send command to mc*/
- return mc_send_command(mc_io, &cmd);
-}
-
-/**
* dpmcp_get_attributes - Retrieve DPMCP attributes.
*
* @mc_io: Pointer to MC portal's I/O object
diff --git a/drivers/staging/fsl-mc/bus/dpmcp.h b/drivers/staging/fsl-mc/bus/dpmcp.h
index 6df351f0caa5..fe79d4d9293d 100644
--- a/drivers/staging/fsl-mc/bus/dpmcp.h
+++ b/drivers/staging/fsl-mc/bus/dpmcp.h
@@ -82,12 +82,12 @@ int dpmcp_reset(struct fsl_mc_io *mc_io,
* struct dpmcp_irq_cfg - IRQ configuration
* @paddr: Address that must be written to signal a message-based interrupt
* @val: Value to write into irq_addr address
- * @user_irq_id: A user defined number associated with this IRQ
+ * @irq_num: A user defined number associated with this IRQ
*/
struct dpmcp_irq_cfg {
uint64_t paddr;
uint32_t val;
- int user_irq_id;
+ int irq_num;
};
int dpmcp_set_irq(struct fsl_mc_io *mc_io,
@@ -133,12 +133,6 @@ int dpmcp_get_irq_status(struct fsl_mc_io *mc_io,
uint8_t irq_index,
uint32_t *status);
-int dpmcp_clear_irq_status(struct fsl_mc_io *mc_io,
- uint32_t cmd_flags,
- uint16_t token,
- uint8_t irq_index,
- uint32_t status);
-
/**
* struct dpmcp_attr - Structure representing DPMCP attributes
* @id: DPMCP object ID
diff --git a/drivers/staging/fsl-mc/bus/dprc-cmd.h b/drivers/staging/fsl-mc/bus/dprc-cmd.h
index 6552c2034947..9b854fa8e84d 100644
--- a/drivers/staging/fsl-mc/bus/dprc-cmd.h
+++ b/drivers/staging/fsl-mc/bus/dprc-cmd.h
@@ -40,9 +40,9 @@
#ifndef _FSL_DPRC_CMD_H
#define _FSL_DPRC_CMD_H
-/* DPRC Version */
-#define DPRC_VER_MAJOR 4
-#define DPRC_VER_MINOR 0
+/* Minimal supported DPRC Version */
+#define DPRC_MIN_VER_MAJOR 5
+#define DPRC_MIN_VER_MINOR 0
/* Command IDs */
#define DPRC_CMDID_CLOSE 0x800
diff --git a/drivers/staging/fsl-mc/bus/dprc-driver.c b/drivers/staging/fsl-mc/bus/dprc-driver.c
index 31488a7b9e86..7fc47173c164 100644
--- a/drivers/staging/fsl-mc/bus/dprc-driver.c
+++ b/drivers/staging/fsl-mc/bus/dprc-driver.c
@@ -312,6 +312,15 @@ int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev,
continue;
}
+ /*
+ * Add a quirk for all versions of dpseci < 4.0: none
+ * are coherent regardless of what the MC reports.
+ */
+ if ((strcmp(obj_desc->type, "dpseci") == 0) &&
+ (obj_desc->ver_major < 4))
+ obj_desc->flags |=
+ DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY;
+
irq_count += obj_desc->irq_count;
dev_dbg(&mc_bus_dev->dev,
"Discovered object: type %s, id %d\n",
@@ -423,6 +432,7 @@ static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg)
if (WARN_ON(!msi_desc || msi_desc->irq != (u32)irq_num))
goto out;
+ status = 0;
error = dprc_get_irq_status(mc_io, 0, mc_dev->mc_handle, 0,
&status);
if (error < 0) {
@@ -692,6 +702,25 @@ static int dprc_probe(struct fsl_mc_device *mc_dev)
goto error_cleanup_msi_domain;
}
+ error = dprc_get_attributes(mc_dev->mc_io, 0, mc_dev->mc_handle,
+ &mc_bus->dprc_attr);
+ if (error < 0) {
+ dev_err(&mc_dev->dev, "dprc_get_attributes() failed: %d\n",
+ error);
+ goto error_cleanup_open;
+ }
+
+ if (mc_bus->dprc_attr.version.major < DPRC_MIN_VER_MAJOR ||
+ (mc_bus->dprc_attr.version.major == DPRC_MIN_VER_MAJOR &&
+ mc_bus->dprc_attr.version.minor < DPRC_MIN_VER_MINOR)) {
+ dev_err(&mc_dev->dev,
+ "ERROR: DPRC version %d.%d not supported\n",
+ mc_bus->dprc_attr.version.major,
+ mc_bus->dprc_attr.version.minor);
+ error = -ENOTSUPP;
+ goto error_cleanup_open;
+ }
+
mutex_init(&mc_bus->scan_mutex);
/*
@@ -779,9 +808,7 @@ static int dprc_remove(struct fsl_mc_device *mc_dev)
static const struct fsl_mc_device_match_id match_id_table[] = {
{
.vendor = FSL_MC_VENDOR_FREESCALE,
- .obj_type = "dprc",
- .ver_major = DPRC_VER_MAJOR,
- .ver_minor = DPRC_VER_MINOR},
+ .obj_type = "dprc"},
{.vendor = 0x0},
};
diff --git a/drivers/staging/fsl-mc/bus/dprc.c b/drivers/staging/fsl-mc/bus/dprc.c
index 381b9a96a14b..a2c47377cc4e 100644
--- a/drivers/staging/fsl-mc/bus/dprc.c
+++ b/drivers/staging/fsl-mc/bus/dprc.c
@@ -265,7 +265,7 @@ int dprc_get_irq(struct fsl_mc_io *mc_io,
/* retrieve response parameters */
irq_cfg->val = mc_dec(cmd.params[0], 0, 32);
irq_cfg->paddr = mc_dec(cmd.params[1], 0, 64);
- irq_cfg->user_irq_id = mc_dec(cmd.params[2], 0, 32);
+ irq_cfg->irq_num = mc_dec(cmd.params[2], 0, 32);
*type = mc_dec(cmd.params[2], 32, 32);
return 0;
@@ -296,7 +296,7 @@ int dprc_set_irq(struct fsl_mc_io *mc_io,
cmd.params[0] |= mc_enc(32, 8, irq_index);
cmd.params[0] |= mc_enc(0, 32, irq_cfg->val);
cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr);
- cmd.params[2] |= mc_enc(0, 32, irq_cfg->user_irq_id);
+ cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num);
/* send command to mc*/
return mc_send_command(mc_io, &cmd);
@@ -466,6 +466,7 @@ int dprc_get_irq_status(struct fsl_mc_io *mc_io,
/* prepare command */
cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_STATUS,
cmd_flags, token);
+ cmd.params[0] |= mc_enc(0, 32, *status);
cmd.params[0] |= mc_enc(32, 8, irq_index);
/* send command to mc*/
@@ -948,6 +949,7 @@ int dprc_get_obj(struct fsl_mc_io *mc_io,
obj_desc->state = mc_dec(cmd.params[1], 32, 32);
obj_desc->ver_major = mc_dec(cmd.params[2], 0, 16);
obj_desc->ver_minor = mc_dec(cmd.params[2], 16, 16);
+ obj_desc->flags = mc_dec(cmd.params[2], 32, 16);
obj_desc->type[0] = mc_dec(cmd.params[3], 0, 8);
obj_desc->type[1] = mc_dec(cmd.params[3], 8, 8);
obj_desc->type[2] = mc_dec(cmd.params[3], 16, 8);
@@ -1042,6 +1044,7 @@ int dprc_get_obj_desc(struct fsl_mc_io *mc_io,
obj_desc->state = (u32)mc_dec(cmd.params[1], 32, 32);
obj_desc->ver_major = (u16)mc_dec(cmd.params[2], 0, 16);
obj_desc->ver_minor = (u16)mc_dec(cmd.params[2], 16, 16);
+ obj_desc->flags = mc_dec(cmd.params[2], 32, 16);
obj_desc->type[0] = (char)mc_dec(cmd.params[3], 0, 8);
obj_desc->type[1] = (char)mc_dec(cmd.params[3], 8, 8);
obj_desc->type[2] = (char)mc_dec(cmd.params[3], 16, 8);
@@ -1108,7 +1111,7 @@ int dprc_set_obj_irq(struct fsl_mc_io *mc_io,
cmd.params[0] |= mc_enc(32, 8, irq_index);
cmd.params[0] |= mc_enc(0, 32, irq_cfg->val);
cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr);
- cmd.params[2] |= mc_enc(0, 32, irq_cfg->user_irq_id);
+ cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num);
cmd.params[2] |= mc_enc(32, 32, obj_id);
cmd.params[3] |= mc_enc(0, 8, obj_type[0]);
cmd.params[3] |= mc_enc(8, 8, obj_type[1]);
@@ -1189,7 +1192,7 @@ int dprc_get_obj_irq(struct fsl_mc_io *mc_io,
/* retrieve response parameters */
irq_cfg->val = (u32)mc_dec(cmd.params[0], 0, 32);
irq_cfg->paddr = (u64)mc_dec(cmd.params[1], 0, 64);
- irq_cfg->user_irq_id = (int)mc_dec(cmd.params[2], 0, 32);
+ irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32);
*type = (int)mc_dec(cmd.params[2], 32, 32);
return 0;
@@ -1437,14 +1440,8 @@ EXPORT_SYMBOL(dprc_set_obj_label);
* @endpoint1: Endpoint 1 configuration parameters
* @endpoint2: Endpoint 2 configuration parameters
* @cfg: Connection configuration. The connection configuration is ignored for
- * connections made to DPMAC objects, where rate is set according to
- * MAC configuration.
- * The committed rate is the guaranteed rate for the connection.
- * The maximum rate is an upper limit allowed for the connection; it is
- * expected to be equal or higher than the committed rate.
- * When committed and maximum rates are both zero, the connection is set
- * to "best effort" mode, having lower priority compared to connections
- * with committed or maximum rates.
+ * connections made to DPMAC objects, where rate is retrieved from the
+ * MAC configuration.
*
* Return: '0' on Success; Error code otherwise.
*/
@@ -1555,7 +1552,10 @@ int dprc_disconnect(struct fsl_mc_io *mc_io,
* @token: Token of DPRC object
* @endpoint1: Endpoint 1 configuration parameters
* @endpoint2: Returned endpoint 2 configuration parameters
-* @state: Returned link state: 1 - link is up, 0 - link is down
+* @state: Returned link state:
+* 1 - link is up;
+* 0 - link is down;
+* -1 - no connection (endpoint2 information is irrelevant)
*
* Return: '0' on Success; -ENAVAIL if connection does not exist.
*/
diff --git a/drivers/staging/fsl-mc/bus/mc-allocator.c b/drivers/staging/fsl-mc/bus/mc-allocator.c
index 86f8543c2b9a..fb08f22a7f9c 100644
--- a/drivers/staging/fsl-mc/bus/mc-allocator.c
+++ b/drivers/staging/fsl-mc/bus/mc-allocator.c
@@ -39,7 +39,6 @@ static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus
struct fsl_mc_resource *resource;
struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
int error = -EINVAL;
- bool mutex_locked = false;
if (WARN_ON(pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES))
goto out;
@@ -55,13 +54,12 @@ static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus
goto out;
mutex_lock(&res_pool->mutex);
- mutex_locked = true;
if (WARN_ON(res_pool->max_count < 0))
- goto out;
+ goto out_unlock;
if (WARN_ON(res_pool->free_count < 0 ||
res_pool->free_count > res_pool->max_count))
- goto out;
+ goto out_unlock;
resource = devm_kzalloc(&mc_bus_dev->dev, sizeof(*resource),
GFP_KERNEL);
@@ -69,7 +67,7 @@ static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus
error = -ENOMEM;
dev_err(&mc_bus_dev->dev,
"Failed to allocate memory for fsl_mc_resource\n");
- goto out;
+ goto out_unlock;
}
resource->type = pool_type;
@@ -82,10 +80,9 @@ static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus
res_pool->free_count++;
res_pool->max_count++;
error = 0;
+out_unlock:
+ mutex_unlock(&res_pool->mutex);
out:
- if (mutex_locked)
- mutex_unlock(&res_pool->mutex);
-
return error;
}
@@ -106,7 +103,6 @@ static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device
struct fsl_mc_resource_pool *res_pool;
struct fsl_mc_resource *resource;
int error = -EINVAL;
- bool mutex_locked = false;
if (WARN_ON(!FSL_MC_IS_ALLOCATABLE(mc_dev->obj_desc.type)))
goto out;
@@ -122,13 +118,12 @@ static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device
goto out;
mutex_lock(&res_pool->mutex);
- mutex_locked = true;
if (WARN_ON(res_pool->max_count <= 0))
- goto out;
+ goto out_unlock;
if (WARN_ON(res_pool->free_count <= 0 ||
res_pool->free_count > res_pool->max_count))
- goto out;
+ goto out_unlock;
/*
* If the device is currently allocated, its resource is not
@@ -139,7 +134,7 @@ static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device
dev_err(&mc_bus_dev->dev,
"Device %s cannot be removed from resource pool\n",
dev_name(&mc_dev->dev));
- goto out;
+ goto out_unlock;
}
list_del(&resource->node);
@@ -150,10 +145,9 @@ static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device
devm_kfree(&mc_bus_dev->dev, resource);
mc_dev->resource = NULL;
error = 0;
+out_unlock:
+ mutex_unlock(&res_pool->mutex);
out:
- if (mutex_locked)
- mutex_unlock(&res_pool->mutex);
-
return error;
}
@@ -188,21 +182,19 @@ int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
struct fsl_mc_resource *resource;
struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev;
int error = -EINVAL;
- bool mutex_locked = false;
BUILD_BUG_ON(ARRAY_SIZE(fsl_mc_pool_type_strings) !=
FSL_MC_NUM_POOL_TYPES);
*new_resource = NULL;
if (WARN_ON(pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES))
- goto error;
+ goto out;
res_pool = &mc_bus->resource_pools[pool_type];
if (WARN_ON(res_pool->mc_bus != mc_bus))
- goto error;
+ goto out;
mutex_lock(&res_pool->mutex);
- mutex_locked = true;
resource = list_first_entry_or_null(&res_pool->free_list,
struct fsl_mc_resource, node);
@@ -212,28 +204,26 @@ int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus,
dev_err(&mc_bus_dev->dev,
"No more resources of type %s left\n",
fsl_mc_pool_type_strings[pool_type]);
- goto error;
+ goto out_unlock;
}
if (WARN_ON(resource->type != pool_type))
- goto error;
+ goto out_unlock;
if (WARN_ON(resource->parent_pool != res_pool))
- goto error;
+ goto out_unlock;
if (WARN_ON(res_pool->free_count <= 0 ||
res_pool->free_count > res_pool->max_count))
- goto error;
+ goto out_unlock;
list_del(&resource->node);
INIT_LIST_HEAD(&resource->node);
res_pool->free_count--;
+ error = 0;
+out_unlock:
mutex_unlock(&res_pool->mutex);
*new_resource = resource;
- return 0;
-error:
- if (mutex_locked)
- mutex_unlock(&res_pool->mutex);
-
+out:
return error;
}
EXPORT_SYMBOL_GPL(fsl_mc_resource_allocate);
@@ -241,26 +231,23 @@ EXPORT_SYMBOL_GPL(fsl_mc_resource_allocate);
void fsl_mc_resource_free(struct fsl_mc_resource *resource)
{
struct fsl_mc_resource_pool *res_pool;
- bool mutex_locked = false;
res_pool = resource->parent_pool;
if (WARN_ON(resource->type != res_pool->type))
- goto out;
+ return;
mutex_lock(&res_pool->mutex);
- mutex_locked = true;
if (WARN_ON(res_pool->free_count < 0 ||
res_pool->free_count >= res_pool->max_count))
- goto out;
+ goto out_unlock;
if (WARN_ON(!list_empty(&resource->node)))
- goto out;
+ goto out_unlock;
list_add_tail(&resource->node, &res_pool->free_list);
res_pool->free_count++;
-out:
- if (mutex_locked)
- mutex_unlock(&res_pool->mutex);
+out_unlock:
+ mutex_unlock(&res_pool->mutex);
}
EXPORT_SYMBOL_GPL(fsl_mc_resource_free);
@@ -306,10 +293,22 @@ int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev,
if (error < 0)
return error;
+ error = -EINVAL;
dpmcp_dev = resource->data;
if (WARN_ON(!dpmcp_dev))
goto error_cleanup_resource;
+ if (dpmcp_dev->obj_desc.ver_major < DPMCP_MIN_VER_MAJOR ||
+ (dpmcp_dev->obj_desc.ver_major == DPMCP_MIN_VER_MAJOR &&
+ dpmcp_dev->obj_desc.ver_minor < DPMCP_MIN_VER_MINOR)) {
+ dev_err(&dpmcp_dev->dev,
+ "ERROR: Version %d.%d of DPMCP not supported.\n",
+ dpmcp_dev->obj_desc.ver_major,
+ dpmcp_dev->obj_desc.ver_minor);
+ error = -ENOTSUPP;
+ goto error_cleanup_resource;
+ }
+
if (WARN_ON(dpmcp_dev->obj_desc.region_count == 0))
goto error_cleanup_resource;
@@ -722,20 +721,14 @@ static const struct fsl_mc_device_match_id match_id_table[] = {
{
.vendor = FSL_MC_VENDOR_FREESCALE,
.obj_type = "dpbp",
- .ver_major = DPBP_VER_MAJOR,
- .ver_minor = DPBP_VER_MINOR
},
{
.vendor = FSL_MC_VENDOR_FREESCALE,
.obj_type = "dpmcp",
- .ver_major = DPMCP_VER_MAJOR,
- .ver_minor = DPMCP_VER_MINOR
},
{
.vendor = FSL_MC_VENDOR_FREESCALE,
.obj_type = "dpcon",
- .ver_major = DPCON_VER_MAJOR,
- .ver_minor = DPCON_VER_MINOR
},
{.vendor = 0x0},
};
diff --git a/drivers/staging/fsl-mc/bus/mc-bus.c b/drivers/staging/fsl-mc/bus/mc-bus.c
index b59455661f4d..405364307561 100644
--- a/drivers/staging/fsl-mc/bus/mc-bus.c
+++ b/drivers/staging/fsl-mc/bus/mc-bus.c
@@ -40,8 +40,6 @@ static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv)
struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev);
struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv);
bool found = false;
- bool major_version_mismatch = false;
- bool minor_version_mismatch = false;
if (WARN_ON(!fsl_mc_bus_exists()))
goto out;
@@ -64,32 +62,12 @@ static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv)
for (id = mc_drv->match_id_table; id->vendor != 0x0; id++) {
if (id->vendor == mc_dev->obj_desc.vendor &&
strcmp(id->obj_type, mc_dev->obj_desc.type) == 0) {
- if (id->ver_major == mc_dev->obj_desc.ver_major) {
- found = true;
- if (id->ver_minor != mc_dev->obj_desc.ver_minor)
- minor_version_mismatch = true;
- } else {
- major_version_mismatch = true;
- }
+ found = true;
break;
}
}
- if (major_version_mismatch) {
- dev_warn(dev,
- "Major version mismatch: driver version %u.%u, MC object version %u.%u\n",
- id->ver_major, id->ver_minor,
- mc_dev->obj_desc.ver_major,
- mc_dev->obj_desc.ver_minor);
- } else if (minor_version_mismatch) {
- dev_warn(dev,
- "Minor version mismatch: driver version %u.%u, MC object version %u.%u\n",
- id->ver_major, id->ver_minor,
- mc_dev->obj_desc.ver_major,
- mc_dev->obj_desc.ver_minor);
- }
-
out:
dev_dbg(dev, "%smatched\n", found ? "" : "not ");
return found;
@@ -251,11 +229,10 @@ static bool fsl_mc_is_root_dprc(struct device *dev)
return dev == root_dprc_dev;
}
-static int get_dprc_icid(struct fsl_mc_io *mc_io,
- int container_id, u16 *icid)
+static int get_dprc_attr(struct fsl_mc_io *mc_io,
+ int container_id, struct dprc_attributes *attr)
{
u16 dprc_handle;
- struct dprc_attributes attr;
int error;
error = dprc_open(mc_io, 0, container_id, &dprc_handle);
@@ -264,15 +241,14 @@ static int get_dprc_icid(struct fsl_mc_io *mc_io,
return error;
}
- memset(&attr, 0, sizeof(attr));
- error = dprc_get_attributes(mc_io, 0, dprc_handle, &attr);
+ memset(attr, 0, sizeof(struct dprc_attributes));
+ error = dprc_get_attributes(mc_io, 0, dprc_handle, attr);
if (error < 0) {
dev_err(mc_io->dev, "dprc_get_attributes() failed: %d\n",
error);
goto common_cleanup;
}
- *icid = attr.icid;
error = 0;
common_cleanup:
@@ -280,6 +256,34 @@ common_cleanup:
return error;
}
+static int get_dprc_icid(struct fsl_mc_io *mc_io,
+ int container_id, u16 *icid)
+{
+ struct dprc_attributes attr;
+ int error;
+
+ error = get_dprc_attr(mc_io, container_id, &attr);
+ if (error == 0)
+ *icid = attr.icid;
+
+ return error;
+}
+
+static int get_dprc_version(struct fsl_mc_io *mc_io,
+ int container_id, u16 *major, u16 *minor)
+{
+ struct dprc_attributes attr;
+ int error;
+
+ error = get_dprc_attr(mc_io, container_id, &attr);
+ if (error == 0) {
+ *major = attr.version.major;
+ *minor = attr.version.minor;
+ }
+
+ return error;
+}
+
static int translate_mc_addr(struct fsl_mc_device *mc_dev,
enum dprc_region_type mc_region_type,
u64 mc_offset, phys_addr_t *phys_addr)
@@ -376,6 +380,8 @@ static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev,
regions[i].end = regions[i].start + region_desc.size - 1;
regions[i].name = "fsl-mc object MMIO region";
regions[i].flags = IORESOURCE_IO;
+ if (region_desc.flags & DPRC_REGION_CACHEABLE)
+ regions[i].flags |= IORESOURCE_CACHEABLE;
}
mc_dev->regions = regions;
@@ -491,6 +497,10 @@ int fsl_mc_device_add(struct dprc_obj_desc *obj_desc,
goto error_cleanup_dev;
}
+ /* Objects are coherent, unless 'no shareability' flag set. */
+ if (!(obj_desc->flags & DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY))
+ arch_setup_dma_ops(&mc_dev->dev, 0, 0, NULL, true);
+
/*
* The device-specific probe callback will get invoked by device_add()
*/
@@ -722,20 +732,6 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
"Freescale Management Complex Firmware version: %u.%u.%u\n",
mc_version.major, mc_version.minor, mc_version.revision);
- if (mc_version.major < MC_VER_MAJOR) {
- dev_err(&pdev->dev,
- "ERROR: MC firmware version not supported by driver (driver version: %u.%u)\n",
- MC_VER_MAJOR, MC_VER_MINOR);
- error = -ENOTSUPP;
- goto error_cleanup_mc_io;
- }
-
- if (mc_version.major > MC_VER_MAJOR) {
- dev_warn(&pdev->dev,
- "WARNING: driver may not support newer MC firmware features (driver version: %u.%u)\n",
- MC_VER_MAJOR, MC_VER_MINOR);
- }
-
error = get_mc_addr_translation_ranges(&pdev->dev,
&mc->translation_ranges,
&mc->num_translation_ranges);
@@ -749,11 +745,15 @@ static int fsl_mc_bus_probe(struct platform_device *pdev)
goto error_cleanup_mc_io;
}
+ memset(&obj_desc, 0, sizeof(struct dprc_obj_desc));
+ error = get_dprc_version(mc_io, container_id,
+ &obj_desc.ver_major, &obj_desc.ver_minor);
+ if (error < 0)
+ goto error_cleanup_mc_io;
+
obj_desc.vendor = FSL_MC_VENDOR_FREESCALE;
strcpy(obj_desc.type, "dprc");
obj_desc.id = container_id;
- obj_desc.ver_major = DPRC_VER_MAJOR;
- obj_desc.ver_minor = DPRC_VER_MINOR;
obj_desc.irq_count = 1;
obj_desc.region_count = 0;
diff --git a/drivers/staging/fsl-mc/bus/mc-msi.c b/drivers/staging/fsl-mc/bus/mc-msi.c
index 3a8258ff4426..e202b2b88c63 100644
--- a/drivers/staging/fsl-mc/bus/mc-msi.c
+++ b/drivers/staging/fsl-mc/bus/mc-msi.c
@@ -37,10 +37,8 @@ static void fsl_mc_msi_update_dom_ops(struct msi_domain_info *info)
/*
* set_desc should not be set by the caller
*/
- if (WARN_ON(ops->set_desc))
- return;
-
- ops->set_desc = fsl_mc_msi_set_desc;
+ if (ops->set_desc == NULL)
+ ops->set_desc = fsl_mc_msi_set_desc;
}
static void __fsl_mc_msi_write_msg(struct fsl_mc_device *mc_bus_dev,
@@ -65,7 +63,7 @@ static void __fsl_mc_msi_write_msg(struct fsl_mc_device *mc_bus_dev,
irq_cfg.paddr = ((u64)msi_desc->msg.address_hi << 32) |
msi_desc->msg.address_lo;
irq_cfg.val = msi_desc->msg.data;
- irq_cfg.user_irq_id = msi_desc->irq;
+ irq_cfg.irq_num = msi_desc->irq;
if (owner_mc_dev == mc_bus_dev) {
/*
@@ -129,10 +127,8 @@ static void fsl_mc_msi_update_chip_ops(struct msi_domain_info *info)
/*
* irq_write_msi_msg should not be set by the caller
*/
- if (WARN_ON(chip->irq_write_msi_msg))
- return;
-
- chip->irq_write_msi_msg = fsl_mc_msi_write_msg;
+ if (chip->irq_write_msi_msg == NULL)
+ chip->irq_write_msi_msg = fsl_mc_msi_write_msg;
}
/**
diff --git a/drivers/staging/fsl-mc/include/dpbp-cmd.h b/drivers/staging/fsl-mc/include/dpbp-cmd.h
index efa9bf33c1a5..c57b454a2912 100644
--- a/drivers/staging/fsl-mc/include/dpbp-cmd.h
+++ b/drivers/staging/fsl-mc/include/dpbp-cmd.h
@@ -34,7 +34,7 @@
/* DPBP Version */
#define DPBP_VER_MAJOR 2
-#define DPBP_VER_MINOR 1
+#define DPBP_VER_MINOR 2
/* Command IDs */
#define DPBP_CMDID_CLOSE 0x800
@@ -57,4 +57,6 @@
#define DPBP_CMDID_GET_IRQ_STATUS 0x016
#define DPBP_CMDID_CLEAR_IRQ_STATUS 0x017
+#define DPBP_CMDID_SET_NOTIFICATIONS 0x01b0
+#define DPBP_CMDID_GET_NOTIFICATIONS 0x01b1
#endif /* _FSL_DPBP_CMD_H */
diff --git a/drivers/staging/fsl-mc/include/dpbp.h b/drivers/staging/fsl-mc/include/dpbp.h
index 37ed951436d5..e14e85a5d6df 100644
--- a/drivers/staging/fsl-mc/include/dpbp.h
+++ b/drivers/staging/fsl-mc/include/dpbp.h
@@ -85,12 +85,12 @@ int dpbp_reset(struct fsl_mc_io *mc_io,
* struct dpbp_irq_cfg - IRQ configuration
* @addr: Address that must be written to signal a message-based interrupt
* @val: Value to write into irq_addr address
- * @user_irq_id: A user defined number associated with this IRQ
+ * @irq_num: A user defined number associated with this IRQ
*/
struct dpbp_irq_cfg {
u64 addr;
u32 val;
- int user_irq_id;
+ int irq_num;
};
int dpbp_set_irq(struct fsl_mc_io *mc_io,
@@ -168,6 +168,53 @@ int dpbp_get_attributes(struct fsl_mc_io *mc_io,
u16 token,
struct dpbp_attr *attr);
+/**
+ * DPBP notifications options
+ */
+
+/**
+ * BPSCN write will attempt to allocate into a cache (coherent write)
+ */
+#define DPBP_NOTIF_OPT_COHERENT_WRITE 0x00000001
+
+/**
+ * struct dpbp_notification_cfg - Structure representing DPBP notifications
+ * towards software
+ * @depletion_entry: below this threshold the pool is "depleted";
+ * set it to '0' to disable it
+ * @depletion_exit: greater than or equal to this threshold the pool exits its
+ * "depleted" state
+ * @surplus_entry: above this threshold the pool is in "surplus" state;
+ * set it to '0' to disable it
+ * @surplus_exit: less than or equal to this threshold the pool exits its
+ * "surplus" state
+ * @message_iova: MUST be given if either 'depletion_entry' or 'surplus_entry'
+ * is not '0' (enable); I/O virtual address (must be in DMA-able memory),
+ * must be 16B aligned.
+ * @message_ctx: The context that will be part of the BPSCN message and will
+ * be written to 'message_iova'
+ * @options: Mask of available options; use 'DPBP_NOTIF_OPT_<X>' values
+ */
+struct dpbp_notification_cfg {
+ u32 depletion_entry;
+ u32 depletion_exit;
+ u32 surplus_entry;
+ u32 surplus_exit;
+ u64 message_iova;
+ u64 message_ctx;
+ u16 options;
+};
+
+int dpbp_set_notifications(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpbp_notification_cfg *cfg);
+
+int dpbp_get_notifications(struct fsl_mc_io *mc_io,
+ u32 cmd_flags,
+ u16 token,
+ struct dpbp_notification_cfg *cfg);
+
/** @} */
#endif /* __FSL_DPBP_H */
diff --git a/drivers/staging/fsl-mc/include/dprc.h b/drivers/staging/fsl-mc/include/dprc.h
index 94c492706315..593b2bbe7f71 100644
--- a/drivers/staging/fsl-mc/include/dprc.h
+++ b/drivers/staging/fsl-mc/include/dprc.h
@@ -94,11 +94,6 @@ int dprc_close(struct fsl_mc_io *mc_io,
*/
#define DPRC_CFG_OPT_TOPOLOGY_CHANGES_ALLOWED 0x00000008
-/* IOMMU bypass - indicates whether objects of this container are permitted
- * to bypass the IOMMU.
- */
-#define DPRC_CFG_OPT_IOMMU_BYPASS 0x00000010
-
/* AIOP - Indicates that container belongs to AIOP. */
#define DPRC_CFG_OPT_AIOP 0x00000020
@@ -173,12 +168,12 @@ int dprc_reset_container(struct fsl_mc_io *mc_io,
* struct dprc_irq_cfg - IRQ configuration
* @paddr: Address that must be written to signal a message-based interrupt
* @val: Value to write into irq_addr address
- * @user_irq_id: A user defined number associated with this IRQ
+ * @irq_num: A user defined number associated with this IRQ
*/
struct dprc_irq_cfg {
phys_addr_t paddr;
u32 val;
- int user_irq_id;
+ int irq_num;
};
int dprc_set_irq(struct fsl_mc_io *mc_io,
@@ -353,6 +348,14 @@ int dprc_get_obj_count(struct fsl_mc_io *mc_io,
#define DPRC_OBJ_STATE_PLUGGED 0x00000002
/**
+ * Shareability flag - Object flag indicating no memory shareability.
+ * The object generates memory accesses that are non-coherent with other
+ * masters; the user is responsible for proper memory handling through
+ * IOMMU configuration.
+ */
+#define DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001
+
+/**
* struct dprc_obj_desc - Object descriptor, returned from dprc_get_obj()
* @type: Type of object: NULL terminated string
* @id: ID of logical object resource
@@ -363,6 +366,7 @@ int dprc_get_obj_count(struct fsl_mc_io *mc_io,
* @region_count: Number of mappable regions supported by the object
* @state: Object state: combination of DPRC_OBJ_STATE_ states
* @label: Object label
+ * @flags: Object's flags
*/
struct dprc_obj_desc {
char type[16];
@@ -374,6 +378,7 @@ struct dprc_obj_desc {
u8 region_count;
u32 state;
char label[16];
+ u16 flags;
};
int dprc_get_obj(struct fsl_mc_io *mc_io,
diff --git a/drivers/staging/fsl-mc/include/mc-private.h b/drivers/staging/fsl-mc/include/mc-private.h
index ee5f1d2bf604..cab1ae90f09e 100644
--- a/drivers/staging/fsl-mc/include/mc-private.h
+++ b/drivers/staging/fsl-mc/include/mc-private.h
@@ -94,12 +94,14 @@ struct fsl_mc_resource_pool {
* from the physical DPRC.
* @irq_resources: Pointer to array of IRQ objects for the IRQ pool
* @scan_mutex: Serializes bus scanning
+ * @dprc_attr: DPRC attributes
*/
struct fsl_mc_bus {
struct fsl_mc_device mc_dev;
struct fsl_mc_resource_pool resource_pools[FSL_MC_NUM_POOL_TYPES];
struct fsl_mc_device_irq *irq_resources;
struct mutex scan_mutex; /* serializes bus scanning */
+ struct dprc_attributes dprc_attr;
};
#define to_fsl_mc_bus(_mc_dev) \
diff --git a/drivers/staging/fwserial/dma_fifo.c b/drivers/staging/fwserial/dma_fifo.c
index 4cd3ed3ee141..8b23a553fd4a 100644
--- a/drivers/staging/fwserial/dma_fifo.c
+++ b/drivers/staging/fwserial/dma_fifo.c
@@ -35,7 +35,7 @@
/*
* private helper fn to determine if check is in open interval (lo,hi)
*/
-static bool addr_check(unsigned check, unsigned lo, unsigned hi)
+static bool addr_check(unsigned int check, unsigned int lo, unsigned int hi)
{
return check - (lo + 1) < (hi - 1) - lo;
}
@@ -64,7 +64,7 @@ void dma_fifo_init(struct dma_fifo *fifo)
* The 'apparent' size will be rounded up to next greater aligned size.
* Returns 0 if no error, otherwise an error code
*/
-int dma_fifo_alloc(struct dma_fifo *fifo, int size, unsigned align,
+int dma_fifo_alloc(struct dma_fifo *fifo, int size, unsigned int align,
int tx_limit, int open_limit, gfp_t gfp_mask)
{
int capacity;
@@ -190,7 +190,7 @@ int dma_fifo_in(struct dma_fifo *fifo, const void *src, int n)
*/
int dma_fifo_out_pend(struct dma_fifo *fifo, struct dma_pending *pended)
{
- unsigned len, n, ofs, l, limit;
+ unsigned int len, n, ofs, l, limit;
if (!fifo->data)
return -ENOENT;
@@ -210,7 +210,7 @@ int dma_fifo_out_pend(struct dma_fifo *fifo, struct dma_pending *pended)
n = len;
ofs = fifo->out % fifo->capacity;
l = fifo->capacity - ofs;
- limit = min_t(unsigned, l, fifo->tx_limit);
+ limit = min_t(unsigned int, l, fifo->tx_limit);
if (n > limit) {
n = limit;
fifo->out += limit;
diff --git a/drivers/staging/fwserial/dma_fifo.h b/drivers/staging/fwserial/dma_fifo.h
index 410988224f89..37a91c6a1709 100644
--- a/drivers/staging/fwserial/dma_fifo.h
+++ b/drivers/staging/fwserial/dma_fifo.h
@@ -45,9 +45,9 @@
#define DMA_FIFO_GUARD 3 /* # of cache lines to reserve for the guard area */
struct dma_fifo {
- unsigned in;
- unsigned out; /* updated when dma is pended */
- unsigned done; /* updated upon dma completion */
+ unsigned int in;
+ unsigned int out; /* updated when dma is pended */
+ unsigned int done; /* updated upon dma completion */
struct {
unsigned corrupt:1;
};
@@ -55,7 +55,7 @@ struct dma_fifo {
int guard; /* ofs of guard area */
int capacity; /* size + reserved */
int avail; /* # of unused bytes in fifo */
- unsigned align; /* must be power of 2 */
+ unsigned int align; /* must be power of 2 */
int tx_limit; /* max # of bytes per dma transaction */
int open_limit; /* max # of outstanding allowed */
int open; /* # of outstanding dma transactions */
@@ -66,9 +66,9 @@ struct dma_fifo {
struct dma_pending {
struct list_head link;
void *data;
- unsigned len;
- unsigned next;
- unsigned out;
+ unsigned int len;
+ unsigned int next;
+ unsigned int out;
};
static inline void dp_mark_completed(struct dma_pending *dp)
@@ -82,7 +82,7 @@ static inline bool dp_is_completed(struct dma_pending *dp)
}
void dma_fifo_init(struct dma_fifo *fifo);
-int dma_fifo_alloc(struct dma_fifo *fifo, int size, unsigned align,
+int dma_fifo_alloc(struct dma_fifo *fifo, int size, unsigned int align,
int tx_limit, int open_limit, gfp_t gfp_mask);
void dma_fifo_free(struct dma_fifo *fifo);
void dma_fifo_reset(struct dma_fifo *fifo);
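Going only by the declarations above (fwserial is the sole in-tree user of this fifo), a hedged usage sketch; the function name example_fifo_setup and every numeric value are illustrative, not taken from the driver.

#include <linux/gfp.h>
#include "dma_fifo.h"

/* Hypothetical setup sketch -- sizes and limits are made up. */
static int example_fifo_setup(struct dma_fifo *fifo)
{
	dma_fifo_init(fifo);
	/* 4 KiB fifo, 64-byte alignment (must be a power of 2), 1 KiB per
	 * DMA transaction, at most 2 transactions outstanding.
	 */
	return dma_fifo_alloc(fifo, 4096, 64, 1024, 2, GFP_KERNEL);
}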
diff --git a/drivers/staging/fwserial/fwserial.c b/drivers/staging/fwserial/fwserial.c
index 1f9389d8c152..c241c0ae3f20 100644
--- a/drivers/staging/fwserial/fwserial.c
+++ b/drivers/staging/fwserial/fwserial.c
@@ -132,7 +132,7 @@ static struct fwtty_peer *__fwserial_peer_by_node_id(struct fw_card *card,
#ifdef FWTTY_PROFILING
-static void fwtty_profile_fifo(struct fwtty_port *port, unsigned *stat)
+static void fwtty_profile_fifo(struct fwtty_port *port, unsigned int *stat)
{
spin_lock_bh(&port->lock);
fwtty_profile_data(stat, dma_fifo_avail(&port->tx_fifo));
@@ -143,7 +143,7 @@ static void fwtty_dump_profile(struct seq_file *m, struct stats *stats)
{
/* for each stat, print sum of 0 to 2^k, then individually */
int k = 4;
- unsigned sum;
+ unsigned int sum;
int j;
char t[10];
@@ -303,9 +303,10 @@ static void fwtty_restart_tx(struct fwtty_port *port)
* Note: in loopback, the port->lock is being held. Only use functions that
* don't attempt to reclaim the port->lock.
*/
-static void fwtty_update_port_status(struct fwtty_port *port, unsigned status)
+static void fwtty_update_port_status(struct fwtty_port *port,
+ unsigned int status)
{
- unsigned delta;
+ unsigned int delta;
struct tty_struct *tty;
/* simulated LSR/MSR status from remote */
@@ -396,9 +397,9 @@ static void fwtty_update_port_status(struct fwtty_port *port, unsigned status)
*
* Note: caller must be holding port lock
*/
-static unsigned __fwtty_port_line_status(struct fwtty_port *port)
+static unsigned int __fwtty_port_line_status(struct fwtty_port *port)
{
- unsigned status = 0;
+ unsigned int status = 0;
/* TODO: add module param to tie RNG to DTR as well */
@@ -424,7 +425,7 @@ static int __fwtty_write_port_status(struct fwtty_port *port)
{
struct fwtty_peer *peer;
int err = -ENOENT;
- unsigned status = __fwtty_port_line_status(port);
+ unsigned int status = __fwtty_port_line_status(port);
rcu_read_lock();
peer = rcu_dereference(port->peer);
@@ -454,7 +455,7 @@ static int fwtty_write_port_status(struct fwtty_port *port)
static void fwtty_throttle_port(struct fwtty_port *port)
{
struct tty_struct *tty;
- unsigned old;
+ unsigned int old;
tty = tty_port_tty_get(&port->port);
if (!tty)
@@ -540,7 +541,7 @@ static void fwtty_emit_breaks(struct work_struct *work)
static int fwtty_rx(struct fwtty_port *port, unsigned char *data, size_t len)
{
int c, n = len;
- unsigned lsr;
+ unsigned int lsr;
int err = 0;
fwtty_dbg(port, "%d\n", n);
@@ -635,7 +636,7 @@ static void fwtty_port_handler(struct fw_card *card,
if (addr != port->rx_handler.offset || len != 4) {
rcode = RCODE_ADDRESS_ERROR;
} else {
- fwtty_update_port_status(port, *(unsigned *)data);
+ fwtty_update_port_status(port, *(unsigned int *)data);
rcode = RCODE_COMPLETE;
}
break;
@@ -828,7 +829,7 @@ static void fwtty_write_xchar(struct fwtty_port *port, char ch)
rcu_read_unlock();
}
-static struct fwtty_port *fwtty_port_get(unsigned index)
+static struct fwtty_port *fwtty_port_get(unsigned int index)
{
struct fwtty_port *port;
@@ -934,9 +935,9 @@ static int fwtty_port_carrier_raised(struct tty_port *tty_port)
return rc;
}
-static unsigned set_termios(struct fwtty_port *port, struct tty_struct *tty)
+static unsigned int set_termios(struct fwtty_port *port, struct tty_struct *tty)
{
- unsigned baud, frame;
+ unsigned int baud, frame;
baud = tty_termios_baud_rate(&tty->termios);
tty_termios_encode_baud_rate(&tty->termios, baud, baud);
@@ -988,7 +989,7 @@ static int fwtty_port_activate(struct tty_port *tty_port,
struct tty_struct *tty)
{
struct fwtty_port *port = to_port(tty_port, port);
- unsigned baud;
+ unsigned int baud;
int err;
set_bit(TTY_IO_ERROR, &tty->flags);
@@ -1264,7 +1265,7 @@ static int set_serial_info(struct fwtty_port *port,
return 0;
}
-static int fwtty_ioctl(struct tty_struct *tty, unsigned cmd,
+static int fwtty_ioctl(struct tty_struct *tty, unsigned int cmd,
unsigned long arg)
{
struct fwtty_port *port = tty->driver_data;
@@ -1297,7 +1298,7 @@ static int fwtty_ioctl(struct tty_struct *tty, unsigned cmd,
static void fwtty_set_termios(struct tty_struct *tty, struct ktermios *old)
{
struct fwtty_port *port = tty->driver_data;
- unsigned baud;
+ unsigned int baud;
spin_lock_bh(&port->lock);
baud = set_termios(port, tty);
@@ -1369,7 +1370,7 @@ static int fwtty_break_ctl(struct tty_struct *tty, int state)
static int fwtty_tiocmget(struct tty_struct *tty)
{
struct fwtty_port *port = tty->driver_data;
- unsigned tiocm;
+ unsigned int tiocm;
spin_lock_bh(&port->lock);
tiocm = (port->mctrl & MCTRL_MASK) | (port->mstatus & ~MCTRL_MASK);
@@ -1380,7 +1381,8 @@ static int fwtty_tiocmget(struct tty_struct *tty)
return tiocm;
}
-static int fwtty_tiocmset(struct tty_struct *tty, unsigned set, unsigned clear)
+static int fwtty_tiocmset(struct tty_struct *tty,
+ unsigned int set, unsigned int clear)
{
struct fwtty_port *port = tty->driver_data;
@@ -1699,7 +1701,7 @@ static void fwserial_virt_plug_complete(struct fwtty_peer *peer,
dma_fifo_change_tx_limit(&port->tx_fifo, port->max_payload);
spin_unlock_bh(&peer->port->lock);
- if (port->port.console && port->fwcon_ops->notify != NULL)
+ if (port->port.console && port->fwcon_ops->notify)
(*port->fwcon_ops->notify)(FWCON_NOTIFY_ATTACH, port->con_data);
fwtty_info(&peer->unit, "peer (guid:%016llx) connected on %s\n",
@@ -1806,7 +1808,7 @@ static void fwserial_release_port(struct fwtty_port *port, bool reset)
RCU_INIT_POINTER(port->peer, NULL);
spin_unlock_bh(&port->lock);
- if (port->port.console && port->fwcon_ops->notify != NULL)
+ if (port->port.console && port->fwcon_ops->notify)
(*port->fwcon_ops->notify)(FWCON_NOTIFY_DETACH, port->con_data);
}
diff --git a/drivers/staging/fwserial/fwserial.h b/drivers/staging/fwserial/fwserial.h
index 6fa936501b3f..30b2481fe32b 100644
--- a/drivers/staging/fwserial/fwserial.h
+++ b/drivers/staging/fwserial/fwserial.h
@@ -22,7 +22,7 @@
#ifdef FWTTY_PROFILING
#define DISTRIBUTION_MAX_SIZE 8192
#define DISTRIBUTION_MAX_INDEX (ilog2(DISTRIBUTION_MAX_SIZE) + 1)
-static inline void fwtty_profile_data(unsigned stat[], unsigned val)
+static inline void fwtty_profile_data(unsigned int stat[], unsigned int val)
{
int n = (val) ? min(ilog2(val) + 1, DISTRIBUTION_MAX_INDEX) : 0;
++stat[n];
@@ -78,7 +78,7 @@ struct fwtty_peer {
u64 guid;
int generation;
int node_id;
- unsigned speed;
+ unsigned int speed;
int max_payload;
u64 mgmt_addr;
@@ -160,17 +160,17 @@ struct fwserial_mgmt_pkt {
#define VIRT_CABLE_PLUG_TIMEOUT (60 * HZ)
struct stats {
- unsigned xchars;
- unsigned dropped;
- unsigned tx_stall;
- unsigned fifo_errs;
- unsigned sent;
- unsigned lost;
- unsigned throttled;
- unsigned reads[DISTRIBUTION_MAX_INDEX + 1];
- unsigned writes[DISTRIBUTION_MAX_INDEX + 1];
- unsigned txns[DISTRIBUTION_MAX_INDEX + 1];
- unsigned unthrottle[DISTRIBUTION_MAX_INDEX + 1];
+ unsigned int xchars;
+ unsigned int dropped;
+ unsigned int tx_stall;
+ unsigned int fifo_errs;
+ unsigned int sent;
+ unsigned int lost;
+ unsigned int throttled;
+ unsigned int reads[DISTRIBUTION_MAX_INDEX + 1];
+ unsigned int writes[DISTRIBUTION_MAX_INDEX + 1];
+ unsigned int txns[DISTRIBUTION_MAX_INDEX + 1];
+ unsigned int unthrottle[DISTRIBUTION_MAX_INDEX + 1];
};
struct fwconsole_ops {
@@ -237,7 +237,7 @@ struct fwconsole_ops {
struct fwtty_port {
struct tty_port port;
struct device *device;
- unsigned index;
+ unsigned int index;
struct fw_serial *serial;
struct fw_address_handler rx_handler;
@@ -246,21 +246,21 @@ struct fwtty_port {
wait_queue_head_t wait_tx;
struct delayed_work emit_breaks;
- unsigned cps;
+ unsigned int cps;
unsigned long break_last;
struct work_struct hangup;
- unsigned mstatus;
+ unsigned int mstatus;
spinlock_t lock;
- unsigned mctrl;
+ unsigned int mctrl;
struct delayed_work drain;
struct dma_fifo tx_fifo;
int max_payload;
- unsigned status_mask;
- unsigned ignore_mask;
- unsigned break_ctl:1,
+ unsigned int status_mask;
+ unsigned int ignore_mask;
+ unsigned int break_ctl:1,
write_only:1,
overrun:1,
loopback:1;
@@ -349,7 +349,7 @@ extern struct tty_driver *fwtty_driver;
* being used for isochronous traffic)
* 2) isochronous arbitration always wins.
*/
-static inline int link_speed_to_max_payload(unsigned speed)
+static inline int link_speed_to_max_payload(unsigned int speed)
{
/* Max async payload is 4096 - see IEEE 1394-2008 tables 6-4, 16-18 */
return min(512 << speed, 4096);
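A quick check of the arithmetic, assuming the usual IEEE 1394 speed-code numbering where S100 = 0, S200 = 1, S400 = 2 and so on (that numbering is an assumption here, matching the SCODE_* constants in <linux/firewire.h>):

#include <assert.h>

static int max_payload(unsigned int speed)
{
	int v = 512 << speed;

	return v < 4096 ? v : 4096;	/* same as min(512 << speed, 4096) */
}

int main(void)
{
	assert(max_payload(0) == 512);	/* S100 */
	assert(max_payload(1) == 1024);	/* S200 */
	assert(max_payload(2) == 2048);	/* S400 */
	assert(max_payload(3) == 4096);	/* S800 already hits the async cap */
	assert(max_payload(4) == 4096);	/* faster links stay clamped */
	return 0;
}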
diff --git a/drivers/staging/gdm724x/gdm_mux.c b/drivers/staging/gdm724x/gdm_mux.c
index 6bedd668324c..400969170d1c 100644
--- a/drivers/staging/gdm724x/gdm_mux.c
+++ b/drivers/staging/gdm724x/gdm_mux.c
@@ -278,8 +278,9 @@ static void gdm_mux_rcv_complete(struct urb *urb)
}
}
-static int gdm_mux_recv(void *priv_dev, int (*cb)(void *data, int len,
- int tty_index, struct tty_dev *tty_dev, int complete))
+static int gdm_mux_recv(void *priv_dev,
+ int (*cb)(void *data, int len, int tty_index,
+ struct tty_dev *tty_dev, int complete))
{
struct mux_dev *mux_dev = priv_dev;
struct usb_device *usbdev = mux_dev->usbdev;
diff --git a/drivers/staging/gdm724x/gdm_usb.c b/drivers/staging/gdm724x/gdm_usb.c
index 9db9b903f1db..d650d772095b 100644
--- a/drivers/staging/gdm724x/gdm_usb.c
+++ b/drivers/staging/gdm724x/gdm_usb.c
@@ -708,7 +708,7 @@ static void do_tx(struct work_struct *work)
#define SDU_PARAM_LEN 12
static int gdm_usb_sdu_send(void *priv_dev, void *data, int len,
- unsigned int dftEpsId, unsigned int epsId,
+ unsigned int dft_eps_ID, unsigned int eps_ID,
void (*cb)(void *data), void *cb_data,
int dev_idx, int nic_type)
{
@@ -746,8 +746,8 @@ static int gdm_usb_sdu_send(void *priv_dev, void *data, int len,
}
sdu->len = gdm_cpu_to_dev16(&udev->gdm_ed, send_len);
- sdu->dftEpsId = gdm_cpu_to_dev32(&udev->gdm_ed, dftEpsId);
- sdu->bearer_ID = gdm_cpu_to_dev32(&udev->gdm_ed, epsId);
+ sdu->dft_eps_ID = gdm_cpu_to_dev32(&udev->gdm_ed, dft_eps_ID);
+ sdu->bearer_ID = gdm_cpu_to_dev32(&udev->gdm_ed, eps_ID);
sdu->nic_type = gdm_cpu_to_dev32(&udev->gdm_ed, nic_type);
t_sdu->len = send_len + HCI_HEADER_SIZE;
diff --git a/drivers/staging/gdm724x/hci_packet.h b/drivers/staging/gdm724x/hci_packet.h
index 7fba8a687faf..dbc4446cf78d 100644
--- a/drivers/staging/gdm724x/hci_packet.h
+++ b/drivers/staging/gdm724x/hci_packet.h
@@ -58,7 +58,7 @@ struct sdu_header {
struct sdu {
u16 cmd_evt;
u16 len;
- u32 dftEpsId;
+ u32 dft_eps_ID;
u32 bearer_ID;
u32 nic_type;
u8 data[0];
diff --git a/drivers/staging/gdm724x/netlink_k.c b/drivers/staging/gdm724x/netlink_k.c
index 9d8347769e88..a0232e8aec10 100644
--- a/drivers/staging/gdm724x/netlink_k.c
+++ b/drivers/staging/gdm724x/netlink_k.c
@@ -88,7 +88,8 @@ static void netlink_rcv(struct sk_buff *skb)
}
struct sock *netlink_init(int unit,
- void (*cb)(struct net_device *dev, u16 type, void *msg, int len))
+ void (*cb)(struct net_device *dev, u16 type,
+ void *msg, int len))
{
struct sock *sock;
struct netlink_kernel_cfg cfg = {
diff --git a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
index 7b7c9786c162..a221f261c3d3 100644
--- a/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
+++ b/drivers/staging/gs_fpgaboot/gs_fpgaboot.c
@@ -93,7 +93,6 @@ static int readlength_bitstream(char *bitdata, int *lendata, int *offset)
return 0;
}
-
/*
* read first 13 bytes to check bitstream magic number
*/
@@ -201,7 +200,7 @@ static int gs_download_image(struct fpgaimage *fimage, enum wbus bus_bytes)
#endif /* DEBUG_FPGA */
if (!xl_supported_prog_bus_width(bus_bytes)) {
pr_err("unsupported program bus width %d\n",
- bus_bytes);
+ bus_bytes);
return -1;
}
@@ -222,7 +221,7 @@ static int gs_download_image(struct fpgaimage *fimage, enum wbus bus_bytes)
pr_info("device init done\n");
for (i = 0; i < size; i += bus_bytes)
- xl_shift_bytes_out(bus_bytes, bitdata+i);
+ xl_shift_bytes_out(bus_bytes, bitdata + i);
pr_info("program done\n");
@@ -277,7 +276,7 @@ static int gs_set_download_method(struct fpgaimage *fimage)
static int init_driver(void)
{
firmware_pdev = platform_device_register_simple("fpgaboot", -1,
- NULL, 0);
+ NULL, 0);
return PTR_ERR_OR_ZERO(firmware_pdev);
}
@@ -331,7 +330,6 @@ err_out1:
kfree(fimage);
return -1;
-
}
static int __init gs_fpgaboot_init(void)
diff --git a/drivers/staging/gs_fpgaboot/gs_fpgaboot.h b/drivers/staging/gs_fpgaboot/gs_fpgaboot.h
index f41f4cc798cc..8cc32555dbf3 100644
--- a/drivers/staging/gs_fpgaboot/gs_fpgaboot.h
+++ b/drivers/staging/gs_fpgaboot/gs_fpgaboot.h
@@ -51,6 +51,6 @@ struct fpgaimage {
char part[MAX_STR];
char date[MAX_STR];
char time[MAX_STR];
- int32_t lendata;
+ int lendata;
char *fpgadata;
};
diff --git a/drivers/staging/gs_fpgaboot/io.c b/drivers/staging/gs_fpgaboot/io.c
index 819db53da64d..c9391198fbfb 100644
--- a/drivers/staging/gs_fpgaboot/io.c
+++ b/drivers/staging/gs_fpgaboot/io.c
@@ -35,7 +35,6 @@ static inline void byte0_out(unsigned char data);
static inline void byte1_out(unsigned char data);
static inline void xl_cclk_b(int32_t i);
-
/* Assert and Deassert CCLK */
void xl_shift_cclk(int count)
{
diff --git a/drivers/staging/i4l/act2000/act2000_isa.c b/drivers/staging/i4l/act2000/act2000_isa.c
index b5fad29a9ba6..f0eb8441deed 100644
--- a/drivers/staging/i4l/act2000/act2000_isa.c
+++ b/drivers/staging/i4l/act2000/act2000_isa.c
@@ -31,7 +31,8 @@ act2000_isa_reset(unsigned short portbase)
int serial = 0;
found = 0;
- if ((reg = inb(portbase + ISA_COR)) != 0xff) {
+ reg = inb(portbase + ISA_COR);
+ if (reg != 0xff) {
outb(reg | ISA_COR_RESET, portbase + ISA_COR);
mdelay(10);
outb(reg, portbase + ISA_COR);
@@ -232,7 +233,7 @@ act2000_isa_receive(act2000_card *card)
{
u_char c;
- if (test_and_set_bit(ACT2000_LOCK_RX, (void *) &card->ilock) != 0)
+ if (test_and_set_bit(ACT2000_LOCK_RX, (void *)&card->ilock) != 0)
return;
while (!act2000_isa_readb(card, &c)) {
if (card->idat.isa.rcvidx < 8) {
@@ -247,7 +248,7 @@ act2000_isa_receive(act2000_card *card)
card->idat.isa.rcvignore = 1;
printk(KERN_WARNING
"act2000_isa_receive: no memory\n");
- test_and_clear_bit(ACT2000_LOCK_RX, (void *) &card->ilock);
+ test_and_clear_bit(ACT2000_LOCK_RX, (void *)&card->ilock);
return;
}
memcpy(skb_put(card->idat.isa.rcvskb, 8), card->idat.isa.rcvhdr, 8);
@@ -287,7 +288,7 @@ act2000_isa_receive(act2000_card *card)
(card->idat.isa.rcvidx < card->idat.isa.rcvlen)))
act2000_schedule_poll(card);
}
- test_and_clear_bit(ACT2000_LOCK_RX, (void *) &card->ilock);
+ test_and_clear_bit(ACT2000_LOCK_RX, (void *)&card->ilock);
}
void
@@ -298,12 +299,13 @@ act2000_isa_send(act2000_card *card)
actcapi_msg *msg;
int l;
- if (test_and_set_bit(ACT2000_LOCK_TX, (void *) &card->ilock) != 0)
+ if (test_and_set_bit(ACT2000_LOCK_TX, (void *)&card->ilock) != 0)
return;
while (1) {
spin_lock_irqsave(&card->lock, flags);
if (!(card->sbuf)) {
- if ((card->sbuf = skb_dequeue(&card->sndq))) {
+ card->sbuf = skb_dequeue(&card->sndq);
+ if (card->sbuf) {
card->ack_msg = card->sbuf->data;
msg = (actcapi_msg *)card->sbuf->data;
if ((msg->hdr.cmd.cmd == 0x86) &&
@@ -317,7 +319,7 @@ act2000_isa_send(act2000_card *card)
spin_unlock_irqrestore(&card->lock, flags);
if (!(card->sbuf)) {
/* No more data to send */
- test_and_clear_bit(ACT2000_LOCK_TX, (void *) &card->ilock);
+ test_and_clear_bit(ACT2000_LOCK_TX, (void *)&card->ilock);
return;
}
skb = card->sbuf;
@@ -325,7 +327,7 @@ act2000_isa_send(act2000_card *card)
while (skb->len) {
if (act2000_isa_writeb(card, *(skb->data))) {
/* Fifo is full, but more data to send */
- test_and_clear_bit(ACT2000_LOCK_TX, (void *) &card->ilock);
+ test_and_clear_bit(ACT2000_LOCK_TX, (void *)&card->ilock);
/* Schedule myself */
act2000_schedule_tx(card);
return;
@@ -356,7 +358,6 @@ act2000_isa_send(act2000_card *card)
static int
act2000_isa_getid(act2000_card *card)
{
-
act2000_fwid fid;
u_char *p = (u_char *)&fid;
int count = 0;
@@ -378,7 +379,8 @@ act2000_isa_getid(act2000_card *card)
printk(KERN_WARNING "act2000: Wrong Firmware-ID!\n");
return -EPROTO;
}
- if ((p = strchr(fid.revision, '\n')))
+ p = strchr(fid.revision, '\n');
+ if (p)
*p = '\0';
printk(KERN_INFO "act2000: Firmware-ID: %s\n", fid.revision);
if (card->flags & ACT2000_FLAGS_IVALID) {
@@ -439,5 +441,5 @@ act2000_isa_download(act2000_card *card, act2000_ddef __user *cb)
}
kfree(buf);
msleep_interruptible(500);
- return (act2000_isa_getid(card));
+ return act2000_isa_getid(card);
}
diff --git a/drivers/staging/i4l/pcbit/capi.h b/drivers/staging/i4l/pcbit/capi.h
index 635f63476944..6f6f4dd0714e 100644
--- a/drivers/staging/i4l/pcbit/capi.h
+++ b/drivers/staging/i4l/pcbit/capi.h
@@ -17,7 +17,7 @@
#define REQ_DISPLAY 0x04
#define REQ_USER_TO_USER 0x08
-#define AppInfoMask REQ_CAUSE | REQ_DISPLAY | REQ_USER_TO_USER
+#define AppInfoMask (REQ_CAUSE | REQ_DISPLAY | REQ_USER_TO_USER)
/* Connection Setup */
extern int capi_conn_req(const char *calledPN, struct sk_buff **buf,
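The parentheses added to AppInfoMask matter because '&' binds tighter than '|'. A small user-space demonstration follows; the value of REQ_CAUSE is not visible in the hunk above, so the 0x02 used below is assumed purely for illustration.

#include <assert.h>

#define REQ_CAUSE		0x02	/* assumed value, for illustration only */
#define REQ_DISPLAY		0x04
#define REQ_USER_TO_USER	0x08

#define OLD_MASK	REQ_CAUSE | REQ_DISPLAY | REQ_USER_TO_USER
#define NEW_MASK	(REQ_CAUSE | REQ_DISPLAY | REQ_USER_TO_USER)

int main(void)
{
	unsigned int info = 0;	/* no request bits set */

	/* old form parses as (info & REQ_CAUSE) | REQ_DISPLAY | ..., never 0 */
	assert((info & OLD_MASK) != 0);
	/* parenthesized form masks the three bits as intended */
	assert((info & NEW_MASK) == 0);
	return 0;
}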
diff --git a/drivers/staging/i4l/pcbit/drv.c b/drivers/staging/i4l/pcbit/drv.c
index 4172e22ae7ed..c5270e229efb 100644
--- a/drivers/staging/i4l/pcbit/drv.c
+++ b/drivers/staging/i4l/pcbit/drv.c
@@ -284,7 +284,7 @@ static int pcbit_command(isdn_ctrl *ctl)
default:
printk(KERN_DEBUG "pcbit_command: unknown command\n");
break;
- };
+ }
return 0;
}
@@ -699,8 +699,8 @@ void pcbit_l3_receive(struct pcbit_dev *dev, ulong msg,
*/
static char statbuf[STATBUF_LEN];
-static int stat_st = 0;
-static int stat_end = 0;
+static int stat_st;
+static int stat_end;
static int pcbit_stat(u_char __user *buf, int len, int driver, int channel)
{
@@ -968,7 +968,7 @@ static int pcbit_ioctl(isdn_ctrl *ctl)
default:
printk("error: unknown ioctl\n");
break;
- };
+ }
return 0;
}
diff --git a/drivers/staging/i4l/pcbit/edss1.c b/drivers/staging/i4l/pcbit/edss1.c
index b2262ba6f0c9..e72c16420712 100644
--- a/drivers/staging/i4l/pcbit/edss1.c
+++ b/drivers/staging/i4l/pcbit/edss1.c
@@ -254,7 +254,7 @@ static void pcbit_fsm_timer(unsigned long data)
dev = chan2dev(chan);
- if (dev == NULL) {
+ if (!dev) {
printk(KERN_WARNING "pcbit: timer for unknown device\n");
return;
}
diff --git a/drivers/staging/i4l/pcbit/layer2.h b/drivers/staging/i4l/pcbit/layer2.h
index be1327bc162a..6b9063e388cd 100644
--- a/drivers/staging/i4l/pcbit/layer2.h
+++ b/drivers/staging/i4l/pcbit/layer2.h
@@ -109,7 +109,7 @@
#define SCHED_READ 0x01
#define SCHED_WRITE 0x02
-#define SET_RUN_TIMEOUT 2 * HZ /* 2 seconds */
+#define SET_RUN_TIMEOUT (2 * HZ) /* 2 seconds */
struct frame_buf {
ulong msg;
diff --git a/drivers/staging/iio/accel/Kconfig b/drivers/staging/iio/accel/Kconfig
index fa67da9408b6..f066aa30f0ac 100644
--- a/drivers/staging/iio/accel/Kconfig
+++ b/drivers/staging/iio/accel/Kconfig
@@ -27,18 +27,6 @@ config ADIS16203
To compile this driver as a module, say M here: the module will be
called adis16203.
-config ADIS16204
- tristate "Analog Devices ADIS16204 Programmable High-g Digital Impact Sensor and Recorder"
- depends on SPI
- select IIO_ADIS_LIB
- select IIO_ADIS_LIB_BUFFER if IIO_BUFFER
- help
- Say Y here to build support for Analog Devices adis16204 Programmable
- High-g Digital Impact Sensor and Recorder.
-
- To compile this driver as a module, say M here: the module will be
- called adis16204.
-
config ADIS16209
tristate "Analog Devices ADIS16209 Dual-Axis Digital Inclinometer and Accelerometer"
depends on SPI
@@ -51,17 +39,6 @@ config ADIS16209
To compile this driver as a module, say M here: the module will be
called adis16209.
-config ADIS16220
- tristate "Analog Devices ADIS16220 Programmable Digital Vibration Sensor"
- depends on SPI
- select IIO_ADIS_LIB
- help
- Say Y here to build support for Analog Devices adis16220 programmable
- digital vibration sensor.
-
- To compile this driver as a module, say M here: the module will be
- called adis16220.
-
config ADIS16240
tristate "Analog Devices ADIS16240 Programmable Impact Sensor and Recorder"
depends on SPI
diff --git a/drivers/staging/iio/accel/Makefile b/drivers/staging/iio/accel/Makefile
index 1ed137f1a506..415329c96f0c 100644
--- a/drivers/staging/iio/accel/Makefile
+++ b/drivers/staging/iio/accel/Makefile
@@ -8,15 +8,9 @@ obj-$(CONFIG_ADIS16201) += adis16201.o
adis16203-y := adis16203_core.o
obj-$(CONFIG_ADIS16203) += adis16203.o
-adis16204-y := adis16204_core.o
-obj-$(CONFIG_ADIS16204) += adis16204.o
-
adis16209-y := adis16209_core.o
obj-$(CONFIG_ADIS16209) += adis16209.o
-adis16220-y := adis16220_core.o
-obj-$(CONFIG_ADIS16220) += adis16220.o
-
adis16240-y := adis16240_core.o
obj-$(CONFIG_ADIS16240) += adis16240.o
diff --git a/drivers/staging/iio/accel/adis16201.h b/drivers/staging/iio/accel/adis16201.h
index e6b8c9af6e22..64844adcaacd 100644
--- a/drivers/staging/iio/accel/adis16201.h
+++ b/drivers/staging/iio/accel/adis16201.h
@@ -3,51 +3,129 @@
#define ADIS16201_STARTUP_DELAY 220 /* ms */
-#define ADIS16201_FLASH_CNT 0x00 /* Flash memory write count */
-#define ADIS16201_SUPPLY_OUT 0x02 /* Output, power supply */
-#define ADIS16201_XACCL_OUT 0x04 /* Output, x-axis accelerometer */
-#define ADIS16201_YACCL_OUT 0x06 /* Output, y-axis accelerometer */
-#define ADIS16201_AUX_ADC 0x08 /* Output, auxiliary ADC input */
-#define ADIS16201_TEMP_OUT 0x0A /* Output, temperature */
-#define ADIS16201_XINCL_OUT 0x0C /* Output, x-axis inclination */
-#define ADIS16201_YINCL_OUT 0x0E /* Output, y-axis inclination */
-#define ADIS16201_XACCL_OFFS 0x10 /* Calibration, x-axis acceleration offset */
-#define ADIS16201_YACCL_OFFS 0x12 /* Calibration, y-axis acceleration offset */
-#define ADIS16201_XACCL_SCALE 0x14 /* x-axis acceleration scale factor */
-#define ADIS16201_YACCL_SCALE 0x16 /* y-axis acceleration scale factor */
-#define ADIS16201_XINCL_OFFS 0x18 /* Calibration, x-axis inclination offset */
-#define ADIS16201_YINCL_OFFS 0x1A /* Calibration, y-axis inclination offset */
-#define ADIS16201_XINCL_SCALE 0x1C /* x-axis inclination scale factor */
-#define ADIS16201_YINCL_SCALE 0x1E /* y-axis inclination scale factor */
-#define ADIS16201_ALM_MAG1 0x20 /* Alarm 1 amplitude threshold */
-#define ADIS16201_ALM_MAG2 0x22 /* Alarm 2 amplitude threshold */
-#define ADIS16201_ALM_SMPL1 0x24 /* Alarm 1, sample period */
-#define ADIS16201_ALM_SMPL2 0x26 /* Alarm 2, sample period */
-#define ADIS16201_ALM_CTRL 0x28 /* Alarm control */
-#define ADIS16201_AUX_DAC 0x30 /* Auxiliary DAC data */
-#define ADIS16201_GPIO_CTRL 0x32 /* General-purpose digital input/output control */
-#define ADIS16201_MSC_CTRL 0x34 /* Miscellaneous control */
-#define ADIS16201_SMPL_PRD 0x36 /* Internal sample period (rate) control */
-#define ADIS16201_AVG_CNT 0x38 /* Operation, filter configuration */
-#define ADIS16201_SLP_CNT 0x3A /* Operation, sleep mode control */
-#define ADIS16201_DIAG_STAT 0x3C /* Diagnostics, system status register */
-#define ADIS16201_GLOB_CMD 0x3E /* Operation, system command register */
+/* Flash memory write count */
+#define ADIS16201_FLASH_CNT 0x00
+
+/* Output, power supply */
+#define ADIS16201_SUPPLY_OUT 0x02
+
+/* Output, x-axis accelerometer */
+#define ADIS16201_XACCL_OUT 0x04
+
+/* Output, y-axis accelerometer */
+#define ADIS16201_YACCL_OUT 0x06
+
+/* Output, auxiliary ADC input */
+#define ADIS16201_AUX_ADC 0x08
+
+/* Output, temperature */
+#define ADIS16201_TEMP_OUT 0x0A
+
+/* Output, x-axis inclination */
+#define ADIS16201_XINCL_OUT 0x0C
+
+/* Output, y-axis inclination */
+#define ADIS16201_YINCL_OUT 0x0E
+
+/* Calibration, x-axis acceleration offset */
+#define ADIS16201_XACCL_OFFS 0x10
+
+/* Calibration, y-axis acceleration offset */
+#define ADIS16201_YACCL_OFFS 0x12
+
+/* x-axis acceleration scale factor */
+#define ADIS16201_XACCL_SCALE 0x14
+
+/* y-axis acceleration scale factor */
+#define ADIS16201_YACCL_SCALE 0x16
+
+/* Calibration, x-axis inclination offset */
+#define ADIS16201_XINCL_OFFS 0x18
+
+/* Calibration, y-axis inclination offset */
+#define ADIS16201_YINCL_OFFS 0x1A
+
+/* x-axis inclination scale factor */
+#define ADIS16201_XINCL_SCALE 0x1C
+
+/* y-axis inclination scale factor */
+#define ADIS16201_YINCL_SCALE 0x1E
+
+/* Alarm 1 amplitude threshold */
+#define ADIS16201_ALM_MAG1 0x20
+
+/* Alarm 2 amplitude threshold */
+#define ADIS16201_ALM_MAG2 0x22
+
+/* Alarm 1, sample period */
+#define ADIS16201_ALM_SMPL1 0x24
+
+/* Alarm 2, sample period */
+#define ADIS16201_ALM_SMPL2 0x26
+
+/* Alarm control */
+#define ADIS16201_ALM_CTRL 0x28
+
+/* Auxiliary DAC data */
+#define ADIS16201_AUX_DAC 0x30
+
+/* General-purpose digital input/output control */
+#define ADIS16201_GPIO_CTRL 0x32
+
+/* Miscellaneous control */
+#define ADIS16201_MSC_CTRL 0x34
+
+/* Internal sample period (rate) control */
+#define ADIS16201_SMPL_PRD 0x36
+
+/* Operation, filter configuration */
+#define ADIS16201_AVG_CNT 0x38
+
+/* Operation, sleep mode control */
+#define ADIS16201_SLP_CNT 0x3A
+
+/* Diagnostics, system status register */
+#define ADIS16201_DIAG_STAT 0x3C
+
+/* Operation, system command register */
+#define ADIS16201_GLOB_CMD 0x3E
/* MSC_CTRL */
-#define ADIS16201_MSC_CTRL_SELF_TEST_EN BIT(8) /* Self-test enable */
-#define ADIS16201_MSC_CTRL_DATA_RDY_EN BIT(2) /* Data-ready enable: 1 = enabled, 0 = disabled */
-#define ADIS16201_MSC_CTRL_ACTIVE_HIGH BIT(1) /* Data-ready polarity: 1 = active high, 0 = active low */
-#define ADIS16201_MSC_CTRL_DATA_RDY_DIO1 BIT(0) /* Data-ready line selection: 1 = DIO1, 0 = DIO0 */
+
+/* Self-test enable */
+#define ADIS16201_MSC_CTRL_SELF_TEST_EN BIT(8)
+
+/* Data-ready enable: 1 = enabled, 0 = disabled */
+#define ADIS16201_MSC_CTRL_DATA_RDY_EN BIT(2)
+
+/* Data-ready polarity: 1 = active high, 0 = active low */
+#define ADIS16201_MSC_CTRL_ACTIVE_HIGH BIT(1)
+
+/* Data-ready line selection: 1 = DIO1, 0 = DIO0 */
+#define ADIS16201_MSC_CTRL_DATA_RDY_DIO1 BIT(0)
/* DIAG_STAT */
-#define ADIS16201_DIAG_STAT_ALARM2 BIT(9) /* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16201_DIAG_STAT_ALARM1 BIT(8) /* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16201_DIAG_STAT_SPI_FAIL_BIT 3 /* SPI communications failure */
-#define ADIS16201_DIAG_STAT_FLASH_UPT_BIT 2 /* Flash update failure */
-#define ADIS16201_DIAG_STAT_POWER_HIGH_BIT 1 /* Power supply above 3.625 V */
-#define ADIS16201_DIAG_STAT_POWER_LOW_BIT 0 /* Power supply below 3.15 V */
+
+/* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
+#define ADIS16201_DIAG_STAT_ALARM2 BIT(9)
+
+/* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
+#define ADIS16201_DIAG_STAT_ALARM1 BIT(8)
+
+/* SPI communications failure */
+#define ADIS16201_DIAG_STAT_SPI_FAIL_BIT 3
+
+/* Flash update failure */
+#define ADIS16201_DIAG_STAT_FLASH_UPT_BIT 2
+
+/* Power supply above 3.625 V */
+#define ADIS16201_DIAG_STAT_POWER_HIGH_BIT 1
+
+/* Power supply below 3.15 V */
+#define ADIS16201_DIAG_STAT_POWER_LOW_BIT 0
/* GLOB_CMD */
+
#define ADIS16201_GLOB_CMD_SW_RESET BIT(7)
#define ADIS16201_GLOB_CMD_FACTORY_CAL BIT(1)
diff --git a/drivers/staging/iio/accel/adis16201_core.c b/drivers/staging/iio/accel/adis16201_core.c
index 06c0b75ed26a..6f3f8ff2a066 100644
--- a/drivers/staging/iio/accel/adis16201_core.c
+++ b/drivers/staging/iio/accel/adis16201_core.c
@@ -167,6 +167,7 @@ static const struct adis_data adis16201_data = {
.diag_stat_reg = ADIS16201_DIAG_STAT,
.self_test_mask = ADIS16201_MSC_CTRL_SELF_TEST_EN,
+ .self_test_no_autoclear = true,
.startup_delay = ADIS16201_STARTUP_DELAY,
.status_error_msgs = adis16201_status_error_msgs,
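The new self_test_no_autoclear entry (also added for adis16203 and adis16209 further below) tells the shared adis core that these parts do not clear the self-test bit in MSC_CTRL on their own. A hedged sketch of what a core routine honouring the flag could look like; example_self_test is illustrative only and is not the actual drivers/iio/imu/adis.c implementation.

#include <linux/delay.h>
#include <linux/iio/imu/adis.h>

/* Illustrative sketch -- not the real adis core self-test path. */
static int example_self_test(struct adis *adis)
{
	int ret;

	ret = adis_write_reg_16(adis, adis->data->msc_ctrl_reg,
				adis->data->self_test_mask);
	if (ret)
		return ret;

	msleep(adis->data->startup_delay);

	/* these parts latch the bit, so clear it explicitly when asked to */
	if (adis->data->self_test_no_autoclear)
		adis_write_reg_16(adis, adis->data->msc_ctrl_reg, 0x00);

	return adis_check_status(adis);
}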
diff --git a/drivers/staging/iio/accel/adis16203.h b/drivers/staging/iio/accel/adis16203.h
index 6426e38bf006..b483e4e6475b 100644
--- a/drivers/staging/iio/accel/adis16203.h
+++ b/drivers/staging/iio/accel/adis16203.h
@@ -3,45 +3,111 @@
#define ADIS16203_STARTUP_DELAY 220 /* ms */
-#define ADIS16203_FLASH_CNT 0x00 /* Flash memory write count */
-#define ADIS16203_SUPPLY_OUT 0x02 /* Output, power supply */
-#define ADIS16203_AUX_ADC 0x08 /* Output, auxiliary ADC input */
-#define ADIS16203_TEMP_OUT 0x0A /* Output, temperature */
-#define ADIS16203_XINCL_OUT 0x0C /* Output, x-axis inclination */
-#define ADIS16203_YINCL_OUT 0x0E /* Output, y-axis inclination */
-#define ADIS16203_INCL_NULL 0x18 /* Incline null calibration */
-#define ADIS16203_ALM_MAG1 0x20 /* Alarm 1 amplitude threshold */
-#define ADIS16203_ALM_MAG2 0x22 /* Alarm 2 amplitude threshold */
-#define ADIS16203_ALM_SMPL1 0x24 /* Alarm 1, sample period */
-#define ADIS16203_ALM_SMPL2 0x26 /* Alarm 2, sample period */
-#define ADIS16203_ALM_CTRL 0x28 /* Alarm control */
-#define ADIS16203_AUX_DAC 0x30 /* Auxiliary DAC data */
-#define ADIS16203_GPIO_CTRL 0x32 /* General-purpose digital input/output control */
-#define ADIS16203_MSC_CTRL 0x34 /* Miscellaneous control */
-#define ADIS16203_SMPL_PRD 0x36 /* Internal sample period (rate) control */
-#define ADIS16203_AVG_CNT 0x38 /* Operation, filter configuration */
-#define ADIS16203_SLP_CNT 0x3A /* Operation, sleep mode control */
-#define ADIS16203_DIAG_STAT 0x3C /* Diagnostics, system status register */
-#define ADIS16203_GLOB_CMD 0x3E /* Operation, system command register */
+/* Flash memory write count */
+#define ADIS16203_FLASH_CNT 0x00
+
+/* Output, power supply */
+#define ADIS16203_SUPPLY_OUT 0x02
+
+/* Output, auxiliary ADC input */
+#define ADIS16203_AUX_ADC 0x08
+
+/* Output, temperature */
+#define ADIS16203_TEMP_OUT 0x0A
+
+/* Output, x-axis inclination */
+#define ADIS16203_XINCL_OUT 0x0C
+
+/* Output, y-axis inclination */
+#define ADIS16203_YINCL_OUT 0x0E
+
+/* Incline null calibration */
+#define ADIS16203_INCL_NULL 0x18
+
+/* Alarm 1 amplitude threshold */
+#define ADIS16203_ALM_MAG1 0x20
+
+/* Alarm 2 amplitude threshold */
+#define ADIS16203_ALM_MAG2 0x22
+
+/* Alarm 1, sample period */
+#define ADIS16203_ALM_SMPL1 0x24
+
+/* Alarm 2, sample period */
+#define ADIS16203_ALM_SMPL2 0x26
+
+/* Alarm control */
+#define ADIS16203_ALM_CTRL 0x28
+
+/* Auxiliary DAC data */
+#define ADIS16203_AUX_DAC 0x30
+
+/* General-purpose digital input/output control */
+#define ADIS16203_GPIO_CTRL 0x32
+
+/* Miscellaneous control */
+#define ADIS16203_MSC_CTRL 0x34
+
+/* Internal sample period (rate) control */
+#define ADIS16203_SMPL_PRD 0x36
+
+/* Operation, filter configuration */
+#define ADIS16203_AVG_CNT 0x38
+
+/* Operation, sleep mode control */
+#define ADIS16203_SLP_CNT 0x3A
+
+/* Diagnostics, system status register */
+#define ADIS16203_DIAG_STAT 0x3C
+
+/* Operation, system command register */
+#define ADIS16203_GLOB_CMD 0x3E
/* MSC_CTRL */
-#define ADIS16203_MSC_CTRL_PWRUP_SELF_TEST BIT(10) /* Self-test at power-on: 1 = disabled, 0 = enabled */
-#define ADIS16203_MSC_CTRL_REVERSE_ROT_EN BIT(9) /* Reverses rotation of both inclination outputs */
-#define ADIS16203_MSC_CTRL_SELF_TEST_EN BIT(8) /* Self-test enable */
-#define ADIS16203_MSC_CTRL_DATA_RDY_EN BIT(2) /* Data-ready enable: 1 = enabled, 0 = disabled */
-#define ADIS16203_MSC_CTRL_ACTIVE_HIGH BIT(1) /* Data-ready polarity: 1 = active high, 0 = active low */
-#define ADIS16203_MSC_CTRL_DATA_RDY_DIO1 BIT(0) /* Data-ready line selection: 1 = DIO1, 0 = DIO0 */
+
+/* Self-test at power-on: 1 = disabled, 0 = enabled */
+#define ADIS16203_MSC_CTRL_PWRUP_SELF_TEST BIT(10)
+
+/* Reverses rotation of both inclination outputs */
+#define ADIS16203_MSC_CTRL_REVERSE_ROT_EN BIT(9)
+
+/* Self-test enable */
+#define ADIS16203_MSC_CTRL_SELF_TEST_EN BIT(8)
+
+/* Data-ready enable: 1 = enabled, 0 = disabled */
+#define ADIS16203_MSC_CTRL_DATA_RDY_EN BIT(2)
+
+/* Data-ready polarity: 1 = active high, 0 = active low */
+#define ADIS16203_MSC_CTRL_ACTIVE_HIGH BIT(1)
+
+/* Data-ready line selection: 1 = DIO1, 0 = DIO0 */
+#define ADIS16203_MSC_CTRL_DATA_RDY_DIO1 BIT(0)
/* DIAG_STAT */
-#define ADIS16203_DIAG_STAT_ALARM2 BIT(9) /* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16203_DIAG_STAT_ALARM1 BIT(8) /* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16203_DIAG_STAT_SELFTEST_FAIL_BIT 5 /* Self-test diagnostic error flag */
-#define ADIS16203_DIAG_STAT_SPI_FAIL_BIT 3 /* SPI communications failure */
-#define ADIS16203_DIAG_STAT_FLASH_UPT_BIT 2 /* Flash update failure */
-#define ADIS16203_DIAG_STAT_POWER_HIGH_BIT 1 /* Power supply above 3.625 V */
-#define ADIS16203_DIAG_STAT_POWER_LOW_BIT 0 /* Power supply below 3.15 V */
+
+/* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
+#define ADIS16203_DIAG_STAT_ALARM2 BIT(9)
+
+/* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
+#define ADIS16203_DIAG_STAT_ALARM1 BIT(8)
+
+/* Self-test diagnostic error flag */
+#define ADIS16203_DIAG_STAT_SELFTEST_FAIL_BIT 5
+
+/* SPI communications failure */
+#define ADIS16203_DIAG_STAT_SPI_FAIL_BIT 3
+
+/* Flash update failure */
+#define ADIS16203_DIAG_STAT_FLASH_UPT_BIT 2
+
+/* Power supply above 3.625 V */
+#define ADIS16203_DIAG_STAT_POWER_HIGH_BIT 1
+
+/* Power supply below 3.15 V */
+#define ADIS16203_DIAG_STAT_POWER_LOW_BIT 0
/* GLOB_CMD */
+
#define ADIS16203_GLOB_CMD_SW_RESET BIT(7)
#define ADIS16203_GLOB_CMD_CLEAR_STAT BIT(4)
#define ADIS16203_GLOB_CMD_FACTORY_CAL BIT(1)
diff --git a/drivers/staging/iio/accel/adis16203_core.c b/drivers/staging/iio/accel/adis16203_core.c
index de5b84ac842b..c70671778bae 100644
--- a/drivers/staging/iio/accel/adis16203_core.c
+++ b/drivers/staging/iio/accel/adis16203_core.c
@@ -134,6 +134,7 @@ static const struct adis_data adis16203_data = {
.diag_stat_reg = ADIS16203_DIAG_STAT,
.self_test_mask = ADIS16203_MSC_CTRL_SELF_TEST_EN,
+ .self_test_no_autoclear = true,
.startup_delay = ADIS16203_STARTUP_DELAY,
.status_error_msgs = adis16203_status_error_msgs,
diff --git a/drivers/staging/iio/accel/adis16204.h b/drivers/staging/iio/accel/adis16204.h
deleted file mode 100644
index 0b23f0b5c52f..000000000000
--- a/drivers/staging/iio/accel/adis16204.h
+++ /dev/null
@@ -1,68 +0,0 @@
-#ifndef SPI_ADIS16204_H_
-#define SPI_ADIS16204_H_
-
-#define ADIS16204_STARTUP_DELAY 220 /* ms */
-
-#define ADIS16204_FLASH_CNT 0x00 /* Flash memory write count */
-#define ADIS16204_SUPPLY_OUT 0x02 /* Output, power supply */
-#define ADIS16204_XACCL_OUT 0x04 /* Output, x-axis accelerometer */
-#define ADIS16204_YACCL_OUT 0x06 /* Output, y-axis accelerometer */
-#define ADIS16204_AUX_ADC 0x08 /* Output, auxiliary ADC input */
-#define ADIS16204_TEMP_OUT 0x0A /* Output, temperature */
-#define ADIS16204_X_PEAK_OUT 0x0C /* Twos complement */
-#define ADIS16204_Y_PEAK_OUT 0x0E /* Twos complement */
-#define ADIS16204_XACCL_NULL 0x10 /* Calibration, x-axis acceleration offset null */
-#define ADIS16204_YACCL_NULL 0x12 /* Calibration, y-axis acceleration offset null */
-#define ADIS16204_XACCL_SCALE 0x14 /* X-axis scale factor calibration register */
-#define ADIS16204_YACCL_SCALE 0x16 /* Y-axis scale factor calibration register */
-#define ADIS16204_XY_RSS_OUT 0x18 /* XY combined acceleration (RSS) */
-#define ADIS16204_XY_PEAK_OUT 0x1A /* Peak, XY combined output (RSS) */
-#define ADIS16204_CAP_BUF_1 0x1C /* Capture buffer output register 1 */
-#define ADIS16204_CAP_BUF_2 0x1E /* Capture buffer output register 2 */
-#define ADIS16204_ALM_MAG1 0x20 /* Alarm 1 amplitude threshold */
-#define ADIS16204_ALM_MAG2 0x22 /* Alarm 2 amplitude threshold */
-#define ADIS16204_ALM_CTRL 0x28 /* Alarm control */
-#define ADIS16204_CAPT_PNTR 0x2A /* Capture register address pointer */
-#define ADIS16204_AUX_DAC 0x30 /* Auxiliary DAC data */
-#define ADIS16204_GPIO_CTRL 0x32 /* General-purpose digital input/output control */
-#define ADIS16204_MSC_CTRL 0x34 /* Miscellaneous control */
-#define ADIS16204_SMPL_PRD 0x36 /* Internal sample period (rate) control */
-#define ADIS16204_AVG_CNT 0x38 /* Operation, filter configuration */
-#define ADIS16204_SLP_CNT 0x3A /* Operation, sleep mode control */
-#define ADIS16204_DIAG_STAT 0x3C /* Diagnostics, system status register */
-#define ADIS16204_GLOB_CMD 0x3E /* Operation, system command register */
-
-/* MSC_CTRL */
-#define ADIS16204_MSC_CTRL_PWRUP_SELF_TEST BIT(10) /* Self-test at power-on: 1 = disabled, 0 = enabled */
-#define ADIS16204_MSC_CTRL_SELF_TEST_EN BIT(8) /* Self-test enable */
-#define ADIS16204_MSC_CTRL_DATA_RDY_EN BIT(2) /* Data-ready enable: 1 = enabled, 0 = disabled */
-#define ADIS16204_MSC_CTRL_ACTIVE_HIGH BIT(1) /* Data-ready polarity: 1 = active high, 0 = active low */
-#define ADIS16204_MSC_CTRL_DATA_RDY_DIO2 BIT(0) /* Data-ready line selection: 1 = DIO2, 0 = DIO1 */
-
-/* DIAG_STAT */
-#define ADIS16204_DIAG_STAT_ALARM2 BIT(9) /* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16204_DIAG_STAT_ALARM1 BIT(8) /* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
-#define ADIS16204_DIAG_STAT_SELFTEST_FAIL_BIT 5 /* Self-test diagnostic error flag: 1 = error condition,
- 0 = normal operation */
-#define ADIS16204_DIAG_STAT_SPI_FAIL_BIT 3 /* SPI communications failure */
-#define ADIS16204_DIAG_STAT_FLASH_UPT_BIT 2 /* Flash update failure */
-#define ADIS16204_DIAG_STAT_POWER_HIGH_BIT 1 /* Power supply above 3.625 V */
-#define ADIS16204_DIAG_STAT_POWER_LOW_BIT 0 /* Power supply below 2.975 V */
-
-/* GLOB_CMD */
-#define ADIS16204_GLOB_CMD_SW_RESET BIT(7)
-#define ADIS16204_GLOB_CMD_CLEAR_STAT BIT(4)
-#define ADIS16204_GLOB_CMD_FACTORY_CAL BIT(1)
-
-#define ADIS16204_ERROR_ACTIVE BIT(14)
-
-enum adis16204_scan {
- ADIS16204_SCAN_ACC_X,
- ADIS16204_SCAN_ACC_Y,
- ADIS16204_SCAN_ACC_XY,
- ADIS16204_SCAN_SUPPLY,
- ADIS16204_SCAN_AUX_ADC,
- ADIS16204_SCAN_TEMP,
-};
-
-#endif /* SPI_ADIS16204_H_ */
diff --git a/drivers/staging/iio/accel/adis16204_core.c b/drivers/staging/iio/accel/adis16204_core.c
deleted file mode 100644
index 20a9df64f1ed..000000000000
--- a/drivers/staging/iio/accel/adis16204_core.c
+++ /dev/null
@@ -1,253 +0,0 @@
-/*
- * ADIS16204 Programmable High-g Digital Impact Sensor and Recorder
- *
- * Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/list.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-#include <linux/iio/buffer.h>
-#include <linux/iio/imu/adis.h>
-
-#include "adis16204.h"
-
-/* Unique to this driver currently */
-
-static const u8 adis16204_addresses[][2] = {
- [ADIS16204_SCAN_ACC_X] = { ADIS16204_XACCL_NULL, ADIS16204_X_PEAK_OUT },
- [ADIS16204_SCAN_ACC_Y] = { ADIS16204_YACCL_NULL, ADIS16204_Y_PEAK_OUT },
- [ADIS16204_SCAN_ACC_XY] = { 0, ADIS16204_XY_PEAK_OUT },
-};
-
-static int adis16204_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2,
- long mask)
-{
- struct adis *st = iio_priv(indio_dev);
- int ret;
- int bits;
- u8 addr;
- s16 val16;
- int addrind;
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- return adis_single_conversion(indio_dev, chan,
- ADIS16204_ERROR_ACTIVE, val);
- case IIO_CHAN_INFO_SCALE:
- switch (chan->type) {
- case IIO_VOLTAGE:
- if (chan->channel == 0) {
- *val = 1;
- *val2 = 220000; /* 1.22 mV */
- } else {
- *val = 0;
- *val2 = 610000; /* 0.61 mV */
- }
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_TEMP:
- *val = -470; /* 0.47 C */
- *val2 = 0;
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_ACCEL:
- *val = 0;
- switch (chan->channel2) {
- case IIO_MOD_X:
- case IIO_MOD_ROOT_SUM_SQUARED_X_Y:
- *val2 = IIO_G_TO_M_S_2(17125); /* 17.125 mg */
- break;
- case IIO_MOD_Y:
- case IIO_MOD_Z:
- *val2 = IIO_G_TO_M_S_2(8407); /* 8.407 mg */
- break;
- }
- return IIO_VAL_INT_PLUS_MICRO;
- default:
- return -EINVAL;
- }
- break;
- case IIO_CHAN_INFO_OFFSET:
- *val = 25000 / -470 - 1278; /* 25 C = 1278 */
- return IIO_VAL_INT;
- case IIO_CHAN_INFO_CALIBBIAS:
- case IIO_CHAN_INFO_PEAK:
- if (mask == IIO_CHAN_INFO_CALIBBIAS) {
- bits = 12;
- addrind = 0;
- } else { /* PEAK_SEPARATE */
- bits = 14;
- addrind = 1;
- }
- mutex_lock(&indio_dev->mlock);
- addr = adis16204_addresses[chan->scan_index][addrind];
- ret = adis_read_reg_16(st, addr, &val16);
- if (ret) {
- mutex_unlock(&indio_dev->mlock);
- return ret;
- }
- val16 &= (1 << bits) - 1;
- val16 = (s16)(val16 << (16 - bits)) >> (16 - bits);
- *val = val16;
- mutex_unlock(&indio_dev->mlock);
- return IIO_VAL_INT;
- }
- return -EINVAL;
-}
-
-static int adis16204_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask)
-{
- struct adis *st = iio_priv(indio_dev);
- int bits;
- s16 val16;
- u8 addr;
-
- switch (mask) {
- case IIO_CHAN_INFO_CALIBBIAS:
- switch (chan->type) {
- case IIO_ACCEL:
- bits = 12;
- break;
- default:
- return -EINVAL;
- }
- val16 = val & ((1 << bits) - 1);
- addr = adis16204_addresses[chan->scan_index][1];
- return adis_write_reg_16(st, addr, val16);
- }
- return -EINVAL;
-}
-
-static const struct iio_chan_spec adis16204_channels[] = {
- ADIS_SUPPLY_CHAN(ADIS16204_SUPPLY_OUT, ADIS16204_SCAN_SUPPLY, 0, 12),
- ADIS_AUX_ADC_CHAN(ADIS16204_AUX_ADC, ADIS16204_SCAN_AUX_ADC, 0, 12),
- ADIS_TEMP_CHAN(ADIS16204_TEMP_OUT, ADIS16204_SCAN_TEMP, 0, 12),
- ADIS_ACCEL_CHAN(X, ADIS16204_XACCL_OUT, ADIS16204_SCAN_ACC_X,
- BIT(IIO_CHAN_INFO_CALIBBIAS) | BIT(IIO_CHAN_INFO_PEAK),
- 0, 14),
- ADIS_ACCEL_CHAN(Y, ADIS16204_YACCL_OUT, ADIS16204_SCAN_ACC_Y,
- BIT(IIO_CHAN_INFO_CALIBBIAS) | BIT(IIO_CHAN_INFO_PEAK),
- 0, 14),
- ADIS_ACCEL_CHAN(ROOT_SUM_SQUARED_X_Y, ADIS16204_XY_RSS_OUT,
- ADIS16204_SCAN_ACC_XY, BIT(IIO_CHAN_INFO_PEAK), 0, 14),
- IIO_CHAN_SOFT_TIMESTAMP(5),
-};
-
-static const struct iio_info adis16204_info = {
- .read_raw = &adis16204_read_raw,
- .write_raw = &adis16204_write_raw,
- .update_scan_mode = adis_update_scan_mode,
- .driver_module = THIS_MODULE,
-};
-
-static const char * const adis16204_status_error_msgs[] = {
- [ADIS16204_DIAG_STAT_SELFTEST_FAIL_BIT] = "Self test failure",
- [ADIS16204_DIAG_STAT_SPI_FAIL_BIT] = "SPI failure",
- [ADIS16204_DIAG_STAT_FLASH_UPT_BIT] = "Flash update failed",
- [ADIS16204_DIAG_STAT_POWER_HIGH_BIT] = "Power supply above 3.625V",
- [ADIS16204_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 2.975V",
-};
-
-static const struct adis_data adis16204_data = {
- .read_delay = 20,
- .msc_ctrl_reg = ADIS16204_MSC_CTRL,
- .glob_cmd_reg = ADIS16204_GLOB_CMD,
- .diag_stat_reg = ADIS16204_DIAG_STAT,
-
- .self_test_mask = ADIS16204_MSC_CTRL_SELF_TEST_EN,
- .startup_delay = ADIS16204_STARTUP_DELAY,
-
- .status_error_msgs = adis16204_status_error_msgs,
- .status_error_mask = BIT(ADIS16204_DIAG_STAT_SELFTEST_FAIL_BIT) |
- BIT(ADIS16204_DIAG_STAT_SPI_FAIL_BIT) |
- BIT(ADIS16204_DIAG_STAT_FLASH_UPT_BIT) |
- BIT(ADIS16204_DIAG_STAT_POWER_HIGH_BIT) |
- BIT(ADIS16204_DIAG_STAT_POWER_LOW_BIT),
-};
-
-static int adis16204_probe(struct spi_device *spi)
-{
- int ret;
- struct adis *st;
- struct iio_dev *indio_dev;
-
- /* setup the industrialio driver allocated elements */
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
- st = iio_priv(indio_dev);
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, indio_dev);
-
- indio_dev->name = spi->dev.driver->name;
- indio_dev->dev.parent = &spi->dev;
- indio_dev->info = &adis16204_info;
- indio_dev->channels = adis16204_channels;
- indio_dev->num_channels = ARRAY_SIZE(adis16204_channels);
- indio_dev->modes = INDIO_DIRECT_MODE;
-
- ret = adis_init(st, indio_dev, spi, &adis16204_data);
- if (ret)
- return ret;
-
- ret = adis_setup_buffer_and_trigger(st, indio_dev, NULL);
- if (ret)
- return ret;
-
- /* Get the device into a sane initial state */
- ret = adis_initial_startup(st);
- if (ret)
- goto error_cleanup_buffer_trigger;
- ret = iio_device_register(indio_dev);
- if (ret)
- goto error_cleanup_buffer_trigger;
-
- return 0;
-
-error_cleanup_buffer_trigger:
- adis_cleanup_buffer_and_trigger(st, indio_dev);
- return ret;
-}
-
-static int adis16204_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
- struct adis *st = iio_priv(indio_dev);
-
- iio_device_unregister(indio_dev);
- adis_cleanup_buffer_and_trigger(st, indio_dev);
-
- return 0;
-}
-
-static struct spi_driver adis16204_driver = {
- .driver = {
- .name = "adis16204",
- },
- .probe = adis16204_probe,
- .remove = adis16204_remove,
-};
-module_spi_driver(adis16204_driver);
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("ADIS16204 High-g Digital Impact Sensor and Recorder");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:adis16204");
diff --git a/drivers/staging/iio/accel/adis16209.h b/drivers/staging/iio/accel/adis16209.h
index 813698d18ec8..315f1c0c46e8 100644
--- a/drivers/staging/iio/accel/adis16209.h
+++ b/drivers/staging/iio/accel/adis16209.h
@@ -5,88 +5,127 @@
/* Flash memory write count */
#define ADIS16209_FLASH_CNT 0x00
+
/* Output, power supply */
#define ADIS16209_SUPPLY_OUT 0x02
+
/* Output, x-axis accelerometer */
#define ADIS16209_XACCL_OUT 0x04
+
/* Output, y-axis accelerometer */
#define ADIS16209_YACCL_OUT 0x06
+
/* Output, auxiliary ADC input */
#define ADIS16209_AUX_ADC 0x08
+
/* Output, temperature */
#define ADIS16209_TEMP_OUT 0x0A
+
/* Output, x-axis inclination */
#define ADIS16209_XINCL_OUT 0x0C
+
/* Output, y-axis inclination */
#define ADIS16209_YINCL_OUT 0x0E
+
/* Output, +/-180 vertical rotational position */
#define ADIS16209_ROT_OUT 0x10
+
/* Calibration, x-axis acceleration offset null */
#define ADIS16209_XACCL_NULL 0x12
+
/* Calibration, y-axis acceleration offset null */
#define ADIS16209_YACCL_NULL 0x14
+
/* Calibration, x-axis inclination offset null */
#define ADIS16209_XINCL_NULL 0x16
+
/* Calibration, y-axis inclination offset null */
#define ADIS16209_YINCL_NULL 0x18
+
/* Calibration, vertical rotation offset null */
#define ADIS16209_ROT_NULL 0x1A
+
/* Alarm 1 amplitude threshold */
#define ADIS16209_ALM_MAG1 0x20
+
/* Alarm 2 amplitude threshold */
#define ADIS16209_ALM_MAG2 0x22
+
/* Alarm 1, sample period */
#define ADIS16209_ALM_SMPL1 0x24
+
/* Alarm 2, sample period */
#define ADIS16209_ALM_SMPL2 0x26
+
/* Alarm control */
#define ADIS16209_ALM_CTRL 0x28
+
/* Auxiliary DAC data */
#define ADIS16209_AUX_DAC 0x30
+
/* General-purpose digital input/output control */
#define ADIS16209_GPIO_CTRL 0x32
+
/* Miscellaneous control */
#define ADIS16209_MSC_CTRL 0x34
+
/* Internal sample period (rate) control */
#define ADIS16209_SMPL_PRD 0x36
+
/* Operation, filter configuration */
#define ADIS16209_AVG_CNT 0x38
+
/* Operation, sleep mode control */
#define ADIS16209_SLP_CNT 0x3A
+
/* Diagnostics, system status register */
#define ADIS16209_DIAG_STAT 0x3C
+
/* Operation, system command register */
#define ADIS16209_GLOB_CMD 0x3E
/* MSC_CTRL */
+
/* Self-test at power-on: 1 = disabled, 0 = enabled */
#define ADIS16209_MSC_CTRL_PWRUP_SELF_TEST BIT(10)
+
/* Self-test enable */
#define ADIS16209_MSC_CTRL_SELF_TEST_EN BIT(8)
+
/* Data-ready enable: 1 = enabled, 0 = disabled */
#define ADIS16209_MSC_CTRL_DATA_RDY_EN BIT(2)
+
/* Data-ready polarity: 1 = active high, 0 = active low */
#define ADIS16209_MSC_CTRL_ACTIVE_HIGH BIT(1)
+
/* Data-ready line selection: 1 = DIO2, 0 = DIO1 */
#define ADIS16209_MSC_CTRL_DATA_RDY_DIO2 BIT(0)
/* DIAG_STAT */
+
/* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
#define ADIS16209_DIAG_STAT_ALARM2 BIT(9)
+
/* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
#define ADIS16209_DIAG_STAT_ALARM1 BIT(8)
+
/* Self-test diagnostic error flag: 1 = error condition, 0 = normal operation */
#define ADIS16209_DIAG_STAT_SELFTEST_FAIL_BIT 5
+
/* SPI communications failure */
#define ADIS16209_DIAG_STAT_SPI_FAIL_BIT 3
+
/* Flash update failure */
#define ADIS16209_DIAG_STAT_FLASH_UPT_BIT 2
+
/* Power supply above 3.625 V */
#define ADIS16209_DIAG_STAT_POWER_HIGH_BIT 1
+
/* Power supply below 3.15 V */
#define ADIS16209_DIAG_STAT_POWER_LOW_BIT 0
/* GLOB_CMD */
+
#define ADIS16209_GLOB_CMD_SW_RESET BIT(7)
#define ADIS16209_GLOB_CMD_CLEAR_STAT BIT(4)
#define ADIS16209_GLOB_CMD_FACTORY_CAL BIT(1)
diff --git a/drivers/staging/iio/accel/adis16209_core.c b/drivers/staging/iio/accel/adis16209_core.c
index 8b42bf8c3f60..8dbad58628a1 100644
--- a/drivers/staging/iio/accel/adis16209_core.c
+++ b/drivers/staging/iio/accel/adis16209_core.c
@@ -168,6 +168,7 @@ static const struct adis_data adis16209_data = {
.diag_stat_reg = ADIS16209_DIAG_STAT,
.self_test_mask = ADIS16209_MSC_CTRL_SELF_TEST_EN,
+ .self_test_no_autoclear = true,
.startup_delay = ADIS16209_STARTUP_DELAY,
.status_error_msgs = adis16209_status_error_msgs,
diff --git a/drivers/staging/iio/accel/adis16220.h b/drivers/staging/iio/accel/adis16220.h
deleted file mode 100644
index eab86331124f..000000000000
--- a/drivers/staging/iio/accel/adis16220.h
+++ /dev/null
@@ -1,140 +0,0 @@
-#ifndef SPI_ADIS16220_H_
-#define SPI_ADIS16220_H_
-
-#include <linux/iio/imu/adis.h>
-
-#define ADIS16220_STARTUP_DELAY 220 /* ms */
-
-/* Flash memory write count */
-#define ADIS16220_FLASH_CNT 0x00
-/* Control, acceleration offset adjustment control */
-#define ADIS16220_ACCL_NULL 0x02
-/* Control, AIN1 offset adjustment control */
-#define ADIS16220_AIN1_NULL 0x04
-/* Control, AIN2 offset adjustment control */
-#define ADIS16220_AIN2_NULL 0x06
-/* Output, power supply during capture */
-#define ADIS16220_CAPT_SUPPLY 0x0A
-/* Output, temperature during capture */
-#define ADIS16220_CAPT_TEMP 0x0C
-/* Output, peak acceleration during capture */
-#define ADIS16220_CAPT_PEAKA 0x0E
-/* Output, peak AIN1 level during capture */
-#define ADIS16220_CAPT_PEAK1 0x10
-/* Output, peak AIN2 level during capture */
-#define ADIS16220_CAPT_PEAK2 0x12
-/* Output, capture buffer for acceleration */
-#define ADIS16220_CAPT_BUFA 0x14
-/* Output, capture buffer for AIN1 */
-#define ADIS16220_CAPT_BUF1 0x16
-/* Output, capture buffer for AIN2 */
-#define ADIS16220_CAPT_BUF2 0x18
-/* Control, capture buffer address pointer */
-#define ADIS16220_CAPT_PNTR 0x1A
-/* Control, capture control register */
-#define ADIS16220_CAPT_CTRL 0x1C
-/* Control, capture period (automatic mode) */
-#define ADIS16220_CAPT_PRD 0x1E
-/* Control, Alarm A, acceleration peak threshold */
-#define ADIS16220_ALM_MAGA 0x20
-/* Control, Alarm 1, AIN1 peak threshold */
-#define ADIS16220_ALM_MAG1 0x22
-/* Control, Alarm 2, AIN2 peak threshold */
-#define ADIS16220_ALM_MAG2 0x24
-/* Control, Alarm S, peak threshold */
-#define ADIS16220_ALM_MAGS 0x26
-/* Control, alarm configuration register */
-#define ADIS16220_ALM_CTRL 0x28
-/* Control, general I/O configuration */
-#define ADIS16220_GPIO_CTRL 0x32
-/* Control, self-test control, AIN configuration */
-#define ADIS16220_MSC_CTRL 0x34
-/* Control, digital I/O configuration */
-#define ADIS16220_DIO_CTRL 0x36
-/* Control, filter configuration */
-#define ADIS16220_AVG_CNT 0x38
-/* Status, system status */
-#define ADIS16220_DIAG_STAT 0x3C
-/* Control, system commands */
-#define ADIS16220_GLOB_CMD 0x3E
-/* Status, self-test response */
-#define ADIS16220_ST_DELTA 0x40
-/* Lot Identification Code 1 */
-#define ADIS16220_LOT_ID1 0x52
-/* Lot Identification Code 2 */
-#define ADIS16220_LOT_ID2 0x54
-/* Product identifier; convert to decimal = 16220 */
-#define ADIS16220_PROD_ID 0x56
-/* Serial number */
-#define ADIS16220_SERIAL_NUM 0x58
-
-#define ADIS16220_CAPTURE_SIZE 2048
-
-/* MSC_CTRL */
-#define ADIS16220_MSC_CTRL_SELF_TEST_EN BIT(8)
-#define ADIS16220_MSC_CTRL_POWER_SUP_COM_AIN1 BIT(1)
-#define ADIS16220_MSC_CTRL_POWER_SUP_COM_AIN2 BIT(0)
-
-/* DIO_CTRL */
-#define ADIS16220_MSC_CTRL_DIO2_BUSY_IND (BIT(5) | BIT(4))
-#define ADIS16220_MSC_CTRL_DIO1_BUSY_IND (BIT(3) | BIT(2))
-#define ADIS16220_MSC_CTRL_DIO2_ACT_HIGH BIT(1)
-#define ADIS16220_MSC_CTRL_DIO1_ACT_HIGH BIT(0)
-
-/* DIAG_STAT */
-/* AIN2 sample > ALM_MAG2 */
-#define ADIS16220_DIAG_STAT_ALM_MAG2 BIT(14)
-/* AIN1 sample > ALM_MAG1 */
-#define ADIS16220_DIAG_STAT_ALM_MAG1 BIT(13)
-/* Acceleration sample > ALM_MAGA */
-#define ADIS16220_DIAG_STAT_ALM_MAGA BIT(12)
-/* Error condition programmed into ALM_MAGS[11:0] and ALM_CTRL[5:4] is true */
-#define ADIS16220_DIAG_STAT_ALM_MAGS BIT(11)
-/* |Peak value in AIN2 data capture| > ALM_MAG2 */
-#define ADIS16220_DIAG_STAT_PEAK_AIN2 BIT(10)
-/* |Peak value in AIN1 data capture| > ALM_MAG1 */
-#define ADIS16220_DIAG_STAT_PEAK_AIN1 BIT(9)
-/* |Peak value in acceleration data capture| > ALM_MAGA */
-#define ADIS16220_DIAG_STAT_PEAK_ACCEL BIT(8)
-/* Data ready, capture complete */
-#define ADIS16220_DIAG_STAT_DATA_RDY BIT(7)
-#define ADIS16220_DIAG_STAT_FLASH_CHK BIT(6)
-#define ADIS16220_DIAG_STAT_SELF_TEST BIT(5)
-/* Capture period violation/interruption */
-#define ADIS16220_DIAG_STAT_VIOLATION_BIT 4
-/* SPI communications failure */
-#define ADIS16220_DIAG_STAT_SPI_FAIL_BIT 3
-/* Flash update failure */
-#define ADIS16220_DIAG_STAT_FLASH_UPT_BIT 2
-/* Power supply above 3.625 V */
-#define ADIS16220_DIAG_STAT_POWER_HIGH_BIT 1
-/* Power supply below 3.15 V */
-#define ADIS16220_DIAG_STAT_POWER_LOW_BIT 0
-
-/* GLOB_CMD */
-#define ADIS16220_GLOB_CMD_SW_RESET BIT(7)
-#define ADIS16220_GLOB_CMD_SELF_TEST BIT(2)
-#define ADIS16220_GLOB_CMD_PWR_DOWN BIT(1)
-
-#define ADIS16220_MAX_TX 2048
-#define ADIS16220_MAX_RX 2048
-
-#define ADIS16220_SPI_BURST (u32)(1000 * 1000)
-#define ADIS16220_SPI_FAST (u32)(2000 * 1000)
-
-/**
- * struct adis16220_state - device instance specific data
- * @adis: adis device
- * @tx: transmit buffer
- * @rx: receive buffer
- * @buf_lock: mutex to protect tx and rx
- **/
-struct adis16220_state {
- struct adis adis;
-
- struct mutex buf_lock;
- u8 tx[ADIS16220_MAX_TX] ____cacheline_aligned;
- u8 rx[ADIS16220_MAX_RX];
-};
-
-#endif /* SPI_ADIS16220_H_ */
diff --git a/drivers/staging/iio/accel/adis16220_core.c b/drivers/staging/iio/accel/adis16220_core.c
deleted file mode 100644
index d0165218b60c..000000000000
--- a/drivers/staging/iio/accel/adis16220_core.c
+++ /dev/null
@@ -1,494 +0,0 @@
-/*
- * ADIS16220 Programmable Digital Vibration Sensor driver
- *
- * Copyright 2010 Analog Devices Inc.
- *
- * Licensed under the GPL-2 or later.
- */
-
-#include <linux/delay.h>
-#include <linux/mutex.h>
-#include <linux/device.h>
-#include <linux/kernel.h>
-#include <linux/spi/spi.h>
-#include <linux/slab.h>
-#include <linux/sysfs.h>
-#include <linux/module.h>
-
-#include <linux/iio/iio.h>
-#include <linux/iio/sysfs.h>
-
-#include "adis16220.h"
-
-static ssize_t adis16220_read_16bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct adis16220_state *st = iio_priv(indio_dev);
- ssize_t ret;
- u16 val;
-
- /* Take the iio_dev status lock */
- mutex_lock(&indio_dev->mlock);
- ret = adis_read_reg_16(&st->adis, this_attr->address, &val);
- mutex_unlock(&indio_dev->mlock);
- if (ret)
- return ret;
- return sprintf(buf, "%u\n", val);
-}
-
-static ssize_t adis16220_write_16bit(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- struct adis16220_state *st = iio_priv(indio_dev);
- int ret;
- u16 val;
-
- ret = kstrtou16(buf, 10, &val);
- if (ret)
- goto error_ret;
- ret = adis_write_reg_16(&st->adis, this_attr->address, val);
-
-error_ret:
- return ret ? ret : len;
-}
-
-static int adis16220_capture(struct iio_dev *indio_dev)
-{
- struct adis16220_state *st = iio_priv(indio_dev);
- int ret;
-
- /* initiates a manual data capture */
- ret = adis_write_reg_16(&st->adis, ADIS16220_GLOB_CMD, 0xBF08);
- if (ret)
- dev_err(&indio_dev->dev, "problem beginning capture");
-
- usleep_range(10000, 11000); /* delay for capture to finish */
-
- return ret;
-}
-
-static ssize_t adis16220_write_capture(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t len)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(dev);
- bool val;
- int ret;
-
- ret = strtobool(buf, &val);
- if (ret)
- return ret;
- if (!val)
- return -EINVAL;
- ret = adis16220_capture(indio_dev);
- if (ret)
- return ret;
-
- return len;
-}
-
-static ssize_t adis16220_capture_buffer_read(struct iio_dev *indio_dev,
- char *buf,
- loff_t off,
- size_t count,
- int addr)
-{
- struct adis16220_state *st = iio_priv(indio_dev);
- struct spi_transfer xfers[] = {
- {
- .tx_buf = st->tx,
- .bits_per_word = 8,
- .len = 2,
- .cs_change = 1,
- .delay_usecs = 25,
- }, {
- .tx_buf = st->tx,
- .rx_buf = st->rx,
- .bits_per_word = 8,
- .cs_change = 1,
- .delay_usecs = 25,
- },
- };
- int ret;
- int i;
-
- if (unlikely(!count))
- return count;
-
- if ((off >= ADIS16220_CAPTURE_SIZE) || (count & 1) || (off & 1))
- return -EINVAL;
-
- if (off + count > ADIS16220_CAPTURE_SIZE)
- count = ADIS16220_CAPTURE_SIZE - off;
-
- /* write the begin position of capture buffer */
- ret = adis_write_reg_16(&st->adis,
- ADIS16220_CAPT_PNTR,
- off > 1);
- if (ret)
- return -EIO;
-
- /* read count/2 values from capture buffer */
- mutex_lock(&st->buf_lock);
-
- for (i = 0; i < count; i += 2) {
- st->tx[i] = ADIS_READ_REG(addr);
- st->tx[i + 1] = 0;
- }
- xfers[1].len = count;
-
- ret = spi_sync_transfer(st->adis.spi, xfers, ARRAY_SIZE(xfers));
- if (ret) {
- mutex_unlock(&st->buf_lock);
- return -EIO;
- }
-
- memcpy(buf, st->rx, count);
-
- mutex_unlock(&st->buf_lock);
- return count;
-}
-
-static ssize_t adis16220_accel_bin_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf,
- loff_t off,
- size_t count)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(kobj_to_dev(kobj));
-
- return adis16220_capture_buffer_read(indio_dev, buf,
- off, count,
- ADIS16220_CAPT_BUFA);
-}
-
-static struct bin_attribute accel_bin = {
- .attr = {
- .name = "accel_bin",
- .mode = S_IRUGO,
- },
- .read = adis16220_accel_bin_read,
- .size = ADIS16220_CAPTURE_SIZE,
-};
-
-static ssize_t adis16220_adc1_bin_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(kobj_to_dev(kobj));
-
- return adis16220_capture_buffer_read(indio_dev, buf,
- off, count,
- ADIS16220_CAPT_BUF1);
-}
-
-static struct bin_attribute adc1_bin = {
- .attr = {
- .name = "in0_bin",
- .mode = S_IRUGO,
- },
- .read = adis16220_adc1_bin_read,
- .size = ADIS16220_CAPTURE_SIZE,
-};
-
-static ssize_t adis16220_adc2_bin_read(struct file *filp, struct kobject *kobj,
- struct bin_attribute *attr,
- char *buf, loff_t off,
- size_t count)
-{
- struct iio_dev *indio_dev = dev_to_iio_dev(kobj_to_dev(kobj));
-
- return adis16220_capture_buffer_read(indio_dev, buf,
- off, count,
- ADIS16220_CAPT_BUF2);
-}
-
-static struct bin_attribute adc2_bin = {
- .attr = {
- .name = "in1_bin",
- .mode = S_IRUGO,
- },
- .read = adis16220_adc2_bin_read,
- .size = ADIS16220_CAPTURE_SIZE,
-};
-
-#define IIO_DEV_ATTR_CAPTURE(_store) \
- IIO_DEVICE_ATTR(capture, S_IWUSR, NULL, _store, 0)
-
-static IIO_DEV_ATTR_CAPTURE(adis16220_write_capture);
-
-#define IIO_DEV_ATTR_CAPTURE_COUNT(_mode, _show, _store, _addr) \
- IIO_DEVICE_ATTR(capture_count, _mode, _show, _store, _addr)
-
-static IIO_DEV_ATTR_CAPTURE_COUNT(S_IWUSR | S_IRUGO,
- adis16220_read_16bit,
- adis16220_write_16bit,
- ADIS16220_CAPT_PNTR);
-
-enum adis16220_channel {
- in_supply, in_1, in_2, accel, temp
-};
-
-struct adis16220_address_spec {
- u8 addr;
- u8 bits;
- bool sign;
-};
-
-/* Address / bits / signed */
-static const struct adis16220_address_spec adis16220_addresses[][3] = {
- [in_supply] = { { ADIS16220_CAPT_SUPPLY, 12, 0 }, },
- [in_1] = { { ADIS16220_CAPT_BUF1, 16, 1 },
- { ADIS16220_AIN1_NULL, 16, 1 },
- { ADIS16220_CAPT_PEAK1, 16, 1 }, },
- [in_2] = { { ADIS16220_CAPT_BUF2, 16, 1 },
- { ADIS16220_AIN2_NULL, 16, 1 },
- { ADIS16220_CAPT_PEAK2, 16, 1 }, },
- [accel] = { { ADIS16220_CAPT_BUFA, 16, 1 },
- { ADIS16220_ACCL_NULL, 16, 1 },
- { ADIS16220_CAPT_PEAKA, 16, 1 }, },
- [temp] = { { ADIS16220_CAPT_TEMP, 12, 0 }, }
-};
-
-static int adis16220_read_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int *val, int *val2,
- long mask)
-{
- struct adis16220_state *st = iio_priv(indio_dev);
- const struct adis16220_address_spec *addr;
- int ret = -EINVAL;
- int addrind = 0;
- u16 uval;
- s16 sval;
- u8 bits;
-
- switch (mask) {
- case IIO_CHAN_INFO_RAW:
- addrind = 0;
- break;
- case IIO_CHAN_INFO_OFFSET:
- if (chan->type == IIO_TEMP) {
- *val = 25000 / -470 - 1278; /* 25 C = 1278 */
- return IIO_VAL_INT;
- }
- addrind = 1;
- break;
- case IIO_CHAN_INFO_PEAK:
- addrind = 2;
- break;
- case IIO_CHAN_INFO_SCALE:
- switch (chan->type) {
- case IIO_TEMP:
- *val = -470; /* -0.47 C */
- *val2 = 0;
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_ACCEL:
- *val2 = IIO_G_TO_M_S_2(19073); /* 19.073 g */
- return IIO_VAL_INT_PLUS_MICRO;
- case IIO_VOLTAGE:
- if (chan->channel == 0) {
- *val = 1;
- *val2 = 220700; /* 1.2207 mV */
- } else {
- /* Should really be dependent on VDD */
- *val2 = 305180; /* 305.18 uV */
- }
- return IIO_VAL_INT_PLUS_MICRO;
- default:
- return -EINVAL;
- }
- default:
- return -EINVAL;
- }
- addr = &adis16220_addresses[chan->address][addrind];
- if (addr->sign) {
- ret = adis_read_reg_16(&st->adis, addr->addr, &sval);
- if (ret)
- return ret;
- bits = addr->bits;
- sval &= (1 << bits) - 1;
- sval = (s16)(sval << (16 - bits)) >> (16 - bits);
- *val = sval;
- return IIO_VAL_INT;
- }
- ret = adis_read_reg_16(&st->adis, addr->addr, &uval);
- if (ret)
- return ret;
- bits = addr->bits;
- uval &= (1 << bits) - 1;
- *val = uval;
- return IIO_VAL_INT;
-}
-
-static const struct iio_chan_spec adis16220_channels[] = {
- {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 0,
- .extend_name = "supply",
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_SCALE),
- .address = in_supply,
- }, {
- .type = IIO_ACCEL,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_OFFSET) |
- BIT(IIO_CHAN_INFO_SCALE) |
- BIT(IIO_CHAN_INFO_PEAK),
- .address = accel,
- }, {
- .type = IIO_TEMP,
- .indexed = 1,
- .channel = 0,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_OFFSET) |
- BIT(IIO_CHAN_INFO_SCALE),
- .address = temp,
- }, {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 1,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
- BIT(IIO_CHAN_INFO_OFFSET) |
- BIT(IIO_CHAN_INFO_SCALE),
- .address = in_1,
- }, {
- .type = IIO_VOLTAGE,
- .indexed = 1,
- .channel = 2,
- .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
- .address = in_2,
- }
-};
-
-static struct attribute *adis16220_attributes[] = {
- &iio_dev_attr_capture.dev_attr.attr,
- &iio_dev_attr_capture_count.dev_attr.attr,
- NULL
-};
-
-static const struct attribute_group adis16220_attribute_group = {
- .attrs = adis16220_attributes,
-};
-
-static const struct iio_info adis16220_info = {
- .attrs = &adis16220_attribute_group,
- .driver_module = THIS_MODULE,
- .read_raw = &adis16220_read_raw,
-};
-
-static const char * const adis16220_status_error_msgs[] = {
- [ADIS16220_DIAG_STAT_VIOLATION_BIT] = "Capture period violation/interruption",
- [ADIS16220_DIAG_STAT_SPI_FAIL_BIT] = "SPI failure",
- [ADIS16220_DIAG_STAT_FLASH_UPT_BIT] = "Flash update failed",
- [ADIS16220_DIAG_STAT_POWER_HIGH_BIT] = "Power supply above 3.625V",
- [ADIS16220_DIAG_STAT_POWER_LOW_BIT] = "Power supply below 3.15V",
-};
-
-static const struct adis_data adis16220_data = {
- .read_delay = 35,
- .write_delay = 35,
- .msc_ctrl_reg = ADIS16220_MSC_CTRL,
- .glob_cmd_reg = ADIS16220_GLOB_CMD,
- .diag_stat_reg = ADIS16220_DIAG_STAT,
-
- .self_test_mask = ADIS16220_MSC_CTRL_SELF_TEST_EN,
- .startup_delay = ADIS16220_STARTUP_DELAY,
-
- .status_error_msgs = adis16220_status_error_msgs,
- .status_error_mask = BIT(ADIS16220_DIAG_STAT_VIOLATION_BIT) |
- BIT(ADIS16220_DIAG_STAT_SPI_FAIL_BIT) |
- BIT(ADIS16220_DIAG_STAT_FLASH_UPT_BIT) |
- BIT(ADIS16220_DIAG_STAT_POWER_HIGH_BIT) |
- BIT(ADIS16220_DIAG_STAT_POWER_LOW_BIT),
-};
-
-static int adis16220_probe(struct spi_device *spi)
-{
- int ret;
- struct adis16220_state *st;
- struct iio_dev *indio_dev;
-
- /* setup the industrialio driver allocated elements */
- indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
- if (!indio_dev)
- return -ENOMEM;
-
- st = iio_priv(indio_dev);
- /* this is only used for removal purposes */
- spi_set_drvdata(spi, indio_dev);
-
- indio_dev->name = spi->dev.driver->name;
- indio_dev->dev.parent = &spi->dev;
- indio_dev->info = &adis16220_info;
- indio_dev->modes = INDIO_DIRECT_MODE;
- indio_dev->channels = adis16220_channels;
- indio_dev->num_channels = ARRAY_SIZE(adis16220_channels);
-
- ret = devm_iio_device_register(&spi->dev, indio_dev);
- if (ret)
- return ret;
-
- ret = sysfs_create_bin_file(&indio_dev->dev.kobj, &accel_bin);
- if (ret)
- return ret;
-
- ret = sysfs_create_bin_file(&indio_dev->dev.kobj, &adc1_bin);
- if (ret)
- goto error_rm_accel_bin;
-
- ret = sysfs_create_bin_file(&indio_dev->dev.kobj, &adc2_bin);
- if (ret)
- goto error_rm_adc1_bin;
-
- ret = adis_init(&st->adis, indio_dev, spi, &adis16220_data);
- if (ret)
- goto error_rm_adc2_bin;
- /* Get the device into a sane initial state */
- ret = adis_initial_startup(&st->adis);
- if (ret)
- goto error_rm_adc2_bin;
- return 0;
-
-error_rm_adc2_bin:
- sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc2_bin);
-error_rm_adc1_bin:
- sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc1_bin);
-error_rm_accel_bin:
- sysfs_remove_bin_file(&indio_dev->dev.kobj, &accel_bin);
- return ret;
-}
-
-static int adis16220_remove(struct spi_device *spi)
-{
- struct iio_dev *indio_dev = spi_get_drvdata(spi);
-
- sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc2_bin);
- sysfs_remove_bin_file(&indio_dev->dev.kobj, &adc1_bin);
- sysfs_remove_bin_file(&indio_dev->dev.kobj, &accel_bin);
-
- return 0;
-}
-
-static struct spi_driver adis16220_driver = {
- .driver = {
- .name = "adis16220",
- },
- .probe = adis16220_probe,
- .remove = adis16220_remove,
-};
-module_spi_driver(adis16220_driver);
-
-MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>");
-MODULE_DESCRIPTION("Analog Devices ADIS16220 Digital Vibration Sensor");
-MODULE_LICENSE("GPL v2");
-MODULE_ALIAS("spi:adis16220");
diff --git a/drivers/staging/iio/accel/adis16240.h b/drivers/staging/iio/accel/adis16240.h
index 66b5ad2f42c5..b2cb37b95913 100644
--- a/drivers/staging/iio/accel/adis16240.h
+++ b/drivers/staging/iio/accel/adis16240.h
@@ -5,110 +5,160 @@
/* Flash memory write count */
#define ADIS16240_FLASH_CNT 0x00
+
/* Output, power supply */
#define ADIS16240_SUPPLY_OUT 0x02
+
/* Output, x-axis accelerometer */
#define ADIS16240_XACCL_OUT 0x04
+
/* Output, y-axis accelerometer */
#define ADIS16240_YACCL_OUT 0x06
+
/* Output, z-axis accelerometer */
#define ADIS16240_ZACCL_OUT 0x08
+
/* Output, auxiliary ADC input */
#define ADIS16240_AUX_ADC 0x0A
+
/* Output, temperature */
#define ADIS16240_TEMP_OUT 0x0C
+
/* Output, x-axis acceleration peak */
#define ADIS16240_XPEAK_OUT 0x0E
+
/* Output, y-axis acceleration peak */
#define ADIS16240_YPEAK_OUT 0x10
+
/* Output, z-axis acceleration peak */
#define ADIS16240_ZPEAK_OUT 0x12
+
/* Output, sum-of-squares acceleration peak */
#define ADIS16240_XYZPEAK_OUT 0x14
+
/* Output, Capture Buffer 1, X and Y acceleration */
#define ADIS16240_CAPT_BUF1 0x16
+
/* Output, Capture Buffer 2, Z acceleration */
#define ADIS16240_CAPT_BUF2 0x18
+
/* Diagnostic, error flags */
#define ADIS16240_DIAG_STAT 0x1A
+
/* Diagnostic, event counter */
#define ADIS16240_EVNT_CNTR 0x1C
+
/* Diagnostic, check sum value from firmware test */
#define ADIS16240_CHK_SUM 0x1E
+
/* Calibration, x-axis acceleration offset adjustment */
#define ADIS16240_XACCL_OFF 0x20
+
/* Calibration, y-axis acceleration offset adjustment */
#define ADIS16240_YACCL_OFF 0x22
+
/* Calibration, z-axis acceleration offset adjustment */
#define ADIS16240_ZACCL_OFF 0x24
+
/* Clock, hour and minute */
#define ADIS16240_CLK_TIME 0x2E
+
/* Clock, month and day */
#define ADIS16240_CLK_DATE 0x30
+
/* Clock, year */
#define ADIS16240_CLK_YEAR 0x32
+
/* Wake-up setting, hour and minute */
#define ADIS16240_WAKE_TIME 0x34
+
/* Wake-up setting, month and day */
#define ADIS16240_WAKE_DATE 0x36
+
/* Alarm 1 amplitude threshold */
#define ADIS16240_ALM_MAG1 0x38
+
/* Alarm 2 amplitude threshold */
#define ADIS16240_ALM_MAG2 0x3A
+
/* Alarm control */
#define ADIS16240_ALM_CTRL 0x3C
+
/* Capture, external trigger control */
#define ADIS16240_XTRIG_CTRL 0x3E
+
/* Capture, address pointer */
#define ADIS16240_CAPT_PNTR 0x40
+
/* Capture, configuration and control */
#define ADIS16240_CAPT_CTRL 0x42
+
/* General-purpose digital input/output control */
#define ADIS16240_GPIO_CTRL 0x44
+
/* Miscellaneous control */
#define ADIS16240_MSC_CTRL 0x46
+
/* Internal sample period (rate) control */
#define ADIS16240_SMPL_PRD 0x48
+
/* System command */
#define ADIS16240_GLOB_CMD 0x4A
/* MSC_CTRL */
+
/* Enables sum-of-squares output (XYZPEAK_OUT) */
#define ADIS16240_MSC_CTRL_XYZPEAK_OUT_EN BIT(15)
+
/* Enables peak tracking output (XPEAK_OUT, YPEAK_OUT, and ZPEAK_OUT) */
#define ADIS16240_MSC_CTRL_X_Y_ZPEAK_OUT_EN BIT(14)
+
/* Self-test enable: 1 = apply electrostatic force, 0 = disabled */
#define ADIS16240_MSC_CTRL_SELF_TEST_EN BIT(8)
+
/* Data-ready enable: 1 = enabled, 0 = disabled */
#define ADIS16240_MSC_CTRL_DATA_RDY_EN BIT(2)
+
/* Data-ready polarity: 1 = active high, 0 = active low */
#define ADIS16240_MSC_CTRL_ACTIVE_HIGH BIT(1)
+
/* Data-ready line selection: 1 = DIO2, 0 = DIO1 */
#define ADIS16240_MSC_CTRL_DATA_RDY_DIO2 BIT(0)
/* DIAG_STAT */
+
/* Alarm 2 status: 1 = alarm active, 0 = alarm inactive */
#define ADIS16240_DIAG_STAT_ALARM2 BIT(9)
+
/* Alarm 1 status: 1 = alarm active, 0 = alarm inactive */
#define ADIS16240_DIAG_STAT_ALARM1 BIT(8)
+
/* Capture buffer full: 1 = capture buffer is full */
#define ADIS16240_DIAG_STAT_CPT_BUF_FUL BIT(7)
+
/* Flash test, checksum flag: 1 = mismatch, 0 = match */
#define ADIS16240_DIAG_STAT_CHKSUM BIT(6)
+
/* Power-on, self-test flag: 1 = failure, 0 = pass */
#define ADIS16240_DIAG_STAT_PWRON_FAIL_BIT 5
+
/* Power-on self-test: 1 = in-progress, 0 = complete */
#define ADIS16240_DIAG_STAT_PWRON_BUSY BIT(4)
+
/* SPI communications failure */
#define ADIS16240_DIAG_STAT_SPI_FAIL_BIT 3
+
/* Flash update failure */
#define ADIS16240_DIAG_STAT_FLASH_UPT_BIT 2
+
/* Power supply above 3.625 V */
#define ADIS16240_DIAG_STAT_POWER_HIGH_BIT 1
+
/* Power supply below 3.15 V */
#define ADIS16240_DIAG_STAT_POWER_LOW_BIT 0
/* GLOB_CMD */
+
#define ADIS16240_GLOB_CMD_RESUME BIT(8)
#define ADIS16240_GLOB_CMD_SW_RESET BIT(7)
#define ADIS16240_GLOB_CMD_STANDBY BIT(2)
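/*
 * A condensed sketch (not the full initializer from adis16240_core.c; the
 * _example suffix marks it as illustrative) of why several DIAG_STAT symbols
 * above stay as plain bit numbers rather than BIT() masks: the adis core
 * uses the index to look up an error string and BIT(index) to build the
 * error mask, as in the adis16220_data initializer deleted earlier in this
 * diff.
 */
#include <linux/bitops.h>
#include <linux/iio/imu/adis.h>
#include "adis16240.h"

static const char * const adis16240_status_error_msgs_example[] = {
	[ADIS16240_DIAG_STAT_SPI_FAIL_BIT]  = "SPI failure",
	[ADIS16240_DIAG_STAT_FLASH_UPT_BIT] = "Flash update failed",
};

static const struct adis_data adis16240_data_example = {
	.diag_stat_reg	   = ADIS16240_DIAG_STAT,
	.status_error_msgs = adis16240_status_error_msgs_example,
	.status_error_mask = BIT(ADIS16240_DIAG_STAT_SPI_FAIL_BIT) |
			     BIT(ADIS16240_DIAG_STAT_FLASH_UPT_BIT),
};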
diff --git a/drivers/staging/iio/accel/adis16240_core.c b/drivers/staging/iio/accel/adis16240_core.c
index 1b5b685a8691..d5b99e610d08 100644
--- a/drivers/staging/iio/accel/adis16240_core.c
+++ b/drivers/staging/iio/accel/adis16240_core.c
@@ -29,13 +29,13 @@
static ssize_t adis16240_spi_read_signed(struct device *dev,
struct device_attribute *attr,
char *buf,
- unsigned bits)
+ unsigned int bits)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct adis *st = iio_priv(indio_dev);
int ret;
s16 val = 0;
- unsigned shift = 16 - bits;
+ unsigned int shift = 16 - bits;
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
ret = adis_read_reg_16(st,
@@ -222,6 +222,7 @@ static const struct adis_data adis16240_data = {
.diag_stat_reg = ADIS16240_DIAG_STAT,
.self_test_mask = ADIS16240_MSC_CTRL_SELF_TEST_EN,
+ .self_test_no_autoclear = true,
.startup_delay = ADIS16240_STARTUP_DELAY,
.status_error_msgs = adis16240_status_error_msgs,
diff --git a/drivers/staging/iio/adc/ad7192.c b/drivers/staging/iio/adc/ad7192.c
index f843f19cf675..1cf6b79801a9 100644
--- a/drivers/staging/iio/adc/ad7192.c
+++ b/drivers/staging/iio/adc/ad7192.c
@@ -35,10 +35,10 @@
#define AD7192_REG_DATA 3 /* Data Register (RO, 24/32-bit) */
#define AD7192_REG_ID 4 /* ID Register (RO, 8-bit) */
#define AD7192_REG_GPOCON 5 /* GPOCON Register (RO, 8-bit) */
-#define AD7192_REG_OFFSET 6 /* Offset Register (RW, 16-bit
- * (AD7792)/24-bit (AD7192)) */
-#define AD7192_REG_FULLSALE 7 /* Full-Scale Register
- * (RW, 16-bit (AD7792)/24-bit (AD7192)) */
+#define AD7192_REG_OFFSET 6 /* Offset Register (RW, 16-bit */
+ /* (AD7792)/24-bit (AD7192)) */
+#define AD7192_REG_FULLSALE 7 /* Full-Scale Register */
+ /* (RW, 16-bit (AD7792)/24-bit (AD7192)) */
/* Communications Register Bit Designations (AD7192_REG_COMM) */
#define AD7192_COMM_WEN BIT(7) /* Write Enable */
@@ -80,13 +80,13 @@
#define AD7192_MODE_CAL_SYS_FULL 7 /* System Full-Scale Calibration */
/* Mode Register: AD7192_MODE_CLKSRC options */
-#define AD7192_CLK_EXT_MCLK1_2 0 /* External 4.92 MHz Clock connected
- * from MCLK1 to MCLK2 */
+#define AD7192_CLK_EXT_MCLK1_2 0 /* External 4.92 MHz Clock connected*/
+ /* from MCLK1 to MCLK2 */
#define AD7192_CLK_EXT_MCLK2 1 /* External Clock applied to MCLK2 */
-#define AD7192_CLK_INT 2 /* Internal 4.92 MHz Clock not
- * available at the MCLK2 pin */
-#define AD7192_CLK_INT_CO 3 /* Internal 4.92 MHz Clock available
- * at the MCLK2 pin */
+#define AD7192_CLK_INT 2 /* Internal 4.92 MHz Clock not */
+ /* available at the MCLK2 pin */
+#define AD7192_CLK_INT_CO 3 /* Internal 4.92 MHz Clock available*/
+ /* at the MCLK2 pin */
/* Configuration Register Bit Designations (AD7192_REG_CONF) */
@@ -349,11 +349,9 @@ static ssize_t ad7192_write_frequency(struct device *dev,
if (lval == 0)
return -EINVAL;
- mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev)) {
- mutex_unlock(&indio_dev->mlock);
- return -EBUSY;
- }
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
div = st->mclk / (lval * st->f_order * 1024);
if (div < 1 || div > 1023) {
@@ -366,7 +364,7 @@ static ssize_t ad7192_write_frequency(struct device *dev,
ad_sd_write_reg(&st->sd, AD7192_REG_MODE, 3, st->mode);
out:
- mutex_unlock(&indio_dev->mlock);
+ iio_device_release_direct_mode(indio_dev);
return ret ? ret : len;
}
@@ -434,11 +432,9 @@ static ssize_t ad7192_set(struct device *dev,
if (ret < 0)
return ret;
- mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev)) {
- mutex_unlock(&indio_dev->mlock);
- return -EBUSY;
- }
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
switch ((u32)this_attr->address) {
case AD7192_REG_GPOCON:
@@ -461,7 +457,7 @@ static ssize_t ad7192_set(struct device *dev,
ret = -EINVAL;
}
- mutex_unlock(&indio_dev->mlock);
+ iio_device_release_direct_mode(indio_dev);
return ret ? ret : len;
}
@@ -555,11 +551,9 @@ static int ad7192_write_raw(struct iio_dev *indio_dev,
int ret, i;
unsigned int tmp;
- mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev)) {
- mutex_unlock(&indio_dev->mlock);
- return -EBUSY;
- }
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
switch (mask) {
case IIO_CHAN_INFO_SCALE:
@@ -582,7 +576,7 @@ static int ad7192_write_raw(struct iio_dev *indio_dev,
ret = -EINVAL;
}
- mutex_unlock(&indio_dev->mlock);
+ iio_device_release_direct_mode(indio_dev);
return ret;
}
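/*
 * A minimal sketch of the helper pattern these ad7192 hunks (and the ad7606
 * ones below) switch to; the function name and placeholder value are
 * illustrative, not part of the patch.  iio_device_claim_direct_mode()
 * fails with -EBUSY while the buffer is enabled, replacing the open-coded
 * mlock / iio_buffer_enabled() check, and every successful claim must be
 * paired with iio_device_release_direct_mode().
 */
#include <linux/iio/iio.h>

static int example_direct_read(struct iio_dev *indio_dev, int *val)
{
	int ret;

	ret = iio_device_claim_direct_mode(indio_dev);
	if (ret)
		return ret;		/* typically -EBUSY */

	*val = 42;			/* stand-in for the actual register read */

	iio_device_release_direct_mode(indio_dev);
	return IIO_VAL_INT;
}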
diff --git a/drivers/staging/iio/adc/ad7280a.c b/drivers/staging/iio/adc/ad7280a.c
index 62e5ecacf634..a06b46cb81ca 100644
--- a/drivers/staging/iio/adc/ad7280a.c
+++ b/drivers/staging/iio/adc/ad7280a.c
@@ -155,7 +155,7 @@ static void ad7280_crc8_build_table(unsigned char *crc_tab)
}
}
-static unsigned char ad7280_calc_crc8(unsigned char *crc_tab, unsigned val)
+static unsigned char ad7280_calc_crc8(unsigned char *crc_tab, unsigned int val)
{
unsigned char crc;
@@ -165,7 +165,7 @@ static unsigned char ad7280_calc_crc8(unsigned char *crc_tab, unsigned val)
return crc ^ (val & 0xFF);
}
-static int ad7280_check_crc(struct ad7280_state *st, unsigned val)
+static int ad7280_check_crc(struct ad7280_state *st, unsigned int val)
{
unsigned char crc = ad7280_calc_crc8(st->crc_tab, val >> 10);
@@ -191,7 +191,7 @@ static void ad7280_delay(struct ad7280_state *st)
usleep_range(250, 500);
}
-static int __ad7280_read32(struct ad7280_state *st, unsigned *val)
+static int __ad7280_read32(struct ad7280_state *st, unsigned int *val)
{
int ret;
struct spi_transfer t = {
@@ -211,10 +211,10 @@ static int __ad7280_read32(struct ad7280_state *st, unsigned *val)
return 0;
}
-static int ad7280_write(struct ad7280_state *st, unsigned devaddr,
- unsigned addr, bool all, unsigned val)
+static int ad7280_write(struct ad7280_state *st, unsigned int devaddr,
+ unsigned int addr, bool all, unsigned int val)
{
- unsigned reg = devaddr << 27 | addr << 21 |
+ unsigned int reg = devaddr << 27 | addr << 21 |
(val & 0xFF) << 13 | all << 12;
reg |= ad7280_calc_crc8(st->crc_tab, reg >> 11) << 3 | 0x2;
@@ -223,11 +223,11 @@ static int ad7280_write(struct ad7280_state *st, unsigned devaddr,
return spi_write(st->spi, &st->buf[0], 4);
}
-static int ad7280_read(struct ad7280_state *st, unsigned devaddr,
- unsigned addr)
+static int ad7280_read(struct ad7280_state *st, unsigned int devaddr,
+ unsigned int addr)
{
int ret;
- unsigned tmp;
+ unsigned int tmp;
/* turns off the read operation on all parts */
ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_CONTROL_HB, 1,
@@ -261,11 +261,11 @@ static int ad7280_read(struct ad7280_state *st, unsigned devaddr,
return (tmp >> 13) & 0xFF;
}
-static int ad7280_read_channel(struct ad7280_state *st, unsigned devaddr,
- unsigned addr)
+static int ad7280_read_channel(struct ad7280_state *st, unsigned int devaddr,
+ unsigned int addr)
{
int ret;
- unsigned tmp;
+ unsigned int tmp;
ret = ad7280_write(st, devaddr, AD7280A_READ, 0, addr << 2);
if (ret)
@@ -299,11 +299,11 @@ static int ad7280_read_channel(struct ad7280_state *st, unsigned devaddr,
return (tmp >> 11) & 0xFFF;
}
-static int ad7280_read_all_channels(struct ad7280_state *st, unsigned cnt,
- unsigned *array)
+static int ad7280_read_all_channels(struct ad7280_state *st, unsigned int cnt,
+ unsigned int *array)
{
int i, ret;
- unsigned tmp, sum = 0;
+ unsigned int tmp, sum = 0;
ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_READ, 1,
AD7280A_CELL_VOLTAGE_1 << 2);
@@ -338,7 +338,7 @@ static int ad7280_read_all_channels(struct ad7280_state *st, unsigned cnt,
static int ad7280_chain_setup(struct ad7280_state *st)
{
- unsigned val, n;
+ unsigned int val, n;
int ret;
ret = ad7280_write(st, AD7280A_DEVADDR_MASTER, AD7280A_CONTROL_LB, 1,
@@ -401,7 +401,7 @@ static ssize_t ad7280_store_balance_sw(struct device *dev,
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
bool readin;
int ret;
- unsigned devaddr, ch;
+ unsigned int devaddr, ch;
ret = strtobool(buf, &readin);
if (ret)
@@ -431,7 +431,7 @@ static ssize_t ad7280_show_balance_timer(struct device *dev,
struct ad7280_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
- unsigned msecs;
+ unsigned int msecs;
mutex_lock(&indio_dev->mlock);
ret = ad7280_read(st, this_attr->address >> 8,
@@ -602,7 +602,7 @@ static ssize_t ad7280_read_channel_config(struct device *dev,
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ad7280_state *st = iio_priv(indio_dev);
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
- unsigned val;
+ unsigned int val;
switch ((u32)this_attr->address) {
case AD7280A_CELL_OVERVOLTAGE:
@@ -683,7 +683,7 @@ static irqreturn_t ad7280_event_handler(int irq, void *private)
{
struct iio_dev *indio_dev = private;
struct ad7280_state *st = iio_priv(indio_dev);
- unsigned *channels;
+ unsigned int *channels;
int i, ret;
channels = kcalloc(st->scan_cnt, sizeof(*channels), GFP_KERNEL);
diff --git a/drivers/staging/iio/adc/ad7280a.h b/drivers/staging/iio/adc/ad7280a.h
index 732347a9bce4..ccfb90d20e71 100644
--- a/drivers/staging/iio/adc/ad7280a.h
+++ b/drivers/staging/iio/adc/ad7280a.h
@@ -29,10 +29,10 @@
#define AD7280A_ALERT_REMOVE_AUX4_AUX5 BIT(1)
struct ad7280_platform_data {
- unsigned acquisition_time;
- unsigned conversion_averaging;
- unsigned chain_last_alert_ignore;
- bool thermistor_term_en;
+ unsigned int acquisition_time;
+ unsigned int conversion_averaging;
+ unsigned int chain_last_alert_ignore;
+ bool thermistor_term_en;
};
#endif /* IIO_ADC_AD7280_H_ */
diff --git a/drivers/staging/iio/adc/ad7606.h b/drivers/staging/iio/adc/ad7606.h
index cca946924c58..39f50440d915 100644
--- a/drivers/staging/iio/adc/ad7606.h
+++ b/drivers/staging/iio/adc/ad7606.h
@@ -28,16 +28,16 @@
*/
struct ad7606_platform_data {
- unsigned default_os;
- unsigned default_range;
- unsigned gpio_convst;
- unsigned gpio_reset;
- unsigned gpio_range;
- unsigned gpio_os0;
- unsigned gpio_os1;
- unsigned gpio_os2;
- unsigned gpio_frstdata;
- unsigned gpio_stby;
+ unsigned int default_os;
+ unsigned int default_range;
+ unsigned int gpio_convst;
+ unsigned int gpio_reset;
+ unsigned int gpio_range;
+ unsigned int gpio_os0;
+ unsigned int gpio_os1;
+ unsigned int gpio_os2;
+ unsigned int gpio_frstdata;
+ unsigned int gpio_stby;
};
/**
@@ -52,7 +52,7 @@ struct ad7606_chip_info {
const char *name;
u16 int_vref_mv;
const struct iio_chan_spec *channels;
- unsigned num_channels;
+ unsigned int num_channels;
};
/**
@@ -67,8 +67,8 @@ struct ad7606_state {
struct work_struct poll_work;
wait_queue_head_t wq_data_avail;
const struct ad7606_bus_ops *bops;
- unsigned range;
- unsigned oversampling;
+ unsigned int range;
+ unsigned int oversampling;
bool done;
void __iomem *base_address;
@@ -86,7 +86,7 @@ struct ad7606_bus_ops {
};
struct iio_dev *ad7606_probe(struct device *dev, int irq,
- void __iomem *base_address, unsigned id,
+ void __iomem *base_address, unsigned int id,
const struct ad7606_bus_ops *bops);
int ad7606_remove(struct iio_dev *indio_dev, int irq);
int ad7606_reset(struct ad7606_state *st);
diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606_core.c
index fe6caeee0843..f79ee61851f6 100644
--- a/drivers/staging/iio/adc/ad7606_core.c
+++ b/drivers/staging/iio/adc/ad7606_core.c
@@ -36,7 +36,7 @@ int ad7606_reset(struct ad7606_state *st)
return -ENODEV;
}
-static int ad7606_scan_direct(struct iio_dev *indio_dev, unsigned ch)
+static int ad7606_scan_direct(struct iio_dev *indio_dev, unsigned int ch)
{
struct ad7606_state *st = iio_priv(indio_dev);
int ret;
@@ -88,12 +88,12 @@ static int ad7606_read_raw(struct iio_dev *indio_dev,
switch (m) {
case IIO_CHAN_INFO_RAW:
- mutex_lock(&indio_dev->mlock);
- if (iio_buffer_enabled(indio_dev))
- ret = -EBUSY;
- else
- ret = ad7606_scan_direct(indio_dev, chan->address);
- mutex_unlock(&indio_dev->mlock);
+ ret = iio_device_claim_direct_mode(indio_dev);
+ if (ret)
+ return ret;
+
+ ret = ad7606_scan_direct(indio_dev, chan->address);
+ iio_device_release_direct_mode(indio_dev);
if (ret < 0)
return ret;
@@ -155,7 +155,7 @@ static ssize_t ad7606_show_oversampling_ratio(struct device *dev,
return sprintf(buf, "%u\n", st->oversampling);
}
-static int ad7606_oversampling_get_index(unsigned val)
+static int ad7606_oversampling_get_index(unsigned int val)
{
unsigned char supported[] = {0, 2, 4, 8, 16, 32, 64};
int i;
@@ -446,7 +446,7 @@ static const struct iio_info ad7606_info_range = {
struct iio_dev *ad7606_probe(struct device *dev, int irq,
void __iomem *base_address,
- unsigned id,
+ unsigned int id,
const struct ad7606_bus_ops *bops)
{
struct ad7606_platform_data *pdata = dev->platform_data;
diff --git a/drivers/staging/iio/adc/ad7606_spi.c b/drivers/staging/iio/adc/ad7606_spi.c
index d873a5164595..825da0769936 100644
--- a/drivers/staging/iio/adc/ad7606_spi.c
+++ b/drivers/staging/iio/adc/ad7606_spi.c
@@ -21,7 +21,8 @@ static int ad7606_spi_read_block(struct device *dev,
{
struct spi_device *spi = to_spi_device(dev);
int i, ret;
- unsigned short *data = buf;
+ unsigned short *data;
+ __be16 *bdata = buf;
ret = spi_read(spi, buf, count * 2);
if (ret < 0) {
@@ -30,7 +31,7 @@ static int ad7606_spi_read_block(struct device *dev,
}
for (i = 0; i < count; i++)
- data[i] = be16_to_cpu(data[i]);
+ data[i] = be16_to_cpu(bdata[i]);
return 0;
}
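/*
 * A sketch of the endianness pattern the ad7606_spi hunk moves to (the
 * function name is illustrative): the raw SPI buffer is viewed as __be16 so
 * sparse can type-check it, and be16_to_cpu() converts each sample in place.
 * Note that the hunk above also drops the "= buf" initializer from the data
 * pointer; unless it is assigned in context not shown here, that pointer is
 * dereferenced uninitialized in the conversion loop.
 */
#include <linux/types.h>
#include <asm/byteorder.h>

static void example_be16_to_cpu_inplace(void *buf, int count)
{
	__be16 *bdata = buf;	/* wire format: big endian */
	u16 *data = buf;	/* same storage, CPU order after the loop */
	int i;

	for (i = 0; i < count; i++)
		data[i] = be16_to_cpu(bdata[i]);
}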
diff --git a/drivers/staging/iio/adc/ad7780.c b/drivers/staging/iio/adc/ad7780.c
index 1439cfdbb09c..c9a0c2aa602f 100644
--- a/drivers/staging/iio/adc/ad7780.c
+++ b/drivers/staging/iio/adc/ad7780.c
@@ -63,7 +63,7 @@ static int ad7780_set_mode(struct ad_sigma_delta *sigma_delta,
enum ad_sigma_delta_mode mode)
{
struct ad7780_state *st = ad_sigma_delta_to_ad7780(sigma_delta);
- unsigned val;
+ unsigned int val;
switch (mode) {
case AD_SD_MODE_SINGLE:
diff --git a/drivers/staging/iio/frequency/ad9832.c b/drivers/staging/iio/frequency/ad9832.c
index 18b27a1984b2..358400b22d33 100644
--- a/drivers/staging/iio/frequency/ad9832.c
+++ b/drivers/staging/iio/frequency/ad9832.c
@@ -31,7 +31,7 @@ static unsigned long ad9832_calc_freqreg(unsigned long mclk, unsigned long fout)
}
static int ad9832_write_frequency(struct ad9832_state *st,
- unsigned addr, unsigned long fout)
+ unsigned int addr, unsigned long fout)
{
unsigned long regval;
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index d1218d896725..9f43976f4ef2 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -12,20 +12,16 @@
#include <linux/sysfs.h>
#include <linux/i2c.h>
#include <linux/regulator/consumer.h>
-#include <linux/slab.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/module.h>
-#include <asm/div64.h>
#include <linux/iio/iio.h>
#include <linux/iio/sysfs.h>
#include <linux/iio/buffer.h>
#include <linux/iio/kfifo_buf.h>
-#include "ad5933.h"
-
/* AD5933/AD5934 Registers */
#define AD5933_REG_CONTROL_HB 0x80 /* R/W, 2 bytes */
#define AD5933_REG_CONTROL_LB 0x81 /* R/W, 2 bytes */
@@ -86,6 +82,18 @@
#define AD5933_POLL_TIME_ms 10
#define AD5933_INIT_EXCITATION_TIME_ms 100
+/**
+ * struct ad5933_platform_data - platform specific data
+ * @ext_clk_Hz: the external clock frequency in Hz, if not set
+ * the driver uses the internal clock (16.776 MHz)
+ * @vref_mv: the external reference voltage in millivolt
+ */
+
+struct ad5933_platform_data {
+ unsigned long ext_clk_Hz;
+ unsigned short vref_mv;
+};
+
struct ad5933_state {
struct i2c_client *client;
struct regulator *reg;
@@ -93,14 +101,14 @@ struct ad5933_state {
unsigned long mclk_hz;
unsigned char ctrl_hb;
unsigned char ctrl_lb;
- unsigned range_avail[4];
+ unsigned int range_avail[4];
unsigned short vref_mv;
unsigned short settling_cycles;
unsigned short freq_points;
- unsigned freq_start;
- unsigned freq_inc;
- unsigned state;
- unsigned poll_time_jiffies;
+ unsigned int freq_start;
+ unsigned int freq_inc;
+ unsigned int state;
+ unsigned int poll_time_jiffies;
};
static struct ad5933_platform_data ad5933_default_pdata = {
@@ -214,7 +222,7 @@ static int ad5933_wait_busy(struct ad5933_state *st, unsigned char event)
}
static int ad5933_set_freq(struct ad5933_state *st,
- unsigned reg, unsigned long freq)
+ unsigned int reg, unsigned long freq)
{
unsigned long long freqreg;
union {
@@ -274,7 +282,7 @@ static int ad5933_setup(struct ad5933_state *st)
static void ad5933_calc_out_ranges(struct ad5933_state *st)
{
int i;
- unsigned normalized_3v3[4] = {1980, 198, 383, 970};
+ unsigned int normalized_3v3[4] = {1980, 198, 383, 970};
for (i = 0; i < 4; i++)
st->range_avail[i] = normalized_3v3[i] * st->vref_mv / 3300;
@@ -307,10 +315,10 @@ static ssize_t ad5933_show_frequency(struct device *dev,
freqreg = be32_to_cpu(dat.d32) & 0xFFFFFF;
- freqreg = (u64) freqreg * (u64) (st->mclk_hz / 4);
+ freqreg = (u64)freqreg * (u64)(st->mclk_hz / 4);
do_div(freqreg, 1 << 27);
- return sprintf(buf, "%d\n", (int) freqreg);
+ return sprintf(buf, "%d\n", (int)freqreg);
}
static ssize_t ad5933_store_frequency(struct device *dev,
@@ -358,7 +366,7 @@ static ssize_t ad5933_show(struct device *dev,
int ret = 0, len = 0;
mutex_lock(&indio_dev->mlock);
- switch ((u32) this_attr->address) {
+ switch ((u32)this_attr->address) {
case AD5933_OUT_RANGE:
len = sprintf(buf, "%u\n",
st->range_avail[(st->ctrl_hb >> 1) & 0x3]);
@@ -409,7 +417,7 @@ static ssize_t ad5933_store(struct device *dev,
}
mutex_lock(&indio_dev->mlock);
- switch ((u32) this_attr->address) {
+ switch ((u32)this_attr->address) {
case AD5933_OUT_RANGE:
for (i = 0; i < 4; i++)
if (val == st->range_avail[i]) {
@@ -683,8 +691,9 @@ static void ad5933_work(struct work_struct *work)
}
if (status & AD5933_STAT_SWEEP_DONE) {
- /* last sample received - power down do nothing until
- * the ring enable is toggled */
+ /* last sample received - power down do
+ * nothing until the ring enable is toggled
+ */
ad5933_cmd(st, AD5933_CTRL_POWER_DOWN);
} else {
/* we just received a valid datum, move on to the next */
@@ -699,7 +708,7 @@ static int ad5933_probe(struct i2c_client *client,
const struct i2c_device_id *id)
{
int ret, voltage_uv = 0;
- struct ad5933_platform_data *pdata = client->dev.platform_data;
+ struct ad5933_platform_data *pdata = dev_get_platdata(&client->dev);
struct ad5933_state *st;
struct iio_dev *indio_dev;
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.h b/drivers/staging/iio/impedance-analyzer/ad5933.h
deleted file mode 100644
index b140e42d67cf..000000000000
--- a/drivers/staging/iio/impedance-analyzer/ad5933.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * AD5933 AD5934 Impedance Converter, Network Analyzer
- *
- * Copyright 2011 Analog Devices Inc.
- *
- * Licensed under the GPL-2.
- */
-
-#ifndef IIO_ADC_AD5933_H_
-#define IIO_ADC_AD5933_H_
-
-/*
- * TODO: struct ad5933_platform_data needs to go into include/linux/iio
- */
-
-/**
- * struct ad5933_platform_data - platform specific data
- * @ext_clk_Hz: the external clock frequency in Hz, if not set
- * the driver uses the internal clock (16.776 MHz)
- * @vref_mv: the external reference voltage in millivolt
- */
-
-struct ad5933_platform_data {
- unsigned long ext_clk_Hz;
- unsigned short vref_mv;
-};
-
-#endif /* IIO_ADC_AD5933_H_ */
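/*
 * A sketch of the frequency-register arithmetic the ad5933 hunks above touch
 * (function names are illustrative): the 24-bit code is
 * freq * 2^27 / (mclk / 4), and do_div() is used so the 64-bit division also
 * works on 32-bit machines.
 */
#include <linux/types.h>
#include <asm/div64.h>

static u32 example_freq_to_code(unsigned long freq_hz, unsigned long mclk_hz)
{
	u64 tmp = (u64)freq_hz << 27;

	do_div(tmp, mclk_hz / 4);	/* code = freq * 2^27 / (mclk / 4) */
	return (u32)tmp;		/* only the low 24 bits are written */
}

static unsigned long example_code_to_freq(u32 code, unsigned long mclk_hz)
{
	u64 tmp = (u64)(code & 0xFFFFFF) * (mclk_hz / 4);

	do_div(tmp, 1 << 27);		/* inverse of example_freq_to_code() */
	return (unsigned long)tmp;
}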
diff --git a/drivers/staging/iio/light/isl29028.c b/drivers/staging/iio/light/isl29028.c
index 6e2ba458c24d..2e3b1d64e32a 100644
--- a/drivers/staging/iio/light/isl29028.c
+++ b/drivers/staging/iio/light/isl29028.c
@@ -69,7 +69,6 @@ enum als_ir_mode {
};
struct isl29028_chip {
- struct device *dev;
struct mutex lock;
struct regmap *regmap;
@@ -166,20 +165,21 @@ static int isl29028_set_als_ir_mode(struct isl29028_chip *chip,
static int isl29028_read_als_ir(struct isl29028_chip *chip, int *als_ir)
{
+ struct device *dev = regmap_get_device(chip->regmap);
unsigned int lsb;
unsigned int msb;
int ret;
ret = regmap_read(chip->regmap, ISL29028_REG_ALSIR_L, &lsb);
if (ret < 0) {
- dev_err(chip->dev,
+ dev_err(dev,
"Error in reading register ALSIR_L err %d\n", ret);
return ret;
}
ret = regmap_read(chip->regmap, ISL29028_REG_ALSIR_U, &msb);
if (ret < 0) {
- dev_err(chip->dev,
+ dev_err(dev,
"Error in reading register ALSIR_U err %d\n", ret);
return ret;
}
@@ -190,12 +190,13 @@ static int isl29028_read_als_ir(struct isl29028_chip *chip, int *als_ir)
static int isl29028_read_proxim(struct isl29028_chip *chip, int *prox)
{
+ struct device *dev = regmap_get_device(chip->regmap);
unsigned int data;
int ret;
ret = regmap_read(chip->regmap, ISL29028_REG_PROX_DATA, &data);
if (ret < 0) {
- dev_err(chip->dev, "Error in reading register %d, error %d\n",
+ dev_err(dev, "Error in reading register %d, error %d\n",
ISL29028_REG_PROX_DATA, ret);
return ret;
}
@@ -218,13 +219,14 @@ static int isl29028_proxim_get(struct isl29028_chip *chip, int *prox_data)
static int isl29028_als_get(struct isl29028_chip *chip, int *als_data)
{
+ struct device *dev = regmap_get_device(chip->regmap);
int ret;
int als_ir_data;
if (chip->als_ir_mode != MODE_ALS) {
ret = isl29028_set_als_ir_mode(chip, MODE_ALS);
if (ret < 0) {
- dev_err(chip->dev,
+ dev_err(dev,
"Error in enabling ALS mode err %d\n", ret);
return ret;
}
@@ -251,12 +253,13 @@ static int isl29028_als_get(struct isl29028_chip *chip, int *als_data)
static int isl29028_ir_get(struct isl29028_chip *chip, int *ir_data)
{
+ struct device *dev = regmap_get_device(chip->regmap);
int ret;
if (chip->als_ir_mode != MODE_IR) {
ret = isl29028_set_als_ir_mode(chip, MODE_IR);
if (ret < 0) {
- dev_err(chip->dev,
+ dev_err(dev,
"Error in enabling IR mode err %d\n", ret);
return ret;
}
@@ -271,25 +274,26 @@ static int isl29028_write_raw(struct iio_dev *indio_dev,
int val, int val2, long mask)
{
struct isl29028_chip *chip = iio_priv(indio_dev);
+ struct device *dev = regmap_get_device(chip->regmap);
int ret = -EINVAL;
mutex_lock(&chip->lock);
switch (chan->type) {
case IIO_PROXIMITY:
if (mask != IIO_CHAN_INFO_SAMP_FREQ) {
- dev_err(chip->dev,
+ dev_err(dev,
"proximity: mask value 0x%08lx not supported\n",
mask);
break;
}
if (val < 1 || val > 100) {
- dev_err(chip->dev,
+ dev_err(dev,
"Samp_freq %d is not in range[1:100]\n", val);
break;
}
ret = isl29028_set_proxim_sampling(chip, val);
if (ret < 0) {
- dev_err(chip->dev,
+ dev_err(dev,
"Setting proximity samp_freq fail, err %d\n",
ret);
break;
@@ -299,19 +303,19 @@ static int isl29028_write_raw(struct iio_dev *indio_dev,
case IIO_LIGHT:
if (mask != IIO_CHAN_INFO_SCALE) {
- dev_err(chip->dev,
+ dev_err(dev,
"light: mask value 0x%08lx not supported\n",
mask);
break;
}
if ((val != 125) && (val != 2000)) {
- dev_err(chip->dev,
+ dev_err(dev,
"lux scale %d is invalid [125, 2000]\n", val);
break;
}
ret = isl29028_set_als_scale(chip, val);
if (ret < 0) {
- dev_err(chip->dev,
+ dev_err(dev,
"Setting lux scale fail with error %d\n", ret);
break;
}
@@ -319,7 +323,7 @@ static int isl29028_write_raw(struct iio_dev *indio_dev,
break;
default:
- dev_err(chip->dev, "Unsupported channel type\n");
+ dev_err(dev, "Unsupported channel type\n");
break;
}
mutex_unlock(&chip->lock);
@@ -331,6 +335,7 @@ static int isl29028_read_raw(struct iio_dev *indio_dev,
int *val, int *val2, long mask)
{
struct isl29028_chip *chip = iio_priv(indio_dev);
+ struct device *dev = regmap_get_device(chip->regmap);
int ret = -EINVAL;
mutex_lock(&chip->lock);
@@ -370,7 +375,7 @@ static int isl29028_read_raw(struct iio_dev *indio_dev,
break;
default:
- dev_err(chip->dev, "mask value 0x%08lx not supported\n", mask);
+ dev_err(dev, "mask value 0x%08lx not supported\n", mask);
break;
}
mutex_unlock(&chip->lock);
@@ -417,6 +422,7 @@ static const struct iio_info isl29028_info = {
static int isl29028_chip_init(struct isl29028_chip *chip)
{
+ struct device *dev = regmap_get_device(chip->regmap);
int ret;
chip->enable_prox = false;
@@ -426,35 +432,33 @@ static int isl29028_chip_init(struct isl29028_chip *chip)
ret = regmap_write(chip->regmap, ISL29028_REG_TEST1_MODE, 0x0);
if (ret < 0) {
- dev_err(chip->dev, "%s(): write to reg %d failed, err = %d\n",
+ dev_err(dev, "%s(): write to reg %d failed, err = %d\n",
__func__, ISL29028_REG_TEST1_MODE, ret);
return ret;
}
ret = regmap_write(chip->regmap, ISL29028_REG_TEST2_MODE, 0x0);
if (ret < 0) {
- dev_err(chip->dev, "%s(): write to reg %d failed, err = %d\n",
+ dev_err(dev, "%s(): write to reg %d failed, err = %d\n",
__func__, ISL29028_REG_TEST2_MODE, ret);
return ret;
}
ret = regmap_write(chip->regmap, ISL29028_REG_CONFIGURE, 0x0);
if (ret < 0) {
- dev_err(chip->dev, "%s(): write to reg %d failed, err = %d\n",
+ dev_err(dev, "%s(): write to reg %d failed, err = %d\n",
__func__, ISL29028_REG_CONFIGURE, ret);
return ret;
}
ret = isl29028_set_proxim_sampling(chip, chip->prox_sampling);
if (ret < 0) {
- dev_err(chip->dev, "setting the proximity, err = %d\n",
- ret);
+ dev_err(dev, "setting the proximity, err = %d\n", ret);
return ret;
}
ret = isl29028_set_als_scale(chip, chip->lux_scale);
if (ret < 0)
- dev_err(chip->dev,
- "setting als scale failed, err = %d\n", ret);
+ dev_err(dev, "setting als scale failed, err = %d\n", ret);
return ret;
}
@@ -496,19 +500,19 @@ static int isl29028_probe(struct i2c_client *client,
chip = iio_priv(indio_dev);
i2c_set_clientdata(client, indio_dev);
- chip->dev = &client->dev;
mutex_init(&chip->lock);
chip->regmap = devm_regmap_init_i2c(client, &isl29028_regmap_config);
if (IS_ERR(chip->regmap)) {
ret = PTR_ERR(chip->regmap);
- dev_err(chip->dev, "regmap initialization failed: %d\n", ret);
+ dev_err(&client->dev, "regmap initialization failed: %d\n",
+ ret);
return ret;
}
ret = isl29028_chip_init(chip);
if (ret < 0) {
- dev_err(chip->dev, "chip initialization failed: %d\n", ret);
+ dev_err(&client->dev, "chip initialization failed: %d\n", ret);
return ret;
}
@@ -520,7 +524,8 @@ static int isl29028_probe(struct i2c_client *client,
indio_dev->modes = INDIO_DIRECT_MODE;
ret = devm_iio_device_register(indio_dev->dev.parent, indio_dev);
if (ret < 0) {
- dev_err(chip->dev, "iio registration fails with error %d\n",
+ dev_err(&client->dev,
+ "iio registration fails with error %d\n",
ret);
return ret;
}
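/*
 * A sketch of the pattern the isl29028 hunks adopt (function and register
 * argument are illustrative): rather than caching a struct device pointer in
 * the driver state, the device is recovered from the regmap whenever it is
 * needed for logging, which is what lets the redundant chip->dev member be
 * removed.
 */
#include <linux/device.h>
#include <linux/regmap.h>

static int example_logged_read(struct regmap *map, unsigned int reg)
{
	struct device *dev = regmap_get_device(map);
	unsigned int val;
	int ret;

	ret = regmap_read(map, reg, &val);
	if (ret < 0) {
		dev_err(dev, "read of reg 0x%02x failed: %d\n", reg, ret);
		return ret;
	}

	return val;
}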
diff --git a/drivers/staging/iio/light/tsl2x7x_core.c b/drivers/staging/iio/light/tsl2x7x_core.c
index 5f308bae41b9..d553c8e18fcc 100644
--- a/drivers/staging/iio/light/tsl2x7x_core.c
+++ b/drivers/staging/iio/light/tsl2x7x_core.c
@@ -187,9 +187,11 @@ struct tsl2X7X_chip {
const struct tsl2x7x_chip_info *chip_info;
const struct iio_info *info;
s64 event_timestamp;
- /* This structure is intentionally large to accommodate
- * updates via sysfs. */
- /* Sized to 9 = max 8 segments + 1 termination segment */
+ /*
+ * This structure is intentionally large to accommodate
+ * updates via sysfs.
+ * Sized to 9 = max 8 segments + 1 termination segment
+ */
struct tsl2x7x_lux tsl2x7x_device_lux[TSL2X7X_MAX_LUX_TABLE_SIZE];
};
@@ -349,13 +351,13 @@ static int tsl2x7x_get_lux(struct iio_dev *indio_dev)
if (chip->tsl2x7x_chip_status != TSL2X7X_CHIP_WORKING) {
/* device is not enabled */
dev_err(&chip->client->dev, "%s: device is not enabled\n",
- __func__);
+ __func__);
ret = -EBUSY;
goto out_unlock;
}
ret = tsl2x7x_i2c_read(chip->client,
- (TSL2X7X_CMD_REG | TSL2X7X_STATUS), &buf[0]);
+ (TSL2X7X_CMD_REG | TSL2X7X_STATUS), &buf[0]);
if (ret < 0) {
dev_err(&chip->client->dev,
"%s: Failed to read STATUS Reg\n", __func__);
@@ -371,8 +373,8 @@ static int tsl2x7x_get_lux(struct iio_dev *indio_dev)
for (i = 0; i < 4; i++) {
ret = tsl2x7x_i2c_read(chip->client,
- (TSL2X7X_CMD_REG | (TSL2X7X_ALS_CHAN0LO + i)),
- &buf[i]);
+ (TSL2X7X_CMD_REG |
+ (TSL2X7X_ALS_CHAN0LO + i)), &buf[i]);
if (ret < 0) {
dev_err(&chip->client->dev,
"failed to read. err=%x\n", ret);
@@ -382,9 +384,9 @@ static int tsl2x7x_get_lux(struct iio_dev *indio_dev)
/* clear any existing interrupt status */
ret = i2c_smbus_write_byte(chip->client,
- (TSL2X7X_CMD_REG |
- TSL2X7X_CMD_SPL_FN |
- TSL2X7X_CMD_ALS_INT_CLR));
+ (TSL2X7X_CMD_REG |
+ TSL2X7X_CMD_SPL_FN |
+ TSL2X7X_CMD_ALS_INT_CLR));
if (ret < 0) {
dev_err(&chip->client->dev,
"i2c_write_command failed - err = %d\n", ret);
@@ -411,7 +413,7 @@ static int tsl2x7x_get_lux(struct iio_dev *indio_dev)
/* calculate ratio */
ratio = (ch1 << 15) / ch0;
/* convert to unscaled lux using the pointer to the table */
- p = (struct tsl2x7x_lux *) chip->tsl2x7x_device_lux;
+ p = (struct tsl2x7x_lux *)chip->tsl2x7x_device_lux;
while (p->ratio != 0 && p->ratio < ratio)
p++;
@@ -488,7 +490,7 @@ static int tsl2x7x_get_prox(struct iio_dev *indio_dev)
}
ret = tsl2x7x_i2c_read(chip->client,
- (TSL2X7X_CMD_REG | TSL2X7X_STATUS), &status);
+ (TSL2X7X_CMD_REG | TSL2X7X_STATUS), &status);
if (ret < 0) {
dev_err(&chip->client->dev, "i2c err=%d\n", ret);
goto prox_poll_err;
@@ -515,8 +517,8 @@ static int tsl2x7x_get_prox(struct iio_dev *indio_dev)
for (i = 0; i < 2; i++) {
ret = tsl2x7x_i2c_read(chip->client,
- (TSL2X7X_CMD_REG |
- (TSL2X7X_PRX_LO + i)), &chdata[i]);
+ (TSL2X7X_CMD_REG |
+ (TSL2X7X_PRX_LO + i)), &chdata[i]);
if (ret < 0)
goto prox_poll_err;
}
@@ -542,19 +544,19 @@ static void tsl2x7x_defaults(struct tsl2X7X_chip *chip)
{
/* If Operational settings defined elsewhere.. */
if (chip->pdata && chip->pdata->platform_default_settings)
- memcpy(&(chip->tsl2x7x_settings),
- chip->pdata->platform_default_settings,
- sizeof(tsl2x7x_default_settings));
+ memcpy(&chip->tsl2x7x_settings,
+ chip->pdata->platform_default_settings,
+ sizeof(tsl2x7x_default_settings));
else
- memcpy(&(chip->tsl2x7x_settings),
- &tsl2x7x_default_settings,
- sizeof(tsl2x7x_default_settings));
+ memcpy(&chip->tsl2x7x_settings,
+ &tsl2x7x_default_settings,
+ sizeof(tsl2x7x_default_settings));
/* Load up the proper lux table. */
if (chip->pdata && chip->pdata->platform_lux_table[0].ratio != 0)
memcpy(chip->tsl2x7x_device_lux,
- chip->pdata->platform_lux_table,
- sizeof(chip->pdata->platform_lux_table));
+ chip->pdata->platform_lux_table,
+ sizeof(chip->pdata->platform_lux_table));
else
memcpy(chip->tsl2x7x_device_lux,
(struct tsl2x7x_lux *)tsl2x7x_default_lux_table_group[chip->id],
@@ -576,7 +578,7 @@ static int tsl2x7x_als_calibrate(struct iio_dev *indio_dev)
int lux_val;
ret = i2c_smbus_write_byte(chip->client,
- (TSL2X7X_CMD_REG | TSL2X7X_CNTRL));
+ (TSL2X7X_CMD_REG | TSL2X7X_CNTRL));
if (ret < 0) {
dev_err(&chip->client->dev,
"failed to write CNTRL register, ret=%d\n", ret);
@@ -592,7 +594,7 @@ static int tsl2x7x_als_calibrate(struct iio_dev *indio_dev)
}
ret = i2c_smbus_write_byte(chip->client,
- (TSL2X7X_CMD_REG | TSL2X7X_CNTRL));
+ (TSL2X7X_CMD_REG | TSL2X7X_CNTRL));
if (ret < 0) {
dev_err(&chip->client->dev,
"failed to write ctrl reg: ret=%d\n", ret);
@@ -609,7 +611,7 @@ static int tsl2x7x_als_calibrate(struct iio_dev *indio_dev)
lux_val = tsl2x7x_get_lux(indio_dev);
if (lux_val < 0) {
dev_err(&chip->client->dev,
- "%s: failed to get lux\n", __func__);
+ "%s: failed to get lux\n", __func__);
return lux_val;
}
@@ -620,9 +622,9 @@ static int tsl2x7x_als_calibrate(struct iio_dev *indio_dev)
chip->tsl2x7x_settings.als_gain_trim = gain_trim_val;
dev_info(&chip->client->dev,
- "%s als_calibrate completed\n", chip->client->name);
+ "%s als_calibrate completed\n", chip->client->name);
- return (int) gain_trim_val;
+ return (int)gain_trim_val;
}
static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
@@ -695,23 +697,28 @@ static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
chip->als_saturation = als_count * 922; /* 90% of full scale */
chip->als_time_scale = (als_time + 25) / 50;
- /* TSL2X7X Specific power-on / adc enable sequence
- * Power on the device 1st. */
+ /*
+ * TSL2X7X Specific power-on / adc enable sequence
+ * Power on the device 1st.
+ */
utmp = TSL2X7X_CNTL_PWR_ON;
ret = i2c_smbus_write_byte_data(chip->client,
- TSL2X7X_CMD_REG | TSL2X7X_CNTRL, utmp);
+ TSL2X7X_CMD_REG | TSL2X7X_CNTRL, utmp);
if (ret < 0) {
dev_err(&chip->client->dev,
"%s: failed on CNTRL reg.\n", __func__);
return ret;
}
- /* Use the following shadow copy for our delay before enabling ADC.
- * Write all the registers. */
+ /*
+ * Use the following shadow copy for our delay before enabling ADC.
+ * Write all the registers.
+ */
for (i = 0, dev_reg = chip->tsl2x7x_config;
i < TSL2X7X_MAX_CONFIG_REG; i++) {
ret = i2c_smbus_write_byte_data(chip->client,
- TSL2X7X_CMD_REG + i, *dev_reg++);
+ TSL2X7X_CMD_REG + i,
+ *dev_reg++);
if (ret < 0) {
dev_err(&chip->client->dev,
"failed on write to reg %d.\n", i);
@@ -721,13 +728,15 @@ static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
mdelay(3); /* Power-on settling time */
- /* NOW enable the ADC
- * initialize the desired mode of operation */
+ /*
+ * NOW enable the ADC
+ * initialize the desired mode of operation
+ */
utmp = TSL2X7X_CNTL_PWR_ON |
TSL2X7X_CNTL_ADC_ENBL |
TSL2X7X_CNTL_PROX_DET_ENBL;
ret = i2c_smbus_write_byte_data(chip->client,
- TSL2X7X_CMD_REG | TSL2X7X_CNTRL, utmp);
+ TSL2X7X_CMD_REG | TSL2X7X_CNTRL, utmp);
if (ret < 0) {
dev_err(&chip->client->dev,
"%s: failed on 2nd CTRL reg.\n", __func__);
@@ -741,12 +750,13 @@ static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
reg_val = TSL2X7X_CNTL_PWR_ON | TSL2X7X_CNTL_ADC_ENBL;
if ((chip->tsl2x7x_settings.interrupts_en == 0x20) ||
- (chip->tsl2x7x_settings.interrupts_en == 0x30))
+ (chip->tsl2x7x_settings.interrupts_en == 0x30))
reg_val |= TSL2X7X_CNTL_PROX_DET_ENBL;
reg_val |= chip->tsl2x7x_settings.interrupts_en;
ret = i2c_smbus_write_byte_data(chip->client,
- (TSL2X7X_CMD_REG | TSL2X7X_CNTRL), reg_val);
+ (TSL2X7X_CMD_REG |
+ TSL2X7X_CNTRL), reg_val);
if (ret < 0)
dev_err(&chip->client->dev,
"%s: failed in tsl2x7x_IOCTL_INT_SET.\n",
@@ -754,8 +764,9 @@ static int tsl2x7x_chip_on(struct iio_dev *indio_dev)
/* Clear out any initial interrupts */
ret = i2c_smbus_write_byte(chip->client,
- TSL2X7X_CMD_REG | TSL2X7X_CMD_SPL_FN |
- TSL2X7X_CMD_PROXALS_INT_CLR);
+ TSL2X7X_CMD_REG |
+ TSL2X7X_CMD_SPL_FN |
+ TSL2X7X_CMD_PROXALS_INT_CLR);
if (ret < 0) {
dev_err(&chip->client->dev,
"%s: Failed to clear Int status\n",
@@ -776,7 +787,7 @@ static int tsl2x7x_chip_off(struct iio_dev *indio_dev)
chip->tsl2x7x_chip_status = TSL2X7X_CHIP_SUSPENDED;
ret = i2c_smbus_write_byte_data(chip->client,
- TSL2X7X_CMD_REG | TSL2X7X_CNTRL, 0x00);
+ TSL2X7X_CMD_REG | TSL2X7X_CNTRL, 0x00);
if (chip->pdata && chip->pdata->power_off)
chip->pdata->power_off(chip->client);
@@ -819,7 +830,7 @@ int tsl2x7x_invoke_change(struct iio_dev *indio_dev)
static
void tsl2x7x_prox_calculate(int *data, int length,
- struct tsl2x7x_prox_stat *statP)
+ struct tsl2x7x_prox_stat *statP)
{
int i;
int sample_sum;
@@ -843,7 +854,7 @@ void tsl2x7x_prox_calculate(int *data, int length,
tmp = data[i] - statP->mean;
sample_sum += tmp * tmp;
}
- statP->stddev = int_sqrt((long)sample_sum)/length;
+ statP->stddev = int_sqrt((long)sample_sum) / length;
}
/**
@@ -886,20 +897,21 @@ static void tsl2x7x_prox_cal(struct iio_dev *indio_dev)
tsl2x7x_get_prox(indio_dev);
prox_history[i] = chip->prox_data;
dev_info(&chip->client->dev, "2 i=%d prox data= %d\n",
- i, chip->prox_data);
+ i, chip->prox_data);
}
tsl2x7x_chip_off(indio_dev);
calP = &prox_stat_data[PROX_STAT_CAL];
tsl2x7x_prox_calculate(prox_history,
- chip->tsl2x7x_settings.prox_max_samples_cal, calP);
+ chip->tsl2x7x_settings.prox_max_samples_cal,
+ calP);
chip->tsl2x7x_settings.prox_thres_high = (calP->max << 1) - calP->mean;
dev_info(&chip->client->dev, " cal min=%d mean=%d max=%d\n",
- calP->min, calP->mean, calP->max);
+ calP->min, calP->mean, calP->max);
dev_info(&chip->client->dev,
- "%s proximity threshold set to %d\n",
- chip->client->name, chip->tsl2x7x_settings.prox_thres_high);
+ "%s proximity threshold set to %d\n",
+ chip->client->name, chip->tsl2x7x_settings.prox_thres_high);
/* back to the way they were */
chip->tsl2x7x_settings.interrupts_en = tmp_irq_settings;
@@ -908,7 +920,8 @@ static void tsl2x7x_prox_cal(struct iio_dev *indio_dev)
}
static ssize_t tsl2x7x_power_state_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
@@ -916,7 +929,8 @@ static ssize_t tsl2x7x_power_state_show(struct device *dev,
}
static ssize_t tsl2x7x_power_state_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
bool value;
@@ -933,7 +947,8 @@ static ssize_t tsl2x7x_power_state_store(struct device *dev,
}
static ssize_t tsl2x7x_gain_available_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
@@ -950,13 +965,15 @@ static ssize_t tsl2x7x_gain_available_show(struct device *dev,
}
static ssize_t tsl2x7x_prox_gain_available_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
return snprintf(buf, PAGE_SIZE, "%s\n", "1 2 4 8");
}
static ssize_t tsl2x7x_als_time_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
int y, z;
@@ -970,7 +987,8 @@ static ssize_t tsl2x7x_als_time_show(struct device *dev,
}
static ssize_t tsl2x7x_als_time_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
@@ -986,7 +1004,7 @@ static ssize_t tsl2x7x_als_time_store(struct device *dev,
TSL2X7X_MAX_TIMER_CNT - (u8)result.fract;
dev_info(&chip->client->dev, "%s: als time = %d",
- __func__, chip->tsl2x7x_settings.als_time);
+ __func__, chip->tsl2x7x_settings.als_time);
tsl2x7x_invoke_change(indio_dev);
@@ -997,7 +1015,8 @@ static IIO_CONST_ATTR(in_illuminance0_integration_time_available,
".00272 - .696");
static ssize_t tsl2x7x_als_cal_target_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
@@ -1006,7 +1025,8 @@ static ssize_t tsl2x7x_als_cal_target_show(struct device *dev,
}
static ssize_t tsl2x7x_als_cal_target_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
@@ -1025,7 +1045,8 @@ static ssize_t tsl2x7x_als_cal_target_store(struct device *dev,
/* persistence settings */
static ssize_t tsl2x7x_als_persistence_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
int y, z, filter_delay;
@@ -1041,7 +1062,8 @@ static ssize_t tsl2x7x_als_persistence_show(struct device *dev,
}
static ssize_t tsl2x7x_als_persistence_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
@@ -1063,7 +1085,7 @@ static ssize_t tsl2x7x_als_persistence_store(struct device *dev,
chip->tsl2x7x_settings.persistence |= (filter_delay & 0x0F);
dev_info(&chip->client->dev, "%s: als persistence = %d",
- __func__, filter_delay);
+ __func__, filter_delay);
tsl2x7x_invoke_change(indio_dev);
@@ -1071,7 +1093,8 @@ static ssize_t tsl2x7x_als_persistence_store(struct device *dev,
}
static ssize_t tsl2x7x_prox_persistence_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
int y, z, filter_delay;
@@ -1087,7 +1110,8 @@ static ssize_t tsl2x7x_prox_persistence_show(struct device *dev,
}
static ssize_t tsl2x7x_prox_persistence_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
@@ -1109,7 +1133,7 @@ static ssize_t tsl2x7x_prox_persistence_store(struct device *dev,
chip->tsl2x7x_settings.persistence |= ((filter_delay << 4) & 0xF0);
dev_info(&chip->client->dev, "%s: prox persistence = %d",
- __func__, filter_delay);
+ __func__, filter_delay);
tsl2x7x_invoke_change(indio_dev);
@@ -1117,7 +1141,8 @@ static ssize_t tsl2x7x_prox_persistence_store(struct device *dev,
}
static ssize_t tsl2x7x_do_calibrate(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
bool value;
@@ -1134,7 +1159,8 @@ static ssize_t tsl2x7x_do_calibrate(struct device *dev,
}
static ssize_t tsl2x7x_luxtable_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct tsl2X7X_chip *chip = iio_priv(dev_to_iio_dev(dev));
int i = 0;
@@ -1146,8 +1172,10 @@ static ssize_t tsl2x7x_luxtable_show(struct device *dev,
chip->tsl2x7x_device_lux[i].ch0,
chip->tsl2x7x_device_lux[i].ch1);
if (chip->tsl2x7x_device_lux[i].ratio == 0) {
- /* We just printed the first "0" entry.
- * Now get rid of the extra "," and break. */
+ /*
+ * We just printed the first "0" entry.
+ * Now get rid of the extra "," and break.
+ */
offset--;
break;
}
@@ -1159,11 +1187,12 @@ static ssize_t tsl2x7x_luxtable_show(struct device *dev,
}
static ssize_t tsl2x7x_luxtable_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
- int value[ARRAY_SIZE(chip->tsl2x7x_device_lux)*3 + 1];
+ int value[ARRAY_SIZE(chip->tsl2x7x_device_lux) * 3 + 1];
int n;
get_options(buf, ARRAY_SIZE(value), value);
@@ -1175,7 +1204,7 @@ static ssize_t tsl2x7x_luxtable_store(struct device *dev,
*/
n = value[0];
if ((n % 3) || n < 6 ||
- n > ((ARRAY_SIZE(chip->tsl2x7x_device_lux) - 1) * 3)) {
+ n > ((ARRAY_SIZE(chip->tsl2x7x_device_lux) - 1) * 3)) {
dev_info(dev, "LUX TABLE INPUT ERROR 1 Value[0]=%d\n", n);
return -EINVAL;
}
@@ -1198,7 +1227,8 @@ static ssize_t tsl2x7x_luxtable_store(struct device *dev,
}
static ssize_t tsl2x7x_do_prox_calibrate(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
bool value;
@@ -1391,10 +1421,10 @@ static int tsl2x7x_read_raw(struct iio_dev *indio_dev,
}
static int tsl2x7x_write_raw(struct iio_dev *indio_dev,
- struct iio_chan_spec const *chan,
- int val,
- int val2,
- long mask)
+ struct iio_chan_spec const *chan,
+ int val,
+ int val2,
+ long mask)
{
struct tsl2X7X_chip *chip = iio_priv(indio_dev);
@@ -1529,7 +1559,7 @@ static irqreturn_t tsl2x7x_event_handler(int irq, void *private)
u8 value;
value = i2c_smbus_read_byte_data(chip->client,
- TSL2X7X_CMD_REG | TSL2X7X_STATUS);
+ TSL2X7X_CMD_REG | TSL2X7X_STATUS);
/* What type of interrupt do we need to process */
if (value & TSL2X7X_STA_PRX_INTR) {
@@ -1545,16 +1575,16 @@ static irqreturn_t tsl2x7x_event_handler(int irq, void *private)
if (value & TSL2X7X_STA_ALS_INTR) {
tsl2x7x_get_lux(indio_dev); /* freshen data for ABI */
iio_push_event(indio_dev,
- IIO_UNMOD_EVENT_CODE(IIO_LIGHT,
- 0,
- IIO_EV_TYPE_THRESH,
- IIO_EV_DIR_EITHER),
- timestamp);
+ IIO_UNMOD_EVENT_CODE(IIO_LIGHT,
+ 0,
+ IIO_EV_TYPE_THRESH,
+ IIO_EV_DIR_EITHER),
+ timestamp);
}
/* Clear interrupt now that we have handled it. */
ret = i2c_smbus_write_byte(chip->client,
- TSL2X7X_CMD_REG | TSL2X7X_CMD_SPL_FN |
- TSL2X7X_CMD_PROXALS_INT_CLR);
+ TSL2X7X_CMD_REG | TSL2X7X_CMD_SPL_FN |
+ TSL2X7X_CMD_PROXALS_INT_CLR);
if (ret < 0)
dev_err(&chip->client->dev,
"Failed to clear irq from event handler. err = %d\n",
@@ -1616,6 +1646,7 @@ static struct attribute *tsl2X7X_ALS_event_attrs[] = {
&dev_attr_in_intensity0_thresh_period.attr,
NULL,
};
+
static struct attribute *tsl2X7X_PRX_event_attrs[] = {
&dev_attr_in_proximity0_thresh_period.attr,
NULL,
@@ -1857,7 +1888,7 @@ static const struct tsl2x7x_chip_info tsl2x7x_chip_info_tbl[] = {
};
static int tsl2x7x_probe(struct i2c_client *clientp,
- const struct i2c_device_id *id)
+ const struct i2c_device_id *id)
{
int ret;
unsigned char device_id;
@@ -1873,14 +1904,14 @@ static int tsl2x7x_probe(struct i2c_client *clientp,
i2c_set_clientdata(clientp, indio_dev);
ret = tsl2x7x_i2c_read(chip->client,
- TSL2X7X_CHIPID, &device_id);
+ TSL2X7X_CHIPID, &device_id);
if (ret < 0)
return ret;
if ((!tsl2x7x_device_id(&device_id, id->driver_data)) ||
- (tsl2x7x_device_id(&device_id, id->driver_data) == -EINVAL)) {
+ (tsl2x7x_device_id(&device_id, id->driver_data) == -EINVAL)) {
dev_info(&chip->client->dev,
- "%s: i2c device found does not match expected id\n",
+ "%s: i2c device found does not match expected id\n",
__func__);
return -EINVAL;
}
@@ -1892,8 +1923,10 @@ static int tsl2x7x_probe(struct i2c_client *clientp,
return ret;
}
- /* ALS and PROX functions can be invoked via user space poll
- * or H/W interrupt. If busy return last sample. */
+ /*
+ * ALS and PROX functions can be invoked via user space poll
+ * or H/W interrupt. If busy return last sample.
+ */
mutex_init(&chip->als_mutex);
mutex_init(&chip->prox_mutex);
diff --git a/drivers/staging/iio/meter/ade7753.c b/drivers/staging/iio/meter/ade7753.c
index 69287108f793..4b5f05fdadcd 100644
--- a/drivers/staging/iio/meter/ade7753.c
+++ b/drivers/staging/iio/meter/ade7753.c
@@ -333,7 +333,8 @@ static int ade7753_set_irq(struct device *dev, bool enable)
if (enable)
irqen |= BIT(3); /* Enables an interrupt when a data is
- present in the waveform register */
+ * present in the waveform register
+ */
else
irqen &= ~BIT(3);
@@ -528,7 +529,6 @@ static int ade7753_probe(struct spi_device *spi)
return iio_device_register(indio_dev);
}
-/* fixme, confirm ordering in this function */
static int ade7753_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
diff --git a/drivers/staging/iio/meter/ade7754.c b/drivers/staging/iio/meter/ade7754.c
index f4188e17d30b..c46bef641613 100644
--- a/drivers/staging/iio/meter/ade7754.c
+++ b/drivers/staging/iio/meter/ade7754.c
@@ -351,7 +351,8 @@ static int ade7754_set_irq(struct device *dev, bool enable)
if (enable)
irqen |= BIT(14); /* Enables an interrupt when a data is
- present in the waveform register */
+ * present in the waveform register
+ */
else
irqen &= ~BIT(14);
@@ -558,7 +559,6 @@ powerdown_on_error:
return ret;
}
-/* fixme, confirm ordering in this function */
static int ade7754_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
diff --git a/drivers/staging/iio/meter/ade7758.h b/drivers/staging/iio/meter/ade7758.h
index f6739e2c24b1..1d04ec9524c8 100644
--- a/drivers/staging/iio/meter/ade7758.h
+++ b/drivers/staging/iio/meter/ade7758.h
@@ -129,6 +129,7 @@ struct ade7758_state {
unsigned char tx_buf[8];
};
+
#ifdef CONFIG_IIO_BUFFER
/* At the moment triggers are only used for ring buffer
* filling. This may change!
@@ -138,25 +139,22 @@ void ade7758_remove_trigger(struct iio_dev *indio_dev);
int ade7758_probe_trigger(struct iio_dev *indio_dev);
ssize_t ade7758_read_data_from_ring(struct device *dev,
- struct device_attribute *attr,
- char *buf);
-
+ struct device_attribute *attr, char *buf);
int ade7758_configure_ring(struct iio_dev *indio_dev);
void ade7758_unconfigure_ring(struct iio_dev *indio_dev);
int ade7758_set_irq(struct device *dev, bool enable);
-int ade7758_spi_write_reg_8(struct device *dev,
- u8 reg_address, u8 val);
-int ade7758_spi_read_reg_8(struct device *dev,
- u8 reg_address, u8 *val);
+int ade7758_spi_write_reg_8(struct device *dev, u8 reg_address, u8 val);
+int ade7758_spi_read_reg_8(struct device *dev, u8 reg_address, u8 *val);
#else /* CONFIG_IIO_BUFFER */
static inline void ade7758_remove_trigger(struct iio_dev *indio_dev)
{
}
+
static inline int ade7758_probe_trigger(struct iio_dev *indio_dev)
{
return 0;
@@ -166,16 +164,20 @@ static int ade7758_configure_ring(struct iio_dev *indio_dev)
{
return 0;
}
+
static inline void ade7758_unconfigure_ring(struct iio_dev *indio_dev)
{
}
+
static inline int ade7758_initialize_ring(struct iio_ring_buffer *ring)
{
return 0;
}
+
static inline void ade7758_uninitialize_ring(struct iio_dev *indio_dev)
{
}
+
#endif /* CONFIG_IIO_BUFFER */
#endif
diff --git a/drivers/staging/iio/meter/ade7758_core.c b/drivers/staging/iio/meter/ade7758_core.c
index 40f5afaa984b..ebb8a1993303 100644
--- a/drivers/staging/iio/meter/ade7758_core.c
+++ b/drivers/staging/iio/meter/ade7758_core.c
@@ -24,9 +24,7 @@
#include "meter.h"
#include "ade7758.h"
-int ade7758_spi_write_reg_8(struct device *dev,
- u8 reg_address,
- u8 val)
+int ade7758_spi_write_reg_8(struct device *dev, u8 reg_address, u8 val)
{
int ret;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
@@ -42,9 +40,8 @@ int ade7758_spi_write_reg_8(struct device *dev,
return ret;
}
-static int ade7758_spi_write_reg_16(struct device *dev,
- u8 reg_address,
- u16 value)
+static int ade7758_spi_write_reg_16(struct device *dev, u8 reg_address,
+ u16 value)
{
int ret;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
@@ -68,9 +65,8 @@ static int ade7758_spi_write_reg_16(struct device *dev,
return ret;
}
-static int ade7758_spi_write_reg_24(struct device *dev,
- u8 reg_address,
- u32 value)
+static int ade7758_spi_write_reg_24(struct device *dev, u8 reg_address,
+ u32 value)
{
int ret;
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
@@ -95,9 +91,7 @@ static int ade7758_spi_write_reg_24(struct device *dev,
return ret;
}
-int ade7758_spi_read_reg_8(struct device *dev,
- u8 reg_address,
- u8 *val)
+int ade7758_spi_read_reg_8(struct device *dev, u8 reg_address, u8 *val)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7758_state *st = iio_priv(indio_dev);
@@ -124,7 +118,7 @@ int ade7758_spi_read_reg_8(struct device *dev,
ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
if (ret) {
dev_err(&st->us->dev, "problem when reading 8 bit register 0x%02X",
- reg_address);
+ reg_address);
goto error_ret;
}
*val = st->rx[0];
@@ -134,9 +128,8 @@ error_ret:
return ret;
}
-static int ade7758_spi_read_reg_16(struct device *dev,
- u8 reg_address,
- u16 *val)
+static int ade7758_spi_read_reg_16(struct device *dev, u8 reg_address,
+ u16 *val)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7758_state *st = iio_priv(indio_dev);
@@ -156,7 +149,6 @@ static int ade7758_spi_read_reg_16(struct device *dev,
},
};
-
mutex_lock(&st->buf_lock);
st->tx[0] = ADE7758_READ_REG(reg_address);
st->tx[1] = 0;
@@ -165,7 +157,7 @@ static int ade7758_spi_read_reg_16(struct device *dev,
ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
if (ret) {
dev_err(&st->us->dev, "problem when reading 16 bit register 0x%02X",
- reg_address);
+ reg_address);
goto error_ret;
}
@@ -176,9 +168,8 @@ error_ret:
return ret;
}
-static int ade7758_spi_read_reg_24(struct device *dev,
- u8 reg_address,
- u32 *val)
+static int ade7758_spi_read_reg_24(struct device *dev, u8 reg_address,
+ u32 *val)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
struct ade7758_state *st = iio_priv(indio_dev);
@@ -207,7 +198,7 @@ static int ade7758_spi_read_reg_24(struct device *dev,
ret = spi_sync_transfer(st->us, xfers, ARRAY_SIZE(xfers));
if (ret) {
dev_err(&st->us->dev, "problem when reading 24 bit register 0x%02X",
- reg_address);
+ reg_address);
goto error_ret;
}
*val = (st->rx[0] << 16) | (st->rx[1] << 8) | st->rx[2];
@@ -218,8 +209,7 @@ error_ret:
}
static ssize_t ade7758_read_8bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr, char *buf)
{
int ret;
u8 val = 0;
@@ -233,8 +223,7 @@ static ssize_t ade7758_read_8bit(struct device *dev,
}
static ssize_t ade7758_read_16bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr, char *buf)
{
int ret;
u16 val = 0;
@@ -248,8 +237,7 @@ static ssize_t ade7758_read_16bit(struct device *dev,
}
static ssize_t ade7758_read_24bit(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr, char *buf)
{
int ret;
u32 val = 0;
@@ -263,9 +251,8 @@ static ssize_t ade7758_read_24bit(struct device *dev,
}
static ssize_t ade7758_write_8bit(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
@@ -281,9 +268,8 @@ error_ret:
}
static ssize_t ade7758_write_16bit(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev_attr *this_attr = to_iio_dev_attr(attr);
int ret;
@@ -427,7 +413,8 @@ int ade7758_set_irq(struct device *dev, bool enable)
if (enable)
irqen |= BIT(16); /* Enables an interrupt when a data is
- present in the waveform register */
+ * present in the waveform register
+ */
else
irqen &= ~BIT(16);
@@ -479,16 +466,13 @@ err_ret:
}
static ssize_t ade7758_read_frequency(struct device *dev,
- struct device_attribute *attr,
- char *buf)
+ struct device_attribute *attr, char *buf)
{
int ret;
u8 t;
int sps;
- ret = ade7758_spi_read_reg_8(dev,
- ADE7758_WAVMODE,
- &t);
+ ret = ade7758_spi_read_reg_8(dev, ADE7758_WAVMODE, &t);
if (ret)
return ret;
@@ -499,9 +483,8 @@ static ssize_t ade7758_read_frequency(struct device *dev,
}
static ssize_t ade7758_write_frequency(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct iio_dev *indio_dev = dev_to_iio_dev(dev);
u16 val;
@@ -532,18 +515,14 @@ static ssize_t ade7758_write_frequency(struct device *dev,
goto out;
}
- ret = ade7758_spi_read_reg_8(dev,
- ADE7758_WAVMODE,
- &reg);
+ ret = ade7758_spi_read_reg_8(dev, ADE7758_WAVMODE, &reg);
if (ret)
goto out;
reg &= ~(5 << 3);
reg |= t << 5;
- ret = ade7758_spi_write_reg_8(dev,
- ADE7758_WAVMODE,
- reg);
+ ret = ade7758_spi_write_reg_8(dev, ADE7758_WAVMODE, reg);
out:
mutex_unlock(&indio_dev->mlock);
diff --git a/drivers/staging/iio/meter/ade7758_ring.c b/drivers/staging/iio/meter/ade7758_ring.c
index 9a24e0226f8b..a6b76d4b1c80 100644
--- a/drivers/staging/iio/meter/ade7758_ring.c
+++ b/drivers/staging/iio/meter/ade7758_ring.c
@@ -33,7 +33,7 @@ static int ade7758_spi_read_burst(struct iio_dev *indio_dev)
return ret;
}
-static int ade7758_write_waveform_type(struct device *dev, unsigned type)
+static int ade7758_write_waveform_type(struct device *dev, unsigned int type)
{
int ret;
u8 reg;
@@ -85,7 +85,7 @@ static irqreturn_t ade7758_trigger_handler(int irq, void *p)
**/
static int ade7758_ring_preenable(struct iio_dev *indio_dev)
{
- unsigned channel;
+ unsigned int channel;
if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
return -EINVAL;
diff --git a/drivers/staging/iio/meter/ade7759.c b/drivers/staging/iio/meter/ade7759.c
index 684e612a88b9..80144d40d9ca 100644
--- a/drivers/staging/iio/meter/ade7759.c
+++ b/drivers/staging/iio/meter/ade7759.c
@@ -289,7 +289,8 @@ static int ade7759_set_irq(struct device *dev, bool enable)
if (enable)
irqen |= BIT(3); /* Enables an interrupt when a data is
- present in the waveform register */
+ * present in the waveform register
+ */
else
irqen &= ~BIT(3);
@@ -476,7 +477,6 @@ static int ade7759_probe(struct spi_device *spi)
return iio_device_register(indio_dev);
}
-/* fixme, confirm ordering in this function */
static int ade7759_remove(struct spi_device *spi)
{
struct iio_dev *indio_dev = spi_get_drvdata(spi);
diff --git a/drivers/staging/iio/meter/ade7854.c b/drivers/staging/iio/meter/ade7854.c
index 9e439af7100d..75e8685e6df2 100644
--- a/drivers/staging/iio/meter/ade7854.c
+++ b/drivers/staging/iio/meter/ade7854.c
@@ -421,7 +421,8 @@ static int ade7854_set_irq(struct device *dev, bool enable)
if (enable)
irqen |= BIT(17); /* 1: interrupt enabled when all periodical
- (at 8 kHz rate) DSP computations finish. */
+ * (at 8 kHz rate) DSP computations finish.
+ */
else
irqen &= ~BIT(17);
diff --git a/drivers/staging/iio/resolver/ad2s1210.h b/drivers/staging/iio/resolver/ad2s1210.h
index c7158f6e61c2..e9b2147701fc 100644
--- a/drivers/staging/iio/resolver/ad2s1210.h
+++ b/drivers/staging/iio/resolver/ad2s1210.h
@@ -12,9 +12,9 @@
#define _AD2S1210_H
struct ad2s1210_platform_data {
- unsigned sample;
- unsigned a[2];
- unsigned res[2];
- bool gpioin;
+ unsigned int sample;
+ unsigned int a[2];
+ unsigned int res[2];
+ bool gpioin;
};
#endif /* _AD2S1210_H */
diff --git a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
index 035dd456d7d6..38dca69a06eb 100644
--- a/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
+++ b/drivers/staging/iio/trigger/iio-trig-bfin-timer.c
@@ -55,12 +55,12 @@ static struct bfin_timer iio_bfin_timer_code[MAX_BLACKFIN_GPTIMERS] = {
};
struct bfin_tmr_state {
- struct iio_trigger *trig;
- struct bfin_timer *t;
- unsigned timer_num;
- bool output_enable;
- unsigned int duty;
- int irq;
+ struct iio_trigger *trig;
+ struct bfin_timer *t;
+ unsigned int timer_num;
+ bool output_enable;
+ unsigned int duty;
+ int irq;
};
static int iio_bfin_tmr_set_state(struct iio_trigger *trig, bool state)
@@ -178,7 +178,7 @@ static const struct iio_trigger_ops iio_bfin_tmr_trigger_ops = {
static int iio_bfin_tmr_trigger_probe(struct platform_device *pdev)
{
- struct iio_bfin_timer_trigger_pdata *pdata = pdev->dev.platform_data;
+ struct iio_bfin_timer_trigger_pdata *pdata;
struct bfin_tmr_state *st;
unsigned int config;
int ret;
@@ -221,6 +221,7 @@ static int iio_bfin_tmr_trigger_probe(struct platform_device *pdev)
config = PWM_OUT | PERIOD_CNT | IRQ_ENA;
+ pdata = dev_get_platdata(&pdev->dev);
if (pdata && pdata->output_enable) {
unsigned long long val;
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
index 40af75c4201a..4141afb101bb 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs.h
@@ -60,41 +60,12 @@
#define LNET_ACCEPTOR_MAX_RESERVED_PORT 1023
/*
- * libcfs pseudo device operations
- *
- * It's just draft now.
- */
-
-struct cfs_psdev_file {
- unsigned long off;
- void *private_data;
- unsigned long reserved1;
- unsigned long reserved2;
-};
-
-struct cfs_psdev_ops {
- int (*p_open)(unsigned long, void *);
- int (*p_close)(unsigned long, void *);
- int (*p_read)(struct cfs_psdev_file *, char *, unsigned long);
- int (*p_write)(struct cfs_psdev_file *, char *, unsigned long);
- int (*p_ioctl)(struct cfs_psdev_file *, unsigned long, void __user *);
-};
-
-/*
- * Drop into debugger, if possible. Implementation is provided by platform.
- */
-
-void cfs_enter_debugger(void);
-
-/*
* Defined by platform
*/
-int unshare_fs_struct(void);
sigset_t cfs_block_allsigs(void);
sigset_t cfs_block_sigs(unsigned long sigs);
sigset_t cfs_block_sigsinv(unsigned long sigs);
void cfs_restore_sigs(sigset_t);
-int cfs_signal_pending(void);
void cfs_clear_sigpending(void);
/*
@@ -117,7 +88,25 @@ void cfs_get_random_bytes(void *buf, int size);
#include "libcfs_workitem.h"
#include "libcfs_hash.h"
#include "libcfs_fail.h"
-#include "libcfs_crypto.h"
+
+struct libcfs_ioctl_handler {
+ struct list_head item;
+ int (*handle_ioctl)(unsigned int cmd, struct libcfs_ioctl_hdr *hdr);
+};
+
+#define DECLARE_IOCTL_HANDLER(ident, func) \
+ struct libcfs_ioctl_handler ident = { \
+ .item = LIST_HEAD_INIT(ident.item), \
+ .handle_ioctl = func \
+ }
+
+int libcfs_register_ioctl(struct libcfs_ioctl_handler *hand);
+int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand);
+
+int libcfs_ioctl_getdata(struct libcfs_ioctl_hdr **hdr_pp,
+ const struct libcfs_ioctl_hdr __user *uparam);
+int libcfs_ioctl_data_adjust(struct libcfs_ioctl_data *data);
+int libcfs_ioctl(unsigned long cmd, void __user *arg);
/* container_of depends on "likely" which is defined in libcfs_private.h */
static inline void *__container_of(void *ptr, unsigned long shift)
@@ -143,8 +132,6 @@ extern struct miscdevice libcfs_dev;
extern char lnet_upcall[1024];
extern char lnet_debug_log_upcall[1024];
-extern struct cfs_psdev_ops libcfs_psdev_ops;
-
extern struct cfs_wi_sched *cfs_sched_rehash;
struct lnet_debugfs_symlink_def {
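The ioctl handler hooks that move into libcfs.h above (struct libcfs_ioctl_handler, DECLARE_IOCTL_HANDLER, libcfs_register_ioctl/libcfs_deregister_ioctl) are meant for subsystems that service commands on the libcfs device. A minimal consumer sketch, assuming only the declarations in this hunk; the handler name, the command it claims, the module hooks, and the -EINVAL convention for unclaimed commands are illustrative assumptions:

static int my_handle_ioctl(unsigned int cmd, struct libcfs_ioctl_hdr *hdr)
{
	if (cmd != IOC_LIBCFS_PING)	/* only claim commands we own */
		return -EINVAL;		/* assumed: lets another handler try */

	/* ... interpret hdr as the command-specific payload ... */
	return 0;
}

static DECLARE_IOCTL_HANDLER(my_ioctl_handler, my_handle_ioctl);

static int __init my_module_init(void)
{
	return libcfs_register_ioctl(&my_ioctl_handler);
}

static void __exit my_module_exit(void)
{
	libcfs_deregister_ioctl(&my_ioctl_handler);
}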
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
index 9e62c59714b7..81d8079e3b5e 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_cpu.h
@@ -203,6 +203,85 @@ int cfs_cpt_spread_node(struct cfs_cpt_table *cptab, int cpt);
*/
int cfs_cpu_ht_nsiblings(int cpu);
+/*
+ * allocate per-cpu-partition data, returned value is an array of pointers,
+ * variable can be indexed by CPU ID.
+ * cptab != NULL: size of array is number of CPU partitions
+ * cptab == NULL: size of array is number of HW cores
+ */
+void *cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size);
+/*
+ * destroy per-cpu-partition variable
+ */
+void cfs_percpt_free(void *vars);
+int cfs_percpt_number(void *vars);
+
+#define cfs_percpt_for_each(var, i, vars) \
+ for (i = 0; i < cfs_percpt_number(vars) && \
+ ((var) = (vars)[i]) != NULL; i++)
+
+/*
+ * percpu partition lock
+ *
+ * There are some use-cases like this in Lustre:
+ * . each CPU partition has its own private data which is frequently changed,
+ * and mostly by the local CPU partition.
+ * . all CPU partitions share some global data, these data are rarely changed.
+ *
+ * LNet is a typical example.
+ * CPU partition lock is designed for this kind of use-cases:
+ * . each CPU partition has its own private lock
+ * . change on private data just needs to take the private lock
+ * . read on shared data just needs to take _any_ of private locks
+ * . change on shared data needs to take _all_ private locks,
+ * which is slow and should be really rare.
+ */
+enum {
+ CFS_PERCPT_LOCK_EX = -1, /* negative */
+};
+
+struct cfs_percpt_lock {
+ /* cpu-partition-table for this lock */
+ struct cfs_cpt_table *pcl_cptab;
+ /* exclusively locked */
+ unsigned int pcl_locked;
+ /* private lock table */
+ spinlock_t **pcl_locks;
+};
+
+/* return number of private locks */
+#define cfs_percpt_lock_num(pcl) cfs_cpt_number(pcl->pcl_cptab)
+
+/*
+ * create a cpu-partition lock based on CPU partition table \a cptab,
+ * each private lock has extra \a psize bytes padding data
+ */
+struct cfs_percpt_lock *cfs_percpt_lock_create(struct cfs_cpt_table *cptab,
+ struct lock_class_key *keys);
+/* destroy a cpu-partition lock */
+void cfs_percpt_lock_free(struct cfs_percpt_lock *pcl);
+
+/* lock private lock \a index of \a pcl */
+void cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index);
+
+/* unlock private lock \a index of \a pcl */
+void cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index);
+
+#define CFS_PERCPT_LOCK_KEYS 256
+
+/* NB: don't allocate keys dynamically, lockdep needs them to be in ".data" */
+#define cfs_percpt_lock_alloc(cptab) \
+({ \
+ static struct lock_class_key ___keys[CFS_PERCPT_LOCK_KEYS]; \
+ struct cfs_percpt_lock *___lk; \
+ \
+ if (cfs_cpt_number(cptab) > CFS_PERCPT_LOCK_KEYS) \
+ ___lk = cfs_percpt_lock_create(cptab, NULL); \
+ else \
+ ___lk = cfs_percpt_lock_create(cptab, ___keys); \
+ ___lk; \
+})
+
/**
* iterate over all CPU partitions in \a cptab
*/
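The comment block above spells out the intended locking pattern; below is a minimal sketch of it, assuming only the declarations from this hunk. The counter structure, the static handles, and their setup are illustrative (my_lock would come from cfs_percpt_lock_alloc(cptab) and my_counters from cfs_percpt_alloc(cptab, sizeof(struct my_counter))):

struct my_counter {
	long c_value;
};

static struct cfs_percpt_lock *my_lock;		/* one private lock per partition */
static struct my_counter **my_counters;		/* per-partition private data */

/* Fast path: only the local partition's data and private lock are touched. */
static void my_counter_add(int cpt, long delta)
{
	cfs_percpt_lock(my_lock, cpt);
	my_counters[cpt]->c_value += delta;
	cfs_percpt_unlock(my_lock, cpt);
}

/* Slow path: CFS_PERCPT_LOCK_EX takes every private lock, so the sum is
 * stable against all concurrent writers; per the comment above this path
 * should be rare. */
static long my_counter_sum(void)
{
	struct my_counter *cnt;
	long sum = 0;
	int i;

	cfs_percpt_lock(my_lock, CFS_PERCPT_LOCK_EX);
	cfs_percpt_for_each(cnt, i, my_counters)
		sum += cnt->c_value;
	cfs_percpt_unlock(my_lock, CFS_PERCPT_LOCK_EX);

	return sum;
}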
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
index e8663697e7a6..02be7d7608a5 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_crypto.h
@@ -46,7 +46,8 @@ enum cfs_crypto_hash_alg {
CFS_HASH_ALG_SHA384,
CFS_HASH_ALG_SHA512,
CFS_HASH_ALG_CRC32C,
- CFS_HASH_ALG_MAX
+ CFS_HASH_ALG_MAX,
+ CFS_HASH_ALG_UNKNOWN = 0xff
};
static struct cfs_crypto_hash_type hash_types[] = {
@@ -59,11 +60,22 @@ static struct cfs_crypto_hash_type hash_types[] = {
[CFS_HASH_ALG_SHA256] = { "sha256", 0, 32 },
[CFS_HASH_ALG_SHA384] = { "sha384", 0, 48 },
[CFS_HASH_ALG_SHA512] = { "sha512", 0, 64 },
+ [CFS_HASH_ALG_MAX] = { NULL, 0, 64 },
};
-/** Return pointer to type of hash for valid hash algorithm identifier */
+/* Maximum size of hash_types[].cht_size */
+#define CFS_CRYPTO_HASH_DIGESTSIZE_MAX 64
+
+/**
+ * Return hash algorithm information for the specified algorithm identifier
+ *
+ * Hash information includes algorithm name, initial seed, hash size.
+ *
+ * \retval cfs_crypto_hash_type for valid ID (CFS_HASH_ALG_*)
+ * \retval NULL for unknown algorithm identifier
+ */
static inline const struct cfs_crypto_hash_type *
- cfs_crypto_hash_type(unsigned char hash_alg)
+cfs_crypto_hash_type(enum cfs_crypto_hash_alg hash_alg)
{
struct cfs_crypto_hash_type *ht;
@@ -75,8 +87,16 @@ static inline const struct cfs_crypto_hash_type *
return NULL;
}
-/** Return hash name for valid hash algorithm identifier or "unknown" */
-static inline const char *cfs_crypto_hash_name(unsigned char hash_alg)
+/**
+ * Return hash name for hash algorithm identifier
+ *
+ * \param[in] hash_alg hash algorithm id (CFS_HASH_ALG_*)
+ *
+ * \retval string name of known hash algorithm
+ * \retval "unknown" if hash algorithm is unknown
+ */
+static inline const char *
+cfs_crypto_hash_name(enum cfs_crypto_hash_alg hash_alg)
{
const struct cfs_crypto_hash_type *ht;
@@ -86,8 +106,15 @@ static inline const char *cfs_crypto_hash_name(unsigned char hash_alg)
return "unknown";
}
-/** Return digest size for valid algorithm identifier or 0 */
-static inline int cfs_crypto_hash_digestsize(unsigned char hash_alg)
+/**
+ * Return digest size for hash algorithm type
+ *
+ * \param[in] hash_alg hash algorithm id (CFS_HASH_ALG_*)
+ *
+ * \retval hash algorithm digest size in bytes
+ * \retval 0 if hash algorithm type is unknown
+ */
+static inline int cfs_crypto_hash_digestsize(enum cfs_crypto_hash_alg hash_alg)
{
const struct cfs_crypto_hash_type *ht;
@@ -97,36 +124,24 @@ static inline int cfs_crypto_hash_digestsize(unsigned char hash_alg)
return 0;
}
-/** Return hash identifier for valid hash algorithm name or 0xFF */
+/**
+ * Find hash algorithm ID for the specified algorithm name
+ *
+ * \retval hash algorithm ID for valid ID (CFS_HASH_ALG_*)
+ * \retval CFS_HASH_ALG_UNKNOWN for unknown algorithm name
+ */
static inline unsigned char cfs_crypto_hash_alg(const char *algname)
{
- unsigned char i;
+ enum cfs_crypto_hash_alg hash_alg;
- for (i = 0; i < CFS_HASH_ALG_MAX; i++)
- if (!strcmp(hash_types[i].cht_name, algname))
- break;
- return (i == CFS_HASH_ALG_MAX ? 0xFF : i);
+ for (hash_alg = 0; hash_alg < CFS_HASH_ALG_MAX; hash_alg++)
+ if (strcmp(hash_types[hash_alg].cht_name, algname) == 0)
+ return hash_alg;
+
+ return CFS_HASH_ALG_UNKNOWN;
}
-/** Calculate hash digest for buffer.
- * @param alg id of hash algorithm
- * @param buf buffer of data
- * @param buf_len buffer len
- * @param key initial value for algorithm, if it is NULL,
- * default initial value should be used.
- * @param key_len len of initial value
- * @param hash [out] pointer to hash, if it is NULL, hash_len is
- * set to valid digest size in bytes, retval -ENOSPC.
- * @param hash_len [in,out] size of hash buffer
- * @returns status of operation
- * @retval -EINVAL if buf, buf_len, hash_len or alg_id is invalid
- * @retval -ENODEV if this algorithm is unsupported
- * @retval -ENOSPC if pointer to hash is NULL, or hash_len less than
- * digest size
- * @retval 0 for success
- * @retval < 0 other errors from lower layers.
- */
-int cfs_crypto_hash_digest(unsigned char alg,
+int cfs_crypto_hash_digest(enum cfs_crypto_hash_alg hash_alg,
const void *buf, unsigned int buf_len,
unsigned char *key, unsigned int key_len,
unsigned char *hash, unsigned int *hash_len);
@@ -134,66 +149,17 @@ int cfs_crypto_hash_digest(unsigned char alg,
/* cfs crypto hash descriptor */
struct cfs_crypto_hash_desc;
-/** Allocate and initialize descriptor for hash algorithm.
- * @param alg algorithm id
- * @param key initial value for algorithm, if it is NULL,
- * default initial value should be used.
- * @param key_len len of initial value
- * @returns pointer to descriptor of hash instance
- * @retval ERR_PTR(error) when errors occurred.
- */
-struct cfs_crypto_hash_desc*
- cfs_crypto_hash_init(unsigned char alg,
- unsigned char *key, unsigned int key_len);
-
-/** Update digest by part of data.
- * @param desc hash descriptor
- * @param page data page
- * @param offset data offset
- * @param len data len
- * @returns status of operation
- * @retval 0 for success.
- */
+struct cfs_crypto_hash_desc *
+cfs_crypto_hash_init(enum cfs_crypto_hash_alg hash_alg,
+ unsigned char *key, unsigned int key_len);
int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *desc,
struct page *page, unsigned int offset,
unsigned int len);
-
-/** Update digest by part of data.
- * @param desc hash descriptor
- * @param buf pointer to data buffer
- * @param buf_len size of data at buffer
- * @returns status of operation
- * @retval 0 for success.
- */
int cfs_crypto_hash_update(struct cfs_crypto_hash_desc *desc, const void *buf,
unsigned int buf_len);
-
-/** Finalize hash calculation, copy hash digest to buffer, destroy hash
- * descriptor.
- * @param desc hash descriptor
- * @param hash buffer pointer to store hash digest
- * @param hash_len pointer to hash buffer size, if NULL
- * destroy hash descriptor
- * @returns status of operation
- * @retval -ENOSPC if hash is NULL, or *hash_len less than
- * digest size
- * @retval 0 for success
- * @retval < 0 other errors from lower layers.
- */
int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *desc,
unsigned char *hash, unsigned int *hash_len);
-/**
- * Register crypto hash algorithms
- */
int cfs_crypto_register(void);
-
-/**
- * Unregister
- */
void cfs_crypto_unregister(void);
-
-/** Return hash speed in Mbytes per second for valid hash algorithm
- * identifier. If test was unsuccessful -1 would be returned.
- */
-int cfs_crypto_hash_speed(unsigned char hash_alg);
+int cfs_crypto_hash_speed(enum cfs_crypto_hash_alg hash_alg);
#endif
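With the signatures above, a one-shot digest still goes through cfs_crypto_hash_digest(); only the algorithm argument becomes the enum, and a failed name lookup is reported as CFS_HASH_ALG_UNKNOWN rather than a bare 0xFF. A minimal caller sketch, assuming only the prototypes in this header; the wrapper name, the chosen algorithm name, and the error codes are illustrative assumptions:

static int my_checksum(const void *buf, unsigned int len,
		       unsigned char *out, unsigned int *out_len)
{
	enum cfs_crypto_hash_alg alg = cfs_crypto_hash_alg("crc32c");

	if (alg == CFS_HASH_ALG_UNKNOWN)
		return -ENODEV;

	if (*out_len < cfs_crypto_hash_digestsize(alg))
		return -ENOSPC;

	/* NULL key with key_len == 0 selects the algorithm's default seed */
	return cfs_crypto_hash_digest(alg, buf, len, NULL, 0, out, out_len);
}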
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
index 98430e7108c1..455c54d0d17c 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_debug.h
@@ -85,7 +85,6 @@ struct ptldebug_header {
#define PH_FLAG_FIRST_RECORD 1
/* Debugging subsystems (32 bits, non-overlapping) */
-/* keep these in sync with lnet/utils/debug.c and lnet/libcfs/debug.c */
#define S_UNDEFINED 0x00000001
#define S_MDC 0x00000002
#define S_MDS 0x00000004
@@ -118,10 +117,14 @@ struct ptldebug_header {
#define S_MGS 0x20000000
#define S_FID 0x40000000 /* b_new_cmd */
#define S_FLD 0x80000000 /* b_new_cmd */
-/* keep these in sync with lnet/utils/debug.c and lnet/libcfs/debug.c */
+
+#define LIBCFS_DEBUG_SUBSYS_NAMES { \
+ "undefined", "mdc", "mds", "osc", "ost", "class", "log", \
+ "llite", "rpc", "mgmt", "lnet", "lnd", "pinger", "filter", "", \
+ "echo", "ldlm", "lov", "lquota", "osd", "lfsck", "", "", "lmv", \
+ "", "sec", "gss", "", "mgc", "mgs", "fid", "fld", NULL }
/* Debugging masks (32 bits, non-overlapping) */
-/* keep these in sync with lnet/utils/debug.c and lnet/libcfs/debug.c */
#define D_TRACE 0x00000001 /* ENTRY/EXIT markers */
#define D_INODE 0x00000002
#define D_SUPER 0x00000004
@@ -151,9 +154,14 @@ struct ptldebug_header {
#define D_QUOTA 0x04000000
#define D_SEC 0x08000000
#define D_LFSCK 0x10000000 /* For both OI scrub and LFSCK */
-/* keep these in sync with lnet/{utils,libcfs}/debug.c */
+#define D_HSM 0x20000000
-#define D_HSM D_TRACE
+#define LIBCFS_DEBUG_MASKS_NAMES { \
+ "trace", "inode", "super", "ext2", "malloc", "cache", "info", \
+ "ioctl", "neterror", "net", "warning", "buffs", "other", \
+ "dentry", "nettrace", "page", "dlmtrace", "error", "emerg", \
+ "ha", "rpctrace", "vfstrace", "reada", "mmap", "config", \
+ "console", "quota", "sec", "lfsck", "hsm", NULL }
#define D_CANTMASK (D_ERROR | D_EMERG | D_WARNING | D_CONSOLE)
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
index aa69c6a33d19..2e008bffc89a 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_fail.h
@@ -38,6 +38,7 @@
extern unsigned long cfs_fail_loc;
extern unsigned int cfs_fail_val;
+extern int cfs_fail_err;
extern wait_queue_head_t cfs_race_waitq;
extern int cfs_race_state;
@@ -70,9 +71,14 @@ enum {
#define CFS_FAIL_RAND 0x08000000 /* fail 1/N of the times */
#define CFS_FAIL_USR1 0x04000000 /* user flag */
-#define CFS_FAIL_PRECHECK(id) (cfs_fail_loc && \
- (cfs_fail_loc & CFS_FAIL_MASK_LOC) == \
- ((id) & CFS_FAIL_MASK_LOC))
+#define CFS_FAULT 0x02000000 /* match any CFS_FAULT_CHECK */
+
+static inline bool CFS_FAIL_PRECHECK(__u32 id)
+{
+ return cfs_fail_loc != 0 &&
+ ((cfs_fail_loc & CFS_FAIL_MASK_LOC) == (id & CFS_FAIL_MASK_LOC) ||
+ (cfs_fail_loc & id & CFS_FAULT));
+}
static inline int cfs_fail_check_set(__u32 id, __u32 value,
int set, int quiet)
@@ -144,6 +150,9 @@ static inline int cfs_fail_timeout_set(__u32 id, __u32 value, int ms, int set)
#define CFS_FAIL_TIMEOUT_MS_ORSET(id, value, ms) \
cfs_fail_timeout_set(id, value, ms, CFS_FAIL_LOC_ORSET)
+#define CFS_FAULT_CHECK(id) \
+ CFS_FAIL_CHECK(CFS_FAULT | (id))
+
/* The idea here is to synchronise two threads to force a race. The
* first thread that calls this with a matching fail_loc is put to
* sleep. The next thread that calls with the same fail_loc wakes up
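The new CFS_FAULT bit widens CFS_FAIL_PRECHECK() so that one cfs_fail_loc setting can match every CFS_FAULT_CHECK() site instead of a single location. A minimal sketch of an injection point, assuming only the macros above; the fault identifier and the error path are illustrative assumptions:

#define MY_FAULT_DROP_REPLY	0x1234	/* illustrative fault location */

static int my_send_reply(void *msg)
{
	/*
	 * Can fire either when cfs_fail_loc names this exact location or
	 * when cfs_fail_loc has the CFS_FAULT bit set, subject to the usual
	 * CFS_FAIL_* frequency/once modifiers.
	 */
	if (CFS_FAULT_CHECK(MY_FAULT_DROP_REPLY))
		return -EIO;

	/* ... normal reply path ... */
	return 0;
}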
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
index c3f2332fa043..119986bc7961 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_hash.h
@@ -245,7 +245,7 @@ struct cfs_hash {
/** # of iterators (caller of cfs_hash_for_each_*) */
__u32 hs_iterators;
/** rehash workitem */
- cfs_workitem_t hs_rehash_wi;
+ struct cfs_workitem hs_rehash_wi;
/** refcount on this hash table */
atomic_t hs_refcount;
/** rehash buckets-table */
@@ -262,7 +262,7 @@ struct cfs_hash {
/** bits when we found the max depth */
unsigned int hs_dep_bits;
/** workitem to output max depth */
- cfs_workitem_t hs_dep_wi;
+ struct cfs_workitem hs_dep_wi;
#endif
/** name of htable */
char hs_name[0];
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
index 5ca99bd6f4e9..4b9102bd95d5 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_ioctl.h
@@ -34,13 +34,16 @@
* libcfs/include/libcfs/libcfs_ioctl.h
*
* Low-level ioctl data structures. Kernel ioctl functions declared here,
- * and user space functions are in libcfsutil_ioctl.h.
+ * and user space functions are in libcfs/util/ioctl.h.
*
*/
#ifndef __LIBCFS_IOCTL_H__
#define __LIBCFS_IOCTL_H__
+#include <linux/types.h>
+#include <linux/ioctl.h>
+
#define LIBCFS_IOCTL_VERSION 0x0001000a
#define LIBCFS_IOCTL_VERSION2 0x0001000b
@@ -49,6 +52,9 @@ struct libcfs_ioctl_hdr {
__u32 ioc_version;
};
+/** max size to copy from userspace */
+#define LIBCFS_IOC_DATA_MAX (128 * 1024)
+
struct libcfs_ioctl_data {
struct libcfs_ioctl_hdr ioc_hdr;
@@ -73,67 +79,48 @@ struct libcfs_ioctl_data {
char ioc_bulk[0];
};
-#define ioc_priority ioc_u32[0]
-
struct libcfs_debug_ioctl_data {
struct libcfs_ioctl_hdr hdr;
unsigned int subs;
unsigned int debug;
};
-#define LIBCFS_IOC_INIT(data) \
-do { \
- memset(&data, 0, sizeof(data)); \
- data.ioc_version = LIBCFS_IOCTL_VERSION; \
- data.ioc_len = sizeof(data); \
-} while (0)
-
-struct libcfs_ioctl_handler {
- struct list_head item;
- int (*handle_ioctl)(unsigned int cmd, struct libcfs_ioctl_hdr *hdr);
-};
-
-#define DECLARE_IOCTL_HANDLER(ident, func) \
- struct libcfs_ioctl_handler ident = { \
- /* .item = */ LIST_HEAD_INIT(ident.item), \
- /* .handle_ioctl = */ func \
- }
+/* 'f' ioctls are defined in lustre_ioctl.h and lustre_user.h except for: */
+#define LIBCFS_IOC_DEBUG_MASK _IOWR('f', 250, long)
+#define IOCTL_LIBCFS_TYPE long
-/* FIXME check conflict with lustre_lib.h */
-#define LIBCFS_IOC_DEBUG_MASK _IOWR('f', 250, long)
-
-#define IOC_LIBCFS_TYPE 'e'
-#define IOC_LIBCFS_MIN_NR 30
+#define IOC_LIBCFS_TYPE ('e')
+#define IOC_LIBCFS_MIN_NR 30
/* libcfs ioctls */
-#define IOC_LIBCFS_PANIC _IOWR('e', 30, long)
-#define IOC_LIBCFS_CLEAR_DEBUG _IOWR('e', 31, long)
-#define IOC_LIBCFS_MARK_DEBUG _IOWR('e', 32, long)
-#define IOC_LIBCFS_MEMHOG _IOWR('e', 36, long)
+/* IOC_LIBCFS_PANIC obsolete in 2.8.0, was _IOWR('e', 30, IOCTL_LIBCFS_TYPE) */
+#define IOC_LIBCFS_CLEAR_DEBUG _IOWR('e', 31, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_MARK_DEBUG _IOWR('e', 32, IOCTL_LIBCFS_TYPE)
+/* IOC_LIBCFS_MEMHOG obsolete in 2.8.0, was _IOWR('e', 36, IOCTL_LIBCFS_TYPE) */
/* lnet ioctls */
-#define IOC_LIBCFS_GET_NI _IOWR('e', 50, long)
-#define IOC_LIBCFS_FAIL_NID _IOWR('e', 51, long)
-#define IOC_LIBCFS_NOTIFY_ROUTER _IOWR('e', 55, long)
-#define IOC_LIBCFS_UNCONFIGURE _IOWR('e', 56, long)
-/* #define IOC_LIBCFS_PORTALS_COMPATIBILITY _IOWR('e', 57, long) */
-#define IOC_LIBCFS_LNET_DIST _IOWR('e', 58, long)
-#define IOC_LIBCFS_CONFIGURE _IOWR('e', 59, long)
-#define IOC_LIBCFS_TESTPROTOCOMPAT _IOWR('e', 60, long)
-#define IOC_LIBCFS_PING _IOWR('e', 61, long)
-/* #define IOC_LIBCFS_DEBUG_PEER _IOWR('e', 62, long) */
-#define IOC_LIBCFS_LNETST _IOWR('e', 63, long)
-#define IOC_LIBCFS_LNET_FAULT _IOWR('e', 64, long)
+#define IOC_LIBCFS_GET_NI _IOWR('e', 50, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_FAIL_NID _IOWR('e', 51, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_NOTIFY_ROUTER _IOWR('e', 55, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_UNCONFIGURE _IOWR('e', 56, IOCTL_LIBCFS_TYPE)
+/* IOC_LIBCFS_PORTALS_COMPATIBILITY _IOWR('e', 57, IOCTL_LIBCFS_TYPE) */
+#define IOC_LIBCFS_LNET_DIST _IOWR('e', 58, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_CONFIGURE _IOWR('e', 59, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_TESTPROTOCOMPAT _IOWR('e', 60, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_PING _IOWR('e', 61, IOCTL_LIBCFS_TYPE)
+/* IOC_LIBCFS_DEBUG_PEER _IOWR('e', 62, IOCTL_LIBCFS_TYPE) */
+#define IOC_LIBCFS_LNETST _IOWR('e', 63, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_LNET_FAULT _IOWR('e', 64, IOCTL_LIBCFS_TYPE)
/* lnd ioctls */
-#define IOC_LIBCFS_REGISTER_MYNID _IOWR('e', 70, long)
-#define IOC_LIBCFS_CLOSE_CONNECTION _IOWR('e', 71, long)
-#define IOC_LIBCFS_PUSH_CONNECTION _IOWR('e', 72, long)
-#define IOC_LIBCFS_GET_CONN _IOWR('e', 73, long)
-#define IOC_LIBCFS_DEL_PEER _IOWR('e', 74, long)
-#define IOC_LIBCFS_ADD_PEER _IOWR('e', 75, long)
-#define IOC_LIBCFS_GET_PEER _IOWR('e', 76, long)
+#define IOC_LIBCFS_REGISTER_MYNID _IOWR('e', 70, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_CLOSE_CONNECTION _IOWR('e', 71, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_PUSH_CONNECTION _IOWR('e', 72, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_GET_CONN _IOWR('e', 73, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_DEL_PEER _IOWR('e', 74, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_ADD_PEER _IOWR('e', 75, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_GET_PEER _IOWR('e', 76, IOCTL_LIBCFS_TYPE)
/* ioctl 77 is free for use */
-#define IOC_LIBCFS_ADD_INTERFACE _IOWR('e', 78, long)
-#define IOC_LIBCFS_DEL_INTERFACE _IOWR('e', 79, long)
-#define IOC_LIBCFS_GET_INTERFACE _IOWR('e', 80, long)
+#define IOC_LIBCFS_ADD_INTERFACE _IOWR('e', 78, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_DEL_INTERFACE _IOWR('e', 79, IOCTL_LIBCFS_TYPE)
+#define IOC_LIBCFS_GET_INTERFACE _IOWR('e', 80, IOCTL_LIBCFS_TYPE)
/*
* DLC Specific IOCTL numbers.
@@ -155,76 +142,4 @@ struct libcfs_ioctl_handler {
#define IOC_LIBCFS_GET_LNET_STATS _IOWR(IOC_LIBCFS_TYPE, 91, IOCTL_CONFIG_SIZE)
#define IOC_LIBCFS_MAX_NR 91
-static inline int libcfs_ioctl_packlen(struct libcfs_ioctl_data *data)
-{
- int len = sizeof(*data);
-
- len += cfs_size_round(data->ioc_inllen1);
- len += cfs_size_round(data->ioc_inllen2);
- return len;
-}
-
-static inline bool libcfs_ioctl_is_invalid(struct libcfs_ioctl_data *data)
-{
- if (data->ioc_hdr.ioc_len > (1 << 30)) {
- CERROR("LIBCFS ioctl: ioc_len larger than 1<<30\n");
- return 1;
- }
- if (data->ioc_inllen1 > (1<<30)) {
- CERROR("LIBCFS ioctl: ioc_inllen1 larger than 1<<30\n");
- return 1;
- }
- if (data->ioc_inllen2 > (1<<30)) {
- CERROR("LIBCFS ioctl: ioc_inllen2 larger than 1<<30\n");
- return 1;
- }
- if (data->ioc_inlbuf1 && !data->ioc_inllen1) {
- CERROR("LIBCFS ioctl: inlbuf1 pointer but 0 length\n");
- return 1;
- }
- if (data->ioc_inlbuf2 && !data->ioc_inllen2) {
- CERROR("LIBCFS ioctl: inlbuf2 pointer but 0 length\n");
- return 1;
- }
- if (data->ioc_pbuf1 && !data->ioc_plen1) {
- CERROR("LIBCFS ioctl: pbuf1 pointer but 0 length\n");
- return 1;
- }
- if (data->ioc_pbuf2 && !data->ioc_plen2) {
- CERROR("LIBCFS ioctl: pbuf2 pointer but 0 length\n");
- return 1;
- }
- if (data->ioc_plen1 && !data->ioc_pbuf1) {
- CERROR("LIBCFS ioctl: plen1 nonzero but no pbuf1 pointer\n");
- return 1;
- }
- if (data->ioc_plen2 && !data->ioc_pbuf2) {
- CERROR("LIBCFS ioctl: plen2 nonzero but no pbuf2 pointer\n");
- return 1;
- }
- if ((__u32)libcfs_ioctl_packlen(data) != data->ioc_hdr.ioc_len) {
- CERROR("LIBCFS ioctl: packlen != ioc_len\n");
- return 1;
- }
- if (data->ioc_inllen1 &&
- data->ioc_bulk[data->ioc_inllen1 - 1] != '\0') {
- CERROR("LIBCFS ioctl: inlbuf1 not 0 terminated\n");
- return 1;
- }
- if (data->ioc_inllen2 &&
- data->ioc_bulk[cfs_size_round(data->ioc_inllen1) +
- data->ioc_inllen2 - 1] != '\0') {
- CERROR("LIBCFS ioctl: inlbuf2 not 0 terminated\n");
- return 1;
- }
- return 0;
-}
-
-int libcfs_register_ioctl(struct libcfs_ioctl_handler *hand);
-int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand);
-int libcfs_ioctl_getdata_len(const struct libcfs_ioctl_hdr __user *arg,
- __u32 *buf_len);
-int libcfs_ioctl_popdata(void __user *arg, void *buf, int size);
-int libcfs_ioctl_data_adjust(struct libcfs_ioctl_data *data);
-
#endif /* __LIBCFS_IOCTL_H__ */
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
index 082fe6de90e4..ac4e8cfe6c8c 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_prim.h
@@ -40,21 +40,32 @@
#ifndef __LIBCFS_PRIM_H__
#define __LIBCFS_PRIM_H__
-void add_wait_queue_exclusive_head(wait_queue_head_t *, wait_queue_t *);
-
/*
* Memory
*/
-#ifndef memory_pressure_get
-#define memory_pressure_get() (0)
-#endif
-#ifndef memory_pressure_set
-#define memory_pressure_set() do {} while (0)
-#endif
-#ifndef memory_pressure_clr
-#define memory_pressure_clr() do {} while (0)
+#if BITS_PER_LONG == 32
+/* limit to lowmem on 32-bit systems */
+#define NUM_CACHEPAGES \
+ min(totalram_pages, 1UL << (30 - PAGE_SHIFT) * 3 / 4)
+#else
+#define NUM_CACHEPAGES totalram_pages
#endif
+static inline unsigned int memory_pressure_get(void)
+{
+ return current->flags & PF_MEMALLOC;
+}
+
+static inline void memory_pressure_set(void)
+{
+ current->flags |= PF_MEMALLOC;
+}
+
+static inline void memory_pressure_clr(void)
+{
+ current->flags &= ~PF_MEMALLOC;
+}
+
static inline int cfs_memory_pressure_get_and_set(void)
{
int old = memory_pressure_get();
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
index 13335437c69c..2fd2a9690a34 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_private.h
@@ -182,25 +182,6 @@ int libcfs_debug_clear_buffer(void);
int libcfs_debug_mark_buffer(const char *text);
/*
- * allocate per-cpu-partition data, returned value is an array of pointers,
- * variable can be indexed by CPU ID.
- * cptable != NULL: size of array is number of CPU partitions
- * cptable == NULL: size of array is number of HW cores
- */
-void *cfs_percpt_alloc(struct cfs_cpt_table *cptab, unsigned int size);
-/*
- * destroy per-cpu-partition variable
- */
-void cfs_percpt_free(void *vars);
-int cfs_percpt_number(void *vars);
-void *cfs_percpt_current(void *vars);
-void *cfs_percpt_index(void *vars, int idx);
-
-#define cfs_percpt_for_each(var, i, vars) \
- for (i = 0; i < cfs_percpt_number(vars) && \
- ((var) = (vars)[i]) != NULL; i++)
-
-/*
* allocate a variable array, returned value is an array of pointers.
* Caller can specify length of array by count.
*/
@@ -302,62 +283,6 @@ do { \
#define CFS_ALLOC_PTR(ptr) LIBCFS_ALLOC(ptr, sizeof(*(ptr)))
#define CFS_FREE_PTR(ptr) LIBCFS_FREE(ptr, sizeof(*(ptr)))
-/*
- * percpu partition lock
- *
- * There are some use-cases like this in Lustre:
- * . each CPU partition has it's own private data which is frequently changed,
- * and mostly by the local CPU partition.
- * . all CPU partitions share some global data, these data are rarely changed.
- *
- * LNet is typical example.
- * CPU partition lock is designed for this kind of use-cases:
- * . each CPU partition has it's own private lock
- * . change on private data just needs to take the private lock
- * . read on shared data just needs to take _any_ of private locks
- * . change on shared data needs to take _all_ private locks,
- * which is slow and should be really rare.
- */
-
-enum {
- CFS_PERCPT_LOCK_EX = -1, /* negative */
-};
-
-struct cfs_percpt_lock {
- /* cpu-partition-table for this lock */
- struct cfs_cpt_table *pcl_cptab;
- /* exclusively locked */
- unsigned int pcl_locked;
- /* private lock table */
- spinlock_t **pcl_locks;
-};
-
-/* return number of private locks */
-static inline int
-cfs_percpt_lock_num(struct cfs_percpt_lock *pcl)
-{
- return cfs_cpt_number(pcl->pcl_cptab);
-}
-
-/*
- * create a cpu-partition lock based on CPU partition table \a cptab,
- * each private lock has extra \a psize bytes padding data
- */
-struct cfs_percpt_lock *cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab);
-/* destroy a cpu-partition lock */
-void cfs_percpt_lock_free(struct cfs_percpt_lock *pcl);
-
-/* lock private lock \a index of \a pcl */
-void cfs_percpt_lock(struct cfs_percpt_lock *pcl, int index);
-/* unlock private lock \a index of \a pcl */
-void cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index);
-/* create percpt (atomic) refcount based on @cptab */
-atomic_t **cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int val);
-/* destroy percpt refcount */
-void cfs_percpt_atomic_free(atomic_t **refs);
-/* return sum of all percpu refs */
-int cfs_percpt_atomic_summary(atomic_t **refs);
-
/** Compile-time assertion.
* Check an invariant described by a constant expression at compile time by
diff --git a/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h b/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
index 5cc64f327a87..f9b20c5accbf 100644
--- a/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
+++ b/drivers/staging/lustre/include/linux/libcfs/libcfs_workitem.h
@@ -73,7 +73,7 @@ int cfs_wi_sched_create(char *name, struct cfs_cpt_table *cptab, int cpt,
struct cfs_workitem;
typedef int (*cfs_wi_action_t) (struct cfs_workitem *);
-typedef struct cfs_workitem {
+struct cfs_workitem {
/** chain on runq or rerunq */
struct list_head wi_list;
/** working function */
@@ -84,10 +84,10 @@ typedef struct cfs_workitem {
unsigned short wi_running:1;
/** scheduled */
unsigned short wi_scheduled:1;
-} cfs_workitem_t;
+};
static inline void
-cfs_wi_init(cfs_workitem_t *wi, void *data, cfs_wi_action_t action)
+cfs_wi_init(struct cfs_workitem *wi, void *data, cfs_wi_action_t action)
{
INIT_LIST_HEAD(&wi->wi_list);
@@ -97,9 +97,9 @@ cfs_wi_init(cfs_workitem_t *wi, void *data, cfs_wi_action_t action)
wi->wi_action = action;
}
-void cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi);
-int cfs_wi_deschedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi);
-void cfs_wi_exit(struct cfs_wi_sched *sched, cfs_workitem_t *wi);
+void cfs_wi_schedule(struct cfs_wi_sched *sched, struct cfs_workitem *wi);
+int cfs_wi_deschedule(struct cfs_wi_sched *sched, struct cfs_workitem *wi);
+void cfs_wi_exit(struct cfs_wi_sched *sched, struct cfs_workitem *wi);
int cfs_wi_startup(void);
void cfs_wi_shutdown(void);
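With the typedef removed, work items are declared as plain struct cfs_workitem and typically embedded in the owning structure. A minimal sketch of the updated usage, assuming only the declarations above; the job structure, its action, and the scheduler handle are illustrative assumptions:

struct my_job {
	struct cfs_workitem	mj_wi;	/* embedded work item */
	int			mj_arg;
};

static int my_job_action(struct cfs_workitem *wi)
{
	struct my_job *job = container_of(wi, struct my_job, mj_wi);

	/* ... perform the work described by job->mj_arg ... */
	return 0;	/* return value is interpreted by the work-item scheduler */
}

static void my_job_submit(struct cfs_wi_sched *sched, struct my_job *job)
{
	cfs_wi_init(&job->mj_wi, job, my_job_action);
	cfs_wi_schedule(sched, &job->mj_wi);
}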
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h b/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
index d94b2661658a..a268ef7aa19d 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/libcfs.h
@@ -60,6 +60,7 @@
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
+#include <linux/pagemap.h>
#include <linux/random.h>
#include <linux/rbtree.h>
#include <linux/rwsem.h>
@@ -83,7 +84,6 @@
#include <stdarg.h>
#include "linux-cpu.h"
#include "linux-time.h"
-#include "linux-mem.h"
#define LUSTRE_TRACE_SIZE (THREAD_SIZE >> 5)
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
index c04979ae0a38..f63cb47bc309 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-cpu.h
@@ -23,7 +23,7 @@
* This file is part of Lustre, http://www.lustre.org/
* Lustre is a trademark of Sun Microsystems, Inc.
*
- * libcfs/include/libcfs/linux/linux-mem.h
+ * libcfs/include/libcfs/linux/linux-cpu.h
*
* Basic library routines.
*
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
deleted file mode 100644
index 837eb22749c3..000000000000
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-mem.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * libcfs/include/libcfs/linux/linux-mem.h
- *
- * Basic library routines.
- */
-
-#ifndef __LIBCFS_LINUX_CFS_MEM_H__
-#define __LIBCFS_LINUX_CFS_MEM_H__
-
-#ifndef __LIBCFS_LIBCFS_H__
-#error Do not #include this file directly. #include <linux/libcfs/libcfs.h> instead
-#endif
-
-#include <linux/mm.h>
-#include <linux/vmalloc.h>
-#include <linux/pagemap.h>
-#include <linux/slab.h>
-#include <linux/memcontrol.h>
-#include <linux/mm_inline.h>
-
-#ifndef HAVE_LIBCFS_CPT
-/* Need this for cfs_cpt_table */
-#include "../libcfs_cpu.h"
-#endif
-
-#define CFS_PAGE_MASK (~((__u64)PAGE_SIZE-1))
-#define page_index(p) ((p)->index)
-
-#define memory_pressure_get() (current->flags & PF_MEMALLOC)
-#define memory_pressure_set() do { current->flags |= PF_MEMALLOC; } while (0)
-#define memory_pressure_clr() do { current->flags &= ~PF_MEMALLOC; } while (0)
-
-#if BITS_PER_LONG == 32
-/* limit to lowmem on 32-bit systems */
-#define NUM_CACHEPAGES \
- min(totalram_pages, 1UL << (30 - PAGE_SHIFT) * 3 / 4)
-#else
-#define NUM_CACHEPAGES totalram_pages
-#endif
-
-#define DECL_MMSPACE mm_segment_t __oldfs
-#define MMSPACE_OPEN \
- do { __oldfs = get_fs(); set_fs(get_ds()); } while (0)
-#define MMSPACE_CLOSE set_fs(__oldfs)
-
-#endif /* __LINUX_CFS_MEM_H__ */
diff --git a/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h b/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
index ed8764b11c80..7656b09b8752 100644
--- a/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
+++ b/drivers/staging/lustre/include/linux/libcfs/linux/linux-time.h
@@ -70,12 +70,12 @@ static inline unsigned long cfs_time_current(void)
static inline long cfs_time_seconds(int seconds)
{
- return ((long)seconds) * HZ;
+ return ((long)seconds) * msecs_to_jiffies(MSEC_PER_SEC);
}
static inline long cfs_duration_sec(long d)
{
- return d / HZ;
+ return d / msecs_to_jiffies(MSEC_PER_SEC);
}
#define cfs_time_current_64 get_jiffies_64
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-dlc.h b/drivers/staging/lustre/include/linux/lnet/lib-dlc.h
index 84a19e96ea04..6ce9accb91ad 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-dlc.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-dlc.h
@@ -37,10 +37,37 @@
#define LNET_MAX_SHOW_NUM_CPT 128
#define LNET_UNDEFINED_HOPS ((__u32) -1)
+struct lnet_ioctl_config_lnd_cmn_tunables {
+ __u32 lct_version;
+ __u32 lct_peer_timeout;
+ __u32 lct_peer_tx_credits;
+ __u32 lct_peer_rtr_credits;
+ __u32 lct_max_tx_credits;
+};
+
+struct lnet_ioctl_config_o2iblnd_tunables {
+ __u32 lnd_version;
+ __u32 lnd_peercredits_hiw;
+ __u32 lnd_map_on_demand;
+ __u32 lnd_concurrent_sends;
+ __u32 lnd_fmr_pool_size;
+ __u32 lnd_fmr_flush_trigger;
+ __u32 lnd_fmr_cache;
+ __u32 pad;
+};
+
+struct lnet_ioctl_config_lnd_tunables {
+ struct lnet_ioctl_config_lnd_cmn_tunables lt_cmn;
+ union {
+ struct lnet_ioctl_config_o2iblnd_tunables lt_o2ib;
+ } lt_tun_u;
+};
+
struct lnet_ioctl_net_config {
char ni_interfaces[LNET_MAX_INTERFACES][LNET_MAX_STR_LEN];
__u32 ni_status;
__u32 ni_cpts[LNET_MAX_SHOW_NUM_CPT];
+ char cfg_bulk[0];
};
#define LNET_TINY_BUF_IDX 0
@@ -81,7 +108,7 @@ struct lnet_ioctl_config_data {
__s32 net_peer_rtr_credits;
__s32 net_max_tx_credits;
__u32 net_cksum_algo;
- __u32 net_pad;
+ __u32 net_interface_count;
} cfg_net;
struct {
__u32 buf_enable;
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
index dfc0208dc3a7..513a8225f888 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-lnet.h
@@ -463,10 +463,6 @@ int lnet_del_route(__u32 net, lnet_nid_t gw_nid);
void lnet_destroy_routes(void);
int lnet_get_route(int idx, __u32 *net, __u32 *hops,
lnet_nid_t *gateway, __u32 *alive, __u32 *priority);
-int lnet_get_net_config(int idx, __u32 *cpt_count, __u64 *nid,
- int *peer_timeout, int *peer_tx_credits,
- int *peer_rtr_cr, int *max_tx_credits,
- struct lnet_ioctl_net_config *net_config);
int lnet_get_rtr_pool_cfg(int idx, struct lnet_ioctl_pool_cfg *pool_cfg);
void lnet_router_debugfs_init(void);
@@ -478,9 +474,8 @@ int lnet_rtrpools_enable(void);
void lnet_rtrpools_disable(void);
void lnet_rtrpools_free(int keep_pools);
lnet_remotenet_t *lnet_find_net_locked(__u32 net);
-int lnet_dyn_add_ni(lnet_pid_t requested_pid, char *nets,
- __s32 peer_timeout, __s32 peer_cr, __s32 peer_buf_cr,
- __s32 credits);
+int lnet_dyn_add_ni(lnet_pid_t requested_pid,
+ struct lnet_ioctl_config_data *conf);
int lnet_dyn_del_ni(__u32 net);
int lnet_clear_lazy_portal(struct lnet_ni *ni, int portal, char *reason);
diff --git a/drivers/staging/lustre/include/linux/lnet/lib-types.h b/drivers/staging/lustre/include/linux/lnet/lib-types.h
index 29c72f8c2f99..24c4a08e6dc6 100644
--- a/drivers/staging/lustre/include/linux/lnet/lib-types.h
+++ b/drivers/staging/lustre/include/linux/lnet/lib-types.h
@@ -273,6 +273,8 @@ typedef struct lnet_ni {
int **ni_refs; /* percpt reference count */
time64_t ni_last_alive;/* when I was last alive */
lnet_ni_status_t *ni_status; /* my health status */
+ /* per NI LND tunables */
+ struct lnet_ioctl_config_lnd_tunables *ni_lnd_tunables;
/* equivalent interfaces to use */
char *ni_interfaces[LNET_MAX_INTERFACES];
} lnet_ni_t;
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
index 0d32e6541a3f..6c59f2ff2220 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.c
@@ -335,8 +335,8 @@ int kiblnd_create_peer(lnet_ni_t *ni, kib_peer_t **peerp, lnet_nid_t nid)
peer->ibp_nid = nid;
peer->ibp_error = 0;
peer->ibp_last_alive = 0;
- peer->ibp_max_frags = IBLND_CFG_RDMA_FRAGS;
- peer->ibp_queue_depth = *kiblnd_tunables.kib_peertxcredits;
+ peer->ibp_max_frags = kiblnd_cfg_rdma_frags(peer->ibp_ni);
+ peer->ibp_queue_depth = ni->ni_peertxcredits;
atomic_set(&peer->ibp_refcount, 1); /* 1 ref for caller */
INIT_LIST_HEAD(&peer->ibp_list); /* not in the peer table yet */
@@ -1283,65 +1283,86 @@ static void kiblnd_map_tx_pool(kib_tx_pool_t *tpo)
}
}
-struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev, kib_rdma_desc_t *rd,
+struct ib_mr *kiblnd_find_rd_dma_mr(struct lnet_ni *ni, kib_rdma_desc_t *rd,
int negotiated_nfrags)
{
- __u16 nfrags = (negotiated_nfrags != -1) ?
- negotiated_nfrags : *kiblnd_tunables.kib_map_on_demand;
+ kib_net_t *net = ni->ni_data;
+ kib_hca_dev_t *hdev = net->ibn_dev->ibd_hdev;
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables;
+ __u16 nfrags;
+ int mod;
+
+ tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
+ mod = tunables->lnd_map_on_demand;
+ nfrags = (negotiated_nfrags != -1) ? negotiated_nfrags : mod;
LASSERT(hdev->ibh_mrs);
- if (*kiblnd_tunables.kib_map_on_demand > 0 &&
- nfrags <= rd->rd_nfrags)
+ if (mod > 0 && nfrags <= rd->rd_nfrags)
return NULL;
return hdev->ibh_mrs;
}
-static void kiblnd_destroy_fmr_pool(kib_fmr_pool_t *pool)
+static void kiblnd_destroy_fmr_pool(kib_fmr_pool_t *fpo)
{
- LASSERT(!pool->fpo_map_count);
+ LASSERT(!fpo->fpo_map_count);
- if (pool->fpo_fmr_pool)
- ib_destroy_fmr_pool(pool->fpo_fmr_pool);
+ if (fpo->fpo_is_fmr) {
+ if (fpo->fmr.fpo_fmr_pool)
+ ib_destroy_fmr_pool(fpo->fmr.fpo_fmr_pool);
+ } else {
+ struct kib_fast_reg_descriptor *frd, *tmp;
+ int i = 0;
+
+ list_for_each_entry_safe(frd, tmp, &fpo->fast_reg.fpo_pool_list,
+ frd_list) {
+ list_del(&frd->frd_list);
+ ib_dereg_mr(frd->frd_mr);
+ LIBCFS_FREE(frd, sizeof(*frd));
+ i++;
+ }
+ if (i < fpo->fast_reg.fpo_pool_size)
+ CERROR("FastReg pool still has %d regions registered\n",
+ fpo->fast_reg.fpo_pool_size - i);
+ }
- if (pool->fpo_hdev)
- kiblnd_hdev_decref(pool->fpo_hdev);
+ if (fpo->fpo_hdev)
+ kiblnd_hdev_decref(fpo->fpo_hdev);
- LIBCFS_FREE(pool, sizeof(*pool));
+ LIBCFS_FREE(fpo, sizeof(*fpo));
}
static void kiblnd_destroy_fmr_pool_list(struct list_head *head)
{
- kib_fmr_pool_t *pool;
+ kib_fmr_pool_t *fpo, *tmp;
- while (!list_empty(head)) {
- pool = list_entry(head->next, kib_fmr_pool_t, fpo_list);
- list_del(&pool->fpo_list);
- kiblnd_destroy_fmr_pool(pool);
+ list_for_each_entry_safe(fpo, tmp, head, fpo_list) {
+ list_del(&fpo->fpo_list);
+ kiblnd_destroy_fmr_pool(fpo);
}
}
-static int kiblnd_fmr_pool_size(int ncpts)
+static int
+kiblnd_fmr_pool_size(struct lnet_ioctl_config_o2iblnd_tunables *tunables,
+ int ncpts)
{
- int size = *kiblnd_tunables.kib_fmr_pool_size / ncpts;
+ int size = tunables->lnd_fmr_pool_size / ncpts;
return max(IBLND_FMR_POOL, size);
}
-static int kiblnd_fmr_flush_trigger(int ncpts)
+static int
+kiblnd_fmr_flush_trigger(struct lnet_ioctl_config_o2iblnd_tunables *tunables,
+ int ncpts)
{
- int size = *kiblnd_tunables.kib_fmr_flush_trigger / ncpts;
+ int size = tunables->lnd_fmr_flush_trigger / ncpts;
return max(IBLND_FMR_POOL_FLUSH, size);
}
-static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps,
- kib_fmr_pool_t **pp_fpo)
+static int kiblnd_alloc_fmr_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t *fpo)
{
- /* FMR pool for RDMA */
- kib_dev_t *dev = fps->fps_net->ibn_dev;
- kib_fmr_pool_t *fpo;
struct ib_fmr_pool_param param = {
.max_pages_per_fmr = LNET_MAX_PAYLOAD / PAGE_SIZE,
.page_shift = PAGE_SHIFT,
@@ -1351,7 +1372,78 @@ static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps,
.dirty_watermark = fps->fps_flush_trigger,
.flush_function = NULL,
.flush_arg = NULL,
- .cache = !!*kiblnd_tunables.kib_fmr_cache};
+ .cache = !!fps->fps_cache };
+ int rc = 0;
+
+ fpo->fmr.fpo_fmr_pool = ib_create_fmr_pool(fpo->fpo_hdev->ibh_pd,
+ &param);
+ if (IS_ERR(fpo->fmr.fpo_fmr_pool)) {
+ rc = PTR_ERR(fpo->fmr.fpo_fmr_pool);
+ if (rc != -ENOSYS)
+ CERROR("Failed to create FMR pool: %d\n", rc);
+ else
+ CERROR("FMRs are not supported\n");
+ }
+
+ return rc;
+}
+
+static int kiblnd_alloc_freg_pool(kib_fmr_poolset_t *fps, kib_fmr_pool_t *fpo)
+{
+ struct kib_fast_reg_descriptor *frd, *tmp;
+ int i, rc;
+
+ INIT_LIST_HEAD(&fpo->fast_reg.fpo_pool_list);
+ fpo->fast_reg.fpo_pool_size = 0;
+ for (i = 0; i < fps->fps_pool_size; i++) {
+ LIBCFS_CPT_ALLOC(frd, lnet_cpt_table(), fps->fps_cpt,
+ sizeof(*frd));
+ if (!frd) {
+ CERROR("Failed to allocate a new fast_reg descriptor\n");
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ frd->frd_mr = ib_alloc_mr(fpo->fpo_hdev->ibh_pd,
+ IB_MR_TYPE_MEM_REG,
+ LNET_MAX_PAYLOAD / PAGE_SIZE);
+ if (IS_ERR(frd->frd_mr)) {
+ rc = PTR_ERR(frd->frd_mr);
+ CERROR("Failed to allocate ib_alloc_mr: %d\n", rc);
+ frd->frd_mr = NULL;
+ goto out_middle;
+ }
+
+ frd->frd_valid = true;
+
+ list_add_tail(&frd->frd_list, &fpo->fast_reg.fpo_pool_list);
+ fpo->fast_reg.fpo_pool_size++;
+ }
+
+ return 0;
+
+out_middle:
+ if (frd->frd_mr)
+ ib_dereg_mr(frd->frd_mr);
+ LIBCFS_FREE(frd, sizeof(*frd));
+
+out:
+ list_for_each_entry_safe(frd, tmp, &fpo->fast_reg.fpo_pool_list,
+ frd_list) {
+ list_del(&frd->frd_list);
+ ib_dereg_mr(frd->frd_mr);
+ LIBCFS_FREE(frd, sizeof(*frd));
+ }
+
+ return rc;
+}
+
+static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps,
+ kib_fmr_pool_t **pp_fpo)
+{
+ kib_dev_t *dev = fps->fps_net->ibn_dev;
+ struct ib_device_attr *dev_attr;
+ kib_fmr_pool_t *fpo;
int rc;
LIBCFS_CPT_ALLOC(fpo, lnet_cpt_table(), fps->fps_cpt, sizeof(*fpo));
@@ -1359,22 +1451,41 @@ static int kiblnd_create_fmr_pool(kib_fmr_poolset_t *fps,
return -ENOMEM;
fpo->fpo_hdev = kiblnd_current_hdev(dev);
-
- fpo->fpo_fmr_pool = ib_create_fmr_pool(fpo->fpo_hdev->ibh_pd, &param);
- if (IS_ERR(fpo->fpo_fmr_pool)) {
- rc = PTR_ERR(fpo->fpo_fmr_pool);
- CERROR("Failed to create FMR pool: %d\n", rc);
-
- kiblnd_hdev_decref(fpo->fpo_hdev);
- LIBCFS_FREE(fpo, sizeof(*fpo));
- return rc;
+ dev_attr = &fpo->fpo_hdev->ibh_ibdev->attrs;
+
+ /* Check for FMR or FastReg support */
+ fpo->fpo_is_fmr = 0;
+ if (fpo->fpo_hdev->ibh_ibdev->alloc_fmr &&
+ fpo->fpo_hdev->ibh_ibdev->dealloc_fmr &&
+ fpo->fpo_hdev->ibh_ibdev->map_phys_fmr &&
+ fpo->fpo_hdev->ibh_ibdev->unmap_fmr) {
+ LCONSOLE_INFO("Using FMR for registration\n");
+ fpo->fpo_is_fmr = 1;
+ } else if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS) {
+ LCONSOLE_INFO("Using FastReg for registration\n");
+ } else {
+ rc = -ENOSYS;
+ LCONSOLE_ERROR_MSG(rc, "IB device does not support FMRs nor FastRegs, can't register memory\n");
+ goto out_fpo;
}
+ if (fpo->fpo_is_fmr)
+ rc = kiblnd_alloc_fmr_pool(fps, fpo);
+ else
+ rc = kiblnd_alloc_freg_pool(fps, fpo);
+ if (rc)
+ goto out_fpo;
+
fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
- fpo->fpo_owner = fps;
+ fpo->fpo_owner = fps;
*pp_fpo = fpo;
return 0;
+
+out_fpo:
+ kiblnd_hdev_decref(fpo->fpo_hdev);
+ LIBCFS_FREE(fpo, sizeof(*fpo));
+ return rc;
}
static void kiblnd_fail_fmr_poolset(kib_fmr_poolset_t *fps,
@@ -1407,9 +1518,10 @@ static void kiblnd_fini_fmr_poolset(kib_fmr_poolset_t *fps)
}
}
-static int kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt,
- kib_net_t *net, int pool_size,
- int flush_trigger)
+static int
+kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt, int ncpts,
+ kib_net_t *net,
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables)
{
kib_fmr_pool_t *fpo;
int rc;
@@ -1418,8 +1530,11 @@ static int kiblnd_init_fmr_poolset(kib_fmr_poolset_t *fps, int cpt,
fps->fps_net = net;
fps->fps_cpt = cpt;
- fps->fps_pool_size = pool_size;
- fps->fps_flush_trigger = flush_trigger;
+
+ fps->fps_pool_size = kiblnd_fmr_pool_size(tunables, ncpts);
+ fps->fps_flush_trigger = kiblnd_fmr_flush_trigger(tunables, ncpts);
+ fps->fps_cache = tunables->lnd_fmr_cache;
+
spin_lock_init(&fps->fps_lock);
INIT_LIST_HEAD(&fps->fps_pool_list);
INIT_LIST_HEAD(&fps->fps_failed_pool_list);
@@ -1440,25 +1555,64 @@ static int kiblnd_fmr_pool_is_idle(kib_fmr_pool_t *fpo, unsigned long now)
return cfs_time_aftereq(now, fpo->fpo_deadline);
}
+static int
+kiblnd_map_tx_pages(kib_tx_t *tx, kib_rdma_desc_t *rd)
+{
+ __u64 *pages = tx->tx_pages;
+ kib_hca_dev_t *hdev;
+ int npages;
+ int size;
+ int i;
+
+ hdev = tx->tx_pool->tpo_hdev;
+
+ for (i = 0, npages = 0; i < rd->rd_nfrags; i++) {
+ for (size = 0; size < rd->rd_frags[i].rf_nob;
+ size += hdev->ibh_page_size) {
+ pages[npages++] = (rd->rd_frags[i].rf_addr &
+ hdev->ibh_page_mask) + size;
+ }
+ }
+
+ return npages;
+}
+
void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
{
LIST_HEAD(zombies);
kib_fmr_pool_t *fpo = fmr->fmr_pool;
- kib_fmr_poolset_t *fps = fpo->fpo_owner;
+ kib_fmr_poolset_t *fps;
unsigned long now = cfs_time_current();
kib_fmr_pool_t *tmp;
int rc;
- rc = ib_fmr_pool_unmap(fmr->fmr_pfmr);
- LASSERT(!rc);
+ if (!fpo)
+ return;
- if (status) {
- rc = ib_flush_fmr_pool(fpo->fpo_fmr_pool);
- LASSERT(!rc);
- }
+ fps = fpo->fpo_owner;
+ if (fpo->fpo_is_fmr) {
+ if (fmr->fmr_pfmr) {
+ rc = ib_fmr_pool_unmap(fmr->fmr_pfmr);
+ LASSERT(!rc);
+ fmr->fmr_pfmr = NULL;
+ }
+
+ if (status) {
+ rc = ib_flush_fmr_pool(fpo->fmr.fpo_fmr_pool);
+ LASSERT(!rc);
+ }
+ } else {
+ struct kib_fast_reg_descriptor *frd = fmr->fmr_frd;
+ if (frd) {
+ frd->frd_valid = false;
+ spin_lock(&fps->fps_lock);
+ list_add_tail(&frd->frd_list, &fpo->fast_reg.fpo_pool_list);
+ spin_unlock(&fps->fps_lock);
+ fmr->fmr_frd = NULL;
+ }
+ }
fmr->fmr_pool = NULL;
- fmr->fmr_pfmr = NULL;
spin_lock(&fps->fps_lock);
fpo->fpo_map_count--; /* decref the pool */
@@ -1479,11 +1633,15 @@ void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status)
kiblnd_destroy_fmr_pool_list(&zombies);
}
-int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
- __u64 iov, kib_fmr_t *fmr)
+int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, kib_tx_t *tx,
+ kib_rdma_desc_t *rd, __u32 nob, __u64 iov,
+ kib_fmr_t *fmr)
{
- struct ib_pool_fmr *pfmr;
+ __u64 *pages = tx->tx_pages;
+ bool is_rx = (rd != tx->tx_rd);
+ bool tx_pages_mapped = 0;
kib_fmr_pool_t *fpo;
+ int npages = 0;
__u64 version;
int rc;
@@ -1493,21 +1651,95 @@ int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages, int npages,
list_for_each_entry(fpo, &fps->fps_pool_list, fpo_list) {
fpo->fpo_deadline = cfs_time_shift(IBLND_POOL_DEADLINE);
fpo->fpo_map_count++;
- spin_unlock(&fps->fps_lock);
- pfmr = ib_fmr_pool_map_phys(fpo->fpo_fmr_pool,
- pages, npages, iov);
- if (likely(!IS_ERR(pfmr))) {
- fmr->fmr_pool = fpo;
- fmr->fmr_pfmr = pfmr;
- return 0;
+ if (fpo->fpo_is_fmr) {
+ struct ib_pool_fmr *pfmr;
+
+ spin_unlock(&fps->fps_lock);
+
+ if (!tx_pages_mapped) {
+ npages = kiblnd_map_tx_pages(tx, rd);
+ tx_pages_mapped = 1;
+ }
+
+ pfmr = ib_fmr_pool_map_phys(fpo->fmr.fpo_fmr_pool,
+ pages, npages, iov);
+ if (likely(!IS_ERR(pfmr))) {
+ fmr->fmr_key = is_rx ? pfmr->fmr->rkey :
+ pfmr->fmr->lkey;
+ fmr->fmr_frd = NULL;
+ fmr->fmr_pfmr = pfmr;
+ fmr->fmr_pool = fpo;
+ return 0;
+ }
+ rc = PTR_ERR(pfmr);
+ } else {
+ if (!list_empty(&fpo->fast_reg.fpo_pool_list)) {
+ struct kib_fast_reg_descriptor *frd;
+ struct ib_reg_wr *wr;
+ struct ib_mr *mr;
+ int n;
+
+ frd = list_first_entry(&fpo->fast_reg.fpo_pool_list,
+ struct kib_fast_reg_descriptor,
+ frd_list);
+ list_del(&frd->frd_list);
+ spin_unlock(&fps->fps_lock);
+
+ mr = frd->frd_mr;
+
+ if (!frd->frd_valid) {
+ __u32 key = is_rx ? mr->rkey : mr->lkey;
+ struct ib_send_wr *inv_wr;
+
+ inv_wr = &frd->frd_inv_wr;
+ memset(inv_wr, 0, sizeof(*inv_wr));
+ inv_wr->opcode = IB_WR_LOCAL_INV;
+ inv_wr->wr_id = IBLND_WID_MR;
+ inv_wr->ex.invalidate_rkey = key;
+
+ /* Bump the key */
+ key = ib_inc_rkey(key);
+ ib_update_fast_reg_key(mr, key);
+ }
+
+ n = ib_map_mr_sg(mr, tx->tx_frags,
+ tx->tx_nfrags, NULL, PAGE_SIZE);
+ if (unlikely(n != tx->tx_nfrags)) {
+ CERROR("Failed to map mr %d/%d elements\n",
+ n, tx->tx_nfrags);
+ return n < 0 ? n : -EINVAL;
+ }
+
+ mr->iova = iov;
+
+ /* Prepare FastReg WR */
+ wr = &frd->frd_fastreg_wr;
+ memset(wr, 0, sizeof(*wr));
+ wr->wr.opcode = IB_WR_REG_MR;
+ wr->wr.wr_id = IBLND_WID_MR;
+ wr->wr.num_sge = 0;
+ wr->wr.send_flags = 0;
+ wr->mr = mr;
+ wr->key = is_rx ? mr->rkey : mr->lkey;
+ wr->access = (IB_ACCESS_LOCAL_WRITE |
+ IB_ACCESS_REMOTE_WRITE);
+
+ fmr->fmr_key = is_rx ? mr->rkey : mr->lkey;
+ fmr->fmr_frd = frd;
+ fmr->fmr_pfmr = NULL;
+ fmr->fmr_pool = fpo;
+ return 0;
+ }
+ spin_unlock(&fps->fps_lock);
+ rc = -EBUSY;
}
spin_lock(&fps->fps_lock);
fpo->fpo_map_count--;
- if (PTR_ERR(pfmr) != -EAGAIN) {
+ if (rc != -EAGAIN) {
spin_unlock(&fps->fps_lock);
- return PTR_ERR(pfmr);
+ return rc;
}
/* EAGAIN and ... */
@@ -1932,25 +2164,28 @@ static void kiblnd_net_fini_pools(kib_net_t *net)
}
}
-static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
+static int kiblnd_net_init_pools(kib_net_t *net, lnet_ni_t *ni, __u32 *cpts,
+ int ncpts)
{
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables;
unsigned long flags;
int cpt;
- int rc = 0;
+ int rc;
int i;
+ tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
+
read_lock_irqsave(&kiblnd_data.kib_global_lock, flags);
- if (!*kiblnd_tunables.kib_map_on_demand) {
+ if (!tunables->lnd_map_on_demand) {
read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
goto create_tx_pool;
}
read_unlock_irqrestore(&kiblnd_data.kib_global_lock, flags);
- if (*kiblnd_tunables.kib_fmr_pool_size <
- *kiblnd_tunables.kib_ntx / 4) {
+ if (tunables->lnd_fmr_pool_size < *kiblnd_tunables.kib_ntx / 4) {
CERROR("Can't set fmr pool size (%d) < ntx / 4(%d)\n",
- *kiblnd_tunables.kib_fmr_pool_size,
+ tunables->lnd_fmr_pool_size,
*kiblnd_tunables.kib_ntx / 4);
rc = -EINVAL;
goto failed;
@@ -1965,8 +2200,11 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
/*
* premapping can fail if ibd_nmr > 1, so we always create
* FMR pool and map-on-demand if premapping failed
+ *
+	 * cfs_percpt_alloc is creating an array of struct kib_fmr_poolset.
+	 * The number of struct kib_fmr_poolsets created is equal to the
+	 * number of CPTs that exist, i.e. net->ibn_fmr_ps[cpt].
*/
-
net->ibn_fmr_ps = cfs_percpt_alloc(lnet_cpt_table(),
sizeof(kib_fmr_poolset_t));
if (!net->ibn_fmr_ps) {
@@ -1977,9 +2215,8 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
for (i = 0; i < ncpts; i++) {
cpt = !cpts ? i : cpts[i];
- rc = kiblnd_init_fmr_poolset(net->ibn_fmr_ps[cpt], cpt, net,
- kiblnd_fmr_pool_size(ncpts),
- kiblnd_fmr_flush_trigger(ncpts));
+ rc = kiblnd_init_fmr_poolset(net->ibn_fmr_ps[cpt], cpt, ncpts,
+ net, tunables);
if (rc) {
CERROR("Can't initialize FMR pool for CPT %d: %d\n",
cpt, rc);
@@ -1991,6 +2228,11 @@ static int kiblnd_net_init_pools(kib_net_t *net, __u32 *cpts, int ncpts)
LASSERT(i == ncpts);
create_tx_pool:
+ /*
+	 * cfs_percpt_alloc is creating an array of struct kib_tx_poolset.
+	 * The number of struct kib_tx_poolsets created is equal to the
+	 * number of CPTs that exist, i.e. net->ibn_tx_ps[cpt].
+ */
net->ibn_tx_ps = cfs_percpt_alloc(lnet_cpt_table(),
sizeof(kib_tx_poolset_t));
if (!net->ibn_tx_ps) {
@@ -2694,10 +2936,9 @@ static int kiblnd_startup(lnet_ni_t *ni)
net->ibn_incarnation = tv.tv_sec * USEC_PER_SEC +
tv.tv_nsec / NSEC_PER_USEC;
- ni->ni_peertimeout = *kiblnd_tunables.kib_peertimeout;
- ni->ni_maxtxcredits = *kiblnd_tunables.kib_credits;
- ni->ni_peertxcredits = *kiblnd_tunables.kib_peertxcredits;
- ni->ni_peerrtrcredits = *kiblnd_tunables.kib_peerrtrcredits;
+ rc = kiblnd_tunables_setup(ni);
+ if (rc)
+ goto net_failed;
if (ni->ni_interfaces[0]) {
/* Use the IPoIB interface specified in 'networks=' */
@@ -2736,7 +2977,7 @@ static int kiblnd_startup(lnet_ni_t *ni)
if (rc)
goto failed;
- rc = kiblnd_net_init_pools(net, ni->ni_cpts, ni->ni_ncpts);
+ rc = kiblnd_net_init_pools(net, ni, ni->ni_cpts, ni->ni_ncpts);
if (rc) {
CERROR("Failed to initialize NI pools: %d\n", rc);
goto failed;
@@ -2779,8 +3020,6 @@ static void __exit ko2iblnd_exit(void)
static int __init ko2iblnd_init(void)
{
- int rc;
-
CLASSERT(sizeof(kib_msg_t) <= IBLND_MSG_SIZE);
CLASSERT(offsetof(kib_msg_t,
ibm_u.get.ibgm_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
@@ -2789,9 +3028,7 @@ static int __init ko2iblnd_init(void)
ibm_u.putack.ibpam_rd.rd_frags[IBLND_MAX_RDMA_FRAGS])
<= IBLND_MSG_SIZE);
- rc = kiblnd_tunables_init();
- if (rc)
- return rc;
+ kiblnd_tunables_init();
lnet_register_lnd(&the_o2iblnd);
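
The kiblnd_create_fmr_pool() hunk above now picks the memory-registration method per HCA. A condensed restatement of that decision, given only as an illustrative sketch (the helper name is hypothetical; the fields used are the ones the hunk itself touches):

	/* hypothetical helper mirroring the capability check above */
	static int example_choose_reg_method(struct ib_device *ibdev)
	{
		if (ibdev->alloc_fmr && ibdev->dealloc_fmr &&
		    ibdev->map_phys_fmr && ibdev->unmap_fmr)
			return 1;	/* FMR pool path */

		if (ibdev->attrs.device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS)
			return 0;	/* FastReg descriptor path */

		return -ENOSYS;		/* neither method is available */
	}

kiblnd_create_fmr_pool() then hands the pool off to kiblnd_alloc_fmr_pool() or kiblnd_alloc_freg_pool() accordingly, and fpo_is_fmr records the choice for the map/unmap paths.
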
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
index bfcbdd167da7..b22984fd9ad3 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd.h
@@ -87,22 +87,10 @@ typedef struct {
int *kib_timeout; /* comms timeout (seconds) */
int *kib_keepalive; /* keepalive timeout (seconds) */
int *kib_ntx; /* # tx descs */
- int *kib_credits; /* # concurrent sends */
- int *kib_peertxcredits; /* # concurrent sends to 1 peer */
- int *kib_peerrtrcredits; /* # per-peer router buffer credits */
- int *kib_peercredits_hiw; /* # when eagerly to return credits */
- int *kib_peertimeout; /* seconds to consider peer dead */
char **kib_default_ipif; /* default IPoIB interface */
int *kib_retry_count;
int *kib_rnr_retry_count;
- int *kib_concurrent_sends; /* send work queue sizing */
int *kib_ib_mtu; /* IB MTU */
- int *kib_map_on_demand; /* map-on-demand if RD has more */
- /* fragments than this value, 0 */
- /* disable map-on-demand */
- int *kib_fmr_pool_size; /* # FMRs in pool */
- int *kib_fmr_flush_trigger; /* When to trigger FMR flush */
- int *kib_fmr_cache; /* enable FMR pool cache? */
int *kib_require_priv_port; /* accept only privileged ports */
int *kib_use_priv_port; /* use privileged port for active connect */
int *kib_nscheds; /* # threads on each CPT */
@@ -116,43 +104,21 @@ extern kib_tunables_t kiblnd_tunables;
#define IBLND_CREDITS_DEFAULT 8 /* default # of peer credits */
#define IBLND_CREDITS_MAX ((typeof(((kib_msg_t *) 0)->ibm_credits)) - 1) /* Max # of peer credits */
-#define IBLND_MSG_QUEUE_SIZE(v) ((v) == IBLND_MSG_VERSION_1 ? \
- IBLND_MSG_QUEUE_SIZE_V1 : \
- *kiblnd_tunables.kib_peertxcredits) /* # messages/RDMAs in-flight */
-#define IBLND_CREDITS_HIGHWATER(v) ((v) == IBLND_MSG_VERSION_1 ? \
- IBLND_CREDIT_HIGHWATER_V1 : \
- *kiblnd_tunables.kib_peercredits_hiw) /* when eagerly to return credits */
+/* when eagerly to return credits */
+#define IBLND_CREDITS_HIGHWATER(t, v) ((v) == IBLND_MSG_VERSION_1 ? \
+ IBLND_CREDIT_HIGHWATER_V1 : \
+ t->lnd_peercredits_hiw)
#define kiblnd_rdma_create_id(cb, dev, ps, qpt) rdma_create_id(&init_net, \
cb, dev, \
ps, qpt)
-static inline int
-kiblnd_concurrent_sends_v1(void)
-{
- if (*kiblnd_tunables.kib_concurrent_sends > IBLND_MSG_QUEUE_SIZE_V1 * 2)
- return IBLND_MSG_QUEUE_SIZE_V1 * 2;
-
- if (*kiblnd_tunables.kib_concurrent_sends < IBLND_MSG_QUEUE_SIZE_V1 / 2)
- return IBLND_MSG_QUEUE_SIZE_V1 / 2;
-
- return *kiblnd_tunables.kib_concurrent_sends;
-}
-
-#define IBLND_CONCURRENT_SENDS(v) ((v) == IBLND_MSG_VERSION_1 ? \
- kiblnd_concurrent_sends_v1() : \
- *kiblnd_tunables.kib_concurrent_sends)
/* 2 OOB shall suffice for 1 keepalive and 1 returning credits */
#define IBLND_OOB_CAPABLE(v) ((v) != IBLND_MSG_VERSION_1)
#define IBLND_OOB_MSGS(v) (IBLND_OOB_CAPABLE(v) ? 2 : 0)
#define IBLND_MSG_SIZE (4 << 10) /* max size of queued messages (inc hdr) */
#define IBLND_MAX_RDMA_FRAGS LNET_MAX_IOV /* max # of fragments supported */
-#define IBLND_CFG_RDMA_FRAGS (*kiblnd_tunables.kib_map_on_demand ? \
- *kiblnd_tunables.kib_map_on_demand : \
- IBLND_MAX_RDMA_FRAGS) /* max # of fragments configured by user */
-#define IBLND_RDMA_FRAGS(v) ((v) == IBLND_MSG_VERSION_1 ? \
- IBLND_MAX_RDMA_FRAGS : IBLND_CFG_RDMA_FRAGS)
/************************/
/* derived constants... */
@@ -171,7 +137,8 @@ kiblnd_concurrent_sends_v1(void)
/* WRs and CQEs (per connection) */
#define IBLND_RECV_WRS(c) IBLND_RX_MSGS(c)
#define IBLND_SEND_WRS(c) \
- ((c->ibc_max_frags + 1) * IBLND_CONCURRENT_SENDS(c->ibc_version))
+ ((c->ibc_max_frags + 1) * kiblnd_concurrent_sends(c->ibc_version, \
+ c->ibc_peer->ibp_ni))
#define IBLND_CQ_ENTRIES(c) (IBLND_RECV_WRS(c) + IBLND_SEND_WRS(c))
struct kib_hca_dev;
@@ -286,24 +253,44 @@ typedef struct {
int fps_cpt; /* CPT id */
int fps_pool_size;
int fps_flush_trigger;
+ int fps_cache;
int fps_increasing; /* is allocating new pool */
unsigned long fps_next_retry; /* time stamp for retry if*/
/* failed to allocate */
} kib_fmr_poolset_t;
+struct kib_fast_reg_descriptor { /* For fast registration */
+ struct list_head frd_list;
+ struct ib_send_wr frd_inv_wr;
+ struct ib_reg_wr frd_fastreg_wr;
+ struct ib_mr *frd_mr;
+ bool frd_valid;
+};
+
typedef struct {
struct list_head fpo_list; /* chain on pool list */
struct kib_hca_dev *fpo_hdev; /* device for this pool */
kib_fmr_poolset_t *fpo_owner; /* owner of this pool */
- struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */
+ union {
+ struct {
+ struct ib_fmr_pool *fpo_fmr_pool; /* IB FMR pool */
+ } fmr;
+ struct { /* For fast registration */
+ struct list_head fpo_pool_list;
+ int fpo_pool_size;
+ } fast_reg;
+ };
unsigned long fpo_deadline; /* deadline of this pool */
int fpo_failed; /* fmr pool is failed */
int fpo_map_count; /* # of mapped FMR */
+ int fpo_is_fmr;
} kib_fmr_pool_t;
typedef struct {
- struct ib_pool_fmr *fmr_pfmr; /* IB pool fmr */
- kib_fmr_pool_t *fmr_pool; /* pool of FMR */
+ kib_fmr_pool_t *fmr_pool; /* pool of FMR */
+ struct ib_pool_fmr *fmr_pfmr; /* IB pool fmr */
+ struct kib_fast_reg_descriptor *fmr_frd;
+ u32 fmr_key;
} kib_fmr_t;
typedef struct kib_net {
@@ -615,6 +602,48 @@ extern kib_data_t kiblnd_data;
void kiblnd_hdev_destroy(kib_hca_dev_t *hdev);
+int kiblnd_msg_queue_size(int version, struct lnet_ni *ni);
+
+/* max # of fragments configured by user */
+static inline int
+kiblnd_cfg_rdma_frags(struct lnet_ni *ni)
+{
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables;
+ int mod;
+
+ tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
+ mod = tunables->lnd_map_on_demand;
+ return mod ? mod : IBLND_MAX_RDMA_FRAGS;
+}
+
+static inline int
+kiblnd_rdma_frags(int version, struct lnet_ni *ni)
+{
+ return version == IBLND_MSG_VERSION_1 ?
+ IBLND_MAX_RDMA_FRAGS :
+ kiblnd_cfg_rdma_frags(ni);
+}
+
+static inline int
+kiblnd_concurrent_sends(int version, struct lnet_ni *ni)
+{
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables;
+ int concurrent_sends;
+
+ tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
+ concurrent_sends = tunables->lnd_concurrent_sends;
+
+ if (version == IBLND_MSG_VERSION_1) {
+ if (concurrent_sends > IBLND_MSG_QUEUE_SIZE_V1 * 2)
+ return IBLND_MSG_QUEUE_SIZE_V1 * 2;
+
+ if (concurrent_sends < IBLND_MSG_QUEUE_SIZE_V1 / 2)
+ return IBLND_MSG_QUEUE_SIZE_V1 / 2;
+ }
+
+ return concurrent_sends;
+}
+
static inline void
kiblnd_hdev_addref_locked(kib_hca_dev_t *hdev)
{
@@ -737,10 +766,14 @@ kiblnd_send_keepalive(kib_conn_t *conn)
static inline int
kiblnd_need_noop(kib_conn_t *conn)
{
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables;
+ lnet_ni_t *ni = conn->ibc_peer->ibp_ni;
+
LASSERT(conn->ibc_state >= IBLND_CONN_ESTABLISHED);
+ tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
if (conn->ibc_outstanding_credits <
- IBLND_CREDITS_HIGHWATER(conn->ibc_version) &&
+ IBLND_CREDITS_HIGHWATER(tunables, conn->ibc_version) &&
!kiblnd_send_keepalive(conn))
return 0; /* No need to send NOOP */
@@ -799,7 +832,8 @@ kiblnd_queue2str(kib_conn_t *conn, struct list_head *q)
#define IBLND_WID_TX 1
#define IBLND_WID_RX 2
#define IBLND_WID_RDMA 3
-#define IBLND_WID_MASK 3UL
+#define IBLND_WID_MR 4
+#define IBLND_WID_MASK 7UL
static inline __u64
kiblnd_ptr2wreqid(void *ptr, int type)
@@ -947,20 +981,20 @@ static inline unsigned int kiblnd_sg_dma_len(struct ib_device *dev,
#define KIBLND_CONN_PARAM(e) ((e)->param.conn.private_data)
#define KIBLND_CONN_PARAM_LEN(e) ((e)->param.conn.private_data_len)
-struct ib_mr *kiblnd_find_rd_dma_mr(kib_hca_dev_t *hdev,
- kib_rdma_desc_t *rd,
+struct ib_mr *kiblnd_find_rd_dma_mr(struct lnet_ni *ni, kib_rdma_desc_t *rd,
int negotiated_nfrags);
void kiblnd_map_rx_descs(kib_conn_t *conn);
void kiblnd_unmap_rx_descs(kib_conn_t *conn);
void kiblnd_pool_free_node(kib_pool_t *pool, struct list_head *node);
struct list_head *kiblnd_pool_alloc_node(kib_poolset_t *ps);
-int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, __u64 *pages,
- int npages, __u64 iov, kib_fmr_t *fmr);
+int kiblnd_fmr_pool_map(kib_fmr_poolset_t *fps, kib_tx_t *tx,
+ kib_rdma_desc_t *rd, __u32 nob, __u64 iov,
+ kib_fmr_t *fmr);
void kiblnd_fmr_pool_unmap(kib_fmr_t *fmr, int status);
-int kiblnd_tunables_init(void);
-void kiblnd_tunables_fini(void);
+int kiblnd_tunables_setup(struct lnet_ni *ni);
+void kiblnd_tunables_init(void);
int kiblnd_connd(void *arg);
int kiblnd_scheduler(void *arg);
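
The IBLND_SEND_WRS() change above now sizes the send queue from the per-NI tunables instead of a module-global. An equivalent open-coded form, shown only as an illustrative sketch (the helper name is hypothetical):

	/* hypothetical helper: WRs to provision for one connection */
	static int example_send_wrs(kib_conn_t *conn)
	{
		struct lnet_ni *ni = conn->ibc_peer->ibp_ni;

		/* one WR per RDMA fragment plus the message itself, for
		 * every send that may be in flight concurrently */
		return (conn->ibc_max_frags + 1) *
		       kiblnd_concurrent_sends(conn->ibc_version, ni);
	}
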
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
index 2323e8d3a318..bbfee53cfcf5 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_cb.c
@@ -561,36 +561,23 @@ kiblnd_kvaddr_to_page(unsigned long vaddr)
}
static int
-kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
+kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, __u32 nob)
{
kib_hca_dev_t *hdev;
- __u64 *pages = tx->tx_pages;
kib_fmr_poolset_t *fps;
- int npages;
- int size;
int cpt;
int rc;
- int i;
LASSERT(tx->tx_pool);
LASSERT(tx->tx_pool->tpo_pool.po_owner);
hdev = tx->tx_pool->tpo_hdev;
-
- for (i = 0, npages = 0; i < rd->rd_nfrags; i++) {
- for (size = 0; size < rd->rd_frags[i].rf_nob;
- size += hdev->ibh_page_size) {
- pages[npages++] = (rd->rd_frags[i].rf_addr &
- hdev->ibh_page_mask) + size;
- }
- }
-
cpt = tx->tx_pool->tpo_pool.po_owner->ps_cpt;
fps = net->ibn_fmr_ps[cpt];
- rc = kiblnd_fmr_pool_map(fps, pages, npages, 0, &tx->fmr);
+ rc = kiblnd_fmr_pool_map(fps, tx, rd, nob, 0, &tx->fmr);
if (rc) {
- CERROR("Can't map %d pages: %d\n", npages, rc);
+ CERROR("Can't map %u bytes: %d\n", nob, rc);
return rc;
}
@@ -598,8 +585,7 @@ kiblnd_fmr_map_tx(kib_net_t *net, kib_tx_t *tx, kib_rdma_desc_t *rd, int nob)
* If rd is not tx_rd, it's going to get sent to a peer, who will need
* the rkey
*/
- rd->rd_key = (rd != tx->tx_rd) ? tx->fmr.fmr_pfmr->fmr->rkey :
- tx->fmr.fmr_pfmr->fmr->lkey;
+ rd->rd_key = tx->fmr.fmr_key;
rd->rd_frags[0].rf_addr &= ~hdev->ibh_page_mask;
rd->rd_frags[0].rf_nob = nob;
rd->rd_nfrags = 1;
@@ -613,10 +599,8 @@ static void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
LASSERT(net);
- if (net->ibn_fmr_ps && tx->fmr.fmr_pfmr) {
+ if (net->ibn_fmr_ps)
kiblnd_fmr_pool_unmap(&tx->fmr, tx->tx_status);
- tx->fmr.fmr_pfmr = NULL;
- }
if (tx->tx_nfrags) {
kiblnd_dma_unmap_sg(tx->tx_pool->tpo_hdev->ibh_ibdev,
@@ -628,8 +612,8 @@ static void kiblnd_unmap_tx(lnet_ni_t *ni, kib_tx_t *tx)
static int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
int nfrags)
{
- kib_hca_dev_t *hdev = tx->tx_pool->tpo_hdev;
kib_net_t *net = ni->ni_data;
+ kib_hca_dev_t *hdev = net->ibn_dev->ibd_hdev;
struct ib_mr *mr = NULL;
__u32 nob;
int i;
@@ -652,7 +636,7 @@ static int kiblnd_map_tx(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
nob += rd->rd_frags[i].rf_nob;
}
- mr = kiblnd_find_rd_dma_mr(hdev, rd, tx->tx_conn ?
+ mr = kiblnd_find_rd_dma_mr(ni, rd, tx->tx_conn ?
tx->tx_conn->ibc_max_frags : -1);
if (mr) {
/* found pre-mapping MR */
@@ -704,7 +688,7 @@ kiblnd_setup_rd_iov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
fragnob = min(fragnob, (int)PAGE_SIZE - page_offset);
sg_set_page(sg, page, fragnob, page_offset);
- sg++;
+ sg = sg_next(sg);
if (offset + fragnob < iov->iov_len) {
offset += fragnob;
@@ -748,7 +732,7 @@ kiblnd_setup_rd_kiov(lnet_ni_t *ni, kib_tx_t *tx, kib_rdma_desc_t *rd,
sg_set_page(sg, kiov->kiov_page, fragnob,
kiov->kiov_offset + offset);
- sg++;
+ sg = sg_next(sg);
offset = 0;
kiov++;
@@ -765,6 +749,7 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
{
kib_msg_t *msg = tx->tx_msg;
kib_peer_t *peer = conn->ibc_peer;
+ struct lnet_ni *ni = peer->ibp_ni;
int ver = conn->ibc_version;
int rc;
int done;
@@ -780,7 +765,7 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
LASSERT(conn->ibc_credits >= 0);
LASSERT(conn->ibc_credits <= conn->ibc_queue_depth);
- if (conn->ibc_nsends_posted == IBLND_CONCURRENT_SENDS(ver)) {
+ if (conn->ibc_nsends_posted == kiblnd_concurrent_sends(ver, ni)) {
/* tx completions outstanding... */
CDEBUG(D_NET, "%s: posted enough\n",
libcfs_nid2str(peer->ibp_nid));
@@ -851,14 +836,26 @@ kiblnd_post_tx_locked(kib_conn_t *conn, kib_tx_t *tx, int credit)
/* close_conn will launch failover */
rc = -ENETDOWN;
} else {
- struct ib_send_wr *wrq = &tx->tx_wrq[tx->tx_nwrq - 1].wr;
+ struct kib_fast_reg_descriptor *frd = tx->fmr.fmr_frd;
+ struct ib_send_wr *bad = &tx->tx_wrq[tx->tx_nwrq - 1].wr;
+ struct ib_send_wr *wrq = &tx->tx_wrq[0].wr;
+
+ if (frd) {
+ if (!frd->frd_valid) {
+ wrq = &frd->frd_inv_wr;
+ wrq->next = &frd->frd_fastreg_wr.wr;
+ } else {
+ wrq = &frd->frd_fastreg_wr.wr;
+ }
+ frd->frd_fastreg_wr.wr.next = &tx->tx_wrq[0].wr;
+ }
- LASSERTF(wrq->wr_id == kiblnd_ptr2wreqid(tx, IBLND_WID_TX),
+ LASSERTF(bad->wr_id == kiblnd_ptr2wreqid(tx, IBLND_WID_TX),
"bad wr_id %llx, opc %d, flags %d, peer: %s\n",
- wrq->wr_id, wrq->opcode, wrq->send_flags,
- libcfs_nid2str(conn->ibc_peer->ibp_nid));
- wrq = NULL;
- rc = ib_post_send(conn->ibc_cmid->qp, &tx->tx_wrq->wr, &wrq);
+ bad->wr_id, bad->opcode, bad->send_flags,
+ libcfs_nid2str(conn->ibc_peer->ibp_nid));
+ bad = NULL;
+ rc = ib_post_send(conn->ibc_cmid->qp, wrq, &bad);
}
conn->ibc_last_send = jiffies;
@@ -919,7 +916,7 @@ kiblnd_check_sends(kib_conn_t *conn)
spin_lock(&conn->ibc_lock);
- LASSERT(conn->ibc_nsends_posted <= IBLND_CONCURRENT_SENDS(ver));
+ LASSERT(conn->ibc_nsends_posted <= kiblnd_concurrent_sends(ver, ni));
LASSERT(!IBLND_OOB_CAPABLE(ver) ||
conn->ibc_noops_posted <= IBLND_OOB_MSGS(ver));
LASSERT(conn->ibc_reserved_credits >= 0);
@@ -1066,7 +1063,7 @@ kiblnd_init_rdma(kib_conn_t *conn, kib_tx_t *tx, int type,
kib_msg_t *ibmsg = tx->tx_msg;
kib_rdma_desc_t *srcrd = tx->tx_rd;
struct ib_sge *sge = &tx->tx_sge[0];
- struct ib_rdma_wr *wrq = &tx->tx_wrq[0], *next;
+ struct ib_rdma_wr *wrq, *next;
int rc = resid;
int srcidx = 0;
int dstidx = 0;
@@ -2333,11 +2330,11 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
}
if (reqmsg->ibm_u.connparams.ibcp_queue_depth >
- IBLND_MSG_QUEUE_SIZE(version)) {
+ kiblnd_msg_queue_size(version, ni)) {
CERROR("Can't accept conn from %s, queue depth too large: %d (<=%d wanted)\n",
libcfs_nid2str(nid),
reqmsg->ibm_u.connparams.ibcp_queue_depth,
- IBLND_MSG_QUEUE_SIZE(version));
+ kiblnd_msg_queue_size(version, ni));
if (version == IBLND_MSG_VERSION)
rej.ibr_why = IBLND_REJECT_MSG_QUEUE_SIZE;
@@ -2346,24 +2343,24 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
}
if (reqmsg->ibm_u.connparams.ibcp_max_frags >
- IBLND_RDMA_FRAGS(version)) {
+ kiblnd_rdma_frags(version, ni)) {
CWARN("Can't accept conn from %s (version %x): max_frags %d too large (%d wanted)\n",
libcfs_nid2str(nid), version,
reqmsg->ibm_u.connparams.ibcp_max_frags,
- IBLND_RDMA_FRAGS(version));
+ kiblnd_rdma_frags(version, ni));
if (version >= IBLND_MSG_VERSION)
rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
goto failed;
} else if (reqmsg->ibm_u.connparams.ibcp_max_frags <
- IBLND_RDMA_FRAGS(version) && !net->ibn_fmr_ps) {
+ kiblnd_rdma_frags(version, ni) && !net->ibn_fmr_ps) {
CWARN("Can't accept conn from %s (version %x): max_frags %d incompatible without FMR pool (%d wanted)\n",
libcfs_nid2str(nid), version,
reqmsg->ibm_u.connparams.ibcp_max_frags,
- IBLND_RDMA_FRAGS(version));
+ kiblnd_rdma_frags(version, ni));
- if (version >= IBLND_MSG_VERSION)
+ if (version == IBLND_MSG_VERSION)
rej.ibr_why = IBLND_REJECT_RDMA_FRAGS;
goto failed;
@@ -2528,8 +2525,8 @@ kiblnd_passive_connect(struct rdma_cm_id *cmid, void *priv, int priv_nob)
lnet_ni_decref(ni);
rej.ibr_version = version;
- rej.ibr_cp.ibcp_queue_depth = IBLND_MSG_QUEUE_SIZE(version);
- rej.ibr_cp.ibcp_max_frags = IBLND_RDMA_FRAGS(version);
+ rej.ibr_cp.ibcp_queue_depth = kiblnd_msg_queue_size(version, ni);
+ rej.ibr_cp.ibcp_max_frags = kiblnd_rdma_frags(version, ni);
kiblnd_reject(cmid, &rej);
return -ECONNREFUSED;
@@ -2580,12 +2577,15 @@ kiblnd_check_reconnect(kib_conn_t *conn, int version,
reason = "Unknown";
break;
- case IBLND_REJECT_RDMA_FRAGS:
+ case IBLND_REJECT_RDMA_FRAGS: {
+ struct lnet_ioctl_config_lnd_tunables *tunables;
+
if (!cp) {
reason = "can't negotiate max frags";
goto out;
}
- if (!*kiblnd_tunables.kib_map_on_demand) {
+ tunables = peer->ibp_ni->ni_lnd_tunables;
+ if (!tunables->lt_tun_u.lt_o2ib.lnd_map_on_demand) {
reason = "map_on_demand must be enabled";
goto out;
}
@@ -2597,7 +2597,7 @@ kiblnd_check_reconnect(kib_conn_t *conn, int version,
peer->ibp_max_frags = frag_num;
reason = "rdma fragments";
break;
-
+ }
case IBLND_REJECT_MSG_QUEUE_SIZE:
if (!cp) {
reason = "can't negotiate queue depth";
@@ -3430,6 +3430,12 @@ kiblnd_complete(struct ib_wc *wc)
default:
LBUG();
+ case IBLND_WID_MR:
+ if (wc->status != IB_WC_SUCCESS &&
+ wc->status != IB_WC_WR_FLUSH_ERR)
+ CNETERR("FastReg failed: %d\n", wc->status);
+ break;
+
case IBLND_WID_RDMA:
/*
* We only get RDMA completion notification if it fails. All
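
For reference, the work-request chain that kiblnd_post_tx_locked() posts once a FastReg descriptor is attached to the tx, condensed into an illustrative sketch (not part of the patch):

	struct kib_fast_reg_descriptor *frd = tx->fmr.fmr_frd;
	struct ib_send_wr *head = &tx->tx_wrq[0].wr;
	struct ib_send_wr *bad = NULL;
	int rc;

	if (frd) {
		/* the FastReg WR always runs ahead of the data WRs ... */
		frd->frd_fastreg_wr.wr.next = &tx->tx_wrq[0].wr;
		head = &frd->frd_fastreg_wr.wr;

		/* ... and a stale rkey gets a LOCAL_INV WR in front of that */
		if (!frd->frd_valid) {
			frd->frd_inv_wr.next = &frd->frd_fastreg_wr.wr;
			head = &frd->frd_inv_wr;
		}
	}
	rc = ib_post_send(conn->ibc_cmid->qp, head, &bad);

Completions for these registration WRs carry wr_id IBLND_WID_MR and, per the kiblnd_complete() hunk above, are only reported when they fail.
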
diff --git a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
index b4607dad3712..f8fdd4ae3dbf 100644
--- a/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
+++ b/drivers/staging/lustre/lnet/klnds/o2iblnd/o2iblnd_modparams.c
@@ -152,74 +152,135 @@ kib_tunables_t kiblnd_tunables = {
.kib_timeout = &timeout,
.kib_keepalive = &keepalive,
.kib_ntx = &ntx,
- .kib_credits = &credits,
- .kib_peertxcredits = &peer_credits,
- .kib_peercredits_hiw = &peer_credits_hiw,
- .kib_peerrtrcredits = &peer_buffer_credits,
- .kib_peertimeout = &peer_timeout,
.kib_default_ipif = &ipif_name,
.kib_retry_count = &retry_count,
.kib_rnr_retry_count = &rnr_retry_count,
- .kib_concurrent_sends = &concurrent_sends,
.kib_ib_mtu = &ib_mtu,
- .kib_map_on_demand = &map_on_demand,
- .kib_fmr_pool_size = &fmr_pool_size,
- .kib_fmr_flush_trigger = &fmr_flush_trigger,
- .kib_fmr_cache = &fmr_cache,
.kib_require_priv_port = &require_privileged_port,
.kib_use_priv_port = &use_privileged_port,
.kib_nscheds = &nscheds
};
-int
-kiblnd_tunables_init(void)
+static struct lnet_ioctl_config_o2iblnd_tunables default_tunables;
+
+/* # messages/RDMAs in-flight */
+int kiblnd_msg_queue_size(int version, lnet_ni_t *ni)
{
+ if (version == IBLND_MSG_VERSION_1)
+ return IBLND_MSG_QUEUE_SIZE_V1;
+ else if (ni)
+ return ni->ni_peertxcredits;
+ else
+ return peer_credits;
+}
+
+int kiblnd_tunables_setup(struct lnet_ni *ni)
+{
+ struct lnet_ioctl_config_o2iblnd_tunables *tunables;
+
+ /*
+	 * if no tunables were specified, set up the tunables with the
+	 * default values
+ */
+ if (!ni->ni_lnd_tunables) {
+ LIBCFS_ALLOC(ni->ni_lnd_tunables,
+ sizeof(*ni->ni_lnd_tunables));
+ if (!ni->ni_lnd_tunables)
+ return -ENOMEM;
+
+ memcpy(&ni->ni_lnd_tunables->lt_tun_u.lt_o2ib,
+ &default_tunables, sizeof(*tunables));
+ }
+ tunables = &ni->ni_lnd_tunables->lt_tun_u.lt_o2ib;
+
+ /* Current API version */
+ tunables->lnd_version = 0;
+
if (kiblnd_translate_mtu(*kiblnd_tunables.kib_ib_mtu) < 0) {
CERROR("Invalid ib_mtu %d, expected 256/512/1024/2048/4096\n",
*kiblnd_tunables.kib_ib_mtu);
return -EINVAL;
}
- if (*kiblnd_tunables.kib_peertxcredits < IBLND_CREDITS_DEFAULT)
- *kiblnd_tunables.kib_peertxcredits = IBLND_CREDITS_DEFAULT;
+ if (!ni->ni_peertimeout)
+ ni->ni_peertimeout = peer_timeout;
+
+ if (!ni->ni_maxtxcredits)
+ ni->ni_maxtxcredits = credits;
+
+ if (!ni->ni_peertxcredits)
+ ni->ni_peertxcredits = peer_credits;
- if (*kiblnd_tunables.kib_peertxcredits > IBLND_CREDITS_MAX)
- *kiblnd_tunables.kib_peertxcredits = IBLND_CREDITS_MAX;
+ if (!ni->ni_peerrtrcredits)
+ ni->ni_peerrtrcredits = peer_buffer_credits;
- if (*kiblnd_tunables.kib_peertxcredits > *kiblnd_tunables.kib_credits)
- *kiblnd_tunables.kib_peertxcredits = *kiblnd_tunables.kib_credits;
+ if (ni->ni_peertxcredits < IBLND_CREDITS_DEFAULT)
+ ni->ni_peertxcredits = IBLND_CREDITS_DEFAULT;
- if (*kiblnd_tunables.kib_peercredits_hiw < *kiblnd_tunables.kib_peertxcredits / 2)
- *kiblnd_tunables.kib_peercredits_hiw = *kiblnd_tunables.kib_peertxcredits / 2;
+ if (ni->ni_peertxcredits > IBLND_CREDITS_MAX)
+ ni->ni_peertxcredits = IBLND_CREDITS_MAX;
- if (*kiblnd_tunables.kib_peercredits_hiw >= *kiblnd_tunables.kib_peertxcredits)
- *kiblnd_tunables.kib_peercredits_hiw = *kiblnd_tunables.kib_peertxcredits - 1;
+ if (ni->ni_peertxcredits > credits)
+ ni->ni_peertxcredits = credits;
- if (*kiblnd_tunables.kib_map_on_demand < 0 ||
- *kiblnd_tunables.kib_map_on_demand > IBLND_MAX_RDMA_FRAGS)
- *kiblnd_tunables.kib_map_on_demand = 0; /* disable map-on-demand */
+ if (!tunables->lnd_peercredits_hiw)
+ tunables->lnd_peercredits_hiw = peer_credits_hiw;
- if (*kiblnd_tunables.kib_map_on_demand == 1)
- *kiblnd_tunables.kib_map_on_demand = 2; /* don't make sense to create map if only one fragment */
+ if (tunables->lnd_peercredits_hiw < ni->ni_peertxcredits / 2)
+ tunables->lnd_peercredits_hiw = ni->ni_peertxcredits / 2;
- if (!*kiblnd_tunables.kib_concurrent_sends) {
- if (*kiblnd_tunables.kib_map_on_demand > 0 &&
- *kiblnd_tunables.kib_map_on_demand <= IBLND_MAX_RDMA_FRAGS / 8)
- *kiblnd_tunables.kib_concurrent_sends = (*kiblnd_tunables.kib_peertxcredits) * 2;
- else
- *kiblnd_tunables.kib_concurrent_sends = (*kiblnd_tunables.kib_peertxcredits);
+ if (tunables->lnd_peercredits_hiw >= ni->ni_peertxcredits)
+ tunables->lnd_peercredits_hiw = ni->ni_peertxcredits - 1;
+
+ if (tunables->lnd_map_on_demand < 0 ||
+ tunables->lnd_map_on_demand > IBLND_MAX_RDMA_FRAGS) {
+ /* disable map-on-demand */
+ tunables->lnd_map_on_demand = 0;
+ }
+
+ if (tunables->lnd_map_on_demand == 1) {
+		/* it makes no sense to create a map for a single fragment */
+ tunables->lnd_map_on_demand = 2;
+ }
+
+ if (!tunables->lnd_concurrent_sends) {
+ if (tunables->lnd_map_on_demand > 0 &&
+ tunables->lnd_map_on_demand <= IBLND_MAX_RDMA_FRAGS / 8) {
+ tunables->lnd_concurrent_sends =
+ ni->ni_peertxcredits * 2;
+ } else {
+ tunables->lnd_concurrent_sends = ni->ni_peertxcredits;
+ }
}
- if (*kiblnd_tunables.kib_concurrent_sends > *kiblnd_tunables.kib_peertxcredits * 2)
- *kiblnd_tunables.kib_concurrent_sends = *kiblnd_tunables.kib_peertxcredits * 2;
+ if (tunables->lnd_concurrent_sends > ni->ni_peertxcredits * 2)
+ tunables->lnd_concurrent_sends = ni->ni_peertxcredits * 2;
- if (*kiblnd_tunables.kib_concurrent_sends < *kiblnd_tunables.kib_peertxcredits / 2)
- *kiblnd_tunables.kib_concurrent_sends = *kiblnd_tunables.kib_peertxcredits / 2;
+ if (tunables->lnd_concurrent_sends < ni->ni_peertxcredits / 2)
+ tunables->lnd_concurrent_sends = ni->ni_peertxcredits / 2;
- if (*kiblnd_tunables.kib_concurrent_sends < *kiblnd_tunables.kib_peertxcredits) {
+ if (tunables->lnd_concurrent_sends < ni->ni_peertxcredits) {
CWARN("Concurrent sends %d is lower than message queue size: %d, performance may drop slightly.\n",
- *kiblnd_tunables.kib_concurrent_sends, *kiblnd_tunables.kib_peertxcredits);
+ tunables->lnd_concurrent_sends, ni->ni_peertxcredits);
}
+ if (!tunables->lnd_fmr_pool_size)
+ tunables->lnd_fmr_pool_size = fmr_pool_size;
+ if (!tunables->lnd_fmr_flush_trigger)
+ tunables->lnd_fmr_flush_trigger = fmr_flush_trigger;
+ if (!tunables->lnd_fmr_cache)
+ tunables->lnd_fmr_cache = fmr_cache;
+
return 0;
}
+
+void kiblnd_tunables_init(void)
+{
+ default_tunables.lnd_version = 0;
+ default_tunables.lnd_peercredits_hiw = peer_credits_hiw,
+ default_tunables.lnd_map_on_demand = map_on_demand;
+ default_tunables.lnd_concurrent_sends = concurrent_sends;
+ default_tunables.lnd_fmr_pool_size = fmr_pool_size;
+ default_tunables.lnd_fmr_flush_trigger = fmr_flush_trigger;
+ default_tunables.lnd_fmr_cache = fmr_cache;
+}
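
A summary of how the reworked tunables flow fits together at runtime, given as an illustrative sketch (not part of the patch):

	/*
	 * ko2iblnd_init()
	 *     kiblnd_tunables_init()     - snapshot the module parameters
	 *                                  into default_tunables, once
	 *
	 * kiblnd_startup(ni)
	 *     kiblnd_tunables_setup(ni)  - allocate ni->ni_lnd_tunables and
	 *                                  copy the defaults if the config
	 *                                  supplied none, then clamp the
	 *                                  per-NI credits, high-water mark,
	 *                                  concurrent sends and FMR settings
	 */
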
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
index cca7b2f7f1a7..406c0e7a57b9 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd.c
@@ -2582,7 +2582,6 @@ ksocknal_debug_peerhash(lnet_ni_t *ni)
}
read_unlock(&ksocknal_data.ksnd_global_lock);
- return;
}
void
diff --git a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
index d4ce06d0aeeb..964b4e338fe0 100644
--- a/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
+++ b/drivers/staging/lustre/lnet/klnds/socklnd/socklnd_lib.c
@@ -675,7 +675,6 @@ ksocknal_lib_set_callback(struct socket *sock, ksock_conn_t *conn)
sock->sk->sk_user_data = conn;
sock->sk->sk_data_ready = ksocknal_data_ready;
sock->sk->sk_write_space = ksocknal_write_space;
- return;
}
void
@@ -695,8 +694,6 @@ ksocknal_lib_reset_callback(struct socket *sock, ksock_conn_t *conn)
* sk_user_data is NULL.
*/
sock->sk->sk_user_data = NULL;
-
- return ;
}
int
diff --git a/drivers/staging/lustre/lnet/libcfs/debug.c b/drivers/staging/lustre/lnet/libcfs/debug.c
index c3d628bac5b8..8c260c3d5da4 100644
--- a/drivers/staging/lustre/lnet/libcfs/debug.c
+++ b/drivers/staging/lustre/lnet/libcfs/debug.c
@@ -232,130 +232,24 @@ int libcfs_panic_in_progress;
static const char *
libcfs_debug_subsys2str(int subsys)
{
- switch (1 << subsys) {
- default:
+ static const char *libcfs_debug_subsystems[] = LIBCFS_DEBUG_SUBSYS_NAMES;
+
+ if (subsys >= ARRAY_SIZE(libcfs_debug_subsystems))
return NULL;
- case S_UNDEFINED:
- return "undefined";
- case S_MDC:
- return "mdc";
- case S_MDS:
- return "mds";
- case S_OSC:
- return "osc";
- case S_OST:
- return "ost";
- case S_CLASS:
- return "class";
- case S_LOG:
- return "log";
- case S_LLITE:
- return "llite";
- case S_RPC:
- return "rpc";
- case S_LNET:
- return "lnet";
- case S_LND:
- return "lnd";
- case S_PINGER:
- return "pinger";
- case S_FILTER:
- return "filter";
- case S_ECHO:
- return "echo";
- case S_LDLM:
- return "ldlm";
- case S_LOV:
- return "lov";
- case S_LQUOTA:
- return "lquota";
- case S_OSD:
- return "osd";
- case S_LFSCK:
- return "lfsck";
- case S_LMV:
- return "lmv";
- case S_SEC:
- return "sec";
- case S_GSS:
- return "gss";
- case S_MGC:
- return "mgc";
- case S_MGS:
- return "mgs";
- case S_FID:
- return "fid";
- case S_FLD:
- return "fld";
- }
+
+ return libcfs_debug_subsystems[subsys];
}
/* libcfs_debug_token2mask() expects the returned string in lower-case */
static const char *
libcfs_debug_dbg2str(int debug)
{
- switch (1 << debug) {
- default:
+ static const char *libcfs_debug_masks[] = LIBCFS_DEBUG_MASKS_NAMES;
+
+ if (debug >= ARRAY_SIZE(libcfs_debug_masks))
return NULL;
- case D_TRACE:
- return "trace";
- case D_INODE:
- return "inode";
- case D_SUPER:
- return "super";
- case D_EXT2:
- return "ext2";
- case D_MALLOC:
- return "malloc";
- case D_CACHE:
- return "cache";
- case D_INFO:
- return "info";
- case D_IOCTL:
- return "ioctl";
- case D_NETERROR:
- return "neterror";
- case D_NET:
- return "net";
- case D_WARNING:
- return "warning";
- case D_BUFFS:
- return "buffs";
- case D_OTHER:
- return "other";
- case D_DENTRY:
- return "dentry";
- case D_NETTRACE:
- return "nettrace";
- case D_PAGE:
- return "page";
- case D_DLMTRACE:
- return "dlmtrace";
- case D_ERROR:
- return "error";
- case D_EMERG:
- return "emerg";
- case D_HA:
- return "ha";
- case D_RPCTRACE:
- return "rpctrace";
- case D_VFSTRACE:
- return "vfstrace";
- case D_READA:
- return "reada";
- case D_MMAP:
- return "mmap";
- case D_CONFIG:
- return "config";
- case D_CONSOLE:
- return "console";
- case D_QUOTA:
- return "quota";
- case D_SEC:
- return "sec";
- case D_LFSCK:
- return "lfsck";
- }
+
+ return libcfs_debug_masks[debug];
}
int
diff --git a/drivers/staging/lustre/lnet/libcfs/fail.c b/drivers/staging/lustre/lnet/libcfs/fail.c
index dadaf7685cbd..086e690bd6f2 100644
--- a/drivers/staging/lustre/lnet/libcfs/fail.c
+++ b/drivers/staging/lustre/lnet/libcfs/fail.c
@@ -41,6 +41,9 @@ EXPORT_SYMBOL(cfs_fail_loc);
unsigned int cfs_fail_val;
EXPORT_SYMBOL(cfs_fail_val);
+int cfs_fail_err;
+EXPORT_SYMBOL(cfs_fail_err);
+
DECLARE_WAIT_QUEUE_HEAD(cfs_race_waitq);
EXPORT_SYMBOL(cfs_race_waitq);
diff --git a/drivers/staging/lustre/lnet/libcfs/hash.c b/drivers/staging/lustre/lnet/libcfs/hash.c
index f60feb3a3dc7..cc45ed82b2be 100644
--- a/drivers/staging/lustre/lnet/libcfs/hash.c
+++ b/drivers/staging/lustre/lnet/libcfs/hash.c
@@ -942,10 +942,10 @@ cfs_hash_buckets_realloc(struct cfs_hash *hs, struct cfs_hash_bucket **old_bkts,
* @flags - CFS_HASH_REHASH enable synamic hash resizing
* - CFS_HASH_SORT enable chained hash sort
*/
-static int cfs_hash_rehash_worker(cfs_workitem_t *wi);
+static int cfs_hash_rehash_worker(struct cfs_workitem *wi);
#if CFS_HASH_DEBUG_LEVEL >= CFS_HASH_DEBUG_1
-static int cfs_hash_dep_print(cfs_workitem_t *wi)
+static int cfs_hash_dep_print(struct cfs_workitem *wi)
{
struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_dep_wi);
int dep;
@@ -1847,7 +1847,7 @@ cfs_hash_rehash_bd(struct cfs_hash *hs, struct cfs_hash_bd *old)
}
static int
-cfs_hash_rehash_worker(cfs_workitem_t *wi)
+cfs_hash_rehash_worker(struct cfs_workitem *wi)
{
struct cfs_hash *hs = container_of(wi, struct cfs_hash, hs_rehash_wi);
struct cfs_hash_bucket **bkts;
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c b/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
index 2de9eeae0232..83543f928279 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_lock.c
@@ -49,7 +49,8 @@ EXPORT_SYMBOL(cfs_percpt_lock_free);
* reason we always allocate cacheline-aligned memory block.
*/
struct cfs_percpt_lock *
-cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab)
+cfs_percpt_lock_create(struct cfs_cpt_table *cptab,
+ struct lock_class_key *keys)
{
struct cfs_percpt_lock *pcl;
spinlock_t *lock;
@@ -67,12 +68,18 @@ cfs_percpt_lock_alloc(struct cfs_cpt_table *cptab)
return NULL;
}
- cfs_percpt_for_each(lock, i, pcl->pcl_locks)
+ if (!keys)
+ CWARN("Cannot setup class key for percpt lock, you may see recursive locking warnings which are actually fake.\n");
+
+ cfs_percpt_for_each(lock, i, pcl->pcl_locks) {
spin_lock_init(lock);
+ if (keys != NULL)
+ lockdep_set_class(lock, &keys[i]);
+ }
return pcl;
}
-EXPORT_SYMBOL(cfs_percpt_lock_alloc);
+EXPORT_SYMBOL(cfs_percpt_lock_create);
/**
* lock a CPU partition
@@ -142,44 +149,3 @@ cfs_percpt_unlock(struct cfs_percpt_lock *pcl, int index)
}
}
EXPORT_SYMBOL(cfs_percpt_unlock);
-
-/** free cpu-partition refcount */
-void
-cfs_percpt_atomic_free(atomic_t **refs)
-{
- cfs_percpt_free(refs);
-}
-EXPORT_SYMBOL(cfs_percpt_atomic_free);
-
-/** allocate cpu-partition refcount with initial value @init_val */
-atomic_t **
-cfs_percpt_atomic_alloc(struct cfs_cpt_table *cptab, int init_val)
-{
- atomic_t **refs;
- atomic_t *ref;
- int i;
-
- refs = cfs_percpt_alloc(cptab, sizeof(*ref));
- if (!refs)
- return NULL;
-
- cfs_percpt_for_each(ref, i, refs)
- atomic_set(ref, init_val);
- return refs;
-}
-EXPORT_SYMBOL(cfs_percpt_atomic_alloc);
-
-/** return sum of cpu-partition refs */
-int
-cfs_percpt_atomic_summary(atomic_t **refs)
-{
- atomic_t *ref;
- int i;
- int val = 0;
-
- cfs_percpt_for_each(ref, i, refs)
- val += atomic_read(ref);
-
- return val;
-}
-EXPORT_SYMBOL(cfs_percpt_atomic_summary);
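
An illustrative sketch (not part of the patch) of calling the renamed cfs_percpt_lock_create(); the key array and its bound are hypothetical, the only requirement being one lock_class_key per partition so lockdep can give each per-CPT spinlock its own class:

	/* EXAMPLE_MAX_CPTS is a hypothetical upper bound on partitions */
	static struct lock_class_key example_keys[EXAMPLE_MAX_CPTS];

	struct cfs_percpt_lock *plock;
	int cpt;

	plock = cfs_percpt_lock_create(cptab, example_keys);
	if (!plock)
		return -ENOMEM;

	cpt = cfs_cpt_current(cptab, 0);	/* caller's partition */
	cfs_percpt_lock(plock, cpt);
	/* ... per-partition critical section ... */
	cfs_percpt_unlock(plock, cpt);

Passing a NULL key array still works, but triggers the CWARN added above because lockdep may then report false recursive-locking warnings.
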
diff --git a/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c b/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c
index c5a6951516ed..d0e81bb41cdc 100644
--- a/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c
+++ b/drivers/staging/lustre/lnet/libcfs/libcfs_mem.c
@@ -115,34 +115,6 @@ cfs_percpt_number(void *vars)
EXPORT_SYMBOL(cfs_percpt_number);
/*
- * return memory block shadowed from current CPU
- */
-void *
-cfs_percpt_current(void *vars)
-{
- struct cfs_var_array *arr;
- int cpt;
-
- arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
- cpt = cfs_cpt_current(arr->va_cptab, 0);
- if (cpt < 0)
- return NULL;
-
- return arr->va_ptrs[cpt];
-}
-
-void *
-cfs_percpt_index(void *vars, int idx)
-{
- struct cfs_var_array *arr;
-
- arr = container_of(vars, struct cfs_var_array, va_ptrs[0]);
-
- LASSERT(idx >= 0 && idx < arr->va_count);
- return arr->va_ptrs[idx];
-}
-
-/*
* free variable array, see more detail in cfs_array_alloc
*/
void
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
index 389fb9eeea75..b52518c54efe 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-cpu.c
@@ -755,8 +755,13 @@ cfs_cpt_table_create(int ncpt)
struct cfs_cpu_partition *part;
int n;
- if (cpt >= ncpt)
- goto failed;
+ /*
+ * Each emulated NUMA node has all allowed CPUs in
+ * the mask.
+ * End loop when all partitions have assigned CPUs.
+ */
+ if (cpt == ncpt)
+ break;
part = &cptab->ctb_parts[cpt];
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
index 8c9377ed850c..84f9b7b47581 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-crypto.c
@@ -30,13 +30,34 @@
#include <crypto/hash.h>
#include <linux/scatterlist.h>
#include "../../../include/linux/libcfs/libcfs.h"
+#include "../../../include/linux/libcfs/libcfs_crypto.h"
#include "linux-crypto.h"
+
/**
- * Array of hash algorithm speed in MByte per second
+ * Array of hash algorithm speed in MByte per second
*/
static int cfs_crypto_hash_speeds[CFS_HASH_ALG_MAX];
-static int cfs_crypto_hash_alloc(unsigned char alg_id,
+/**
+ * Initialize the state descriptor for the specified hash algorithm.
+ *
+ * An internal routine to allocate the hash-specific state in \a hdesc for
+ * use with cfs_crypto_hash_digest() to compute the hash of a single message,
+ * though possibly in multiple chunks. The descriptor internal state should
+ * be freed with cfs_crypto_hash_final().
+ *
+ * \param[in] hash_alg hash algorithm id (CFS_HASH_ALG_*)
+ * \param[out] type pointer to the hash description in hash_types[]
+ * array
+ * \param[in,out] hdesc hash state descriptor to be initialized
+ * \param[in] key initial hash value/state, NULL to use default
+ * value
+ * \param[in] key_len length of \a key
+ *
+ * \retval 0 on success
+ * \retval negative errno on failure
+ */
+static int cfs_crypto_hash_alloc(enum cfs_crypto_hash_alg hash_alg,
const struct cfs_crypto_hash_type **type,
struct ahash_request **req,
unsigned char *key,
@@ -45,11 +66,11 @@ static int cfs_crypto_hash_alloc(unsigned char alg_id,
struct crypto_ahash *tfm;
int err = 0;
- *type = cfs_crypto_hash_type(alg_id);
+ *type = cfs_crypto_hash_type(hash_alg);
if (!*type) {
CWARN("Unsupported hash algorithm id = %d, max id is %d\n",
- alg_id, CFS_HASH_ALG_MAX);
+ hash_alg, CFS_HASH_ALG_MAX);
return -EINVAL;
}
tfm = crypto_alloc_ahash((*type)->cht_name, 0, CRYPTO_ALG_ASYNC);
@@ -70,12 +91,6 @@ static int cfs_crypto_hash_alloc(unsigned char alg_id,
ahash_request_set_callback(*req, 0, NULL, NULL);
- /** Shash have different logic for initialization then digest
- * shash: crypto_hash_setkey, crypto_hash_init
- * digest: crypto_digest_init, crypto_digest_setkey
- * Skip this function for digest, because we use shash logic at
- * cfs_crypto_hash_alloc.
- */
if (key)
err = crypto_ahash_setkey(tfm, key, key_len);
else if ((*type)->cht_key != 0)
@@ -90,7 +105,7 @@ static int cfs_crypto_hash_alloc(unsigned char alg_id,
CDEBUG(D_INFO, "Using crypto hash: %s (%s) speed %d MB/s\n",
crypto_ahash_alg_name(tfm), crypto_ahash_driver_name(tfm),
- cfs_crypto_hash_speeds[alg_id]);
+ cfs_crypto_hash_speeds[hash_alg]);
err = crypto_ahash_init(*req);
if (err) {
@@ -100,7 +115,33 @@ static int cfs_crypto_hash_alloc(unsigned char alg_id,
return err;
}
-int cfs_crypto_hash_digest(unsigned char alg_id,
+/**
+ * Calculate hash digest for the passed buffer.
+ *
+ * This should be used when computing the hash on a single contiguous buffer.
+ * It combines the hash initialization, computation, and cleanup.
+ *
+ * \param[in] hash_alg id of hash algorithm (CFS_HASH_ALG_*)
+ * \param[in] buf data buffer on which to compute hash
+ * \param[in] buf_len length of \a buf in bytes
+ * \param[in] key initial value/state for algorithm,
+ * if \a key = NULL use default initial value
+ * \param[in] key_len length of \a key in bytes
+ * \param[out] hash pointer to computed hash value,
+ * if \a hash = NULL then \a hash_len is to digest
+ *				if \a hash = NULL then \a hash_len is set to
+ *				the digest size in bytes and -ENOSPC is
+ *				returned
+ *
+ * \retval -EINVAL \a buf, \a buf_len, \a hash_len,
+ * \a hash_alg invalid
+ * \retval -ENOENT \a hash_alg is unsupported
+ * \retval -ENOSPC \a hash is NULL, or \a hash_len less than
+ * digest size
+ * \retval 0 for success
+ * \retval negative errno for other errors from lower
+ * layers.
+ */
+int cfs_crypto_hash_digest(enum cfs_crypto_hash_alg hash_alg,
const void *buf, unsigned int buf_len,
unsigned char *key, unsigned int key_len,
unsigned char *hash, unsigned int *hash_len)
@@ -113,7 +154,7 @@ int cfs_crypto_hash_digest(unsigned char alg_id,
if (!buf || buf_len == 0 || !hash_len)
return -EINVAL;
- err = cfs_crypto_hash_alloc(alg_id, &type, &req, key, key_len);
+ err = cfs_crypto_hash_alloc(hash_alg, &type, &req, key, key_len);
if (err != 0)
return err;
@@ -134,15 +175,32 @@ int cfs_crypto_hash_digest(unsigned char alg_id,
}
EXPORT_SYMBOL(cfs_crypto_hash_digest);
+/**
+ * Allocate and initialize descriptor for hash algorithm.
+ *
+ * This should be used to initialize a hash descriptor for multiple calls
+ * to a single hash function when computing the hash across multiple
+ * separate buffers or pages using cfs_crypto_hash_update{,_page}().
+ *
+ * The hash descriptor should be freed with cfs_crypto_hash_final().
+ *
+ * \param[in] hash_alg algorithm id (CFS_HASH_ALG_*)
+ * \param[in] key initial value/state for algorithm, if \a key = NULL
+ * use default initial value
+ * \param[in] key_len length of \a key in bytes
+ *
+ * \retval pointer to descriptor of hash instance
+ * \retval ERR_PTR(errno) in case of error
+ */
struct cfs_crypto_hash_desc *
- cfs_crypto_hash_init(unsigned char alg_id,
- unsigned char *key, unsigned int key_len)
+cfs_crypto_hash_init(enum cfs_crypto_hash_alg hash_alg,
+ unsigned char *key, unsigned int key_len)
{
struct ahash_request *req;
int err;
const struct cfs_crypto_hash_type *type;
- err = cfs_crypto_hash_alloc(alg_id, &type, &req, key, key_len);
+ err = cfs_crypto_hash_alloc(hash_alg, &type, &req, key, key_len);
if (err)
return ERR_PTR(err);
@@ -150,6 +208,17 @@ struct cfs_crypto_hash_desc *
}
EXPORT_SYMBOL(cfs_crypto_hash_init);
+/**
+ * Update hash digest computed on data within the given \a page
+ *
+ * \param[in] hdesc hash state descriptor
+ * \param[in] page data page on which to compute the hash
+ * \param[in] offset offset within \a page at which to start hash
+ * \param[in] len length of data on which to compute hash
+ *
+ * \retval 0 for success
+ * \retval negative errno on failure
+ */
int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *hdesc,
struct page *page, unsigned int offset,
unsigned int len)
@@ -158,13 +227,23 @@ int cfs_crypto_hash_update_page(struct cfs_crypto_hash_desc *hdesc,
struct scatterlist sl;
sg_init_table(&sl, 1);
- sg_set_page(&sl, page, len, offset & ~CFS_PAGE_MASK);
+ sg_set_page(&sl, page, len, offset & ~PAGE_MASK);
ahash_request_set_crypt(req, &sl, NULL, sl.length);
return crypto_ahash_update(req);
}
EXPORT_SYMBOL(cfs_crypto_hash_update_page);
+/**
+ * Update hash digest computed on the specified data
+ *
+ * \param[in] hdesc hash state descriptor
+ * \param[in] buf data buffer on which to compute the hash
+ * \param[in]	buf_len	length of \a buf on which to compute hash
+ *
+ * \retval 0 for success
+ * \retval negative errno on failure
+ */
int cfs_crypto_hash_update(struct cfs_crypto_hash_desc *hdesc,
const void *buf, unsigned int buf_len)
{
@@ -178,7 +257,18 @@ int cfs_crypto_hash_update(struct cfs_crypto_hash_desc *hdesc,
}
EXPORT_SYMBOL(cfs_crypto_hash_update);
-/* If hash_len pointer is NULL - destroy descriptor. */
+/**
+ * Finish hash calculation, copy hash digest to buffer, clean up hash descriptor
+ *
+ * \param[in] hdesc hash descriptor
+ * \param[out] hash pointer to hash buffer to store hash digest
+ * \param[in,out] hash_len	pointer to hash buffer size; if \a hash or
+ *				\a hash_len is NULL, only free \a hdesc
+ *				instead of computing the hash
+ *
+ * \retval 0 for success
+ * \retval -EOVERFLOW if hash_len is too small for the hash digest
+ * \retval negative errno for other errors from lower layers
+ */
int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *hdesc,
unsigned char *hash, unsigned int *hash_len)
{
@@ -186,99 +276,153 @@ int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *hdesc,
struct ahash_request *req = (void *)hdesc;
int size = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));
- if (!hash_len) {
- crypto_free_ahash(crypto_ahash_reqtfm(req));
- ahash_request_free(req);
- return 0;
+ if (!hash || !hash_len) {
+ err = 0;
+ goto free_ahash;
}
- if (!hash || *hash_len < size) {
- *hash_len = size;
- return -ENOSPC;
+ if (*hash_len < size) {
+ err = -EOVERFLOW;
+ goto free_ahash;
}
+
ahash_request_set_crypt(req, NULL, hash, 0);
err = crypto_ahash_final(req);
-
- if (err < 0) {
- /* May be caller can fix error */
- return err;
- }
+ if (!err)
+ *hash_len = size;
+free_ahash:
crypto_free_ahash(crypto_ahash_reqtfm(req));
ahash_request_free(req);
return err;
}
EXPORT_SYMBOL(cfs_crypto_hash_final);
-static void cfs_crypto_performance_test(unsigned char alg_id,
- const unsigned char *buf,
- unsigned int buf_len)
+/**
+ * Compute the speed of specified hash function
+ *
+ * Run a speed test on the given hash algorithm on buffer of the given size.
+ * The speed is stored internally in the cfs_crypto_hash_speeds[] array, and
+ * is available through the cfs_crypto_hash_speed() function.
+ *
+ * \param[in]	hash_alg	hash algorithm id (CFS_HASH_ALG_*); the test
+ *				buffer is allocated internally by this routine
+ */
+static void cfs_crypto_performance_test(enum cfs_crypto_hash_alg hash_alg)
{
+ int buf_len = max(PAGE_SIZE, 1048576UL);
+ void *buf;
unsigned long start, end;
int bcount, err = 0;
- int sec = 1; /* do test only 1 sec */
- unsigned char hash[64];
- unsigned int hash_len = 64;
-
- for (start = jiffies, end = start + sec * HZ, bcount = 0;
- time_before(jiffies, end); bcount++) {
- err = cfs_crypto_hash_digest(alg_id, buf, buf_len, NULL, 0,
- hash, &hash_len);
+ struct page *page;
+ unsigned char hash[CFS_CRYPTO_HASH_DIGESTSIZE_MAX];
+ unsigned int hash_len = sizeof(hash);
+
+ page = alloc_page(GFP_KERNEL);
+ if (!page) {
+ err = -ENOMEM;
+ goto out_err;
+ }
+
+ buf = kmap(page);
+ memset(buf, 0xAD, PAGE_SIZE);
+ kunmap(page);
+
+ for (start = jiffies, end = start + msecs_to_jiffies(MSEC_PER_SEC),
+ bcount = 0; time_before(jiffies, end); bcount++) {
+ struct cfs_crypto_hash_desc *hdesc;
+ int i;
+
+ hdesc = cfs_crypto_hash_init(hash_alg, NULL, 0);
+ if (IS_ERR(hdesc)) {
+ err = PTR_ERR(hdesc);
+ break;
+ }
+
+ for (i = 0; i < buf_len / PAGE_SIZE; i++) {
+ err = cfs_crypto_hash_update_page(hdesc, page, 0,
+ PAGE_SIZE);
+ if (err)
+ break;
+ }
+
+ err = cfs_crypto_hash_final(hdesc, hash, &hash_len);
if (err)
break;
}
end = jiffies;
-
+ __free_page(page);
+out_err:
if (err) {
- cfs_crypto_hash_speeds[alg_id] = -1;
- CDEBUG(D_INFO, "Crypto hash algorithm %s, err = %d\n",
- cfs_crypto_hash_name(alg_id), err);
+ cfs_crypto_hash_speeds[hash_alg] = err;
+ CDEBUG(D_INFO, "Crypto hash algorithm %s test error: rc = %d\n",
+ cfs_crypto_hash_name(hash_alg), err);
} else {
unsigned long tmp;
tmp = ((bcount * buf_len / jiffies_to_msecs(end - start)) *
1000) / (1024 * 1024);
- cfs_crypto_hash_speeds[alg_id] = (int)tmp;
+ cfs_crypto_hash_speeds[hash_alg] = (int)tmp;
+ CDEBUG(D_CONFIG, "Crypto hash algorithm %s speed = %d MB/s\n",
+ cfs_crypto_hash_name(hash_alg),
+ cfs_crypto_hash_speeds[hash_alg]);
}
- CDEBUG(D_INFO, "Crypto hash algorithm %s speed = %d MB/s\n",
- cfs_crypto_hash_name(alg_id), cfs_crypto_hash_speeds[alg_id]);
}
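The MB/s value stored above is simply bcount full-buffer digests divided by the elapsed wall time. A small worked sketch of the same arithmetic with illustrative numbers:

	/* e.g. 900 one-megabyte digests completed in ~1000 ms */
	unsigned long bytes   = 900UL * 1048576UL;	/* total data hashed */
	unsigned long elapsed = 1000UL;			/* elapsed time in ms */
	int mbs = (int)(((bytes / elapsed) * 1000) / (1024 * 1024));	/* 900 MB/s */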
-int cfs_crypto_hash_speed(unsigned char hash_alg)
+/**
+ * Return the hash speed in MB/s for a valid hash algorithm
+ *
+ * Return the performance of the specified \a hash_alg that was previously
+ * computed using cfs_crypto_performance_test().
+ *
+ * \param[in] hash_alg hash algorithm id (CFS_HASH_ALG_*)
+ *
+ * \retval positive speed of the hash function in MB/s
+ * \retval -ENOENT if \a hash_alg is unsupported
+ * \retval negative errno if \a hash_alg speed is unavailable
+ */
+int cfs_crypto_hash_speed(enum cfs_crypto_hash_alg hash_alg)
{
if (hash_alg < CFS_HASH_ALG_MAX)
return cfs_crypto_hash_speeds[hash_alg];
- return -1;
+ return -ENOENT;
}
EXPORT_SYMBOL(cfs_crypto_hash_speed);
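A consumer of these measurements could use cfs_crypto_hash_speed() to pick the fastest available algorithm. A minimal sketch, assuming only the helpers shown in this patch and treating algorithm id 0 as the "none selected" value:

static enum cfs_crypto_hash_alg example_fastest_hash_alg(void)
{
	enum cfs_crypto_hash_alg alg, best = 0;	/* 0 assumed to mean "none" */
	int best_speed = 0;

	for (alg = 0; alg < CFS_HASH_ALG_MAX; alg++) {
		int speed = cfs_crypto_hash_speed(alg);

		/* negative values mean unsupported or untested algorithms */
		if (speed > best_speed) {
			best_speed = speed;
			best = alg;
		}
	}

	CDEBUG(D_INFO, "fastest hash algorithm: %s (%d MB/s)\n",
	       cfs_crypto_hash_name(best), best_speed);
	return best;
}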
/**
- * Do performance test for all hash algorithms.
+ * Run the performance test for all hash algorithms.
+ *
+ * Run the cfs_crypto_performance_test() benchmark for all of the available
+ * hash functions using a 1MB buffer size. This is a reasonable buffer size
+ * for Lustre RPCs, even if the actual RPC size is larger or smaller.
+ *
+ * Since the setup cost and computation speed of the various hash algorithms
+ * are a function of the buffer size (and possibly internal contention of
+ * offload engines), this speed is only an estimate of the performance under
+ * real usage, but is reasonable for comparing the available algorithms.
+ *
+ * The actual speeds are available via cfs_crypto_hash_speed() for later
+ * comparison.
+ *
+ * \retval 0 always; per-algorithm errors (e.g. -ENOMEM for the test buffer)
+ * are recorded in cfs_crypto_hash_speeds[] rather than returned
*/
static int cfs_crypto_test_hashes(void)
{
- unsigned char i;
- unsigned char *data;
- unsigned int j;
- /* Data block size for testing hash. Maximum
- * kmalloc size for 2.6.18 kernel is 128K
- */
- unsigned int data_len = 1 * 128 * 1024;
-
- data = kmalloc(data_len, 0);
- if (!data)
- return -ENOMEM;
+ enum cfs_crypto_hash_alg hash_alg;
- for (j = 0; j < data_len; j++)
- data[j] = j & 0xff;
+ for (hash_alg = 0; hash_alg < CFS_HASH_ALG_MAX; hash_alg++)
+ cfs_crypto_performance_test(hash_alg);
- for (i = 0; i < CFS_HASH_ALG_MAX; i++)
- cfs_crypto_performance_test(i, data, data_len);
-
- kfree(data);
return 0;
}
static int adler32;
+/**
+ * Register available hash functions
+ *
+ * \retval 0
+ */
int cfs_crypto_register(void)
{
request_module("crc32c");
@@ -290,6 +434,9 @@ int cfs_crypto_register(void)
return 0;
}
+/**
+ * Unregister previously registered hash functions
+ */
void cfs_crypto_unregister(void)
{
if (adler32 == 0)
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
index ebc60ac9bb7a..d89f71ee45b2 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-module.c
@@ -40,10 +40,75 @@
#define LNET_MINOR 240
+static inline size_t libcfs_ioctl_packlen(struct libcfs_ioctl_data *data)
+{
+ size_t len = sizeof(*data);
+
+ len += cfs_size_round(data->ioc_inllen1);
+ len += cfs_size_round(data->ioc_inllen2);
+ return len;
+}
+
+static inline bool libcfs_ioctl_is_invalid(struct libcfs_ioctl_data *data)
+{
+ if (data->ioc_hdr.ioc_len > BIT(30)) {
+ CERROR("LIBCFS ioctl: ioc_len larger than 1<<30\n");
+ return true;
+ }
+ if (data->ioc_inllen1 > BIT(30)) {
+ CERROR("LIBCFS ioctl: ioc_inllen1 larger than 1<<30\n");
+ return true;
+ }
+ if (data->ioc_inllen2 > BIT(30)) {
+ CERROR("LIBCFS ioctl: ioc_inllen2 larger than 1<<30\n");
+ return true;
+ }
+ if (data->ioc_inlbuf1 && !data->ioc_inllen1) {
+ CERROR("LIBCFS ioctl: inlbuf1 pointer but 0 length\n");
+ return true;
+ }
+ if (data->ioc_inlbuf2 && !data->ioc_inllen2) {
+ CERROR("LIBCFS ioctl: inlbuf2 pointer but 0 length\n");
+ return true;
+ }
+ if (data->ioc_pbuf1 && !data->ioc_plen1) {
+ CERROR("LIBCFS ioctl: pbuf1 pointer but 0 length\n");
+ return true;
+ }
+ if (data->ioc_pbuf2 && !data->ioc_plen2) {
+ CERROR("LIBCFS ioctl: pbuf2 pointer but 0 length\n");
+ return true;
+ }
+ if (data->ioc_plen1 && !data->ioc_pbuf1) {
+ CERROR("LIBCFS ioctl: plen1 nonzero but no pbuf1 pointer\n");
+ return true;
+ }
+ if (data->ioc_plen2 && !data->ioc_pbuf2) {
+ CERROR("LIBCFS ioctl: plen2 nonzero but no pbuf2 pointer\n");
+ return true;
+ }
+ if ((__u32)libcfs_ioctl_packlen(data) != data->ioc_hdr.ioc_len) {
+ CERROR("LIBCFS ioctl: packlen != ioc_len\n");
+ return true;
+ }
+ if (data->ioc_inllen1 &&
+ data->ioc_bulk[data->ioc_inllen1 - 1] != '\0') {
+ CERROR("LIBCFS ioctl: inlbuf1 not 0 terminated\n");
+ return true;
+ }
+ if (data->ioc_inllen2 &&
+ data->ioc_bulk[cfs_size_round(data->ioc_inllen1) +
+ data->ioc_inllen2 - 1] != '\0') {
+ CERROR("LIBCFS ioctl: inlbuf2 not 0 terminated\n");
+ return true;
+ }
+ return false;
+}
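The checks above pin down the expected layout: both inline buffers travel in ioc_bulk, each rounded up with cfs_size_round(), inlbuf1 first and inlbuf2 starting at the rounded end of inlbuf1, and ioc_hdr.ioc_len must equal libcfs_ioctl_packlen(). A hedged sketch of a caller packing a request under that convention (the helper name is illustrative; it assumes the caller already allocated data large enough and that the ioc_inlbuf pointers are fixed up separately, as libcfs_ioctl_data_adjust() does):

static void example_pack_inline_bufs(struct libcfs_ioctl_data *data,
				     const char *s1, const char *s2)
{
	/* inline lengths include the terminating NUL checked above */
	data->ioc_inllen1 = strlen(s1) + 1;
	data->ioc_inllen2 = strlen(s2) + 1;

	/* inlbuf1 occupies the start of ioc_bulk ... */
	memcpy(data->ioc_bulk, s1, data->ioc_inllen1);
	/* ... and inlbuf2 starts at the rounded end of inlbuf1 */
	memcpy(data->ioc_bulk + cfs_size_round(data->ioc_inllen1),
	       s2, data->ioc_inllen2);

	/* total length must match libcfs_ioctl_packlen() exactly */
	data->ioc_hdr.ioc_len = sizeof(*data) +
				cfs_size_round(data->ioc_inllen1) +
				cfs_size_round(data->ioc_inllen2);
}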
+
int libcfs_ioctl_data_adjust(struct libcfs_ioctl_data *data)
{
if (libcfs_ioctl_is_invalid(data)) {
- CERROR("LNET: ioctl not correctly formatted\n");
+ CERROR("libcfs ioctl: parameter not correctly formatted\n");
return -EINVAL;
}
@@ -57,68 +122,47 @@ int libcfs_ioctl_data_adjust(struct libcfs_ioctl_data *data)
return 0;
}
-int libcfs_ioctl_getdata_len(const struct libcfs_ioctl_hdr __user *arg,
- __u32 *len)
+int libcfs_ioctl_getdata(struct libcfs_ioctl_hdr **hdr_pp,
+ const struct libcfs_ioctl_hdr __user *uhdr)
{
struct libcfs_ioctl_hdr hdr;
+ int err = 0;
- if (copy_from_user(&hdr, arg, sizeof(hdr)))
+ if (copy_from_user(&hdr, uhdr, sizeof(hdr)))
return -EFAULT;
if (hdr.ioc_version != LIBCFS_IOCTL_VERSION &&
hdr.ioc_version != LIBCFS_IOCTL_VERSION2) {
- CERROR("LNET: version mismatch expected %#x, got %#x\n",
+ CERROR("libcfs ioctl: version mismatch expected %#x, got %#x\n",
LIBCFS_IOCTL_VERSION, hdr.ioc_version);
return -EINVAL;
}
- *len = hdr.ioc_len;
-
- return 0;
-}
-
-int libcfs_ioctl_popdata(void __user *arg, void *data, int size)
-{
- if (copy_to_user(arg, data, size))
- return -EFAULT;
- return 0;
-}
-
-static int
-libcfs_psdev_open(struct inode *inode, struct file *file)
-{
- int rc = 0;
+ if (hdr.ioc_len < sizeof(struct libcfs_ioctl_data)) {
+ CERROR("libcfs ioctl: user buffer too small for ioctl\n");
+ return -EINVAL;
+ }
- if (!inode)
+ if (hdr.ioc_len > LIBCFS_IOC_DATA_MAX) {
+ CERROR("libcfs ioctl: user buffer is too large %d/%d\n",
+ hdr.ioc_len, LIBCFS_IOC_DATA_MAX);
return -EINVAL;
- if (libcfs_psdev_ops.p_open)
- rc = libcfs_psdev_ops.p_open(0, NULL);
- else
- return -EPERM;
- return rc;
-}
+ }
-/* called when closing /dev/device */
-static int
-libcfs_psdev_release(struct inode *inode, struct file *file)
-{
- int rc = 0;
+ LIBCFS_ALLOC(*hdr_pp, hdr.ioc_len);
+ if (!*hdr_pp)
+ return -ENOMEM;
- if (!inode)
- return -EINVAL;
- if (libcfs_psdev_ops.p_close)
- rc = libcfs_psdev_ops.p_close(0, NULL);
- else
- rc = -EPERM;
- return rc;
+ if (copy_from_user(*hdr_pp, uhdr, hdr.ioc_len)) {
+ LIBCFS_FREE(*hdr_pp, hdr.ioc_len);
+ err = -EFAULT;
+ }
+ return err;
}
-static long libcfs_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
+static long
+libcfs_psdev_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
- struct cfs_psdev_file pfile;
- int rc = 0;
-
if (!capable(CAP_SYS_ADMIN))
return -EACCES;
@@ -130,26 +174,12 @@ static long libcfs_ioctl(struct file *file,
return -EINVAL;
}
- /* Handle platform-dependent IOC requests */
- switch (cmd) {
- case IOC_LIBCFS_PANIC:
- if (!capable(CFS_CAP_SYS_BOOT))
- return -EPERM;
- panic("debugctl-invoked panic");
- return 0;
- }
-
- if (libcfs_psdev_ops.p_ioctl)
- rc = libcfs_psdev_ops.p_ioctl(&pfile, cmd, (void __user *)arg);
- else
- rc = -EPERM;
- return rc;
+ return libcfs_ioctl(cmd, (void __user *)arg);
}
static const struct file_operations libcfs_fops = {
- .unlocked_ioctl = libcfs_ioctl,
- .open = libcfs_psdev_open,
- .release = libcfs_psdev_release,
+ .owner = THIS_MODULE,
+ .unlocked_ioctl = libcfs_psdev_ioctl,
};
struct miscdevice libcfs_dev = {
diff --git a/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
index 89084460231a..bbe19a684c81 100644
--- a/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
+++ b/drivers/staging/lustre/lnet/libcfs/linux/linux-prim.c
@@ -46,30 +46,6 @@
#include <linux/kgdb.h>
#endif
-/**
- * wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively
- * waiting threads, which is not always desirable because all threads will
- * be waken up again and again, even user only needs a few of them to be
- * active most time. This is not good for performance because cache can
- * be polluted by different threads.
- *
- * LIFO list can resolve this problem because we always wakeup the most
- * recent active thread by default.
- *
- * NB: please don't call non-exclusive & exclusive wait on the same
- * waitq if add_wait_queue_exclusive_head is used.
- */
-void
-add_wait_queue_exclusive_head(wait_queue_head_t *waitq, wait_queue_t *link)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&waitq->lock, flags);
- __add_wait_queue_exclusive(waitq, link);
- spin_unlock_irqrestore(&waitq->lock, flags);
-}
-EXPORT_SYMBOL(add_wait_queue_exclusive_head);
-
sigset_t
cfs_block_allsigs(void)
{
@@ -128,13 +104,6 @@ cfs_restore_sigs(sigset_t old)
}
EXPORT_SYMBOL(cfs_restore_sigs);
-int
-cfs_signal_pending(void)
-{
- return signal_pending(current);
-}
-EXPORT_SYMBOL(cfs_signal_pending);
-
void
cfs_clear_sigpending(void)
{
diff --git a/drivers/staging/lustre/lnet/libcfs/module.c b/drivers/staging/lustre/lnet/libcfs/module.c
index cdc640bfdba8..f2d041118cf7 100644
--- a/drivers/staging/lustre/lnet/libcfs/module.c
+++ b/drivers/staging/lustre/lnet/libcfs/module.c
@@ -54,9 +54,6 @@
# define DEBUG_SUBSYSTEM S_LNET
-#define LNET_MAX_IOCTL_BUF_LEN (sizeof(struct lnet_ioctl_net_config) + \
- sizeof(struct lnet_ioctl_config_data))
-
#include "../../include/linux/libcfs/libcfs.h"
#include <asm/div64.h>
@@ -68,20 +65,6 @@
static struct dentry *lnet_debugfs_root;
-/* called when opening /dev/device */
-static int libcfs_psdev_open(unsigned long flags, void *args)
-{
- try_module_get(THIS_MODULE);
- return 0;
-}
-
-/* called when closing /dev/device */
-static int libcfs_psdev_release(unsigned long flags, void *args)
-{
- module_put(THIS_MODULE);
- return 0;
-}
-
static DECLARE_RWSEM(ioctl_list_sem);
static LIST_HEAD(ioctl_list);
@@ -115,39 +98,47 @@ int libcfs_deregister_ioctl(struct libcfs_ioctl_handler *hand)
}
EXPORT_SYMBOL(libcfs_deregister_ioctl);
-static int libcfs_ioctl_handle(struct cfs_psdev_file *pfile, unsigned long cmd,
- void __user *arg, struct libcfs_ioctl_hdr *hdr)
+int libcfs_ioctl(unsigned long cmd, void __user *uparam)
{
struct libcfs_ioctl_data *data = NULL;
- int err = -EINVAL;
+ struct libcfs_ioctl_hdr *hdr;
+ int err;
+
+ /* 'cmd' and permissions get checked in our arch-specific caller */
+ err = libcfs_ioctl_getdata(&hdr, uparam);
+ if (err) {
+ CDEBUG_LIMIT(D_ERROR,
+ "libcfs ioctl: data header error %d\n", err);
+ return err;
+ }
- /*
- * The libcfs_ioctl_data_adjust() function performs adjustment
- * operations on the libcfs_ioctl_data structure to make
- * it usable by the code. This doesn't need to be called
- * for new data structures added.
- */
if (hdr->ioc_version == LIBCFS_IOCTL_VERSION) {
+ /*
+ * The libcfs_ioctl_data_adjust() function performs adjustment
+ * operations on the libcfs_ioctl_data structure to make
+ * it usable by the code. This doesn't need to be called
+ * for new data structures added.
+ */
data = container_of(hdr, struct libcfs_ioctl_data, ioc_hdr);
err = libcfs_ioctl_data_adjust(data);
if (err)
- return err;
+ goto out;
}
+ CDEBUG(D_IOCTL, "libcfs ioctl cmd %lu\n", cmd);
switch (cmd) {
case IOC_LIBCFS_CLEAR_DEBUG:
libcfs_debug_clear_buffer();
- return 0;
- /*
- * case IOC_LIBCFS_PANIC:
- * Handled in arch/cfs_module.c
- */
+ break;
+
case IOC_LIBCFS_MARK_DEBUG:
- if (!data->ioc_inlbuf1 ||
- data->ioc_inlbuf1[data->ioc_inllen1 - 1] != '\0')
- return -EINVAL;
+ if (!data || !data->ioc_inlbuf1 ||
+ data->ioc_inlbuf1[data->ioc_inllen1 - 1] != '\0') {
+ err = -EINVAL;
+ goto out;
+ }
libcfs_debug_mark_buffer(data->ioc_inlbuf1);
- return 0;
+ break;
default: {
struct libcfs_ioctl_handler *hand;
@@ -156,67 +147,23 @@ static int libcfs_ioctl_handle(struct cfs_psdev_file *pfile, unsigned long cmd,
down_read(&ioctl_list_sem);
list_for_each_entry(hand, &ioctl_list, item) {
err = hand->handle_ioctl(cmd, hdr);
- if (err != -EINVAL) {
- if (err == 0)
- err = libcfs_ioctl_popdata(arg,
- hdr, hdr->ioc_len);
- break;
+ if (err == -EINVAL)
+ continue;
+
+ if (!err) {
+ if (copy_to_user(uparam, hdr, hdr->ioc_len))
+ err = -EFAULT;
}
+ break;
}
up_read(&ioctl_list_sem);
- break;
- }
- }
-
- return err;
-}
-
-static int libcfs_ioctl(struct cfs_psdev_file *pfile, unsigned long cmd,
- void __user *arg)
-{
- struct libcfs_ioctl_hdr *hdr;
- int err = 0;
- __u32 buf_len;
-
- err = libcfs_ioctl_getdata_len(arg, &buf_len);
- if (err)
- return err;
-
- /*
- * do a check here to restrict the size of the memory
- * to allocate to guard against DoS attacks.
- */
- if (buf_len > LNET_MAX_IOCTL_BUF_LEN) {
- CERROR("LNET: user buffer exceeds kernel buffer\n");
- return -EINVAL;
- }
-
- LIBCFS_ALLOC_GFP(hdr, buf_len, GFP_KERNEL);
- if (!hdr)
- return -ENOMEM;
-
- /* 'cmd' and permissions get checked in our arch-specific caller */
- if (copy_from_user(hdr, arg, buf_len)) {
- CERROR("LNET ioctl: data error\n");
- err = -EFAULT;
- goto out;
+ break; }
}
-
- err = libcfs_ioctl_handle(pfile, cmd, arg, hdr);
-
out:
- LIBCFS_FREE(hdr, buf_len);
+ LIBCFS_FREE(hdr, hdr->ioc_len);
return err;
}
-struct cfs_psdev_ops libcfs_psdev_ops = {
- libcfs_psdev_open,
- libcfs_psdev_release,
- NULL,
- NULL,
- libcfs_ioctl
-};
-
int lprocfs_call_handler(void *data, int write, loff_t *ppos,
void __user *buffer, size_t *lenp,
int (*handler)(void *data, int write, loff_t pos,
@@ -478,6 +425,13 @@ static struct ctl_table lnet_table[] = {
.proc_handler = &proc_dointvec
},
{
+ .procname = "fail_err",
+ .data = &cfs_fail_err,
+ .maxlen = sizeof(cfs_fail_err),
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
+ {
}
};
diff --git a/drivers/staging/lustre/lnet/libcfs/tracefile.c b/drivers/staging/lustre/lnet/libcfs/tracefile.c
index 244eb89eef68..7739b9469c5a 100644
--- a/drivers/staging/lustre/lnet/libcfs/tracefile.c
+++ b/drivers/staging/lustre/lnet/libcfs/tracefile.c
@@ -707,10 +707,9 @@ int cfs_tracefile_dump_all_pages(char *filename)
struct cfs_trace_page *tage;
struct cfs_trace_page *tmp;
char *buf;
+ mm_segment_t __oldfs;
int rc;
- DECL_MMSPACE;
-
cfs_tracefile_write_lock();
filp = filp_open(filename, O_CREAT | O_EXCL | O_WRONLY | O_LARGEFILE,
@@ -729,11 +728,12 @@ int cfs_tracefile_dump_all_pages(char *filename)
rc = 0;
goto close;
}
+ __oldfs = get_fs();
+ set_fs(get_ds());
/* ok, for now, just write the pages. in the future we'll be building
* iobufs with the pages and calling generic_direct_IO
*/
- MMSPACE_OPEN;
list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
__LASSERT_TAGE_INVARIANT(tage);
@@ -752,7 +752,7 @@ int cfs_tracefile_dump_all_pages(char *filename)
list_del(&tage->linkage);
cfs_tage_free(tage);
}
- MMSPACE_CLOSE;
+ set_fs(__oldfs);
rc = vfs_fsync(filp, 1);
if (rc)
pr_err("sync returns %d\n", rc);
@@ -986,13 +986,12 @@ static int tracefiled(void *arg)
struct tracefiled_ctl *tctl = arg;
struct cfs_trace_page *tage;
struct cfs_trace_page *tmp;
+ mm_segment_t __oldfs;
struct file *filp;
char *buf;
int last_loop = 0;
int rc;
- DECL_MMSPACE;
-
/* we're started late enough that we pick up init's fs context */
/* this is so broken in uml? what on earth is going on? */
@@ -1025,8 +1024,8 @@ static int tracefiled(void *arg)
__LASSERT(list_empty(&pc.pc_pages));
goto end_loop;
}
-
- MMSPACE_OPEN;
+ __oldfs = get_fs();
+ set_fs(get_ds());
list_for_each_entry_safe(tage, tmp, &pc.pc_pages, linkage) {
static loff_t f_pos;
@@ -1051,7 +1050,7 @@ static int tracefiled(void *arg)
break;
}
}
- MMSPACE_CLOSE;
+ set_fs(__oldfs);
filp_close(filp, NULL);
put_pages_on_daemon_list(&pc);
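The DECL_MMSPACE/MMSPACE_OPEN/MMSPACE_CLOSE macros removed in this file expand to the usual address-limit dance of that kernel era; the open-coded replacement used in both call sites follows this generic pattern (a sketch, not tied to the tracefile code):

static ssize_t example_kernel_buf_write(struct file *filp, const char *buf,
					size_t len, loff_t *pos)
{
	mm_segment_t oldfs = get_fs();	/* remember the current address limit */
	ssize_t rc;

	set_fs(get_ds());		/* let vfs_write() accept a kernel buffer */
	rc = vfs_write(filp, (const char __user *)buf, len, pos);
	set_fs(oldfs);			/* always restore before returning */
	return rc;
}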
diff --git a/drivers/staging/lustre/lnet/libcfs/workitem.c b/drivers/staging/lustre/lnet/libcfs/workitem.c
index c72fe00dce8d..92236ae59e49 100644
--- a/drivers/staging/lustre/lnet/libcfs/workitem.c
+++ b/drivers/staging/lustre/lnet/libcfs/workitem.c
@@ -111,7 +111,7 @@ cfs_wi_sched_cansleep(struct cfs_wi_sched *sched)
* 1. when it returns no one shall try to schedule the workitem.
*/
void
-cfs_wi_exit(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
+cfs_wi_exit(struct cfs_wi_sched *sched, struct cfs_workitem *wi)
{
LASSERT(!in_interrupt()); /* because we use plain spinlock */
LASSERT(!sched->ws_stopping);
@@ -138,7 +138,7 @@ EXPORT_SYMBOL(cfs_wi_exit);
* cancel schedule request of workitem \a wi
*/
int
-cfs_wi_deschedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
+cfs_wi_deschedule(struct cfs_wi_sched *sched, struct cfs_workitem *wi)
{
int rc;
@@ -179,7 +179,7 @@ EXPORT_SYMBOL(cfs_wi_deschedule);
* be added, and even dynamic creation of serialised queues might be supported.
*/
void
-cfs_wi_schedule(struct cfs_wi_sched *sched, cfs_workitem_t *wi)
+cfs_wi_schedule(struct cfs_wi_sched *sched, struct cfs_workitem *wi)
{
LASSERT(!in_interrupt()); /* because we use plain spinlock */
LASSERT(!sched->ws_stopping);
@@ -229,12 +229,12 @@ static int cfs_wi_scheduler(void *arg)
while (!sched->ws_stopping) {
int nloops = 0;
int rc;
- cfs_workitem_t *wi;
+ struct cfs_workitem *wi;
while (!list_empty(&sched->ws_runq) &&
nloops < CFS_WI_RESCHED) {
- wi = list_entry(sched->ws_runq.next, cfs_workitem_t,
- wi_list);
+ wi = list_entry(sched->ws_runq.next,
+ struct cfs_workitem, wi_list);
LASSERT(wi->wi_scheduled && !wi->wi_running);
list_del_init(&wi->wi_list);
diff --git a/drivers/staging/lustre/lnet/lnet/api-ni.c b/drivers/staging/lustre/lnet/lnet/api-ni.c
index 8764755544c9..fe0dbe7468e7 100644
--- a/drivers/staging/lustre/lnet/lnet/api-ni.c
+++ b/drivers/staging/lustre/lnet/lnet/api-ni.c
@@ -1215,9 +1215,9 @@ lnet_shutdown_lndni(struct lnet_ni *ni)
}
static int
-lnet_startup_lndni(struct lnet_ni *ni, __s32 peer_timeout,
- __s32 peer_cr, __s32 peer_buf_cr, __s32 credits)
+lnet_startup_lndni(struct lnet_ni *ni, struct lnet_ioctl_config_data *conf)
{
+ struct lnet_ioctl_config_lnd_tunables *lnd_tunables = NULL;
int rc = -EINVAL;
int lnd_type;
lnd_t *lnd;
@@ -1275,6 +1275,21 @@ lnet_startup_lndni(struct lnet_ni *ni, __s32 peer_timeout,
ni->ni_lnd = lnd;
+ if (conf && conf->cfg_hdr.ioc_len > sizeof(*conf))
+ lnd_tunables = (struct lnet_ioctl_config_lnd_tunables *)conf->cfg_bulk;
+
+ if (lnd_tunables) {
+ LIBCFS_ALLOC(ni->ni_lnd_tunables,
+ sizeof(*ni->ni_lnd_tunables));
+ if (!ni->ni_lnd_tunables) {
+ mutex_unlock(&the_lnet.ln_lnd_mutex);
+ rc = -ENOMEM;
+ goto failed0;
+ }
+ memcpy(ni->ni_lnd_tunables, lnd_tunables,
+ sizeof(*ni->ni_lnd_tunables));
+ }
+
rc = lnd->lnd_startup(ni);
mutex_unlock(&the_lnet.ln_lnd_mutex);
@@ -1292,20 +1307,28 @@ lnet_startup_lndni(struct lnet_ni *ni, __s32 peer_timeout,
* If given some LND tunable parameters, parse those now to
* override the values in the NI structure.
*/
- if (peer_buf_cr >= 0)
- ni->ni_peerrtrcredits = peer_buf_cr;
- if (peer_timeout >= 0)
- ni->ni_peertimeout = peer_timeout;
+ if (conf && conf->cfg_config_u.cfg_net.net_peer_rtr_credits >= 0) {
+ ni->ni_peerrtrcredits =
+ conf->cfg_config_u.cfg_net.net_peer_rtr_credits;
+ }
+ if (conf && conf->cfg_config_u.cfg_net.net_peer_timeout >= 0) {
+ ni->ni_peertimeout =
+ conf->cfg_config_u.cfg_net.net_peer_timeout;
+ }
/*
* TODO
* Note: For now, don't allow the user to change
* peertxcredits as this number is used in the
* IB LND to control queue depth.
- * if (peer_cr != -1)
- * ni->ni_peertxcredits = peer_cr;
+ *
+ * if (conf && conf->cfg_config_u.cfg_net.net_peer_tx_credits != -1)
+ * ni->ni_peertxcredits =
+ * conf->cfg_config_u.cfg_net.net_peer_tx_credits;
*/
- if (credits >= 0)
- ni->ni_maxtxcredits = credits;
+ if (conf && conf->cfg_config_u.cfg_net.net_max_tx_credits >= 0) {
+ ni->ni_maxtxcredits =
+ conf->cfg_config_u.cfg_net.net_max_tx_credits;
+ }
LASSERT(ni->ni_peertimeout <= 0 || lnd->lnd_query);
@@ -1367,7 +1390,7 @@ lnet_startup_lndnis(struct list_head *nilist)
while (!list_empty(nilist)) {
ni = list_entry(nilist->next, lnet_ni_t, ni_list);
list_del(&ni->ni_list);
- rc = lnet_startup_lndni(ni, -1, -1, -1, -1);
+ rc = lnet_startup_lndni(ni, NULL);
if (rc < 0)
goto failed;
@@ -1641,25 +1664,20 @@ EXPORT_SYMBOL(LNetNIFini);
* parameters
*
* \param[in] ni network interface structure
- * \param[out] cpt_count the number of cpts the ni is on
- * \param[out] nid Network Interface ID
- * \param[out] peer_timeout NI peer timeout
- * \param[out] peer_tx_crdits NI peer transmit credits
- * \param[out] peer_rtr_credits NI peer router credits
- * \param[out] max_tx_credits NI max transmit credit
- * \param[out] net_config Network configuration
+ * \param[out] config NI configuration
*/
static void
-lnet_fill_ni_info(struct lnet_ni *ni, __u32 *cpt_count, __u64 *nid,
- int *peer_timeout, int *peer_tx_credits,
- int *peer_rtr_credits, int *max_tx_credits,
- struct lnet_ioctl_net_config *net_config)
+lnet_fill_ni_info(struct lnet_ni *ni, struct lnet_ioctl_config_data *config)
{
+ struct lnet_ioctl_config_lnd_tunables *lnd_cfg = NULL;
+ struct lnet_ioctl_net_config *net_config;
+ size_t min_size, tunable_size = 0;
int i;
- if (!ni)
+ if (!ni || !config)
return;
+ net_config = (struct lnet_ioctl_net_config *) config->cfg_bulk;
if (!net_config)
return;
@@ -1675,11 +1693,11 @@ lnet_fill_ni_info(struct lnet_ni *ni, __u32 *cpt_count, __u64 *nid,
sizeof(net_config->ni_interfaces[i]));
}
- *nid = ni->ni_nid;
- *peer_timeout = ni->ni_peertimeout;
- *peer_tx_credits = ni->ni_peertxcredits;
- *peer_rtr_credits = ni->ni_peerrtrcredits;
- *max_tx_credits = ni->ni_maxtxcredits;
+ config->cfg_nid = ni->ni_nid;
+ config->cfg_config_u.cfg_net.net_peer_timeout = ni->ni_peertimeout;
+ config->cfg_config_u.cfg_net.net_max_tx_credits = ni->ni_maxtxcredits;
+ config->cfg_config_u.cfg_net.net_peer_tx_credits = ni->ni_peertxcredits;
+ config->cfg_config_u.cfg_net.net_peer_rtr_credits = ni->ni_peerrtrcredits;
net_config->ni_status = ni->ni_status->ns_status;
@@ -1689,18 +1707,40 @@ lnet_fill_ni_info(struct lnet_ni *ni, __u32 *cpt_count, __u64 *nid,
for (i = 0; i < num_cpts; i++)
net_config->ni_cpts[i] = ni->ni_cpts[i];
- *cpt_count = num_cpts;
+ config->cfg_ncpts = num_cpts;
+ }
+
+ /*
+ * See if user-space tools sent in a newer and larger version
+ * of struct lnet_tunables than what the kernel uses.
+ */
+ min_size = sizeof(*config) + sizeof(*net_config);
+
+ if (config->cfg_hdr.ioc_len > min_size)
+ tunable_size = config->cfg_hdr.ioc_len - min_size;
+
+ /* Don't copy too much data to user space */
+ min_size = min(tunable_size, sizeof(*ni->ni_lnd_tunables));
+ lnd_cfg = (struct lnet_ioctl_config_lnd_tunables *)net_config->cfg_bulk;
+
+ if (ni->ni_lnd_tunables && lnd_cfg && min_size) {
+ memcpy(lnd_cfg, ni->ni_lnd_tunables, min_size);
+ config->cfg_config_u.cfg_net.net_interface_count = 1;
+
+ /* Tell user space that the kernel side has less data */
+ if (tunable_size > sizeof(*ni->ni_lnd_tunables)) {
+ min_size = tunable_size - sizeof(ni->ni_lnd_tunables);
+ config->cfg_hdr.ioc_len -= min_size;
+ }
}
}
-int
-lnet_get_net_config(int idx, __u32 *cpt_count, __u64 *nid, int *peer_timeout,
- int *peer_tx_credits, int *peer_rtr_credits,
- int *max_tx_credits,
- struct lnet_ioctl_net_config *net_config)
+static int
+lnet_get_net_config(struct lnet_ioctl_config_data *config)
{
struct lnet_ni *ni;
struct list_head *tmp;
+ int idx = config->cfg_count;
int cpt, i = 0;
int rc = -ENOENT;
@@ -1712,9 +1752,7 @@ lnet_get_net_config(int idx, __u32 *cpt_count, __u64 *nid, int *peer_timeout,
ni = list_entry(tmp, lnet_ni_t, ni_list);
lnet_ni_lock(ni);
- lnet_fill_ni_info(ni, cpt_count, nid, peer_timeout,
- peer_tx_credits, peer_rtr_credits,
- max_tx_credits, net_config);
+ lnet_fill_ni_info(ni, config);
lnet_ni_unlock(ni);
rc = 0;
break;
@@ -1725,10 +1763,9 @@ lnet_get_net_config(int idx, __u32 *cpt_count, __u64 *nid, int *peer_timeout,
}
int
-lnet_dyn_add_ni(lnet_pid_t requested_pid, char *nets,
- __s32 peer_timeout, __s32 peer_cr, __s32 peer_buf_cr,
- __s32 credits)
+lnet_dyn_add_ni(lnet_pid_t requested_pid, struct lnet_ioctl_config_data *conf)
{
+ char *nets = conf->cfg_config_u.cfg_net.net_intf;
lnet_ping_info_t *pinfo;
lnet_handle_md_t md_handle;
struct lnet_ni *ni;
@@ -1773,8 +1810,7 @@ lnet_dyn_add_ni(lnet_pid_t requested_pid, char *nets,
list_del_init(&ni->ni_list);
- rc = lnet_startup_lndni(ni, peer_timeout, peer_cr,
- peer_buf_cr, credits);
+ rc = lnet_startup_lndni(ni, conf);
if (rc)
goto failed1;
@@ -1864,6 +1900,10 @@ LNetCtl(unsigned int cmd, void *arg)
int rc;
unsigned long secs_passed;
+ BUILD_BUG_ON(LIBCFS_IOC_DATA_MAX <
+ sizeof(struct lnet_ioctl_net_config) +
+ sizeof(struct lnet_ioctl_config_data));
+
switch (cmd) {
case IOC_LIBCFS_GET_NI:
rc = LNetGetId(data->ioc_count, &id);
@@ -1918,27 +1958,14 @@ LNetCtl(unsigned int cmd, void *arg)
&config->cfg_config_u.cfg_route.rtr_priority);
case IOC_LIBCFS_GET_NET: {
- struct lnet_ioctl_net_config *net_config;
- size_t total = sizeof(*config) + sizeof(*net_config);
-
+ size_t total = sizeof(*config) +
+ sizeof(struct lnet_ioctl_net_config);
config = arg;
if (config->cfg_hdr.ioc_len < total)
return -EINVAL;
- net_config = (struct lnet_ioctl_net_config *)
- config->cfg_bulk;
- if (!net_config)
- return -EINVAL;
-
- return lnet_get_net_config(config->cfg_count,
- &config->cfg_ncpts,
- &config->cfg_nid,
- &config->cfg_config_u.cfg_net.net_peer_timeout,
- &config->cfg_config_u.cfg_net.net_peer_tx_credits,
- &config->cfg_config_u.cfg_net.net_peer_rtr_credits,
- &config->cfg_config_u.cfg_net.net_max_tx_credits,
- net_config);
+ return lnet_get_net_config(config);
}
case IOC_LIBCFS_GET_LNET_STATS: {
diff --git a/drivers/staging/lustre/lnet/lnet/config.c b/drivers/staging/lustre/lnet/lnet/config.c
index 449069c9e649..480cc9c6caab 100644
--- a/drivers/staging/lustre/lnet/lnet/config.c
+++ b/drivers/staging/lustre/lnet/lnet/config.c
@@ -107,6 +107,9 @@ lnet_ni_free(struct lnet_ni *ni)
if (ni->ni_cpts)
cfs_expr_list_values_free(ni->ni_cpts, ni->ni_ncpts);
+ if (ni->ni_lnd_tunables)
+ LIBCFS_FREE(ni->ni_lnd_tunables, sizeof(*ni->ni_lnd_tunables));
+
for (i = 0; i < LNET_MAX_INTERFACES && ni->ni_interfaces[i]; i++) {
LIBCFS_FREE(ni->ni_interfaces[i],
strlen(ni->ni_interfaces[i]) + 1);
diff --git a/drivers/staging/lustre/lnet/lnet/lib-move.c b/drivers/staging/lustre/lnet/lnet/lib-move.c
index f19aa9320e34..c5d5bedb3128 100644
--- a/drivers/staging/lustre/lnet/lnet/lib-move.c
+++ b/drivers/staging/lustre/lnet/lnet/lib-move.c
@@ -407,7 +407,7 @@ lnet_copy_kiov2iov(unsigned int niov, struct kvec *iov, unsigned int iovoffset,
LASSERT(niov > 0);
LASSERT(nkiov > 0);
this_nob = min(iov->iov_len - iovoffset,
- (__kernel_size_t) kiov->kiov_len - kiovoffset);
+ (__kernel_size_t)kiov->kiov_len - kiovoffset);
this_nob = min(this_nob, nob);
if (!addr)
@@ -477,7 +477,7 @@ lnet_copy_iov2kiov(unsigned int nkiov, lnet_kiov_t *kiov,
do {
LASSERT(nkiov > 0);
LASSERT(niov > 0);
- this_nob = min((__kernel_size_t) kiov->kiov_len - kiovoffset,
+ this_nob = min((__kernel_size_t)kiov->kiov_len - kiovoffset,
iov->iov_len - iovoffset);
this_nob = min(this_nob, nob);
@@ -996,7 +996,7 @@ lnet_return_tx_credits_locked(lnet_msg_t *msg)
LASSERT(msg2->msg_txpeer->lp_ni == ni);
LASSERT(msg2->msg_tx_delayed);
- (void) lnet_post_send_locked(msg2, 1);
+ (void)lnet_post_send_locked(msg2, 1);
}
}
@@ -1019,7 +1019,7 @@ lnet_return_tx_credits_locked(lnet_msg_t *msg)
LASSERT(msg2->msg_txpeer == txpeer);
LASSERT(msg2->msg_tx_delayed);
- (void) lnet_post_send_locked(msg2, 1);
+ (void)lnet_post_send_locked(msg2, 1);
}
}
@@ -1142,7 +1142,7 @@ routing_off:
lnet_msg_t, msg_list);
list_del(&msg2->msg_list);
- (void) lnet_post_routed_recv_locked(msg2, 1);
+ (void)lnet_post_routed_recv_locked(msg2, 1);
}
}
if (rxpeer) {
diff --git a/drivers/staging/lustre/lnet/lnet/module.c b/drivers/staging/lustre/lnet/lnet/module.c
index 93037c1168ca..246b5c141d01 100644
--- a/drivers/staging/lustre/lnet/lnet/module.c
+++ b/drivers/staging/lustre/lnet/lnet/module.c
@@ -108,12 +108,7 @@ lnet_dyn_configure(struct libcfs_ioctl_hdr *hdr)
rc = -EINVAL;
goto out_unlock;
}
- rc = lnet_dyn_add_ni(LNET_PID_LUSTRE,
- conf->cfg_config_u.cfg_net.net_intf,
- conf->cfg_config_u.cfg_net.net_peer_timeout,
- conf->cfg_config_u.cfg_net.net_peer_tx_credits,
- conf->cfg_config_u.cfg_net.net_peer_rtr_credits,
- conf->cfg_config_u.cfg_net.net_max_tx_credits);
+ rc = lnet_dyn_add_ni(LNET_PID_LUSTRE, conf);
out_unlock:
mutex_unlock(&lnet_config_mutex);
diff --git a/drivers/staging/lustre/lnet/selftest/brw_test.c b/drivers/staging/lustre/lnet/selftest/brw_test.c
index dcb6e506f592..a63d86c4c10d 100644
--- a/drivers/staging/lustre/lnet/selftest/brw_test.c
+++ b/drivers/staging/lustre/lnet/selftest/brw_test.c
@@ -49,10 +49,10 @@ module_param(brw_inject_errors, int, 0644);
MODULE_PARM_DESC(brw_inject_errors, "# data errors to inject randomly, zero by default");
static void
-brw_client_fini(sfw_test_instance_t *tsi)
+brw_client_fini(struct sfw_test_instance *tsi)
{
- srpc_bulk_t *bulk;
- sfw_test_unit_t *tsu;
+ struct srpc_bulk *bulk;
+ struct sfw_test_unit *tsu;
LASSERT(tsi->tsi_is_client);
@@ -67,21 +67,21 @@ brw_client_fini(sfw_test_instance_t *tsi)
}
static int
-brw_client_init(sfw_test_instance_t *tsi)
+brw_client_init(struct sfw_test_instance *tsi)
{
- sfw_session_t *sn = tsi->tsi_batch->bat_session;
+ struct sfw_session *sn = tsi->tsi_batch->bat_session;
int flags;
int npg;
int len;
int opc;
- srpc_bulk_t *bulk;
- sfw_test_unit_t *tsu;
+ struct srpc_bulk *bulk;
+ struct sfw_test_unit *tsu;
LASSERT(sn);
LASSERT(tsi->tsi_is_client);
if (!(sn->sn_features & LST_FEAT_BULK_LEN)) {
- test_bulk_req_t *breq = &tsi->tsi_u.bulk_v0;
+ struct test_bulk_req *breq = &tsi->tsi_u.bulk_v0;
opc = breq->blk_opc;
flags = breq->blk_flags;
@@ -91,9 +91,8 @@ brw_client_init(sfw_test_instance_t *tsi)
* but we have to keep it for compatibility
*/
len = npg * PAGE_SIZE;
-
} else {
- test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1;
+ struct test_bulk_req_v1 *breq = &tsi->tsi_u.bulk_v1;
/*
* I should never get this step if it's unknown feature
@@ -225,7 +224,7 @@ bad_data:
}
static void
-brw_fill_bulk(srpc_bulk_t *bk, int pattern, __u64 magic)
+brw_fill_bulk(struct srpc_bulk *bk, int pattern, __u64 magic)
{
int i;
struct page *pg;
@@ -237,7 +236,7 @@ brw_fill_bulk(srpc_bulk_t *bk, int pattern, __u64 magic)
}
static int
-brw_check_bulk(srpc_bulk_t *bk, int pattern, __u64 magic)
+brw_check_bulk(struct srpc_bulk *bk, int pattern, __u64 magic)
{
int i;
struct page *pg;
@@ -255,14 +254,14 @@ brw_check_bulk(srpc_bulk_t *bk, int pattern, __u64 magic)
}
static int
-brw_client_prep_rpc(sfw_test_unit_t *tsu,
- lnet_process_id_t dest, srpc_client_rpc_t **rpcpp)
+brw_client_prep_rpc(struct sfw_test_unit *tsu,
+ lnet_process_id_t dest, struct srpc_client_rpc **rpcpp)
{
- srpc_bulk_t *bulk = tsu->tsu_private;
- sfw_test_instance_t *tsi = tsu->tsu_instance;
- sfw_session_t *sn = tsi->tsi_batch->bat_session;
- srpc_client_rpc_t *rpc;
- srpc_brw_reqst_t *req;
+ struct srpc_bulk *bulk = tsu->tsu_private;
+ struct sfw_test_instance *tsi = tsu->tsu_instance;
+ struct sfw_session *sn = tsi->tsi_batch->bat_session;
+ struct srpc_client_rpc *rpc;
+ struct srpc_brw_reqst *req;
int flags;
int npg;
int len;
@@ -273,15 +272,14 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu,
LASSERT(bulk);
if (!(sn->sn_features & LST_FEAT_BULK_LEN)) {
- test_bulk_req_t *breq = &tsi->tsi_u.bulk_v0;
+ struct test_bulk_req *breq = &tsi->tsi_u.bulk_v0;
opc = breq->blk_opc;
flags = breq->blk_flags;
npg = breq->blk_npg;
len = npg * PAGE_SIZE;
-
} else {
- test_bulk_req_v1_t *breq = &tsi->tsi_u.bulk_v1;
+ struct test_bulk_req_v1 *breq = &tsi->tsi_u.bulk_v1;
/*
* I should never get this step if it's unknown feature
@@ -299,7 +297,7 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu,
if (rc)
return rc;
- memcpy(&rpc->crpc_bulk, bulk, offsetof(srpc_bulk_t, bk_iovs[npg]));
+ memcpy(&rpc->crpc_bulk, bulk, offsetof(struct srpc_bulk, bk_iovs[npg]));
if (opc == LST_BRW_WRITE)
brw_fill_bulk(&rpc->crpc_bulk, flags, BRW_MAGIC);
else
@@ -315,21 +313,21 @@ brw_client_prep_rpc(sfw_test_unit_t *tsu,
}
static void
-brw_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
+brw_client_done_rpc(struct sfw_test_unit *tsu, struct srpc_client_rpc *rpc)
{
__u64 magic = BRW_MAGIC;
- sfw_test_instance_t *tsi = tsu->tsu_instance;
- sfw_session_t *sn = tsi->tsi_batch->bat_session;
- srpc_msg_t *msg = &rpc->crpc_replymsg;
- srpc_brw_reply_t *reply = &msg->msg_body.brw_reply;
- srpc_brw_reqst_t *reqst = &rpc->crpc_reqstmsg.msg_body.brw_reqst;
+ struct sfw_test_instance *tsi = tsu->tsu_instance;
+ struct sfw_session *sn = tsi->tsi_batch->bat_session;
+ struct srpc_msg *msg = &rpc->crpc_replymsg;
+ struct srpc_brw_reply *reply = &msg->msg_body.brw_reply;
+ struct srpc_brw_reqst *reqst = &rpc->crpc_reqstmsg.msg_body.brw_reqst;
LASSERT(sn);
if (rpc->crpc_status) {
CERROR("BRW RPC to %s failed with %d\n",
libcfs_id2str(rpc->crpc_dest), rpc->crpc_status);
- if (!tsi->tsi_stopping) /* rpc could have been aborted */
+ if (!tsi->tsi_stopping) /* rpc could have been aborted */
atomic_inc(&sn->sn_brw_errors);
return;
}
@@ -363,7 +361,7 @@ brw_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
static void
brw_server_rpc_done(struct srpc_server_rpc *rpc)
{
- srpc_bulk_t *blk = rpc->srpc_bulk;
+ struct srpc_bulk *blk = rpc->srpc_bulk;
if (!blk)
return;
@@ -384,9 +382,9 @@ static int
brw_bulk_ready(struct srpc_server_rpc *rpc, int status)
{
__u64 magic = BRW_MAGIC;
- srpc_brw_reply_t *reply = &rpc->srpc_replymsg.msg_body.brw_reply;
- srpc_brw_reqst_t *reqst;
- srpc_msg_t *reqstmsg;
+ struct srpc_brw_reply *reply = &rpc->srpc_replymsg.msg_body.brw_reply;
+ struct srpc_brw_reqst *reqst;
+ struct srpc_msg *reqstmsg;
LASSERT(rpc->srpc_bulk);
LASSERT(rpc->srpc_reqstbuf);
@@ -420,10 +418,10 @@ static int
brw_server_handle(struct srpc_server_rpc *rpc)
{
struct srpc_service *sv = rpc->srpc_scd->scd_svc;
- srpc_msg_t *replymsg = &rpc->srpc_replymsg;
- srpc_msg_t *reqstmsg = &rpc->srpc_reqstbuf->buf_msg;
- srpc_brw_reply_t *reply = &replymsg->msg_body.brw_reply;
- srpc_brw_reqst_t *reqst = &reqstmsg->msg_body.brw_reqst;
+ struct srpc_msg *replymsg = &rpc->srpc_replymsg;
+ struct srpc_msg *reqstmsg = &rpc->srpc_reqstbuf->buf_msg;
+ struct srpc_brw_reply *reply = &replymsg->msg_body.brw_reply;
+ struct srpc_brw_reqst *reqst = &reqstmsg->msg_body.brw_reqst;
int npg;
int rc;
@@ -459,7 +457,7 @@ brw_server_handle(struct srpc_server_rpc *rpc)
if (!(reqstmsg->msg_ses_feats & LST_FEAT_BULK_LEN)) {
/* compat with old version */
- if (reqst->brw_len & ~CFS_PAGE_MASK) {
+ if (reqst->brw_len & ~PAGE_MASK) {
reply->brw_status = EINVAL;
return 0;
}
@@ -490,7 +488,8 @@ brw_server_handle(struct srpc_server_rpc *rpc)
return 0;
}
-sfw_test_client_ops_t brw_test_client;
+struct sfw_test_client_ops brw_test_client;
+
void brw_init_test_client(void)
{
brw_test_client.tso_init = brw_client_init;
@@ -499,7 +498,8 @@ void brw_init_test_client(void)
brw_test_client.tso_done_rpc = brw_client_done_rpc;
};
-srpc_service_t brw_test_service;
+struct srpc_service brw_test_service;
+
void brw_init_test_service(void)
{
brw_test_service.sv_id = SRPC_SERVICE_BRW;
diff --git a/drivers/staging/lustre/lnet/selftest/conctl.c b/drivers/staging/lustre/lnet/selftest/conctl.c
index 79ee6c0bf7c1..408c614b6ca3 100644
--- a/drivers/staging/lustre/lnet/selftest/conctl.c
+++ b/drivers/staging/lustre/lnet/selftest/conctl.c
@@ -51,9 +51,9 @@ lst_session_new_ioctl(lstio_session_new_args_t *args)
char *name;
int rc;
- if (!args->lstio_ses_idp || /* address for output sid */
- !args->lstio_ses_key || /* no key is specified */
- !args->lstio_ses_namep || /* session name */
+ if (!args->lstio_ses_idp || /* address for output sid */
+ !args->lstio_ses_key || /* no key is specified */
+ !args->lstio_ses_namep || /* session name */
args->lstio_ses_nmlen <= 0 ||
args->lstio_ses_nmlen > LST_NAME_SIZE)
return -EINVAL;
@@ -95,11 +95,11 @@ lst_session_info_ioctl(lstio_session_info_args_t *args)
{
/* no checking of key */
- if (!args->lstio_ses_idp || /* address for output sid */
- !args->lstio_ses_keyp || /* address for output key */
- !args->lstio_ses_featp || /* address for output features */
- !args->lstio_ses_ndinfo || /* address for output ndinfo */
- !args->lstio_ses_namep || /* address for output name */
+ if (!args->lstio_ses_idp || /* address for output sid */
+ !args->lstio_ses_keyp || /* address for output key */
+ !args->lstio_ses_featp || /* address for output features */
+ !args->lstio_ses_ndinfo || /* address for output ndinfo */
+ !args->lstio_ses_namep || /* address for output name */
args->lstio_ses_nmlen <= 0 ||
args->lstio_ses_nmlen > LST_NAME_SIZE)
return -EINVAL;
@@ -125,7 +125,7 @@ lst_debug_ioctl(lstio_debug_args_t *args)
if (!args->lstio_dbg_resultp)
return -EINVAL;
- if (args->lstio_dbg_namep && /* name of batch/group */
+ if (args->lstio_dbg_namep && /* name of batch/group */
(args->lstio_dbg_nmlen <= 0 ||
args->lstio_dbg_nmlen > LST_NAME_SIZE))
return -EINVAL;
@@ -326,7 +326,7 @@ lst_nodes_add_ioctl(lstio_group_nodes_args_t *args)
if (args->lstio_grp_key != console_session.ses_key)
return -EACCES;
- if (!args->lstio_grp_idsp || /* array of ids */
+ if (!args->lstio_grp_idsp || /* array of ids */
args->lstio_grp_count <= 0 ||
!args->lstio_grp_resultp ||
!args->lstio_grp_featp ||
@@ -394,13 +394,13 @@ lst_group_info_ioctl(lstio_group_info_args_t *args)
args->lstio_grp_nmlen > LST_NAME_SIZE)
return -EINVAL;
- if (!args->lstio_grp_entp && /* output: group entry */
- !args->lstio_grp_dentsp) /* output: node entry */
+ if (!args->lstio_grp_entp && /* output: group entry */
+ !args->lstio_grp_dentsp) /* output: node entry */
return -EINVAL;
- if (args->lstio_grp_dentsp) { /* have node entry */
- if (!args->lstio_grp_idxp || /* node index */
- !args->lstio_grp_ndentp) /* # of node entry */
+ if (args->lstio_grp_dentsp) { /* have node entry */
+ if (!args->lstio_grp_idxp || /* node index */
+ !args->lstio_grp_ndentp) /* # of node entry */
return -EINVAL;
if (copy_from_user(&ndent, args->lstio_grp_ndentp,
@@ -612,18 +612,18 @@ lst_batch_info_ioctl(lstio_batch_info_args_t *args)
if (args->lstio_bat_key != console_session.ses_key)
return -EACCES;
- if (!args->lstio_bat_namep || /* batch name */
+ if (!args->lstio_bat_namep || /* batch name */
args->lstio_bat_nmlen <= 0 ||
args->lstio_bat_nmlen > LST_NAME_SIZE)
return -EINVAL;
- if (!args->lstio_bat_entp && /* output: batch entry */
- !args->lstio_bat_dentsp) /* output: node entry */
+ if (!args->lstio_bat_entp && /* output: batch entry */
+ !args->lstio_bat_dentsp) /* output: node entry */
return -EINVAL;
- if (args->lstio_bat_dentsp) { /* have node entry */
- if (!args->lstio_bat_idxp || /* node index */
- !args->lstio_bat_ndentp) /* # of node entry */
+ if (args->lstio_bat_dentsp) { /* have node entry */
+ if (!args->lstio_bat_idxp || /* node index */
+ !args->lstio_bat_ndentp) /* # of node entry */
return -EINVAL;
if (copy_from_user(&index, args->lstio_bat_idxp,
@@ -722,18 +722,18 @@ static int lst_test_add_ioctl(lstio_test_args_t *args)
if (!args->lstio_tes_resultp ||
!args->lstio_tes_retp ||
- !args->lstio_tes_bat_name || /* no specified batch */
+ !args->lstio_tes_bat_name || /* no specified batch */
args->lstio_tes_bat_nmlen <= 0 ||
args->lstio_tes_bat_nmlen > LST_NAME_SIZE ||
- !args->lstio_tes_sgrp_name || /* no source group */
+ !args->lstio_tes_sgrp_name || /* no source group */
args->lstio_tes_sgrp_nmlen <= 0 ||
args->lstio_tes_sgrp_nmlen > LST_NAME_SIZE ||
- !args->lstio_tes_dgrp_name || /* no target group */
+ !args->lstio_tes_dgrp_name || /* no target group */
args->lstio_tes_dgrp_nmlen <= 0 ||
args->lstio_tes_dgrp_nmlen > LST_NAME_SIZE)
return -EINVAL;
- if (!args->lstio_tes_loop || /* negative is infinite */
+ if (!args->lstio_tes_loop || /* negative is infinite */
args->lstio_tes_concur <= 0 ||
args->lstio_tes_dist <= 0 ||
args->lstio_tes_span <= 0)
@@ -743,7 +743,7 @@ static int lst_test_add_ioctl(lstio_test_args_t *args)
if (args->lstio_tes_param &&
(args->lstio_tes_param_len <= 0 ||
args->lstio_tes_param_len >
- PAGE_SIZE - sizeof(lstcon_test_t)))
+ PAGE_SIZE - sizeof(struct lstcon_test)))
return -EINVAL;
LIBCFS_ALLOC(batch_name, args->lstio_tes_bat_nmlen + 1);
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.c b/drivers/staging/lustre/lnet/selftest/conrpc.c
index 35a227d0c657..6f687581117d 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.c
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.c
@@ -46,13 +46,13 @@
#include "conrpc.h"
#include "console.h"
-void lstcon_rpc_stat_reply(lstcon_rpc_trans_t *, srpc_msg_t *,
- lstcon_node_t *, lstcon_trans_stat_t *);
+void lstcon_rpc_stat_reply(struct lstcon_rpc_trans *, struct srpc_msg *,
+ struct lstcon_node *, lstcon_trans_stat_t *);
static void
-lstcon_rpc_done(srpc_client_rpc_t *rpc)
+lstcon_rpc_done(struct srpc_client_rpc *rpc)
{
- lstcon_rpc_t *crpc = (lstcon_rpc_t *)rpc->crpc_priv;
+ struct lstcon_rpc *crpc = (struct lstcon_rpc *)rpc->crpc_priv;
LASSERT(crpc && rpc == crpc->crp_rpc);
LASSERT(crpc->crp_posted && !crpc->crp_finished);
@@ -90,8 +90,8 @@ lstcon_rpc_done(srpc_client_rpc_t *rpc)
}
static int
-lstcon_rpc_init(lstcon_node_t *nd, int service, unsigned feats,
- int bulk_npg, int bulk_len, int embedded, lstcon_rpc_t *crpc)
+lstcon_rpc_init(struct lstcon_node *nd, int service, unsigned feats,
+ int bulk_npg, int bulk_len, int embedded, struct lstcon_rpc *crpc)
{
crpc->crp_rpc = sfw_create_rpc(nd->nd_id, service,
feats, bulk_npg, bulk_len,
@@ -115,16 +115,16 @@ lstcon_rpc_init(lstcon_node_t *nd, int service, unsigned feats,
}
static int
-lstcon_rpc_prep(lstcon_node_t *nd, int service, unsigned feats,
- int bulk_npg, int bulk_len, lstcon_rpc_t **crpcpp)
+lstcon_rpc_prep(struct lstcon_node *nd, int service, unsigned feats,
+ int bulk_npg, int bulk_len, struct lstcon_rpc **crpcpp)
{
- lstcon_rpc_t *crpc = NULL;
+ struct lstcon_rpc *crpc = NULL;
int rc;
spin_lock(&console_session.ses_rpc_lock);
crpc = list_first_entry_or_null(&console_session.ses_rpc_freelist,
- lstcon_rpc_t, crp_link);
+ struct lstcon_rpc, crp_link);
if (crpc)
list_del_init(&crpc->crp_link);
@@ -148,9 +148,9 @@ lstcon_rpc_prep(lstcon_node_t *nd, int service, unsigned feats,
}
void
-lstcon_rpc_put(lstcon_rpc_t *crpc)
+lstcon_rpc_put(struct lstcon_rpc *crpc)
{
- srpc_bulk_t *bulk = &crpc->crp_rpc->crpc_bulk;
+ struct srpc_bulk *bulk = &crpc->crp_rpc->crpc_bulk;
int i;
LASSERT(list_empty(&crpc->crp_link));
@@ -183,9 +183,9 @@ lstcon_rpc_put(lstcon_rpc_t *crpc)
}
static void
-lstcon_rpc_post(lstcon_rpc_t *crpc)
+lstcon_rpc_post(struct lstcon_rpc *crpc)
{
- lstcon_rpc_trans_t *trans = crpc->crp_trans;
+ struct lstcon_rpc_trans *trans = crpc->crp_trans;
LASSERT(trans);
@@ -236,9 +236,9 @@ lstcon_rpc_trans_name(int transop)
int
lstcon_rpc_trans_prep(struct list_head *translist, int transop,
- lstcon_rpc_trans_t **transpp)
+ struct lstcon_rpc_trans **transpp)
{
- lstcon_rpc_trans_t *trans;
+ struct lstcon_rpc_trans *trans;
if (translist) {
list_for_each_entry(trans, translist, tas_link) {
@@ -278,26 +278,26 @@ lstcon_rpc_trans_prep(struct list_head *translist, int transop,
}
void
-lstcon_rpc_trans_addreq(lstcon_rpc_trans_t *trans, lstcon_rpc_t *crpc)
+lstcon_rpc_trans_addreq(struct lstcon_rpc_trans *trans, struct lstcon_rpc *crpc)
{
list_add_tail(&crpc->crp_link, &trans->tas_rpcs_list);
crpc->crp_trans = trans;
}
void
-lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error)
+lstcon_rpc_trans_abort(struct lstcon_rpc_trans *trans, int error)
{
- srpc_client_rpc_t *rpc;
- lstcon_rpc_t *crpc;
- lstcon_node_t *nd;
+ struct srpc_client_rpc *rpc;
+ struct lstcon_rpc *crpc;
+ struct lstcon_node *nd;
list_for_each_entry(crpc, &trans->tas_rpcs_list, crp_link) {
rpc = crpc->crp_rpc;
spin_lock(&rpc->crpc_lock);
- if (!crpc->crp_posted || /* not posted */
- crpc->crp_stamp) { /* rpc done or aborted already */
+ if (!crpc->crp_posted || /* not posted */
+ crpc->crp_stamp) { /* rpc done or aborted already */
if (!crpc->crp_stamp) {
crpc->crp_stamp = cfs_time_current();
crpc->crp_status = -EINTR;
@@ -326,7 +326,7 @@ lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error)
}
static int
-lstcon_rpc_trans_check(lstcon_rpc_trans_t *trans)
+lstcon_rpc_trans_check(struct lstcon_rpc_trans *trans)
{
if (console_session.ses_shutdown &&
!list_empty(&trans->tas_olink)) /* Not an end session RPC */
@@ -336,9 +336,9 @@ lstcon_rpc_trans_check(lstcon_rpc_trans_t *trans)
}
int
-lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout)
+lstcon_rpc_trans_postwait(struct lstcon_rpc_trans *trans, int timeout)
{
- lstcon_rpc_t *crpc;
+ struct lstcon_rpc *crpc;
int rc;
if (list_empty(&trans->tas_rpcs_list))
@@ -386,11 +386,11 @@ lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout)
}
static int
-lstcon_rpc_get_reply(lstcon_rpc_t *crpc, srpc_msg_t **msgpp)
+lstcon_rpc_get_reply(struct lstcon_rpc *crpc, struct srpc_msg **msgpp)
{
- lstcon_node_t *nd = crpc->crp_node;
- srpc_client_rpc_t *rpc = crpc->crp_rpc;
- srpc_generic_reply_t *rep;
+ struct lstcon_node *nd = crpc->crp_node;
+ struct srpc_client_rpc *rpc = crpc->crp_rpc;
+ struct srpc_generic_reply *rep;
LASSERT(nd && rpc);
LASSERT(crpc->crp_stamp);
@@ -423,10 +423,10 @@ lstcon_rpc_get_reply(lstcon_rpc_t *crpc, srpc_msg_t **msgpp)
}
void
-lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans, lstcon_trans_stat_t *stat)
+lstcon_rpc_trans_stat(struct lstcon_rpc_trans *trans, lstcon_trans_stat_t *stat)
{
- lstcon_rpc_t *crpc;
- srpc_msg_t *rep;
+ struct lstcon_rpc *crpc;
+ struct srpc_msg *rep;
int error;
LASSERT(stat);
@@ -466,17 +466,17 @@ lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans, lstcon_trans_stat_t *stat)
}
int
-lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
+lstcon_rpc_trans_interpreter(struct lstcon_rpc_trans *trans,
struct list_head __user *head_up,
lstcon_rpc_readent_func_t readent)
{
struct list_head tmp;
struct list_head __user *next;
lstcon_rpc_ent_t *ent;
- srpc_generic_reply_t *rep;
- lstcon_rpc_t *crpc;
- srpc_msg_t *msg;
- lstcon_node_t *nd;
+ struct srpc_generic_reply *rep;
+ struct lstcon_rpc *crpc;
+ struct srpc_msg *msg;
+ struct lstcon_node *nd;
long dur;
struct timeval tv;
int error;
@@ -520,7 +520,7 @@ lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
continue;
/* RPC is done */
- rep = (srpc_generic_reply_t *)&msg->msg_body.reply;
+ rep = (struct srpc_generic_reply *)&msg->msg_body.reply;
if (copy_to_user(&ent->rpe_sid, &rep->sid, sizeof(lst_sid_t)) ||
copy_to_user(&ent->rpe_fwk_errno, &rep->status,
@@ -531,7 +531,6 @@ lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
continue;
error = readent(trans->tas_opc, msg, ent);
-
if (error)
return error;
}
@@ -540,11 +539,11 @@ lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
}
void
-lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans)
+lstcon_rpc_trans_destroy(struct lstcon_rpc_trans *trans)
{
- srpc_client_rpc_t *rpc;
- lstcon_rpc_t *crpc;
- lstcon_rpc_t *tmp;
+ struct srpc_client_rpc *rpc;
+ struct lstcon_rpc *crpc;
+ struct lstcon_rpc *tmp;
int count = 0;
list_for_each_entry_safe(crpc, tmp, &trans->tas_rpcs_list, crp_link) {
@@ -563,10 +562,10 @@ lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans)
}
/*
- * rpcs can be still not callbacked (even LNetMDUnlink is called)
- * because huge timeout for inaccessible network, don't make
- * user wait for them, just abandon them, they will be recycled
- * in callback
+ * RPCs may still not have been called back (even after
+ * LNetMDUnlink() is called) because of the huge timeout for
+ * an inaccessible network; don't make the user wait for
+ * them, just abandon them, they will be recycled in the
+ * callback.
*/
LASSERT(crpc->crp_status);
@@ -593,11 +592,11 @@ lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans)
}
int
-lstcon_sesrpc_prep(lstcon_node_t *nd, int transop,
- unsigned feats, lstcon_rpc_t **crpc)
+lstcon_sesrpc_prep(struct lstcon_node *nd, int transop,
+ unsigned feats, struct lstcon_rpc **crpc)
{
- srpc_mksn_reqst_t *msrq;
- srpc_rmsn_reqst_t *rsrq;
+ struct srpc_mksn_reqst *msrq;
+ struct srpc_rmsn_reqst *rsrq;
int rc;
switch (transop) {
@@ -632,9 +631,9 @@ lstcon_sesrpc_prep(lstcon_node_t *nd, int transop,
}
int
-lstcon_dbgrpc_prep(lstcon_node_t *nd, unsigned feats, lstcon_rpc_t **crpc)
+lstcon_dbgrpc_prep(struct lstcon_node *nd, unsigned feats, struct lstcon_rpc **crpc)
{
- srpc_debug_reqst_t *drq;
+ struct srpc_debug_reqst *drq;
int rc;
rc = lstcon_rpc_prep(nd, SRPC_SERVICE_DEBUG, feats, 0, 0, crpc);
@@ -650,11 +649,11 @@ lstcon_dbgrpc_prep(lstcon_node_t *nd, unsigned feats, lstcon_rpc_t **crpc)
}
int
-lstcon_batrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
- lstcon_tsb_hdr_t *tsb, lstcon_rpc_t **crpc)
+lstcon_batrpc_prep(struct lstcon_node *nd, int transop, unsigned feats,
+ struct lstcon_tsb_hdr *tsb, struct lstcon_rpc **crpc)
{
- lstcon_batch_t *batch;
- srpc_batch_reqst_t *brq;
+ struct lstcon_batch *batch;
+ struct srpc_batch_reqst *brq;
int rc;
rc = lstcon_rpc_prep(nd, SRPC_SERVICE_BATCH, feats, 0, 0, crpc);
@@ -676,16 +675,16 @@ lstcon_batrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
LASSERT(!tsb->tsb_index);
- batch = (lstcon_batch_t *)tsb;
+ batch = (struct lstcon_batch *)tsb;
brq->bar_arg = batch->bat_arg;
return 0;
}
int
-lstcon_statrpc_prep(lstcon_node_t *nd, unsigned feats, lstcon_rpc_t **crpc)
+lstcon_statrpc_prep(struct lstcon_node *nd, unsigned feats, struct lstcon_rpc **crpc)
{
- srpc_stat_reqst_t *srq;
+ struct srpc_stat_reqst *srq;
int rc;
rc = lstcon_rpc_prep(nd, SRPC_SERVICE_QUERY_STAT, feats, 0, 0, crpc);
@@ -716,12 +715,12 @@ lstcon_next_id(int idx, int nkiov, lnet_kiov_t *kiov)
}
static int
-lstcon_dstnodes_prep(lstcon_group_t *grp, int idx,
+lstcon_dstnodes_prep(struct lstcon_group *grp, int idx,
int dist, int span, int nkiov, lnet_kiov_t *kiov)
{
lnet_process_id_packed_t *pid;
- lstcon_ndlink_t *ndl;
- lstcon_node_t *nd;
+ struct lstcon_ndlink *ndl;
+ struct lstcon_node *nd;
int start;
int end;
int i = 0;
@@ -770,9 +769,9 @@ lstcon_dstnodes_prep(lstcon_group_t *grp, int idx,
}
static int
-lstcon_pingrpc_prep(lst_test_ping_param_t *param, srpc_test_reqst_t *req)
+lstcon_pingrpc_prep(lst_test_ping_param_t *param, struct srpc_test_reqst *req)
{
- test_ping_req_t *prq = &req->tsr_u.ping;
+ struct test_ping_req *prq = &req->tsr_u.ping;
prq->png_size = param->png_size;
prq->png_flags = param->png_flags;
@@ -781,9 +780,9 @@ lstcon_pingrpc_prep(lst_test_ping_param_t *param, srpc_test_reqst_t *req)
}
static int
-lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req)
+lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, struct srpc_test_reqst *req)
{
- test_bulk_req_t *brq = &req->tsr_u.bulk_v0;
+ struct test_bulk_req *brq = &req->tsr_u.bulk_v0;
brq->blk_opc = param->blk_opc;
brq->blk_npg = (param->blk_size + PAGE_SIZE - 1) /
@@ -794,9 +793,9 @@ lstcon_bulkrpc_v0_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req)
}
static int
-lstcon_bulkrpc_v1_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req)
+lstcon_bulkrpc_v1_prep(lst_test_bulk_param_t *param, struct srpc_test_reqst *req)
{
- test_bulk_req_v1_t *brq = &req->tsr_u.bulk_v1;
+ struct test_bulk_req_v1 *brq = &req->tsr_u.bulk_v1;
brq->blk_opc = param->blk_opc;
brq->blk_flags = param->blk_flags;
@@ -807,13 +806,13 @@ lstcon_bulkrpc_v1_prep(lst_test_bulk_param_t *param, srpc_test_reqst_t *req)
}
int
-lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
- lstcon_test_t *test, lstcon_rpc_t **crpc)
+lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned feats,
+ struct lstcon_test *test, struct lstcon_rpc **crpc)
{
- lstcon_group_t *sgrp = test->tes_src_grp;
- lstcon_group_t *dgrp = test->tes_dst_grp;
- srpc_test_reqst_t *trq;
- srpc_bulk_t *bulk;
+ struct lstcon_group *sgrp = test->tes_src_grp;
+ struct lstcon_group *dgrp = test->tes_dst_grp;
+ struct srpc_test_reqst *trq;
+ struct srpc_bulk *bulk;
int i;
int npg = 0;
int nob = 0;
@@ -841,7 +840,6 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
trq->tsr_ndest = 0;
trq->tsr_loop = nmax * test->tes_dist * test->tes_concur;
-
} else {
bulk = &(*crpc)->crp_rpc->crpc_bulk;
@@ -917,10 +915,10 @@ lstcon_testrpc_prep(lstcon_node_t *nd, int transop, unsigned feats,
}
static int
-lstcon_sesnew_stat_reply(lstcon_rpc_trans_t *trans,
- lstcon_node_t *nd, srpc_msg_t *reply)
+lstcon_sesnew_stat_reply(struct lstcon_rpc_trans *trans,
+ struct lstcon_node *nd, struct srpc_msg *reply)
{
- srpc_mksn_reply_t *mksn_rep = &reply->msg_body.mksn_reply;
+ struct srpc_mksn_reply *mksn_rep = &reply->msg_body.mksn_reply;
int status = mksn_rep->mksn_status;
if (!status &&
@@ -940,7 +938,7 @@ lstcon_sesnew_stat_reply(lstcon_rpc_trans_t *trans,
if (!trans->tas_feats_updated) {
spin_lock(&console_session.ses_rpc_lock);
- if (!trans->tas_feats_updated) { /* recheck with lock */
+ if (!trans->tas_feats_updated) { /* recheck with lock */
trans->tas_feats_updated = 1;
trans->tas_features = reply->msg_ses_feats;
}
@@ -964,14 +962,14 @@ lstcon_sesnew_stat_reply(lstcon_rpc_trans_t *trans,
}
void
-lstcon_rpc_stat_reply(lstcon_rpc_trans_t *trans, srpc_msg_t *msg,
- lstcon_node_t *nd, lstcon_trans_stat_t *stat)
+lstcon_rpc_stat_reply(struct lstcon_rpc_trans *trans, struct srpc_msg *msg,
+ struct lstcon_node *nd, lstcon_trans_stat_t *stat)
{
- srpc_rmsn_reply_t *rmsn_rep;
- srpc_debug_reply_t *dbg_rep;
- srpc_batch_reply_t *bat_rep;
- srpc_test_reply_t *test_rep;
- srpc_stat_reply_t *stat_rep;
+ struct srpc_rmsn_reply *rmsn_rep;
+ struct srpc_debug_reply *dbg_rep;
+ struct srpc_batch_reply *bat_rep;
+ struct srpc_test_reply *test_rep;
+ struct srpc_stat_reply *stat_rep;
int rc = 0;
switch (trans->tas_opc) {
@@ -1085,12 +1083,12 @@ int
lstcon_rpc_trans_ndlist(struct list_head *ndlist,
struct list_head *translist, int transop,
void *arg, lstcon_rpc_cond_func_t condition,
- lstcon_rpc_trans_t **transpp)
+ struct lstcon_rpc_trans **transpp)
{
- lstcon_rpc_trans_t *trans;
- lstcon_ndlink_t *ndl;
- lstcon_node_t *nd;
- lstcon_rpc_t *rpc;
+ struct lstcon_rpc_trans *trans;
+ struct lstcon_ndlink *ndl;
+ struct lstcon_node *nd;
+ struct lstcon_rpc *rpc;
unsigned feats;
int rc;
@@ -1130,14 +1128,16 @@ lstcon_rpc_trans_ndlist(struct list_head *ndlist,
case LST_TRANS_TSBCLIADD:
case LST_TRANS_TSBSRVADD:
rc = lstcon_testrpc_prep(nd, transop, feats,
- (lstcon_test_t *)arg, &rpc);
+ (struct lstcon_test *)arg,
+ &rpc);
break;
case LST_TRANS_TSBRUN:
case LST_TRANS_TSBSTOP:
case LST_TRANS_TSBCLIQRY:
case LST_TRANS_TSBSRVQRY:
rc = lstcon_batrpc_prep(nd, transop, feats,
- (lstcon_tsb_hdr_t *)arg, &rpc);
+ (struct lstcon_tsb_hdr *)arg,
+ &rpc);
break;
case LST_TRANS_STATQRY:
rc = lstcon_statrpc_prep(nd, feats, &rpc);
@@ -1170,17 +1170,18 @@ static void
lstcon_rpc_pinger(void *arg)
{
struct stt_timer *ptimer = (struct stt_timer *)arg;
- lstcon_rpc_trans_t *trans;
- lstcon_rpc_t *crpc;
- srpc_msg_t *rep;
- srpc_debug_reqst_t *drq;
- lstcon_ndlink_t *ndl;
- lstcon_node_t *nd;
+ struct lstcon_rpc_trans *trans;
+ struct lstcon_rpc *crpc;
+ struct srpc_msg *rep;
+ struct srpc_debug_reqst *drq;
+ struct lstcon_ndlink *ndl;
+ struct lstcon_node *nd;
int intv;
int count = 0;
int rc;
- /* RPC pinger is a special case of transaction,
+ /*
+ * RPC pinger is a special case of transaction,
* it's called by timer at 8 seconds interval.
*/
mutex_lock(&console_session.ses_mutex);
@@ -1326,9 +1327,9 @@ lstcon_rpc_pinger_stop(void)
void
lstcon_rpc_cleanup_wait(void)
{
- lstcon_rpc_trans_t *trans;
- lstcon_rpc_t *crpc;
- lstcon_rpc_t *temp;
+ struct lstcon_rpc_trans *trans;
+ struct lstcon_rpc *crpc;
+ struct lstcon_rpc *temp;
struct list_head *pacer;
struct list_head zlist;
@@ -1338,7 +1339,7 @@ lstcon_rpc_cleanup_wait(void)
while (!list_empty(&console_session.ses_trans_list)) {
list_for_each(pacer, &console_session.ses_trans_list) {
- trans = list_entry(pacer, lstcon_rpc_trans_t,
+ trans = list_entry(pacer, struct lstcon_rpc_trans,
tas_link);
CDEBUG(D_NET, "Session closed, wakeup transaction %s\n",
@@ -1370,7 +1371,7 @@ lstcon_rpc_cleanup_wait(void)
list_for_each_entry_safe(crpc, temp, &zlist, crp_link) {
list_del(&crpc->crp_link);
- LIBCFS_FREE(crpc, sizeof(lstcon_rpc_t));
+ LIBCFS_FREE(crpc, sizeof(struct lstcon_rpc));
}
}
diff --git a/drivers/staging/lustre/lnet/selftest/conrpc.h b/drivers/staging/lustre/lnet/selftest/conrpc.h
index 3e7839dad5bb..90c3385a355c 100644
--- a/drivers/staging/lustre/lnet/selftest/conrpc.h
+++ b/drivers/staging/lustre/lnet/selftest/conrpc.h
@@ -63,9 +63,9 @@ struct lstcon_tsb_hdr;
struct lstcon_test;
struct lstcon_node;
-typedef struct lstcon_rpc {
+struct lstcon_rpc {
struct list_head crp_link; /* chain on rpc transaction */
- srpc_client_rpc_t *crp_rpc; /* client rpc */
+ struct srpc_client_rpc *crp_rpc; /* client rpc */
struct lstcon_node *crp_node; /* destination node */
struct lstcon_rpc_trans *crp_trans; /* conrpc transaction */
@@ -76,9 +76,9 @@ typedef struct lstcon_rpc {
unsigned int crp_embedded:1;
int crp_status; /* console rpc errors */
unsigned long crp_stamp; /* replied time stamp */
-} lstcon_rpc_t;
+};
-typedef struct lstcon_rpc_trans {
+struct lstcon_rpc_trans {
struct list_head tas_olink; /* link chain on owner list */
struct list_head tas_link; /* link chain on global list */
int tas_opc; /* operation code of transaction */
@@ -87,7 +87,7 @@ typedef struct lstcon_rpc_trans {
wait_queue_head_t tas_waitq; /* wait queue head */
atomic_t tas_remaining; /* # of un-scheduled rpcs */
struct list_head tas_rpcs_list; /* queued requests */
-} lstcon_rpc_trans_t;
+};
#define LST_TRANS_PRIVATE 0x1000
@@ -106,35 +106,35 @@ typedef struct lstcon_rpc_trans {
#define LST_TRANS_STATQRY 0x21
typedef int (*lstcon_rpc_cond_func_t)(int, struct lstcon_node *, void *);
-typedef int (*lstcon_rpc_readent_func_t)(int, srpc_msg_t *,
+typedef int (*lstcon_rpc_readent_func_t)(int, struct srpc_msg *,
lstcon_rpc_ent_t __user *);
int lstcon_sesrpc_prep(struct lstcon_node *nd, int transop,
- unsigned version, lstcon_rpc_t **crpc);
+ unsigned version, struct lstcon_rpc **crpc);
int lstcon_dbgrpc_prep(struct lstcon_node *nd,
- unsigned version, lstcon_rpc_t **crpc);
+ unsigned version, struct lstcon_rpc **crpc);
int lstcon_batrpc_prep(struct lstcon_node *nd, int transop, unsigned version,
- struct lstcon_tsb_hdr *tsb, lstcon_rpc_t **crpc);
+ struct lstcon_tsb_hdr *tsb, struct lstcon_rpc **crpc);
int lstcon_testrpc_prep(struct lstcon_node *nd, int transop, unsigned version,
- struct lstcon_test *test, lstcon_rpc_t **crpc);
+ struct lstcon_test *test, struct lstcon_rpc **crpc);
int lstcon_statrpc_prep(struct lstcon_node *nd, unsigned version,
- lstcon_rpc_t **crpc);
-void lstcon_rpc_put(lstcon_rpc_t *crpc);
+ struct lstcon_rpc **crpc);
+void lstcon_rpc_put(struct lstcon_rpc *crpc);
int lstcon_rpc_trans_prep(struct list_head *translist,
- int transop, lstcon_rpc_trans_t **transpp);
+ int transop, struct lstcon_rpc_trans **transpp);
int lstcon_rpc_trans_ndlist(struct list_head *ndlist,
struct list_head *translist, int transop,
void *arg, lstcon_rpc_cond_func_t condition,
- lstcon_rpc_trans_t **transpp);
-void lstcon_rpc_trans_stat(lstcon_rpc_trans_t *trans,
+ struct lstcon_rpc_trans **transpp);
+void lstcon_rpc_trans_stat(struct lstcon_rpc_trans *trans,
lstcon_trans_stat_t *stat);
-int lstcon_rpc_trans_interpreter(lstcon_rpc_trans_t *trans,
+int lstcon_rpc_trans_interpreter(struct lstcon_rpc_trans *trans,
struct list_head __user *head_up,
lstcon_rpc_readent_func_t readent);
-void lstcon_rpc_trans_abort(lstcon_rpc_trans_t *trans, int error);
-void lstcon_rpc_trans_destroy(lstcon_rpc_trans_t *trans);
-void lstcon_rpc_trans_addreq(lstcon_rpc_trans_t *trans, lstcon_rpc_t *req);
-int lstcon_rpc_trans_postwait(lstcon_rpc_trans_t *trans, int timeout);
+void lstcon_rpc_trans_abort(struct lstcon_rpc_trans *trans, int error);
+void lstcon_rpc_trans_destroy(struct lstcon_rpc_trans *trans);
+void lstcon_rpc_trans_addreq(struct lstcon_rpc_trans *trans, struct lstcon_rpc *req);
+int lstcon_rpc_trans_postwait(struct lstcon_rpc_trans *trans, int timeout);
int lstcon_rpc_pinger_start(void);
void lstcon_rpc_pinger_stop(void);
void lstcon_rpc_cleanup_wait(void);
diff --git a/drivers/staging/lustre/lnet/selftest/console.c b/drivers/staging/lustre/lnet/selftest/console.c
index 1a923ea3a755..a03e52d29d3f 100644
--- a/drivers/staging/lustre/lnet/selftest/console.c
+++ b/drivers/staging/lustre/lnet/selftest/console.c
@@ -61,7 +61,7 @@ do { \
struct lstcon_session console_session;
static void
-lstcon_node_get(lstcon_node_t *nd)
+lstcon_node_get(struct lstcon_node *nd)
{
LASSERT(nd->nd_ref >= 1);
@@ -69,9 +69,9 @@ lstcon_node_get(lstcon_node_t *nd)
}
static int
-lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create)
+lstcon_node_find(lnet_process_id_t id, struct lstcon_node **ndpp, int create)
{
- lstcon_ndlink_t *ndl;
+ struct lstcon_ndlink *ndl;
unsigned int idx = LNET_NIDADDR(id.nid) % LST_GLOBAL_HASHSIZE;
LASSERT(id.nid != LNET_NID_ANY);
@@ -90,11 +90,11 @@ lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create)
if (!create)
return -ENOENT;
- LIBCFS_ALLOC(*ndpp, sizeof(lstcon_node_t) + sizeof(lstcon_ndlink_t));
+ LIBCFS_ALLOC(*ndpp, sizeof(struct lstcon_node) + sizeof(struct lstcon_ndlink));
if (!*ndpp)
return -ENOMEM;
- ndl = (lstcon_ndlink_t *)(*ndpp + 1);
+ ndl = (struct lstcon_ndlink *)(*ndpp + 1);
ndl->ndl_node = *ndpp;
@@ -103,7 +103,7 @@ lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create)
ndl->ndl_node->nd_stamp = cfs_time_current();
ndl->ndl_node->nd_state = LST_NODE_UNKNOWN;
ndl->ndl_node->nd_timeout = 0;
- memset(&ndl->ndl_node->nd_ping, 0, sizeof(lstcon_rpc_t));
+ memset(&ndl->ndl_node->nd_ping, 0, sizeof(struct lstcon_rpc));
/*
* queued in global hash & list, no refcount is taken by
@@ -117,16 +117,16 @@ lstcon_node_find(lnet_process_id_t id, lstcon_node_t **ndpp, int create)
}
static void
-lstcon_node_put(lstcon_node_t *nd)
+lstcon_node_put(struct lstcon_node *nd)
{
- lstcon_ndlink_t *ndl;
+ struct lstcon_ndlink *ndl;
LASSERT(nd->nd_ref > 0);
if (--nd->nd_ref > 0)
return;
- ndl = (lstcon_ndlink_t *)(nd + 1);
+ ndl = (struct lstcon_ndlink *)(nd + 1);
LASSERT(!list_empty(&ndl->ndl_link));
LASSERT(!list_empty(&ndl->ndl_hlink));
@@ -135,16 +135,16 @@ lstcon_node_put(lstcon_node_t *nd)
list_del(&ndl->ndl_link);
list_del(&ndl->ndl_hlink);
- LIBCFS_FREE(nd, sizeof(lstcon_node_t) + sizeof(lstcon_ndlink_t));
+ LIBCFS_FREE(nd, sizeof(struct lstcon_node) + sizeof(struct lstcon_ndlink));
}
static int
lstcon_ndlink_find(struct list_head *hash,
- lnet_process_id_t id, lstcon_ndlink_t **ndlpp, int create)
+ lnet_process_id_t id, struct lstcon_ndlink **ndlpp, int create)
{
unsigned int idx = LNET_NIDADDR(id.nid) % LST_NODE_HASHSIZE;
- lstcon_ndlink_t *ndl;
- lstcon_node_t *nd;
+ struct lstcon_ndlink *ndl;
+ struct lstcon_node *nd;
int rc;
if (id.nid == LNET_NID_ANY)
@@ -168,7 +168,7 @@ lstcon_ndlink_find(struct list_head *hash,
if (rc)
return rc;
- LIBCFS_ALLOC(ndl, sizeof(lstcon_ndlink_t));
+ LIBCFS_ALLOC(ndl, sizeof(struct lstcon_ndlink));
if (!ndl) {
lstcon_node_put(nd);
return -ENOMEM;
@@ -184,7 +184,7 @@ lstcon_ndlink_find(struct list_head *hash,
}
static void
-lstcon_ndlink_release(lstcon_ndlink_t *ndl)
+lstcon_ndlink_release(struct lstcon_ndlink *ndl)
{
LASSERT(list_empty(&ndl->ndl_link));
LASSERT(!list_empty(&ndl->ndl_hlink));
@@ -196,12 +196,12 @@ lstcon_ndlink_release(lstcon_ndlink_t *ndl)
}
static int
-lstcon_group_alloc(char *name, lstcon_group_t **grpp)
+lstcon_group_alloc(char *name, struct lstcon_group **grpp)
{
- lstcon_group_t *grp;
+ struct lstcon_group *grp;
int i;
- LIBCFS_ALLOC(grp, offsetof(lstcon_group_t,
+ LIBCFS_ALLOC(grp, offsetof(struct lstcon_group,
grp_ndl_hash[LST_NODE_HASHSIZE]));
if (!grp)
return -ENOMEM;
@@ -209,7 +209,7 @@ lstcon_group_alloc(char *name, lstcon_group_t **grpp)
grp->grp_ref = 1;
if (name) {
if (strlen(name) > sizeof(grp->grp_name) - 1) {
- LIBCFS_FREE(grp, offsetof(lstcon_group_t,
+ LIBCFS_FREE(grp, offsetof(struct lstcon_group,
grp_ndl_hash[LST_NODE_HASHSIZE]));
return -E2BIG;
}
@@ -229,18 +229,18 @@ lstcon_group_alloc(char *name, lstcon_group_t **grpp)
}
static void
-lstcon_group_addref(lstcon_group_t *grp)
+lstcon_group_addref(struct lstcon_group *grp)
{
grp->grp_ref++;
}
-static void lstcon_group_ndlink_release(lstcon_group_t *, lstcon_ndlink_t *);
+static void lstcon_group_ndlink_release(struct lstcon_group *, struct lstcon_ndlink *);
static void
-lstcon_group_drain(lstcon_group_t *grp, int keep)
+lstcon_group_drain(struct lstcon_group *grp, int keep)
{
- lstcon_ndlink_t *ndl;
- lstcon_ndlink_t *tmp;
+ struct lstcon_ndlink *ndl;
+ struct lstcon_ndlink *tmp;
list_for_each_entry_safe(ndl, tmp, &grp->grp_ndl_list, ndl_link) {
if (!(ndl->ndl_node->nd_state & keep))
@@ -249,7 +249,7 @@ lstcon_group_drain(lstcon_group_t *grp, int keep)
}
static void
-lstcon_group_decref(lstcon_group_t *grp)
+lstcon_group_decref(struct lstcon_group *grp)
{
int i;
@@ -264,20 +264,20 @@ lstcon_group_decref(lstcon_group_t *grp)
for (i = 0; i < LST_NODE_HASHSIZE; i++)
LASSERT(list_empty(&grp->grp_ndl_hash[i]));
- LIBCFS_FREE(grp, offsetof(lstcon_group_t,
+ LIBCFS_FREE(grp, offsetof(struct lstcon_group,
grp_ndl_hash[LST_NODE_HASHSIZE]));
}
static int
-lstcon_group_find(const char *name, lstcon_group_t **grpp)
+lstcon_group_find(const char *name, struct lstcon_group **grpp)
{
- lstcon_group_t *grp;
+ struct lstcon_group *grp;
list_for_each_entry(grp, &console_session.ses_grp_list, grp_link) {
if (strncmp(grp->grp_name, name, LST_NAME_SIZE))
continue;
- lstcon_group_addref(grp); /* +1 ref for caller */
+ lstcon_group_addref(grp); /* +1 ref for caller */
*grpp = grp;
return 0;
}
@@ -286,8 +286,8 @@ lstcon_group_find(const char *name, lstcon_group_t **grpp)
}
static int
-lstcon_group_ndlink_find(lstcon_group_t *grp, lnet_process_id_t id,
- lstcon_ndlink_t **ndlpp, int create)
+lstcon_group_ndlink_find(struct lstcon_group *grp, lnet_process_id_t id,
+ struct lstcon_ndlink **ndlpp, int create)
{
int rc;
@@ -305,7 +305,7 @@ lstcon_group_ndlink_find(lstcon_group_t *grp, lnet_process_id_t id,
}
static void
-lstcon_group_ndlink_release(lstcon_group_t *grp, lstcon_ndlink_t *ndl)
+lstcon_group_ndlink_release(struct lstcon_group *grp, struct lstcon_ndlink *ndl)
{
list_del_init(&ndl->ndl_link);
lstcon_ndlink_release(ndl);
@@ -313,8 +313,8 @@ lstcon_group_ndlink_release(lstcon_group_t *grp, lstcon_ndlink_t *ndl)
}
static void
-lstcon_group_ndlink_move(lstcon_group_t *old,
- lstcon_group_t *new, lstcon_ndlink_t *ndl)
+lstcon_group_ndlink_move(struct lstcon_group *old,
+ struct lstcon_group *new, struct lstcon_ndlink *ndl)
{
unsigned int idx = LNET_NIDADDR(ndl->ndl_node->nd_id.nid) %
LST_NODE_HASHSIZE;
@@ -329,21 +329,21 @@ lstcon_group_ndlink_move(lstcon_group_t *old,
}
static void
-lstcon_group_move(lstcon_group_t *old, lstcon_group_t *new)
+lstcon_group_move(struct lstcon_group *old, struct lstcon_group *new)
{
- lstcon_ndlink_t *ndl;
+ struct lstcon_ndlink *ndl;
while (!list_empty(&old->grp_ndl_list)) {
ndl = list_entry(old->grp_ndl_list.next,
- lstcon_ndlink_t, ndl_link);
+ struct lstcon_ndlink, ndl_link);
lstcon_group_ndlink_move(old, new, ndl);
}
}
static int
-lstcon_sesrpc_condition(int transop, lstcon_node_t *nd, void *arg)
+lstcon_sesrpc_condition(int transop, struct lstcon_node *nd, void *arg)
{
- lstcon_group_t *grp = (lstcon_group_t *)arg;
+ struct lstcon_group *grp = (struct lstcon_group *)arg;
switch (transop) {
case LST_TRANS_SESNEW:
@@ -370,10 +370,10 @@ lstcon_sesrpc_condition(int transop, lstcon_node_t *nd, void *arg)
}
static int
-lstcon_sesrpc_readent(int transop, srpc_msg_t *msg,
+lstcon_sesrpc_readent(int transop, struct srpc_msg *msg,
lstcon_rpc_ent_t __user *ent_up)
{
- srpc_debug_reply_t *rep;
+ struct srpc_debug_reply *rep;
switch (transop) {
case LST_TRANS_SESNEW:
@@ -399,13 +399,13 @@ lstcon_sesrpc_readent(int transop, srpc_msg_t *msg,
}
static int
-lstcon_group_nodes_add(lstcon_group_t *grp,
+lstcon_group_nodes_add(struct lstcon_group *grp,
int count, lnet_process_id_t __user *ids_up,
unsigned *featp, struct list_head __user *result_up)
{
- lstcon_rpc_trans_t *trans;
- lstcon_ndlink_t *ndl;
- lstcon_group_t *tmp;
+ struct lstcon_rpc_trans *trans;
+ struct lstcon_ndlink *ndl;
+ struct lstcon_group *tmp;
lnet_process_id_t id;
int i;
int rc;
@@ -466,13 +466,13 @@ lstcon_group_nodes_add(lstcon_group_t *grp,
}
static int
-lstcon_group_nodes_remove(lstcon_group_t *grp,
+lstcon_group_nodes_remove(struct lstcon_group *grp,
int count, lnet_process_id_t __user *ids_up,
struct list_head __user *result_up)
{
- lstcon_rpc_trans_t *trans;
- lstcon_ndlink_t *ndl;
- lstcon_group_t *tmp;
+ struct lstcon_rpc_trans *trans;
+ struct lstcon_ndlink *ndl;
+ struct lstcon_group *tmp;
lnet_process_id_t id;
int rc;
int i;
@@ -523,7 +523,7 @@ error:
int
lstcon_group_add(char *name)
{
- lstcon_group_t *grp;
+ struct lstcon_group *grp;
int rc;
rc = lstcon_group_find(name, &grp) ? 0 : -EEXIST;
@@ -548,7 +548,7 @@ int
lstcon_nodes_add(char *name, int count, lnet_process_id_t __user *ids_up,
unsigned *featp, struct list_head __user *result_up)
{
- lstcon_group_t *grp;
+ struct lstcon_group *grp;
int rc;
LASSERT(count > 0);
@@ -578,8 +578,8 @@ lstcon_nodes_add(char *name, int count, lnet_process_id_t __user *ids_up,
int
lstcon_group_del(char *name)
{
- lstcon_rpc_trans_t *trans;
- lstcon_group_t *grp;
+ struct lstcon_rpc_trans *trans;
+ struct lstcon_group *grp;
int rc;
rc = lstcon_group_find(name, &grp);
@@ -621,7 +621,7 @@ lstcon_group_del(char *name)
int
lstcon_group_clean(char *name, int args)
{
- lstcon_group_t *grp = NULL;
+ struct lstcon_group *grp = NULL;
int rc;
rc = lstcon_group_find(name, &grp);
@@ -654,7 +654,7 @@ int
lstcon_nodes_remove(char *name, int count, lnet_process_id_t __user *ids_up,
struct list_head __user *result_up)
{
- lstcon_group_t *grp = NULL;
+ struct lstcon_group *grp = NULL;
int rc;
rc = lstcon_group_find(name, &grp);
@@ -683,8 +683,8 @@ lstcon_nodes_remove(char *name, int count, lnet_process_id_t __user *ids_up,
int
lstcon_group_refresh(char *name, struct list_head __user *result_up)
{
- lstcon_rpc_trans_t *trans;
- lstcon_group_t *grp;
+ struct lstcon_rpc_trans *trans;
+ struct lstcon_group *grp;
int rc;
rc = lstcon_group_find(name, &grp);
@@ -725,7 +725,7 @@ lstcon_group_refresh(char *name, struct list_head __user *result_up)
int
lstcon_group_list(int index, int len, char __user *name_up)
{
- lstcon_group_t *grp;
+ struct lstcon_group *grp;
LASSERT(index >= 0);
LASSERT(name_up);
@@ -733,7 +733,7 @@ lstcon_group_list(int index, int len, char __user *name_up)
list_for_each_entry(grp, &console_session.ses_grp_list, grp_link) {
if (!index--) {
return copy_to_user(name_up, grp->grp_name, len) ?
- -EFAULT : 0;
+ -EFAULT : 0;
}
}
@@ -744,8 +744,8 @@ static int
lstcon_nodes_getent(struct list_head *head, int *index_p,
int *count_p, lstcon_node_ent_t __user *dents_up)
{
- lstcon_ndlink_t *ndl;
- lstcon_node_t *nd;
+ struct lstcon_ndlink *ndl;
+ struct lstcon_node *nd;
int count = 0;
int index = 0;
@@ -786,8 +786,8 @@ lstcon_group_info(char *name, lstcon_ndlist_ent_t __user *gents_p,
lstcon_node_ent_t __user *dents_up)
{
lstcon_ndlist_ent_t *gentp;
- lstcon_group_t *grp;
- lstcon_ndlink_t *ndl;
+ struct lstcon_group *grp;
+ struct lstcon_ndlink *ndl;
int rc;
rc = lstcon_group_find(name, &grp);
@@ -828,9 +828,9 @@ lstcon_group_info(char *name, lstcon_ndlist_ent_t __user *gents_p,
}
static int
-lstcon_batch_find(const char *name, lstcon_batch_t **batpp)
+lstcon_batch_find(const char *name, struct lstcon_batch **batpp)
{
- lstcon_batch_t *bat;
+ struct lstcon_batch *bat;
list_for_each_entry(bat, &console_session.ses_bat_list, bat_link) {
if (!strncmp(bat->bat_name, name, LST_NAME_SIZE)) {
@@ -845,7 +845,7 @@ lstcon_batch_find(const char *name, lstcon_batch_t **batpp)
int
lstcon_batch_add(char *name)
{
- lstcon_batch_t *bat;
+ struct lstcon_batch *bat;
int i;
int rc;
@@ -855,7 +855,7 @@ lstcon_batch_add(char *name)
return rc;
}
- LIBCFS_ALLOC(bat, sizeof(lstcon_batch_t));
+ LIBCFS_ALLOC(bat, sizeof(struct lstcon_batch));
if (!bat) {
CERROR("Can't allocate descriptor for batch %s\n", name);
return -ENOMEM;
@@ -865,7 +865,7 @@ lstcon_batch_add(char *name)
sizeof(struct list_head) * LST_NODE_HASHSIZE);
if (!bat->bat_cli_hash) {
CERROR("Can't allocate hash for batch %s\n", name);
- LIBCFS_FREE(bat, sizeof(lstcon_batch_t));
+ LIBCFS_FREE(bat, sizeof(struct lstcon_batch));
return -ENOMEM;
}
@@ -875,7 +875,7 @@ lstcon_batch_add(char *name)
if (!bat->bat_srv_hash) {
CERROR("Can't allocate hash for batch %s\n", name);
LIBCFS_FREE(bat->bat_cli_hash, LST_NODE_HASHSIZE);
- LIBCFS_FREE(bat, sizeof(lstcon_batch_t));
+ LIBCFS_FREE(bat, sizeof(struct lstcon_batch));
return -ENOMEM;
}
@@ -883,7 +883,7 @@ lstcon_batch_add(char *name)
if (strlen(name) > sizeof(bat->bat_name) - 1) {
LIBCFS_FREE(bat->bat_srv_hash, LST_NODE_HASHSIZE);
LIBCFS_FREE(bat->bat_cli_hash, LST_NODE_HASHSIZE);
- LIBCFS_FREE(bat, sizeof(lstcon_batch_t));
+ LIBCFS_FREE(bat, sizeof(struct lstcon_batch));
return -E2BIG;
}
strncpy(bat->bat_name, name, sizeof(bat->bat_name));
@@ -911,7 +911,7 @@ lstcon_batch_add(char *name)
int
lstcon_batch_list(int index, int len, char __user *name_up)
{
- lstcon_batch_t *bat;
+ struct lstcon_batch *bat;
LASSERT(name_up);
LASSERT(index >= 0);
@@ -934,9 +934,9 @@ lstcon_batch_info(char *name, lstcon_test_batch_ent_t __user *ent_up,
lstcon_test_batch_ent_t *entp;
struct list_head *clilst;
struct list_head *srvlst;
- lstcon_test_t *test = NULL;
- lstcon_batch_t *bat;
- lstcon_ndlink_t *ndl;
+ struct lstcon_test *test = NULL;
+ struct lstcon_batch *bat;
+ struct lstcon_ndlink *ndl;
int rc;
rc = lstcon_batch_find(name, &bat);
@@ -977,7 +977,6 @@ lstcon_batch_info(char *name, lstcon_test_batch_ent_t __user *ent_up,
if (!test) {
entp->u.tbe_batch.bae_ntest = bat->bat_ntest;
entp->u.tbe_batch.bae_state = bat->bat_state;
-
} else {
entp->u.tbe_test.tse_type = test->tes_type;
entp->u.tbe_test.tse_loop = test->tes_loop;
@@ -999,7 +998,7 @@ lstcon_batch_info(char *name, lstcon_test_batch_ent_t __user *ent_up,
}
static int
-lstcon_batrpc_condition(int transop, lstcon_node_t *nd, void *arg)
+lstcon_batrpc_condition(int transop, struct lstcon_node *nd, void *arg)
{
switch (transop) {
case LST_TRANS_TSBRUN:
@@ -1021,10 +1020,10 @@ lstcon_batrpc_condition(int transop, lstcon_node_t *nd, void *arg)
}
static int
-lstcon_batch_op(lstcon_batch_t *bat, int transop,
+lstcon_batch_op(struct lstcon_batch *bat, int transop,
struct list_head __user *result_up)
{
- lstcon_rpc_trans_t *trans;
+ struct lstcon_rpc_trans *trans;
int rc;
rc = lstcon_rpc_trans_ndlist(&bat->bat_cli_list,
@@ -1047,7 +1046,7 @@ lstcon_batch_op(lstcon_batch_t *bat, int transop,
int
lstcon_batch_run(char *name, int timeout, struct list_head __user *result_up)
{
- lstcon_batch_t *bat;
+ struct lstcon_batch *bat;
int rc;
if (lstcon_batch_find(name, &bat)) {
@@ -1069,7 +1068,7 @@ lstcon_batch_run(char *name, int timeout, struct list_head __user *result_up)
int
lstcon_batch_stop(char *name, int force, struct list_head __user *result_up)
{
- lstcon_batch_t *bat;
+ struct lstcon_batch *bat;
int rc;
if (lstcon_batch_find(name, &bat)) {
@@ -1089,17 +1088,17 @@ lstcon_batch_stop(char *name, int force, struct list_head __user *result_up)
}
static void
-lstcon_batch_destroy(lstcon_batch_t *bat)
+lstcon_batch_destroy(struct lstcon_batch *bat)
{
- lstcon_ndlink_t *ndl;
- lstcon_test_t *test;
+ struct lstcon_ndlink *ndl;
+ struct lstcon_test *test;
int i;
list_del(&bat->bat_link);
while (!list_empty(&bat->bat_test_list)) {
test = list_entry(bat->bat_test_list.next,
- lstcon_test_t, tes_link);
+ struct lstcon_test, tes_link);
LASSERT(list_empty(&test->tes_trans_list));
list_del(&test->tes_link);
@@ -1107,7 +1106,7 @@ lstcon_batch_destroy(lstcon_batch_t *bat)
lstcon_group_decref(test->tes_src_grp);
lstcon_group_decref(test->tes_dst_grp);
- LIBCFS_FREE(test, offsetof(lstcon_test_t,
+ LIBCFS_FREE(test, offsetof(struct lstcon_test,
tes_param[test->tes_paramlen]));
}
@@ -1115,7 +1114,7 @@ lstcon_batch_destroy(lstcon_batch_t *bat)
while (!list_empty(&bat->bat_cli_list)) {
ndl = list_entry(bat->bat_cli_list.next,
- lstcon_ndlink_t, ndl_link);
+ struct lstcon_ndlink, ndl_link);
list_del_init(&ndl->ndl_link);
lstcon_ndlink_release(ndl);
@@ -1123,7 +1122,7 @@ lstcon_batch_destroy(lstcon_batch_t *bat)
while (!list_empty(&bat->bat_srv_list)) {
ndl = list_entry(bat->bat_srv_list.next,
- lstcon_ndlink_t, ndl_link);
+ struct lstcon_ndlink, ndl_link);
list_del_init(&ndl->ndl_link);
lstcon_ndlink_release(ndl);
@@ -1138,19 +1137,19 @@ lstcon_batch_destroy(lstcon_batch_t *bat)
sizeof(struct list_head) * LST_NODE_HASHSIZE);
LIBCFS_FREE(bat->bat_srv_hash,
sizeof(struct list_head) * LST_NODE_HASHSIZE);
- LIBCFS_FREE(bat, sizeof(lstcon_batch_t));
+ LIBCFS_FREE(bat, sizeof(struct lstcon_batch));
}
static int
-lstcon_testrpc_condition(int transop, lstcon_node_t *nd, void *arg)
+lstcon_testrpc_condition(int transop, struct lstcon_node *nd, void *arg)
{
- lstcon_test_t *test;
- lstcon_batch_t *batch;
- lstcon_ndlink_t *ndl;
+ struct lstcon_test *test;
+ struct lstcon_batch *batch;
+ struct lstcon_ndlink *ndl;
struct list_head *hash;
struct list_head *head;
- test = (lstcon_test_t *)arg;
+ test = (struct lstcon_test *)arg;
LASSERT(test);
batch = test->tes_batch;
@@ -1186,10 +1185,10 @@ lstcon_testrpc_condition(int transop, lstcon_node_t *nd, void *arg)
}
static int
-lstcon_test_nodes_add(lstcon_test_t *test, struct list_head __user *result_up)
+lstcon_test_nodes_add(struct lstcon_test *test, struct list_head __user *result_up)
{
- lstcon_rpc_trans_t *trans;
- lstcon_group_t *grp;
+ struct lstcon_rpc_trans *trans;
+ struct lstcon_group *grp;
int transop;
int rc;
@@ -1237,7 +1236,7 @@ again:
}
static int
-lstcon_verify_batch(const char *name, lstcon_batch_t **batch)
+lstcon_verify_batch(const char *name, struct lstcon_batch **batch)
{
int rc;
@@ -1256,10 +1255,10 @@ lstcon_verify_batch(const char *name, lstcon_batch_t **batch)
}
static int
-lstcon_verify_group(const char *name, lstcon_group_t **grp)
+lstcon_verify_group(const char *name, struct lstcon_group **grp)
{
int rc;
- lstcon_ndlink_t *ndl;
+ struct lstcon_ndlink *ndl;
rc = lstcon_group_find(name, grp);
if (rc) {
@@ -1284,11 +1283,11 @@ lstcon_test_add(char *batch_name, int type, int loop,
void *param, int paramlen, int *retp,
struct list_head __user *result_up)
{
- lstcon_test_t *test = NULL;
+ struct lstcon_test *test = NULL;
int rc;
- lstcon_group_t *src_grp = NULL;
- lstcon_group_t *dst_grp = NULL;
- lstcon_batch_t *batch = NULL;
+ struct lstcon_group *src_grp = NULL;
+ struct lstcon_group *dst_grp = NULL;
+ struct lstcon_batch *batch = NULL;
/*
* verify that a batch of the given name exists, and the groups
@@ -1310,7 +1309,7 @@ lstcon_test_add(char *batch_name, int type, int loop,
if (dst_grp->grp_userland)
*retp = 1;
- LIBCFS_ALLOC(test, offsetof(lstcon_test_t, tes_param[paramlen]));
+ LIBCFS_ALLOC(test, offsetof(struct lstcon_test, tes_param[paramlen]));
if (!test) {
CERROR("Can't allocate test descriptor\n");
rc = -ENOMEM;
@@ -1357,7 +1356,7 @@ lstcon_test_add(char *batch_name, int type, int loop,
return rc;
out:
if (test)
- LIBCFS_FREE(test, offsetof(lstcon_test_t, tes_param[paramlen]));
+ LIBCFS_FREE(test, offsetof(struct lstcon_test, tes_param[paramlen]));
if (dst_grp)
lstcon_group_decref(dst_grp);
@@ -1369,9 +1368,9 @@ out:
}
static int
-lstcon_test_find(lstcon_batch_t *batch, int idx, lstcon_test_t **testpp)
+lstcon_test_find(struct lstcon_batch *batch, int idx, struct lstcon_test **testpp)
{
- lstcon_test_t *test;
+ struct lstcon_test *test;
list_for_each_entry(test, &batch->bat_test_list, tes_link) {
if (idx == test->tes_hdr.tsb_index) {
@@ -1384,10 +1383,10 @@ lstcon_test_find(lstcon_batch_t *batch, int idx, lstcon_test_t **testpp)
}
static int
-lstcon_tsbrpc_readent(int transop, srpc_msg_t *msg,
+lstcon_tsbrpc_readent(int transop, struct srpc_msg *msg,
lstcon_rpc_ent_t __user *ent_up)
{
- srpc_batch_reply_t *rep = &msg->msg_body.bat_reply;
+ struct srpc_batch_reply *rep = &msg->msg_body.bat_reply;
LASSERT(transop == LST_TRANS_TSBCLIQRY ||
transop == LST_TRANS_TSBSRVQRY);
@@ -1404,12 +1403,12 @@ int
lstcon_test_batch_query(char *name, int testidx, int client,
int timeout, struct list_head __user *result_up)
{
- lstcon_rpc_trans_t *trans;
+ struct lstcon_rpc_trans *trans;
struct list_head *translist;
struct list_head *ndlist;
- lstcon_tsb_hdr_t *hdr;
- lstcon_batch_t *batch;
- lstcon_test_t *test = NULL;
+ struct lstcon_tsb_hdr *hdr;
+ struct lstcon_batch *batch;
+ struct lstcon_test *test = NULL;
int transop;
int rc;
@@ -1423,7 +1422,6 @@ lstcon_test_batch_query(char *name, int testidx, int client,
translist = &batch->bat_trans_list;
ndlist = &batch->bat_cli_list;
hdr = &batch->bat_hdr;
-
} else {
/* query specified test only */
rc = lstcon_test_find(batch, testidx, &test);
@@ -1448,7 +1446,8 @@ lstcon_test_batch_query(char *name, int testidx, int client,
lstcon_rpc_trans_postwait(trans, timeout);
- if (!testidx && /* query a batch, not a test */
+ /* query a batch, not a test */
+ if (!testidx &&
!lstcon_rpc_stat_failure(lstcon_trans_stat(), 0) &&
!lstcon_tsbqry_stat_run(lstcon_trans_stat(), 0)) {
/* all RPCs finished, and no active test */
@@ -1463,10 +1462,10 @@ lstcon_test_batch_query(char *name, int testidx, int client,
}
static int
-lstcon_statrpc_readent(int transop, srpc_msg_t *msg,
+lstcon_statrpc_readent(int transop, struct srpc_msg *msg,
lstcon_rpc_ent_t __user *ent_up)
{
- srpc_stat_reply_t *rep = &msg->msg_body.stat_reply;
+ struct srpc_stat_reply *rep = &msg->msg_body.stat_reply;
sfw_counters_t __user *sfwk_stat;
srpc_counters_t __user *srpc_stat;
lnet_counters_t __user *lnet_stat;
@@ -1491,7 +1490,7 @@ lstcon_ndlist_stat(struct list_head *ndlist,
int timeout, struct list_head __user *result_up)
{
struct list_head head;
- lstcon_rpc_trans_t *trans;
+ struct lstcon_rpc_trans *trans;
int rc;
INIT_LIST_HEAD(&head);
@@ -1516,7 +1515,7 @@ int
lstcon_group_stat(char *grp_name, int timeout,
struct list_head __user *result_up)
{
- lstcon_group_t *grp;
+ struct lstcon_group *grp;
int rc;
rc = lstcon_group_find(grp_name, &grp);
@@ -1536,8 +1535,8 @@ int
lstcon_nodes_stat(int count, lnet_process_id_t __user *ids_up,
int timeout, struct list_head __user *result_up)
{
- lstcon_ndlink_t *ndl;
- lstcon_group_t *tmp;
+ struct lstcon_ndlink *ndl;
+ struct lstcon_group *tmp;
lnet_process_id_t id;
int i;
int rc;
@@ -1581,7 +1580,7 @@ lstcon_debug_ndlist(struct list_head *ndlist,
struct list_head *translist,
int timeout, struct list_head __user *result_up)
{
- lstcon_rpc_trans_t *trans;
+ struct lstcon_rpc_trans *trans;
int rc;
rc = lstcon_rpc_trans_ndlist(ndlist, translist, LST_TRANS_SESQRY,
@@ -1611,7 +1610,7 @@ int
lstcon_batch_debug(int timeout, char *name,
int client, struct list_head __user *result_up)
{
- lstcon_batch_t *bat;
+ struct lstcon_batch *bat;
int rc;
rc = lstcon_batch_find(name, &bat);
@@ -1629,7 +1628,7 @@ int
lstcon_group_debug(int timeout, char *name,
struct list_head __user *result_up)
{
- lstcon_group_t *grp;
+ struct lstcon_group *grp;
int rc;
rc = lstcon_group_find(name, &grp);
@@ -1649,8 +1648,8 @@ lstcon_nodes_debug(int timeout,
struct list_head __user *result_up)
{
lnet_process_id_t id;
- lstcon_ndlink_t *ndl;
- lstcon_group_t *grp;
+ struct lstcon_ndlink *ndl;
+ struct lstcon_group *grp;
int i;
int rc;
@@ -1749,7 +1748,7 @@ lstcon_session_new(char *name, int key, unsigned feats,
if (strlen(name) > sizeof(console_session.ses_name) - 1)
return -E2BIG;
- strncpy(console_session.ses_name, name,
+ strlcpy(console_session.ses_name, name,
sizeof(console_session.ses_name));
rc = lstcon_batch_add(LST_DEFAULT_BATCH);
@@ -1758,7 +1757,7 @@ lstcon_session_new(char *name, int key, unsigned feats,
rc = lstcon_rpc_pinger_start();
if (rc) {
- lstcon_batch_t *bat = NULL;
+ struct lstcon_batch *bat = NULL;
lstcon_batch_find(LST_DEFAULT_BATCH, &bat);
lstcon_batch_destroy(bat);
@@ -1782,7 +1781,7 @@ lstcon_session_info(lst_sid_t __user *sid_up, int __user *key_up,
char __user *name_up, int len)
{
lstcon_ndlist_ent_t *entp;
- lstcon_ndlink_t *ndl;
+ struct lstcon_ndlink *ndl;
int rc = 0;
if (console_session.ses_state != LST_SESSION_ACTIVE)
@@ -1813,9 +1812,9 @@ lstcon_session_info(lst_sid_t __user *sid_up, int __user *key_up,
int
lstcon_session_end(void)
{
- lstcon_rpc_trans_t *trans;
- lstcon_group_t *grp;
- lstcon_batch_t *bat;
+ struct lstcon_rpc_trans *trans;
+ struct lstcon_group *grp;
+ struct lstcon_batch *bat;
int rc = 0;
LASSERT(console_session.ses_state == LST_SESSION_ACTIVE);
@@ -1849,7 +1848,7 @@ lstcon_session_end(void)
/* destroy all batches */
while (!list_empty(&console_session.ses_bat_list)) {
bat = list_entry(console_session.ses_bat_list.next,
- lstcon_batch_t, bat_link);
+ struct lstcon_batch, bat_link);
lstcon_batch_destroy(bat);
}
@@ -1857,7 +1856,7 @@ lstcon_session_end(void)
/* destroy all groups */
while (!list_empty(&console_session.ses_grp_list)) {
grp = list_entry(console_session.ses_grp_list.next,
- lstcon_group_t, grp_link);
+ struct lstcon_group, grp_link);
LASSERT(grp->grp_ref == 1);
lstcon_group_decref(grp);
@@ -1906,12 +1905,12 @@ lstcon_session_feats_check(unsigned feats)
static int
lstcon_acceptor_handle(struct srpc_server_rpc *rpc)
{
- srpc_msg_t *rep = &rpc->srpc_replymsg;
- srpc_msg_t *req = &rpc->srpc_reqstbuf->buf_msg;
- srpc_join_reqst_t *jreq = &req->msg_body.join_reqst;
- srpc_join_reply_t *jrep = &rep->msg_body.join_reply;
- lstcon_group_t *grp = NULL;
- lstcon_ndlink_t *ndl;
+ struct srpc_msg *rep = &rpc->srpc_replymsg;
+ struct srpc_msg *req = &rpc->srpc_reqstbuf->buf_msg;
+ struct srpc_join_reqst *jreq = &req->msg_body.join_reqst;
+ struct srpc_join_reply *jrep = &rep->msg_body.join_reply;
+ struct lstcon_group *grp = NULL;
+ struct lstcon_ndlink *ndl;
int rc = 0;
sfw_unpack_message(req);
@@ -1987,7 +1986,8 @@ out:
return rc;
}
-static srpc_service_t lstcon_acceptor_service;
+static struct srpc_service lstcon_acceptor_service;
+
static void lstcon_init_acceptor_service(void)
{
/* initialize selftest console acceptor service table */
diff --git a/drivers/staging/lustre/lnet/selftest/console.h b/drivers/staging/lustre/lnet/selftest/console.h
index 554f582441f1..becd22e41da9 100644
--- a/drivers/staging/lustre/lnet/selftest/console.h
+++ b/drivers/staging/lustre/lnet/selftest/console.h
@@ -50,22 +50,25 @@
#include "selftest.h"
#include "conrpc.h"
-typedef struct lstcon_node {
+/* node descriptor */
+struct lstcon_node {
lnet_process_id_t nd_id; /* id of the node */
int nd_ref; /* reference count */
int nd_state; /* state of the node */
int nd_timeout; /* session timeout */
unsigned long nd_stamp; /* timestamp of last replied RPC */
struct lstcon_rpc nd_ping; /* ping rpc */
-} lstcon_node_t; /* node descriptor */
+};
-typedef struct {
+/* node link descriptor */
+struct lstcon_ndlink {
struct list_head ndl_link; /* chain on list */
struct list_head ndl_hlink; /* chain on hash */
- lstcon_node_t *ndl_node; /* pointer to node */
-} lstcon_ndlink_t; /* node link descriptor */
+ struct lstcon_node *ndl_node; /* pointer to node */
+};
-typedef struct {
+/* (alias of nodes) group descriptor */
+struct lstcon_group {
struct list_head grp_link; /* chain on global group list
*/
int grp_ref; /* reference count */
@@ -76,18 +79,19 @@ typedef struct {
struct list_head grp_trans_list; /* transaction list */
struct list_head grp_ndl_list; /* nodes list */
struct list_head grp_ndl_hash[0]; /* hash table for nodes */
-} lstcon_group_t; /* (alias of nodes) group descriptor */
+};
#define LST_BATCH_IDLE 0xB0 /* idle batch */
#define LST_BATCH_RUNNING 0xB1 /* running batch */
-typedef struct lstcon_tsb_hdr {
+struct lstcon_tsb_hdr {
lst_bid_t tsb_id; /* batch ID */
int tsb_index; /* test index */
-} lstcon_tsb_hdr_t;
+};
-typedef struct {
- lstcon_tsb_hdr_t bat_hdr; /* test_batch header */
+/* (tests ) batch descriptor */
+struct lstcon_batch {
+ struct lstcon_tsb_hdr bat_hdr; /* test_batch header */
struct list_head bat_link; /* chain on session's batches list */
int bat_ntest; /* # of test */
int bat_state; /* state of the batch */
@@ -95,20 +99,21 @@ typedef struct {
* for run, force for stop */
char bat_name[LST_NAME_SIZE];/* name of batch */
- struct list_head bat_test_list; /* list head of tests (lstcon_test_t)
+ struct list_head bat_test_list; /* list head of tests (struct lstcon_test)
*/
struct list_head bat_trans_list; /* list head of transaction */
struct list_head bat_cli_list; /* list head of client nodes
- * (lstcon_node_t) */
+ * (struct lstcon_node) */
struct list_head *bat_cli_hash; /* hash table of client nodes */
struct list_head bat_srv_list; /* list head of server nodes */
struct list_head *bat_srv_hash; /* hash table of server nodes */
-} lstcon_batch_t; /* (tests ) batch descriptor */
+};
-typedef struct lstcon_test {
- lstcon_tsb_hdr_t tes_hdr; /* test batch header */
+/* a single test descriptor */
+struct lstcon_test {
+ struct lstcon_tsb_hdr tes_hdr; /* test batch header */
struct list_head tes_link; /* chain on batch's tests list */
- lstcon_batch_t *tes_batch; /* pointer to batch */
+ struct lstcon_batch *tes_batch; /* pointer to batch */
int tes_type; /* type of the test, i.e: bulk, ping */
int tes_stop_onerr; /* stop on error */
@@ -120,12 +125,12 @@ typedef struct lstcon_test {
int tes_cliidx; /* client index, used for RPC creating */
struct list_head tes_trans_list; /* transaction list */
- lstcon_group_t *tes_src_grp; /* group run the test */
- lstcon_group_t *tes_dst_grp; /* target group */
+ struct lstcon_group *tes_src_grp; /* group run the test */
+ struct lstcon_group *tes_dst_grp; /* target group */
int tes_paramlen; /* test parameter length */
char tes_param[0]; /* test parameter */
-} lstcon_test_t; /* a single test descriptor */
+};
#define LST_GLOBAL_HASHSIZE 503 /* global nodes hash table size */
#define LST_NODE_HASHSIZE 239 /* node hash table (for batch or group) */
@@ -152,7 +157,7 @@ struct lstcon_session {
unsigned ses_expired:1; /* console is timedout */
__u64 ses_id_cookie; /* batch id cookie */
char ses_name[LST_NAME_SIZE];/* session name */
- lstcon_rpc_trans_t *ses_ping; /* session pinger */
+ struct lstcon_rpc_trans *ses_ping; /* session pinger */
struct stt_timer ses_ping_timer; /* timer for pinger */
lstcon_trans_stat_t ses_trans_stat; /* transaction stats */
diff --git a/drivers/staging/lustre/lnet/selftest/framework.c b/drivers/staging/lustre/lnet/selftest/framework.c
index e2c532399366..30e4f71f14c2 100644
--- a/drivers/staging/lustre/lnet/selftest/framework.c
+++ b/drivers/staging/lustre/lnet/selftest/framework.c
@@ -109,19 +109,19 @@ static struct smoketest_framework {
struct list_head fw_tests; /* registered test cases */
atomic_t fw_nzombies; /* # zombie sessions */
spinlock_t fw_lock; /* serialise */
- sfw_session_t *fw_session; /* _the_ session */
+ struct sfw_session *fw_session; /* _the_ session */
int fw_shuttingdown; /* shutdown in progress */
struct srpc_server_rpc *fw_active_srpc;/* running RPC */
} sfw_data;
/* forward ref's */
-int sfw_stop_batch(sfw_batch_t *tsb, int force);
-void sfw_destroy_session(sfw_session_t *sn);
+int sfw_stop_batch(struct sfw_batch *tsb, int force);
+void sfw_destroy_session(struct sfw_session *sn);
-static inline sfw_test_case_t *
+static inline struct sfw_test_case *
sfw_find_test_case(int id)
{
- sfw_test_case_t *tsc;
+ struct sfw_test_case *tsc;
LASSERT(id <= SRPC_SERVICE_MAX_ID);
LASSERT(id > SRPC_FRAMEWORK_SERVICE_MAX_ID);
@@ -135,9 +135,9 @@ sfw_find_test_case(int id)
}
static int
-sfw_register_test(srpc_service_t *service, sfw_test_client_ops_t *cliops)
+sfw_register_test(struct srpc_service *service, struct sfw_test_client_ops *cliops)
{
- sfw_test_case_t *tsc;
+ struct sfw_test_case *tsc;
if (sfw_find_test_case(service->sv_id)) {
CERROR("Failed to register test %s (%d)\n",
@@ -145,7 +145,7 @@ sfw_register_test(srpc_service_t *service, sfw_test_client_ops_t *cliops)
return -EEXIST;
}
- LIBCFS_ALLOC(tsc, sizeof(sfw_test_case_t));
+ LIBCFS_ALLOC(tsc, sizeof(struct sfw_test_case));
if (!tsc)
return -ENOMEM;
@@ -159,7 +159,7 @@ sfw_register_test(srpc_service_t *service, sfw_test_client_ops_t *cliops)
static void
sfw_add_session_timer(void)
{
- sfw_session_t *sn = sfw_data.fw_session;
+ struct sfw_session *sn = sfw_data.fw_session;
struct stt_timer *timer = &sn->sn_timer;
LASSERT(!sfw_data.fw_shuttingdown);
@@ -177,7 +177,7 @@ sfw_add_session_timer(void)
static int
sfw_del_session_timer(void)
{
- sfw_session_t *sn = sfw_data.fw_session;
+ struct sfw_session *sn = sfw_data.fw_session;
if (!sn || !sn->sn_timer_active)
return 0;
@@ -196,10 +196,10 @@ static void
sfw_deactivate_session(void)
__must_hold(&sfw_data.fw_lock)
{
- sfw_session_t *sn = sfw_data.fw_session;
+ struct sfw_session *sn = sfw_data.fw_session;
int nactive = 0;
- sfw_batch_t *tsb;
- sfw_test_case_t *tsc;
+ struct sfw_batch *tsb;
+ struct sfw_test_case *tsc;
if (!sn)
return;
@@ -226,7 +226,7 @@ __must_hold(&sfw_data.fw_lock)
}
if (nactive)
- return; /* wait for active batches to stop */
+ return; /* wait for active batches to stop */
list_del_init(&sn->sn_list);
spin_unlock(&sfw_data.fw_lock);
@@ -239,7 +239,7 @@ __must_hold(&sfw_data.fw_lock)
static void
sfw_session_expired(void *data)
{
- sfw_session_t *sn = data;
+ struct sfw_session *sn = data;
spin_lock(&sfw_data.fw_lock);
@@ -257,12 +257,12 @@ sfw_session_expired(void *data)
}
static inline void
-sfw_init_session(sfw_session_t *sn, lst_sid_t sid,
+sfw_init_session(struct sfw_session *sn, lst_sid_t sid,
unsigned features, const char *name)
{
struct stt_timer *timer = &sn->sn_timer;
- memset(sn, 0, sizeof(sfw_session_t));
+ memset(sn, 0, sizeof(struct sfw_session));
INIT_LIST_HEAD(&sn->sn_list);
INIT_LIST_HEAD(&sn->sn_batches);
atomic_set(&sn->sn_refcount, 1); /* +1 for caller */
@@ -298,7 +298,7 @@ sfw_server_rpc_done(struct srpc_server_rpc *rpc)
}
static void
-sfw_client_rpc_fini(srpc_client_rpc_t *rpc)
+sfw_client_rpc_fini(struct srpc_client_rpc *rpc)
{
LASSERT(!rpc->crpc_bulk.bk_niov);
LASSERT(list_empty(&rpc->crpc_list));
@@ -318,11 +318,11 @@ sfw_client_rpc_fini(srpc_client_rpc_t *rpc)
spin_unlock(&sfw_data.fw_lock);
}
-static sfw_batch_t *
+static struct sfw_batch *
sfw_find_batch(lst_bid_t bid)
{
- sfw_session_t *sn = sfw_data.fw_session;
- sfw_batch_t *bat;
+ struct sfw_session *sn = sfw_data.fw_session;
+ struct sfw_batch *bat;
LASSERT(sn);
@@ -334,11 +334,11 @@ sfw_find_batch(lst_bid_t bid)
return NULL;
}
-static sfw_batch_t *
+static struct sfw_batch *
sfw_bid2batch(lst_bid_t bid)
{
- sfw_session_t *sn = sfw_data.fw_session;
- sfw_batch_t *bat;
+ struct sfw_session *sn = sfw_data.fw_session;
+ struct sfw_batch *bat;
LASSERT(sn);
@@ -346,7 +346,7 @@ sfw_bid2batch(lst_bid_t bid)
if (bat)
return bat;
- LIBCFS_ALLOC(bat, sizeof(sfw_batch_t));
+ LIBCFS_ALLOC(bat, sizeof(struct sfw_batch));
if (!bat)
return NULL;
@@ -361,11 +361,11 @@ sfw_bid2batch(lst_bid_t bid)
}
static int
-sfw_get_stats(srpc_stat_reqst_t *request, srpc_stat_reply_t *reply)
+sfw_get_stats(struct srpc_stat_reqst *request, struct srpc_stat_reply *reply)
{
- sfw_session_t *sn = sfw_data.fw_session;
+ struct sfw_session *sn = sfw_data.fw_session;
sfw_counters_t *cnt = &reply->str_fw;
- sfw_batch_t *bat;
+ struct sfw_batch *bat;
reply->str_sid = !sn ? LST_INVALID_SID : sn->sn_id;
@@ -402,10 +402,10 @@ sfw_get_stats(srpc_stat_reqst_t *request, srpc_stat_reply_t *reply)
}
int
-sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply)
+sfw_make_session(struct srpc_mksn_reqst *request, struct srpc_mksn_reply *reply)
{
- sfw_session_t *sn = sfw_data.fw_session;
- srpc_msg_t *msg = container_of(request, srpc_msg_t,
+ struct sfw_session *sn = sfw_data.fw_session;
+ struct srpc_msg *msg = container_of(request, struct srpc_msg,
msg_body.mksn_reqst);
int cplen = 0;
@@ -438,7 +438,7 @@ sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply)
/*
* reject the request if it requires unknown features
* NB: old version will always accept all features because it's not
- * aware of srpc_msg_t::msg_ses_feats, it's a defect but it's also
+ * aware of srpc_msg::msg_ses_feats, it's a defect but it's also
* harmless because it will return zero feature to console, and it's
* console's responsibility to make sure all nodes in a session have
* same feature mask.
@@ -449,7 +449,7 @@ sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply)
}
/* brand new or create by force */
- LIBCFS_ALLOC(sn, sizeof(sfw_session_t));
+ LIBCFS_ALLOC(sn, sizeof(struct sfw_session));
if (!sn) {
CERROR("dropping RPC mksn under memory pressure\n");
return -ENOMEM;
@@ -473,9 +473,9 @@ sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply)
}
static int
-sfw_remove_session(srpc_rmsn_reqst_t *request, srpc_rmsn_reply_t *reply)
+sfw_remove_session(struct srpc_rmsn_reqst *request, struct srpc_rmsn_reply *reply)
{
- sfw_session_t *sn = sfw_data.fw_session;
+ struct sfw_session *sn = sfw_data.fw_session;
reply->rmsn_sid = !sn ? LST_INVALID_SID : sn->sn_id;
@@ -505,9 +505,9 @@ sfw_remove_session(srpc_rmsn_reqst_t *request, srpc_rmsn_reply_t *reply)
}
static int
-sfw_debug_session(srpc_debug_reqst_t *request, srpc_debug_reply_t *reply)
+sfw_debug_session(struct srpc_debug_reqst *request, struct srpc_debug_reply *reply)
{
- sfw_session_t *sn = sfw_data.fw_session;
+ struct sfw_session *sn = sfw_data.fw_session;
if (!sn) {
reply->dbg_status = ESRCH;
@@ -526,10 +526,10 @@ sfw_debug_session(srpc_debug_reqst_t *request, srpc_debug_reply_t *reply)
}
static void
-sfw_test_rpc_fini(srpc_client_rpc_t *rpc)
+sfw_test_rpc_fini(struct srpc_client_rpc *rpc)
{
- sfw_test_unit_t *tsu = rpc->crpc_priv;
- sfw_test_instance_t *tsi = tsu->tsu_instance;
+ struct sfw_test_unit *tsu = rpc->crpc_priv;
+ struct sfw_test_instance *tsi = tsu->tsu_instance;
/* Called with hold of tsi->tsi_lock */
LASSERT(list_empty(&rpc->crpc_list));
@@ -537,7 +537,7 @@ sfw_test_rpc_fini(srpc_client_rpc_t *rpc)
}
static inline int
-sfw_test_buffers(sfw_test_instance_t *tsi)
+sfw_test_buffers(struct sfw_test_instance *tsi)
{
struct sfw_test_case *tsc;
struct srpc_service *svc;
@@ -614,10 +614,10 @@ sfw_unload_test(struct sfw_test_instance *tsi)
}
static void
-sfw_destroy_test_instance(sfw_test_instance_t *tsi)
+sfw_destroy_test_instance(struct sfw_test_instance *tsi)
{
- srpc_client_rpc_t *rpc;
- sfw_test_unit_t *tsu;
+ struct srpc_client_rpc *rpc;
+ struct sfw_test_unit *tsu;
if (!tsi->tsi_is_client)
goto clean;
@@ -630,14 +630,14 @@ sfw_destroy_test_instance(sfw_test_instance_t *tsi)
while (!list_empty(&tsi->tsi_units)) {
tsu = list_entry(tsi->tsi_units.next,
- sfw_test_unit_t, tsu_list);
+ struct sfw_test_unit, tsu_list);
list_del(&tsu->tsu_list);
LIBCFS_FREE(tsu, sizeof(*tsu));
}
while (!list_empty(&tsi->tsi_free_rpcs)) {
rpc = list_entry(tsi->tsi_free_rpcs.next,
- srpc_client_rpc_t, crpc_list);
+ struct srpc_client_rpc, crpc_list);
list_del(&rpc->crpc_list);
LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
}
@@ -648,34 +648,34 @@ clean:
}
static void
-sfw_destroy_batch(sfw_batch_t *tsb)
+sfw_destroy_batch(struct sfw_batch *tsb)
{
- sfw_test_instance_t *tsi;
+ struct sfw_test_instance *tsi;
LASSERT(!sfw_batch_active(tsb));
LASSERT(list_empty(&tsb->bat_list));
while (!list_empty(&tsb->bat_tests)) {
tsi = list_entry(tsb->bat_tests.next,
- sfw_test_instance_t, tsi_list);
+ struct sfw_test_instance, tsi_list);
list_del_init(&tsi->tsi_list);
sfw_destroy_test_instance(tsi);
}
- LIBCFS_FREE(tsb, sizeof(sfw_batch_t));
+ LIBCFS_FREE(tsb, sizeof(struct sfw_batch));
}
void
-sfw_destroy_session(sfw_session_t *sn)
+sfw_destroy_session(struct sfw_session *sn)
{
- sfw_batch_t *batch;
+ struct sfw_batch *batch;
LASSERT(list_empty(&sn->sn_list));
LASSERT(sn != sfw_data.fw_session);
while (!list_empty(&sn->sn_batches)) {
batch = list_entry(sn->sn_batches.next,
- sfw_batch_t, bat_list);
+ struct sfw_batch, bat_list);
list_del_init(&batch->bat_list);
sfw_destroy_batch(batch);
}
@@ -685,28 +685,28 @@ sfw_destroy_session(sfw_session_t *sn)
}
static void
-sfw_unpack_addtest_req(srpc_msg_t *msg)
+sfw_unpack_addtest_req(struct srpc_msg *msg)
{
- srpc_test_reqst_t *req = &msg->msg_body.tes_reqst;
+ struct srpc_test_reqst *req = &msg->msg_body.tes_reqst;
LASSERT(msg->msg_type == SRPC_MSG_TEST_REQST);
LASSERT(req->tsr_is_client);
if (msg->msg_magic == SRPC_MSG_MAGIC)
- return; /* no flipping needed */
+ return; /* no flipping needed */
LASSERT(msg->msg_magic == __swab32(SRPC_MSG_MAGIC));
if (req->tsr_service == SRPC_SERVICE_BRW) {
if (!(msg->msg_ses_feats & LST_FEAT_BULK_LEN)) {
- test_bulk_req_t *bulk = &req->tsr_u.bulk_v0;
+ struct test_bulk_req *bulk = &req->tsr_u.bulk_v0;
__swab32s(&bulk->blk_opc);
__swab32s(&bulk->blk_npg);
__swab32s(&bulk->blk_flags);
} else {
- test_bulk_req_v1_t *bulk = &req->tsr_u.bulk_v1;
+ struct test_bulk_req_v1 *bulk = &req->tsr_u.bulk_v1;
__swab16s(&bulk->blk_opc);
__swab16s(&bulk->blk_flags);
@@ -718,7 +718,7 @@ sfw_unpack_addtest_req(srpc_msg_t *msg)
}
if (req->tsr_service == SRPC_SERVICE_PING) {
- test_ping_req_t *ping = &req->tsr_u.ping;
+ struct test_ping_req *ping = &req->tsr_u.ping;
__swab32s(&ping->png_size);
__swab32s(&ping->png_flags);
@@ -729,14 +729,14 @@ sfw_unpack_addtest_req(srpc_msg_t *msg)
}
static int
-sfw_add_test_instance(sfw_batch_t *tsb, struct srpc_server_rpc *rpc)
+sfw_add_test_instance(struct sfw_batch *tsb, struct srpc_server_rpc *rpc)
{
- srpc_msg_t *msg = &rpc->srpc_reqstbuf->buf_msg;
- srpc_test_reqst_t *req = &msg->msg_body.tes_reqst;
- srpc_bulk_t *bk = rpc->srpc_bulk;
+ struct srpc_msg *msg = &rpc->srpc_reqstbuf->buf_msg;
+ struct srpc_test_reqst *req = &msg->msg_body.tes_reqst;
+ struct srpc_bulk *bk = rpc->srpc_bulk;
int ndest = req->tsr_ndest;
- sfw_test_unit_t *tsu;
- sfw_test_instance_t *tsi;
+ struct sfw_test_unit *tsu;
+ struct sfw_test_instance *tsi;
int i;
int rc;
@@ -789,13 +789,13 @@ sfw_add_test_instance(sfw_batch_t *tsb, struct srpc_server_rpc *rpc)
int j;
dests = page_address(bk->bk_iovs[i / SFW_ID_PER_PAGE].kiov_page);
- LASSERT(dests); /* my pages are within KVM always */
+ LASSERT(dests); /* my pages are within KVM always */
id = dests[i % SFW_ID_PER_PAGE];
if (msg->msg_magic != SRPC_MSG_MAGIC)
sfw_unpack_id(id);
for (j = 0; j < tsi->tsi_concur; j++) {
- LIBCFS_ALLOC(tsu, sizeof(sfw_test_unit_t));
+ LIBCFS_ALLOC(tsu, sizeof(struct sfw_test_unit));
if (!tsu) {
rc = -ENOMEM;
CERROR("Can't allocate tsu for %d\n",
@@ -824,11 +824,11 @@ error:
}
static void
-sfw_test_unit_done(sfw_test_unit_t *tsu)
+sfw_test_unit_done(struct sfw_test_unit *tsu)
{
- sfw_test_instance_t *tsi = tsu->tsu_instance;
- sfw_batch_t *tsb = tsi->tsi_batch;
- sfw_session_t *sn = tsb->bat_session;
+ struct sfw_test_instance *tsi = tsu->tsu_instance;
+ struct sfw_batch *tsb = tsi->tsi_batch;
+ struct sfw_session *sn = tsb->bat_session;
LASSERT(sfw_test_active(tsi));
@@ -844,8 +844,8 @@ sfw_test_unit_done(sfw_test_unit_t *tsu)
spin_lock(&sfw_data.fw_lock);
- if (!atomic_dec_and_test(&tsb->bat_nactive) ||/* tsb still active */
- sn == sfw_data.fw_session) { /* sn also active */
+ if (!atomic_dec_and_test(&tsb->bat_nactive) || /* tsb still active */
+ sn == sfw_data.fw_session) { /* sn also active */
spin_unlock(&sfw_data.fw_lock);
return;
}
@@ -866,10 +866,10 @@ sfw_test_unit_done(sfw_test_unit_t *tsu)
}
static void
-sfw_test_rpc_done(srpc_client_rpc_t *rpc)
+sfw_test_rpc_done(struct srpc_client_rpc *rpc)
{
- sfw_test_unit_t *tsu = rpc->crpc_priv;
- sfw_test_instance_t *tsi = tsu->tsu_instance;
+ struct sfw_test_unit *tsu = rpc->crpc_priv;
+ struct sfw_test_instance *tsi = tsu->tsu_instance;
int done = 0;
tsi->tsi_ops->tso_done_rpc(tsu, rpc);
@@ -900,19 +900,19 @@ sfw_test_rpc_done(srpc_client_rpc_t *rpc)
}
int
-sfw_create_test_rpc(sfw_test_unit_t *tsu, lnet_process_id_t peer,
+sfw_create_test_rpc(struct sfw_test_unit *tsu, lnet_process_id_t peer,
unsigned features, int nblk, int blklen,
- srpc_client_rpc_t **rpcpp)
+ struct srpc_client_rpc **rpcpp)
{
- srpc_client_rpc_t *rpc = NULL;
- sfw_test_instance_t *tsi = tsu->tsu_instance;
+ struct srpc_client_rpc *rpc = NULL;
+ struct sfw_test_instance *tsi = tsu->tsu_instance;
spin_lock(&tsi->tsi_lock);
LASSERT(sfw_test_active(tsi));
/* pick request from buffer */
rpc = list_first_entry_or_null(&tsi->tsi_free_rpcs,
- srpc_client_rpc_t, crpc_list);
+ struct srpc_client_rpc, crpc_list);
if (rpc) {
LASSERT(nblk == rpc->crpc_bulk.bk_niov);
list_del_init(&rpc->crpc_list);
@@ -942,11 +942,11 @@ sfw_create_test_rpc(sfw_test_unit_t *tsu, lnet_process_id_t peer,
}
static int
-sfw_run_test(swi_workitem_t *wi)
+sfw_run_test(struct swi_workitem *wi)
{
- sfw_test_unit_t *tsu = wi->swi_workitem.wi_data;
- sfw_test_instance_t *tsi = tsu->tsu_instance;
- srpc_client_rpc_t *rpc = NULL;
+ struct sfw_test_unit *tsu = wi->swi_workitem.wi_data;
+ struct sfw_test_instance *tsi = tsu->tsu_instance;
+ struct srpc_client_rpc *rpc = NULL;
LASSERT(wi == &tsu->tsu_worker);
@@ -991,11 +991,11 @@ test_done:
}
static int
-sfw_run_batch(sfw_batch_t *tsb)
+sfw_run_batch(struct sfw_batch *tsb)
{
- swi_workitem_t *wi;
- sfw_test_unit_t *tsu;
- sfw_test_instance_t *tsi;
+ struct swi_workitem *wi;
+ struct sfw_test_unit *tsu;
+ struct sfw_test_instance *tsi;
if (sfw_batch_active(tsb)) {
CDEBUG(D_NET, "Batch already active: %llu (%d)\n",
@@ -1026,10 +1026,10 @@ sfw_run_batch(sfw_batch_t *tsb)
}
int
-sfw_stop_batch(sfw_batch_t *tsb, int force)
+sfw_stop_batch(struct sfw_batch *tsb, int force)
{
- sfw_test_instance_t *tsi;
- srpc_client_rpc_t *rpc;
+ struct sfw_test_instance *tsi;
+ struct srpc_client_rpc *rpc;
if (!sfw_batch_active(tsb)) {
CDEBUG(D_NET, "Batch %llu inactive\n", tsb->bat_id.bat_id);
@@ -1068,9 +1068,9 @@ sfw_stop_batch(sfw_batch_t *tsb, int force)
}
static int
-sfw_query_batch(sfw_batch_t *tsb, int testidx, srpc_batch_reply_t *reply)
+sfw_query_batch(struct sfw_batch *tsb, int testidx, struct srpc_batch_reply *reply)
{
- sfw_test_instance_t *tsi;
+ struct sfw_test_instance *tsi;
if (testidx < 0)
return -EINVAL;
@@ -1115,11 +1115,11 @@ sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len,
static int
sfw_add_test(struct srpc_server_rpc *rpc)
{
- sfw_session_t *sn = sfw_data.fw_session;
- srpc_test_reply_t *reply = &rpc->srpc_replymsg.msg_body.tes_reply;
- srpc_test_reqst_t *request;
+ struct sfw_session *sn = sfw_data.fw_session;
+ struct srpc_test_reply *reply = &rpc->srpc_replymsg.msg_body.tes_reply;
+ struct srpc_test_reqst *request;
int rc;
- sfw_batch_t *bat;
+ struct sfw_batch *bat;
request = &rpc->srpc_reqstbuf->buf_msg.msg_body.tes_reqst;
reply->tsr_sid = !sn ? LST_INVALID_SID : sn->sn_id;
@@ -1183,11 +1183,11 @@ sfw_add_test(struct srpc_server_rpc *rpc)
}
static int
-sfw_control_batch(srpc_batch_reqst_t *request, srpc_batch_reply_t *reply)
+sfw_control_batch(struct srpc_batch_reqst *request, struct srpc_batch_reply *reply)
{
- sfw_session_t *sn = sfw_data.fw_session;
+ struct sfw_session *sn = sfw_data.fw_session;
int rc = 0;
- sfw_batch_t *bat;
+ struct sfw_batch *bat;
reply->bar_sid = !sn ? LST_INVALID_SID : sn->sn_id;
@@ -1227,8 +1227,8 @@ static int
sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
{
struct srpc_service *sv = rpc->srpc_scd->scd_svc;
- srpc_msg_t *reply = &rpc->srpc_replymsg;
- srpc_msg_t *request = &rpc->srpc_reqstbuf->buf_msg;
+ struct srpc_msg *reply = &rpc->srpc_replymsg;
+ struct srpc_msg *request = &rpc->srpc_reqstbuf->buf_msg;
unsigned features = LST_FEATS_MASK;
int rc = 0;
@@ -1244,7 +1244,7 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
/* Remove timer to avoid racing with it or expiring active session */
if (sfw_del_session_timer()) {
- CERROR("Dropping RPC (%s) from %s: racing with expiry timer.",
+ CERROR("dropping RPC %s from %s: racing with expiry timer\n",
sv->sv_name, libcfs_id2str(rpc->srpc_peer));
spin_unlock(&sfw_data.fw_lock);
return -EAGAIN;
@@ -1261,7 +1261,7 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
if (sv->sv_id != SRPC_SERVICE_MAKE_SESSION &&
sv->sv_id != SRPC_SERVICE_DEBUG) {
- sfw_session_t *sn = sfw_data.fw_session;
+ struct sfw_session *sn = sfw_data.fw_session;
if (sn &&
sn->sn_features != request->msg_ses_feats) {
@@ -1273,7 +1273,7 @@ sfw_handle_server_rpc(struct srpc_server_rpc *rpc)
}
} else if (request->msg_ses_feats & ~LST_FEATS_MASK) {
- /**
+ /*
* NB: at this point, old version will ignore features and
* create new session anyway, so console should be able
* to handle this
@@ -1377,12 +1377,12 @@ sfw_bulk_ready(struct srpc_server_rpc *rpc, int status)
return rc;
}
-srpc_client_rpc_t *
+struct srpc_client_rpc *
sfw_create_rpc(lnet_process_id_t peer, int service,
unsigned features, int nbulkiov, int bulklen,
- void (*done)(srpc_client_rpc_t *), void *priv)
+ void (*done)(struct srpc_client_rpc *), void *priv)
{
- srpc_client_rpc_t *rpc = NULL;
+ struct srpc_client_rpc *rpc = NULL;
spin_lock(&sfw_data.fw_lock);
@@ -1391,7 +1391,7 @@ sfw_create_rpc(lnet_process_id_t peer, int service,
if (!nbulkiov && !list_empty(&sfw_data.fw_zombie_rpcs)) {
rpc = list_entry(sfw_data.fw_zombie_rpcs.next,
- srpc_client_rpc_t, crpc_list);
+ struct srpc_client_rpc, crpc_list);
list_del(&rpc->crpc_list);
srpc_init_client_rpc(rpc, peer, service, 0, 0,
@@ -1415,7 +1415,7 @@ sfw_create_rpc(lnet_process_id_t peer, int service,
}
void
-sfw_unpack_message(srpc_msg_t *msg)
+sfw_unpack_message(struct srpc_msg *msg)
{
if (msg->msg_magic == SRPC_MSG_MAGIC)
return; /* no flipping needed */
@@ -1424,7 +1424,7 @@ sfw_unpack_message(srpc_msg_t *msg)
LASSERT(msg->msg_magic == __swab32(SRPC_MSG_MAGIC));
if (msg->msg_type == SRPC_MSG_STAT_REQST) {
- srpc_stat_reqst_t *req = &msg->msg_body.stat_reqst;
+ struct srpc_stat_reqst *req = &msg->msg_body.stat_reqst;
__swab32s(&req->str_type);
__swab64s(&req->str_rpyid);
@@ -1433,7 +1433,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_STAT_REPLY) {
- srpc_stat_reply_t *rep = &msg->msg_body.stat_reply;
+ struct srpc_stat_reply *rep = &msg->msg_body.stat_reply;
__swab32s(&rep->str_status);
sfw_unpack_sid(rep->str_sid);
@@ -1444,7 +1444,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_MKSN_REQST) {
- srpc_mksn_reqst_t *req = &msg->msg_body.mksn_reqst;
+ struct srpc_mksn_reqst *req = &msg->msg_body.mksn_reqst;
__swab64s(&req->mksn_rpyid);
__swab32s(&req->mksn_force);
@@ -1453,7 +1453,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_MKSN_REPLY) {
- srpc_mksn_reply_t *rep = &msg->msg_body.mksn_reply;
+ struct srpc_mksn_reply *rep = &msg->msg_body.mksn_reply;
__swab32s(&rep->mksn_status);
__swab32s(&rep->mksn_timeout);
@@ -1462,7 +1462,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_RMSN_REQST) {
- srpc_rmsn_reqst_t *req = &msg->msg_body.rmsn_reqst;
+ struct srpc_rmsn_reqst *req = &msg->msg_body.rmsn_reqst;
__swab64s(&req->rmsn_rpyid);
sfw_unpack_sid(req->rmsn_sid);
@@ -1470,7 +1470,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_RMSN_REPLY) {
- srpc_rmsn_reply_t *rep = &msg->msg_body.rmsn_reply;
+ struct srpc_rmsn_reply *rep = &msg->msg_body.rmsn_reply;
__swab32s(&rep->rmsn_status);
sfw_unpack_sid(rep->rmsn_sid);
@@ -1478,7 +1478,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_DEBUG_REQST) {
- srpc_debug_reqst_t *req = &msg->msg_body.dbg_reqst;
+ struct srpc_debug_reqst *req = &msg->msg_body.dbg_reqst;
__swab64s(&req->dbg_rpyid);
__swab32s(&req->dbg_flags);
@@ -1487,7 +1487,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_DEBUG_REPLY) {
- srpc_debug_reply_t *rep = &msg->msg_body.dbg_reply;
+ struct srpc_debug_reply *rep = &msg->msg_body.dbg_reply;
__swab32s(&rep->dbg_nbatch);
__swab32s(&rep->dbg_timeout);
@@ -1496,7 +1496,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_BATCH_REQST) {
- srpc_batch_reqst_t *req = &msg->msg_body.bat_reqst;
+ struct srpc_batch_reqst *req = &msg->msg_body.bat_reqst;
__swab32s(&req->bar_opc);
__swab64s(&req->bar_rpyid);
@@ -1508,7 +1508,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_BATCH_REPLY) {
- srpc_batch_reply_t *rep = &msg->msg_body.bat_reply;
+ struct srpc_batch_reply *rep = &msg->msg_body.bat_reply;
__swab32s(&rep->bar_status);
sfw_unpack_sid(rep->bar_sid);
@@ -1516,7 +1516,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_TEST_REQST) {
- srpc_test_reqst_t *req = &msg->msg_body.tes_reqst;
+ struct srpc_test_reqst *req = &msg->msg_body.tes_reqst;
__swab64s(&req->tsr_rpyid);
__swab64s(&req->tsr_bulkid);
@@ -1530,7 +1530,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_TEST_REPLY) {
- srpc_test_reply_t *rep = &msg->msg_body.tes_reply;
+ struct srpc_test_reply *rep = &msg->msg_body.tes_reply;
__swab32s(&rep->tsr_status);
sfw_unpack_sid(rep->tsr_sid);
@@ -1538,7 +1538,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_JOIN_REQST) {
- srpc_join_reqst_t *req = &msg->msg_body.join_reqst;
+ struct srpc_join_reqst *req = &msg->msg_body.join_reqst;
__swab64s(&req->join_rpyid);
sfw_unpack_sid(req->join_sid);
@@ -1546,7 +1546,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
if (msg->msg_type == SRPC_MSG_JOIN_REPLY) {
- srpc_join_reply_t *rep = &msg->msg_body.join_reply;
+ struct srpc_join_reply *rep = &msg->msg_body.join_reply;
__swab32s(&rep->join_status);
__swab32s(&rep->join_timeout);
@@ -1558,7 +1558,7 @@ sfw_unpack_message(srpc_msg_t *msg)
}
void
-sfw_abort_rpc(srpc_client_rpc_t *rpc)
+sfw_abort_rpc(struct srpc_client_rpc *rpc)
{
LASSERT(atomic_read(&rpc->crpc_refcount) > 0);
LASSERT(rpc->crpc_service <= SRPC_FRAMEWORK_SERVICE_MAX_ID);
@@ -1569,7 +1569,7 @@ sfw_abort_rpc(srpc_client_rpc_t *rpc)
}
void
-sfw_post_rpc(srpc_client_rpc_t *rpc)
+sfw_post_rpc(struct srpc_client_rpc *rpc)
{
spin_lock(&rpc->crpc_lock);
@@ -1584,7 +1584,7 @@ sfw_post_rpc(srpc_client_rpc_t *rpc)
spin_unlock(&rpc->crpc_lock);
}
-static srpc_service_t sfw_services[] = {
+static struct srpc_service sfw_services[] = {
{
/* sv_id */ SRPC_SERVICE_DEBUG,
/* sv_name */ "debug",
@@ -1628,8 +1628,8 @@ sfw_startup(void)
int i;
int rc;
int error;
- srpc_service_t *sv;
- sfw_test_case_t *tsc;
+ struct srpc_service *sv;
+ struct sfw_test_case *tsc;
if (session_timeout < 0) {
CERROR("Session timeout must be non-negative: %d\n",
@@ -1721,8 +1721,8 @@ sfw_startup(void)
void
sfw_shutdown(void)
{
- srpc_service_t *sv;
- sfw_test_case_t *tsc;
+ struct srpc_service *sv;
+ struct sfw_test_case *tsc;
int i;
spin_lock(&sfw_data.fw_lock);
@@ -1759,10 +1759,10 @@ sfw_shutdown(void)
}
while (!list_empty(&sfw_data.fw_zombie_rpcs)) {
- srpc_client_rpc_t *rpc;
+ struct srpc_client_rpc *rpc;
rpc = list_entry(sfw_data.fw_zombie_rpcs.next,
- srpc_client_rpc_t, crpc_list);
+ struct srpc_client_rpc, crpc_list);
list_del(&rpc->crpc_list);
LIBCFS_FREE(rpc, srpc_client_rpc_size(rpc));
@@ -1778,7 +1778,7 @@ sfw_shutdown(void)
while (!list_empty(&sfw_data.fw_tests)) {
tsc = list_entry(sfw_data.fw_tests.next,
- sfw_test_case_t, tsc_list);
+ struct sfw_test_case, tsc_list);
srpc_wait_service_shutdown(tsc->tsc_srv_service);
diff --git a/drivers/staging/lustre/lnet/selftest/ping_test.c b/drivers/staging/lustre/lnet/selftest/ping_test.c
index 81a45045e186..ad26fe9dd4af 100644
--- a/drivers/staging/lustre/lnet/selftest/ping_test.c
+++ b/drivers/staging/lustre/lnet/selftest/ping_test.c
@@ -56,9 +56,9 @@ struct lst_ping_data {
static struct lst_ping_data lst_ping_data;
static int
-ping_client_init(sfw_test_instance_t *tsi)
+ping_client_init(struct sfw_test_instance *tsi)
{
- sfw_session_t *sn = tsi->tsi_batch->bat_session;
+ struct sfw_session *sn = tsi->tsi_batch->bat_session;
LASSERT(tsi->tsi_is_client);
LASSERT(sn && !(sn->sn_features & ~LST_FEATS_MASK));
@@ -70,9 +70,9 @@ ping_client_init(sfw_test_instance_t *tsi)
}
static void
-ping_client_fini(sfw_test_instance_t *tsi)
+ping_client_fini(struct sfw_test_instance *tsi)
{
- sfw_session_t *sn = tsi->tsi_batch->bat_session;
+ struct sfw_session *sn = tsi->tsi_batch->bat_session;
int errors;
LASSERT(sn);
@@ -86,12 +86,12 @@ ping_client_fini(sfw_test_instance_t *tsi)
}
static int
-ping_client_prep_rpc(sfw_test_unit_t *tsu,
- lnet_process_id_t dest, srpc_client_rpc_t **rpc)
+ping_client_prep_rpc(struct sfw_test_unit *tsu, lnet_process_id_t dest,
+ struct srpc_client_rpc **rpc)
{
- srpc_ping_reqst_t *req;
- sfw_test_instance_t *tsi = tsu->tsu_instance;
- sfw_session_t *sn = tsi->tsi_batch->bat_session;
+ struct srpc_ping_reqst *req;
+ struct sfw_test_instance *tsi = tsu->tsu_instance;
+ struct sfw_session *sn = tsi->tsi_batch->bat_session;
struct timespec64 ts;
int rc;
@@ -118,18 +118,18 @@ ping_client_prep_rpc(sfw_test_unit_t *tsu,
}
static void
-ping_client_done_rpc(sfw_test_unit_t *tsu, srpc_client_rpc_t *rpc)
+ping_client_done_rpc(struct sfw_test_unit *tsu, struct srpc_client_rpc *rpc)
{
- sfw_test_instance_t *tsi = tsu->tsu_instance;
- sfw_session_t *sn = tsi->tsi_batch->bat_session;
- srpc_ping_reqst_t *reqst = &rpc->crpc_reqstmsg.msg_body.ping_reqst;
- srpc_ping_reply_t *reply = &rpc->crpc_replymsg.msg_body.ping_reply;
+ struct sfw_test_instance *tsi = tsu->tsu_instance;
+ struct sfw_session *sn = tsi->tsi_batch->bat_session;
+ struct srpc_ping_reqst *reqst = &rpc->crpc_reqstmsg.msg_body.ping_reqst;
+ struct srpc_ping_reply *reply = &rpc->crpc_replymsg.msg_body.ping_reply;
struct timespec64 ts;
LASSERT(sn);
if (rpc->crpc_status) {
- if (!tsi->tsi_stopping) /* rpc could have been aborted */
+ if (!tsi->tsi_stopping) /* rpc could have been aborted */
atomic_inc(&sn->sn_ping_errors);
CERROR("Unable to ping %s (%d): %d\n",
libcfs_id2str(rpc->crpc_dest),
@@ -171,10 +171,10 @@ static int
ping_server_handle(struct srpc_server_rpc *rpc)
{
struct srpc_service *sv = rpc->srpc_scd->scd_svc;
- srpc_msg_t *reqstmsg = &rpc->srpc_reqstbuf->buf_msg;
- srpc_msg_t *replymsg = &rpc->srpc_replymsg;
- srpc_ping_reqst_t *req = &reqstmsg->msg_body.ping_reqst;
- srpc_ping_reply_t *rep = &rpc->srpc_replymsg.msg_body.ping_reply;
+ struct srpc_msg *reqstmsg = &rpc->srpc_reqstbuf->buf_msg;
+ struct srpc_msg *replymsg = &rpc->srpc_replymsg;
+ struct srpc_ping_reqst *req = &reqstmsg->msg_body.ping_reqst;
+ struct srpc_ping_reply *rep = &rpc->srpc_replymsg.msg_body.ping_reply;
LASSERT(sv->sv_id == SRPC_SERVICE_PING);
@@ -210,7 +210,8 @@ ping_server_handle(struct srpc_server_rpc *rpc)
return 0;
}
-sfw_test_client_ops_t ping_test_client;
+struct sfw_test_client_ops ping_test_client;
+
void ping_init_test_client(void)
{
ping_test_client.tso_init = ping_client_init;
@@ -219,7 +220,8 @@ void ping_init_test_client(void)
ping_test_client.tso_done_rpc = ping_client_done_rpc;
}
-srpc_service_t ping_test_service;
+struct srpc_service ping_test_service;
+
void ping_init_test_service(void)
{
ping_test_service.sv_id = SRPC_SERVICE_PING;
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.c b/drivers/staging/lustre/lnet/selftest/rpc.c
index 7d7748d96332..3c45a7cfae18 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.c
+++ b/drivers/staging/lustre/lnet/selftest/rpc.c
@@ -46,19 +46,19 @@
#include "selftest.h"
-typedef enum {
+enum srpc_state {
SRPC_STATE_NONE,
SRPC_STATE_NI_INIT,
SRPC_STATE_EQ_INIT,
SRPC_STATE_RUNNING,
SRPC_STATE_STOPPING,
-} srpc_state_t;
+};
static struct smoketest_rpc {
spinlock_t rpc_glock; /* global lock */
- srpc_service_t *rpc_services[SRPC_SERVICE_MAX_ID + 1];
+ struct srpc_service *rpc_services[SRPC_SERVICE_MAX_ID + 1];
lnet_handle_eq_t rpc_lnet_eq; /* _the_ LNet event queue */
- srpc_state_t rpc_state;
+ enum srpc_state rpc_state;
srpc_counters_t rpc_counters;
__u64 rpc_matchbits; /* matchbits counter */
} srpc_data;
@@ -71,7 +71,7 @@ srpc_serv_portal(int svc_id)
}
/* forward ref's */
-int srpc_handle_rpc(swi_workitem_t *wi);
+int srpc_handle_rpc(struct swi_workitem *wi);
void srpc_get_counters(srpc_counters_t *cnt)
{
@@ -88,7 +88,7 @@ void srpc_set_counters(const srpc_counters_t *cnt)
}
static int
-srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob)
+srpc_add_bulk_page(struct srpc_bulk *bk, struct page *pg, int i, int nob)
{
nob = min_t(int, nob, PAGE_SIZE);
@@ -102,7 +102,7 @@ srpc_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i, int nob)
}
void
-srpc_free_bulk(srpc_bulk_t *bk)
+srpc_free_bulk(struct srpc_bulk *bk)
{
int i;
struct page *pg;
@@ -117,25 +117,25 @@ srpc_free_bulk(srpc_bulk_t *bk)
__free_page(pg);
}
- LIBCFS_FREE(bk, offsetof(srpc_bulk_t, bk_iovs[bk->bk_niov]));
+ LIBCFS_FREE(bk, offsetof(struct srpc_bulk, bk_iovs[bk->bk_niov]));
}
-srpc_bulk_t *
+struct srpc_bulk *
srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len, int sink)
{
- srpc_bulk_t *bk;
+ struct srpc_bulk *bk;
int i;
LASSERT(bulk_npg > 0 && bulk_npg <= LNET_MAX_IOV);
LIBCFS_CPT_ALLOC(bk, lnet_cpt_table(), cpt,
- offsetof(srpc_bulk_t, bk_iovs[bulk_npg]));
+ offsetof(struct srpc_bulk, bk_iovs[bulk_npg]));
if (!bk) {
CERROR("Can't allocate descriptor for %d pages\n", bulk_npg);
return NULL;
}
- memset(bk, 0, offsetof(srpc_bulk_t, bk_iovs[bulk_npg]));
+ memset(bk, 0, offsetof(struct srpc_bulk, bk_iovs[bulk_npg]));
bk->bk_sink = sink;
bk->bk_len = bulk_len;
bk->bk_niov = bulk_npg;
@@ -256,7 +256,7 @@ srpc_service_init(struct srpc_service *svc)
svc->sv_shuttingdown = 0;
svc->sv_cpt_data = cfs_percpt_alloc(lnet_cpt_table(),
- sizeof(struct srpc_service_cd));
+ sizeof(*svc->sv_cpt_data));
if (!svc->sv_cpt_data)
return -ENOMEM;
@@ -338,7 +338,7 @@ srpc_add_service(struct srpc_service *sv)
}
int
-srpc_remove_service(srpc_service_t *sv)
+srpc_remove_service(struct srpc_service *sv)
{
int id = sv->sv_id;
@@ -357,7 +357,7 @@ srpc_remove_service(srpc_service_t *sv)
static int
srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf,
int len, int options, lnet_process_id_t peer,
- lnet_handle_md_t *mdh, srpc_event_t *ev)
+ lnet_handle_md_t *mdh, struct srpc_event *ev)
{
int rc;
lnet_md_t md;
@@ -396,7 +396,7 @@ srpc_post_passive_rdma(int portal, int local, __u64 matchbits, void *buf,
static int
srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len,
int options, lnet_process_id_t peer, lnet_nid_t self,
- lnet_handle_md_t *mdh, srpc_event_t *ev)
+ lnet_handle_md_t *mdh, struct srpc_event *ev)
{
int rc;
lnet_md_t md;
@@ -449,7 +449,7 @@ srpc_post_active_rdma(int portal, __u64 matchbits, void *buf, int len,
static int
srpc_post_passive_rqtbuf(int service, int local, void *buf, int len,
- lnet_handle_md_t *mdh, srpc_event_t *ev)
+ lnet_handle_md_t *mdh, struct srpc_event *ev)
{
lnet_process_id_t any = { 0 };
@@ -697,7 +697,7 @@ srpc_finish_service(struct srpc_service *sv)
/* called with sv->sv_lock held */
static void
-srpc_service_recycle_buffer(struct srpc_service_cd *scd, srpc_buffer_t *buf)
+srpc_service_recycle_buffer(struct srpc_service_cd *scd, struct srpc_buffer *buf)
__must_hold(&scd->scd_lock)
{
if (!scd->scd_svc->sv_shuttingdown && scd->scd_buf_adjust >= 0) {
@@ -755,11 +755,11 @@ srpc_abort_service(struct srpc_service *sv)
}
void
-srpc_shutdown_service(srpc_service_t *sv)
+srpc_shutdown_service(struct srpc_service *sv)
{
struct srpc_service_cd *scd;
struct srpc_server_rpc *rpc;
- srpc_buffer_t *buf;
+ struct srpc_buffer *buf;
int i;
CDEBUG(D_NET, "Shutting down service: id %d, name %s\n",
@@ -792,9 +792,9 @@ srpc_shutdown_service(srpc_service_t *sv)
}
static int
-srpc_send_request(srpc_client_rpc_t *rpc)
+srpc_send_request(struct srpc_client_rpc *rpc)
{
- srpc_event_t *ev = &rpc->crpc_reqstev;
+ struct srpc_event *ev = &rpc->crpc_reqstev;
int rc;
ev->ev_fired = 0;
@@ -803,7 +803,7 @@ srpc_send_request(srpc_client_rpc_t *rpc)
rc = srpc_post_active_rdma(srpc_serv_portal(rpc->crpc_service),
rpc->crpc_service, &rpc->crpc_reqstmsg,
- sizeof(srpc_msg_t), LNET_MD_OP_PUT,
+ sizeof(struct srpc_msg), LNET_MD_OP_PUT,
rpc->crpc_dest, LNET_NID_ANY,
&rpc->crpc_reqstmdh, ev);
if (rc) {
@@ -814,9 +814,9 @@ srpc_send_request(srpc_client_rpc_t *rpc)
}
static int
-srpc_prepare_reply(srpc_client_rpc_t *rpc)
+srpc_prepare_reply(struct srpc_client_rpc *rpc)
{
- srpc_event_t *ev = &rpc->crpc_replyev;
+ struct srpc_event *ev = &rpc->crpc_replyev;
__u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.rpyid;
int rc;
@@ -827,7 +827,8 @@ srpc_prepare_reply(srpc_client_rpc_t *rpc)
*id = srpc_next_id();
rc = srpc_post_passive_rdma(SRPC_RDMA_PORTAL, 0, *id,
- &rpc->crpc_replymsg, sizeof(srpc_msg_t),
+ &rpc->crpc_replymsg,
+ sizeof(struct srpc_msg),
LNET_MD_OP_PUT, rpc->crpc_dest,
&rpc->crpc_replymdh, ev);
if (rc) {
@@ -838,10 +839,10 @@ srpc_prepare_reply(srpc_client_rpc_t *rpc)
}
static int
-srpc_prepare_bulk(srpc_client_rpc_t *rpc)
+srpc_prepare_bulk(struct srpc_client_rpc *rpc)
{
- srpc_bulk_t *bk = &rpc->crpc_bulk;
- srpc_event_t *ev = &rpc->crpc_bulkev;
+ struct srpc_bulk *bk = &rpc->crpc_bulk;
+ struct srpc_event *ev = &rpc->crpc_bulkev;
__u64 *id = &rpc->crpc_reqstmsg.msg_body.reqst.bulkid;
int rc;
int opt;
@@ -873,8 +874,8 @@ srpc_prepare_bulk(srpc_client_rpc_t *rpc)
static int
srpc_do_bulk(struct srpc_server_rpc *rpc)
{
- srpc_event_t *ev = &rpc->srpc_ev;
- srpc_bulk_t *bk = rpc->srpc_bulk;
+ struct srpc_event *ev = &rpc->srpc_ev;
+ struct srpc_bulk *bk = rpc->srpc_bulk;
__u64 id = rpc->srpc_reqstbuf->buf_msg.msg_body.reqst.bulkid;
int rc;
int opt;
@@ -903,7 +904,7 @@ srpc_server_rpc_done(struct srpc_server_rpc *rpc, int status)
{
struct srpc_service_cd *scd = rpc->srpc_scd;
struct srpc_service *sv = scd->scd_svc;
- srpc_buffer_t *buffer;
+ struct srpc_buffer *buffer;
LASSERT(status || rpc->srpc_wi.swi_state == SWI_STATE_DONE);
@@ -948,7 +949,7 @@ srpc_server_rpc_done(struct srpc_server_rpc *rpc, int status)
if (!sv->sv_shuttingdown && !list_empty(&scd->scd_buf_blocked)) {
buffer = list_entry(scd->scd_buf_blocked.next,
- srpc_buffer_t, buf_list);
+ struct srpc_buffer, buf_list);
list_del(&buffer->buf_list);
srpc_init_server_rpc(rpc, scd, buffer);
@@ -963,12 +964,12 @@ srpc_server_rpc_done(struct srpc_server_rpc *rpc, int status)
/* handles an incoming RPC */
int
-srpc_handle_rpc(swi_workitem_t *wi)
+srpc_handle_rpc(struct swi_workitem *wi)
{
struct srpc_server_rpc *rpc = wi->swi_workitem.wi_data;
struct srpc_service_cd *scd = rpc->srpc_scd;
struct srpc_service *sv = scd->scd_svc;
- srpc_event_t *ev = &rpc->srpc_ev;
+ struct srpc_event *ev = &rpc->srpc_ev;
int rc = 0;
LASSERT(wi == &rpc->srpc_wi);
@@ -995,8 +996,8 @@ srpc_handle_rpc(swi_workitem_t *wi)
default:
LBUG();
case SWI_STATE_NEWBORN: {
- srpc_msg_t *msg;
- srpc_generic_reply_t *reply;
+ struct srpc_msg *msg;
+ struct srpc_generic_reply *reply;
msg = &rpc->srpc_reqstbuf->buf_msg;
reply = &rpc->srpc_replymsg.msg_body.reply;
@@ -1077,7 +1078,7 @@ srpc_handle_rpc(swi_workitem_t *wi)
static void
srpc_client_rpc_expired(void *data)
{
- srpc_client_rpc_t *rpc = data;
+ struct srpc_client_rpc *rpc = data;
CWARN("Client RPC expired: service %d, peer %s, timeout %d.\n",
rpc->crpc_service, libcfs_id2str(rpc->crpc_dest),
@@ -1096,7 +1097,7 @@ srpc_client_rpc_expired(void *data)
}
static void
-srpc_add_client_rpc_timer(srpc_client_rpc_t *rpc)
+srpc_add_client_rpc_timer(struct srpc_client_rpc *rpc)
{
struct stt_timer *timer = &rpc->crpc_timer;
@@ -1117,7 +1118,7 @@ srpc_add_client_rpc_timer(srpc_client_rpc_t *rpc)
* running on any CPU.
*/
static void
-srpc_del_client_rpc_timer(srpc_client_rpc_t *rpc)
+srpc_del_client_rpc_timer(struct srpc_client_rpc *rpc)
{
/* timer not planted or already exploded */
if (!rpc->crpc_timeout)
@@ -1138,9 +1139,9 @@ srpc_del_client_rpc_timer(srpc_client_rpc_t *rpc)
}
static void
-srpc_client_rpc_done(srpc_client_rpc_t *rpc, int status)
+srpc_client_rpc_done(struct srpc_client_rpc *rpc, int status)
{
- swi_workitem_t *wi = &rpc->crpc_wi;
+ struct swi_workitem *wi = &rpc->crpc_wi;
LASSERT(status || wi->swi_state == SWI_STATE_DONE);
@@ -1175,11 +1176,11 @@ srpc_client_rpc_done(srpc_client_rpc_t *rpc, int status)
/* sends an outgoing RPC */
int
-srpc_send_rpc(swi_workitem_t *wi)
+srpc_send_rpc(struct swi_workitem *wi)
{
int rc = 0;
- srpc_client_rpc_t *rpc;
- srpc_msg_t *reply;
+ struct srpc_client_rpc *rpc;
+ struct srpc_msg *reply;
int do_bulk;
LASSERT(wi);
@@ -1237,7 +1238,7 @@ srpc_send_rpc(swi_workitem_t *wi)
wi->swi_state = SWI_STATE_REQUEST_SENT;
/* perhaps more events, fall thru */
case SWI_STATE_REQUEST_SENT: {
- srpc_msg_type_t type = srpc_service2reply(rpc->crpc_service);
+ enum srpc_msg_type type = srpc_service2reply(rpc->crpc_service);
if (!rpc->crpc_replyev.ev_fired)
break;
@@ -1308,15 +1309,15 @@ abort:
return 0;
}
-srpc_client_rpc_t *
+struct srpc_client_rpc *
srpc_create_client_rpc(lnet_process_id_t peer, int service,
int nbulkiov, int bulklen,
- void (*rpc_done)(srpc_client_rpc_t *),
- void (*rpc_fini)(srpc_client_rpc_t *), void *priv)
+ void (*rpc_done)(struct srpc_client_rpc *),
+ void (*rpc_fini)(struct srpc_client_rpc *), void *priv)
{
- srpc_client_rpc_t *rpc;
+ struct srpc_client_rpc *rpc;
- LIBCFS_ALLOC(rpc, offsetof(srpc_client_rpc_t,
+ LIBCFS_ALLOC(rpc, offsetof(struct srpc_client_rpc,
crpc_bulk.bk_iovs[nbulkiov]));
if (!rpc)
return NULL;
@@ -1328,12 +1329,12 @@ srpc_create_client_rpc(lnet_process_id_t peer, int service,
/* called with rpc->crpc_lock held */
void
-srpc_abort_rpc(srpc_client_rpc_t *rpc, int why)
+srpc_abort_rpc(struct srpc_client_rpc *rpc, int why)
{
LASSERT(why);
- if (rpc->crpc_aborted || /* already aborted */
- rpc->crpc_closed) /* callback imminent */
+ if (rpc->crpc_aborted || /* already aborted */
+ rpc->crpc_closed) /* callback imminent */
return;
CDEBUG(D_NET, "Aborting RPC: service %d, peer %s, state %s, why %d\n",
@@ -1347,7 +1348,7 @@ srpc_abort_rpc(srpc_client_rpc_t *rpc, int why)
/* called with rpc->crpc_lock held */
void
-srpc_post_rpc(srpc_client_rpc_t *rpc)
+srpc_post_rpc(struct srpc_client_rpc *rpc)
{
LASSERT(!rpc->crpc_aborted);
LASSERT(srpc_data.rpc_state == SRPC_STATE_RUNNING);
@@ -1363,7 +1364,7 @@ srpc_post_rpc(srpc_client_rpc_t *rpc)
int
srpc_send_reply(struct srpc_server_rpc *rpc)
{
- srpc_event_t *ev = &rpc->srpc_ev;
+ struct srpc_event *ev = &rpc->srpc_ev;
struct srpc_msg *msg = &rpc->srpc_replymsg;
struct srpc_buffer *buffer = rpc->srpc_reqstbuf;
struct srpc_service_cd *scd = rpc->srpc_scd;
@@ -1401,7 +1402,7 @@ srpc_send_reply(struct srpc_server_rpc *rpc)
rpc->srpc_peer, rpc->srpc_self,
&rpc->srpc_replymdh, ev);
if (rc)
- ev->ev_fired = 1; /* no more event expected */
+ ev->ev_fired = 1; /* no more event expected */
return rc;
}
@@ -1410,13 +1411,13 @@ static void
srpc_lnet_ev_handler(lnet_event_t *ev)
{
struct srpc_service_cd *scd;
- srpc_event_t *rpcev = ev->md.user_ptr;
- srpc_client_rpc_t *crpc;
+ struct srpc_event *rpcev = ev->md.user_ptr;
+ struct srpc_client_rpc *crpc;
struct srpc_server_rpc *srpc;
- srpc_buffer_t *buffer;
- srpc_service_t *sv;
- srpc_msg_t *msg;
- srpc_msg_type_t type;
+ struct srpc_buffer *buffer;
+ struct srpc_service *sv;
+ struct srpc_msg *msg;
+ enum srpc_msg_type type;
LASSERT(!in_interrupt());
@@ -1486,7 +1487,7 @@ srpc_lnet_ev_handler(lnet_event_t *ev)
LASSERT(ev->type != LNET_EVENT_UNLINK ||
sv->sv_shuttingdown);
- buffer = container_of(ev->md.start, srpc_buffer_t, buf_msg);
+ buffer = container_of(ev->md.start, struct srpc_buffer, buf_msg);
buffer->buf_peer = ev->initiator;
buffer->buf_self = ev->target.nid;
@@ -1509,7 +1510,7 @@ srpc_lnet_ev_handler(lnet_event_t *ev)
scd->scd_buf_err = 0;
}
- if (!scd->scd_buf_err && /* adding buffer is enabled */
+ if (!scd->scd_buf_err && /* adding buffer is enabled */
!scd->scd_buf_adjust &&
scd->scd_buf_nposted < scd->scd_buf_low) {
scd->scd_buf_adjust = max(scd->scd_buf_total / 2,
@@ -1663,7 +1664,7 @@ srpc_shutdown(void)
spin_lock(&srpc_data.rpc_glock);
for (i = 0; i <= SRPC_SERVICE_MAX_ID; i++) {
- srpc_service_t *sv = srpc_data.rpc_services[i];
+ struct srpc_service *sv = srpc_data.rpc_services[i];
LASSERTF(!sv, "service not empty: id %d, name %s\n",
i, sv->sv_name);
diff --git a/drivers/staging/lustre/lnet/selftest/rpc.h b/drivers/staging/lustre/lnet/selftest/rpc.h
index a79c315f2ceb..c9b904cade16 100644
--- a/drivers/staging/lustre/lnet/selftest/rpc.h
+++ b/drivers/staging/lustre/lnet/selftest/rpc.h
@@ -44,7 +44,7 @@
*
* XXX: *REPLY == *REQST + 1
*/
-typedef enum {
+enum srpc_msg_type {
SRPC_MSG_MKSN_REQST = 0,
SRPC_MSG_MKSN_REPLY = 1,
SRPC_MSG_RMSN_REQST = 2,
@@ -63,7 +63,7 @@ typedef enum {
SRPC_MSG_PING_REPLY = 15,
SRPC_MSG_JOIN_REQST = 16,
SRPC_MSG_JOIN_REPLY = 17,
-} srpc_msg_type_t;
+};
/* CAVEAT EMPTOR:
* All srpc_*_reqst_t's 1st field must be matchbits of reply buffer,
@@ -72,122 +72,122 @@ typedef enum {
* All srpc_*_reply_t's 1st field must be a __u32 status, and 2nd field
* session id if needed.
*/
-typedef struct {
+struct srpc_generic_reqst {
__u64 rpyid; /* reply buffer matchbits */
__u64 bulkid; /* bulk buffer matchbits */
-} WIRE_ATTR srpc_generic_reqst_t;
+} WIRE_ATTR;
-typedef struct {
+struct srpc_generic_reply {
__u32 status;
lst_sid_t sid;
-} WIRE_ATTR srpc_generic_reply_t;
+} WIRE_ATTR;
/* FRAMEWORK RPCs */
-typedef struct {
+struct srpc_mksn_reqst {
__u64 mksn_rpyid; /* reply buffer matchbits */
lst_sid_t mksn_sid; /* session id */
__u32 mksn_force; /* use brute force */
char mksn_name[LST_NAME_SIZE];
-} WIRE_ATTR srpc_mksn_reqst_t; /* make session request */
+} WIRE_ATTR; /* make session request */
-typedef struct {
+struct srpc_mksn_reply {
__u32 mksn_status; /* session status */
lst_sid_t mksn_sid; /* session id */
__u32 mksn_timeout; /* session timeout */
char mksn_name[LST_NAME_SIZE];
-} WIRE_ATTR srpc_mksn_reply_t; /* make session reply */
+} WIRE_ATTR; /* make session reply */
-typedef struct {
+struct srpc_rmsn_reqst {
__u64 rmsn_rpyid; /* reply buffer matchbits */
lst_sid_t rmsn_sid; /* session id */
-} WIRE_ATTR srpc_rmsn_reqst_t; /* remove session request */
+} WIRE_ATTR; /* remove session request */
-typedef struct {
+struct srpc_rmsn_reply {
__u32 rmsn_status;
lst_sid_t rmsn_sid; /* session id */
-} WIRE_ATTR srpc_rmsn_reply_t; /* remove session reply */
+} WIRE_ATTR; /* remove session reply */
-typedef struct {
+struct srpc_join_reqst {
__u64 join_rpyid; /* reply buffer matchbits */
lst_sid_t join_sid; /* session id to join */
char join_group[LST_NAME_SIZE]; /* group name */
-} WIRE_ATTR srpc_join_reqst_t;
+} WIRE_ATTR;
-typedef struct {
+struct srpc_join_reply {
__u32 join_status; /* returned status */
lst_sid_t join_sid; /* session id */
__u32 join_timeout; /* # seconds' inactivity to
* expire */
char join_session[LST_NAME_SIZE]; /* session name */
-} WIRE_ATTR srpc_join_reply_t;
+} WIRE_ATTR;
-typedef struct {
+struct srpc_debug_reqst {
__u64 dbg_rpyid; /* reply buffer matchbits */
lst_sid_t dbg_sid; /* session id */
__u32 dbg_flags; /* bitmap of debug */
-} WIRE_ATTR srpc_debug_reqst_t;
+} WIRE_ATTR;
-typedef struct {
+struct srpc_debug_reply {
__u32 dbg_status; /* returned code */
lst_sid_t dbg_sid; /* session id */
__u32 dbg_timeout; /* session timeout */
__u32 dbg_nbatch; /* # of batches in the node */
char dbg_name[LST_NAME_SIZE]; /* session name */
-} WIRE_ATTR srpc_debug_reply_t;
+} WIRE_ATTR;
#define SRPC_BATCH_OPC_RUN 1
#define SRPC_BATCH_OPC_STOP 2
#define SRPC_BATCH_OPC_QUERY 3
-typedef struct {
+struct srpc_batch_reqst {
__u64 bar_rpyid; /* reply buffer matchbits */
lst_sid_t bar_sid; /* session id */
lst_bid_t bar_bid; /* batch id */
__u32 bar_opc; /* create/start/stop batch */
__u32 bar_testidx; /* index of test */
__u32 bar_arg; /* parameters */
-} WIRE_ATTR srpc_batch_reqst_t;
+} WIRE_ATTR;
-typedef struct {
+struct srpc_batch_reply {
__u32 bar_status; /* status of request */
lst_sid_t bar_sid; /* session id */
__u32 bar_active; /* # of active tests in batch/test */
__u32 bar_time; /* remained time */
-} WIRE_ATTR srpc_batch_reply_t;
+} WIRE_ATTR;
-typedef struct {
+struct srpc_stat_reqst {
__u64 str_rpyid; /* reply buffer matchbits */
lst_sid_t str_sid; /* session id */
__u32 str_type; /* type of stat */
-} WIRE_ATTR srpc_stat_reqst_t;
+} WIRE_ATTR;
-typedef struct {
+struct srpc_stat_reply {
__u32 str_status;
lst_sid_t str_sid;
sfw_counters_t str_fw;
srpc_counters_t str_rpc;
lnet_counters_t str_lnet;
-} WIRE_ATTR srpc_stat_reply_t;
+} WIRE_ATTR;
-typedef struct {
+struct test_bulk_req {
__u32 blk_opc; /* bulk operation code */
__u32 blk_npg; /* # of pages */
__u32 blk_flags; /* reserved flags */
-} WIRE_ATTR test_bulk_req_t;
+} WIRE_ATTR;
-typedef struct {
+struct test_bulk_req_v1 {
__u16 blk_opc; /* bulk operation code */
__u16 blk_flags; /* data check flags */
__u32 blk_len; /* data length */
__u32 blk_offset; /* reserved: offset */
-} WIRE_ATTR test_bulk_req_v1_t;
+} WIRE_ATTR;
-typedef struct {
+struct test_ping_req {
__u32 png_size; /* size of ping message */
__u32 png_flags; /* reserved flags */
-} WIRE_ATTR test_ping_req_t;
+} WIRE_ATTR;
-typedef struct {
+struct srpc_test_reqst {
__u64 tsr_rpyid; /* reply buffer matchbits */
__u64 tsr_bulkid; /* bulk buffer matchbits */
lst_sid_t tsr_sid; /* session id */
@@ -201,82 +201,82 @@ typedef struct {
__u32 tsr_ndest; /* # of dest nodes */
union {
- test_ping_req_t ping;
- test_bulk_req_t bulk_v0;
- test_bulk_req_v1_t bulk_v1;
- } tsr_u;
-} WIRE_ATTR srpc_test_reqst_t;
+ struct test_ping_req ping;
+ struct test_bulk_req bulk_v0;
+ struct test_bulk_req_v1 bulk_v1;
+ } tsr_u;
+} WIRE_ATTR;
-typedef struct {
+struct srpc_test_reply {
__u32 tsr_status; /* returned code */
lst_sid_t tsr_sid;
-} WIRE_ATTR srpc_test_reply_t;
+} WIRE_ATTR;
/* TEST RPCs */
-typedef struct {
+struct srpc_ping_reqst {
__u64 pnr_rpyid;
__u32 pnr_magic;
__u32 pnr_seq;
__u64 pnr_time_sec;
__u64 pnr_time_usec;
-} WIRE_ATTR srpc_ping_reqst_t;
+} WIRE_ATTR;
-typedef struct {
+struct srpc_ping_reply {
__u32 pnr_status;
__u32 pnr_magic;
__u32 pnr_seq;
-} WIRE_ATTR srpc_ping_reply_t;
+} WIRE_ATTR;
-typedef struct {
+struct srpc_brw_reqst {
__u64 brw_rpyid; /* reply buffer matchbits */
__u64 brw_bulkid; /* bulk buffer matchbits */
__u32 brw_rw; /* read or write */
__u32 brw_len; /* bulk data len */
__u32 brw_flags; /* bulk data patterns */
-} WIRE_ATTR srpc_brw_reqst_t; /* bulk r/w request */
+} WIRE_ATTR; /* bulk r/w request */
-typedef struct {
+struct srpc_brw_reply {
__u32 brw_status;
-} WIRE_ATTR srpc_brw_reply_t; /* bulk r/w reply */
+} WIRE_ATTR; /* bulk r/w reply */
#define SRPC_MSG_MAGIC 0xeeb0f00d
#define SRPC_MSG_VERSION 1
-typedef struct srpc_msg {
+struct srpc_msg {
__u32 msg_magic; /* magic number */
__u32 msg_version; /* message version number */
- __u32 msg_type; /* type of message body: srpc_msg_type_t */
+ __u32 msg_type; /* type of message body: srpc_msg_type */
__u32 msg_reserved0;
__u32 msg_reserved1;
__u32 msg_ses_feats; /* test session features */
union {
- srpc_generic_reqst_t reqst;
- srpc_generic_reply_t reply;
-
- srpc_mksn_reqst_t mksn_reqst;
- srpc_mksn_reply_t mksn_reply;
- srpc_rmsn_reqst_t rmsn_reqst;
- srpc_rmsn_reply_t rmsn_reply;
- srpc_debug_reqst_t dbg_reqst;
- srpc_debug_reply_t dbg_reply;
- srpc_batch_reqst_t bat_reqst;
- srpc_batch_reply_t bat_reply;
- srpc_stat_reqst_t stat_reqst;
- srpc_stat_reply_t stat_reply;
- srpc_test_reqst_t tes_reqst;
- srpc_test_reply_t tes_reply;
- srpc_join_reqst_t join_reqst;
- srpc_join_reply_t join_reply;
-
- srpc_ping_reqst_t ping_reqst;
- srpc_ping_reply_t ping_reply;
- srpc_brw_reqst_t brw_reqst;
- srpc_brw_reply_t brw_reply;
+ struct srpc_generic_reqst reqst;
+ struct srpc_generic_reply reply;
+
+ struct srpc_mksn_reqst mksn_reqst;
+ struct srpc_mksn_reply mksn_reply;
+ struct srpc_rmsn_reqst rmsn_reqst;
+ struct srpc_rmsn_reply rmsn_reply;
+ struct srpc_debug_reqst dbg_reqst;
+ struct srpc_debug_reply dbg_reply;
+ struct srpc_batch_reqst bat_reqst;
+ struct srpc_batch_reply bat_reply;
+ struct srpc_stat_reqst stat_reqst;
+ struct srpc_stat_reply stat_reply;
+ struct srpc_test_reqst tes_reqst;
+ struct srpc_test_reply tes_reply;
+ struct srpc_join_reqst join_reqst;
+ struct srpc_join_reply join_reply;
+
+ struct srpc_ping_reqst ping_reqst;
+ struct srpc_ping_reply ping_reply;
+ struct srpc_brw_reqst brw_reqst;
+ struct srpc_brw_reply brw_reply;
} msg_body;
-} WIRE_ATTR srpc_msg_t;
+} WIRE_ATTR;
static inline void
-srpc_unpack_msg_hdr(srpc_msg_t *msg)
+srpc_unpack_msg_hdr(struct srpc_msg *msg)
{
if (msg->msg_magic == SRPC_MSG_MAGIC)
return; /* no flipping needed */
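
[Illustrative aside, not part of the patch] The CAVEAT EMPTOR comment in rpc.h above is what lets generic code such as srpc_prepare_reply() dereference msg_body.reqst.rpyid without knowing the concrete request type: every srpc_*_reqst begins with the reply matchbits, every srpc_*_reply begins with a __u32 status, and the enum ordering guarantees REPLY == REQST + 1. A minimal user-space sketch of that overlay convention follows; the type and field names are invented for illustration and are not the driver's.

/*
 * Sketch of the wire convention described above: generic code overlays
 * a generic request on any concrete body, and derives the reply message
 * type by adding 1 to the request type.
 */
#include <stdint.h>
#include <stdio.h>

struct generic_reqst { uint64_t rpyid; uint64_t bulkid; };
struct ping_reqst    { uint64_t pnr_rpyid; uint32_t pnr_magic; uint32_t pnr_seq; };

union body {
	struct generic_reqst reqst;
	struct ping_reqst    ping_reqst;
};

int main(void)
{
	union body b = { .ping_reqst = { .pnr_rpyid = 42, .pnr_magic = 0xeeb0f00d } };

	/* Generic code reads the reply matchbits without knowing the body type. */
	printf("rpyid=%llu\n", (unsigned long long)b.reqst.rpyid);

	/* REPLY == REQST + 1, mirroring the enum ordering in rpc.h. */
	enum { MSG_PING_REQST = 14, MSG_PING_REPLY = MSG_PING_REQST + 1 };
	printf("reply type=%d\n", MSG_PING_REPLY);
	return 0;
}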
diff --git a/drivers/staging/lustre/lnet/selftest/selftest.h b/drivers/staging/lustre/lnet/selftest/selftest.h
index e689ca1846e1..4eac1c9e639f 100644
--- a/drivers/staging/lustre/lnet/selftest/selftest.h
+++ b/drivers/staging/lustre/lnet/selftest/selftest.h
@@ -93,7 +93,7 @@ struct sfw_test_instance;
/* all reply/bulk RDMAs go to this portal */
#define SRPC_RDMA_PORTAL 52
-static inline srpc_msg_type_t
+static inline enum srpc_msg_type
srpc_service2request(int service)
{
switch (service) {
@@ -128,13 +128,13 @@ srpc_service2request(int service)
}
}
-static inline srpc_msg_type_t
+static inline enum srpc_msg_type
srpc_service2reply(int service)
{
return srpc_service2request(service) + 1;
}
-typedef enum {
+enum srpc_event_type {
SRPC_BULK_REQ_RCVD = 1, /* passive bulk request(PUT sink/GET source)
* received */
SRPC_BULK_PUT_SENT = 2, /* active bulk PUT sent (source) */
@@ -143,57 +143,58 @@ typedef enum {
SRPC_REPLY_SENT = 5, /* outgoing reply sent */
SRPC_REQUEST_RCVD = 6, /* incoming request received */
SRPC_REQUEST_SENT = 7, /* outgoing request sent */
-} srpc_event_type_t;
+};
/* RPC event */
-typedef struct {
- srpc_event_type_t ev_type; /* what's up */
+struct srpc_event {
+ enum srpc_event_type ev_type; /* what's up */
lnet_event_kind_t ev_lnet; /* LNet event type */
int ev_fired; /* LNet event fired? */
int ev_status; /* LNet event status */
void *ev_data; /* owning server/client RPC */
-} srpc_event_t;
+};
-typedef struct {
+/* bulk descriptor */
+struct srpc_bulk {
int bk_len; /* len of bulk data */
lnet_handle_md_t bk_mdh;
int bk_sink; /* sink/source */
int bk_niov; /* # iov in bk_iovs */
lnet_kiov_t bk_iovs[0];
-} srpc_bulk_t; /* bulk descriptor */
+};
/* message buffer descriptor */
-typedef struct srpc_buffer {
+struct srpc_buffer {
struct list_head buf_list; /* chain on srpc_service::*_msgq */
- srpc_msg_t buf_msg;
+ struct srpc_msg buf_msg;
lnet_handle_md_t buf_mdh;
lnet_nid_t buf_self;
lnet_process_id_t buf_peer;
-} srpc_buffer_t;
+};
struct swi_workitem;
typedef int (*swi_action_t) (struct swi_workitem *);
-typedef struct swi_workitem {
+struct swi_workitem {
struct cfs_wi_sched *swi_sched;
- cfs_workitem_t swi_workitem;
+ struct cfs_workitem swi_workitem;
swi_action_t swi_action;
int swi_state;
-} swi_workitem_t;
+};
/* server-side state of a RPC */
struct srpc_server_rpc {
/* chain on srpc_service::*_rpcq */
struct list_head srpc_list;
struct srpc_service_cd *srpc_scd;
- swi_workitem_t srpc_wi;
- srpc_event_t srpc_ev; /* bulk/reply event */
+ struct swi_workitem srpc_wi;
+ struct srpc_event srpc_ev; /* bulk/reply event */
lnet_nid_t srpc_self;
lnet_process_id_t srpc_peer;
- srpc_msg_t srpc_replymsg;
+ struct srpc_msg srpc_replymsg;
lnet_handle_md_t srpc_replymdh;
- srpc_buffer_t *srpc_reqstbuf;
- srpc_bulk_t *srpc_bulk;
+ struct srpc_buffer *srpc_reqstbuf;
+ struct srpc_bulk *srpc_bulk;
unsigned int srpc_aborted; /* being given up */
int srpc_status;
@@ -201,14 +202,14 @@ struct srpc_server_rpc {
};
/* client-side state of a RPC */
-typedef struct srpc_client_rpc {
+struct srpc_client_rpc {
struct list_head crpc_list; /* chain on user's lists */
spinlock_t crpc_lock; /* serialize */
int crpc_service;
atomic_t crpc_refcount;
int crpc_timeout; /* # seconds to wait for reply */
struct stt_timer crpc_timer;
- swi_workitem_t crpc_wi;
+ struct swi_workitem crpc_wi;
lnet_process_id_t crpc_dest;
void (*crpc_done)(struct srpc_client_rpc *);
@@ -221,20 +222,20 @@ typedef struct srpc_client_rpc {
unsigned int crpc_closed:1; /* completed */
/* RPC events */
- srpc_event_t crpc_bulkev; /* bulk event */
- srpc_event_t crpc_reqstev; /* request event */
- srpc_event_t crpc_replyev; /* reply event */
+ struct srpc_event crpc_bulkev; /* bulk event */
+ struct srpc_event crpc_reqstev; /* request event */
+ struct srpc_event crpc_replyev; /* reply event */
/* bulk, request(reqst), and reply exchanged on wire */
- srpc_msg_t crpc_reqstmsg;
- srpc_msg_t crpc_replymsg;
+ struct srpc_msg crpc_reqstmsg;
+ struct srpc_msg crpc_replymsg;
lnet_handle_md_t crpc_reqstmdh;
lnet_handle_md_t crpc_replymdh;
- srpc_bulk_t crpc_bulk;
-} srpc_client_rpc_t;
+ struct srpc_bulk crpc_bulk;
+};
#define srpc_client_rpc_size(rpc) \
-offsetof(srpc_client_rpc_t, crpc_bulk.bk_iovs[(rpc)->crpc_bulk.bk_niov])
+offsetof(struct srpc_client_rpc, crpc_bulk.bk_iovs[(rpc)->crpc_bulk.bk_niov])
#define srpc_client_rpc_addref(rpc) \
do { \
@@ -266,13 +267,13 @@ struct srpc_service_cd {
/** backref to service */
struct srpc_service *scd_svc;
/** event buffer */
- srpc_event_t scd_ev;
+ struct srpc_event scd_ev;
/** free RPC descriptors */
struct list_head scd_rpc_free;
/** in-flight RPCs */
struct list_head scd_rpc_active;
/** workitem for posting buffer */
- swi_workitem_t scd_buf_wi;
+ struct swi_workitem scd_buf_wi;
/** CPT id */
int scd_cpt;
/** error code for scd_buf_wi */
@@ -306,7 +307,7 @@ struct srpc_service_cd {
#define SFW_FRWK_WI_MIN 16
#define SFW_FRWK_WI_MAX 256
-typedef struct srpc_service {
+struct srpc_service {
int sv_id; /* service id */
const char *sv_name; /* human readable name */
int sv_wi_total; /* total server workitems */
@@ -320,9 +321,9 @@ typedef struct srpc_service {
*/
int (*sv_handler)(struct srpc_server_rpc *);
int (*sv_bulk_ready)(struct srpc_server_rpc *, int);
-} srpc_service_t;
+};
-typedef struct {
+struct sfw_session {
struct list_head sn_list; /* chain on fw_zombie_sessions */
lst_sid_t sn_id; /* unique identifier */
unsigned int sn_timeout; /* # seconds' inactivity to expire */
@@ -335,37 +336,37 @@ typedef struct {
atomic_t sn_brw_errors;
atomic_t sn_ping_errors;
unsigned long sn_started;
-} sfw_session_t;
+};
#define sfw_sid_equal(sid0, sid1) ((sid0).ses_nid == (sid1).ses_nid && \
(sid0).ses_stamp == (sid1).ses_stamp)
-typedef struct {
+struct sfw_batch {
struct list_head bat_list; /* chain on sn_batches */
lst_bid_t bat_id; /* batch id */
int bat_error; /* error code of batch */
- sfw_session_t *bat_session; /* batch's session */
+ struct sfw_session *bat_session; /* batch's session */
atomic_t bat_nactive; /* # of active tests */
struct list_head bat_tests; /* test instances */
-} sfw_batch_t;
+};
-typedef struct {
+struct sfw_test_client_ops {
int (*tso_init)(struct sfw_test_instance *tsi); /* initialize test
* client */
void (*tso_fini)(struct sfw_test_instance *tsi); /* finalize test
* client */
int (*tso_prep_rpc)(struct sfw_test_unit *tsu,
lnet_process_id_t dest,
- srpc_client_rpc_t **rpc); /* prep a tests rpc */
+ struct srpc_client_rpc **rpc); /* prep a tests rpc */
void (*tso_done_rpc)(struct sfw_test_unit *tsu,
- srpc_client_rpc_t *rpc); /* done a test rpc */
-} sfw_test_client_ops_t;
+ struct srpc_client_rpc *rpc); /* done a test rpc */
+};
-typedef struct sfw_test_instance {
+struct sfw_test_instance {
struct list_head tsi_list; /* chain on batch */
int tsi_service; /* test type */
- sfw_batch_t *tsi_batch; /* batch */
- sfw_test_client_ops_t *tsi_ops; /* test client operation
+ struct sfw_batch *tsi_batch; /* batch */
+ struct sfw_test_client_ops *tsi_ops; /* test client operation
*/
/* public parameter for all test units */
@@ -384,11 +385,11 @@ typedef struct sfw_test_instance {
struct list_head tsi_active_rpcs; /* active rpcs */
union {
- test_ping_req_t ping; /* ping parameter */
- test_bulk_req_t bulk_v0; /* bulk parameter */
- test_bulk_req_v1_t bulk_v1; /* bulk v1 parameter */
+ struct test_ping_req ping; /* ping parameter */
+ struct test_bulk_req bulk_v0; /* bulk parameter */
+ struct test_bulk_req_v1 bulk_v1; /* bulk v1 parameter */
} tsi_u;
-} sfw_test_instance_t;
+};
/* XXX: trailing (PAGE_SIZE % sizeof(lnet_process_id_t)) bytes at the end of
* pages are not used */
@@ -397,57 +398,58 @@ typedef struct sfw_test_instance {
#define SFW_MAX_NDESTS (LNET_MAX_IOV * SFW_ID_PER_PAGE)
#define sfw_id_pages(n) (((n) + SFW_ID_PER_PAGE - 1) / SFW_ID_PER_PAGE)
-typedef struct sfw_test_unit {
+struct sfw_test_unit {
struct list_head tsu_list; /* chain on lst_test_instance */
lnet_process_id_t tsu_dest; /* id of dest node */
int tsu_loop; /* loop count of the test */
- sfw_test_instance_t *tsu_instance; /* pointer to test instance */
+ struct sfw_test_instance *tsu_instance; /* pointer to test instance */
void *tsu_private; /* private data */
- swi_workitem_t tsu_worker; /* workitem of the test unit */
-} sfw_test_unit_t;
+ struct swi_workitem tsu_worker; /* workitem of the test unit */
+};
-typedef struct sfw_test_case {
+struct sfw_test_case {
struct list_head tsc_list; /* chain on fw_tests */
- srpc_service_t *tsc_srv_service; /* test service */
- sfw_test_client_ops_t *tsc_cli_ops; /* ops of test client */
-} sfw_test_case_t;
+ struct srpc_service *tsc_srv_service; /* test service */
+ struct sfw_test_client_ops *tsc_cli_ops; /* ops of test client */
+};
-srpc_client_rpc_t *
+struct srpc_client_rpc *
sfw_create_rpc(lnet_process_id_t peer, int service,
unsigned features, int nbulkiov, int bulklen,
- void (*done)(srpc_client_rpc_t *), void *priv);
-int sfw_create_test_rpc(sfw_test_unit_t *tsu,
+ void (*done)(struct srpc_client_rpc *), void *priv);
+int sfw_create_test_rpc(struct sfw_test_unit *tsu,
lnet_process_id_t peer, unsigned features,
- int nblk, int blklen, srpc_client_rpc_t **rpc);
-void sfw_abort_rpc(srpc_client_rpc_t *rpc);
-void sfw_post_rpc(srpc_client_rpc_t *rpc);
-void sfw_client_rpc_done(srpc_client_rpc_t *rpc);
-void sfw_unpack_message(srpc_msg_t *msg);
+ int nblk, int blklen, struct srpc_client_rpc **rpc);
+void sfw_abort_rpc(struct srpc_client_rpc *rpc);
+void sfw_post_rpc(struct srpc_client_rpc *rpc);
+void sfw_client_rpc_done(struct srpc_client_rpc *rpc);
+void sfw_unpack_message(struct srpc_msg *msg);
void sfw_free_pages(struct srpc_server_rpc *rpc);
-void sfw_add_bulk_page(srpc_bulk_t *bk, struct page *pg, int i);
+void sfw_add_bulk_page(struct srpc_bulk *bk, struct page *pg, int i);
int sfw_alloc_pages(struct srpc_server_rpc *rpc, int cpt, int npages, int len,
int sink);
-int sfw_make_session(srpc_mksn_reqst_t *request, srpc_mksn_reply_t *reply);
+int sfw_make_session(struct srpc_mksn_reqst *request,
+ struct srpc_mksn_reply *reply);
-srpc_client_rpc_t *
+struct srpc_client_rpc *
srpc_create_client_rpc(lnet_process_id_t peer, int service,
int nbulkiov, int bulklen,
- void (*rpc_done)(srpc_client_rpc_t *),
- void (*rpc_fini)(srpc_client_rpc_t *), void *priv);
-void srpc_post_rpc(srpc_client_rpc_t *rpc);
-void srpc_abort_rpc(srpc_client_rpc_t *rpc, int why);
-void srpc_free_bulk(srpc_bulk_t *bk);
-srpc_bulk_t *srpc_alloc_bulk(int cpt, unsigned bulk_npg, unsigned bulk_len,
- int sink);
-int srpc_send_rpc(swi_workitem_t *wi);
+ void (*rpc_done)(struct srpc_client_rpc *),
+ void (*rpc_fini)(struct srpc_client_rpc *), void *priv);
+void srpc_post_rpc(struct srpc_client_rpc *rpc);
+void srpc_abort_rpc(struct srpc_client_rpc *rpc, int why);
+void srpc_free_bulk(struct srpc_bulk *bk);
+struct srpc_bulk *srpc_alloc_bulk(int cpt, unsigned bulk_npg,
+ unsigned bulk_len, int sink);
+int srpc_send_rpc(struct swi_workitem *wi);
int srpc_send_reply(struct srpc_server_rpc *rpc);
-int srpc_add_service(srpc_service_t *sv);
-int srpc_remove_service(srpc_service_t *sv);
-void srpc_shutdown_service(srpc_service_t *sv);
-void srpc_abort_service(srpc_service_t *sv);
-int srpc_finish_service(srpc_service_t *sv);
-int srpc_service_add_buffers(srpc_service_t *sv, int nbuffer);
-void srpc_service_remove_buffers(srpc_service_t *sv, int nbuffer);
+int srpc_add_service(struct srpc_service *sv);
+int srpc_remove_service(struct srpc_service *sv);
+void srpc_shutdown_service(struct srpc_service *sv);
+void srpc_abort_service(struct srpc_service *sv);
+int srpc_finish_service(struct srpc_service *sv);
+int srpc_service_add_buffers(struct srpc_service *sv, int nbuffer);
+void srpc_service_remove_buffers(struct srpc_service *sv, int nbuffer);
void srpc_get_counters(srpc_counters_t *cnt);
void srpc_set_counters(const srpc_counters_t *cnt);
@@ -461,15 +463,17 @@ srpc_serv_is_framework(struct srpc_service *svc)
}
static inline int
-swi_wi_action(cfs_workitem_t *wi)
+swi_wi_action(struct cfs_workitem *wi)
{
- swi_workitem_t *swi = container_of(wi, swi_workitem_t, swi_workitem);
+ struct swi_workitem *swi;
+
+ swi = container_of(wi, struct swi_workitem, swi_workitem);
return swi->swi_action(swi);
}
static inline void
-swi_init_workitem(swi_workitem_t *swi, void *data,
+swi_init_workitem(struct swi_workitem *swi, void *data,
swi_action_t action, struct cfs_wi_sched *sched)
{
swi->swi_sched = sched;
@@ -479,19 +483,19 @@ swi_init_workitem(swi_workitem_t *swi, void *data,
}
static inline void
-swi_schedule_workitem(swi_workitem_t *wi)
+swi_schedule_workitem(struct swi_workitem *wi)
{
cfs_wi_schedule(wi->swi_sched, &wi->swi_workitem);
}
static inline void
-swi_exit_workitem(swi_workitem_t *swi)
+swi_exit_workitem(struct swi_workitem *swi)
{
cfs_wi_exit(swi->swi_sched, &swi->swi_workitem);
}
static inline int
-swi_deschedule_workitem(swi_workitem_t *swi)
+swi_deschedule_workitem(struct swi_workitem *swi)
{
return cfs_wi_deschedule(swi->swi_sched, &swi->swi_workitem);
}
@@ -502,7 +506,7 @@ void sfw_shutdown(void);
void srpc_shutdown(void);
static inline void
-srpc_destroy_client_rpc(srpc_client_rpc_t *rpc)
+srpc_destroy_client_rpc(struct srpc_client_rpc *rpc)
{
LASSERT(rpc);
LASSERT(!srpc_event_pending(rpc));
@@ -515,14 +519,14 @@ srpc_destroy_client_rpc(srpc_client_rpc_t *rpc)
}
static inline void
-srpc_init_client_rpc(srpc_client_rpc_t *rpc, lnet_process_id_t peer,
+srpc_init_client_rpc(struct srpc_client_rpc *rpc, lnet_process_id_t peer,
int service, int nbulkiov, int bulklen,
- void (*rpc_done)(srpc_client_rpc_t *),
- void (*rpc_fini)(srpc_client_rpc_t *), void *priv)
+ void (*rpc_done)(struct srpc_client_rpc *),
+ void (*rpc_fini)(struct srpc_client_rpc *), void *priv)
{
LASSERT(nbulkiov <= LNET_MAX_IOV);
- memset(rpc, 0, offsetof(srpc_client_rpc_t,
+ memset(rpc, 0, offsetof(struct srpc_client_rpc,
crpc_bulk.bk_iovs[nbulkiov]));
INIT_LIST_HEAD(&rpc->crpc_list);
@@ -592,7 +596,7 @@ do { \
} while (0)
static inline void
-srpc_wait_service_shutdown(srpc_service_t *sv)
+srpc_wait_service_shutdown(struct srpc_service *sv)
{
int i = 2;
@@ -607,16 +611,16 @@ srpc_wait_service_shutdown(srpc_service_t *sv)
}
}
-extern sfw_test_client_ops_t brw_test_client;
+extern struct sfw_test_client_ops brw_test_client;
void brw_init_test_client(void);
-extern srpc_service_t brw_test_service;
+extern struct srpc_service brw_test_service;
void brw_init_test_service(void);
-extern sfw_test_client_ops_t ping_test_client;
+extern struct sfw_test_client_ops ping_test_client;
void ping_init_test_client(void);
-extern srpc_service_t ping_test_service;
+extern struct srpc_service ping_test_service;
void ping_init_test_service(void);
#endif /* __SELFTEST_SELFTEST_H__ */
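
[Illustrative aside, not part of the patch] The swi_wi_action() hunk above keeps the usual embedded-member dispatch pattern: the generic workitem callback only sees the embedded struct cfs_workitem and recovers the enclosing struct swi_workitem with container_of() before forwarding to swi_action. A stand-alone sketch of that pattern, using invented names rather than the libcfs types:

#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins for cfs_workitem/swi_workitem, not the libcfs API. */
struct work_item {
	int (*wi_fn)(struct work_item *);
};

struct selftest_work {
	struct work_item sw_wi;      /* embedded generic work item */
	int              sw_state;
	int            (*sw_action)(struct selftest_work *);
};

/* Generic layer only sees struct work_item; container_of recovers the wrapper. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static int do_ping(struct selftest_work *sw)
{
	printf("state=%d\n", sw->sw_state);
	return 0;
}

static int sw_dispatch(struct work_item *wi)
{
	struct selftest_work *sw = container_of(wi, struct selftest_work, sw_wi);

	return sw->sw_action(sw);
}

int main(void)
{
	struct selftest_work sw = {
		.sw_wi.wi_fn = sw_dispatch,
		.sw_state    = 1,
		.sw_action   = do_ping,
	};

	return sw.sw_wi.wi_fn(&sw.sw_wi);
}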
diff --git a/drivers/staging/lustre/lnet/selftest/timer.c b/drivers/staging/lustre/lnet/selftest/timer.c
index 8be52526ae5a..b6c4aae007af 100644
--- a/drivers/staging/lustre/lnet/selftest/timer.c
+++ b/drivers/staging/lustre/lnet/selftest/timer.c
@@ -49,7 +49,7 @@
* sorted by increasing expiry time. The number of slots is 2**7 (128),
* to cover a time period of 1024 seconds into the future before wrapping.
*/
-#define STTIMER_MINPOLL 3 /* log2 min poll interval (8 s) */
+#define STTIMER_MINPOLL 3 /* log2 min poll interval (8 s) */
#define STTIMER_SLOTTIME (1 << STTIMER_MINPOLL)
#define STTIMER_SLOTTIMEMASK (~(STTIMER_SLOTTIME - 1))
#define STTIMER_NSLOTS (1 << 7)
@@ -170,20 +170,22 @@ stt_check_timers(unsigned long *last)
static int
stt_timer_main(void *arg)
{
+ int rc = 0;
+
cfs_block_allsigs();
while (!stt_data.stt_shuttingdown) {
stt_check_timers(&stt_data.stt_prev_slot);
- wait_event_timeout(stt_data.stt_waitq,
- stt_data.stt_shuttingdown,
- cfs_time_seconds(STTIMER_SLOTTIME));
+ rc = wait_event_timeout(stt_data.stt_waitq,
+ stt_data.stt_shuttingdown,
+ cfs_time_seconds(STTIMER_SLOTTIME));
}
spin_lock(&stt_data.stt_lock);
stt_data.stt_nthreads--;
spin_unlock(&stt_data.stt_lock);
- return 0;
+ return rc;
}
static int
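
[Illustrative aside, not part of the patch] The timer-wheel comment in the hunk above (2**7 slots of 8 seconds each, i.e. 1024 seconds of future before wrapping) comes down to a simple hash of the expiry time. A rough user-space sketch of that slot computation under the same constants; the per-slot list handling in timer.c is omitted:

#include <stdio.h>

/* Constants matching the defines in the hunk above. */
#define STTIMER_MINPOLL   3                        /* log2 min poll interval (8 s) */
#define STTIMER_SLOTTIME  (1 << STTIMER_MINPOLL)   /* 8 s per slot */
#define STTIMER_NSLOTS    (1 << 7)                 /* 128 slots */

/* Illustrative slot hash: 128 slots * 8 s = 1024 s before the wheel wraps. */
static unsigned int timer_slot(unsigned long expiry_sec)
{
	return (expiry_sec >> STTIMER_MINPOLL) & (STTIMER_NSLOTS - 1);
}

int main(void)
{
	/* Two expiries 1024 s apart land in the same slot (the wheel wraps). */
	printf("%u %u\n", timer_slot(100), timer_slot(100 + 1024));
	return 0;
}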
diff --git a/drivers/staging/lustre/lustre/fid/fid_request.c b/drivers/staging/lustre/lustre/fid/fid_request.c
index 39269c3c56a6..3a4df626462f 100644
--- a/drivers/staging/lustre/lustre/fid/fid_request.c
+++ b/drivers/staging/lustre/lustre/fid/fid_request.c
@@ -66,6 +66,7 @@ static int seq_client_rpc(struct lu_client_seq *seq,
unsigned int debug_mask;
int rc;
+ LASSERT(exp && !IS_ERR(exp));
req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_SEQ_QUERY,
LUSTRE_MDS_VERSION, SEQ_QUERY);
if (!req)
@@ -101,19 +102,22 @@ static int seq_client_rpc(struct lu_client_seq *seq,
req->rq_no_delay = req->rq_no_resend = 1;
debug_mask = D_CONSOLE;
} else {
- if (seq->lcs_type == LUSTRE_SEQ_METADATA)
+ if (seq->lcs_type == LUSTRE_SEQ_METADATA) {
+ req->rq_reply_portal = MDC_REPLY_PORTAL;
req->rq_request_portal = SEQ_METADATA_PORTAL;
- else
+ } else {
+ req->rq_reply_portal = OSC_REPLY_PORTAL;
req->rq_request_portal = SEQ_DATA_PORTAL;
+ }
debug_mask = D_INFO;
}
ptlrpc_at_set_req_timeout(req);
- if (seq->lcs_type == LUSTRE_SEQ_METADATA)
+ if (opc != SEQ_ALLOC_SUPER && seq->lcs_type == LUSTRE_SEQ_METADATA)
mdc_get_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
rc = ptlrpc_queue_wait(req);
- if (seq->lcs_type == LUSTRE_SEQ_METADATA)
+ if (opc != SEQ_ALLOC_SUPER && seq->lcs_type == LUSTRE_SEQ_METADATA)
mdc_put_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
if (rc)
goto out_req;
diff --git a/drivers/staging/lustre/lustre/fld/fld_cache.c b/drivers/staging/lustre/lustre/fld/fld_cache.c
index 062f388cf38a..5a04e99d9249 100644
--- a/drivers/staging/lustre/lustre/fld/fld_cache.c
+++ b/drivers/staging/lustre/lustre/fld/fld_cache.c
@@ -178,8 +178,9 @@ restart_fixup:
if (n_range->lsr_end <= c_range->lsr_end) {
*n_range = *c_range;
fld_cache_entry_delete(cache, f_curr);
- } else
+ } else {
n_range->lsr_start = c_range->lsr_end;
+ }
}
/* we could have overlap over next
diff --git a/drivers/staging/lustre/lustre/fld/fld_internal.h b/drivers/staging/lustre/lustre/fld/fld_internal.h
index e8a3caf20c9b..75d6a48637a9 100644
--- a/drivers/staging/lustre/lustre/fld/fld_internal.h
+++ b/drivers/staging/lustre/lustre/fld/fld_internal.h
@@ -101,12 +101,6 @@ struct fld_cache {
unsigned int fci_no_shrink:1;
};
-enum fld_op {
- FLD_CREATE = 0,
- FLD_DELETE = 1,
- FLD_LOOKUP = 2
-};
-
enum {
/* 4M of FLD cache will not hurt client a lot. */
FLD_SERVER_CACHE_SIZE = (4 * 0x100000),
@@ -126,7 +120,8 @@ enum {
extern struct lu_fld_hash fld_hash[];
int fld_client_rpc(struct obd_export *exp,
- struct lu_seq_range *range, __u32 fld_op);
+ struct lu_seq_range *range, __u32 fld_op,
+ struct ptlrpc_request **reqp);
extern struct lprocfs_vars fld_client_debugfs_list[];
diff --git a/drivers/staging/lustre/lustre/fld/fld_request.c b/drivers/staging/lustre/lustre/fld/fld_request.c
index a3d122d85c8d..304c0ec268c9 100644
--- a/drivers/staging/lustre/lustre/fld/fld_request.c
+++ b/drivers/staging/lustre/lustre/fld/fld_request.c
@@ -64,9 +64,9 @@ static int fld_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw)
{
int rc;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
rc = list_empty(&mcw->mcw_entry);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return rc;
};
@@ -75,15 +75,15 @@ static void fld_enter_request(struct client_obd *cli)
struct mdc_cache_waiter mcw;
struct l_wait_info lwi = { 0 };
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters);
init_waitqueue_head(&mcw.mcw_waitq);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
l_wait_event(mcw.mcw_waitq, fld_req_avail(cli, &mcw), &lwi);
} else {
cli->cl_r_in_flight++;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
}
}
@@ -92,10 +92,9 @@ static void fld_exit_request(struct client_obd *cli)
struct list_head *l, *tmp;
struct mdc_cache_waiter *mcw;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
cli->cl_r_in_flight--;
list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
-
if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
/* No free request slots anymore */
break;
@@ -106,7 +105,7 @@ static void fld_exit_request(struct client_obd *cli)
cli->cl_r_in_flight++;
wake_up(&mcw->mcw_waitq);
}
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
}
static int fld_rrb_hash(struct lu_client_fld *fld, u64 seq)
@@ -392,55 +391,82 @@ void fld_client_fini(struct lu_client_fld *fld)
EXPORT_SYMBOL(fld_client_fini);
int fld_client_rpc(struct obd_export *exp,
- struct lu_seq_range *range, __u32 fld_op)
+ struct lu_seq_range *range, __u32 fld_op,
+ struct ptlrpc_request **reqp)
{
- struct ptlrpc_request *req;
+ struct ptlrpc_request *req = NULL;
struct lu_seq_range *prange;
__u32 *op;
- int rc;
+ int rc = 0;
struct obd_import *imp;
LASSERT(exp);
imp = class_exp2cliimp(exp);
- req = ptlrpc_request_alloc_pack(imp, &RQF_FLD_QUERY, LUSTRE_MDS_VERSION,
- FLD_QUERY);
- if (!req)
- return -ENOMEM;
-
- op = req_capsule_client_get(&req->rq_pill, &RMF_FLD_OPC);
- *op = fld_op;
+ switch (fld_op) {
+ case FLD_QUERY:
+ req = ptlrpc_request_alloc_pack(imp, &RQF_FLD_QUERY,
+ LUSTRE_MDS_VERSION, FLD_QUERY);
+ if (!req)
+ return -ENOMEM;
+
+ /*
+ * XXX: only needed when talking to old server(< 2.6), it should
+ * be removed when < 2.6 server is not supported
+ */
+ op = req_capsule_client_get(&req->rq_pill, &RMF_FLD_OPC);
+ *op = FLD_LOOKUP;
+
+ if (imp->imp_connect_flags_orig & OBD_CONNECT_MDS_MDS)
+ req->rq_allow_replay = 1;
+ break;
+ case FLD_READ:
+ req = ptlrpc_request_alloc_pack(imp, &RQF_FLD_READ,
+ LUSTRE_MDS_VERSION, FLD_READ);
+ if (!req)
+ return -ENOMEM;
+
+ req_capsule_set_size(&req->rq_pill, &RMF_GENERIC_DATA,
+ RCL_SERVER, PAGE_SIZE);
+ break;
+ default:
+ rc = -EINVAL;
+ break;
+ }
+ if (rc)
+ return rc;
prange = req_capsule_client_get(&req->rq_pill, &RMF_FLD_MDFLD);
*prange = *range;
-
ptlrpc_request_set_replen(req);
req->rq_request_portal = FLD_REQUEST_PORTAL;
req->rq_reply_portal = MDC_REPLY_PORTAL;
ptlrpc_at_set_req_timeout(req);
- if (fld_op == FLD_LOOKUP &&
- imp->imp_connect_flags_orig & OBD_CONNECT_MDS_MDS)
- req->rq_allow_replay = 1;
-
- if (fld_op != FLD_LOOKUP)
- mdc_get_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
fld_enter_request(&exp->exp_obd->u.cli);
rc = ptlrpc_queue_wait(req);
fld_exit_request(&exp->exp_obd->u.cli);
- if (fld_op != FLD_LOOKUP)
- mdc_put_rpc_lock(exp->exp_obd->u.cli.cl_rpc_lock, NULL);
if (rc)
goto out_req;
- prange = req_capsule_server_get(&req->rq_pill, &RMF_FLD_MDFLD);
- if (!prange) {
- rc = -EFAULT;
- goto out_req;
+ if (fld_op == FLD_QUERY) {
+ prange = req_capsule_server_get(&req->rq_pill, &RMF_FLD_MDFLD);
+ if (!prange) {
+ rc = -EFAULT;
+ goto out_req;
+ }
+ *range = *prange;
}
- *range = *prange;
+
out_req:
- ptlrpc_req_finished(req);
+ if (rc || !reqp) {
+ ptlrpc_req_finished(req);
+ req = NULL;
+ }
+
+ if (reqp)
+ *reqp = req;
+
return rc;
}
@@ -468,7 +494,7 @@ int fld_client_lookup(struct lu_client_fld *fld, u64 seq, u32 *mds,
res.lsr_start = seq;
fld_range_set_type(&res, flags);
- rc = fld_client_rpc(target->ft_exp, &res, FLD_LOOKUP);
+ rc = fld_client_rpc(target->ft_exp, &res, FLD_QUERY, NULL);
if (rc == 0) {
*mds = res.lsr_index;
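
[Illustrative aside, not part of the patch] The reworked fld_client_rpc() above adds an optional out-parameter: pass reqp == NULL for the old fire-and-forget behaviour (the request is always finished internally), or a non-NULL reqp to take ownership of the ptlrpc request on success, as an FLD_READ caller would to unpack the reply buffer itself. A hedged, caller-side sketch of the two calling patterns, assuming only the declarations shown in fld_internal.h above (not a complete in-tree user, and not compilable outside the kernel):

/* Old-style lookup: no interest in the request, pass reqp == NULL. */
static int fld_lookup_example(struct obd_export *exp, struct lu_seq_range *range)
{
	return fld_client_rpc(exp, range, FLD_QUERY, NULL);
}

/* FLD_READ: on success the request is handed back through reqp. */
static int fld_read_example(struct obd_export *exp, struct lu_seq_range *range)
{
	struct ptlrpc_request *req = NULL;
	int rc;

	rc = fld_client_rpc(exp, range, FLD_READ, &req);
	if (rc)
		return rc;   /* on error the request was already finished */

	/* ... consume the RMF_GENERIC_DATA reply buffer here ... */

	/* The caller releases the request when done with the reply. */
	ptlrpc_req_finished(req);
	return rc;
}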
diff --git a/drivers/staging/lustre/lustre/include/cl_object.h b/drivers/staging/lustre/lustre/include/cl_object.h
index fb971ded5a1b..d4c33dd110ab 100644
--- a/drivers/staging/lustre/lustre/include/cl_object.h
+++ b/drivers/staging/lustre/lustre/include/cl_object.h
@@ -82,7 +82,6 @@
* - i_mutex
* - PG_locked
* - cl_object_header::coh_page_guard
- * - cl_object_header::coh_lock_guard
* - lu_site::ls_guard
*
* See the top comment in cl_object.c for the description of overall locking and
@@ -98,9 +97,12 @@
* super-class definitions.
*/
#include "lu_object.h"
+#include <linux/atomic.h>
#include "linux/lustre_compat25.h"
#include <linux/mutex.h>
#include <linux/radix-tree.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
struct inode;
@@ -138,7 +140,7 @@ struct cl_device_operations {
* cl_req_slice_add().
*
* \see osc_req_init(), lov_req_init(), lovsub_req_init()
- * \see ccc_req_init()
+ * \see vvp_req_init()
*/
int (*cdo_req_init)(const struct lu_env *env, struct cl_device *dev,
struct cl_req *req);
@@ -147,7 +149,7 @@ struct cl_device_operations {
/**
* Device in the client stack.
*
- * \see ccc_device, lov_device, lovsub_device, osc_device
+ * \see vvp_device, lov_device, lovsub_device, osc_device
*/
struct cl_device {
/** Super-class. */
@@ -243,7 +245,7 @@ enum cl_attr_valid {
* be discarded from the memory, all its sub-objects are torn-down and
* destroyed too.
*
- * \see ccc_object, lov_object, lovsub_object, osc_object
+ * \see vvp_object, lov_object, lovsub_object, osc_object
*/
struct cl_object {
/** super class */
@@ -322,7 +324,7 @@ struct cl_object_operations {
* to be used instead of newly created.
*/
int (*coo_page_init)(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage);
+ struct cl_page *page, pgoff_t index);
/**
* Initialize lock slice for this layer. Called top-to-bottom through
* every object layer when a new cl_lock is instantiated. Layer
@@ -383,11 +385,17 @@ struct cl_object_operations {
* object. Layers are supposed to fill parts of \a lvb that will be
* shipped to the glimpse originator as a glimpse result.
*
- * \see ccc_object_glimpse(), lovsub_object_glimpse(),
+ * \see vvp_object_glimpse(), lovsub_object_glimpse(),
* \see osc_object_glimpse()
*/
int (*coo_glimpse)(const struct lu_env *env,
const struct cl_object *obj, struct ost_lvb *lvb);
+ /**
+ * Object prune method. Called when the layout is going to change on
+ * this object, therefore each layer has to clean up their cache,
+ * mainly pages and locks.
+ */
+ int (*coo_prune)(const struct lu_env *env, struct cl_object *obj);
};
/**
@@ -398,22 +406,6 @@ struct cl_object_header {
* here.
*/
struct lu_object_header coh_lu;
- /** \name locks
- * \todo XXX move locks below to the separate cache-lines, they are
- * mostly useless otherwise.
- */
- /** @{ */
- /** Lock protecting page tree. */
- spinlock_t coh_page_guard;
- /** Lock protecting lock list. */
- spinlock_t coh_lock_guard;
- /** @} locks */
- /** Radix tree of cl_page's, cached for this object. */
- struct radix_tree_root coh_tree;
- /** # of pages in radix tree. */
- unsigned long coh_pages;
- /** List of cl_lock's granted for this object. */
- struct list_head coh_locks;
/**
* Parent object. It is assumed that an object has a well-defined
@@ -460,10 +452,6 @@ struct cl_object_header {
co_lu.lo_linkage)
/** @} cl_object */
-#ifndef pgoff_t
-#define pgoff_t unsigned long
-#endif
-
#define CL_PAGE_EOF ((pgoff_t)~0ull)
/** \addtogroup cl_page cl_page
@@ -727,16 +715,10 @@ struct cl_page {
atomic_t cp_ref;
/** An object this page is a part of. Immutable after creation. */
struct cl_object *cp_obj;
- /** Logical page index within the object. Immutable after creation. */
- pgoff_t cp_index;
/** List of slices. Immutable after creation. */
struct list_head cp_layers;
- /** Parent page, NULL for top-level page. Immutable after creation. */
- struct cl_page *cp_parent;
- /** Lower-layer page. NULL for bottommost page. Immutable after
- * creation.
- */
- struct cl_page *cp_child;
+ /** vmpage */
+ struct page *cp_vmpage;
/**
* Page state. This field is const to avoid accidental update, it is
* modified only internally within cl_page.c. Protected by a VM lock.
@@ -787,10 +769,11 @@ struct cl_page {
/**
* Per-layer part of cl_page.
*
- * \see ccc_page, lov_page, osc_page
+ * \see vvp_page, lov_page, osc_page
*/
struct cl_page_slice {
struct cl_page *cpl_page;
+ pgoff_t cpl_index;
/**
* Object slice corresponding to this page slice. Immutable after
* creation.
@@ -804,16 +787,9 @@ struct cl_page_slice {
/**
* Lock mode. For the client extent locks.
*
- * \warning: cl_lock_mode_match() assumes particular ordering here.
* \ingroup cl_lock
*/
enum cl_lock_mode {
- /**
- * Mode of a lock that protects no data, and exists only as a
- * placeholder. This is used for `glimpse' requests. A phantom lock
- * might get promoted to real lock at some point.
- */
- CLM_PHANTOM,
CLM_READ,
CLM_WRITE,
CLM_GROUP
@@ -846,11 +822,6 @@ struct cl_page_operations {
*/
/**
- * \return the underlying VM page. Optional.
- */
- struct page *(*cpo_vmpage)(const struct lu_env *env,
- const struct cl_page_slice *slice);
- /**
* Called when \a io acquires this page into the exclusive
* ownership. When this method returns, it is guaranteed that the is
* not owned by other io, and no transfer is going on against
@@ -897,14 +868,6 @@ struct cl_page_operations {
void (*cpo_export)(const struct lu_env *env,
const struct cl_page_slice *slice, int uptodate);
/**
- * Unmaps page from the user space (if it is mapped).
- *
- * \see cl_page_unmap()
- * \see vvp_page_unmap()
- */
- int (*cpo_unmap)(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io);
- /**
* Checks whether underlying VM page is locked (in the suitable
* sense). Used for assertions.
*
@@ -957,7 +920,7 @@ struct cl_page_operations {
*/
int (*cpo_is_under_lock)(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *io);
+ struct cl_io *io, pgoff_t *max);
/**
* Optional debugging helper. Prints given page slice.
@@ -1027,26 +990,6 @@ struct cl_page_operations {
*/
int (*cpo_make_ready)(const struct lu_env *env,
const struct cl_page_slice *slice);
- /**
- * Announce that this page is to be written out
- * opportunistically, that is, page is dirty, it is not
- * necessary to start write-out transfer right now, but
- * eventually page has to be written out.
- *
- * Main caller of this is the write path (see
- * vvp_io_commit_write()), using this method to build a
- * "transfer cache" from which large transfers are then
- * constructed by the req-formation engine.
- *
- * \todo XXX it would make sense to add page-age tracking
- * semantics here, and to oblige the req-formation engine to
- * send the page out not later than it is too old.
- *
- * \see cl_page_cache_add()
- */
- int (*cpo_cache_add)(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io);
} io[CRT_NR];
/**
* Tell transfer engine that only [to, from] part of a page should be
@@ -1098,9 +1041,8 @@ struct cl_page_operations {
*/
#define CL_PAGE_DEBUG(mask, env, page, format, ...) \
do { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
- \
if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
cl_page_print(env, &msgdata, lu_cdebug_printer, page); \
CDEBUG(mask, format, ## __VA_ARGS__); \
} \
@@ -1111,9 +1053,8 @@ do { \
*/
#define CL_PAGE_HEADER(mask, env, page, format, ...) \
do { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
- \
if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
cl_page_header_print(env, &msgdata, lu_cdebug_printer, page); \
CDEBUG(mask, format, ## __VA_ARGS__); \
} \
@@ -1130,6 +1071,12 @@ static inline int __page_in_use(const struct cl_page *page, int refc)
#define cl_page_in_use(pg) __page_in_use(pg, 1)
#define cl_page_in_use_noref(pg) __page_in_use(pg, 0)
+static inline struct page *cl_page_vmpage(struct cl_page *page)
+{
+ LASSERT(page->cp_vmpage);
+ return page->cp_vmpage;
+}
+
/** @} cl_page */
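(Not part of the patch.) With cp_vmpage stored directly in struct cl_page, callers no longer need a per-layer cpo_vmpage() method; a minimal sketch of a hypothetical helper using only the cl_page_vmpage() accessor defined above:

static bool cl_page_backed_by(struct cl_page *page, struct page *vmpage)
{
	/* cl_page_vmpage() asserts that cp_vmpage is set and returns it */
	return cl_page_vmpage(page) == vmpage;
}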
/** \addtogroup cl_lock cl_lock
@@ -1150,12 +1097,6 @@ static inline int __page_in_use(const struct cl_page *page, int refc)
* (struct cl_lock) and a list of layers (struct cl_lock_slice), linked to
* cl_lock::cll_layers list through cl_lock_slice::cls_linkage.
*
- * All locks for a given object are linked into cl_object_header::coh_locks
- * list (protected by cl_object_header::coh_lock_guard spin-lock) through
- * cl_lock::cll_linkage. Currently this list is not sorted in any way. We can
- * sort it in starting lock offset, or use altogether different data structure
- * like a tree.
- *
* Typical cl_lock consists of the two layers:
*
* - vvp_lock (vvp specific data), and
@@ -1177,111 +1118,29 @@ static inline int __page_in_use(const struct cl_page *page, int refc)
*
* LIFE CYCLE
*
- * cl_lock is reference counted. When reference counter drops to 0, lock is
- * placed in the cache, except when lock is in CLS_FREEING state. CLS_FREEING
- * lock is destroyed when last reference is released. Referencing between
- * top-lock and its sub-locks is described in the lov documentation module.
- *
- * STATE MACHINE
- *
- * Also, cl_lock is a state machine. This requires some clarification. One of
- * the goals of client IO re-write was to make IO path non-blocking, or at
- * least to make it easier to make it non-blocking in the future. Here
- * `non-blocking' means that when a system call (read, write, truncate)
- * reaches a situation where it has to wait for a communication with the
- * server, it should --instead of waiting-- remember its current state and
- * switch to some other work. E.g,. instead of waiting for a lock enqueue,
- * client should proceed doing IO on the next stripe, etc. Obviously this is
- * rather radical redesign, and it is not planned to be fully implemented at
- * this time, instead we are putting some infrastructure in place, that would
- * make it easier to do asynchronous non-blocking IO easier in the
- * future. Specifically, where old locking code goes to sleep (waiting for
- * enqueue, for example), new code returns cl_lock_transition::CLO_WAIT. When
- * enqueue reply comes, its completion handler signals that lock state-machine
- * is ready to transit to the next state. There is some generic code in
- * cl_lock.c that sleeps, waiting for these signals. As a result, for users of
- * this cl_lock.c code, it looks like locking is done in normal blocking
- * fashion, and it the same time it is possible to switch to the non-blocking
- * locking (simply by returning cl_lock_transition::CLO_WAIT from cl_lock.c
- * functions).
- *
- * For a description of state machine states and transitions see enum
- * cl_lock_state.
- *
- * There are two ways to restrict a set of states which lock might move to:
- *
- * - placing a "hold" on a lock guarantees that lock will not be moved
- * into cl_lock_state::CLS_FREEING state until hold is released. Hold
- * can be only acquired on a lock that is not in
- * cl_lock_state::CLS_FREEING. All holds on a lock are counted in
- * cl_lock::cll_holds. Hold protects lock from cancellation and
- * destruction. Requests to cancel and destroy a lock on hold will be
- * recorded, but only honored when last hold on a lock is released;
- *
- * - placing a "user" on a lock guarantees that lock will not leave
- * cl_lock_state::CLS_NEW, cl_lock_state::CLS_QUEUING,
- * cl_lock_state::CLS_ENQUEUED and cl_lock_state::CLS_HELD set of
- * states, once it enters this set. That is, if a user is added onto a
- * lock in a state not from this set, it doesn't immediately enforce
- * lock to move to this set, but once lock enters this set it will
- * remain there until all users are removed. Lock users are counted in
- * cl_lock::cll_users.
- *
- * User is used to assure that lock is not canceled or destroyed while
- * it is being enqueued, or actively used by some IO.
- *
- * Currently, a user always comes with a hold (cl_lock_invariant()
- * checks that a number of holds is not less than a number of users).
- *
- * CONCURRENCY
- *
- * This is how lock state-machine operates. struct cl_lock contains a mutex
- * cl_lock::cll_guard that protects struct fields.
- *
- * - mutex is taken, and cl_lock::cll_state is examined.
- *
- * - for every state there are possible target states where lock can move
- * into. They are tried in order. Attempts to move into next state are
- * done by _try() functions in cl_lock.c:cl_{enqueue,unlock,wait}_try().
- *
- * - if the transition can be performed immediately, state is changed,
- * and mutex is released.
- *
- * - if the transition requires blocking, _try() function returns
- * cl_lock_transition::CLO_WAIT. Caller unlocks mutex and goes to
- * sleep, waiting for possibility of lock state change. It is woken
- * up when some event occurs, that makes lock state change possible
- * (e.g., the reception of the reply from the server), and repeats
- * the loop.
- *
- * Top-lock and sub-lock has separate mutexes and the latter has to be taken
- * first to avoid dead-lock.
- *
- * To see an example of interaction of all these issues, take a look at the
- * lov_cl.c:lov_lock_enqueue() function. It is called as a part of
- * cl_enqueue_try(), and tries to advance top-lock to ENQUEUED state, by
- * advancing state-machines of its sub-locks (lov_lock_enqueue_one()). Note
- * also, that it uses trylock to grab sub-lock mutex to avoid dead-lock. It
- * also has to handle CEF_ASYNC enqueue, when sub-locks enqueues have to be
- * done in parallel, rather than one after another (this is used for glimpse
- * locks, that cannot dead-lock).
+ * cl_lock is a cacheless data container describing the lock requirements
+ * needed to complete an IO. A cl_lock is created before the I/O starts and
+ * destroyed when the I/O is complete.
+ *
+ * cl_lock relies on an LDLM lock to provide the actual lock semantics; the
+ * LDLM lock is attached to the cl_lock at the OSC layer and remains cacheable.
*
* INTERFACE AND USAGE
*
- * struct cl_lock_operations provide a number of call-backs that are invoked
- * when events of interest occurs. Layers can intercept and handle glimpse,
- * blocking, cancel ASTs and a reception of the reply from the server.
+ * Two major methods are supported for cl_lock: clo_enqueue and clo_cancel. A
+ * cl_lock is enqueued by cl_lock_request(), which will call clo_enqueue()
+ * methods for each layer to enqueue the lock. At the LOV layer, if a cl_lock
+ * consists of multiple sub cl_locks, each sub lock will be enqueued
+ * correspondingly. At the OSC layer, the enqueue request will try to reuse a
+ * cached LDLM lock; otherwise a new LDLM lock has to be requested from the
+ * OST side.
*
- * One important difference with the old client locking model is that new
- * client has a representation for the top-lock, whereas in the old code only
- * sub-locks existed as real data structures and file-level locks are
- * represented by "request sets" that are created and destroyed on each and
- * every lock creation.
+ * cl_lock_cancel() must be called to release a cl_lock after use. The
+ * clo_cancel() method is called for each layer to release the resources held
+ * by this lock. At the OSC layer, the reference on the LDLM lock taken at
+ * clo_enqueue time is released.
*
- * Top-locks are cached, and can be found in the cache by the system calls. It
- * is possible that top-lock is in cache, but some of its sub-locks were
- * canceled and destroyed. In that case top-lock has to be enqueued again
- * before it can be used.
+ * LDLM lock can only be canceled if there is no cl_lock using it.
*
 * Overall process of the locking during IO operation is as follows:
*
@@ -1294,7 +1153,7 @@ static inline int __page_in_use(const struct cl_page *page, int refc)
*
* - when all locks are acquired, IO is performed;
*
- * - locks are released into cache.
+ * - locks are released after IO is complete.
*
* Striping introduces major additional complexity into locking. The
* fundamental problem is that it is generally unsafe to actively use (hold)
@@ -1316,16 +1175,6 @@ static inline int __page_in_use(const struct cl_page *page, int refc)
* buf is a part of memory mapped Lustre file, a lock or locks protecting buf
* has to be held together with the usual lock on [offset, offset + count].
*
- * As multi-stripe locks have to be allowed, it makes sense to cache them, so
- * that, for example, a sequence of O_APPEND writes can proceed quickly
- * without going down to the individual stripes to do lock matching. On the
- * other hand, multi-stripe locks shouldn't be used by normal read/write
- * calls. To achieve this, every layer can implement ->clo_fits_into() method,
- * that is called by lock matching code (cl_lock_lookup()), and that can be
- * used to selectively disable matching of certain locks for certain IOs. For
- * example, lov layer implements lov_lock_fits_into() that allow multi-stripe
- * locks to be matched only for truncates and O_APPEND writes.
- *
* Interaction with DLM
*
* In the expected setup, cl_lock is ultimately backed up by a collection of
@@ -1356,295 +1205,27 @@ struct cl_lock_descr {
__u32 cld_enq_flags;
};
-#define DDESCR "%s(%d):[%lu, %lu]"
+#define DDESCR "%s(%d):[%lu, %lu]:%x"
#define PDESCR(descr) \
cl_lock_mode_name((descr)->cld_mode), (descr)->cld_mode, \
- (descr)->cld_start, (descr)->cld_end
+ (descr)->cld_start, (descr)->cld_end, (descr)->cld_enq_flags
const char *cl_lock_mode_name(const enum cl_lock_mode mode);
/**
- * Lock state-machine states.
- *
- * \htmlonly
- * <pre>
- *
- * Possible state transitions:
- *
- * +------------------>NEW
- * | |
- * | | cl_enqueue_try()
- * | |
- * | cl_unuse_try() V
- * | +--------------QUEUING (*)
- * | | |
- * | | | cl_enqueue_try()
- * | | |
- * | | cl_unuse_try() V
- * sub-lock | +-------------ENQUEUED (*)
- * canceled | | |
- * | | | cl_wait_try()
- * | | |
- * | | (R)
- * | | |
- * | | V
- * | | HELD<---------+
- * | | | |
- * | | | | cl_use_try()
- * | | cl_unuse_try() | |
- * | | | |
- * | | V ---+
- * | +------------>INTRANSIT (D) <--+
- * | | |
- * | cl_unuse_try() | | cached lock found
- * | | | cl_use_try()
- * | | |
- * | V |
- * +------------------CACHED---------+
- * |
- * (C)
- * |
- * V
- * FREEING
- *
- * Legend:
- *
- * In states marked with (*) transition to the same state (i.e., a loop
- * in the diagram) is possible.
- *
- * (R) is the point where Receive call-back is invoked: it allows layers
- * to handle arrival of lock reply.
- *
- * (C) is the point where Cancellation call-back is invoked.
- *
- * (D) is the transit state which means the lock is changing.
- *
- * Transition to FREEING state is possible from any other state in the
- * diagram in case of unrecoverable error.
- * </pre>
- * \endhtmlonly
- *
- * These states are for individual cl_lock object. Top-lock and its sub-locks
- * can be in the different states. Another way to say this is that we have
- * nested state-machines.
- *
- * Separate QUEUING and ENQUEUED states are needed to support non-blocking
- * operation for locks with multiple sub-locks. Imagine lock on a file F, that
- * intersects 3 stripes S0, S1, and S2. To enqueue F client has to send
- * enqueue to S0, wait for its completion, then send enqueue for S1, wait for
- * its completion and at last enqueue lock for S2, and wait for its
- * completion. In that case, top-lock is in QUEUING state while S0, S1 are
- * handled, and is in ENQUEUED state after enqueue to S2 has been sent (note
- * that in this case, sub-locks move from state to state, and top-lock remains
- * in the same state).
- */
-enum cl_lock_state {
- /**
- * Lock that wasn't yet enqueued
- */
- CLS_NEW,
- /**
- * Enqueue is in progress, blocking for some intermediate interaction
- * with the other side.
- */
- CLS_QUEUING,
- /**
- * Lock is fully enqueued, waiting for server to reply when it is
- * granted.
- */
- CLS_ENQUEUED,
- /**
- * Lock granted, actively used by some IO.
- */
- CLS_HELD,
- /**
- * This state is used to mark the lock is being used, or unused.
- * We need this state because the lock may have several sublocks,
- * so it's impossible to have an atomic way to bring all sublocks
- * into CLS_HELD state at use case, or all sublocks to CLS_CACHED
- * at unuse case.
- * If a thread is referring to a lock, and it sees the lock is in this
- * state, it must wait for the lock.
- * See state diagram for details.
- */
- CLS_INTRANSIT,
- /**
- * Lock granted, not used.
- */
- CLS_CACHED,
- /**
- * Lock is being destroyed.
- */
- CLS_FREEING,
- CLS_NR
-};
-
-enum cl_lock_flags {
- /**
- * lock has been cancelled. This flag is never cleared once set (by
- * cl_lock_cancel0()).
- */
- CLF_CANCELLED = 1 << 0,
- /** cancellation is pending for this lock. */
- CLF_CANCELPEND = 1 << 1,
- /** destruction is pending for this lock. */
- CLF_DOOMED = 1 << 2,
- /** from enqueue RPC reply upcall. */
- CLF_FROM_UPCALL = 1 << 3,
-};
-
-/**
- * Lock closure.
- *
- * Lock closure is a collection of locks (both top-locks and sub-locks) that
- * might be updated in a result of an operation on a certain lock (which lock
- * this is a closure of).
- *
- * Closures are needed to guarantee dead-lock freedom in the presence of
- *
- * - nested state-machines (top-lock state-machine composed of sub-lock
- * state-machines), and
- *
- * - shared sub-locks.
- *
- * Specifically, many operations, such as lock enqueue, wait, unlock,
- * etc. start from a top-lock, and then operate on a sub-locks of this
- * top-lock, holding a top-lock mutex. When sub-lock state changes as a result
- * of such operation, this change has to be propagated to all top-locks that
- * share this sub-lock. Obviously, no natural lock ordering (e.g.,
- * top-to-bottom or bottom-to-top) captures this scenario, so try-locking has
- * to be used. Lock closure systematizes this try-and-repeat logic.
- */
-struct cl_lock_closure {
- /**
- * Lock that is mutexed when closure construction is started. When
- * closure in is `wait' mode (cl_lock_closure::clc_wait), mutex on
- * origin is released before waiting.
- */
- struct cl_lock *clc_origin;
- /**
- * List of enclosed locks, so far. Locks are linked here through
- * cl_lock::cll_inclosure.
- */
- struct list_head clc_list;
- /**
- * True iff closure is in a `wait' mode. This determines what
- * cl_lock_enclosure() does when a lock L to be added to the closure
- * is currently mutexed by some other thread.
- *
- * If cl_lock_closure::clc_wait is not set, then closure construction
- * fails with CLO_REPEAT immediately.
- *
- * In wait mode, cl_lock_enclosure() waits until next attempt to build
- * a closure might succeed. To this end it releases an origin mutex
- * (cl_lock_closure::clc_origin), that has to be the only lock mutex
- * owned by the current thread, and then waits on L mutex (by grabbing
- * it and immediately releasing), before returning CLO_REPEAT to the
- * caller.
- */
- int clc_wait;
- /** Number of locks in the closure. */
- int clc_nr;
-};
-
-/**
* Layered client lock.
*/
struct cl_lock {
- /** Reference counter. */
- atomic_t cll_ref;
/** List of slices. Immutable after creation. */
struct list_head cll_layers;
- /**
- * Linkage into cl_lock::cll_descr::cld_obj::coh_locks list. Protected
- * by cl_lock::cll_descr::cld_obj::coh_lock_guard.
- */
- struct list_head cll_linkage;
- /**
- * Parameters of this lock. Protected by
- * cl_lock::cll_descr::cld_obj::coh_lock_guard nested within
- * cl_lock::cll_guard. Modified only on lock creation and in
- * cl_lock_modify().
- */
+ /** lock attribute, extent, cl_object, etc. */
struct cl_lock_descr cll_descr;
- /** Protected by cl_lock::cll_guard. */
- enum cl_lock_state cll_state;
- /** signals state changes. */
- wait_queue_head_t cll_wq;
- /**
- * Recursive lock, most fields in cl_lock{} are protected by this.
- *
- * Locking rules: this mutex is never held across network
- * communication, except when lock is being canceled.
- *
- * Lock ordering: a mutex of a sub-lock is taken first, then a mutex
- * on a top-lock. Other direction is implemented through a
- * try-lock-repeat loop. Mutices of unrelated locks can be taken only
- * by try-locking.
- *
- * \see osc_lock_enqueue_wait(), lov_lock_cancel(), lov_sublock_wait().
- */
- struct mutex cll_guard;
- struct task_struct *cll_guarder;
- int cll_depth;
-
- /**
- * the owner for INTRANSIT state
- */
- struct task_struct *cll_intransit_owner;
- int cll_error;
- /**
- * Number of holds on a lock. A hold prevents a lock from being
- * canceled and destroyed. Protected by cl_lock::cll_guard.
- *
- * \see cl_lock_hold(), cl_lock_unhold(), cl_lock_release()
- */
- int cll_holds;
- /**
- * Number of lock users. Valid in cl_lock_state::CLS_HELD state
- * only. Lock user pins lock in CLS_HELD state. Protected by
- * cl_lock::cll_guard.
- *
- * \see cl_wait(), cl_unuse().
- */
- int cll_users;
- /**
- * Flag bit-mask. Values from enum cl_lock_flags. Updates are
- * protected by cl_lock::cll_guard.
- */
- unsigned long cll_flags;
- /**
- * A linkage into a list of locks in a closure.
- *
- * \see cl_lock_closure
- */
- struct list_head cll_inclosure;
- /**
- * Confict lock at queuing time.
- */
- struct cl_lock *cll_conflict;
- /**
- * A list of references to this lock, for debugging.
- */
- struct lu_ref cll_reference;
- /**
- * A list of holds on this lock, for debugging.
- */
- struct lu_ref cll_holders;
- /**
- * A reference for cl_lock::cll_descr::cld_obj. For debugging.
- */
- struct lu_ref_link cll_obj_ref;
-#ifdef CONFIG_LOCKDEP
- /* "dep_map" name is assumed by lockdep.h macros. */
- struct lockdep_map dep_map;
-#endif
};
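(Illustrative sketch, not part of the patch.) Under the cacheless model described above, a lock's life is reduced to a request and a release around one IO. The helper below is hypothetical; it assumes the cl_lock_request()/cl_lock_release() prototypes declared later in this header, descriptor fields as used by the PDESCR macro above, and the teardown split documented in the INTERFACE AND USAGE section:

static int with_extent_lock(const struct lu_env *env, struct cl_io *io,
			    struct cl_lock *lock, pgoff_t start, pgoff_t end)
{
	int rc;

	lock->cll_descr.cld_mode  = CLM_READ;
	lock->cll_descr.cld_start = start;
	lock->cll_descr.cld_end   = end;

	/* enqueue through every layer (clo_enqueue); LOV fans out to its
	 * sub-locks and OSC reuses a cached LDLM lock when possible */
	rc = cl_lock_request(env, io, lock);
	if (rc != 0)
		return rc;

	/* ... IO covered by [start, end] runs here ... */

	/* drop the per-layer state; OSC releases its LDLM lock reference */
	cl_lock_release(env, lock);
	return 0;
}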
/**
* Per-layer part of cl_lock
*
- * \see ccc_lock, lov_lock, lovsub_lock, osc_lock
+ * \see vvp_lock, lov_lock, lovsub_lock, osc_lock
*/
struct cl_lock_slice {
struct cl_lock *cls_lock;
@@ -1658,174 +1239,36 @@ struct cl_lock_slice {
};
/**
- * Possible (non-error) return values of ->clo_{enqueue,wait,unlock}().
- *
- * NOTE: lov_subresult() depends on ordering here.
- */
-enum cl_lock_transition {
- /** operation cannot be completed immediately. Wait for state change. */
- CLO_WAIT = 1,
- /** operation had to release lock mutex, restart. */
- CLO_REPEAT = 2,
- /** lower layer re-enqueued. */
- CLO_REENQUEUED = 3,
-};
-
-/**
*
* \see vvp_lock_ops, lov_lock_ops, lovsub_lock_ops, osc_lock_ops
*/
struct cl_lock_operations {
- /**
- * \name statemachine
- *
- * State machine transitions. These 3 methods are called to transfer
- * lock from one state to another, as described in the commentary
- * above enum #cl_lock_state.
- *
- * \retval 0 this layer has nothing more to do to before
- * transition to the target state happens;
- *
- * \retval CLO_REPEAT method had to release and re-acquire cl_lock
- * mutex, repeat invocation of transition method
- * across all layers;
- *
- * \retval CLO_WAIT this layer cannot move to the target state
- * immediately, as it has to wait for certain event
- * (e.g., the communication with the server). It
- * is guaranteed, that when the state transfer
- * becomes possible, cl_lock::cll_wq wait-queue
- * is signaled. Caller can wait for this event by
- * calling cl_lock_state_wait();
- *
- * \retval -ve failure, abort state transition, move the lock
- * into cl_lock_state::CLS_FREEING state, and set
- * cl_lock::cll_error.
- *
- * Once all layers voted to agree to transition (by returning 0), lock
- * is moved into corresponding target state. All state transition
- * methods are optional.
- */
/** @{ */
/**
* Attempts to enqueue the lock. Called top-to-bottom.
*
- * \see ccc_lock_enqueue(), lov_lock_enqueue(), lovsub_lock_enqueue(),
+ * \retval 0 this layer has enqueued the lock successfully
+	 * \retval >0	this layer has enqueued the lock, but needs to wait on
+ * @anchor for resources
+ * \retval -ve failure
+ *
+ * \see vvp_lock_enqueue(), lov_lock_enqueue(), lovsub_lock_enqueue(),
* \see osc_lock_enqueue()
*/
int (*clo_enqueue)(const struct lu_env *env,
const struct cl_lock_slice *slice,
- struct cl_io *io, __u32 enqflags);
+ struct cl_io *io, struct cl_sync_io *anchor);
/**
- * Attempts to wait for enqueue result. Called top-to-bottom.
- *
- * \see ccc_lock_wait(), lov_lock_wait(), osc_lock_wait()
- */
- int (*clo_wait)(const struct lu_env *env,
- const struct cl_lock_slice *slice);
- /**
- * Attempts to unlock the lock. Called bottom-to-top. In addition to
- * usual return values of lock state-machine methods, this can return
- * -ESTALE to indicate that lock cannot be returned to the cache, and
- * has to be re-initialized.
- * unuse is a one-shot operation, so it must NOT return CLO_WAIT.
- *
- * \see ccc_lock_unuse(), lov_lock_unuse(), osc_lock_unuse()
- */
- int (*clo_unuse)(const struct lu_env *env,
- const struct cl_lock_slice *slice);
- /**
- * Notifies layer that cached lock is started being used.
- *
- * \pre lock->cll_state == CLS_CACHED
- *
- * \see lov_lock_use(), osc_lock_use()
- */
- int (*clo_use)(const struct lu_env *env,
- const struct cl_lock_slice *slice);
- /** @} statemachine */
- /**
- * A method invoked when lock state is changed (as a result of state
- * transition). This is used, for example, to track when the state of
- * a sub-lock changes, to propagate this change to the corresponding
- * top-lock. Optional
- *
- * \see lovsub_lock_state()
- */
- void (*clo_state)(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- enum cl_lock_state st);
- /**
- * Returns true, iff given lock is suitable for the given io, idea
- * being, that there are certain "unsafe" locks, e.g., ones acquired
- * for O_APPEND writes, that we don't want to re-use for a normal
- * write, to avoid the danger of cascading evictions. Optional. Runs
- * under cl_object_header::coh_lock_guard.
- *
- * XXX this should take more information about lock needed by
- * io. Probably lock description or something similar.
- *
- * \see lov_fits_into()
- */
- int (*clo_fits_into)(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- const struct cl_lock_descr *need,
- const struct cl_io *io);
- /**
- * \name ast
- * Asynchronous System Traps. All of then are optional, all are
- * executed bottom-to-top.
- */
- /** @{ */
-
- /**
- * Cancellation callback. Cancel a lock voluntarily, or under
- * the request of server.
+	 * Cancel a lock and release its DLM lock reference, but do not cancel
+	 * the DLM lock itself
*/
void (*clo_cancel)(const struct lu_env *env,
const struct cl_lock_slice *slice);
- /**
- * Lock weighting ast. Executed to estimate how precious this lock
- * is. The sum of results across all layers is used to determine
- * whether lock worth keeping in cache given present memory usage.
- *
- * \see osc_lock_weigh(), vvp_lock_weigh(), lovsub_lock_weigh().
- */
- unsigned long (*clo_weigh)(const struct lu_env *env,
- const struct cl_lock_slice *slice);
- /** @} ast */
-
- /**
- * \see lovsub_lock_closure()
- */
- int (*clo_closure)(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- struct cl_lock_closure *closure);
- /**
- * Executed bottom-to-top when lock description changes (e.g., as a
- * result of server granting more generous lock than was requested).
- *
- * \see lovsub_lock_modify()
- */
- int (*clo_modify)(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- const struct cl_lock_descr *updated);
- /**
- * Notifies layers (bottom-to-top) that lock is going to be
- * destroyed. Responsibility of layers is to prevent new references on
- * this lock from being acquired once this method returns.
- *
- * This can be called multiple times due to the races.
- *
- * \see cl_lock_delete()
- * \see osc_lock_delete(), lovsub_lock_delete()
- */
- void (*clo_delete)(const struct lu_env *env,
- const struct cl_lock_slice *slice);
+ /** @} */
/**
* Destructor. Frees resources and the slice.
*
- * \see ccc_lock_fini(), lov_lock_fini(), lovsub_lock_fini(),
+ * \see vvp_lock_fini(), lov_lock_fini(), lovsub_lock_fini(),
* \see osc_lock_fini()
*/
void (*clo_fini)(const struct lu_env *env, struct cl_lock_slice *slice);
@@ -2016,7 +1459,7 @@ enum cl_io_state {
* This is usually embedded into layer session data, rather than allocated
* dynamically.
*
- * \see vvp_io, lov_io, osc_io, ccc_io
+ * \see vvp_io, lov_io, osc_io
*/
struct cl_io_slice {
struct cl_io *cis_io;
@@ -2031,6 +1474,8 @@ struct cl_io_slice {
struct list_head cis_linkage;
};
+typedef void (*cl_commit_cbt)(const struct lu_env *, struct cl_io *,
+ struct cl_page *);
/**
* Per-layer io operations.
* \see vvp_io_ops, lov_io_ops, lovsub_io_ops, osc_io_ops
@@ -2114,7 +1559,7 @@ struct cl_io_operations {
void (*cio_fini)(const struct lu_env *env,
const struct cl_io_slice *slice);
} op[CIT_OP_NR];
- struct {
+
/**
* Submit pages from \a queue->c2_qin for IO, and move
* successfully submitted pages into \a queue->c2_qout. Return
@@ -2127,7 +1572,15 @@ struct cl_io_operations {
const struct cl_io_slice *slice,
enum cl_req_type crt,
struct cl_2queue *queue);
- } req_op[CRT_NR];
+ /**
+	 * Queue pages for asynchronous write-out.
+	 * The difference between cio_submit and cio_commit_async is that
+	 * cio_submit is for urgent requests.
+ */
+ int (*cio_commit_async)(const struct lu_env *env,
+ const struct cl_io_slice *slice,
+ struct cl_page_list *queue, int from, int to,
+ cl_commit_cbt cb);
/**
* Read missing page.
*
@@ -2140,31 +1593,6 @@ struct cl_io_operations {
const struct cl_io_slice *slice,
const struct cl_page_slice *page);
/**
- * Prepare write of a \a page. Called bottom-to-top by a top-level
- * cl_io_operations::op[CIT_WRITE]::cio_start() to prepare page for
- * get data from user-level buffer.
- *
- * \pre io->ci_type == CIT_WRITE
- *
- * \see vvp_io_prepare_write(), lov_io_prepare_write(),
- * osc_io_prepare_write().
- */
- int (*cio_prepare_write)(const struct lu_env *env,
- const struct cl_io_slice *slice,
- const struct cl_page_slice *page,
- unsigned from, unsigned to);
- /**
- *
- * \pre io->ci_type == CIT_WRITE
- *
- * \see vvp_io_commit_write(), lov_io_commit_write(),
- * osc_io_commit_write().
- */
- int (*cio_commit_write)(const struct lu_env *env,
- const struct cl_io_slice *slice,
- const struct cl_page_slice *page,
- unsigned from, unsigned to);
- /**
* Optional debugging helper. Print given io slice.
*/
int (*cio_print)(const struct lu_env *env, void *cookie,
@@ -2216,9 +1644,13 @@ enum cl_enq_flags {
*/
CEF_AGL = 0x00000020,
/**
+	 * enqueue a lock only to test whether a matching DLM lock exists.
+ */
+ CEF_PEEK = 0x00000040,
+ /**
* mask of enq_flags.
*/
- CEF_MASK = 0x0000003f,
+ CEF_MASK = 0x0000007f,
};
/**
@@ -2228,12 +1660,12 @@ enum cl_enq_flags {
struct cl_io_lock_link {
/** linkage into one of cl_lockset lists. */
struct list_head cill_linkage;
- struct cl_lock_descr cill_descr;
- struct cl_lock *cill_lock;
+ struct cl_lock cill_lock;
/** optional destructor */
void (*cill_fini)(const struct lu_env *env,
struct cl_io_lock_link *link);
};
+#define cill_descr cill_lock.cll_descr
/**
* Lock-set represents a collection of locks, that io needs at a
@@ -2267,8 +1699,6 @@ struct cl_io_lock_link {
struct cl_lockset {
/** locks to be acquired. */
struct list_head cls_todo;
- /** locks currently being processed. */
- struct list_head cls_curr;
/** locks acquired. */
struct list_head cls_done;
};
@@ -2632,9 +2062,7 @@ struct cl_site {
* and top-locks (and top-pages) are accounted here.
*/
struct cache_stats cs_pages;
- struct cache_stats cs_locks;
atomic_t cs_pages_state[CPS_NR];
- atomic_t cs_locks_state[CLS_NR];
};
int cl_site_init(struct cl_site *s, struct cl_device *top);
@@ -2725,7 +2153,7 @@ static inline void cl_device_fini(struct cl_device *d)
}
void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
- struct cl_object *obj,
+ struct cl_object *obj, pgoff_t index,
const struct cl_page_operations *ops);
void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
struct cl_object *obj,
@@ -2758,7 +2186,7 @@ int cl_object_glimpse(const struct lu_env *env, struct cl_object *obj,
struct ost_lvb *lvb);
int cl_conf_set(const struct lu_env *env, struct cl_object *obj,
const struct cl_object_conf *conf);
-void cl_object_prune(const struct lu_env *env, struct cl_object *obj);
+int cl_object_prune(const struct lu_env *env, struct cl_object *obj);
void cl_object_kill(const struct lu_env *env, struct cl_object *obj);
/**
@@ -2772,7 +2200,7 @@ static inline int cl_object_same(struct cl_object *o0, struct cl_object *o1)
static inline void cl_object_page_init(struct cl_object *clob, int size)
{
clob->co_slice_off = cl_object_header(clob)->coh_page_bufsize;
- cl_object_header(clob)->coh_page_bufsize += ALIGN(size, 8);
+ cl_object_header(clob)->coh_page_bufsize += cfs_size_round(size);
}
static inline void *cl_object_page_slice(struct cl_object *clob,
@@ -2781,6 +2209,16 @@ static inline void *cl_object_page_slice(struct cl_object *clob,
return (void *)((char *)page + clob->co_slice_off);
}
+/**
+ * Return refcount of cl_object.
+ */
+static inline int cl_object_refc(struct cl_object *clob)
+{
+ struct lu_object_header *header = clob->co_lu.lo_header;
+
+ return atomic_read(&header->loh_ref);
+}
+
/** @} cl_object */
/** \defgroup cl_page cl_page
@@ -2794,28 +2232,20 @@ enum {
};
/* callback of cl_page_gang_lookup() */
-typedef int (*cl_page_gang_cb_t) (const struct lu_env *, struct cl_io *,
- struct cl_page *, void *);
-int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io, pgoff_t start, pgoff_t end,
- cl_page_gang_cb_t cb, void *cbdata);
-struct cl_page *cl_page_lookup(struct cl_object_header *hdr, pgoff_t index);
struct cl_page *cl_page_find(const struct lu_env *env, struct cl_object *obj,
pgoff_t idx, struct page *vmpage,
enum cl_page_type type);
-struct cl_page *cl_page_find_sub(const struct lu_env *env,
- struct cl_object *obj,
- pgoff_t idx, struct page *vmpage,
- struct cl_page *parent);
+struct cl_page *cl_page_alloc(const struct lu_env *env,
+ struct cl_object *o, pgoff_t ind,
+ struct page *vmpage,
+ enum cl_page_type type);
void cl_page_get(struct cl_page *page);
void cl_page_put(const struct lu_env *env, struct cl_page *page);
void cl_page_print(const struct lu_env *env, void *cookie, lu_printer_t printer,
const struct cl_page *pg);
void cl_page_header_print(const struct lu_env *env, void *cookie,
lu_printer_t printer, const struct cl_page *pg);
-struct page *cl_page_vmpage(const struct lu_env *env, struct cl_page *page);
struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj);
-struct cl_page *cl_page_top(struct cl_page *page);
const struct cl_page_slice *cl_page_at(const struct cl_page *page,
const struct lu_device_type *dtype);
@@ -2872,12 +2302,10 @@ int cl_page_flush(const struct lu_env *env, struct cl_io *io,
void cl_page_discard(const struct lu_env *env, struct cl_io *io,
struct cl_page *pg);
void cl_page_delete(const struct lu_env *env, struct cl_page *pg);
-int cl_page_unmap(const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg);
int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg);
void cl_page_export(const struct lu_env *env, struct cl_page *pg, int uptodate);
int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page);
+ struct cl_page *page, pgoff_t *max_index);
loff_t cl_offset(const struct cl_object *obj, pgoff_t idx);
pgoff_t cl_index(const struct cl_object *obj, loff_t offset);
int cl_page_size(const struct cl_object *obj);
@@ -2890,138 +2318,66 @@ void cl_lock_descr_print(const struct lu_env *env, void *cookie,
const struct cl_lock_descr *descr);
/* @} helper */
+/**
+ * Data structure managing a client's cached pages. A count of "unstable"
+ * pages and an LRU of clean pages are maintained; "unstable" pages are
+ * pages pinned by the ptlrpc layer for recovery purposes.
+ */
+struct cl_client_cache {
+ /**
+ * # of users (OSCs)
+ */
+ atomic_t ccc_users;
+ /**
+ * # of threads are doing shrinking
+	 * # of threads currently shrinking this cache
+ unsigned int ccc_lru_shrinkers;
+ /**
+ * # of LRU entries available
+ */
+ atomic_t ccc_lru_left;
+ /**
+	 * List of entities (OSCs) for this LRU cache
+ */
+ struct list_head ccc_lru;
+ /**
+ * Max # of LRU entries
+ */
+ unsigned long ccc_lru_max;
+ /**
+ * Lock to protect ccc_lru list
+ */
+ spinlock_t ccc_lru_lock;
+ /**
+ * # of unstable pages for this mount point
+ */
+ atomic_t ccc_unstable_nr;
+ /**
+ * Waitq for awaiting unstable pages to reach zero.
+	 * Wait queue used to wait for unstable pages to reach zero.
+	 * Used at unmount time and signaled on BRW commit
+ wait_queue_head_t ccc_unstable_waitq;
+
+};
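(Not part of the patch.) A sketch of how a user of this structure might initialize it; the helper name is hypothetical and only the fields declared above are touched:

static void cl_client_cache_setup(struct cl_client_cache *cache,
				  unsigned long lru_max)
{
	atomic_set(&cache->ccc_users, 0);
	cache->ccc_lru_shrinkers = 0;
	cache->ccc_lru_max = lru_max;
	atomic_set(&cache->ccc_lru_left, (int)lru_max);
	INIT_LIST_HEAD(&cache->ccc_lru);
	spin_lock_init(&cache->ccc_lru_lock);
	atomic_set(&cache->ccc_unstable_nr, 0);
	init_waitqueue_head(&cache->ccc_unstable_waitq);
}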
+
/** @} cl_page */
/** \defgroup cl_lock cl_lock
* @{
*/
-struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io,
- const struct cl_lock_descr *need,
- const char *scope, const void *source);
-struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
- const struct cl_lock_descr *need,
- const char *scope, const void *source);
-struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
- const struct cl_lock_descr *need,
- const char *scope, const void *source);
-struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
- struct cl_object *obj, pgoff_t index,
- struct cl_lock *except, int pending,
- int canceld);
-static inline struct cl_lock *cl_lock_at_page(const struct lu_env *env,
- struct cl_object *obj,
- struct cl_page *page,
- struct cl_lock *except,
- int pending, int canceld)
-{
- LASSERT(cl_object_header(obj) == cl_object_header(page->cp_obj));
- return cl_lock_at_pgoff(env, obj, page->cp_index, except,
- pending, canceld);
-}
-
+int cl_lock_request(const struct lu_env *env, struct cl_io *io,
+ struct cl_lock *lock);
+int cl_lock_init(const struct lu_env *env, struct cl_lock *lock,
+ const struct cl_io *io);
+void cl_lock_fini(const struct lu_env *env, struct cl_lock *lock);
const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
const struct lu_device_type *dtype);
-
-void cl_lock_get(struct cl_lock *lock);
-void cl_lock_get_trust(struct cl_lock *lock);
-void cl_lock_put(const struct lu_env *env, struct cl_lock *lock);
-void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source);
-void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source);
-void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source);
-void cl_lock_release(const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source);
-void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock);
-void cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock);
-
-int cl_lock_is_intransit(struct cl_lock *lock);
-
-int cl_lock_enqueue_wait(const struct lu_env *env, struct cl_lock *lock,
- int keep_mutex);
-
-/** \name statemachine statemachine
- * Interface to lock state machine consists of 3 parts:
- *
- * - "try" functions that attempt to effect a state transition. If state
- * transition is not possible right now (e.g., if it has to wait for some
- * asynchronous event to occur), these functions return
- * cl_lock_transition::CLO_WAIT.
- *
- * - "non-try" functions that implement synchronous blocking interface on
- * top of non-blocking "try" functions. These functions repeatedly call
- * corresponding "try" versions, and if state transition is not possible
- * immediately, wait for lock state change.
- *
- * - methods from cl_lock_operations, called by "try" functions. Lock can
- * be advanced to the target state only when all layers voted that they
- * are ready for this transition. "Try" functions call methods under lock
- * mutex. If a layer had to release a mutex, it re-acquires it and returns
- * cl_lock_transition::CLO_REPEAT, causing "try" function to call all
- * layers again.
- *
- * TRY NON-TRY METHOD FINAL STATE
- *
- * cl_enqueue_try() cl_enqueue() cl_lock_operations::clo_enqueue() CLS_ENQUEUED
- *
- * cl_wait_try() cl_wait() cl_lock_operations::clo_wait() CLS_HELD
- *
- * cl_unuse_try() cl_unuse() cl_lock_operations::clo_unuse() CLS_CACHED
- *
- * cl_use_try() NONE cl_lock_operations::clo_use() CLS_HELD
- *
- * @{
- */
-
-int cl_wait(const struct lu_env *env, struct cl_lock *lock);
-void cl_unuse(const struct lu_env *env, struct cl_lock *lock);
-int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
- struct cl_io *io, __u32 flags);
-int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock);
-int cl_wait_try(const struct lu_env *env, struct cl_lock *lock);
-int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic);
-
-/** @} statemachine */
-
-void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock);
-int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock);
-void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
- enum cl_lock_state state);
-int cl_queue_match(const struct list_head *queue,
- const struct cl_lock_descr *need);
-
-void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock);
-void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock);
-int cl_lock_is_mutexed(struct cl_lock *lock);
-int cl_lock_nr_mutexed(const struct lu_env *env);
-int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock);
-int cl_lock_ext_match(const struct cl_lock_descr *has,
- const struct cl_lock_descr *need);
-int cl_lock_descr_match(const struct cl_lock_descr *has,
- const struct cl_lock_descr *need);
-int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need);
-int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
- const struct cl_lock_descr *desc);
-
-void cl_lock_closure_init(const struct lu_env *env,
- struct cl_lock_closure *closure,
- struct cl_lock *origin, int wait);
-void cl_lock_closure_fini(struct cl_lock_closure *closure);
-int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
- struct cl_lock_closure *closure);
-void cl_lock_disclosure(const struct lu_env *env,
- struct cl_lock_closure *closure);
-int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock,
- struct cl_lock_closure *closure);
-
+void cl_lock_release(const struct lu_env *env, struct cl_lock *lock);
+int cl_lock_enqueue(const struct lu_env *env, struct cl_io *io,
+ struct cl_lock *lock, struct cl_sync_io *anchor);
void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock);
-void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock);
-void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error);
-void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int wait);
-
-unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock);
/** @} cl_lock */
@@ -3050,15 +2406,14 @@ int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
struct cl_lock_descr *descr);
int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
struct cl_page *page);
-int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, unsigned from, unsigned to);
-int cl_io_commit_write(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, unsigned from, unsigned to);
int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
enum cl_req_type iot, struct cl_2queue *queue);
int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
enum cl_req_type iot, struct cl_2queue *queue,
long timeout);
+int cl_io_commit_async(const struct lu_env *env, struct cl_io *io,
+ struct cl_page_list *queue, int from, int to,
+ cl_commit_cbt cb);
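(Not part of the patch.) The commit path now takes a whole page list plus a per-page callback, replacing the old per-page cio_prepare_write()/cio_commit_write() pair. A sketch, where note_committed() and the byte offsets are purely illustrative:

static void note_committed(const struct lu_env *env, struct cl_io *io,
			   struct cl_page *page)
{
	/* called once per page after the layers have queued it for write-out */
}

static int commit_pages(const struct lu_env *env, struct cl_io *io,
			struct cl_page_list *plist, int from, int to)
{
	/* from/to bound the dirty bytes in the first and last page */
	return cl_io_commit_async(env, io, plist, from, to, note_committed);
}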
int cl_io_is_going(const struct lu_env *env);
/**
@@ -3114,6 +2469,12 @@ static inline struct cl_page *cl_page_list_last(struct cl_page_list *plist)
return list_entry(plist->pl_pages.prev, struct cl_page, cp_batch);
}
+static inline struct cl_page *cl_page_list_first(struct cl_page_list *plist)
+{
+ LASSERT(plist->pl_nr > 0);
+ return list_entry(plist->pl_pages.next, struct cl_page, cp_batch);
+}
+
/**
* Iterate over pages in a page list.
*/
@@ -3130,9 +2491,14 @@ void cl_page_list_init(struct cl_page_list *plist);
void cl_page_list_add(struct cl_page_list *plist, struct cl_page *page);
void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
struct cl_page *page);
+void cl_page_list_move_head(struct cl_page_list *dst, struct cl_page_list *src,
+ struct cl_page *page);
void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head);
+void cl_page_list_del(const struct lu_env *env, struct cl_page_list *plist,
+ struct cl_page *page);
void cl_page_list_disown(const struct lu_env *env,
struct cl_io *io, struct cl_page_list *plist);
+void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist);
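(Not part of the patch.) A small sketch of the page-list helpers declared above; the wrapper name is hypothetical and the pages are assumed to be owned by the caller:

static void page_list_example(const struct lu_env *env,
			      struct cl_page_list *plist,
			      struct cl_page *first, struct cl_page *second)
{
	cl_page_list_init(plist);
	cl_page_list_add(plist, first);
	cl_page_list_add(plist, second);

	/* cl_page_list_first()/cl_page_list_last() give O(1) access to the ends */
	LASSERT(cl_page_list_first(plist) == first);

	/* releases whatever references the list still holds */
	cl_page_list_fini(env, plist);
}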
void cl_2queue_init(struct cl_2queue *queue);
void cl_2queue_disown(const struct lu_env *env,
@@ -3177,13 +2543,18 @@ struct cl_sync_io {
atomic_t csi_barrier;
/** completion to be signaled when transfer is complete. */
wait_queue_head_t csi_waitq;
+ /** callback to invoke when this IO is finished */
+ void (*csi_end_io)(const struct lu_env *,
+ struct cl_sync_io *);
};
-void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages);
-int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *queue, struct cl_sync_io *anchor,
+void cl_sync_io_init(struct cl_sync_io *anchor, int nr,
+ void (*end)(const struct lu_env *, struct cl_sync_io *));
+int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
long timeout);
-void cl_sync_io_note(struct cl_sync_io *anchor, int ioret);
+void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor,
+ int ioret);
+void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor);
/** @} cl_sync_io */
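(Not part of the patch.) cl_sync_io now carries an end-of-IO callback and is waited on directly rather than via a page list. A sketch, assuming every submitted transfer eventually calls cl_sync_io_note() on the anchor and that a timeout of 0 means wait indefinitely:

static int wait_for_transfers(const struct lu_env *env, int nr_transfers)
{
	struct cl_sync_io anchor;

	/* cl_sync_io_end() is assumed here as the plain completion callback */
	cl_sync_io_init(&anchor, nr_transfers, cl_sync_io_end);

	/* ... submit nr_transfers I/Os; each completion handler does
	 *     cl_sync_io_note(env, &anchor, ioret); ... */

	return cl_sync_io_wait(env, &anchor, 0 /* assumed: no timeout */);
}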
@@ -3241,6 +2612,9 @@ void *cl_env_reenter(void);
void cl_env_reexit(void *cookie);
void cl_env_implant(struct lu_env *env, int *refcheck);
void cl_env_unplant(struct lu_env *env, int *refcheck);
+unsigned int cl_env_cache_purge(unsigned int nr);
+struct lu_env *cl_env_percpu_get(void);
+void cl_env_percpu_put(struct lu_env *env);
/** @} cl_env */
diff --git a/drivers/staging/lustre/lustre/include/lclient.h b/drivers/staging/lustre/lustre/include/lclient.h
deleted file mode 100644
index 5d839a9f789f..000000000000
--- a/drivers/staging/lustre/lustre/include/lclient.h
+++ /dev/null
@@ -1,408 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * Definitions shared between vvp and liblustre, and other clients in the
- * future.
- *
- * Author: Oleg Drokin <oleg.drokin@sun.com>
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#ifndef LCLIENT_H
-#define LCLIENT_H
-
-blkcnt_t dirty_cnt(struct inode *inode);
-
-int cl_glimpse_size0(struct inode *inode, int agl);
-int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
- struct inode *inode, struct cl_object *clob, int agl);
-
-static inline int cl_glimpse_size(struct inode *inode)
-{
- return cl_glimpse_size0(inode, 0);
-}
-
-static inline int cl_agl(struct inode *inode)
-{
- return cl_glimpse_size0(inode, 1);
-}
-
-/**
- * Locking policy for setattr.
- */
-enum ccc_setattr_lock_type {
- /** Locking is done by server */
- SETATTR_NOLOCK,
- /** Extent lock is enqueued */
- SETATTR_EXTENT_LOCK,
- /** Existing local extent lock is used */
- SETATTR_MATCH_LOCK
-};
-
-/**
- * IO state private to vvp or slp layers.
- */
-struct ccc_io {
- /** super class */
- struct cl_io_slice cui_cl;
- struct cl_io_lock_link cui_link;
- /**
- * I/O vector information to or from which read/write is going.
- */
- struct iov_iter *cui_iter;
- /**
- * Total size for the left IO.
- */
- size_t cui_tot_count;
-
- union {
- struct {
- enum ccc_setattr_lock_type cui_local_lock;
- } setattr;
- } u;
- /**
- * True iff io is processing glimpse right now.
- */
- int cui_glimpse;
- /**
- * Layout version when this IO is initialized
- */
- __u32 cui_layout_gen;
- /**
- * File descriptor against which IO is done.
- */
- struct ll_file_data *cui_fd;
- struct kiocb *cui_iocb;
-};
-
-/**
- * True, if \a io is a normal io, False for splice_{read,write}.
- * must be implemented in arch specific code.
- */
-int cl_is_normalio(const struct lu_env *env, const struct cl_io *io);
-
-extern struct lu_context_key ccc_key;
-extern struct lu_context_key ccc_session_key;
-
-struct ccc_thread_info {
- struct cl_lock_descr cti_descr;
- struct cl_io cti_io;
- struct cl_attr cti_attr;
-};
-
-static inline struct ccc_thread_info *ccc_env_info(const struct lu_env *env)
-{
- struct ccc_thread_info *info;
-
- info = lu_context_key_get(&env->le_ctx, &ccc_key);
- LASSERT(info);
- return info;
-}
-
-static inline struct cl_attr *ccc_env_thread_attr(const struct lu_env *env)
-{
- struct cl_attr *attr = &ccc_env_info(env)->cti_attr;
-
- memset(attr, 0, sizeof(*attr));
- return attr;
-}
-
-static inline struct cl_io *ccc_env_thread_io(const struct lu_env *env)
-{
- struct cl_io *io = &ccc_env_info(env)->cti_io;
-
- memset(io, 0, sizeof(*io));
- return io;
-}
-
-struct ccc_session {
- struct ccc_io cs_ios;
-};
-
-static inline struct ccc_session *ccc_env_session(const struct lu_env *env)
-{
- struct ccc_session *ses;
-
- ses = lu_context_key_get(env->le_ses, &ccc_session_key);
- LASSERT(ses);
- return ses;
-}
-
-static inline struct ccc_io *ccc_env_io(const struct lu_env *env)
-{
- return &ccc_env_session(env)->cs_ios;
-}
-
-/**
- * ccc-private object state.
- */
-struct ccc_object {
- struct cl_object_header cob_header;
- struct cl_object cob_cl;
- struct inode *cob_inode;
-
- /**
- * A list of dirty pages pending IO in the cache. Used by
- * SOM. Protected by ll_inode_info::lli_lock.
- *
- * \see ccc_page::cpg_pending_linkage
- */
- struct list_head cob_pending_list;
-
- /**
- * Access this counter is protected by inode->i_sem. Now that
- * the lifetime of transient pages must be covered by inode sem,
- * we don't need to hold any lock..
- */
- int cob_transient_pages;
- /**
- * Number of outstanding mmaps on this file.
- *
- * \see ll_vm_open(), ll_vm_close().
- */
- atomic_t cob_mmap_cnt;
-
- /**
- * various flags
- * cob_discard_page_warned
- * if pages belonging to this object are discarded when a client
- * is evicted, some debug info will be printed, this flag will be set
- * during processing the first discarded page, then avoid flooding
- * debug message for lots of discarded pages.
- *
- * \see ll_dirty_page_discard_warn.
- */
- unsigned int cob_discard_page_warned:1;
-};
-
-/**
- * ccc-private page state.
- */
-struct ccc_page {
- struct cl_page_slice cpg_cl;
- int cpg_defer_uptodate;
- int cpg_ra_used;
- int cpg_write_queued;
- /**
- * Non-empty iff this page is already counted in
- * ccc_object::cob_pending_list. Protected by
- * ccc_object::cob_pending_guard. This list is only used as a flag,
- * that is, never iterated through, only checked for list_empty(), but
- * having a list is useful for debugging.
- */
- struct list_head cpg_pending_linkage;
- /** VM page */
- struct page *cpg_page;
-};
-
-static inline struct ccc_page *cl2ccc_page(const struct cl_page_slice *slice)
-{
- return container_of(slice, struct ccc_page, cpg_cl);
-}
-
-struct ccc_device {
- struct cl_device cdv_cl;
- struct super_block *cdv_sb;
- struct cl_device *cdv_next;
-};
-
-struct ccc_lock {
- struct cl_lock_slice clk_cl;
-};
-
-struct ccc_req {
- struct cl_req_slice crq_cl;
-};
-
-void *ccc_key_init (const struct lu_context *ctx,
- struct lu_context_key *key);
-void ccc_key_fini (const struct lu_context *ctx,
- struct lu_context_key *key, void *data);
-void *ccc_session_key_init(const struct lu_context *ctx,
- struct lu_context_key *key);
-void ccc_session_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data);
-
-int ccc_device_init (const struct lu_env *env,
- struct lu_device *d,
- const char *name, struct lu_device *next);
-struct lu_device *ccc_device_fini (const struct lu_env *env,
- struct lu_device *d);
-struct lu_device *ccc_device_alloc(const struct lu_env *env,
- struct lu_device_type *t,
- struct lustre_cfg *cfg,
- const struct lu_device_operations *luops,
- const struct cl_device_operations *clops);
-struct lu_device *ccc_device_free (const struct lu_env *env,
- struct lu_device *d);
-struct lu_object *ccc_object_alloc(const struct lu_env *env,
- const struct lu_object_header *hdr,
- struct lu_device *dev,
- const struct cl_object_operations *clops,
- const struct lu_object_operations *luops);
-
-int ccc_req_init(const struct lu_env *env, struct cl_device *dev,
- struct cl_req *req);
-void ccc_umount(const struct lu_env *env, struct cl_device *dev);
-int ccc_global_init(struct lu_device_type *device_type);
-void ccc_global_fini(struct lu_device_type *device_type);
-int ccc_object_init0(const struct lu_env *env, struct ccc_object *vob,
- const struct cl_object_conf *conf);
-int ccc_object_init(const struct lu_env *env, struct lu_object *obj,
- const struct lu_object_conf *conf);
-void ccc_object_free(const struct lu_env *env, struct lu_object *obj);
-int ccc_lock_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io,
- const struct cl_lock_operations *lkops);
-int ccc_object_glimpse(const struct lu_env *env,
- const struct cl_object *obj, struct ost_lvb *lvb);
-struct page *ccc_page_vmpage(const struct lu_env *env,
- const struct cl_page_slice *slice);
-int ccc_page_is_under_lock(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io);
-int ccc_fail(const struct lu_env *env, const struct cl_page_slice *slice);
-int ccc_transient_page_prep(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io);
-void ccc_lock_delete(const struct lu_env *env,
- const struct cl_lock_slice *slice);
-void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice);
-int ccc_lock_enqueue(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- struct cl_io *io, __u32 enqflags);
-int ccc_lock_use(const struct lu_env *env, const struct cl_lock_slice *slice);
-int ccc_lock_unuse(const struct lu_env *env, const struct cl_lock_slice *slice);
-int ccc_lock_wait(const struct lu_env *env, const struct cl_lock_slice *slice);
-int ccc_lock_fits_into(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- const struct cl_lock_descr *need,
- const struct cl_io *io);
-void ccc_lock_state(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- enum cl_lock_state state);
-
-int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
- __u32 enqflags, enum cl_lock_mode mode,
- pgoff_t start, pgoff_t end);
-int ccc_io_one_lock(const struct lu_env *env, struct cl_io *io,
- __u32 enqflags, enum cl_lock_mode mode,
- loff_t start, loff_t end);
-void ccc_io_end(const struct lu_env *env, const struct cl_io_slice *ios);
-void ccc_io_advance(const struct lu_env *env, const struct cl_io_slice *ios,
- size_t nob);
-void ccc_io_update_iov(const struct lu_env *env, struct ccc_io *cio,
- struct cl_io *io);
-int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io, loff_t start, size_t count, int *exceed);
-void ccc_req_completion(const struct lu_env *env,
- const struct cl_req_slice *slice, int ioret);
-void ccc_req_attr_set(const struct lu_env *env,
- const struct cl_req_slice *slice,
- const struct cl_object *obj,
- struct cl_req_attr *oa, u64 flags);
-
-struct lu_device *ccc2lu_dev (struct ccc_device *vdv);
-struct lu_object *ccc2lu (struct ccc_object *vob);
-struct ccc_device *lu2ccc_dev (const struct lu_device *d);
-struct ccc_device *cl2ccc_dev (const struct cl_device *d);
-struct ccc_object *lu2ccc (const struct lu_object *obj);
-struct ccc_object *cl2ccc (const struct cl_object *obj);
-struct ccc_lock *cl2ccc_lock (const struct cl_lock_slice *slice);
-struct ccc_io *cl2ccc_io (const struct lu_env *env,
- const struct cl_io_slice *slice);
-struct ccc_req *cl2ccc_req (const struct cl_req_slice *slice);
-struct page *cl2vm_page (const struct cl_page_slice *slice);
-struct inode *ccc_object_inode(const struct cl_object *obj);
-struct ccc_object *cl_inode2ccc (struct inode *inode);
-
-int cl_setattr_ost(struct inode *inode, const struct iattr *attr);
-
-int ccc_object_invariant(const struct cl_object *obj);
-int cl_file_inode_init(struct inode *inode, struct lustre_md *md);
-void cl_inode_fini(struct inode *inode);
-int cl_local_size(struct inode *inode);
-
-__u16 ll_dirent_type_get(struct lu_dirent *ent);
-__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32);
-__u32 cl_fid_build_gen(const struct lu_fid *fid);
-
-# define CLOBINVRNT(env, clob, expr) \
- ((void)sizeof(env), (void)sizeof(clob), (void)sizeof(!!(expr)))
-
-int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp);
-int cl_ocd_update(struct obd_device *host,
- struct obd_device *watched,
- enum obd_notify_event ev, void *owner, void *data);
-
-struct ccc_grouplock {
- struct lu_env *cg_env;
- struct cl_io *cg_io;
- struct cl_lock *cg_lock;
- unsigned long cg_gid;
-};
-
-int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
- struct ccc_grouplock *cg);
-void cl_put_grouplock(struct ccc_grouplock *cg);
-
-/**
- * New interfaces to get and put lov_stripe_md from lov layer. This violates
- * layering because lov_stripe_md is supposed to be a private data in lov.
- *
- * NB: If you find you have to use these interfaces for your new code, please
- * think about it again. These interfaces may be removed in the future for
- * better layering.
- */
-struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj);
-void lov_lsm_put(struct cl_object *clobj, struct lov_stripe_md *lsm);
-int lov_read_and_clear_async_rc(struct cl_object *clob);
-
-struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode);
-void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm);
-
-/**
- * Data structure managing a client's cached clean pages. An LRU of
- * pages is maintained, along with other statistics.
- */
-struct cl_client_cache {
- atomic_t ccc_users; /* # of users (OSCs) of this data */
- struct list_head ccc_lru; /* LRU list of cached clean pages */
- spinlock_t ccc_lru_lock; /* lock for list */
- atomic_t ccc_lru_left; /* # of LRU entries available */
- unsigned long ccc_lru_max; /* Max # of LRU entries possible */
- unsigned int ccc_lru_shrinkers; /* # of threads reclaiming */
-};
-
-#endif /*LCLIENT_H */
diff --git a/drivers/staging/lustre/lustre/include/linux/obd.h b/drivers/staging/lustre/lustre/include/linux/obd.h
deleted file mode 100644
index 3907bf4ce07c..000000000000
--- a/drivers/staging/lustre/lustre/include/linux/obd.h
+++ /dev/null
@@ -1,125 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2012, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- */
-
-#ifndef __LINUX_OBD_H
-#define __LINUX_OBD_H
-
-#ifndef __OBD_H
-#error Do not #include this file directly. #include <obd.h> instead
-#endif
-
-#include "../obd_support.h"
-
-#include <linux/fs.h>
-#include <linux/list.h>
-#include <linux/sched.h> /* for struct task_struct, for current.h */
-#include <linux/mount.h>
-
-#include "../lustre_intent.h"
-
-struct ll_iattr {
- struct iattr iattr;
- unsigned int ia_attr_flags;
-};
-
-#define CLIENT_OBD_LIST_LOCK_DEBUG 1
-
-struct client_obd_lock {
- spinlock_t lock;
-
- unsigned long time;
- struct task_struct *task;
- const char *func;
- int line;
-};
-
-static inline void __client_obd_list_lock(struct client_obd_lock *lock,
- const char *func, int line)
-{
- unsigned long cur = jiffies;
-
- while (1) {
- if (spin_trylock(&lock->lock)) {
- LASSERT(!lock->task);
- lock->task = current;
- lock->func = func;
- lock->line = line;
- lock->time = jiffies;
- break;
- }
-
- if (time_before(cur + 5 * HZ, jiffies) &&
- time_before(lock->time + 5 * HZ, jiffies)) {
- struct task_struct *task = lock->task;
-
- if (!task)
- continue;
-
- LCONSOLE_WARN("%s:%d: lock %p was acquired by <%s:%d:%s:%d> for %lu seconds.\n",
- current->comm, current->pid,
- lock, task->comm, task->pid,
- lock->func, lock->line,
- (jiffies - lock->time) / HZ);
- LCONSOLE_WARN("====== for current process =====\n");
- dump_stack();
- LCONSOLE_WARN("====== end =======\n");
- set_current_state(TASK_UNINTERRUPTIBLE);
- schedule_timeout(1000 * HZ);
- }
- cpu_relax();
- }
-}
-
-#define client_obd_list_lock(lock) \
- __client_obd_list_lock(lock, __func__, __LINE__)
-
-static inline void client_obd_list_unlock(struct client_obd_lock *lock)
-{
- LASSERT(lock->task);
- lock->task = NULL;
- lock->time = jiffies;
- spin_unlock(&lock->lock);
-}
-
-static inline void client_obd_list_lock_init(struct client_obd_lock *lock)
-{
- spin_lock_init(&lock->lock);
-}
-
-static inline void client_obd_list_lock_done(struct client_obd_lock *lock)
-{}
-
-#endif /* __LINUX_OBD_H */
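
With drivers/staging/lustre/lustre/include/linux/obd.h removed, the debugging client_obd_list_lock wrapper above has no remaining users; the obd.h hunk further down converts cl_loi_list_lock and cl_lru_list_lock in struct client_obd to plain spinlock_t. A minimal sketch of what a call site looks like after that conversion (example_enqueue and item are illustrative names, not part of this series):

static void example_enqueue(struct client_obd *cli, struct list_head *item)
{
	/* before this series: client_obd_list_lock(&cli->cl_loi_list_lock); */
	spin_lock(&cli->cl_loi_list_lock);
	list_add_tail(item, &cli->cl_loi_ready_list);
	spin_unlock(&cli->cl_loi_list_lock);
	/* before: client_obd_list_unlock(&cli->cl_loi_list_lock); */
}
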
diff --git a/drivers/staging/lustre/lustre/include/lu_object.h b/drivers/staging/lustre/lustre/include/lu_object.h
index 242bb1ef6245..2816512185af 100644
--- a/drivers/staging/lustre/lustre/include/lu_object.h
+++ b/drivers/staging/lustre/lustre/include/lu_object.h
@@ -198,7 +198,6 @@ typedef int (*lu_printer_t)(const struct lu_env *env,
* Operations specific for particular lu_object.
*/
struct lu_object_operations {
-
/**
* Allocate lower-layer parts of the object by calling
* lu_device_operations::ldo_object_alloc() of the corresponding
@@ -656,21 +655,21 @@ static inline struct seq_server_site *lu_site2seq(const struct lu_site *s)
* @{
*/
-int lu_site_init (struct lu_site *s, struct lu_device *d);
-void lu_site_fini (struct lu_site *s);
-int lu_site_init_finish (struct lu_site *s);
-void lu_stack_fini (const struct lu_env *env, struct lu_device *top);
-void lu_device_get (struct lu_device *d);
-void lu_device_put (struct lu_device *d);
-int lu_device_init (struct lu_device *d, struct lu_device_type *t);
-void lu_device_fini (struct lu_device *d);
-int lu_object_header_init(struct lu_object_header *h);
+int lu_site_init(struct lu_site *s, struct lu_device *d);
+void lu_site_fini(struct lu_site *s);
+int lu_site_init_finish(struct lu_site *s);
+void lu_stack_fini(const struct lu_env *env, struct lu_device *top);
+void lu_device_get(struct lu_device *d);
+void lu_device_put(struct lu_device *d);
+int lu_device_init(struct lu_device *d, struct lu_device_type *t);
+void lu_device_fini(struct lu_device *d);
+int lu_object_header_init(struct lu_object_header *h);
void lu_object_header_fini(struct lu_object_header *h);
-int lu_object_init (struct lu_object *o,
- struct lu_object_header *h, struct lu_device *d);
-void lu_object_fini (struct lu_object *o);
-void lu_object_add_top (struct lu_object_header *h, struct lu_object *o);
-void lu_object_add (struct lu_object *before, struct lu_object *o);
+int lu_object_init(struct lu_object *o,
+ struct lu_object_header *h, struct lu_device *d);
+void lu_object_fini(struct lu_object *o);
+void lu_object_add_top(struct lu_object_header *h, struct lu_object *o);
+void lu_object_add(struct lu_object *before, struct lu_object *o);
/**
* Helpers to initialize and finalize device types.
@@ -781,9 +780,8 @@ int lu_cdebug_printer(const struct lu_env *env,
*/
#define LU_OBJECT_DEBUG(mask, env, object, format, ...) \
do { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
- \
if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
lu_object_print(env, &msgdata, lu_cdebug_printer, object);\
CDEBUG(mask, format, ## __VA_ARGS__); \
} \
@@ -794,9 +792,8 @@ do { \
*/
#define LU_OBJECT_HEADER(mask, env, object, format, ...) \
do { \
- LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
- \
if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
+ LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
lu_object_header_print(env, &msgdata, lu_cdebug_printer,\
(object)->lo_header); \
lu_cdebug_printer(env, &msgdata, "\n"); \
@@ -1007,6 +1004,10 @@ enum lu_context_tag {
*/
LCT_LOCAL = 1 << 7,
/**
+ * session for server thread
+ **/
+ LCT_SERVER_SESSION = BIT(8),
+ /**
* Set when at least one of keys, having values in this context has
* non-NULL lu_context_key::lct_exit() method. This is used to
* optimize lu_context_exit() call.
@@ -1118,7 +1119,7 @@ struct lu_context_key {
{ \
type *value; \
\
- CLASSERT(PAGE_SIZE >= sizeof (*value)); \
+ CLASSERT(PAGE_SIZE >= sizeof(*value)); \
\
value = kzalloc(sizeof(*value), GFP_NOFS); \
if (!value) \
@@ -1154,12 +1155,12 @@ do { \
(key)->lct_owner = THIS_MODULE; \
} while (0)
-int lu_context_key_register(struct lu_context_key *key);
-void lu_context_key_degister(struct lu_context_key *key);
-void *lu_context_key_get (const struct lu_context *ctx,
- const struct lu_context_key *key);
-void lu_context_key_quiesce (struct lu_context_key *key);
-void lu_context_key_revive (struct lu_context_key *key);
+int lu_context_key_register(struct lu_context_key *key);
+void lu_context_key_degister(struct lu_context_key *key);
+void *lu_context_key_get(const struct lu_context *ctx,
+ const struct lu_context_key *key);
+void lu_context_key_quiesce(struct lu_context_key *key);
+void lu_context_key_revive(struct lu_context_key *key);
/*
* LU_KEY_INIT_GENERIC() has to be a macro to correctly determine an
@@ -1216,21 +1217,21 @@ void lu_context_key_revive (struct lu_context_key *key);
LU_TYPE_START(mod, __VA_ARGS__); \
LU_TYPE_STOP(mod, __VA_ARGS__)
-int lu_context_init (struct lu_context *ctx, __u32 tags);
-void lu_context_fini (struct lu_context *ctx);
-void lu_context_enter (struct lu_context *ctx);
-void lu_context_exit (struct lu_context *ctx);
-int lu_context_refill(struct lu_context *ctx);
+int lu_context_init(struct lu_context *ctx, __u32 tags);
+void lu_context_fini(struct lu_context *ctx);
+void lu_context_enter(struct lu_context *ctx);
+void lu_context_exit(struct lu_context *ctx);
+int lu_context_refill(struct lu_context *ctx);
/*
* Helper functions to operate on multiple keys. These are used by the default
* device type operations, defined by LU_TYPE_INIT_FINI().
*/
-int lu_context_key_register_many(struct lu_context_key *k, ...);
+int lu_context_key_register_many(struct lu_context_key *k, ...);
void lu_context_key_degister_many(struct lu_context_key *k, ...);
-void lu_context_key_revive_many (struct lu_context_key *k, ...);
-void lu_context_key_quiesce_many (struct lu_context_key *k, ...);
+void lu_context_key_revive_many(struct lu_context_key *k, ...);
+void lu_context_key_quiesce_many(struct lu_context_key *k, ...);
/**
* Environment.
@@ -1246,9 +1247,9 @@ struct lu_env {
struct lu_context *le_ses;
};
-int lu_env_init (struct lu_env *env, __u32 tags);
-void lu_env_fini (struct lu_env *env);
-int lu_env_refill(struct lu_env *env);
+int lu_env_init(struct lu_env *env, __u32 tags);
+void lu_env_fini(struct lu_env *env);
+int lu_env_refill(struct lu_env *env);
/** @} lu_context */
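
The lu_object.h hunks above mostly normalize prototype spacing, but they also move the msgdata declaration inside the debug-mask check and add the LCT_SERVER_SESSION context tag. For reference, a minimal sketch of the lu_env lifecycle those prototypes describe (the LCT_CL_THREAD tag choice and the helper name are illustrative):

static int example_with_env(void)
{
	struct lu_env env;
	int rc;

	rc = lu_env_init(&env, LCT_CL_THREAD);
	if (rc)
		return rc;

	/* ... call cl_/lu_ interfaces that take a struct lu_env ... */

	lu_env_fini(&env);
	return 0;
}
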
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
index 5aae1d06a5fa..9c53c1792dc8 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_idl.h
@@ -183,6 +183,12 @@ struct lu_seq_range {
__u32 lsr_flags;
};
+struct lu_seq_range_array {
+ __u32 lsra_count;
+ __u32 lsra_padding;
+ struct lu_seq_range lsra_lsr[0];
+};
+
#define LU_SEQ_RANGE_MDT 0x0
#define LU_SEQ_RANGE_OST 0x1
#define LU_SEQ_RANGE_ANY 0x3
@@ -578,7 +584,7 @@ static inline __u64 ostid_seq(const struct ost_id *ostid)
if (fid_seq_is_mdt0(ostid->oi.oi_seq))
return FID_SEQ_OST_MDT0;
- if (fid_seq_is_default(ostid->oi.oi_seq))
+ if (unlikely(fid_seq_is_default(ostid->oi.oi_seq)))
return FID_SEQ_LOV_DEFAULT;
if (fid_is_idif(&ostid->oi_fid))
@@ -590,9 +596,12 @@ static inline __u64 ostid_seq(const struct ost_id *ostid)
/* extract OST objid from a wire ost_id (id/seq) pair */
static inline __u64 ostid_id(const struct ost_id *ostid)
{
- if (fid_seq_is_mdt0(ostid_seq(ostid)))
+ if (fid_seq_is_mdt0(ostid->oi.oi_seq))
return ostid->oi.oi_id & IDIF_OID_MASK;
+ if (unlikely(fid_seq_is_default(ostid->oi.oi_seq)))
+ return ostid->oi.oi_id;
+
if (fid_is_idif(&ostid->oi_fid))
return fid_idif_id(fid_seq(&ostid->oi_fid),
fid_oid(&ostid->oi_fid), 0);
@@ -636,12 +645,22 @@ static inline void ostid_set_seq_llog(struct ost_id *oi)
*/
static inline void ostid_set_id(struct ost_id *oi, __u64 oid)
{
- if (fid_seq_is_mdt0(ostid_seq(oi))) {
+ if (fid_seq_is_mdt0(oi->oi.oi_seq)) {
if (oid >= IDIF_MAX_OID) {
CERROR("Bad %llu to set " DOSTID "\n", oid, POSTID(oi));
return;
}
oi->oi.oi_id = oid;
+ } else if (fid_is_idif(&oi->oi_fid)) {
+ if (oid >= IDIF_MAX_OID) {
+ CERROR("Bad %llu to set "DOSTID"\n",
+ oid, POSTID(oi));
+ return;
+ }
+ oi->oi_fid.f_seq = fid_idif_seq(oid,
+ fid_idif_ost_idx(&oi->oi_fid));
+ oi->oi_fid.f_oid = oid;
+ oi->oi_fid.f_ver = oid >> 48;
} else {
if (oid > OBIF_MAX_OID) {
CERROR("Bad %llu to set " DOSTID "\n", oid, POSTID(oi));
@@ -651,25 +670,31 @@ static inline void ostid_set_id(struct ost_id *oi, __u64 oid)
}
}
-static inline void ostid_inc_id(struct ost_id *oi)
+static inline int fid_set_id(struct lu_fid *fid, __u64 oid)
{
- if (fid_seq_is_mdt0(ostid_seq(oi))) {
- if (unlikely(ostid_id(oi) + 1 > IDIF_MAX_OID)) {
- CERROR("Bad inc "DOSTID"\n", POSTID(oi));
- return;
+ if (unlikely(fid_seq_is_igif(fid->f_seq))) {
+ CERROR("bad IGIF, "DFID"\n", PFID(fid));
+ return -EBADF;
+ }
+
+ if (fid_is_idif(fid)) {
+ if (oid >= IDIF_MAX_OID) {
+ CERROR("Too large OID %#llx to set IDIF "DFID"\n",
+ (unsigned long long)oid, PFID(fid));
+ return -EBADF;
}
- oi->oi.oi_id++;
+ fid->f_seq = fid_idif_seq(oid, fid_idif_ost_idx(fid));
+ fid->f_oid = oid;
+ fid->f_ver = oid >> 48;
} else {
- oi->oi_fid.f_oid++;
+ if (oid > OBIF_MAX_OID) {
+ CERROR("Too large OID %#llx to set REG "DFID"\n",
+ (unsigned long long)oid, PFID(fid));
+ return -EBADF;
+ }
+ fid->f_oid = oid;
}
-}
-
-static inline void ostid_dec_id(struct ost_id *oi)
-{
- if (fid_seq_is_mdt0(ostid_seq(oi)))
- oi->oi.oi_id--;
- else
- oi->oi_fid.f_oid--;
+ return 0;
}
/**
@@ -684,30 +709,34 @@ static inline void ostid_dec_id(struct ost_id *oi)
static inline int ostid_to_fid(struct lu_fid *fid, struct ost_id *ostid,
__u32 ost_idx)
{
+ __u64 seq = ostid_seq(ostid);
+
if (ost_idx > 0xffff) {
CERROR("bad ost_idx, "DOSTID" ost_idx:%u\n", POSTID(ostid),
ost_idx);
return -EBADF;
}
- if (fid_seq_is_mdt0(ostid_seq(ostid))) {
+ if (fid_seq_is_mdt0(seq)) {
+ __u64 oid = ostid_id(ostid);
+
/* This is a "legacy" (old 1.x/2.early) OST object in "group 0"
* that we map into the IDIF namespace. It allows up to 2^48
* objects per OST, as this is the object namespace that has
* been in production for years. This can handle create rates
* of 1M objects/s/OST for 9 years, or combinations thereof.
*/
- if (ostid_id(ostid) >= IDIF_MAX_OID) {
+ if (oid >= IDIF_MAX_OID) {
CERROR("bad MDT0 id, " DOSTID " ost_idx:%u\n",
POSTID(ostid), ost_idx);
return -EBADF;
}
- fid->f_seq = fid_idif_seq(ostid_id(ostid), ost_idx);
+ fid->f_seq = fid_idif_seq(oid, ost_idx);
/* truncate to 32 bits by assignment */
- fid->f_oid = ostid_id(ostid);
+ fid->f_oid = oid;
/* in theory, not currently used */
- fid->f_ver = ostid_id(ostid) >> 48;
- } else /* if (fid_seq_is_idif(seq) || fid_seq_is_norm(seq)) */ {
+ fid->f_ver = oid >> 48;
+ } else if (likely(!fid_seq_is_default(seq))) {
/* This is either an IDIF object, which identifies objects across
* all OSTs, or a regular FID. The IDIF namespace maps legacy
* OST objects into the FID namespace. In both cases, we just
@@ -1001,8 +1030,9 @@ static inline int lu_dirent_calc_size(int namelen, __u16 attr)
size = (sizeof(struct lu_dirent) + namelen + align) & ~align;
size += sizeof(struct luda_type);
- } else
+ } else {
size = sizeof(struct lu_dirent) + namelen;
+ }
return (size + 7) & ~7;
}
@@ -1256,6 +1286,9 @@ void lustre_swab_ptlrpc_body(struct ptlrpc_body *pb);
#define OBD_CONNECT_PINGLESS 0x4000000000000ULL/* pings not required */
#define OBD_CONNECT_FLOCK_DEAD 0x8000000000000ULL/* flock deadlock detection */
#define OBD_CONNECT_DISP_STRIPE 0x10000000000000ULL/*create stripe disposition*/
+#define OBD_CONNECT_OPEN_BY_FID 0x20000000000000ULL /* open by fid won't pack
+ * name in request
+ */
/* XXX README XXX:
* Please DO NOT add flag values here before first ensuring that this same
@@ -1428,6 +1461,8 @@ enum obdo_flags {
*/
OBD_FL_RECOV_RESEND = 0x00080000, /* recoverable resent */
OBD_FL_NOSPC_BLK = 0x00100000, /* no more block space on OST */
+ OBD_FL_FLUSH = 0x00200000, /* flush pages on the OST */
+ OBD_FL_SHORT_IO = 0x00400000, /* short io request */
/* Note that while these checksum values are currently separate bits,
* in 2.x we can actually allow all values from 1-31 if we wanted.
@@ -1525,6 +1560,11 @@ static inline void lmm_oi_set_seq(struct ost_id *oi, __u64 seq)
oi->oi.oi_seq = seq;
}
+static inline void lmm_oi_set_id(struct ost_id *oi, __u64 oid)
+{
+ oi->oi.oi_id = oid;
+}
+
static inline __u64 lmm_oi_id(struct ost_id *oi)
{
return oi->oi.oi_id;
@@ -1732,6 +1772,11 @@ void lustre_swab_obd_statfs(struct obd_statfs *os);
#define OBD_BRW_MEMALLOC 0x800 /* Client runs in the "kswapd" context */
#define OBD_BRW_OVER_USRQUOTA 0x1000 /* Running out of user quota */
#define OBD_BRW_OVER_GRPQUOTA 0x2000 /* Running out of group quota */
+#define OBD_BRW_SOFT_SYNC 0x4000 /* This flag notifies the server
+ * that the client is running low on
+ * space for unstable pages; asking
+ * it to sync quickly
+ */
#define OBD_OBJECT_EOF 0xffffffffffffffffULL
@@ -2436,6 +2481,7 @@ struct mdt_rec_reint {
void lustre_swab_mdt_rec_reint(struct mdt_rec_reint *rr);
+/* lmv structures */
struct lmv_desc {
__u32 ld_tgt_count; /* how many MDS's */
__u32 ld_active_tgt_count; /* how many active */
@@ -2460,7 +2506,6 @@ struct lmv_stripe_md {
struct lu_fid mea_ids[0];
};
-/* lmv structures */
#define MEA_MAGIC_LAST_CHAR 0xb2221ca1
#define MEA_MAGIC_ALL_CHARS 0xb222a11c
#define MEA_MAGIC_HASH_SEGMENT 0xb222a11b
@@ -2470,9 +2515,10 @@ struct lmv_stripe_md {
#define MAX_HASH_HIGHEST_BIT 0x1000000000000000ULL
enum fld_rpc_opc {
- FLD_QUERY = 900,
+ FLD_QUERY = 900,
+ FLD_READ = 901,
FLD_LAST_OPC,
- FLD_FIRST_OPC = FLD_QUERY
+ FLD_FIRST_OPC = FLD_QUERY
};
enum seq_rpc_opc {
@@ -2486,6 +2532,12 @@ enum seq_op {
SEQ_ALLOC_META = 1
};
+enum fld_op {
+ FLD_CREATE = 0,
+ FLD_DELETE = 1,
+ FLD_LOOKUP = 2,
+};
+
/*
* LOV data structures
*/
@@ -2582,6 +2634,8 @@ struct ldlm_extent {
__u64 gid;
};
+#define LDLM_GID_ANY ((__u64)-1)
+
static inline int ldlm_extent_overlap(struct ldlm_extent *ex1,
struct ldlm_extent *ex2)
{
@@ -3304,7 +3358,7 @@ struct getinfo_fid2path {
char gf_path[0];
} __packed;
-void lustre_swab_fid2path (struct getinfo_fid2path *gf);
+void lustre_swab_fid2path(struct getinfo_fid2path *gf);
enum {
LAYOUT_INTENT_ACCESS = 0,
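
Among the lustre_idl.h changes above, ostid_inc_id()/ostid_dec_id() are replaced by fid_set_id(), which reports failure instead of silently ignoring an IGIF sequence or an out-of-range OID. A hedged sketch of a caller checking that return value (function and variable names are illustrative):

static int example_set_object_id(struct lu_fid *fid, __u64 oid)
{
	int rc;

	rc = fid_set_id(fid, oid);
	if (rc)		/* -EBADF: IGIF sequence or OID too large */
		return rc;

	return 0;
}
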
diff --git a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
index 276906e646f5..59ba48ac31a7 100644
--- a/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
+++ b/drivers/staging/lustre/lustre/include/lustre/lustre_user.h
@@ -193,37 +193,37 @@ struct ost_id {
* *INFO - set/get lov_user_mds_data
*/
/* see <lustre_lib.h> for ioctl numberss 101-150 */
-#define LL_IOC_GETFLAGS _IOR ('f', 151, long)
-#define LL_IOC_SETFLAGS _IOW ('f', 152, long)
-#define LL_IOC_CLRFLAGS _IOW ('f', 153, long)
+#define LL_IOC_GETFLAGS _IOR('f', 151, long)
+#define LL_IOC_SETFLAGS _IOW('f', 152, long)
+#define LL_IOC_CLRFLAGS _IOW('f', 153, long)
/* LL_IOC_LOV_SETSTRIPE: See also OBD_IOC_LOV_SETSTRIPE */
-#define LL_IOC_LOV_SETSTRIPE _IOW ('f', 154, long)
+#define LL_IOC_LOV_SETSTRIPE _IOW('f', 154, long)
/* LL_IOC_LOV_GETSTRIPE: See also OBD_IOC_LOV_GETSTRIPE */
-#define LL_IOC_LOV_GETSTRIPE _IOW ('f', 155, long)
+#define LL_IOC_LOV_GETSTRIPE _IOW('f', 155, long)
/* LL_IOC_LOV_SETEA: See also OBD_IOC_LOV_SETEA */
-#define LL_IOC_LOV_SETEA _IOW ('f', 156, long)
-#define LL_IOC_RECREATE_OBJ _IOW ('f', 157, long)
-#define LL_IOC_RECREATE_FID _IOW ('f', 157, struct lu_fid)
-#define LL_IOC_GROUP_LOCK _IOW ('f', 158, long)
-#define LL_IOC_GROUP_UNLOCK _IOW ('f', 159, long)
+#define LL_IOC_LOV_SETEA _IOW('f', 156, long)
+#define LL_IOC_RECREATE_OBJ _IOW('f', 157, long)
+#define LL_IOC_RECREATE_FID _IOW('f', 157, struct lu_fid)
+#define LL_IOC_GROUP_LOCK _IOW('f', 158, long)
+#define LL_IOC_GROUP_UNLOCK _IOW('f', 159, long)
/* LL_IOC_QUOTACHECK: See also OBD_IOC_QUOTACHECK */
-#define LL_IOC_QUOTACHECK _IOW ('f', 160, int)
+#define LL_IOC_QUOTACHECK _IOW('f', 160, int)
/* LL_IOC_POLL_QUOTACHECK: See also OBD_IOC_POLL_QUOTACHECK */
-#define LL_IOC_POLL_QUOTACHECK _IOR ('f', 161, struct if_quotacheck *)
+#define LL_IOC_POLL_QUOTACHECK _IOR('f', 161, struct if_quotacheck *)
/* LL_IOC_QUOTACTL: See also OBD_IOC_QUOTACTL */
#define LL_IOC_QUOTACTL _IOWR('f', 162, struct if_quotactl)
#define IOC_OBD_STATFS _IOWR('f', 164, struct obd_statfs *)
#define IOC_LOV_GETINFO _IOWR('f', 165, struct lov_user_mds_data *)
-#define LL_IOC_FLUSHCTX _IOW ('f', 166, long)
-#define LL_IOC_RMTACL _IOW ('f', 167, long)
-#define LL_IOC_GETOBDCOUNT _IOR ('f', 168, long)
+#define LL_IOC_FLUSHCTX _IOW('f', 166, long)
+#define LL_IOC_RMTACL _IOW('f', 167, long)
+#define LL_IOC_GETOBDCOUNT _IOR('f', 168, long)
#define LL_IOC_LLOOP_ATTACH _IOWR('f', 169, long)
#define LL_IOC_LLOOP_DETACH _IOWR('f', 170, long)
#define LL_IOC_LLOOP_INFO _IOWR('f', 171, struct lu_fid)
#define LL_IOC_LLOOP_DETACH_BYDEV _IOWR('f', 172, long)
-#define LL_IOC_PATH2FID _IOR ('f', 173, long)
+#define LL_IOC_PATH2FID _IOR('f', 173, long)
#define LL_IOC_GET_CONNECT_FLAGS _IOWR('f', 174, __u64 *)
-#define LL_IOC_GET_MDTIDX _IOR ('f', 175, int)
+#define LL_IOC_GET_MDTIDX _IOR('f', 175, int)
/* see <lustre_lib.h> for ioctl numbers 177-210 */
@@ -676,7 +676,12 @@ static inline const char *changelog_type2str(int type)
#define CLF_UNLINK_HSM_EXISTS 0x0002 /* File has something in HSM */
/* HSM cleaning needed */
/* Flags for rename */
-#define CLF_RENAME_LAST 0x0001 /* rename unlink last hardlink of target */
+#define CLF_RENAME_LAST 0x0001 /* rename unlink last hardlink of
+ * target
+ */
+#define CLF_RENAME_LAST_EXISTS 0x0002 /* rename unlink last hardlink of target
+ * has an archive in backend
+ */
/* Flags for HSM */
/* 12b used (from high weight to low weight):
@@ -833,9 +838,8 @@ struct ioc_data_version {
__u64 idv_flags; /* See LL_DV_xxx */
};
-#define LL_DV_NOFLUSH 0x01 /* Do not take READ EXTENT LOCK before sampling
- * version. Dirty caches are left unchanged.
- */
+#define LL_DV_RD_FLUSH BIT(0) /* Flush dirty pages from clients */
+#define LL_DV_WR_FLUSH BIT(1) /* Flush all caching pages from clients */
#ifndef offsetof
# define offsetof(typ, memb) ((unsigned long)((char *)&(((typ *)0)->memb)))
@@ -1095,12 +1099,12 @@ struct hsm_action_list {
__u32 padding1;
char hal_fsname[0]; /* null-terminated */
/* struct hsm_action_item[hal_count] follows, aligned on 8-byte
- * boundaries. See hai_zero
+ * boundaries. See hai_first
*/
} __packed;
#ifndef HAVE_CFS_SIZE_ROUND
-static inline int cfs_size_round (int val)
+static inline int cfs_size_round(int val)
{
return (val + 7) & (~0x7);
}
@@ -1109,7 +1113,7 @@ static inline int cfs_size_round (int val)
#endif
/* Return pointer to first hai in action list */
-static inline struct hsm_action_item *hai_zero(struct hsm_action_list *hal)
+static inline struct hsm_action_item *hai_first(struct hsm_action_list *hal)
{
return (struct hsm_action_item *)(hal->hal_fsname +
cfs_size_round(strlen(hal-> \
@@ -1131,7 +1135,7 @@ static inline int hal_size(struct hsm_action_list *hal)
struct hsm_action_item *hai;
sz = sizeof(*hal) + cfs_size_round(strlen(hal->hal_fsname) + 1);
- hai = hai_zero(hal);
+ hai = hai_first(hal);
for (i = 0; i < hal->hal_count; i++, hai = hai_next(hai))
sz += cfs_size_round(hai->hai_len);
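
The hai_zero() -> hai_first() rename above implies the usual iteration pattern over an hsm_action_list; the sketch below mirrors the hal_size() loop shown in the hunk, with handle_item() standing in for whatever the consumer does per item (hypothetical name):

static void example_walk_hal(struct hsm_action_list *hal)
{
	struct hsm_action_item *hai = hai_first(hal);
	__u32 i;

	for (i = 0; i < hal->hal_count; i++, hai = hai_next(hai))
		handle_item(hai);	/* hypothetical per-item consumer */
}
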
diff --git a/drivers/staging/lustre/lustre/include/lustre_cfg.h b/drivers/staging/lustre/lustre/include/lustre_cfg.h
index bb16ae980b98..e229e91f7f56 100644
--- a/drivers/staging/lustre/lustre/include/lustre_cfg.h
+++ b/drivers/staging/lustre/lustre/include/lustre_cfg.h
@@ -161,7 +161,7 @@ static inline void *lustre_cfg_buf(struct lustre_cfg *lcfg, int index)
int offset;
int bufcount;
- LASSERT (index >= 0);
+ LASSERT(index >= 0);
bufcount = lcfg->lcfg_bufcount;
if (index >= bufcount)
diff --git a/drivers/staging/lustre/lustre/include/lustre_disk.h b/drivers/staging/lustre/lustre/include/lustre_disk.h
index 95fd36063f55..b36821ffb252 100644
--- a/drivers/staging/lustre/lustre/include/lustre_disk.h
+++ b/drivers/staging/lustre/lustre/include/lustre_disk.h
@@ -130,7 +130,6 @@ struct lustre_sb_info {
struct lustre_mount_data *lsi_lmd; /* mount command info */
struct ll_sb_info *lsi_llsbi; /* add'l client sbi info */
struct dt_device *lsi_dt_dev; /* dt device to access disk fs*/
- struct vfsmount *lsi_srv_mnt; /* the one server mount */
atomic_t lsi_mounts; /* references to the srv_mnt */
char lsi_svname[MTI_NAME_MAXLEN];
char lsi_osd_obdname[64];
@@ -158,7 +157,6 @@ struct lustre_sb_info {
struct lustre_mount_info {
char *lmi_name;
struct super_block *lmi_sb;
- struct vfsmount *lmi_mnt;
struct list_head lmi_list_chain;
};
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm.h b/drivers/staging/lustre/lustre/include/lustre_dlm.h
index 8b0364f71129..9cade144faca 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm.h
@@ -71,6 +71,7 @@ struct obd_device;
*/
enum ldlm_error {
ELDLM_OK = 0,
+ ELDLM_LOCK_MATCHED = 1,
ELDLM_LOCK_CHANGED = 300,
ELDLM_LOCK_ABORTED = 301,
@@ -269,7 +270,7 @@ struct ldlm_pool {
struct completion pl_kobj_unregister;
};
-typedef int (*ldlm_cancel_for_recovery)(struct ldlm_lock *lock);
+typedef int (*ldlm_cancel_cbt)(struct ldlm_lock *lock);
/**
* LVB operations.
@@ -446,8 +447,11 @@ struct ldlm_namespace {
/** Limit of parallel AST RPC count. */
unsigned ns_max_parallel_ast;
- /** Callback to cancel locks before replaying it during recovery. */
- ldlm_cancel_for_recovery ns_cancel_for_recovery;
+ /**
+ * Callback to check if a lock is good to be canceled by ELC or
+ * during recovery.
+ */
+ ldlm_cancel_cbt ns_cancel;
/** LDLM lock stats */
struct lprocfs_stats *ns_stats;
@@ -479,9 +483,9 @@ static inline int ns_connect_lru_resize(struct ldlm_namespace *ns)
}
static inline void ns_register_cancel(struct ldlm_namespace *ns,
- ldlm_cancel_for_recovery arg)
+ ldlm_cancel_cbt arg)
{
- ns->ns_cancel_for_recovery = arg;
+ ns->ns_cancel = arg;
}
struct ldlm_lock;
diff --git a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
index 7f2ba2ffe0eb..e7e0c21a9b40 100644
--- a/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
+++ b/drivers/staging/lustre/lustre/include/lustre_dlm_flags.h
@@ -37,17 +37,11 @@
/** l_flags bits marked as "gone" bits */
#define LDLM_FL_GONE_MASK 0x0006004000000000ULL
-/** l_flags bits marked as "hide_lock" bits */
-#define LDLM_FL_HIDE_LOCK_MASK 0x0000206400000000ULL
-
/** l_flags bits marked as "inherit" bits */
#define LDLM_FL_INHERIT_MASK 0x0000000000800000ULL
-/** l_flags bits marked as "local_only" bits */
-#define LDLM_FL_LOCAL_ONLY_MASK 0x00FFFFFF00000000ULL
-
-/** l_flags bits marked as "on_wire" bits */
-#define LDLM_FL_ON_WIRE_MASK 0x00000000C08F932FULL
+/** l_flags bits marked as "off_wire" bits */
+#define LDLM_FL_OFF_WIRE_MASK 0x00FFFFFF00000000ULL
/** extent, mode, or resource changed */
#define LDLM_FL_LOCK_CHANGED 0x0000000000000001ULL /* bit 0 */
@@ -204,7 +198,7 @@
#define ldlm_set_cancel(_l) LDLM_SET_FLAG((_l), 1ULL << 36)
#define ldlm_clear_cancel(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 36)
-/** whatever it might mean */
+/** whatever it might mean -- never transmitted? */
#define LDLM_FL_LOCAL_ONLY 0x0000002000000000ULL /* bit 37 */
#define ldlm_is_local_only(_l) LDLM_TEST_FLAG((_l), 1ULL << 37)
#define ldlm_set_local_only(_l) LDLM_SET_FLAG((_l), 1ULL << 37)
@@ -287,18 +281,18 @@
* has canceled this lock and is waiting for rpc_lock which is taken by
* the first operation. LDLM_FL_BL_AST is set by ldlm_callback_handler() in
* the lock to prevent the Early Lock Cancel (ELC) code from cancelling it.
- *
- * LDLM_FL_BL_DONE is to be set by ldlm_cancel_callback() when lock cache is
- * dropped to let ldlm_callback_handler() return EINVAL to the server. It
- * is used when ELC RPC is already prepared and is waiting for rpc_lock,
- * too late to send a separate CANCEL RPC.
*/
#define LDLM_FL_BL_AST 0x0000400000000000ULL /* bit 46 */
#define ldlm_is_bl_ast(_l) LDLM_TEST_FLAG((_l), 1ULL << 46)
#define ldlm_set_bl_ast(_l) LDLM_SET_FLAG((_l), 1ULL << 46)
#define ldlm_clear_bl_ast(_l) LDLM_CLEAR_FLAG((_l), 1ULL << 46)
-/** whatever it might mean */
+/**
+ * Set by ldlm_cancel_callback() when lock cache is dropped to let
+ * ldlm_callback_handler() return EINVAL to the server. It is used when
+ * ELC RPC is already prepared and is waiting for rpc_lock, too late to
+ * send a separate CANCEL RPC.
+ */
#define LDLM_FL_BL_DONE 0x0000800000000000ULL /* bit 47 */
#define ldlm_is_bl_done(_l) LDLM_TEST_FLAG((_l), 1ULL << 47)
#define ldlm_set_bl_done(_l) LDLM_SET_FLAG((_l), 1ULL << 47)
@@ -381,104 +375,16 @@
/** test for ldlm_lock flag bit set */
#define LDLM_TEST_FLAG(_l, _b) (((_l)->l_flags & (_b)) != 0)
+/** multi-bit test: are any of mask bits set? */
+#define LDLM_HAVE_MASK(_l, _m) ((_l)->l_flags & LDLM_FL_##_m##_MASK)
+
/** set a ldlm_lock flag bit */
#define LDLM_SET_FLAG(_l, _b) ((_l)->l_flags |= (_b))
/** clear a ldlm_lock flag bit */
#define LDLM_CLEAR_FLAG(_l, _b) ((_l)->l_flags &= ~(_b))
-/** Mask of flags inherited from parent lock when doing intents. */
-#define LDLM_INHERIT_FLAGS LDLM_FL_INHERIT_MASK
-
-/** Mask of Flags sent in AST lock_flags to map into the receiving lock. */
-#define LDLM_AST_FLAGS LDLM_FL_AST_MASK
-
/** @} subgroup */
/** @} group */
-#ifdef WIRESHARK_COMPILE
-static int hf_lustre_ldlm_fl_lock_changed = -1;
-static int hf_lustre_ldlm_fl_block_granted = -1;
-static int hf_lustre_ldlm_fl_block_conv = -1;
-static int hf_lustre_ldlm_fl_block_wait = -1;
-static int hf_lustre_ldlm_fl_ast_sent = -1;
-static int hf_lustre_ldlm_fl_replay = -1;
-static int hf_lustre_ldlm_fl_intent_only = -1;
-static int hf_lustre_ldlm_fl_has_intent = -1;
-static int hf_lustre_ldlm_fl_flock_deadlock = -1;
-static int hf_lustre_ldlm_fl_discard_data = -1;
-static int hf_lustre_ldlm_fl_no_timeout = -1;
-static int hf_lustre_ldlm_fl_block_nowait = -1;
-static int hf_lustre_ldlm_fl_test_lock = -1;
-static int hf_lustre_ldlm_fl_cancel_on_block = -1;
-static int hf_lustre_ldlm_fl_deny_on_contention = -1;
-static int hf_lustre_ldlm_fl_ast_discard_data = -1;
-static int hf_lustre_ldlm_fl_fail_loc = -1;
-static int hf_lustre_ldlm_fl_skipped = -1;
-static int hf_lustre_ldlm_fl_cbpending = -1;
-static int hf_lustre_ldlm_fl_wait_noreproc = -1;
-static int hf_lustre_ldlm_fl_cancel = -1;
-static int hf_lustre_ldlm_fl_local_only = -1;
-static int hf_lustre_ldlm_fl_failed = -1;
-static int hf_lustre_ldlm_fl_canceling = -1;
-static int hf_lustre_ldlm_fl_local = -1;
-static int hf_lustre_ldlm_fl_lvb_ready = -1;
-static int hf_lustre_ldlm_fl_kms_ignore = -1;
-static int hf_lustre_ldlm_fl_cp_reqd = -1;
-static int hf_lustre_ldlm_fl_cleaned = -1;
-static int hf_lustre_ldlm_fl_atomic_cb = -1;
-static int hf_lustre_ldlm_fl_bl_ast = -1;
-static int hf_lustre_ldlm_fl_bl_done = -1;
-static int hf_lustre_ldlm_fl_no_lru = -1;
-static int hf_lustre_ldlm_fl_fail_notified = -1;
-static int hf_lustre_ldlm_fl_destroyed = -1;
-static int hf_lustre_ldlm_fl_server_lock = -1;
-static int hf_lustre_ldlm_fl_res_locked = -1;
-static int hf_lustre_ldlm_fl_waited = -1;
-static int hf_lustre_ldlm_fl_ns_srv = -1;
-static int hf_lustre_ldlm_fl_excl = -1;
-
-const value_string lustre_ldlm_flags_vals[] = {
- {LDLM_FL_LOCK_CHANGED, "LDLM_FL_LOCK_CHANGED"},
- {LDLM_FL_BLOCK_GRANTED, "LDLM_FL_BLOCK_GRANTED"},
- {LDLM_FL_BLOCK_CONV, "LDLM_FL_BLOCK_CONV"},
- {LDLM_FL_BLOCK_WAIT, "LDLM_FL_BLOCK_WAIT"},
- {LDLM_FL_AST_SENT, "LDLM_FL_AST_SENT"},
- {LDLM_FL_REPLAY, "LDLM_FL_REPLAY"},
- {LDLM_FL_INTENT_ONLY, "LDLM_FL_INTENT_ONLY"},
- {LDLM_FL_HAS_INTENT, "LDLM_FL_HAS_INTENT"},
- {LDLM_FL_FLOCK_DEADLOCK, "LDLM_FL_FLOCK_DEADLOCK"},
- {LDLM_FL_DISCARD_DATA, "LDLM_FL_DISCARD_DATA"},
- {LDLM_FL_NO_TIMEOUT, "LDLM_FL_NO_TIMEOUT"},
- {LDLM_FL_BLOCK_NOWAIT, "LDLM_FL_BLOCK_NOWAIT"},
- {LDLM_FL_TEST_LOCK, "LDLM_FL_TEST_LOCK"},
- {LDLM_FL_CANCEL_ON_BLOCK, "LDLM_FL_CANCEL_ON_BLOCK"},
- {LDLM_FL_DENY_ON_CONTENTION, "LDLM_FL_DENY_ON_CONTENTION"},
- {LDLM_FL_AST_DISCARD_DATA, "LDLM_FL_AST_DISCARD_DATA"},
- {LDLM_FL_FAIL_LOC, "LDLM_FL_FAIL_LOC"},
- {LDLM_FL_SKIPPED, "LDLM_FL_SKIPPED"},
- {LDLM_FL_CBPENDING, "LDLM_FL_CBPENDING"},
- {LDLM_FL_WAIT_NOREPROC, "LDLM_FL_WAIT_NOREPROC"},
- {LDLM_FL_CANCEL, "LDLM_FL_CANCEL"},
- {LDLM_FL_LOCAL_ONLY, "LDLM_FL_LOCAL_ONLY"},
- {LDLM_FL_FAILED, "LDLM_FL_FAILED"},
- {LDLM_FL_CANCELING, "LDLM_FL_CANCELING"},
- {LDLM_FL_LOCAL, "LDLM_FL_LOCAL"},
- {LDLM_FL_LVB_READY, "LDLM_FL_LVB_READY"},
- {LDLM_FL_KMS_IGNORE, "LDLM_FL_KMS_IGNORE"},
- {LDLM_FL_CP_REQD, "LDLM_FL_CP_REQD"},
- {LDLM_FL_CLEANED, "LDLM_FL_CLEANED"},
- {LDLM_FL_ATOMIC_CB, "LDLM_FL_ATOMIC_CB"},
- {LDLM_FL_BL_AST, "LDLM_FL_BL_AST"},
- {LDLM_FL_BL_DONE, "LDLM_FL_BL_DONE"},
- {LDLM_FL_NO_LRU, "LDLM_FL_NO_LRU"},
- {LDLM_FL_FAIL_NOTIFIED, "LDLM_FL_FAIL_NOTIFIED"},
- {LDLM_FL_DESTROYED, "LDLM_FL_DESTROYED"},
- {LDLM_FL_SERVER_LOCK, "LDLM_FL_SERVER_LOCK"},
- {LDLM_FL_RES_LOCKED, "LDLM_FL_RES_LOCKED"},
- {LDLM_FL_WAITED, "LDLM_FL_WAITED"},
- {LDLM_FL_NS_SRV, "LDLM_FL_NS_SRV"},
- {LDLM_FL_EXCL, "LDLM_FL_EXCL"},
- { 0, NULL }
-};
-#endif /* WIRESHARK_COMPILE */
+
#endif /* LDLM_ALL_FLAGS_MASK */
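
The new LDLM_HAVE_MASK() macro above gives a one-line test against the *_MASK groups kept in this header. A small illustration using the "gone" mask (the helper name is made up for the example):

static bool example_lock_is_gone(struct ldlm_lock *lock)
{
	/* expands to (lock->l_flags & LDLM_FL_GONE_MASK) */
	return LDLM_HAVE_MASK(lock, GONE) != 0;
}
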
diff --git a/drivers/staging/lustre/lustre/include/lustre_fid.h b/drivers/staging/lustre/lustre/include/lustre_fid.h
index ab4a92390a43..12e8b585c2b4 100644
--- a/drivers/staging/lustre/lustre/include/lustre_fid.h
+++ b/drivers/staging/lustre/lustre/include/lustre_fid.h
@@ -308,10 +308,10 @@ static inline int fid_seq_in_fldb(__u64 seq)
fid_seq_is_root(seq) || fid_seq_is_dot(seq);
}
-static inline void lu_last_id_fid(struct lu_fid *fid, __u64 seq)
+static inline void lu_last_id_fid(struct lu_fid *fid, __u64 seq, __u32 ost_idx)
{
if (fid_seq_is_mdt0(seq)) {
- fid->f_seq = fid_idif_seq(0, 0);
+ fid->f_seq = fid_idif_seq(0, ost_idx);
} else {
LASSERTF(fid_seq_is_norm(seq) || fid_seq_is_echo(seq) ||
fid_seq_is_idif(seq), "%#llx\n", seq);
@@ -498,19 +498,6 @@ static inline void ostid_build_res_name(struct ost_id *oi,
}
}
-static inline void ostid_res_name_to_id(struct ost_id *oi,
- struct ldlm_res_id *name)
-{
- if (fid_seq_is_mdt0(name->name[LUSTRE_RES_ID_SEQ_OFF])) {
- /* old resid */
- ostid_set_seq(oi, name->name[LUSTRE_RES_ID_VER_OID_OFF]);
- ostid_set_id(oi, name->name[LUSTRE_RES_ID_SEQ_OFF]);
- } else {
- /* new resid */
- fid_extract_from_res_name(&oi->oi_fid, name);
- }
-}
-
/**
* Return true if the resource is for the object identified by this id & group.
*/
@@ -546,7 +533,8 @@ static inline void ost_fid_build_resid(const struct lu_fid *fid,
}
static inline void ost_fid_from_resid(struct lu_fid *fid,
- const struct ldlm_res_id *name)
+ const struct ldlm_res_id *name,
+ int ost_idx)
{
if (fid_seq_is_mdt0(name->name[LUSTRE_RES_ID_VER_OID_OFF])) {
/* old resid */
@@ -554,7 +542,7 @@ static inline void ost_fid_from_resid(struct lu_fid *fid,
ostid_set_seq(&oi, name->name[LUSTRE_RES_ID_VER_OID_OFF]);
ostid_set_id(&oi, name->name[LUSTRE_RES_ID_SEQ_OFF]);
- ostid_to_fid(fid, &oi, 0);
+ ostid_to_fid(fid, &oi, ost_idx);
} else {
/* new resid */
fid_extract_from_res_name(fid, name);
diff --git a/drivers/staging/lustre/lustre/include/lustre_import.h b/drivers/staging/lustre/lustre/include/lustre_import.h
index dac2d84d8266..8325c82b3ebf 100644
--- a/drivers/staging/lustre/lustre/include/lustre_import.h
+++ b/drivers/staging/lustre/lustre/include/lustre_import.h
@@ -109,7 +109,7 @@ static inline char *ptlrpc_import_state_name(enum lustre_imp_state state)
"RECOVER", "FULL", "EVICTED",
};
- LASSERT (state <= LUSTRE_IMP_EVICTED);
+ LASSERT(state <= LUSTRE_IMP_EVICTED);
return import_state_names[state];
}
diff --git a/drivers/staging/lustre/lustre/include/lustre_lib.h b/drivers/staging/lustre/lustre/include/lustre_lib.h
index f2223d55850a..00b976766aef 100644
--- a/drivers/staging/lustre/lustre/include/lustre_lib.h
+++ b/drivers/staging/lustre/lustre/include/lustre_lib.h
@@ -280,16 +280,16 @@ static inline void obd_ioctl_freedata(char *buf, int len)
#define OBD_IOC_DATA_TYPE long
#define OBD_IOC_CREATE _IOWR('f', 101, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_DESTROY _IOW ('f', 104, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_DESTROY _IOW('f', 104, OBD_IOC_DATA_TYPE)
#define OBD_IOC_PREALLOCATE _IOWR('f', 105, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_SETATTR _IOW ('f', 107, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_SETATTR _IOW('f', 107, OBD_IOC_DATA_TYPE)
#define OBD_IOC_GETATTR _IOWR ('f', 108, OBD_IOC_DATA_TYPE)
#define OBD_IOC_READ _IOWR('f', 109, OBD_IOC_DATA_TYPE)
#define OBD_IOC_WRITE _IOWR('f', 110, OBD_IOC_DATA_TYPE)
#define OBD_IOC_STATFS _IOWR('f', 113, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_SYNC _IOW ('f', 114, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_SYNC _IOW('f', 114, OBD_IOC_DATA_TYPE)
#define OBD_IOC_READ2 _IOWR('f', 115, OBD_IOC_DATA_TYPE)
#define OBD_IOC_FORMAT _IOWR('f', 116, OBD_IOC_DATA_TYPE)
#define OBD_IOC_PARTITION _IOWR('f', 117, OBD_IOC_DATA_TYPE)
@@ -308,13 +308,13 @@ static inline void obd_ioctl_freedata(char *buf, int len)
#define OBD_IOC_GETDTNAME OBD_IOC_GETNAME
#define OBD_IOC_LOV_GET_CONFIG _IOWR('f', 132, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_CLIENT_RECOVER _IOW ('f', 133, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_PING_TARGET _IOW ('f', 136, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_CLIENT_RECOVER _IOW('f', 133, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_PING_TARGET _IOW('f', 136, OBD_IOC_DATA_TYPE)
#define OBD_IOC_DEC_FS_USE_COUNT _IO ('f', 139)
-#define OBD_IOC_NO_TRANSNO _IOW ('f', 140, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_SET_READONLY _IOW ('f', 141, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_ABORT_RECOVERY _IOR ('f', 142, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_NO_TRANSNO _IOW('f', 140, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_SET_READONLY _IOW('f', 141, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_ABORT_RECOVERY _IOR('f', 142, OBD_IOC_DATA_TYPE)
#define OBD_IOC_ROOT_SQUASH _IOWR('f', 143, OBD_IOC_DATA_TYPE)
@@ -324,27 +324,27 @@ static inline void obd_ioctl_freedata(char *buf, int len)
#define OBD_IOC_CLOSE_UUID _IOWR ('f', 147, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_CHANGELOG_SEND _IOW ('f', 148, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_CHANGELOG_SEND _IOW('f', 148, OBD_IOC_DATA_TYPE)
#define OBD_IOC_GETDEVICE _IOWR ('f', 149, OBD_IOC_DATA_TYPE)
#define OBD_IOC_FID2PATH _IOWR ('f', 150, OBD_IOC_DATA_TYPE)
/* see also <lustre/lustre_user.h> for ioctls 151-153 */
/* OBD_IOC_LOV_SETSTRIPE: See also LL_IOC_LOV_SETSTRIPE */
-#define OBD_IOC_LOV_SETSTRIPE _IOW ('f', 154, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_LOV_SETSTRIPE _IOW('f', 154, OBD_IOC_DATA_TYPE)
/* OBD_IOC_LOV_GETSTRIPE: See also LL_IOC_LOV_GETSTRIPE */
-#define OBD_IOC_LOV_GETSTRIPE _IOW ('f', 155, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_LOV_GETSTRIPE _IOW('f', 155, OBD_IOC_DATA_TYPE)
/* OBD_IOC_LOV_SETEA: See also LL_IOC_LOV_SETEA */
-#define OBD_IOC_LOV_SETEA _IOW ('f', 156, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_LOV_SETEA _IOW('f', 156, OBD_IOC_DATA_TYPE)
/* see <lustre/lustre_user.h> for ioctls 157-159 */
/* OBD_IOC_QUOTACHECK: See also LL_IOC_QUOTACHECK */
-#define OBD_IOC_QUOTACHECK _IOW ('f', 160, int)
+#define OBD_IOC_QUOTACHECK _IOW('f', 160, int)
/* OBD_IOC_POLL_QUOTACHECK: See also LL_IOC_POLL_QUOTACHECK */
-#define OBD_IOC_POLL_QUOTACHECK _IOR ('f', 161, struct if_quotacheck *)
+#define OBD_IOC_POLL_QUOTACHECK _IOR('f', 161, struct if_quotacheck *)
/* OBD_IOC_QUOTACTL: See also LL_IOC_QUOTACTL */
#define OBD_IOC_QUOTACTL _IOWR('f', 162, struct if_quotactl)
/* see also <lustre/lustre_user.h> for ioctls 163-176 */
-#define OBD_IOC_CHANGELOG_REG _IOW ('f', 177, struct obd_ioctl_data)
-#define OBD_IOC_CHANGELOG_DEREG _IOW ('f', 178, struct obd_ioctl_data)
-#define OBD_IOC_CHANGELOG_CLEAR _IOW ('f', 179, struct obd_ioctl_data)
+#define OBD_IOC_CHANGELOG_REG _IOW('f', 177, struct obd_ioctl_data)
+#define OBD_IOC_CHANGELOG_DEREG _IOW('f', 178, struct obd_ioctl_data)
+#define OBD_IOC_CHANGELOG_CLEAR _IOW('f', 179, struct obd_ioctl_data)
#define OBD_IOC_RECORD _IOWR('f', 180, OBD_IOC_DATA_TYPE)
#define OBD_IOC_ENDRECORD _IOWR('f', 181, OBD_IOC_DATA_TYPE)
#define OBD_IOC_PARSE _IOWR('f', 182, OBD_IOC_DATA_TYPE)
@@ -352,7 +352,7 @@ static inline void obd_ioctl_freedata(char *buf, int len)
#define OBD_IOC_PROCESS_CFG _IOWR('f', 184, OBD_IOC_DATA_TYPE)
#define OBD_IOC_DUMP_LOG _IOWR('f', 185, OBD_IOC_DATA_TYPE)
#define OBD_IOC_CLEAR_LOG _IOWR('f', 186, OBD_IOC_DATA_TYPE)
-#define OBD_IOC_PARAM _IOW ('f', 187, OBD_IOC_DATA_TYPE)
+#define OBD_IOC_PARAM _IOW('f', 187, OBD_IOC_DATA_TYPE)
#define OBD_IOC_POOL _IOWR('f', 188, OBD_IOC_DATA_TYPE)
#define OBD_IOC_REPLACE_NIDS _IOWR('f', 189, OBD_IOC_DATA_TYPE)
@@ -522,6 +522,28 @@ struct l_wait_info {
sigmask(SIGTERM) | sigmask(SIGQUIT) | \
sigmask(SIGALRM))
+/**
+ * wait_queue_t of Linux (version < 2.6.34) is a FIFO list for exclusively
+ * waiting threads, which is not always desirable because all threads will
+ * be waken up again and again, even user only needs a few of them to be
+ * active most time. This is not good for performance because cache can
+ * be polluted by different threads.
+ *
+ * LIFO list can resolve this problem because we always wakeup the most
+ * recent active thread by default.
+ *
+ * NB: please don't call non-exclusive & exclusive wait on the same
+ * waitq if add_wait_queue_exclusive_head is used.
+ */
+#define add_wait_queue_exclusive_head(waitq, link) \
+{ \
+ unsigned long flags; \
+ \
+ spin_lock_irqsave(&((waitq)->lock), flags); \
+ __add_wait_queue_exclusive(waitq, link); \
+ spin_unlock_irqrestore(&((waitq)->lock), flags); \
+}
+
/*
* wait for @condition to become true, but no longer than timeout, specified
* by @info.
@@ -578,7 +600,7 @@ do { \
\
if (condition) \
break; \
- if (cfs_signal_pending()) { \
+ if (signal_pending(current)) { \
if (info->lwi_on_signal && \
(__timeout == 0 || __allow_intr)) { \
if (info->lwi_on_signal != LWI_ON_SIGNAL_NOOP) \
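
The add_wait_queue_exclusive_head() macro added above parks exclusive waiters at the head of the queue so the most recently active thread is woken first. A rough usage sketch under that assumption, outside of the l_wait_event() machinery this header normally wraps it in (names are illustrative):

static void example_wait_for(wait_queue_head_t *waitq, int *condition)
{
	wait_queue_t wait;

	init_waitqueue_entry(&wait, current);
	add_wait_queue_exclusive_head(waitq, &wait);

	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (*condition)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(waitq, &wait);
}
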
diff --git a/drivers/staging/lustre/lustre/include/lustre_mdc.h b/drivers/staging/lustre/lustre/include/lustre_mdc.h
index af77eb359c43..f267ff8a6ec8 100644
--- a/drivers/staging/lustre/lustre/include/lustre_mdc.h
+++ b/drivers/staging/lustre/lustre/include/lustre_mdc.h
@@ -64,9 +64,27 @@ struct obd_export;
struct ptlrpc_request;
struct obd_device;
+/**
+ * Serializes in-flight MDT-modifying RPC requests to preserve idempotency.
+ *
+ * This mutex is used to implement execute-once semantics on the MDT.
+ * The MDT stores the last transaction ID and result for every client in
+ * its last_rcvd file. If the client doesn't get a reply, it can safely
+ * resend the request and the MDT will reconstruct the reply being aware
+ * that the request has already been executed. Without this lock,
+ * execution status of concurrent in-flight requests would be
+ * overwritten.
+ *
+ * This design limits the extent to which we can keep a full pipeline of
+ * in-flight requests from a single client. This limitation could be
+ * overcome by allowing multiple slots per client in the last_rcvd file.
+ */
struct mdc_rpc_lock {
+ /** Lock protecting in-flight RPC concurrency. */
struct mutex rpcl_mutex;
+ /** Intent associated with currently executing request. */
struct lookup_intent *rpcl_it;
+ /** Used for MDS/RPC load testing purposes. */
int rpcl_fakes;
};
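
The new comment block above explains why MDT-modifying RPCs are funneled through rpcl_mutex. A deliberately simplified sketch of that serialization; send_request() and mdc_example_modify() are hypothetical, and the real code goes through wrapper helpers rather than touching the mutex directly:

static int mdc_example_modify(struct mdc_rpc_lock *lck,
			      struct lookup_intent *it)
{
	int rc;

	mutex_lock(&lck->rpcl_mutex);
	lck->rpcl_it = it;		/* record the in-flight intent */
	rc = send_request();		/* hypothetical RPC send */
	lck->rpcl_it = NULL;
	mutex_unlock(&lck->rpcl_mutex);

	return rc;
}
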
diff --git a/drivers/staging/lustre/lustre/include/lustre_net.h b/drivers/staging/lustre/lustre/include/lustre_net.h
index 69586a522eb7..a7973d5de168 100644
--- a/drivers/staging/lustre/lustre/include/lustre_net.h
+++ b/drivers/staging/lustre/lustre/include/lustre_net.h
@@ -1327,7 +1327,9 @@ struct ptlrpc_request {
/* allow the req to be sent if the import is in recovery
* status
*/
- rq_allow_replay:1;
+ rq_allow_replay:1,
+ /* bulk request, sent to server, but uncommitted */
+ rq_unstable:1;
unsigned int rq_nr_resend;
diff --git a/drivers/staging/lustre/lustre/include/lustre_param.h b/drivers/staging/lustre/lustre/include/lustre_param.h
index 383fe6febe4b..a42cf90c1cd8 100644
--- a/drivers/staging/lustre/lustre/include/lustre_param.h
+++ b/drivers/staging/lustre/lustre/include/lustre_param.h
@@ -89,6 +89,7 @@ int class_parse_nid_quiet(char *buf, lnet_nid_t *nid, char **endh);
/* Prefixes for parameters handled by obd's proc methods (XXX_process_config) */
#define PARAM_OST "ost."
+#define PARAM_OSD "osd."
#define PARAM_OSC "osc."
#define PARAM_MDT "mdt."
#define PARAM_MDD "mdd."
diff --git a/drivers/staging/lustre/lustre/include/lustre_req_layout.h b/drivers/staging/lustre/lustre/include/lustre_req_layout.h
index b2e67fcf9ef1..0aac4391ea16 100644
--- a/drivers/staging/lustre/lustre/include/lustre_req_layout.h
+++ b/drivers/staging/lustre/lustre/include/lustre_req_layout.h
@@ -137,6 +137,7 @@ extern struct req_format RQF_MGS_CONFIG_READ;
/* fid/fld req_format */
extern struct req_format RQF_SEQ_QUERY;
extern struct req_format RQF_FLD_QUERY;
+extern struct req_format RQF_FLD_READ;
/* MDS req_format */
extern struct req_format RQF_MDS_CONNECT;
extern struct req_format RQF_MDS_DISCONNECT;
@@ -199,7 +200,7 @@ extern struct req_format RQF_OST_BRW_READ;
extern struct req_format RQF_OST_BRW_WRITE;
extern struct req_format RQF_OST_STATFS;
extern struct req_format RQF_OST_SET_GRANT_INFO;
-extern struct req_format RQF_OST_GET_INFO_GENERIC;
+extern struct req_format RQF_OST_GET_INFO;
extern struct req_format RQF_OST_GET_INFO_LAST_ID;
extern struct req_format RQF_OST_GET_INFO_LAST_FID;
extern struct req_format RQF_OST_SET_INFO_LAST_FID;
diff --git a/drivers/staging/lustre/lustre/include/obd.h b/drivers/staging/lustre/lustre/include/obd.h
index 4264d97650ec..2d926e0ee647 100644
--- a/drivers/staging/lustre/lustre/include/obd.h
+++ b/drivers/staging/lustre/lustre/include/obd.h
@@ -37,7 +37,7 @@
#ifndef __OBD_H
#define __OBD_H
-#include "linux/obd.h"
+#include <linux/spinlock.h>
#define IOC_OSC_TYPE 'h'
#define IOC_OSC_MIN_NR 20
@@ -54,6 +54,7 @@
#include "lustre_export.h"
#include "lustre_fid.h"
#include "lustre_fld.h"
+#include "lustre_intent.h"
#define MAX_OBD_DEVICES 8192
@@ -165,9 +166,6 @@ struct obd_info {
obd_enqueue_update_f oi_cb_up;
};
-void lov_stripe_lock(struct lov_stripe_md *md);
-void lov_stripe_unlock(struct lov_stripe_md *md);
-
struct obd_type {
struct list_head typ_chain;
struct obd_ops *typ_dt_ops;
@@ -293,14 +291,10 @@ struct client_obd {
* blocking everywhere, but we don't want to slow down fast-path of
* our main platform.)
*
- * Exact type of ->cl_loi_list_lock is defined in arch/obd.h together
- * with client_obd_list_{un,}lock() and
- * client_obd_list_lock_{init,done}() functions.
- *
* NB by Jinshan: though field names are still _loi_, but actually
* osc_object{}s are in the list.
*/
- struct client_obd_lock cl_loi_list_lock;
+ spinlock_t cl_loi_list_lock;
struct list_head cl_loi_ready_list;
struct list_head cl_loi_hp_ready_list;
struct list_head cl_loi_write_list;
@@ -327,7 +321,8 @@ struct client_obd {
atomic_t cl_lru_shrinkers;
atomic_t cl_lru_in_list;
struct list_head cl_lru_list; /* lru page list */
- struct client_obd_lock cl_lru_list_lock; /* page list protector */
+ spinlock_t cl_lru_list_lock; /* page list protector */
+ atomic_t cl_unstable_count;
/* number of in flight destroy rpcs is limited to max_rpcs_in_flight */
atomic_t cl_destroy_in_flight;
@@ -364,6 +359,7 @@ struct client_obd {
/* ptlrpc work for writeback in ptlrpcd context */
void *cl_writeback_work;
+ void *cl_lru_work;
/* hash tables for osc_quota_info */
struct cfs_hash *cl_quota_hash[MAXQUOTAS];
};
@@ -391,45 +387,9 @@ struct ost_pool {
struct rw_semaphore op_rw_sem; /* to protect ost_pool use */
};
-/* Round-robin allocator data */
-struct lov_qos_rr {
- __u32 lqr_start_idx; /* start index of new inode */
- __u32 lqr_offset_idx; /* aliasing for start_idx */
- int lqr_start_count; /* reseed counter */
- struct ost_pool lqr_pool; /* round-robin optimized list */
- unsigned long lqr_dirty:1; /* recalc round-robin list */
-};
-
/* allow statfs data caching for 1 second */
#define OBD_STATFS_CACHE_SECONDS 1
-struct lov_statfs_data {
- struct obd_info lsd_oi;
- struct obd_statfs lsd_statfs;
-};
-
-/* Stripe placement optimization */
-struct lov_qos {
- struct list_head lq_oss_list; /* list of OSSs that targets use */
- struct rw_semaphore lq_rw_sem;
- __u32 lq_active_oss_count;
- unsigned int lq_prio_free; /* priority for free space */
- unsigned int lq_threshold_rr;/* priority for rr */
- struct lov_qos_rr lq_rr; /* round robin qos data */
- unsigned long lq_dirty:1, /* recalc qos data */
- lq_same_space:1,/* the ost's all have approx.
- * the same space avail
- */
- lq_reset:1, /* zero current penalties */
- lq_statfs_in_progress:1; /* statfs op in
- progress */
- /* qos statfs data */
- struct lov_statfs_data *lq_statfs_data;
- wait_queue_head_t lq_statfs_waitq; /* waitqueue to notify statfs
- * requests completion
- */
-};
-
struct lov_tgt_desc {
struct list_head ltd_kill;
struct obd_uuid ltd_uuid;
@@ -442,25 +402,6 @@ struct lov_tgt_desc {
ltd_reap:1; /* should this target be deleted */
};
-/* Pool metadata */
-#define pool_tgt_size(_p) _p->pool_obds.op_size
-#define pool_tgt_count(_p) _p->pool_obds.op_count
-#define pool_tgt_array(_p) _p->pool_obds.op_array
-#define pool_tgt_rw_sem(_p) _p->pool_obds.op_rw_sem
-
-struct pool_desc {
- char pool_name[LOV_MAXPOOLNAME + 1]; /* name of pool */
- struct ost_pool pool_obds; /* pool members */
- atomic_t pool_refcount; /* pool ref. counter */
- struct lov_qos_rr pool_rr; /* round robin qos */
- struct hlist_node pool_hash; /* access by poolname */
- struct list_head pool_list; /* serial access */
- struct dentry *pool_debugfs_entry; /* file in debugfs */
- struct obd_device *pool_lobd; /* obd of the lov/lod to which
- * this pool belongs
- */
-};
-
struct lov_obd {
struct lov_desc desc;
struct lov_tgt_desc **lov_tgts; /* sparse array */
@@ -468,8 +409,6 @@ struct lov_obd {
struct mutex lov_lock;
struct obd_connect_data lov_ocd;
atomic_t lov_refcount;
- __u32 lov_tgt_count; /* how many OBD's */
- __u32 lov_active_tgt_count; /* how many active */
__u32 lov_death_row;/* tgts scheduled to be deleted */
__u32 lov_tgt_size; /* size of tgts array */
int lov_connects;
@@ -479,7 +418,7 @@ struct lov_obd {
struct dentry *lov_pool_debugfs_entry;
enum lustre_sec_part lov_sp_me;
- /* Cached LRU pages from upper layer */
+ /* Cached LRU and unstable data from upper layer */
void *lov_cache;
struct rw_semaphore lov_notify_lock;
@@ -511,7 +450,7 @@ struct lmv_obd {
struct obd_uuid cluuid;
struct obd_export *exp;
- struct mutex init_mutex;
+ struct mutex lmv_init_mutex;
int connected;
int max_easize;
int max_def_easize;
diff --git a/drivers/staging/lustre/lustre/include/obd_cksum.h b/drivers/staging/lustre/lustre/include/obd_cksum.h
index 637fa22110a4..f6c18df906a8 100644
--- a/drivers/staging/lustre/lustre/include/obd_cksum.h
+++ b/drivers/staging/lustre/lustre/include/obd_cksum.h
@@ -35,6 +35,7 @@
#ifndef __OBD_CKSUM
#define __OBD_CKSUM
#include "../../include/linux/libcfs/libcfs.h"
+#include "../../include/linux/libcfs/libcfs_crypto.h"
#include "lustre/lustre_idl.h"
static inline unsigned char cksum_obd2cfs(enum cksum_type cksum_type)
diff --git a/drivers/staging/lustre/lustre/include/obd_class.h b/drivers/staging/lustre/lustre/include/obd_class.h
index 706869f8c98f..32863bcb30b9 100644
--- a/drivers/staging/lustre/lustre/include/obd_class.h
+++ b/drivers/staging/lustre/lustre/include/obd_class.h
@@ -477,7 +477,7 @@ static inline int obd_setup(struct obd_device *obd, struct lustre_cfg *cfg)
struct lu_context session_ctx;
struct lu_env env;
- lu_context_init(&session_ctx, LCT_SESSION);
+ lu_context_init(&session_ctx, LCT_SESSION | LCT_SERVER_SESSION);
session_ctx.lc_thread = NULL;
lu_context_enter(&session_ctx);
@@ -490,8 +490,9 @@ static inline int obd_setup(struct obd_device *obd, struct lustre_cfg *cfg)
obd->obd_lu_dev = d;
d->ld_obd = obd;
rc = 0;
- } else
+ } else {
rc = PTR_ERR(d);
+ }
}
lu_context_exit(&session_ctx);
lu_context_fini(&session_ctx);
diff --git a/drivers/staging/lustre/lustre/include/obd_support.h b/drivers/staging/lustre/lustre/include/obd_support.h
index f8ee3a3254ba..60034d39b00d 100644
--- a/drivers/staging/lustre/lustre/include/obd_support.h
+++ b/drivers/staging/lustre/lustre/include/obd_support.h
@@ -58,6 +58,7 @@ extern int at_early_margin;
extern int at_extra;
extern unsigned int obd_sync_filter;
extern unsigned int obd_max_dirty_pages;
+extern atomic_t obd_unstable_pages;
extern atomic_t obd_dirty_pages;
extern atomic_t obd_dirty_transit_pages;
extern char obd_jobid_var[];
@@ -289,6 +290,7 @@ extern char obd_jobid_var[];
#define OBD_FAIL_OST_ENOINO 0x229
#define OBD_FAIL_OST_DQACQ_NET 0x230
#define OBD_FAIL_OST_STATFS_EINPROGRESS 0x231
+#define OBD_FAIL_OST_SET_INFO_NET 0x232
#define OBD_FAIL_LDLM 0x300
#define OBD_FAIL_LDLM_NAMESPACE_NEW 0x301
@@ -319,6 +321,7 @@ extern char obd_jobid_var[];
#define OBD_FAIL_LDLM_AGL_DELAY 0x31a
#define OBD_FAIL_LDLM_AGL_NOLOCK 0x31b
#define OBD_FAIL_LDLM_OST_LVB 0x31c
+#define OBD_FAIL_LDLM_ENQUEUE_HANG 0x31d
/* LOCKLESS IO */
#define OBD_FAIL_LDLM_SET_CONTENTION 0x385
@@ -426,6 +429,7 @@ extern char obd_jobid_var[];
#define OBD_FAIL_FLD 0x1100
#define OBD_FAIL_FLD_QUERY_NET 0x1101
+#define OBD_FAIL_FLD_READ_NET 0x1102
#define OBD_FAIL_SEC_CTX 0x1200
#define OBD_FAIL_SEC_CTX_INIT_NET 0x1201
diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c b/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
deleted file mode 100644
index 96141d17d07f..000000000000
--- a/drivers/staging/lustre/lustre/lclient/lcommon_cl.c
+++ /dev/null
@@ -1,1203 +0,0 @@
-/*
- * GPL HEADER START
- *
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 only,
- * as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License version 2 for more details (a copy is included
- * in the LICENSE file that accompanied this code).
- *
- * You should have received a copy of the GNU General Public License
- * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- * GPL HEADER END
- */
-/*
- * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Use is subject to license terms.
- *
- * Copyright (c) 2011, 2015, Intel Corporation.
- */
-/*
- * This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
- *
- * cl code shared between vvp and liblustre (and other Lustre clients in the
- * future).
- *
- * Author: Nikita Danilov <nikita.danilov@sun.com>
- */
-
-#define DEBUG_SUBSYSTEM S_LLITE
-
-#include "../../include/linux/libcfs/libcfs.h"
-# include <linux/fs.h>
-# include <linux/sched.h>
-# include <linux/mm.h>
-# include <linux/quotaops.h>
-# include <linux/highmem.h>
-# include <linux/pagemap.h>
-# include <linux/rbtree.h>
-
-#include "../include/obd.h"
-#include "../include/obd_support.h"
-#include "../include/lustre_fid.h"
-#include "../include/lustre_lite.h"
-#include "../include/lustre_dlm.h"
-#include "../include/lustre_ver.h"
-#include "../include/lustre_mdc.h"
-#include "../include/cl_object.h"
-
-#include "../include/lclient.h"
-
-#include "../llite/llite_internal.h"
-
-static const struct cl_req_operations ccc_req_ops;
-
-/*
- * ccc_ prefix stands for "Common Client Code".
- */
-
-static struct kmem_cache *ccc_lock_kmem;
-static struct kmem_cache *ccc_object_kmem;
-static struct kmem_cache *ccc_thread_kmem;
-static struct kmem_cache *ccc_session_kmem;
-static struct kmem_cache *ccc_req_kmem;
-
-static struct lu_kmem_descr ccc_caches[] = {
- {
- .ckd_cache = &ccc_lock_kmem,
- .ckd_name = "ccc_lock_kmem",
- .ckd_size = sizeof(struct ccc_lock)
- },
- {
- .ckd_cache = &ccc_object_kmem,
- .ckd_name = "ccc_object_kmem",
- .ckd_size = sizeof(struct ccc_object)
- },
- {
- .ckd_cache = &ccc_thread_kmem,
- .ckd_name = "ccc_thread_kmem",
- .ckd_size = sizeof(struct ccc_thread_info),
- },
- {
- .ckd_cache = &ccc_session_kmem,
- .ckd_name = "ccc_session_kmem",
- .ckd_size = sizeof(struct ccc_session)
- },
- {
- .ckd_cache = &ccc_req_kmem,
- .ckd_name = "ccc_req_kmem",
- .ckd_size = sizeof(struct ccc_req)
- },
- {
- .ckd_cache = NULL
- }
-};
-
-/*****************************************************************************
- *
- * Vvp device and device type functions.
- *
- */
-
-void *ccc_key_init(const struct lu_context *ctx, struct lu_context_key *key)
-{
- struct ccc_thread_info *info;
-
- info = kmem_cache_zalloc(ccc_thread_kmem, GFP_NOFS);
- if (!info)
- info = ERR_PTR(-ENOMEM);
- return info;
-}
-
-void ccc_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
- struct ccc_thread_info *info = data;
-
- kmem_cache_free(ccc_thread_kmem, info);
-}
-
-void *ccc_session_key_init(const struct lu_context *ctx,
- struct lu_context_key *key)
-{
- struct ccc_session *session;
-
- session = kmem_cache_zalloc(ccc_session_kmem, GFP_NOFS);
- if (!session)
- session = ERR_PTR(-ENOMEM);
- return session;
-}
-
-void ccc_session_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
-{
- struct ccc_session *session = data;
-
- kmem_cache_free(ccc_session_kmem, session);
-}
-
-struct lu_context_key ccc_key = {
- .lct_tags = LCT_CL_THREAD,
- .lct_init = ccc_key_init,
- .lct_fini = ccc_key_fini
-};
-
-struct lu_context_key ccc_session_key = {
- .lct_tags = LCT_SESSION,
- .lct_init = ccc_session_key_init,
- .lct_fini = ccc_session_key_fini
-};
-
-/* type constructor/destructor: ccc_type_{init,fini,start,stop}(). */
-/* LU_TYPE_INIT_FINI(ccc, &ccc_key, &ccc_session_key); */
-
-int ccc_device_init(const struct lu_env *env, struct lu_device *d,
- const char *name, struct lu_device *next)
-{
- struct ccc_device *vdv;
- int rc;
-
- vdv = lu2ccc_dev(d);
- vdv->cdv_next = lu2cl_dev(next);
-
- LASSERT(d->ld_site && next->ld_type);
- next->ld_site = d->ld_site;
- rc = next->ld_type->ldt_ops->ldto_device_init(
- env, next, next->ld_type->ldt_name, NULL);
- if (rc == 0) {
- lu_device_get(next);
- lu_ref_add(&next->ld_reference, "lu-stack", &lu_site_init);
- }
- return rc;
-}
-
-struct lu_device *ccc_device_fini(const struct lu_env *env,
- struct lu_device *d)
-{
- return cl2lu_dev(lu2ccc_dev(d)->cdv_next);
-}
-
-struct lu_device *ccc_device_alloc(const struct lu_env *env,
- struct lu_device_type *t,
- struct lustre_cfg *cfg,
- const struct lu_device_operations *luops,
- const struct cl_device_operations *clops)
-{
- struct ccc_device *vdv;
- struct lu_device *lud;
- struct cl_site *site;
- int rc;
-
- vdv = kzalloc(sizeof(*vdv), GFP_NOFS);
- if (!vdv)
- return ERR_PTR(-ENOMEM);
-
- lud = &vdv->cdv_cl.cd_lu_dev;
- cl_device_init(&vdv->cdv_cl, t);
- ccc2lu_dev(vdv)->ld_ops = luops;
- vdv->cdv_cl.cd_ops = clops;
-
- site = kzalloc(sizeof(*site), GFP_NOFS);
- if (site) {
- rc = cl_site_init(site, &vdv->cdv_cl);
- if (rc == 0)
- rc = lu_site_init_finish(&site->cs_lu);
- else {
- LASSERT(!lud->ld_site);
- CERROR("Cannot init lu_site, rc %d.\n", rc);
- kfree(site);
- }
- } else
- rc = -ENOMEM;
- if (rc != 0) {
- ccc_device_free(env, lud);
- lud = ERR_PTR(rc);
- }
- return lud;
-}
-
-struct lu_device *ccc_device_free(const struct lu_env *env,
- struct lu_device *d)
-{
- struct ccc_device *vdv = lu2ccc_dev(d);
- struct cl_site *site = lu2cl_site(d->ld_site);
- struct lu_device *next = cl2lu_dev(vdv->cdv_next);
-
- if (d->ld_site) {
- cl_site_fini(site);
- kfree(site);
- }
- cl_device_fini(lu2cl_dev(d));
- kfree(vdv);
- return next;
-}
-
-int ccc_req_init(const struct lu_env *env, struct cl_device *dev,
- struct cl_req *req)
-{
- struct ccc_req *vrq;
- int result;
-
- vrq = kmem_cache_zalloc(ccc_req_kmem, GFP_NOFS);
- if (vrq) {
- cl_req_slice_add(req, &vrq->crq_cl, dev, &ccc_req_ops);
- result = 0;
- } else
- result = -ENOMEM;
- return result;
-}
-
-/**
- * An `emergency' environment used by ccc_inode_fini() when cl_env_get()
- * fails. Access to this environment is serialized by ccc_inode_fini_guard
- * mutex.
- */
-static struct lu_env *ccc_inode_fini_env;
-
-/**
- * A mutex serializing calls to slp_inode_fini() under extreme memory
- * pressure, when environments cannot be allocated.
- */
-static DEFINE_MUTEX(ccc_inode_fini_guard);
-static int dummy_refcheck;
-
-int ccc_global_init(struct lu_device_type *device_type)
-{
- int result;
-
- result = lu_kmem_init(ccc_caches);
- if (result)
- return result;
-
- result = lu_device_type_init(device_type);
- if (result)
- goto out_kmem;
-
- ccc_inode_fini_env = cl_env_alloc(&dummy_refcheck,
- LCT_REMEMBER|LCT_NOREF);
- if (IS_ERR(ccc_inode_fini_env)) {
- result = PTR_ERR(ccc_inode_fini_env);
- goto out_device;
- }
-
- ccc_inode_fini_env->le_ctx.lc_cookie = 0x4;
- return 0;
-out_device:
- lu_device_type_fini(device_type);
-out_kmem:
- lu_kmem_fini(ccc_caches);
- return result;
-}
-
-void ccc_global_fini(struct lu_device_type *device_type)
-{
- if (ccc_inode_fini_env) {
- cl_env_put(ccc_inode_fini_env, &dummy_refcheck);
- ccc_inode_fini_env = NULL;
- }
- lu_device_type_fini(device_type);
- lu_kmem_fini(ccc_caches);
-}
-
-/*****************************************************************************
- *
- * Object operations.
- *
- */
-
-struct lu_object *ccc_object_alloc(const struct lu_env *env,
- const struct lu_object_header *unused,
- struct lu_device *dev,
- const struct cl_object_operations *clops,
- const struct lu_object_operations *luops)
-{
- struct ccc_object *vob;
- struct lu_object *obj;
-
- vob = kmem_cache_zalloc(ccc_object_kmem, GFP_NOFS);
- if (vob) {
- struct cl_object_header *hdr;
-
- obj = ccc2lu(vob);
- hdr = &vob->cob_header;
- cl_object_header_init(hdr);
- lu_object_init(obj, &hdr->coh_lu, dev);
- lu_object_add_top(&hdr->coh_lu, obj);
-
- vob->cob_cl.co_ops = clops;
- obj->lo_ops = luops;
- } else
- obj = NULL;
- return obj;
-}
-
-int ccc_object_init0(const struct lu_env *env,
- struct ccc_object *vob,
- const struct cl_object_conf *conf)
-{
- vob->cob_inode = conf->coc_inode;
- vob->cob_transient_pages = 0;
- cl_object_page_init(&vob->cob_cl, sizeof(struct ccc_page));
- return 0;
-}
-
-int ccc_object_init(const struct lu_env *env, struct lu_object *obj,
- const struct lu_object_conf *conf)
-{
- struct ccc_device *dev = lu2ccc_dev(obj->lo_dev);
- struct ccc_object *vob = lu2ccc(obj);
- struct lu_object *below;
- struct lu_device *under;
- int result;
-
- under = &dev->cdv_next->cd_lu_dev;
- below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under);
- if (below) {
- const struct cl_object_conf *cconf;
-
- cconf = lu2cl_conf(conf);
- INIT_LIST_HEAD(&vob->cob_pending_list);
- lu_object_add(obj, below);
- result = ccc_object_init0(env, vob, cconf);
- } else
- result = -ENOMEM;
- return result;
-}
-
-void ccc_object_free(const struct lu_env *env, struct lu_object *obj)
-{
- struct ccc_object *vob = lu2ccc(obj);
-
- lu_object_fini(obj);
- lu_object_header_fini(obj->lo_header);
- kmem_cache_free(ccc_object_kmem, vob);
-}
-
-int ccc_lock_init(const struct lu_env *env,
- struct cl_object *obj, struct cl_lock *lock,
- const struct cl_io *unused,
- const struct cl_lock_operations *lkops)
-{
- struct ccc_lock *clk;
- int result;
-
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
-
- clk = kmem_cache_zalloc(ccc_lock_kmem, GFP_NOFS);
- if (clk) {
- cl_lock_slice_add(lock, &clk->clk_cl, obj, lkops);
- result = 0;
- } else
- result = -ENOMEM;
- return result;
-}
-
-int ccc_object_glimpse(const struct lu_env *env,
- const struct cl_object *obj, struct ost_lvb *lvb)
-{
- struct inode *inode = ccc_object_inode(obj);
-
- lvb->lvb_mtime = cl_inode_mtime(inode);
- lvb->lvb_atime = cl_inode_atime(inode);
- lvb->lvb_ctime = cl_inode_ctime(inode);
- /*
- * LU-417: Add dirty pages block count lest i_blocks reports 0, some
- * "cp" or "tar" on remote node may think it's a completely sparse file
- * and skip it.
- */
- if (lvb->lvb_size > 0 && lvb->lvb_blocks == 0)
- lvb->lvb_blocks = dirty_cnt(inode);
- return 0;
-}
-
-static void ccc_object_size_lock(struct cl_object *obj)
-{
- struct inode *inode = ccc_object_inode(obj);
-
- ll_inode_size_lock(inode);
- cl_object_attr_lock(obj);
-}
-
-static void ccc_object_size_unlock(struct cl_object *obj)
-{
- struct inode *inode = ccc_object_inode(obj);
-
- cl_object_attr_unlock(obj);
- ll_inode_size_unlock(inode);
-}
-
-/*****************************************************************************
- *
- * Page operations.
- *
- */
-
-struct page *ccc_page_vmpage(const struct lu_env *env,
- const struct cl_page_slice *slice)
-{
- return cl2vm_page(slice);
-}
-
-int ccc_page_is_under_lock(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io)
-{
- struct ccc_io *cio = ccc_env_io(env);
- struct cl_lock_descr *desc = &ccc_env_info(env)->cti_descr;
- struct cl_page *page = slice->cpl_page;
-
- int result;
-
- if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
- io->ci_type == CIT_FAULT) {
- if (cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)
- result = -EBUSY;
- else {
- desc->cld_start = page->cp_index;
- desc->cld_end = page->cp_index;
- desc->cld_obj = page->cp_obj;
- desc->cld_mode = CLM_READ;
- result = cl_queue_match(&io->ci_lockset.cls_done,
- desc) ? -EBUSY : 0;
- }
- } else
- result = 0;
- return result;
-}
-
-int ccc_fail(const struct lu_env *env, const struct cl_page_slice *slice)
-{
- /*
- * Cached read?
- */
- LBUG();
- return 0;
-}
-
-int ccc_transient_page_prep(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- /* transient page should always be sent. */
- return 0;
-}
-
-/*****************************************************************************
- *
- * Lock operations.
- *
- */
-
-void ccc_lock_delete(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
-}
-
-void ccc_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice)
-{
- struct ccc_lock *clk = cl2ccc_lock(slice);
-
- kmem_cache_free(ccc_lock_kmem, clk);
-}
-
-int ccc_lock_enqueue(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- struct cl_io *unused, __u32 enqflags)
-{
- CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
- return 0;
-}
-
-int ccc_lock_use(const struct lu_env *env, const struct cl_lock_slice *slice)
-{
- CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
- return 0;
-}
-
-int ccc_lock_unuse(const struct lu_env *env, const struct cl_lock_slice *slice)
-{
- CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
- return 0;
-}
-
-int ccc_lock_wait(const struct lu_env *env, const struct cl_lock_slice *slice)
-{
- CLOBINVRNT(env, slice->cls_obj, ccc_object_invariant(slice->cls_obj));
- return 0;
-}
-
-/**
- * Implementation of cl_lock_operations::clo_fits_into() methods for ccc
- * layer. This function is executed every time io finds an existing lock in
- * the lock cache while creating new lock. This function has to decide whether
- * cached lock "fits" into io.
- *
- * \param slice lock to be checked
- * \param io IO that wants a lock.
- *
- * \see lov_lock_fits_into().
- */
-int ccc_lock_fits_into(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- const struct cl_lock_descr *need,
- const struct cl_io *io)
-{
- const struct cl_lock *lock = slice->cls_lock;
- const struct cl_lock_descr *descr = &lock->cll_descr;
- const struct ccc_io *cio = ccc_env_io(env);
- int result;
-
- /*
- * Work around DLM peculiarity: it assumes that glimpse
- * (LDLM_FL_HAS_INTENT) lock is always LCK_PR, and returns reads lock
- * when asked for LCK_PW lock with LDLM_FL_HAS_INTENT flag set. Make
- * sure that glimpse doesn't get CLM_WRITE top-lock, so that it
- * doesn't enqueue CLM_WRITE sub-locks.
- */
- if (cio->cui_glimpse)
- result = descr->cld_mode != CLM_WRITE;
-
- /*
- * Also, don't match incomplete write locks for read, otherwise read
- * would enqueue missing sub-locks in the write mode.
- */
- else if (need->cld_mode != descr->cld_mode)
- result = lock->cll_state >= CLS_ENQUEUED;
- else
- result = 1;
- return result;
-}
-
-/**
- * Implements cl_lock_operations::clo_state() method for ccc layer, invoked
- * whenever lock state changes. Transfers object attributes, that might be
- * updated as a result of lock acquiring into inode.
- */
-void ccc_lock_state(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- enum cl_lock_state state)
-{
- struct cl_lock *lock = slice->cls_lock;
-
- /*
- * Refresh inode attributes when the lock is moving into CLS_HELD
- * state, and only when this is a result of real enqueue, rather than
- * of finding lock in the cache.
- */
- if (state == CLS_HELD && lock->cll_state < CLS_HELD) {
- struct cl_object *obj;
- struct inode *inode;
-
- obj = slice->cls_obj;
- inode = ccc_object_inode(obj);
-
- /* vmtruncate() sets the i_size
- * under both a DLM lock and the
- * ll_inode_size_lock(). If we don't get the
- * ll_inode_size_lock() here we can match the DLM lock and
- * reset i_size. generic_file_write can then trust the
- * stale i_size when doing appending writes and effectively
- * cancel the result of the truncate. Getting the
- * ll_inode_size_lock() after the enqueue maintains the DLM
- * -> ll_inode_size_lock() acquiring order.
- */
- if (lock->cll_descr.cld_start == 0 &&
- lock->cll_descr.cld_end == CL_PAGE_EOF)
- cl_merge_lvb(env, inode);
- }
-}
-
-/*****************************************************************************
- *
- * io operations.
- *
- */
-
-int ccc_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
- __u32 enqflags, enum cl_lock_mode mode,
- pgoff_t start, pgoff_t end)
-{
- struct ccc_io *cio = ccc_env_io(env);
- struct cl_lock_descr *descr = &cio->cui_link.cill_descr;
- struct cl_object *obj = io->ci_obj;
-
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
-
- CDEBUG(D_VFSTRACE, "lock: %d [%lu, %lu]\n", mode, start, end);
-
- memset(&cio->cui_link, 0, sizeof(cio->cui_link));
-
- if (cio->cui_fd && (cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
- descr->cld_mode = CLM_GROUP;
- descr->cld_gid = cio->cui_fd->fd_grouplock.cg_gid;
- } else {
- descr->cld_mode = mode;
- }
- descr->cld_obj = obj;
- descr->cld_start = start;
- descr->cld_end = end;
- descr->cld_enq_flags = enqflags;
-
- cl_io_lock_add(env, io, &cio->cui_link);
- return 0;
-}
-
-void ccc_io_update_iov(const struct lu_env *env,
- struct ccc_io *cio, struct cl_io *io)
-{
- size_t size = io->u.ci_rw.crw_count;
-
- if (!cl_is_normalio(env, io) || !cio->cui_iter)
- return;
-
- iov_iter_truncate(cio->cui_iter, size);
-}
-
-int ccc_io_one_lock(const struct lu_env *env, struct cl_io *io,
- __u32 enqflags, enum cl_lock_mode mode,
- loff_t start, loff_t end)
-{
- struct cl_object *obj = io->ci_obj;
-
- return ccc_io_one_lock_index(env, io, enqflags, mode,
- cl_index(obj, start), cl_index(obj, end));
-}
-
-void ccc_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
-{
- CLOBINVRNT(env, ios->cis_io->ci_obj,
- ccc_object_invariant(ios->cis_io->ci_obj));
-}
-
-void ccc_io_advance(const struct lu_env *env,
- const struct cl_io_slice *ios,
- size_t nob)
-{
- struct ccc_io *cio = cl2ccc_io(env, ios);
- struct cl_io *io = ios->cis_io;
- struct cl_object *obj = ios->cis_io->ci_obj;
-
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
-
- if (!cl_is_normalio(env, io))
- return;
-
- iov_iter_reexpand(cio->cui_iter, cio->cui_tot_count -= nob);
-}
-
-/**
- * Helper function that if necessary adjusts file size (inode->i_size), when
- * position at the offset \a pos is accessed. File size can be arbitrary stale
- * on a Lustre client, but client at least knows KMS. If accessed area is
- * inside [0, KMS], set file size to KMS, otherwise glimpse file size.
- *
- * Locking: cl_isize_lock is used to serialize changes to inode size and to
- * protect consistency between inode size and cl_object
- * attributes. cl_object_size_lock() protects consistency between cl_attr's of
- * top-object and sub-objects.
- */
-int ccc_prep_size(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io, loff_t start, size_t count, int *exceed)
-{
- struct cl_attr *attr = ccc_env_thread_attr(env);
- struct inode *inode = ccc_object_inode(obj);
- loff_t pos = start + count - 1;
- loff_t kms;
- int result;
-
- /*
- * Consistency guarantees: following possibilities exist for the
- * relation between region being accessed and real file size at this
- * moment:
- *
- * (A): the region is completely inside of the file;
- *
- * (B-x): x bytes of region are inside of the file, the rest is
- * outside;
- *
- * (C): the region is completely outside of the file.
- *
- * This classification is stable under DLM lock already acquired by
- * the caller, because to change the class, other client has to take
- * DLM lock conflicting with our lock. Also, any updates to ->i_size
- * by other threads on this client are serialized by
- * ll_inode_size_lock(). This guarantees that short reads are handled
- * correctly in the face of concurrent writes and truncates.
- */
- ccc_object_size_lock(obj);
- result = cl_object_attr_get(env, obj, attr);
- if (result == 0) {
- kms = attr->cat_kms;
- if (pos > kms) {
- /*
- * A glimpse is necessary to determine whether we
- * return a short read (B) or some zeroes at the end
- * of the buffer (C)
- */
- ccc_object_size_unlock(obj);
- result = cl_glimpse_lock(env, io, inode, obj, 0);
- if (result == 0 && exceed) {
- /* If objective page index exceed end-of-file
- * page index, return directly. Do not expect
- * kernel will check such case correctly.
- * linux-2.6.18-128.1.1 miss to do that.
- * --bug 17336
- */
- loff_t size = cl_isize_read(inode);
- loff_t cur_index = start >> PAGE_SHIFT;
- loff_t size_index = (size - 1) >>
- PAGE_SHIFT;
-
- if ((size == 0 && cur_index != 0) ||
- size_index < cur_index)
- *exceed = 1;
- }
- return result;
- }
- /*
- * region is within kms and, hence, within real file
- * size (A). We need to increase i_size to cover the
- * read region so that generic_file_read() will do its
- * job, but that doesn't mean the kms size is
- * _correct_, it is only the _minimum_ size. If
- * someone does a stat they will get the correct size
- * which will always be >= the kms value here.
- * b=11081
- */
- if (cl_isize_read(inode) < kms) {
- cl_isize_write_nolock(inode, kms);
- CDEBUG(D_VFSTRACE,
- DFID" updating i_size %llu\n",
- PFID(lu_object_fid(&obj->co_lu)),
- (__u64)cl_isize_read(inode));
-
- }
- }
- ccc_object_size_unlock(obj);
- return result;
-}
-
-/*****************************************************************************
- *
- * Transfer operations.
- *
- */
-
-void ccc_req_completion(const struct lu_env *env,
- const struct cl_req_slice *slice, int ioret)
-{
- struct ccc_req *vrq;
-
- if (ioret > 0)
- cl_stats_tally(slice->crs_dev, slice->crs_req->crq_type, ioret);
-
- vrq = cl2ccc_req(slice);
- kmem_cache_free(ccc_req_kmem, vrq);
-}
-
-/**
- * Implementation of struct cl_req_operations::cro_attr_set() for ccc
- * layer. ccc is responsible for
- *
- * - o_[mac]time
- *
- * - o_mode
- *
- * - o_parent_seq
- *
- * - o_[ug]id
- *
- * - o_parent_oid
- *
- * - o_parent_ver
- *
- * - o_ioepoch,
- *
- */
-void ccc_req_attr_set(const struct lu_env *env,
- const struct cl_req_slice *slice,
- const struct cl_object *obj,
- struct cl_req_attr *attr, u64 flags)
-{
- struct inode *inode;
- struct obdo *oa;
- u32 valid_flags;
-
- oa = attr->cra_oa;
- inode = ccc_object_inode(obj);
- valid_flags = OBD_MD_FLTYPE;
-
- if (slice->crs_req->crq_type == CRT_WRITE) {
- if (flags & OBD_MD_FLEPOCH) {
- oa->o_valid |= OBD_MD_FLEPOCH;
- oa->o_ioepoch = cl_i2info(inode)->lli_ioepoch;
- valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
- OBD_MD_FLUID | OBD_MD_FLGID;
- }
- }
- obdo_from_inode(oa, inode, valid_flags & flags);
- obdo_set_parent_fid(oa, &cl_i2info(inode)->lli_fid);
- memcpy(attr->cra_jobid, cl_i2info(inode)->lli_jobid,
- JOBSTATS_JOBID_SIZE);
-}
-
-static const struct cl_req_operations ccc_req_ops = {
- .cro_attr_set = ccc_req_attr_set,
- .cro_completion = ccc_req_completion
-};
-
-int cl_setattr_ost(struct inode *inode, const struct iattr *attr)
-{
- struct lu_env *env;
- struct cl_io *io;
- int result;
- int refcheck;
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- io = ccc_env_thread_io(env);
- io->ci_obj = cl_i2info(inode)->lli_clob;
-
- io->u.ci_setattr.sa_attr.lvb_atime = LTIME_S(attr->ia_atime);
- io->u.ci_setattr.sa_attr.lvb_mtime = LTIME_S(attr->ia_mtime);
- io->u.ci_setattr.sa_attr.lvb_ctime = LTIME_S(attr->ia_ctime);
- io->u.ci_setattr.sa_attr.lvb_size = attr->ia_size;
- io->u.ci_setattr.sa_valid = attr->ia_valid;
-
-again:
- if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0) {
- struct ccc_io *cio = ccc_env_io(env);
-
- if (attr->ia_valid & ATTR_FILE)
- /* populate the file descriptor for ftruncate to honor
- * group lock - see LU-787
- */
- cio->cui_fd = cl_iattr2fd(inode, attr);
-
- result = cl_io_loop(env, io);
- } else {
- result = io->ci_result;
- }
- cl_io_fini(env, io);
- if (unlikely(io->ci_need_restart))
- goto again;
- /* HSM import case: file is released, cannot be restored
- * no need to fail except if restore registration failed
- * with -ENODATA
- */
- if (result == -ENODATA && io->ci_restore_needed &&
- io->ci_result != -ENODATA)
- result = 0;
- cl_env_put(env, &refcheck);
- return result;
-}
-
-/*****************************************************************************
- *
- * Type conversions.
- *
- */
-
-struct lu_device *ccc2lu_dev(struct ccc_device *vdv)
-{
- return &vdv->cdv_cl.cd_lu_dev;
-}
-
-struct ccc_device *lu2ccc_dev(const struct lu_device *d)
-{
- return container_of0(d, struct ccc_device, cdv_cl.cd_lu_dev);
-}
-
-struct ccc_device *cl2ccc_dev(const struct cl_device *d)
-{
- return container_of0(d, struct ccc_device, cdv_cl);
-}
-
-struct lu_object *ccc2lu(struct ccc_object *vob)
-{
- return &vob->cob_cl.co_lu;
-}
-
-struct ccc_object *lu2ccc(const struct lu_object *obj)
-{
- return container_of0(obj, struct ccc_object, cob_cl.co_lu);
-}
-
-struct ccc_object *cl2ccc(const struct cl_object *obj)
-{
- return container_of0(obj, struct ccc_object, cob_cl);
-}
-
-struct ccc_lock *cl2ccc_lock(const struct cl_lock_slice *slice)
-{
- return container_of(slice, struct ccc_lock, clk_cl);
-}
-
-struct ccc_io *cl2ccc_io(const struct lu_env *env,
- const struct cl_io_slice *slice)
-{
- struct ccc_io *cio;
-
- cio = container_of(slice, struct ccc_io, cui_cl);
- LASSERT(cio == ccc_env_io(env));
- return cio;
-}
-
-struct ccc_req *cl2ccc_req(const struct cl_req_slice *slice)
-{
- return container_of0(slice, struct ccc_req, crq_cl);
-}
-
-struct page *cl2vm_page(const struct cl_page_slice *slice)
-{
- return cl2ccc_page(slice)->cpg_page;
-}
-
-/*****************************************************************************
- *
- * Accessors.
- *
- */
-int ccc_object_invariant(const struct cl_object *obj)
-{
- struct inode *inode = ccc_object_inode(obj);
- struct cl_inode_info *lli = cl_i2info(inode);
-
- return (S_ISREG(cl_inode_mode(inode)) ||
- /* i_mode of unlinked inode is zeroed. */
- cl_inode_mode(inode) == 0) && lli->lli_clob == obj;
-}
-
-struct inode *ccc_object_inode(const struct cl_object *obj)
-{
- return cl2ccc(obj)->cob_inode;
-}
-
-/**
- * Initialize or update CLIO structures for regular files when new
- * meta-data arrives from the server.
- *
- * \param inode regular file inode
- * \param md new file metadata from MDS
- * - allocates cl_object if necessary,
- * - updated layout, if object was already here.
- */
-int cl_file_inode_init(struct inode *inode, struct lustre_md *md)
-{
- struct lu_env *env;
- struct cl_inode_info *lli;
- struct cl_object *clob;
- struct lu_site *site;
- struct lu_fid *fid;
- struct cl_object_conf conf = {
- .coc_inode = inode,
- .u = {
- .coc_md = md
- }
- };
- int result = 0;
- int refcheck;
-
- LASSERT(md->body->valid & OBD_MD_FLID);
- LASSERT(S_ISREG(cl_inode_mode(inode)));
-
- env = cl_env_get(&refcheck);
- if (IS_ERR(env))
- return PTR_ERR(env);
-
- site = cl_i2sbi(inode)->ll_site;
- lli = cl_i2info(inode);
- fid = &lli->lli_fid;
- LASSERT(fid_is_sane(fid));
-
- if (!lli->lli_clob) {
- /* clob is slave of inode, empty lli_clob means for new inode,
- * there is no clob in cache with the given fid, so it is
- * unnecessary to perform lookup-alloc-lookup-insert, just
- * alloc and insert directly.
- */
- LASSERT(inode->i_state & I_NEW);
- conf.coc_lu.loc_flags = LOC_F_NEW;
- clob = cl_object_find(env, lu2cl_dev(site->ls_top_dev),
- fid, &conf);
- if (!IS_ERR(clob)) {
- /*
- * No locking is necessary, as new inode is
- * locked by I_NEW bit.
- */
- lli->lli_clob = clob;
- lli->lli_has_smd = lsm_has_objects(md->lsm);
- lu_object_ref_add(&clob->co_lu, "inode", inode);
- } else
- result = PTR_ERR(clob);
- } else {
- result = cl_conf_set(env, lli->lli_clob, &conf);
- }
-
- cl_env_put(env, &refcheck);
-
- if (result != 0)
- CERROR("Failure to initialize cl object "DFID": %d\n",
- PFID(fid), result);
- return result;
-}
-
-/**
- * Wait for others drop their references of the object at first, then we drop
- * the last one, which will lead to the object be destroyed immediately.
- * Must be called after cl_object_kill() against this object.
- *
- * The reason we want to do this is: destroying top object will wait for sub
- * objects being destroyed first, so we can't let bottom layer (e.g. from ASTs)
- * to initiate top object destroying which may deadlock. See bz22520.
- */
-static void cl_object_put_last(struct lu_env *env, struct cl_object *obj)
-{
- struct lu_object_header *header = obj->co_lu.lo_header;
- wait_queue_t waiter;
-
- if (unlikely(atomic_read(&header->loh_ref) != 1)) {
- struct lu_site *site = obj->co_lu.lo_dev->ld_site;
- struct lu_site_bkt_data *bkt;
-
- bkt = lu_site_bkt_from_fid(site, &header->loh_fid);
-
- init_waitqueue_entry(&waiter, current);
- add_wait_queue(&bkt->lsb_marche_funebre, &waiter);
-
- while (1) {
- set_current_state(TASK_UNINTERRUPTIBLE);
- if (atomic_read(&header->loh_ref) == 1)
- break;
- schedule();
- }
-
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&bkt->lsb_marche_funebre, &waiter);
- }
-
- cl_object_put(env, obj);
-}
-
-void cl_inode_fini(struct inode *inode)
-{
- struct lu_env *env;
- struct cl_inode_info *lli = cl_i2info(inode);
- struct cl_object *clob = lli->lli_clob;
- int refcheck;
- int emergency;
-
- if (clob) {
- void *cookie;
-
- cookie = cl_env_reenter();
- env = cl_env_get(&refcheck);
- emergency = IS_ERR(env);
- if (emergency) {
- mutex_lock(&ccc_inode_fini_guard);
- LASSERT(ccc_inode_fini_env);
- cl_env_implant(ccc_inode_fini_env, &refcheck);
- env = ccc_inode_fini_env;
- }
- /*
- * cl_object cache is a slave to inode cache (which, in turn
- * is a slave to dentry cache), don't keep cl_object in memory
- * when its master is evicted.
- */
- cl_object_kill(env, clob);
- lu_object_ref_del(&clob->co_lu, "inode", inode);
- cl_object_put_last(env, clob);
- lli->lli_clob = NULL;
- if (emergency) {
- cl_env_unplant(ccc_inode_fini_env, &refcheck);
- mutex_unlock(&ccc_inode_fini_guard);
- } else
- cl_env_put(env, &refcheck);
- cl_env_reexit(cookie);
- }
-}
-
-/**
- * return IF_* type for given lu_dirent entry.
- * IF_* flag shld be converted to particular OS file type in
- * platform llite module.
- */
-__u16 ll_dirent_type_get(struct lu_dirent *ent)
-{
- __u16 type = 0;
- struct luda_type *lt;
- int len = 0;
-
- if (le32_to_cpu(ent->lde_attrs) & LUDA_TYPE) {
- const unsigned align = sizeof(struct luda_type) - 1;
-
- len = le16_to_cpu(ent->lde_namelen);
- len = (len + align) & ~align;
- lt = (void *)ent->lde_name + len;
- type = IFTODT(le16_to_cpu(lt->lt_type));
- }
- return type;
-}
-
-/**
- * build inode number from passed @fid
- */
-__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32)
-{
- if (BITS_PER_LONG == 32 || api32)
- return fid_flatten32(fid);
- else
- return fid_flatten(fid);
-}
-
-/**
- * build inode generation from passed @fid. If our FID overflows the 32-bit
- * inode number then return a non-zero generation to distinguish them.
- */
-__u32 cl_fid_build_gen(const struct lu_fid *fid)
-{
- __u32 gen;
-
- if (fid_is_igif(fid)) {
- gen = lu_igif_gen(fid);
- return gen;
- }
-
- gen = fid_flatten(fid) >> 32;
- return gen;
-}
-
-/* lsm is unreliable after hsm implementation as layout can be changed at
- * any time. This is only to support old, non-clio-ized interfaces. It will
- * cause deadlock if clio operations are called with this extra layout refcount
- * because in case the layout changed during the IO, ll_layout_refresh() will
- * have to wait for the refcount to become zero to destroy the older layout.
- *
- * Notice that the lsm returned by this function may not be valid unless called
- * inside layout lock - MDS_INODELOCK_LAYOUT.
- */
-struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode)
-{
- return lov_lsm_get(cl_i2info(inode)->lli_clob);
-}
-
-inline void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm)
-{
- lov_lsm_put(cl_i2info(inode)->lli_clob, lsm);
-}
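The deleted common-client (ccc) file above ends with a block of "Type conversions" helpers (ccc2lu, lu2ccc, cl2ccc, cl2ccc_lock, ...) that walk between an embedded cl/lu slice and its enclosing ccc object via container_of; container_of0 appears to be a NULL/ERR_PTR-tolerant variant of the same idea. A minimal stand-alone sketch of that pattern, using invented demo types rather than the real ccc structures:

#include <stddef.h>

/* Toy stand-ins for the layered objects; not the real Lustre types. */
struct demo_lu_object { int lo_id; };

struct demo_ccc_object {
	int cob_flags;
	struct demo_lu_object cob_lu;	/* embedded "slice" */
};

/* container_of: recover the enclosing object from a pointer to a member. */
#define demo_container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

static inline struct demo_lu_object *demo_ccc2lu(struct demo_ccc_object *vob)
{
	return &vob->cob_lu;		/* "down": just take the member */
}

static inline struct demo_ccc_object *demo_lu2ccc(struct demo_lu_object *obj)
{
	/* "up": subtract the member offset to get the container back */
	return demo_container_of(obj, struct demo_ccc_object, cob_lu);
}

This round trip is what lets the generic cl/lu layers hand a slice pointer back to the client layer without storing an explicit back pointer.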
diff --git a/drivers/staging/lustre/lustre/ldlm/l_lock.c b/drivers/staging/lustre/lustre/ldlm/l_lock.c
index e5d1344e817a..621323f6ee60 100644
--- a/drivers/staging/lustre/lustre/ldlm/l_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/l_lock.c
@@ -54,7 +54,7 @@ struct ldlm_resource *lock_res_and_lock(struct ldlm_lock *lock)
lock_res(lock->l_resource);
- lock->l_flags |= LDLM_FL_RES_LOCKED;
+ ldlm_set_res_locked(lock);
return lock->l_resource;
}
EXPORT_SYMBOL(lock_res_and_lock);
@@ -65,7 +65,7 @@ EXPORT_SYMBOL(lock_res_and_lock);
void unlock_res_and_lock(struct ldlm_lock *lock)
{
/* on server-side resource of lock doesn't change */
- lock->l_flags &= ~LDLM_FL_RES_LOCKED;
+ ldlm_clear_res_locked(lock);
unlock_res(lock->l_resource);
spin_unlock(&lock->l_lock);
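The l_lock.c hunks above, and most of the ldlm hunks that follow, replace open-coded manipulation of lock->l_flags (for example lock->l_flags |= LDLM_FL_RES_LOCKED) with ldlm_set_*/ldlm_clear_*/ldlm_is_* helpers. The helper definitions live in a header that is not part of this excerpt; the sketch below only illustrates the presumed shape of such accessors over a 64-bit flags word, with invented names and bit values:

#include <stdbool.h>
#include <stdint.h>

struct demo_lock {
	uint64_t l_flags;
};

#define DEMO_FL_RES_LOCKED	(1ULL << 0)	/* hypothetical bit assignment */

static inline bool demo_is_res_locked(const struct demo_lock *lock)
{
	return (lock->l_flags & DEMO_FL_RES_LOCKED) != 0;
}

static inline void demo_set_res_locked(struct demo_lock *lock)
{
	lock->l_flags |= DEMO_FL_RES_LOCKED;
}

static inline void demo_clear_res_locked(struct demo_lock *lock)
{
	lock->l_flags &= ~DEMO_FL_RES_LOCKED;
}

Wrapping the bit manipulation keeps call sites grepable and makes it harder to test one flag while meaning another, which appears to be the motivation for the conversion in these hunks.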
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
index a803e200f206..cf1f1783632f 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_extent.c
@@ -75,12 +75,12 @@ __u64 ldlm_extent_shift_kms(struct ldlm_lock *lock, __u64 old_kms)
* just after we finish and take our lock into account in its
* calculation of the kms
*/
- lock->l_flags |= LDLM_FL_KMS_IGNORE;
+ ldlm_set_kms_ignore(lock);
list_for_each(tmp, &res->lr_granted) {
lck = list_entry(tmp, struct ldlm_lock, l_res_link);
- if (lck->l_flags & LDLM_FL_KMS_IGNORE)
+ if (ldlm_is_kms_ignore(lck))
continue;
if (lck->l_policy_data.l_extent.end >= old_kms)
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
index b88b78606aee..349bfcc9b331 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_flock.c
@@ -101,8 +101,7 @@ ldlm_flock_destroy(struct ldlm_lock *lock, enum ldlm_mode mode, __u64 flags)
LASSERT(hlist_unhashed(&lock->l_exp_flock_hash));
list_del_init(&lock->l_res_link);
- if (flags == LDLM_FL_WAIT_NOREPROC &&
- !(lock->l_flags & LDLM_FL_FAILED)) {
+ if (flags == LDLM_FL_WAIT_NOREPROC && !ldlm_is_failed(lock)) {
/* client side - set a flag to prevent sending a CANCEL */
lock->l_flags |= LDLM_FL_LOCAL_ONLY | LDLM_FL_CBPENDING;
@@ -436,7 +435,7 @@ ldlm_flock_interrupted_wait(void *data)
lock_res_and_lock(lock);
/* client side - set flag to prevent lock from being put on LRU list */
- lock->l_flags |= LDLM_FL_CBPENDING;
+ ldlm_set_cbpending(lock);
unlock_res_and_lock(lock);
}
@@ -520,30 +519,29 @@ ldlm_flock_completion_ast(struct ldlm_lock *lock, __u64 flags, void *data)
granted:
OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_CP_CB_WAIT, 10);
- if (lock->l_flags & LDLM_FL_DESTROYED) {
- LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
- return 0;
- }
-
- if (lock->l_flags & LDLM_FL_FAILED) {
+ if (ldlm_is_failed(lock)) {
LDLM_DEBUG(lock, "client-side enqueue waking up: failed");
return -EIO;
}
- if (rc) {
- LDLM_DEBUG(lock, "client-side enqueue waking up: failed (%d)",
- rc);
- return rc;
- }
-
LDLM_DEBUG(lock, "client-side enqueue granted");
lock_res_and_lock(lock);
+ /*
+ * Protect against race where lock could have been just destroyed
+ * due to overlap in ldlm_process_flock_lock().
+ */
+ if (ldlm_is_destroyed(lock)) {
+ unlock_res_and_lock(lock);
+ LDLM_DEBUG(lock, "client-side enqueue waking up: destroyed");
+ return 0;
+ }
+
/* ldlm_lock_enqueue() has already placed lock on the granted list. */
list_del_init(&lock->l_res_link);
- if (lock->l_flags & LDLM_FL_FLOCK_DEADLOCK) {
+ if (ldlm_is_flock_deadlock(lock)) {
LDLM_DEBUG(lock, "client-side enqueue deadlock received");
rc = -EDEADLK;
} else if (flags & LDLM_FL_TEST_LOCK) {
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
index e21373e7306f..32f227f37799 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_internal.h
@@ -95,9 +95,10 @@ enum {
LDLM_CANCEL_PASSED = 1 << 1, /* Cancel passed number of locks. */
LDLM_CANCEL_SHRINK = 1 << 2, /* Cancel locks from shrinker. */
LDLM_CANCEL_LRUR = 1 << 3, /* Cancel locks from lru resize. */
- LDLM_CANCEL_NO_WAIT = 1 << 4 /* Cancel locks w/o blocking (neither
- * sending nor waiting for any rpcs)
- */
+ LDLM_CANCEL_NO_WAIT = 1 << 4, /* Cancel locks w/o blocking (neither
+ * sending nor waiting for any rpcs)
+ */
+ LDLM_CANCEL_LRUR_NO_WAIT = 1 << 5, /* LRUR + NO_WAIT */
};
int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr,
@@ -145,7 +146,8 @@ void ldlm_lock_decref_internal(struct ldlm_lock *, __u32 mode);
void ldlm_lock_decref_internal_nolock(struct ldlm_lock *, __u32 mode);
int ldlm_run_ast_work(struct ldlm_namespace *ns, struct list_head *rpc_list,
enum ldlm_desc_ast_t ast_type);
-int ldlm_lock_remove_from_lru(struct ldlm_lock *lock);
+int ldlm_lock_remove_from_lru_check(struct ldlm_lock *lock, time_t last_use);
+#define ldlm_lock_remove_from_lru(lock) ldlm_lock_remove_from_lru_check(lock, 0)
int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock);
void ldlm_lock_destroy_nolock(struct ldlm_lock *lock);
@@ -216,8 +218,6 @@ enum ldlm_policy_res {
LDLM_POLICY_SKIP_LOCK
};
-typedef enum ldlm_policy_res ldlm_policy_res_t;
-
#define LDLM_POOL_SYSFS_PRINT_int(v) sprintf(buf, "%d\n", v)
#define LDLM_POOL_SYSFS_SET_int(a, b) { a = b; }
#define LDLM_POOL_SYSFS_PRINT_u64(v) sprintf(buf, "%lld\n", v)
@@ -305,9 +305,10 @@ static inline int is_granted_or_cancelled(struct ldlm_lock *lock)
int ret = 0;
lock_res_and_lock(lock);
- if (((lock->l_req_mode == lock->l_granted_mode) &&
- !(lock->l_flags & LDLM_FL_CP_REQD)) ||
- (lock->l_flags & (LDLM_FL_FAILED | LDLM_FL_CANCEL)))
+ if ((lock->l_req_mode == lock->l_granted_mode) &&
+ !ldlm_is_cp_reqd(lock))
+ ret = 1;
+ else if (ldlm_is_failed(lock) || ldlm_is_cancel(lock))
ret = 1;
unlock_res_and_lock(lock);
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
index 7dd7df59aa1f..b4ffbe2fc4ed 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lib.c
@@ -314,7 +314,7 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list);
INIT_LIST_HEAD(&cli->cl_loi_write_list);
INIT_LIST_HEAD(&cli->cl_loi_read_list);
- client_obd_list_lock_init(&cli->cl_loi_list_lock);
+ spin_lock_init(&cli->cl_loi_list_lock);
atomic_set(&cli->cl_pending_w_pages, 0);
atomic_set(&cli->cl_pending_r_pages, 0);
cli->cl_r_in_flight = 0;
@@ -333,7 +333,8 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
atomic_set(&cli->cl_lru_busy, 0);
atomic_set(&cli->cl_lru_in_list, 0);
INIT_LIST_HEAD(&cli->cl_lru_list);
- client_obd_list_lock_init(&cli->cl_lru_list_lock);
+ spin_lock_init(&cli->cl_lru_list_lock);
+ atomic_set(&cli->cl_unstable_count, 0);
init_waitqueue_head(&cli->cl_destroy_waitq);
atomic_set(&cli->cl_destroy_in_flight, 0);
@@ -355,6 +356,12 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
cli->cl_max_pages_per_rpc = min_t(int, PTLRPC_MAX_BRW_PAGES,
LNET_MTU >> PAGE_SHIFT);
+ /*
+ * set cl_chunkbits default value to PAGE_CACHE_SHIFT,
+ * it will be updated at OSC connection time.
+ */
+ cli->cl_chunkbits = PAGE_SHIFT;
+
if (!strcmp(name, LUSTRE_MDC_NAME)) {
cli->cl_max_rpcs_in_flight = MDC_MAX_RIF_DEFAULT;
} else if (totalram_pages >> (20 - PAGE_SHIFT) <= 128 /* MB */) {
@@ -429,7 +436,6 @@ err_ldlm:
ldlm_put_ref();
err:
return rc;
-
}
EXPORT_SYMBOL(client_obd_setup);
@@ -438,6 +444,7 @@ int client_obd_cleanup(struct obd_device *obddev)
ldlm_namespace_free_post(obddev->obd_namespace);
obddev->obd_namespace = NULL;
+ obd_cleanup_client_import(obddev);
LASSERT(!obddev->u.cli.cl_import);
ldlm_put_ref();
@@ -748,6 +755,7 @@ int ldlm_error2errno(enum ldlm_error error)
switch (error) {
case ELDLM_OK:
+ case ELDLM_LOCK_MATCHED:
result = 0;
break;
case ELDLM_LOCK_CHANGED:
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
index ecd65a7a3dc9..bff94ea12d6f 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lock.c
@@ -185,7 +185,7 @@ void ldlm_lock_put(struct ldlm_lock *lock)
"final lock_put on destroyed lock, freeing it.");
res = lock->l_resource;
- LASSERT(lock->l_flags & LDLM_FL_DESTROYED);
+ LASSERT(ldlm_is_destroyed(lock));
LASSERT(list_empty(&lock->l_res_link));
LASSERT(list_empty(&lock->l_pending_chain));
@@ -229,15 +229,25 @@ int ldlm_lock_remove_from_lru_nolock(struct ldlm_lock *lock)
/**
* Removes LDLM lock \a lock from LRU. Obtains the LRU lock first.
+ *
+ * If \a last_use is non-zero, it will remove the lock from LRU only if
+ * it matches lock's l_last_used.
+ *
+ * \retval 0 if \a last_use is set, the lock is not in LRU list or \a last_use
+ * doesn't match lock's l_last_used;
+ * otherwise, the lock hasn't been in the LRU list.
+ * \retval 1 the lock was in LRU list and removed.
*/
-int ldlm_lock_remove_from_lru(struct ldlm_lock *lock)
+int ldlm_lock_remove_from_lru_check(struct ldlm_lock *lock, time_t last_use)
{
struct ldlm_namespace *ns = ldlm_lock_to_ns(lock);
- int rc;
+ int rc = 0;
spin_lock(&ns->ns_lock);
- rc = ldlm_lock_remove_from_lru_nolock(lock);
+ if (last_use == 0 || last_use == lock->l_last_used)
+ rc = ldlm_lock_remove_from_lru_nolock(lock);
spin_unlock(&ns->ns_lock);
+
return rc;
}
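Per the kernel-doc above, ldlm_lock_remove_from_lru_check() only drops the lock from the LRU when the caller's last_use snapshot still matches l_last_used, i.e. when the lock has not been reused since the caller sampled it; passing 0 keeps the old unconditional behaviour, which is what the ldlm_lock_remove_from_lru() compatibility macro in ldlm_internal.h does. A compressed sketch of that compare-before-remove idea, with invented demo types:

#include <stdbool.h>
#include <time.h>

struct demo_lock {
	time_t dl_last_used;	/* refreshed whenever the lock is reused */
	bool dl_on_lru;
};

/* Remove from the LRU only if the lock was not reused since 'last_use'. */
static int demo_remove_from_lru_check(struct demo_lock *lk, time_t last_use)
{
	int removed = 0;

	/* the namespace lock would be held here in the real code */
	if (last_use == 0 || last_use == lk->dl_last_used) {
		if (lk->dl_on_lru) {
			lk->dl_on_lru = false;
			removed = 1;	/* mirrors the "\retval 1" case */
		}
	}
	return removed;
}

/* Old behaviour: unconditional removal, matching the compat macro. */
#define demo_remove_from_lru(lk) demo_remove_from_lru_check((lk), 0)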
@@ -252,8 +262,7 @@ static void ldlm_lock_add_to_lru_nolock(struct ldlm_lock *lock)
LASSERT(list_empty(&lock->l_lru));
LASSERT(lock->l_resource->lr_type != LDLM_FLOCK);
list_add_tail(&lock->l_lru, &ns->ns_unused_list);
- if (lock->l_flags & LDLM_FL_SKIPPED)
- lock->l_flags &= ~LDLM_FL_SKIPPED;
+ ldlm_clear_skipped(lock);
LASSERT(ns->ns_nr_unused >= 0);
ns->ns_nr_unused++;
}
@@ -318,11 +327,11 @@ static int ldlm_lock_destroy_internal(struct ldlm_lock *lock)
LBUG();
}
- if (lock->l_flags & LDLM_FL_DESTROYED) {
+ if (ldlm_is_destroyed(lock)) {
LASSERT(list_empty(&lock->l_lru));
return 0;
}
- lock->l_flags |= LDLM_FL_DESTROYED;
+ ldlm_set_destroyed(lock);
if (lock->l_export && lock->l_export->exp_lock_hash) {
/* NB: it's safe to call cfs_hash_del() even lock isn't
@@ -544,7 +553,7 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
/* It's unlikely but possible that someone marked the lock as
* destroyed after we did handle2object on it
*/
- if (flags == 0 && ((lock->l_flags & LDLM_FL_DESTROYED) == 0)) {
+ if (flags == 0 && !ldlm_is_destroyed(lock)) {
lu_ref_add(&lock->l_reference, "handle", current);
return lock;
}
@@ -554,21 +563,22 @@ struct ldlm_lock *__ldlm_handle2lock(const struct lustre_handle *handle,
LASSERT(lock->l_resource);
lu_ref_add_atomic(&lock->l_reference, "handle", current);
- if (unlikely(lock->l_flags & LDLM_FL_DESTROYED)) {
+ if (unlikely(ldlm_is_destroyed(lock))) {
unlock_res_and_lock(lock);
CDEBUG(D_INFO, "lock already destroyed: lock %p\n", lock);
LDLM_LOCK_PUT(lock);
return NULL;
}
- if (flags && (lock->l_flags & flags)) {
- unlock_res_and_lock(lock);
- LDLM_LOCK_PUT(lock);
- return NULL;
- }
+ if (flags) {
+ if (lock->l_flags & flags) {
+ unlock_res_and_lock(lock);
+ LDLM_LOCK_PUT(lock);
+ return NULL;
+ }
- if (flags)
lock->l_flags |= flags;
+ }
unlock_res_and_lock(lock);
return lock;
@@ -599,14 +609,14 @@ EXPORT_SYMBOL(ldlm_lock2desc);
static void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
struct list_head *work_list)
{
- if ((lock->l_flags & LDLM_FL_AST_SENT) == 0) {
+ if (!ldlm_is_ast_sent(lock)) {
LDLM_DEBUG(lock, "lock incompatible; sending blocking AST.");
- lock->l_flags |= LDLM_FL_AST_SENT;
+ ldlm_set_ast_sent(lock);
/* If the enqueuing client said so, tell the AST recipient to
* discard dirty data, rather than writing back.
*/
- if (new->l_flags & LDLM_FL_AST_DISCARD_DATA)
- lock->l_flags |= LDLM_FL_DISCARD_DATA;
+ if (ldlm_is_ast_discard_data(new))
+ ldlm_set_discard_data(lock);
LASSERT(list_empty(&lock->l_bl_ast));
list_add(&lock->l_bl_ast, work_list);
LDLM_LOCK_GET(lock);
@@ -621,8 +631,8 @@ static void ldlm_add_bl_work_item(struct ldlm_lock *lock, struct ldlm_lock *new,
static void ldlm_add_cp_work_item(struct ldlm_lock *lock,
struct list_head *work_list)
{
- if ((lock->l_flags & LDLM_FL_CP_REQD) == 0) {
- lock->l_flags |= LDLM_FL_CP_REQD;
+ if (!ldlm_is_cp_reqd(lock)) {
+ ldlm_set_cp_reqd(lock);
LDLM_DEBUG(lock, "lock granted; sending completion AST.");
LASSERT(list_empty(&lock->l_cp_ast));
list_add(&lock->l_cp_ast, work_list);
@@ -657,7 +667,7 @@ void ldlm_lock_addref(struct lustre_handle *lockh, __u32 mode)
struct ldlm_lock *lock;
lock = ldlm_handle2lock(lockh);
- LASSERT(lock);
+ LASSERTF(lock, "Non-existing lock: %llx\n", lockh->cookie);
ldlm_lock_addref_internal(lock, mode);
LDLM_LOCK_PUT(lock);
}
@@ -704,7 +714,7 @@ int ldlm_lock_addref_try(struct lustre_handle *lockh, __u32 mode)
if (lock) {
lock_res_and_lock(lock);
if (lock->l_readers != 0 || lock->l_writers != 0 ||
- !(lock->l_flags & LDLM_FL_CBPENDING)) {
+ !ldlm_is_cbpending(lock)) {
ldlm_lock_addref_internal_nolock(lock, mode);
result = 0;
}
@@ -770,17 +780,17 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
ldlm_lock_decref_internal_nolock(lock, mode);
- if (lock->l_flags & LDLM_FL_LOCAL &&
+ if (ldlm_is_local(lock) &&
!lock->l_readers && !lock->l_writers) {
/* If this is a local lock on a server namespace and this was
* the last reference, cancel the lock.
*/
CDEBUG(D_INFO, "forcing cancel of local lock\n");
- lock->l_flags |= LDLM_FL_CBPENDING;
+ ldlm_set_cbpending(lock);
}
if (!lock->l_readers && !lock->l_writers &&
- (lock->l_flags & LDLM_FL_CBPENDING)) {
+ ldlm_is_cbpending(lock)) {
/* If we received a blocked AST and this was the last reference,
* run the callback.
*/
@@ -791,16 +801,14 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
ldlm_lock_remove_from_lru(lock);
unlock_res_and_lock(lock);
- if (lock->l_flags & LDLM_FL_FAIL_LOC)
+ if (ldlm_is_fail_loc(lock))
OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
- if ((lock->l_flags & LDLM_FL_ATOMIC_CB) ||
+ if (ldlm_is_atomic_cb(lock) ||
ldlm_bl_to_thread_lock(ns, NULL, lock) != 0)
ldlm_handle_bl_callback(ns, NULL, lock);
} else if (!lock->l_readers && !lock->l_writers &&
- !(lock->l_flags & LDLM_FL_NO_LRU) &&
- !(lock->l_flags & LDLM_FL_BL_AST)) {
-
+ !ldlm_is_no_lru(lock) && !ldlm_is_bl_ast(lock)) {
LDLM_DEBUG(lock, "add lock into lru list");
/* If this is a client-side namespace and this was the last
@@ -809,7 +817,7 @@ void ldlm_lock_decref_internal(struct ldlm_lock *lock, __u32 mode)
ldlm_lock_add_to_lru(lock);
unlock_res_and_lock(lock);
- if (lock->l_flags & LDLM_FL_FAIL_LOC)
+ if (ldlm_is_fail_loc(lock))
OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
/* Call ldlm_cancel_lru() only if EARLY_CANCEL and LRU RESIZE
@@ -853,7 +861,7 @@ void ldlm_lock_decref_and_cancel(struct lustre_handle *lockh, __u32 mode)
LDLM_DEBUG(lock, "ldlm_lock_decref(%s)", ldlm_lockname[mode]);
lock_res_and_lock(lock);
- lock->l_flags |= LDLM_FL_CBPENDING;
+ ldlm_set_cbpending(lock);
unlock_res_and_lock(lock);
ldlm_lock_decref_internal(lock, mode);
LDLM_LOCK_PUT(lock);
@@ -971,7 +979,7 @@ static void ldlm_granted_list_add_lock(struct ldlm_lock *lock,
ldlm_resource_dump(D_INFO, res);
LDLM_DEBUG(lock, "About to add lock:");
- if (lock->l_flags & LDLM_FL_DESTROYED) {
+ if (ldlm_is_destroyed(lock)) {
CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
return;
}
@@ -1073,10 +1081,9 @@ static struct ldlm_lock *search_queue(struct list_head *queue,
* whose parents already hold a lock so forward progress
* can still happen.
*/
- if (lock->l_flags & LDLM_FL_CBPENDING &&
- !(flags & LDLM_FL_CBPENDING))
+ if (ldlm_is_cbpending(lock) && !(flags & LDLM_FL_CBPENDING))
continue;
- if (!unref && lock->l_flags & LDLM_FL_CBPENDING &&
+ if (!unref && ldlm_is_cbpending(lock) &&
lock->l_readers == 0 && lock->l_writers == 0)
continue;
@@ -1092,6 +1099,7 @@ static struct ldlm_lock *search_queue(struct list_head *queue,
if (unlikely(match == LCK_GROUP) &&
lock->l_resource->lr_type == LDLM_EXTENT &&
+ policy->l_extent.gid != LDLM_GID_ANY &&
lock->l_policy_data.l_extent.gid != policy->l_extent.gid)
continue;
@@ -1104,11 +1112,10 @@ static struct ldlm_lock *search_queue(struct list_head *queue,
policy->l_inodebits.bits))
continue;
- if (!unref && (lock->l_flags & LDLM_FL_GONE_MASK))
+ if (!unref && LDLM_HAVE_MASK(lock, GONE))
continue;
- if ((flags & LDLM_FL_LOCAL_ONLY) &&
- !(lock->l_flags & LDLM_FL_LOCAL))
+ if ((flags & LDLM_FL_LOCAL_ONLY) && !ldlm_is_local(lock))
continue;
if (flags & LDLM_FL_TEST_LOCK) {
@@ -1142,7 +1149,7 @@ EXPORT_SYMBOL(ldlm_lock_fail_match_locked);
*/
void ldlm_lock_allow_match_locked(struct ldlm_lock *lock)
{
- lock->l_flags |= LDLM_FL_LVB_READY;
+ ldlm_set_lvb_ready(lock);
wake_up_all(&lock->l_waitq);
}
EXPORT_SYMBOL(ldlm_lock_allow_match_locked);
@@ -1243,8 +1250,7 @@ enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
if (lock) {
ldlm_lock2handle(lock, lockh);
- if ((flags & LDLM_FL_LVB_READY) &&
- (!(lock->l_flags & LDLM_FL_LVB_READY))) {
+ if ((flags & LDLM_FL_LVB_READY) && !ldlm_is_lvb_ready(lock)) {
__u64 wait_flags = LDLM_FL_LVB_READY |
LDLM_FL_DESTROYED | LDLM_FL_FAIL_NOTIFIED;
struct l_wait_info lwi;
@@ -1271,7 +1277,7 @@ enum ldlm_mode ldlm_lock_match(struct ldlm_namespace *ns, __u64 flags,
l_wait_event(lock->l_waitq,
lock->l_flags & wait_flags,
&lwi);
- if (!(lock->l_flags & LDLM_FL_LVB_READY)) {
+ if (!ldlm_is_lvb_ready(lock)) {
if (flags & LDLM_FL_TEST_LOCK)
LDLM_LOCK_RELEASE(lock);
else
@@ -1325,10 +1331,10 @@ enum ldlm_mode ldlm_revalidate_lock_handle(struct lustre_handle *lockh,
lock = ldlm_handle2lock(lockh);
if (lock) {
lock_res_and_lock(lock);
- if (lock->l_flags & LDLM_FL_GONE_MASK)
+ if (LDLM_HAVE_MASK(lock, GONE))
goto out;
- if (lock->l_flags & LDLM_FL_CBPENDING &&
+ if (ldlm_is_cbpending(lock) &&
lock->l_readers == 0 && lock->l_writers == 0)
goto out;
@@ -1542,7 +1548,8 @@ enum ldlm_error ldlm_lock_enqueue(struct ldlm_namespace *ns,
/* Some flags from the enqueue want to make it into the AST, via the
* lock's l_flags.
*/
- lock->l_flags |= *flags & LDLM_FL_AST_DISCARD_DATA;
+ if (*flags & LDLM_FL_AST_DISCARD_DATA)
+ ldlm_set_ast_discard_data(lock);
/*
* This distinction between local lock trees is very important; a client
@@ -1581,7 +1588,7 @@ ldlm_work_bl_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
lock_res_and_lock(lock);
list_del_init(&lock->l_bl_ast);
- LASSERT(lock->l_flags & LDLM_FL_AST_SENT);
+ LASSERT(ldlm_is_ast_sent(lock));
LASSERT(lock->l_bl_ast_run == 0);
LASSERT(lock->l_blocking_lock);
lock->l_bl_ast_run++;
@@ -1628,12 +1635,12 @@ ldlm_work_cp_ast_lock(struct ptlrpc_request_set *rqset, void *opaq)
/* nobody should touch l_cp_ast */
lock_res_and_lock(lock);
list_del_init(&lock->l_cp_ast);
- LASSERT(lock->l_flags & LDLM_FL_CP_REQD);
+ LASSERT(ldlm_is_cp_reqd(lock));
/* save l_completion_ast since it can be changed by
* mds_intent_policy(), see bug 14225
*/
completion_callback = lock->l_completion_ast;
- lock->l_flags &= ~LDLM_FL_CP_REQD;
+ ldlm_clear_cp_reqd(lock);
unlock_res_and_lock(lock);
if (completion_callback)
@@ -1778,8 +1785,8 @@ out:
void ldlm_cancel_callback(struct ldlm_lock *lock)
{
check_res_locked(lock->l_resource);
- if (!(lock->l_flags & LDLM_FL_CANCEL)) {
- lock->l_flags |= LDLM_FL_CANCEL;
+ if (!ldlm_is_cancel(lock)) {
+ ldlm_set_cancel(lock);
if (lock->l_blocking_ast) {
unlock_res_and_lock(lock);
lock->l_blocking_ast(lock, NULL, lock->l_ast_data,
@@ -1789,7 +1796,7 @@ void ldlm_cancel_callback(struct ldlm_lock *lock)
LDLM_DEBUG(lock, "no blocking ast");
}
}
- lock->l_flags |= LDLM_FL_BL_DONE;
+ ldlm_set_bl_done(lock);
}
/**
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
index ebe9042adb25..ab739f079a48 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_lockd.c
@@ -124,10 +124,10 @@ void ldlm_handle_bl_callback(struct ldlm_namespace *ns,
LDLM_DEBUG(lock, "client blocking AST callback handler");
lock_res_and_lock(lock);
- lock->l_flags |= LDLM_FL_CBPENDING;
+ ldlm_set_cbpending(lock);
- if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)
- lock->l_flags |= LDLM_FL_CANCEL;
+ if (ldlm_is_cancel_on_block(lock))
+ ldlm_set_cancel(lock);
do_ast = !lock->l_readers && !lock->l_writers;
unlock_res_and_lock(lock);
@@ -172,7 +172,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
set_current_state(TASK_INTERRUPTIBLE);
schedule_timeout(to);
if (lock->l_granted_mode == lock->l_req_mode ||
- lock->l_flags & LDLM_FL_DESTROYED)
+ ldlm_is_destroyed(lock))
break;
}
}
@@ -215,7 +215,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
}
lock_res_and_lock(lock);
- if ((lock->l_flags & LDLM_FL_DESTROYED) ||
+ if (ldlm_is_destroyed(lock) ||
lock->l_granted_mode == lock->l_req_mode) {
/* bug 11300: the lock has already been granted */
unlock_res_and_lock(lock);
@@ -291,7 +291,7 @@ static void ldlm_handle_cp_callback(struct ptlrpc_request *req,
out:
if (rc < 0) {
lock_res_and_lock(lock);
- lock->l_flags |= LDLM_FL_FAILED;
+ ldlm_set_failed(lock);
unlock_res_and_lock(lock);
wake_up(&lock->l_waitq);
}
@@ -360,8 +360,7 @@ static int __ldlm_bl_to_thread(struct ldlm_bl_work_item *blwi,
struct ldlm_bl_pool *blp = ldlm_state->ldlm_bl_pool;
spin_lock(&blp->blp_lock);
- if (blwi->blwi_lock &&
- blwi->blwi_lock->l_flags & LDLM_FL_DISCARD_DATA) {
+ if (blwi->blwi_lock && ldlm_is_discard_data(blwi->blwi_lock)) {
/* add LDLM_FL_DISCARD_DATA requests to the priority list */
list_add_tail(&blwi->blwi_entry, &blp->blp_prio_list);
} else {
@@ -626,23 +625,22 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
return 0;
}
- if ((lock->l_flags & LDLM_FL_FAIL_LOC) &&
+ if (ldlm_is_fail_loc(lock) &&
lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK)
OBD_RACE(OBD_FAIL_LDLM_CP_BL_RACE);
/* Copy hints/flags (e.g. LDLM_FL_DISCARD_DATA) from AST. */
lock_res_and_lock(lock);
lock->l_flags |= ldlm_flags_from_wire(dlm_req->lock_flags &
- LDLM_AST_FLAGS);
+ LDLM_FL_AST_MASK);
if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
/* If somebody cancels lock and cache is already dropped,
* or lock is failed before cp_ast received on client,
* we can tell the server we have no lock. Otherwise, we
* should send cancel after dropping the cache.
*/
- if (((lock->l_flags & LDLM_FL_CANCELING) &&
- (lock->l_flags & LDLM_FL_BL_DONE)) ||
- (lock->l_flags & LDLM_FL_FAILED)) {
+ if ((ldlm_is_canceling(lock) && ldlm_is_bl_done(lock)) ||
+ ldlm_is_failed(lock)) {
LDLM_DEBUG(lock, "callback on lock %#llx - lock disappeared\n",
dlm_req->lock_handle[0].cookie);
unlock_res_and_lock(lock);
@@ -656,7 +654,7 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
* Let ldlm_cancel_lru() be fast.
*/
ldlm_lock_remove_from_lru(lock);
- lock->l_flags |= LDLM_FL_BL_AST;
+ ldlm_set_bl_ast(lock);
}
unlock_res_and_lock(lock);
@@ -674,7 +672,7 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
case LDLM_BL_CALLBACK:
CDEBUG(D_INODE, "blocking ast\n");
req_capsule_extend(&req->rq_pill, &RQF_LDLM_BL_CALLBACK);
- if (!(lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK)) {
+ if (!ldlm_is_cancel_on_block(lock)) {
rc = ldlm_callback_reply(req, 0);
if (req->rq_no_reply || rc)
ldlm_callback_errmsg(req, "Normal process", rc,
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
index 74e193e52cd6..107314e284a0 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_request.c
@@ -153,7 +153,7 @@ static int ldlm_completion_tail(struct ldlm_lock *lock)
long delay;
int result;
- if (lock->l_flags & (LDLM_FL_DESTROYED | LDLM_FL_FAILED)) {
+ if (ldlm_is_destroyed(lock) || ldlm_is_failed(lock)) {
LDLM_DEBUG(lock, "client-side enqueue: destroyed");
result = -EIO;
} else {
@@ -252,7 +252,7 @@ noreproc:
lwd.lwd_lock = lock;
- if (lock->l_flags & LDLM_FL_NO_TIMEOUT) {
+ if (ldlm_is_no_timeout(lock)) {
LDLM_DEBUG(lock, "waiting indefinitely because of NO_TIMEOUT");
lwi = LWI_INTR(interrupted_completion_wait, &lwd);
} else {
@@ -269,7 +269,7 @@ noreproc:
if (OBD_FAIL_CHECK_RESET(OBD_FAIL_LDLM_INTR_CP_AST,
OBD_FAIL_LDLM_CP_BL_RACE | OBD_FAIL_ONCE)) {
- lock->l_flags |= LDLM_FL_FAIL_LOC;
+ ldlm_set_fail_loc(lock);
rc = -EINTR;
} else {
/* Go to sleep until the lock is granted or cancelled. */
@@ -296,7 +296,7 @@ static void failed_lock_cleanup(struct ldlm_namespace *ns,
lock_res_and_lock(lock);
/* Check that lock is not granted or failed, we might race. */
if ((lock->l_req_mode != lock->l_granted_mode) &&
- !(lock->l_flags & LDLM_FL_FAILED)) {
+ !ldlm_is_failed(lock)) {
/* Make sure that this lock will not be found by raced
* bl_ast and -EINVAL reply is sent to server anyways.
* bug 17645
@@ -347,7 +347,6 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
struct ldlm_lock *lock;
struct ldlm_reply *reply;
int cleanup_phase = 1;
- int size = 0;
lock = ldlm_handle2lock(lockh);
/* ldlm_cli_enqueue is holding a reference on this lock. */
@@ -375,8 +374,8 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
goto cleanup;
}
- if (lvb_len != 0) {
- LASSERT(lvb);
+ if (lvb_len > 0) {
+ int size = 0;
size = req_capsule_get_size(&req->rq_pill, &RMF_DLM_LVB,
RCL_SERVER);
@@ -390,12 +389,13 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
rc = -EINVAL;
goto cleanup;
}
+ lvb_len = size;
}
if (rc == ELDLM_LOCK_ABORTED) {
- if (lvb_len != 0)
+ if (lvb_len > 0 && lvb)
rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER,
- lvb, size);
+ lvb, lvb_len);
if (rc == 0)
rc = ELDLM_LOCK_ABORTED;
goto cleanup;
@@ -421,7 +421,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
*flags = ldlm_flags_from_wire(reply->lock_flags);
lock->l_flags |= ldlm_flags_from_wire(reply->lock_flags &
- LDLM_INHERIT_FLAGS);
+ LDLM_FL_INHERIT_MASK);
/* move NO_TIMEOUT flag to the lock to force ldlm_lock_match()
* to wait with no timeout as well
*/
@@ -489,7 +489,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
/* If the lock has already been granted by a completion AST, don't
* clobber the LVB with an older one.
*/
- if (lvb_len != 0) {
+ if (lvb_len > 0) {
/* We must lock or a racing completion might update lvb without
* letting us know and we'll clobber the correct value.
* Cannot unlock after the check either, as that still leaves
@@ -498,7 +498,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
lock_res_and_lock(lock);
if (lock->l_req_mode != lock->l_granted_mode)
rc = ldlm_fill_lvb(lock, &req->rq_pill, RCL_SERVER,
- lock->l_lvb_data, size);
+ lock->l_lvb_data, lvb_len);
unlock_res_and_lock(lock);
if (rc < 0) {
cleanup_phase = 1;
@@ -518,7 +518,7 @@ int ldlm_cli_enqueue_fini(struct obd_export *exp, struct ptlrpc_request *req,
}
}
- if (lvb_len && lvb) {
+ if (lvb_len > 0 && lvb) {
/* Copy the LVB here, and not earlier, because the completion
* AST (if any) can override what we got in the reply
*/
@@ -601,7 +601,7 @@ int ldlm_prep_elc_req(struct obd_export *exp, struct ptlrpc_request *req,
avail = ldlm_capsule_handles_avail(pill, RCL_CLIENT, canceloff);
flags = ns_connect_lru_resize(ns) ?
- LDLM_CANCEL_LRUR : LDLM_CANCEL_AGED;
+ LDLM_CANCEL_LRUR_NO_WAIT : LDLM_CANCEL_AGED;
to_free = !ns_connect_lru_resize(ns) &&
opc == LDLM_ENQUEUE ? 1 : 0;
@@ -821,12 +821,11 @@ static __u64 ldlm_cli_cancel_local(struct ldlm_lock *lock)
LDLM_DEBUG(lock, "client-side cancel");
/* Set this flag to prevent others from getting new references*/
lock_res_and_lock(lock);
- lock->l_flags |= LDLM_FL_CBPENDING;
+ ldlm_set_cbpending(lock);
local_only = !!(lock->l_flags &
(LDLM_FL_LOCAL_ONLY|LDLM_FL_CANCEL_ON_BLOCK));
ldlm_cancel_callback(lock);
- rc = (lock->l_flags & LDLM_FL_BL_AST) ?
- LDLM_FL_BL_AST : LDLM_FL_CANCELING;
+ rc = ldlm_is_bl_ast(lock) ? LDLM_FL_BL_AST : LDLM_FL_CANCELING;
unlock_res_and_lock(lock);
if (local_only) {
@@ -1131,31 +1130,30 @@ EXPORT_SYMBOL(ldlm_cli_cancel_list_local);
* dirty data, to close a file, ...) or waiting for any RPCs in-flight (e.g.
* readahead requests, ...)
*/
-static ldlm_policy_res_t ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
- int count)
+static enum ldlm_policy_res
+ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns, struct ldlm_lock *lock,
+ int unused, int added, int count)
{
- ldlm_policy_res_t result = LDLM_POLICY_CANCEL_LOCK;
- ldlm_cancel_for_recovery cb = ns->ns_cancel_for_recovery;
-
- lock_res_and_lock(lock);
+ enum ldlm_policy_res result = LDLM_POLICY_CANCEL_LOCK;
/* don't check added & count since we want to process all locks
- * from unused list
+ * from unused list.
+ * It's fine to not take lock to access lock->l_resource since
+ * the lock has already been granted so it won't change.
*/
switch (lock->l_resource->lr_type) {
case LDLM_EXTENT:
case LDLM_IBITS:
- if (cb && cb(lock))
+ if (ns->ns_cancel && ns->ns_cancel(lock) != 0)
break;
default:
result = LDLM_POLICY_SKIP_LOCK;
- lock->l_flags |= LDLM_FL_SKIPPED;
+ lock_res_and_lock(lock);
+ ldlm_set_skipped(lock);
+ unlock_res_and_lock(lock);
break;
}
- unlock_res_and_lock(lock);
return result;
}
@@ -1168,10 +1166,10 @@ static ldlm_policy_res_t ldlm_cancel_no_wait_policy(struct ldlm_namespace *ns,
*
* \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
*/
-static ldlm_policy_res_t ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
- int count)
+static enum ldlm_policy_res ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
+ struct ldlm_lock *lock,
+ int unused, int added,
+ int count)
{
unsigned long cur = cfs_time_current();
struct ldlm_pool *pl = &ns->ns_pool;
@@ -1196,8 +1194,13 @@ static ldlm_policy_res_t ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
/* Stop when SLV is not yet come from server or lv is smaller than
* it is.
*/
- return (slv == 0 || lv < slv) ?
- LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
+ if (slv == 0 || lv < slv)
+ return LDLM_POLICY_KEEP_LOCK;
+
+ if (ns->ns_cancel && ns->ns_cancel(lock) == 0)
+ return LDLM_POLICY_KEEP_LOCK;
+
+ return LDLM_POLICY_CANCEL_LOCK;
}
/**
@@ -1209,10 +1212,10 @@ static ldlm_policy_res_t ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
*
* \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
*/
-static ldlm_policy_res_t ldlm_cancel_passed_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
- int count)
+static enum ldlm_policy_res ldlm_cancel_passed_policy(struct ldlm_namespace *ns,
+ struct ldlm_lock *lock,
+ int unused, int added,
+ int count)
{
/* Stop LRU processing when we reach past @count or have checked all
* locks in LRU.
@@ -1230,16 +1233,35 @@ static ldlm_policy_res_t ldlm_cancel_passed_policy(struct ldlm_namespace *ns,
*
* \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
*/
-static ldlm_policy_res_t ldlm_cancel_aged_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
- int count)
+static enum ldlm_policy_res ldlm_cancel_aged_policy(struct ldlm_namespace *ns,
+ struct ldlm_lock *lock,
+ int unused, int added,
+ int count)
{
- /* Stop LRU processing if young lock is found and we reach past count */
- return ((added >= count) &&
- time_before(cfs_time_current(),
- cfs_time_add(lock->l_last_used, ns->ns_max_age))) ?
- LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
+ if ((added >= count) &&
+ time_before(cfs_time_current(),
+ cfs_time_add(lock->l_last_used, ns->ns_max_age)))
+ return LDLM_POLICY_KEEP_LOCK;
+
+ if (ns->ns_cancel && ns->ns_cancel(lock) == 0)
+ return LDLM_POLICY_KEEP_LOCK;
+
+ return LDLM_POLICY_CANCEL_LOCK;
+}
+
+static enum ldlm_policy_res
+ldlm_cancel_lrur_no_wait_policy(struct ldlm_namespace *ns,
+ struct ldlm_lock *lock,
+ int unused, int added,
+ int count)
+{
+ enum ldlm_policy_res result;
+
+ result = ldlm_cancel_lrur_policy(ns, lock, unused, added, count);
+ if (result == LDLM_POLICY_KEEP_LOCK)
+ return result;
+
+ return ldlm_cancel_no_wait_policy(ns, lock, unused, added, count);
}
/**
@@ -1251,10 +1273,9 @@ static ldlm_policy_res_t ldlm_cancel_aged_policy(struct ldlm_namespace *ns,
*
* \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
*/
-static ldlm_policy_res_t ldlm_cancel_default_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
- int count)
+static enum ldlm_policy_res
+ldlm_cancel_default_policy(struct ldlm_namespace *ns, struct ldlm_lock *lock,
+ int unused, int added, int count)
{
/* Stop LRU processing when we reach past count or have checked all
* locks in LRU.
@@ -1263,7 +1284,8 @@ static ldlm_policy_res_t ldlm_cancel_default_policy(struct ldlm_namespace *ns,
LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}
-typedef ldlm_policy_res_t (*ldlm_cancel_lru_policy_t)(struct ldlm_namespace *,
+typedef enum ldlm_policy_res (*ldlm_cancel_lru_policy_t)(
+ struct ldlm_namespace *,
struct ldlm_lock *, int,
int, int);
@@ -1281,6 +1303,8 @@ ldlm_cancel_lru_policy(struct ldlm_namespace *ns, int flags)
return ldlm_cancel_lrur_policy;
else if (flags & LDLM_CANCEL_PASSED)
return ldlm_cancel_passed_policy;
+ else if (flags & LDLM_CANCEL_LRUR_NO_WAIT)
+ return ldlm_cancel_lrur_no_wait_policy;
} else {
if (flags & LDLM_CANCEL_AGED)
return ldlm_cancel_aged_policy;
@@ -1329,6 +1353,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
ldlm_cancel_lru_policy_t pf;
struct ldlm_lock *lock, *next;
int added = 0, unused, remained;
+ int no_wait = flags & (LDLM_CANCEL_NO_WAIT | LDLM_CANCEL_LRUR_NO_WAIT);
spin_lock(&ns->ns_lock);
unused = ns->ns_nr_unused;
@@ -1341,7 +1366,8 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
LASSERT(pf);
while (!list_empty(&ns->ns_unused_list)) {
- ldlm_policy_res_t result;
+ enum ldlm_policy_res result;
+ time_t last_use = 0;
/* all unused locks */
if (remained-- <= 0)
@@ -1354,17 +1380,20 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
list_for_each_entry_safe(lock, next, &ns->ns_unused_list,
l_lru) {
/* No locks which got blocking requests. */
- LASSERT(!(lock->l_flags & LDLM_FL_BL_AST));
+ LASSERT(!ldlm_is_bl_ast(lock));
- if (flags & LDLM_CANCEL_NO_WAIT &&
- lock->l_flags & LDLM_FL_SKIPPED)
+ if (no_wait && ldlm_is_skipped(lock))
/* already processed */
continue;
+ last_use = lock->l_last_used;
+ if (last_use == cfs_time_current())
+ continue;
+
/* Somebody is already doing CANCEL. No need for this
* lock in LRU, do not traverse it again.
*/
- if (!(lock->l_flags & LDLM_FL_CANCELING))
+ if (!ldlm_is_canceling(lock))
break;
ldlm_lock_remove_from_lru_nolock(lock);
@@ -1407,12 +1436,14 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
lock_res_and_lock(lock);
/* Check flags again under the lock. */
- if ((lock->l_flags & LDLM_FL_CANCELING) ||
- (ldlm_lock_remove_from_lru(lock) == 0)) {
+ if (ldlm_is_canceling(lock) ||
+ (ldlm_lock_remove_from_lru_check(lock, last_use) == 0)) {
/* Another thread is removing lock from LRU, or
* somebody is already doing CANCEL, or there
* is a blocking request which will send cancel
- * by itself, or the lock is no longer unused.
+ * by itself, or the lock is no longer unused or
+ * the lock has been used since the pf() call and
+ * pages could be put under it.
*/
unlock_res_and_lock(lock);
lu_ref_del(&lock->l_reference,
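
The new last_use snapshot closes a narrow race: between the policy callback pf() and re-taking the resource lock, the lock can be found and used again (pages put under it, as the extended comment says), and cancelling it from the LRU at that point would be wrong. l_last_used is therefore sampled before the policy runs and re-checked under the lock by ldlm_lock_remove_from_lru_check(), whose body is not part of this excerpt. A plausible shape, purely as a sketch:

    /* hypothetical sketch; the real helper lives in ldlm_lock.c */
    static int ldlm_lock_remove_from_lru_check_sketch(struct ldlm_lock *lock,
                                                      time_t last_use)
    {
            if (last_use != lock->l_last_used)
                    return 0;       /* used since the snapshot, leave it in the LRU */
            return ldlm_lock_remove_from_lru(lock);
    }
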
@@ -1429,7 +1460,7 @@ static int ldlm_prepare_lru_list(struct ldlm_namespace *ns,
* where while we are doing cancel here, server is also
* silently cancelling this lock.
*/
- lock->l_flags &= ~LDLM_FL_CANCEL_ON_BLOCK;
+ ldlm_clear_cancel_on_block(lock);
/* Setting the CBPENDING flag is a little misleading,
* but prevents an important race; namely, once
@@ -1526,8 +1557,7 @@ int ldlm_cancel_resource_local(struct ldlm_resource *res,
/* If somebody is already doing CANCEL, or blocking AST came,
* skip this lock.
*/
- if (lock->l_flags & LDLM_FL_BL_AST ||
- lock->l_flags & LDLM_FL_CANCELING)
+ if (ldlm_is_bl_ast(lock) || ldlm_is_canceling(lock))
continue;
if (lockmode_compat(lock->l_granted_mode, mode))
@@ -1771,7 +1801,6 @@ static void ldlm_namespace_foreach(struct ldlm_namespace *ns,
cfs_hash_for_each_nolock(ns->ns_rs_hash,
ldlm_res_iter_helper, &helper);
-
}
/* non-blocking function to manipulate a lock whose cb_data is being put away.
@@ -1887,7 +1916,7 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
int flags;
/* Bug 11974: Do not replay a lock which is actively being canceled */
- if (lock->l_flags & LDLM_FL_CANCELING) {
+ if (ldlm_is_canceling(lock)) {
LDLM_DEBUG(lock, "Not replaying canceled lock:");
return 0;
}
@@ -1896,7 +1925,7 @@ static int replay_one_lock(struct obd_import *imp, struct ldlm_lock *lock)
* server might have long dropped it, but notification of that event was
* lost by network. (and server granted conflicting lock already)
*/
- if (lock->l_flags & LDLM_FL_CANCEL_ON_BLOCK) {
+ if (ldlm_is_cancel_on_block(lock)) {
LDLM_DEBUG(lock, "Not replaying reply-less lock:");
ldlm_lock_cancel(lock);
return 0;
diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
index 9dede87ad0a3..e99c89c34cd0 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_resource.c
@@ -124,9 +124,15 @@ int ldlm_debugfs_setup(void)
}
rc = ldebugfs_add_vars(ldlm_debugfs_dir, ldlm_debugfs_list, NULL);
+ if (rc) {
+ CERROR("LProcFS failed in ldlm-init\n");
+ goto err_svc;
+ }
return 0;
+err_svc:
+ ldebugfs_remove(&ldlm_svc_debugfs_dir);
err_ns:
ldebugfs_remove(&ldlm_ns_debugfs_dir);
err_type:
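
The added branch completes the unwind ladder in ldlm_debugfs_setup(): if ldebugfs_add_vars() fails, the service directory created just before it is removed, then control falls through the pre-existing labels. This is the standard goto-based cleanup idiom; in generic form, assuming three resources acquired in order:

    /* generic shape of the idiom, not Lustre-specific */
    rc = acquire_a();
    if (rc)
            goto err;
    rc = acquire_b();
    if (rc)
            goto err_a;
    rc = acquire_c();
    if (rc)
            goto err_b;
    return 0;

    err_b:
            release_b();
    err_a:
            release_a();
    err:
            return rc;
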
@@ -758,12 +764,12 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
list_for_each(tmp, q) {
lock = list_entry(tmp, struct ldlm_lock,
l_res_link);
- if (lock->l_flags & LDLM_FL_CLEANED) {
+ if (ldlm_is_cleaned(lock)) {
lock = NULL;
continue;
}
LDLM_LOCK_GET(lock);
- lock->l_flags |= LDLM_FL_CLEANED;
+ ldlm_set_cleaned(lock);
break;
}
@@ -775,13 +781,13 @@ static void cleanup_resource(struct ldlm_resource *res, struct list_head *q,
/* Set CBPENDING so nothing in the cancellation path
* can match this lock.
*/
- lock->l_flags |= LDLM_FL_CBPENDING;
- lock->l_flags |= LDLM_FL_FAILED;
+ ldlm_set_cbpending(lock);
+ ldlm_set_failed(lock);
lock->l_flags |= flags;
/* ... without sending a CANCEL message for local_only. */
if (local_only)
- lock->l_flags |= LDLM_FL_LOCAL_ONLY;
+ ldlm_set_local_only(lock);
if (local_only && (lock->l_readers || lock->l_writers)) {
/* This is a little bit gross, but much better than the
@@ -1275,7 +1281,7 @@ void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
LDLM_DEBUG(lock, "About to add this lock:\n");
- if (lock->l_flags & LDLM_FL_DESTROYED) {
+ if (ldlm_is_destroyed(lock)) {
CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
return;
}
@@ -1400,3 +1406,4 @@ void ldlm_resource_dump(int level, struct ldlm_resource *res)
LDLM_DEBUG_LIMIT(level, lock, "###");
}
}
+EXPORT_SYMBOL(ldlm_resource_dump);
diff --git a/drivers/staging/lustre/lustre/llite/Makefile b/drivers/staging/lustre/lustre/llite/Makefile
index 9ac29e718da3..2ce10ff01b80 100644
--- a/drivers/staging/lustre/lustre/llite/Makefile
+++ b/drivers/staging/lustre/lustre/llite/Makefile
@@ -4,7 +4,8 @@ lustre-y := dcache.o dir.o file.o llite_close.o llite_lib.o llite_nfs.o \
rw.o namei.o symlink.o llite_mmap.o \
xattr.o xattr_cache.o remote_perm.o llite_rmtacl.o \
rw26.o super25.o statahead.o \
- ../lclient/glimpse.o ../lclient/lcommon_cl.o ../lclient/lcommon_misc.o \
- vvp_dev.o vvp_page.o vvp_lock.o vvp_io.o vvp_object.o lproc_llite.o
+ glimpse.o lcommon_cl.o lcommon_misc.o \
+ vvp_dev.o vvp_page.o vvp_lock.o vvp_io.o vvp_object.o vvp_req.o \
+ lproc_llite.o
llite_lloop-y := lloop.o
diff --git a/drivers/staging/lustre/lustre/llite/dcache.c b/drivers/staging/lustre/lustre/llite/dcache.c
index dd1c827013b9..1b6f82a1a435 100644
--- a/drivers/staging/lustre/lustre/llite/dcache.c
+++ b/drivers/staging/lustre/lustre/llite/dcache.c
@@ -108,11 +108,8 @@ static int ll_dcompare(const struct dentry *parent, const struct dentry *dentry,
static inline int return_if_equal(struct ldlm_lock *lock, void *data)
{
- if ((lock->l_flags &
- (LDLM_FL_CANCELING | LDLM_FL_DISCARD_DATA)) ==
- (LDLM_FL_CANCELING | LDLM_FL_DISCARD_DATA))
- return LDLM_ITER_CONTINUE;
- return LDLM_ITER_STOP;
+ return (ldlm_is_canceling(lock) && ldlm_is_discard_data(lock)) ?
+ LDLM_ITER_CONTINUE : LDLM_ITER_STOP;
}
/* find any ldlm lock of the inode in mdc and lov
@@ -253,8 +250,8 @@ void ll_invalidate_aliases(struct inode *inode)
{
struct dentry *dentry;
- CDEBUG(D_INODE, "marking dentries for ino %lu/%u(%p) invalid\n",
- inode->i_ino, inode->i_generation, inode);
+ CDEBUG(D_INODE, "marking dentries for ino "DFID"(%p) invalid\n",
+ PFID(ll_inode2fid(inode)), inode);
ll_lock_dcache(inode);
hlist_for_each_entry(dentry, &inode->i_dentry, d_u.d_alias) {
@@ -289,8 +286,8 @@ void ll_lookup_finish_locks(struct lookup_intent *it, struct inode *inode)
if (it->d.lustre.it_lock_mode && inode) {
struct ll_sb_info *sbi = ll_i2sbi(inode);
- CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n",
- inode, inode->i_ino, inode->i_generation);
+ CDEBUG(D_DLMTRACE, "setting l_data to inode "DFID"(%p)\n",
+ PFID(ll_inode2fid(inode)), inode);
ll_set_lock_data(sbi->ll_md_exp, inode, it, NULL);
}
diff --git a/drivers/staging/lustre/lustre/llite/dir.c b/drivers/staging/lustre/lustre/llite/dir.c
index 7a0a67f3c4e7..4b00d1ac84fb 100644
--- a/drivers/staging/lustre/lustre/llite/dir.c
+++ b/drivers/staging/lustre/lustre/llite/dir.c
@@ -158,11 +158,16 @@ static int ll_dir_filler(void *_hash, struct page *page0)
int i;
int rc;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) hash %llu\n",
- inode->i_ino, inode->i_generation, inode, hash);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p) hash %llu\n",
+ PFID(ll_inode2fid(inode)), inode, hash);
LASSERT(max_pages > 0 && max_pages <= MD_MAX_BRW_PAGES);
+ op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
+ LUSTRE_OPC_ANY, NULL);
+ if (IS_ERR(op_data))
+ return PTR_ERR(op_data);
+
page_pool = kcalloc(max_pages, sizeof(page), GFP_NOFS);
if (page_pool) {
page_pool[0] = page0;
@@ -177,8 +182,6 @@ static int ll_dir_filler(void *_hash, struct page *page0)
page_pool[npages] = page;
}
- op_data = ll_prep_md_op_data(NULL, inode, NULL, NULL, 0, 0,
- LUSTRE_OPC_ANY, NULL);
op_data->op_npages = npages;
op_data->op_offset = hash;
rc = md_readpage(exp, op_data, page_pool, &request);
@@ -190,7 +193,7 @@ static int ll_dir_filler(void *_hash, struct page *page0)
body = req_capsule_server_get(&request->rq_pill, &RMF_MDT_BODY);
/* Checked by mdc_readpage() */
if (body->valid & OBD_MD_FLSIZE)
- cl_isize_write(inode, body->size);
+ i_size_write(inode, body->size);
nrdpgs = (request->rq_bulk->bd_nob_transferred+PAGE_SIZE-1)
>> PAGE_SHIFT;
@@ -372,8 +375,8 @@ struct page *ll_get_dir_page(struct inode *dir, __u64 hash,
return ERR_PTR(rc);
}
- CDEBUG(D_INODE, "setting lr_lvb_inode to inode %p (%lu/%u)\n",
- dir, dir->i_ino, dir->i_generation);
+ CDEBUG(D_INODE, "setting lr_lvb_inode to inode "DFID"(%p)\n",
+ PFID(ll_inode2fid(dir)), dir);
md_set_lock_data(ll_i2sbi(dir)->ll_md_exp,
&it.d.lustre.it_lock_handle, dir, NULL);
} else {
@@ -468,6 +471,28 @@ fail:
goto out_unlock;
}
+/**
+ * return IF_* type for given lu_dirent entry.
+ * IF_* flag shld be converted to particular OS file type in
+ * platform llite module.
+ */
+static __u16 ll_dirent_type_get(struct lu_dirent *ent)
+{
+ __u16 type = 0;
+ struct luda_type *lt;
+ int len = 0;
+
+ if (le32_to_cpu(ent->lde_attrs) & LUDA_TYPE) {
+ const unsigned int align = sizeof(struct luda_type) - 1;
+
+ len = le16_to_cpu(ent->lde_namelen);
+ len = (len + align) & ~align;
+ lt = (void *)ent->lde_name + len;
+ type = IFTODT(le16_to_cpu(lt->lt_type));
+ }
+ return type;
+}
+
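
ll_dirent_type_get() above digs the struct luda_type record out of a wire dirent: it sits immediately after the name, with the name length rounded up to the size of luda_type (a power of two). The rounding is the usual mask trick; for example, if sizeof(struct luda_type) is 2 and the name is 5 bytes long:

    align = sizeof(struct luda_type) - 1;   /* 2 - 1 = 1        */
    len   = (5 + align) & ~align;           /* (5 + 1) & ~1 = 6 */
    /* so lt starts 6 bytes after lde_name, on a 2-byte boundary */
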
int ll_dir_read(struct inode *inode, struct dir_context *ctx)
{
struct ll_inode_info *info = ll_i2info(inode);
@@ -589,15 +614,16 @@ static int ll_readdir(struct file *filp, struct dir_context *ctx)
struct inode *inode = file_inode(filp);
struct ll_file_data *lfd = LUSTRE_FPRIVATE(filp);
struct ll_sb_info *sbi = ll_i2sbi(inode);
+ __u64 pos = lfd ? lfd->lfd_pos : 0;
int hash64 = sbi->ll_flags & LL_SBI_64BIT_HASH;
int api32 = ll_need_32bit_api(sbi);
int rc;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p) pos %lu/%llu 32bit_api %d\n",
- inode->i_ino, inode->i_generation,
- inode, (unsigned long)lfd->lfd_pos, i_size_read(inode), api32);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p) pos %lu/%llu 32bit_api %d\n",
+ PFID(ll_inode2fid(inode)), inode, (unsigned long)pos,
+ i_size_read(inode), api32);
- if (lfd->lfd_pos == MDS_DIR_END_OFF) {
+ if (pos == MDS_DIR_END_OFF) {
/*
* end-of-file.
*/
@@ -605,9 +631,10 @@ static int ll_readdir(struct file *filp, struct dir_context *ctx)
goto out;
}
- ctx->pos = lfd->lfd_pos;
+ ctx->pos = pos;
rc = ll_dir_read(inode, ctx);
- lfd->lfd_pos = ctx->pos;
+ if (lfd)
+ lfd->lfd_pos = ctx->pos;
if (ctx->pos == MDS_DIR_END_OFF) {
if (api32)
ctx->pos = LL_DIR_END_OFF_32BIT;
@@ -804,9 +831,8 @@ int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp,
rc = md_getattr(sbi->ll_md_exp, op_data, &req);
ll_finish_md_op_data(op_data);
if (rc < 0) {
- CDEBUG(D_INFO, "md_getattr failed on inode %lu/%u: rc %d\n",
- inode->i_ino,
- inode->i_generation, rc);
+ CDEBUG(D_INFO, "md_getattr failed on inode "DFID": rc %d\n",
+ PFID(ll_inode2fid(inode)), rc);
goto out;
}
@@ -916,7 +942,7 @@ static int ll_ioc_copy_start(struct super_block *sb, struct hsm_copy *copy)
}
/* Read current file data version */
- rc = ll_data_version(inode, &data_version, 1);
+ rc = ll_data_version(inode, &data_version, LL_DV_RD_FLUSH);
iput(inode);
if (rc != 0) {
CDEBUG(D_HSM, "Could not read file data version of "
@@ -936,6 +962,9 @@ static int ll_ioc_copy_start(struct super_block *sb, struct hsm_copy *copy)
}
progress:
+ /* On error, the request should be considered as completed */
+ if (hpk.hpk_errval > 0)
+ hpk.hpk_flags |= HP_FLAG_COMPLETED;
rc = obd_iocontrol(LL_IOC_HSM_PROGRESS, sbi->ll_md_exp, sizeof(hpk),
&hpk, NULL);
@@ -997,8 +1026,7 @@ static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy)
goto progress;
}
- rc = ll_data_version(inode, &data_version,
- copy->hc_hai.hai_action == HSMA_ARCHIVE);
+ rc = ll_data_version(inode, &data_version, LL_DV_RD_FLUSH);
iput(inode);
if (rc) {
CDEBUG(D_HSM, "Could not read file data version. Request could not be confirmed.\n");
@@ -1033,7 +1061,6 @@ static int ll_ioc_copy_end(struct super_block *sb, struct hsm_copy *copy)
/* hpk_errval must be >= 0 */
hpk.hpk_errval = EBUSY;
}
-
}
progress:
@@ -1242,8 +1269,8 @@ static long ll_dir_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct obd_ioctl_data *data;
int rc = 0;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), cmd=%#x\n",
- inode->i_ino, inode->i_generation, inode, cmd);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), cmd=%#x\n",
+ PFID(ll_inode2fid(inode)), inode, cmd);
/* asm-ppc{,64} declares TCGETS, et. al. as type 't' not 'T' */
if (_IOC_TYPE(cmd) == 'T' || _IOC_TYPE(cmd) == 't') /* tty ioctls */
@@ -1362,7 +1389,6 @@ out_free:
lmv_out_free:
obd_ioctl_freedata(buf, len);
return rc;
-
}
case LL_IOC_LOV_SETSTRIPE: {
struct lov_user_md_v3 lumv3;
@@ -1474,8 +1500,9 @@ free_lmv:
cmd == LL_IOC_MDC_GETINFO)) {
rc = 0;
goto skip_lmm;
- } else
+ } else {
goto out_req;
+ }
}
if (cmd == IOC_MDC_GETFILESTRIPE ||
@@ -1688,15 +1715,16 @@ out_quotactl:
return ll_flush_ctx(inode);
#ifdef CONFIG_FS_POSIX_ACL
case LL_IOC_RMTACL: {
- if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) {
- struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
+ if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) {
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- rc = rct_add(&sbi->ll_rct, current_pid(), arg);
- if (!rc)
- fd->fd_flags |= LL_FILE_RMTACL;
- return rc;
- } else
- return 0;
+ rc = rct_add(&sbi->ll_rct, current_pid(), arg);
+ if (!rc)
+ fd->fd_flags |= LL_FILE_RMTACL;
+ return rc;
+ } else {
+ return 0;
+ }
}
#endif
case LL_IOC_GETOBDCOUNT: {
@@ -1817,6 +1845,9 @@ out_quotactl:
return rc;
}
case LL_IOC_HSM_CT_START:
+ if (!capable(CFS_CAP_SYS_ADMIN))
+ return -EPERM;
+
rc = copy_and_ioctl(cmd, sbi->ll_md_exp, (void __user *)arg,
sizeof(struct lustre_kernelcomm));
return rc;
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index cf619af3caf5..f47f2acaf90c 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -45,6 +45,7 @@
#include "../include/lustre_lite.h"
#include <linux/pagemap.h>
#include <linux/file.h>
+#include <linux/mount.h>
#include "llite_internal.h"
#include "../include/lustre/ll_fiemap.h"
@@ -87,8 +88,7 @@ void ll_pack_inode2opdata(struct inode *inode, struct md_op_data *op_data,
op_data->op_attr.ia_ctime = inode->i_ctime;
op_data->op_attr.ia_size = i_size_read(inode);
op_data->op_attr_blocks = inode->i_blocks;
- ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags =
- ll_inode_to_ext_flags(inode->i_flags);
+ op_data->op_attr_flags = ll_inode_to_ext_flags(inode->i_flags);
op_data->op_ioepoch = ll_i2info(inode)->lli_ioepoch;
if (fh)
op_data->op_handle = *fh;
@@ -170,13 +170,15 @@ static int ll_close_inode_openhandle(struct obd_export *md_exp,
*/
rc = ll_som_update(inode, op_data);
if (rc) {
- CERROR("inode %lu mdc Size-on-MDS update failed: rc = %d\n",
- inode->i_ino, rc);
+ CERROR("%s: inode "DFID" mdc Size-on-MDS update failed: rc = %d\n",
+ ll_i2mdexp(inode)->exp_obd->obd_name,
+ PFID(ll_inode2fid(inode)), rc);
rc = 0;
}
} else if (rc) {
- CERROR("inode %lu mdc close failed: rc = %d\n",
- inode->i_ino, rc);
+ CERROR("%s: inode "DFID" mdc close failed: rc = %d\n",
+ ll_i2mdexp(inode)->exp_obd->obd_name,
+ PFID(ll_inode2fid(inode)), rc);
}
/* DATA_MODIFIED flag was successfully sent on close, cancel data
@@ -278,7 +280,7 @@ static int ll_md_close(struct obd_export *md_exp, struct inode *inode,
/* clear group lock, if present */
if (unlikely(fd->fd_flags & LL_FILE_GROUP_LOCKED))
- ll_put_grouplock(inode, file, fd->fd_grouplock.cg_gid);
+ ll_put_grouplock(inode, file, fd->fd_grouplock.lg_gid);
if (fd->fd_lease_och) {
bool lease_broken;
@@ -343,8 +345,8 @@ int ll_file_release(struct inode *inode, struct file *file)
struct ll_inode_info *lli = ll_i2info(inode);
int rc;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
- inode->i_generation, inode);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
+ PFID(ll_inode2fid(inode)), inode);
#ifdef CONFIG_FS_POSIX_ACL
if (sbi->ll_flags & LL_SBI_RMT_CLIENT && is_root_inode(inode)) {
@@ -543,8 +545,8 @@ int ll_file_open(struct inode *inode, struct file *file)
struct ll_file_data *fd;
int rc = 0, opendir_set = 0;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), flags %o\n", inode->i_ino,
- inode->i_generation, inode, file->f_flags);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), flags %o\n",
+ PFID(ll_inode2fid(inode)), inode, file->f_flags);
it = file->private_data; /* XXX: compat macro */
file->private_data = NULL; /* prevent ll_local_open assertion */
@@ -677,7 +679,9 @@ restart:
if (rc)
goto out_och_free;
- LASSERT(it_disposition(it, DISP_ENQ_OPEN_REF));
+ LASSERTF(it_disposition(it, DISP_ENQ_OPEN_REF),
+ "inode %p: disposition %x, status %d\n", inode,
+ it_disposition(it, ~0), it->d.lustre.it_status);
rc = ll_local_open(file, it, fd, *och_p);
if (rc)
@@ -875,16 +879,19 @@ ll_lease_open(struct inode *inode, struct file *file, fmode_t fmode,
return och;
out_close:
- rc2 = ll_close_inode_openhandle(sbi->ll_md_exp, inode, och, NULL);
- if (rc2)
- CERROR("Close openhandle returned %d\n", rc2);
-
- /* cancel open lock */
+ /* Cancel open lock */
if (it.d.lustre.it_lock_mode != 0) {
ldlm_lock_decref_and_cancel(&och->och_lease_handle,
it.d.lustre.it_lock_mode);
it.d.lustre.it_lock_mode = 0;
+ och->och_lease_handle.cookie = 0ULL;
}
+ rc2 = ll_close_inode_openhandle(sbi->ll_md_exp, inode, och, NULL);
+ if (rc2 < 0)
+ CERROR("%s: error closing file "DFID": %d\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(&ll_i2info(inode)->lli_fid), rc2);
+ och = NULL; /* och has been freed in ll_close_inode_openhandle() */
out_release_it:
ll_intent_release(&it);
out:
@@ -908,7 +915,7 @@ static int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
lock_res_and_lock(lock);
cancelled = ldlm_is_cancel(lock);
unlock_res_and_lock(lock);
- ldlm_lock_put(lock);
+ LDLM_LOCK_PUT(lock);
}
CDEBUG(D_INODE, "lease for " DFID " broken? %d\n",
@@ -926,7 +933,7 @@ static int ll_lease_close(struct obd_client_handle *och, struct inode *inode,
/* Fills the obdo with the attributes for the lsm */
static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp,
- struct obdo *obdo, __u64 ioepoch, int sync)
+ struct obdo *obdo, __u64 ioepoch, int dv_flags)
{
struct ptlrpc_request_set *set;
struct obd_info oinfo = { };
@@ -945,9 +952,11 @@ static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp,
OBD_MD_FLMTIME | OBD_MD_FLCTIME |
OBD_MD_FLGROUP | OBD_MD_FLEPOCH |
OBD_MD_FLDATAVERSION;
- if (sync) {
+ if (dv_flags & (LL_DV_WR_FLUSH | LL_DV_RD_FLUSH)) {
oinfo.oi_oa->o_valid |= OBD_MD_FLFLAGS;
oinfo.oi_oa->o_flags |= OBD_FL_SRVLOCK;
+ if (dv_flags & LL_DV_WR_FLUSH)
+ oinfo.oi_oa->o_flags |= OBD_FL_FLUSH;
}
set = ptlrpc_prep_set();
@@ -960,11 +969,16 @@ static int ll_lsm_getattr(struct lov_stripe_md *lsm, struct obd_export *exp,
rc = ptlrpc_set_wait(set);
ptlrpc_set_destroy(set);
}
- if (rc == 0)
+ if (rc == 0) {
oinfo.oi_oa->o_valid &= (OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ |
OBD_MD_FLATIME | OBD_MD_FLMTIME |
OBD_MD_FLCTIME | OBD_MD_FLSIZE |
- OBD_MD_FLDATAVERSION);
+ OBD_MD_FLDATAVERSION | OBD_MD_FLFLAGS);
+ if (dv_flags & LL_DV_WR_FLUSH &&
+ !(oinfo.oi_oa->o_valid & OBD_MD_FLFLAGS &&
+ oinfo.oi_oa->o_flags & OBD_FL_FLUSH))
+ return -ENOTSUPP;
+ }
return rc;
}
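
ll_lsm_getattr() now takes the LL_DV_* flags instead of a bare sync flag: either flag makes the OSTs take a server-side lock (OBD_FL_SRVLOCK) so the data version is serialised against in-flight I/O, and LL_DV_WR_FLUSH additionally requests OBD_FL_FLUSH, failing with -ENOTSUPP if the server does not echo it back. Callers elsewhere in this patch pick the flag by the guarantee they need, in the spirit of:

    __u64 dv;

    /* HSM copy start: flush dirty pages, LCK_PR on the OSTs */
    rc = ll_data_version(inode, &dv, LL_DV_RD_FLUSH);

    /* HSM release: drop cached pages, LCK_PW on the OSTs */
    rc = ll_data_version(inode, &dv, LL_DV_WR_FLUSH);
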
@@ -980,7 +994,7 @@ int ll_inode_getattr(struct inode *inode, struct obdo *obdo,
lsm = ccc_inode_lsm_get(inode);
rc = ll_lsm_getattr(lsm, ll_i2dtexp(inode),
- obdo, ioepoch, sync);
+ obdo, ioepoch, sync ? LL_DV_RD_FLUSH : 0);
if (rc == 0) {
struct ost_id *oi = lsm ? &lsm->lsm_oi : &obdo->o_oi;
@@ -994,50 +1008,57 @@ int ll_inode_getattr(struct inode *inode, struct obdo *obdo,
return rc;
}
-int ll_merge_lvb(const struct lu_env *env, struct inode *inode)
+int ll_merge_attr(const struct lu_env *env, struct inode *inode)
{
struct ll_inode_info *lli = ll_i2info(inode);
struct cl_object *obj = lli->lli_clob;
- struct cl_attr *attr = ccc_env_thread_attr(env);
- struct ost_lvb lvb;
+ struct cl_attr *attr = vvp_env_thread_attr(env);
+ s64 atime;
+ s64 mtime;
+ s64 ctime;
int rc = 0;
ll_inode_size_lock(inode);
+
/* merge timestamps the most recently obtained from mds with
* timestamps obtained from osts
*/
- LTIME_S(inode->i_atime) = lli->lli_lvb.lvb_atime;
- LTIME_S(inode->i_mtime) = lli->lli_lvb.lvb_mtime;
- LTIME_S(inode->i_ctime) = lli->lli_lvb.lvb_ctime;
+ LTIME_S(inode->i_atime) = lli->lli_atime;
+ LTIME_S(inode->i_mtime) = lli->lli_mtime;
+ LTIME_S(inode->i_ctime) = lli->lli_ctime;
- lvb.lvb_size = i_size_read(inode);
- lvb.lvb_blocks = inode->i_blocks;
- lvb.lvb_mtime = LTIME_S(inode->i_mtime);
- lvb.lvb_atime = LTIME_S(inode->i_atime);
- lvb.lvb_ctime = LTIME_S(inode->i_ctime);
+ mtime = LTIME_S(inode->i_mtime);
+ atime = LTIME_S(inode->i_atime);
+ ctime = LTIME_S(inode->i_ctime);
cl_object_attr_lock(obj);
rc = cl_object_attr_get(env, obj, attr);
cl_object_attr_unlock(obj);
- if (rc == 0) {
- if (lvb.lvb_atime < attr->cat_atime)
- lvb.lvb_atime = attr->cat_atime;
- if (lvb.lvb_ctime < attr->cat_ctime)
- lvb.lvb_ctime = attr->cat_ctime;
- if (lvb.lvb_mtime < attr->cat_mtime)
- lvb.lvb_mtime = attr->cat_mtime;
+ if (rc != 0)
+ goto out_size_unlock;
- CDEBUG(D_VFSTRACE, DFID " updating i_size %llu\n",
- PFID(&lli->lli_fid), attr->cat_size);
- cl_isize_write_nolock(inode, attr->cat_size);
+ if (atime < attr->cat_atime)
+ atime = attr->cat_atime;
- inode->i_blocks = attr->cat_blocks;
+ if (ctime < attr->cat_ctime)
+ ctime = attr->cat_ctime;
- LTIME_S(inode->i_mtime) = lvb.lvb_mtime;
- LTIME_S(inode->i_atime) = lvb.lvb_atime;
- LTIME_S(inode->i_ctime) = lvb.lvb_ctime;
- }
+ if (mtime < attr->cat_mtime)
+ mtime = attr->cat_mtime;
+
+ CDEBUG(D_VFSTRACE, DFID " updating i_size %llu\n",
+ PFID(&lli->lli_fid), attr->cat_size);
+
+ i_size_write(inode, attr->cat_size);
+
+ inode->i_blocks = attr->cat_blocks;
+
+ LTIME_S(inode->i_mtime) = mtime;
+ LTIME_S(inode->i_atime) = atime;
+ LTIME_S(inode->i_ctime) = ctime;
+
+out_size_unlock:
ll_inode_size_unlock(inode);
return rc;
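
The rename from ll_merge_lvb() to ll_merge_attr() keeps the same merge rule while dropping the intermediate ost_lvb: start from the timestamps the MDS last supplied (now cached as plain lli_atime/lli_mtime/lli_ctime), bump each one to the OST value when that is newer, and take size and block count from the cl_attr unconditionally. In shorthand:

    /* shorthand for the merge above */
    if (atime < attr->cat_atime)
            atime = attr->cat_atime;        /* newest of MDS vs OST wins */
    /* ... likewise for mtime and ctime ... */
    i_size_write(inode, attr->cat_size);    /* size and blocks come from the OSTs */
    inode->i_blocks = attr->cat_blocks;
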
@@ -1120,47 +1141,48 @@ ll_file_io_generic(const struct lu_env *env, struct vvp_io_args *args,
struct cl_io *io;
ssize_t result;
+ CDEBUG(D_VFSTRACE, "file: %s, type: %d ppos: %llu, count: %zd\n",
+ file->f_path.dentry->d_name.name, iot, *ppos, count);
+
restart:
- io = ccc_env_thread_io(env);
+ io = vvp_env_thread_io(env);
ll_io_init(io, file, iot == CIT_WRITE);
if (cl_io_rw_init(env, io, iot, *ppos, count) == 0) {
struct vvp_io *vio = vvp_env_io(env);
- struct ccc_io *cio = ccc_env_io(env);
int write_mutex_locked = 0;
- cio->cui_fd = LUSTRE_FPRIVATE(file);
- vio->cui_io_subtype = args->via_io_subtype;
+ vio->vui_fd = LUSTRE_FPRIVATE(file);
+ vio->vui_io_subtype = args->via_io_subtype;
- switch (vio->cui_io_subtype) {
+ switch (vio->vui_io_subtype) {
case IO_NORMAL:
- cio->cui_iter = args->u.normal.via_iter;
- cio->cui_iocb = args->u.normal.via_iocb;
+ vio->vui_iter = args->u.normal.via_iter;
+ vio->vui_iocb = args->u.normal.via_iocb;
if ((iot == CIT_WRITE) &&
- !(cio->cui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
+ !(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
if (mutex_lock_interruptible(&lli->
lli_write_mutex)) {
result = -ERESTARTSYS;
goto out;
}
write_mutex_locked = 1;
- } else if (iot == CIT_READ) {
- down_read(&lli->lli_trunc_sem);
}
+ down_read(&lli->lli_trunc_sem);
break;
case IO_SPLICE:
- vio->u.splice.cui_pipe = args->u.splice.via_pipe;
- vio->u.splice.cui_flags = args->u.splice.via_flags;
+ vio->u.splice.vui_pipe = args->u.splice.via_pipe;
+ vio->u.splice.vui_flags = args->u.splice.via_flags;
break;
default:
- CERROR("Unknown IO type - %u\n", vio->cui_io_subtype);
+ CERROR("Unknown IO type - %u\n", vio->vui_io_subtype);
LBUG();
}
result = cl_io_loop(env, io);
+ if (args->via_io_subtype == IO_NORMAL)
+ up_read(&lli->lli_trunc_sem);
if (write_mutex_locked)
mutex_unlock(&lli->lli_write_mutex);
- else if (args->via_io_subtype == IO_NORMAL && iot == CIT_READ)
- up_read(&lli->lli_trunc_sem);
} else {
/* cl_io_rw_init() handled IO */
result = io->ci_result;
@@ -1197,6 +1219,7 @@ out:
fd->fd_write_failed = true;
}
}
+ CDEBUG(D_VFSTRACE, "iot: %d, result: %zd\n", iot, result);
return result;
}
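
One behavioural change is easy to miss in ll_file_io_generic(): lli_trunc_sem used to be taken only for reads, and is now taken shared around the normal-I/O path in both directions, then dropped right after cl_io_loop() and before lli_write_mutex. The locking bracket for the IO_NORMAL case, condensed as a sketch:

    if (iot == CIT_WRITE && !(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED))
            mutex_lock(&lli->lli_write_mutex);      /* interruptible in the real code */
    down_read(&lli->lli_trunc_sem);                 /* now for reads and writes alike */

    result = cl_io_loop(env, io);

    up_read(&lli->lli_trunc_sem);
    if (write_mutex_locked)
            mutex_unlock(&lli->lli_write_mutex);
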
@@ -1212,7 +1235,7 @@ static ssize_t ll_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
if (IS_ERR(env))
return PTR_ERR(env);
- args = vvp_env_args(env, IO_NORMAL);
+ args = ll_env_args(env, IO_NORMAL);
args->u.normal.via_iter = to;
args->u.normal.via_iocb = iocb;
@@ -1236,7 +1259,7 @@ static ssize_t ll_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
if (IS_ERR(env))
return PTR_ERR(env);
- args = vvp_env_args(env, IO_NORMAL);
+ args = ll_env_args(env, IO_NORMAL);
args->u.normal.via_iter = from;
args->u.normal.via_iocb = iocb;
@@ -1262,7 +1285,7 @@ static ssize_t ll_file_splice_read(struct file *in_file, loff_t *ppos,
if (IS_ERR(env))
return PTR_ERR(env);
- args = vvp_env_args(env, IO_SPLICE);
+ args = ll_env_args(env, IO_SPLICE);
args->u.splice.via_pipe = pipe;
args->u.splice.via_flags = flags;
@@ -1354,7 +1377,8 @@ static int ll_lov_recreate_fid(struct inode *inode, unsigned long arg)
}
int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
- int flags, struct lov_user_md *lum, int lum_size)
+ __u64 flags, struct lov_user_md *lum,
+ int lum_size)
{
struct lov_stripe_md *lsm = NULL;
struct lookup_intent oit = {.it_op = IT_OPEN, .it_flags = flags};
@@ -1363,8 +1387,8 @@ int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
lsm = ccc_inode_lsm_get(inode);
if (lsm) {
ccc_inode_lsm_put(inode, lsm);
- CDEBUG(D_IOCTL, "stripe already exists for ino %lu\n",
- inode->i_ino);
+ CDEBUG(D_IOCTL, "stripe already exists for inode "DFID"\n",
+ PFID(ll_inode2fid(inode)));
rc = -EEXIST;
goto out;
}
@@ -1478,7 +1502,7 @@ out:
static int ll_lov_setea(struct inode *inode, struct file *file,
unsigned long arg)
{
- int flags = MDS_OPEN_HAS_OBJS | FMODE_WRITE;
+ __u64 flags = MDS_OPEN_HAS_OBJS | FMODE_WRITE;
struct lov_user_md *lump;
int lum_size = sizeof(struct lov_user_md) +
sizeof(struct lov_user_ost_data);
@@ -1512,7 +1536,7 @@ static int ll_lov_setstripe(struct inode *inode, struct file *file,
struct lov_user_md_v1 __user *lumv1p = (void __user *)arg;
struct lov_user_md_v3 __user *lumv3p = (void __user *)arg;
int lum_size, rc;
- int flags = FMODE_WRITE;
+ __u64 flags = FMODE_WRITE;
/* first try with v1 which is smaller than v3 */
lum_size = sizeof(struct lov_user_md_v1);
@@ -1561,7 +1585,7 @@ ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg)
{
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- struct ccc_grouplock grouplock;
+ struct ll_grouplock grouplock;
int rc;
if (arg == 0) {
@@ -1575,14 +1599,14 @@ ll_get_grouplock(struct inode *inode, struct file *file, unsigned long arg)
spin_lock(&lli->lli_lock);
if (fd->fd_flags & LL_FILE_GROUP_LOCKED) {
CWARN("group lock already existed with gid %lu\n",
- fd->fd_grouplock.cg_gid);
+ fd->fd_grouplock.lg_gid);
spin_unlock(&lli->lli_lock);
return -EINVAL;
}
- LASSERT(!fd->fd_grouplock.cg_lock);
+ LASSERT(!fd->fd_grouplock.lg_lock);
spin_unlock(&lli->lli_lock);
- rc = cl_get_grouplock(cl_i2info(inode)->lli_clob,
+ rc = cl_get_grouplock(ll_i2info(inode)->lli_clob,
arg, (file->f_flags & O_NONBLOCK), &grouplock);
if (rc)
return rc;
@@ -1608,7 +1632,7 @@ static int ll_put_grouplock(struct inode *inode, struct file *file,
{
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- struct ccc_grouplock grouplock;
+ struct ll_grouplock grouplock;
spin_lock(&lli->lli_lock);
if (!(fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
@@ -1616,11 +1640,11 @@ static int ll_put_grouplock(struct inode *inode, struct file *file,
CWARN("no group lock held\n");
return -EINVAL;
}
- LASSERT(fd->fd_grouplock.cg_lock);
+ LASSERT(fd->fd_grouplock.lg_lock);
- if (fd->fd_grouplock.cg_gid != arg) {
+ if (fd->fd_grouplock.lg_gid != arg) {
CWARN("group lock %lu doesn't match current id %lu\n",
- arg, fd->fd_grouplock.cg_gid);
+ arg, fd->fd_grouplock.lg_gid);
spin_unlock(&lli->lli_lock);
return -EINVAL;
}
@@ -1861,11 +1885,12 @@ error:
* This value is computed using stripe object version on OST.
* Version is computed using server side locking.
*
- * @param extent_lock Take extent lock. Not needed if a process is already
- * holding the OST object group locks.
+ * @param sync if do sync on the OST side;
+ * 0: no sync
+ * LL_DV_RD_FLUSH: flush dirty pages, LCK_PR on OSTs
+ * LL_DV_WR_FLUSH: drop all caching pages, LCK_PW on OSTs
*/
-int ll_data_version(struct inode *inode, __u64 *data_version,
- int extent_lock)
+int ll_data_version(struct inode *inode, __u64 *data_version, int flags)
{
struct lov_stripe_md *lsm = NULL;
struct ll_sb_info *sbi = ll_i2sbi(inode);
@@ -1887,7 +1912,7 @@ int ll_data_version(struct inode *inode, __u64 *data_version,
goto out;
}
- rc = ll_lsm_getattr(lsm, sbi->ll_dt_exp, obdo, 0, extent_lock);
+ rc = ll_lsm_getattr(lsm, sbi->ll_dt_exp, obdo, 0, flags);
if (rc == 0) {
if (!(obdo->o_valid & OBD_MD_FLDATAVERSION))
rc = -EOPNOTSUPP;
@@ -1923,7 +1948,7 @@ int ll_hsm_release(struct inode *inode)
}
/* Grab latest data_version and [am]time values */
- rc = ll_data_version(inode, &data_version, 1);
+ rc = ll_data_version(inode, &data_version, LL_DV_WR_FLUSH);
if (rc != 0)
goto out;
@@ -1933,7 +1958,7 @@ int ll_hsm_release(struct inode *inode)
goto out;
}
- ll_merge_lvb(env, inode);
+ ll_merge_attr(env, inode);
cl_env_nested_put(&nest, env);
/* Release the file.
@@ -2227,8 +2252,8 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
int flags, rc;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),cmd=%x\n", inode->i_ino,
- inode->i_generation, inode, cmd);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p),cmd=%x\n",
+ PFID(ll_inode2fid(inode)), inode, cmd);
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_IOCTL, 1);
/* asm-ppc{,64} declares TCGETS, et. al. as type 't' not 'T' */
@@ -2331,9 +2356,8 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
if (copy_from_user(&idv, (char __user *)arg, sizeof(idv)))
return -EFAULT;
- rc = ll_data_version(inode, &idv.idv_version,
- !(idv.idv_flags & LL_DV_NOFLUSH));
-
+ idv.idv_flags &= LL_DV_RD_FLUSH | LL_DV_WR_FLUSH;
+ rc = ll_data_version(inode, &idv.idv_version, idv.idv_flags);
if (rc == 0 && copy_to_user((char __user *)arg, &idv,
sizeof(idv)))
return -EFAULT;
@@ -2499,7 +2523,7 @@ ll_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
rc = och->och_flags &
(FMODE_READ | FMODE_WRITE);
unlock_res_and_lock(lock);
- ldlm_lock_put(lock);
+ LDLM_LOCK_PUT(lock);
}
}
mutex_unlock(&lli->lli_och_mutex);
@@ -2537,9 +2561,8 @@ static loff_t ll_file_seek(struct file *file, loff_t offset, int origin)
retval = offset + ((origin == SEEK_END) ? i_size_read(inode) :
(origin == SEEK_CUR) ? file->f_pos : 0);
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), to=%llu=%#llx(%d)\n",
- inode->i_ino, inode->i_generation, inode, retval, retval,
- origin);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), to=%llu=%#llx(%d)\n",
+ PFID(ll_inode2fid(inode)), inode, retval, retval, origin);
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LLSEEK, 1);
if (origin == SEEK_END || origin == SEEK_HOLE || origin == SEEK_DATA) {
@@ -2603,8 +2626,8 @@ int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
if (IS_ERR(env))
return PTR_ERR(env);
- io = ccc_env_thread_io(env);
- io->ci_obj = cl_i2info(inode)->lli_clob;
+ io = vvp_env_thread_io(env);
+ io->ci_obj = ll_i2info(inode)->lli_clob;
io->ci_ignore_layout = ignore_layout;
/* initialize parameters for sync */
@@ -2634,8 +2657,8 @@ int ll_fsync(struct file *file, loff_t start, loff_t end, int datasync)
struct ptlrpc_request *req;
int rc, err;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
- inode->i_generation, inode);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
+ PFID(ll_inode2fid(inode)), inode);
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FSYNC, 1);
rc = filemap_write_and_wait_range(inode->i_mapping, start, end);
@@ -2693,8 +2716,8 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
int rc;
int rc2 = 0;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu file_lock=%p\n",
- inode->i_ino, file_lock);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID" file_lock=%p\n",
+ PFID(ll_inode2fid(inode)), file_lock);
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_FLOCK, 1);
@@ -2777,9 +2800,9 @@ ll_file_flock(struct file *file, int cmd, struct file_lock *file_lock)
if (IS_ERR(op_data))
return PTR_ERR(op_data);
- CDEBUG(D_DLMTRACE, "inode=%lu, pid=%u, flags=%#llx, mode=%u, start=%llu, end=%llu\n",
- inode->i_ino, flock.l_flock.pid, flags, einfo.ei_mode,
- flock.l_flock.start, flock.l_flock.end);
+ CDEBUG(D_DLMTRACE, "inode="DFID", pid=%u, flags=%#llx, mode=%u, start=%llu, end=%llu\n",
+ PFID(ll_inode2fid(inode)), flock.l_flock.pid, flags,
+ einfo.ei_mode, flock.l_flock.start, flock.l_flock.end);
rc = md_enqueue(sbi->ll_md_exp, &einfo, NULL,
op_data, &lockh, &flock, 0, NULL /* req */, flags);
@@ -2901,8 +2924,8 @@ static int __ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
struct obd_export *exp;
int rc = 0;
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p),name=%pd\n",
- inode->i_ino, inode->i_generation, inode, dentry);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p),name=%pd\n",
+ PFID(ll_inode2fid(inode)), inode, dentry);
exp = ll_i2mdexp(inode);
@@ -2998,9 +3021,9 @@ static int ll_inode_revalidate(struct dentry *dentry, __u64 ibits)
/* if object isn't regular file, don't validate size */
if (!S_ISREG(inode->i_mode)) {
- LTIME_S(inode->i_atime) = ll_i2info(inode)->lli_lvb.lvb_atime;
- LTIME_S(inode->i_mtime) = ll_i2info(inode)->lli_lvb.lvb_mtime;
- LTIME_S(inode->i_ctime) = ll_i2info(inode)->lli_lvb.lvb_ctime;
+ LTIME_S(inode->i_atime) = ll_i2info(inode)->lli_atime;
+ LTIME_S(inode->i_mtime) = ll_i2info(inode)->lli_mtime;
+ LTIME_S(inode->i_ctime) = ll_i2info(inode)->lli_ctime;
} else {
/* In case of restore, the MDT has the right size and has
* already send it back without granting the layout lock,
@@ -3124,8 +3147,8 @@ int ll_inode_permission(struct inode *inode, int mask)
return rc;
}
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), inode mode %x mask %o\n",
- inode->i_ino, inode->i_generation, inode, inode->i_mode, mask);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), inode mode %x mask %o\n",
+ PFID(ll_inode2fid(inode)), inode, inode->i_mode, mask);
if (ll_i2sbi(inode)->ll_flags & LL_SBI_RMT_CLIENT)
return lustre_check_remote_perm(inode, mask);
@@ -3335,10 +3358,10 @@ static int ll_layout_fetch(struct inode *inode, struct ldlm_lock *lock)
int rc;
CDEBUG(D_INODE, DFID" LVB_READY=%d l_lvb_data=%p l_lvb_len=%d\n",
- PFID(ll_inode2fid(inode)), !!(lock->l_flags & LDLM_FL_LVB_READY),
+ PFID(ll_inode2fid(inode)), ldlm_is_lvb_ready(lock),
lock->l_lvb_data, lock->l_lvb_len);
- if (lock->l_lvb_data && (lock->l_flags & LDLM_FL_LVB_READY))
+ if (lock->l_lvb_data && ldlm_is_lvb_ready(lock))
return 0;
/* if layout lock was granted right away, the layout is returned
@@ -3415,14 +3438,14 @@ static int ll_layout_lock_set(struct lustre_handle *lockh, enum ldlm_mode mode,
LASSERT(lock);
LASSERT(ldlm_has_layout(lock));
- LDLM_DEBUG(lock, "File %p/"DFID" being reconfigured: %d",
- inode, PFID(&lli->lli_fid), reconf);
+ LDLM_DEBUG(lock, "File "DFID"(%p) being reconfigured: %d",
+ PFID(&lli->lli_fid), inode, reconf);
/* in case this is a caching lock and reinstate with new inode */
md_set_lock_data(sbi->ll_md_exp, &lockh->cookie, inode, NULL);
lock_res_and_lock(lock);
- lvb_ready = !!(lock->l_flags & LDLM_FL_LVB_READY);
+ lvb_ready = ldlm_is_lvb_ready(lock);
unlock_res_and_lock(lock);
/* checking lvb_ready is racy but this is okay. The worst case is
* that multi processes may configure the file on the same time.
@@ -3487,9 +3510,9 @@ out:
/* wait for IO to complete if it's still being used. */
if (wait_layout) {
- CDEBUG(D_INODE, "%s: %p/" DFID " wait for layout reconf.\n",
+ CDEBUG(D_INODE, "%s: "DFID"(%p) wait for layout reconf\n",
ll_get_fsname(inode->i_sb, NULL, 0),
- inode, PFID(&lli->lli_fid));
+ PFID(&lli->lli_fid), inode);
memset(&conf, 0, sizeof(conf));
conf.coc_opc = OBJECT_CONF_WAIT;
@@ -3498,7 +3521,8 @@ out:
if (rc == 0)
rc = -EAGAIN;
- CDEBUG(D_INODE, "file: " DFID " waiting layout return: %d.\n",
+ CDEBUG(D_INODE, "%s: file="DFID" waiting layout return: %d.\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
PFID(&lli->lli_fid), rc);
}
return rc;
@@ -3571,9 +3595,9 @@ again:
it.it_op = IT_LAYOUT;
lockh.cookie = 0ULL;
- LDLM_DEBUG_NOLOCK("%s: requeue layout lock for file %p/" DFID "",
- ll_get_fsname(inode->i_sb, NULL, 0), inode,
- PFID(&lli->lli_fid));
+ LDLM_DEBUG_NOLOCK("%s: requeue layout lock for file "DFID"(%p)",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(&lli->lli_fid), inode);
rc = md_enqueue(sbi->ll_md_exp, &einfo, &it, op_data, &lockh,
NULL, 0, NULL, 0);
@@ -3601,7 +3625,7 @@ again:
/**
* This function send a restore request to the MDT
*/
-int ll_layout_restore(struct inode *inode)
+int ll_layout_restore(struct inode *inode, loff_t offset, __u64 length)
{
struct hsm_user_request *hur;
int len, rc;
@@ -3617,9 +3641,10 @@ int ll_layout_restore(struct inode *inode)
hur->hur_request.hr_flags = 0;
memcpy(&hur->hur_user_item[0].hui_fid, &ll_i2info(inode)->lli_fid,
sizeof(hur->hur_user_item[0].hui_fid));
- hur->hur_user_item[0].hui_extent.length = -1;
+ hur->hur_user_item[0].hui_extent.offset = offset;
+ hur->hur_user_item[0].hui_extent.length = length;
hur->hur_request.hr_itemcount = 1;
- rc = obd_iocontrol(LL_IOC_HSM_REQUEST, cl_i2sbi(inode)->ll_md_exp,
+ rc = obd_iocontrol(LL_IOC_HSM_REQUEST, ll_i2sbi(inode)->ll_md_exp,
len, hur, NULL);
kfree(hur);
return rc;
diff --git a/drivers/staging/lustre/lustre/lclient/glimpse.c b/drivers/staging/lustre/lustre/llite/glimpse.c
index c4e8a0878ac8..d8ea75424e2f 100644
--- a/drivers/staging/lustre/lustre/lclient/glimpse.c
+++ b/drivers/staging/lustre/lustre/llite/glimpse.c
@@ -52,7 +52,6 @@
#include <linux/file.h>
#include "../include/cl_object.h"
-#include "../include/lclient.h"
#include "../llite/llite_internal.h"
static const struct cl_lock_descr whole_file = {
@@ -70,14 +69,14 @@ static const struct cl_lock_descr whole_file = {
blkcnt_t dirty_cnt(struct inode *inode)
{
blkcnt_t cnt = 0;
- struct ccc_object *vob = cl_inode2ccc(inode);
+ struct vvp_object *vob = cl_inode2vvp(inode);
void *results[1];
if (inode->i_mapping)
cnt += radix_tree_gang_lookup_tag(&inode->i_mapping->page_tree,
results, 0, 1,
PAGECACHE_TAG_DIRTY);
- if (cnt == 0 && atomic_read(&vob->cob_mmap_cnt) > 0)
+ if (cnt == 0 && atomic_read(&vob->vob_mmap_cnt) > 0)
cnt = 1;
return (cnt > 0) ? 1 : 0;
@@ -86,17 +85,17 @@ blkcnt_t dirty_cnt(struct inode *inode)
int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
struct inode *inode, struct cl_object *clob, int agl)
{
- struct cl_lock_descr *descr = &ccc_env_info(env)->cti_descr;
- struct cl_inode_info *lli = cl_i2info(inode);
+ struct ll_inode_info *lli = ll_i2info(inode);
const struct lu_fid *fid = lu_object_fid(&clob->co_lu);
- struct ccc_io *cio = ccc_env_io(env);
- struct cl_lock *lock;
int result;
result = 0;
if (!(lli->lli_flags & LLIF_MDS_SIZE_LOCK)) {
- CDEBUG(D_DLMTRACE, "Glimpsing inode "DFID"\n", PFID(fid));
+ CDEBUG(D_DLMTRACE, "Glimpsing inode " DFID "\n", PFID(fid));
if (lli->lli_has_smd) {
+ struct cl_lock *lock = vvp_env_lock(env);
+ struct cl_lock_descr *descr = &lock->cll_descr;
+
/* NOTE: this looks like DLM lock request, but it may
* not be one. Due to CEF_ASYNC flag (translated
* to LDLM_FL_HAS_INTENT by osc), this is
@@ -113,11 +112,10 @@ int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
*/
*descr = whole_file;
descr->cld_obj = clob;
- descr->cld_mode = CLM_PHANTOM;
+ descr->cld_mode = CLM_READ;
descr->cld_enq_flags = CEF_ASYNC | CEF_MUST;
if (agl)
descr->cld_enq_flags |= CEF_AGL;
- cio->cui_glimpse = 1;
/*
* CEF_ASYNC is used because glimpse sub-locks cannot
* deadlock (because they never conflict with other
@@ -126,21 +124,13 @@ int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
* CEF_MUST protects glimpse lock from conversion into
* a lockless mode.
*/
- lock = cl_lock_request(env, io, descr, "glimpse",
- current);
- cio->cui_glimpse = 0;
-
- if (!lock)
- return 0;
-
- if (IS_ERR(lock))
- return PTR_ERR(lock);
+ result = cl_lock_request(env, io, lock);
+ if (result < 0)
+ return result;
- LASSERT(agl == 0);
- result = cl_wait(env, lock);
- if (result == 0) {
- cl_merge_lvb(env, inode);
- if (cl_isize_read(inode) > 0 &&
+ if (!agl) {
+ ll_merge_attr(env, inode);
+ if (i_size_read(inode) > 0 &&
inode->i_blocks == 0) {
/*
* LU-417: Add dirty pages block count
@@ -150,12 +140,11 @@ int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
*/
inode->i_blocks = dirty_cnt(inode);
}
- cl_unuse(env, lock);
}
- cl_lock_release(env, lock, "glimpse", current);
+ cl_lock_release(env, lock);
} else {
CDEBUG(D_DLMTRACE, "No objects for inode\n");
- cl_merge_lvb(env, inode);
+ ll_merge_attr(env, inode);
}
}
@@ -167,22 +156,24 @@ static int cl_io_get(struct inode *inode, struct lu_env **envout,
{
struct lu_env *env;
struct cl_io *io;
- struct cl_inode_info *lli = cl_i2info(inode);
+ struct ll_inode_info *lli = ll_i2info(inode);
struct cl_object *clob = lli->lli_clob;
int result;
- if (S_ISREG(cl_inode_mode(inode))) {
+ if (S_ISREG(inode->i_mode)) {
env = cl_env_get(refcheck);
if (!IS_ERR(env)) {
- io = ccc_env_thread_io(env);
+ io = vvp_env_thread_io(env);
io->ci_obj = clob;
*envout = env;
*ioout = io;
result = 1;
- } else
+ } else {
result = PTR_ERR(env);
- } else
+ }
+ } else {
result = 0;
+ }
return result;
}
@@ -231,14 +222,11 @@ int cl_local_size(struct inode *inode)
{
struct lu_env *env = NULL;
struct cl_io *io = NULL;
- struct ccc_thread_info *cti;
struct cl_object *clob;
- struct cl_lock_descr *descr;
- struct cl_lock *lock;
int result;
int refcheck;
- if (!cl_i2info(inode)->lli_has_smd)
+ if (!ll_i2info(inode)->lli_has_smd)
return 0;
result = cl_io_get(inode, &env, &io, &refcheck);
@@ -247,22 +235,19 @@ int cl_local_size(struct inode *inode)
clob = io->ci_obj;
result = cl_io_init(env, io, CIT_MISC, clob);
- if (result > 0)
+ if (result > 0) {
result = io->ci_result;
- else if (result == 0) {
- cti = ccc_env_info(env);
- descr = &cti->cti_descr;
-
- *descr = whole_file;
- descr->cld_obj = clob;
- lock = cl_lock_peek(env, io, descr, "localsize", current);
- if (lock) {
- cl_merge_lvb(env, inode);
- cl_unuse(env, lock);
- cl_lock_release(env, lock, "localsize", current);
- result = 0;
- } else
- result = -ENODATA;
+ } else if (result == 0) {
+ struct cl_lock *lock = vvp_env_lock(env);
+
+ lock->cll_descr = whole_file;
+ lock->cll_descr.cld_enq_flags = CEF_PEEK;
+ lock->cll_descr.cld_obj = clob;
+ result = cl_lock_request(env, io, lock);
+ if (result == 0) {
+ ll_merge_attr(env, inode);
+ cl_lock_release(env, lock);
+ }
}
cl_io_fini(env, io);
cl_env_put(env, &refcheck);
diff --git a/drivers/staging/lustre/lustre/llite/lcommon_cl.c b/drivers/staging/lustre/lustre/llite/lcommon_cl.c
new file mode 100644
index 000000000000..6c00715b438f
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/lcommon_cl.c
@@ -0,0 +1,327 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
+ *
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2015, Intel Corporation.
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * cl code shared between vvp and liblustre (and other Lustre clients in the
+ * future).
+ *
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
+ */
+
+#define DEBUG_SUBSYSTEM S_LLITE
+
+#include "../../include/linux/libcfs/libcfs.h"
+# include <linux/fs.h>
+# include <linux/sched.h>
+# include <linux/mm.h>
+# include <linux/quotaops.h>
+# include <linux/highmem.h>
+# include <linux/pagemap.h>
+# include <linux/rbtree.h>
+
+#include "../include/obd.h"
+#include "../include/obd_support.h"
+#include "../include/lustre_fid.h"
+#include "../include/lustre_lite.h"
+#include "../include/lustre_dlm.h"
+#include "../include/lustre_ver.h"
+#include "../include/lustre_mdc.h"
+#include "../include/cl_object.h"
+
+#include "../llite/llite_internal.h"
+
+/*
+ * ccc_ prefix stands for "Common Client Code".
+ */
+
+/*****************************************************************************
+ *
+ * Vvp device and device type functions.
+ *
+ */
+
+/**
+ * An `emergency' environment used by cl_inode_fini() when cl_env_get()
+ * fails. Access to this environment is serialized by cl_inode_fini_guard
+ * mutex.
+ */
+struct lu_env *cl_inode_fini_env;
+int cl_inode_fini_refcheck;
+
+/**
+ * A mutex serializing calls to slp_inode_fini() under extreme memory
+ * pressure, when environments cannot be allocated.
+ */
+static DEFINE_MUTEX(cl_inode_fini_guard);
+
+int cl_setattr_ost(struct inode *inode, const struct iattr *attr)
+{
+ struct lu_env *env;
+ struct cl_io *io;
+ int result;
+ int refcheck;
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return PTR_ERR(env);
+
+ io = vvp_env_thread_io(env);
+ io->ci_obj = ll_i2info(inode)->lli_clob;
+
+ io->u.ci_setattr.sa_attr.lvb_atime = LTIME_S(attr->ia_atime);
+ io->u.ci_setattr.sa_attr.lvb_mtime = LTIME_S(attr->ia_mtime);
+ io->u.ci_setattr.sa_attr.lvb_ctime = LTIME_S(attr->ia_ctime);
+ io->u.ci_setattr.sa_attr.lvb_size = attr->ia_size;
+ io->u.ci_setattr.sa_valid = attr->ia_valid;
+
+again:
+ if (cl_io_init(env, io, CIT_SETATTR, io->ci_obj) == 0) {
+ struct vvp_io *vio = vvp_env_io(env);
+
+ if (attr->ia_valid & ATTR_FILE)
+ /* populate the file descriptor for ftruncate to honor
+ * group lock - see LU-787
+ */
+ vio->vui_fd = LUSTRE_FPRIVATE(attr->ia_file);
+
+ result = cl_io_loop(env, io);
+ } else {
+ result = io->ci_result;
+ }
+ cl_io_fini(env, io);
+ if (unlikely(io->ci_need_restart))
+ goto again;
+ /* HSM import case: file is released, cannot be restored
+ * no need to fail except if restore registration failed
+ * with -ENODATA
+ */
+ if (result == -ENODATA && io->ci_restore_needed &&
+ io->ci_result != -ENODATA)
+ result = 0;
+ cl_env_put(env, &refcheck);
+ return result;
+}
+
+/**
+ * Initialize or update CLIO structures for regular files when new
+ * meta-data arrives from the server.
+ *
+ * \param inode regular file inode
+ * \param md new file metadata from MDS
+ * - allocates cl_object if necessary,
+ * - updated layout, if object was already here.
+ */
+int cl_file_inode_init(struct inode *inode, struct lustre_md *md)
+{
+ struct lu_env *env;
+ struct ll_inode_info *lli;
+ struct cl_object *clob;
+ struct lu_site *site;
+ struct lu_fid *fid;
+ struct cl_object_conf conf = {
+ .coc_inode = inode,
+ .u = {
+ .coc_md = md
+ }
+ };
+ int result = 0;
+ int refcheck;
+
+ LASSERT(md->body->valid & OBD_MD_FLID);
+ LASSERT(S_ISREG(inode->i_mode));
+
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return PTR_ERR(env);
+
+ site = ll_i2sbi(inode)->ll_site;
+ lli = ll_i2info(inode);
+ fid = &lli->lli_fid;
+ LASSERT(fid_is_sane(fid));
+
+ if (!lli->lli_clob) {
+ /* clob is slave of inode, empty lli_clob means for new inode,
+ * there is no clob in cache with the given fid, so it is
+ * unnecessary to perform lookup-alloc-lookup-insert, just
+ * alloc and insert directly.
+ */
+ LASSERT(inode->i_state & I_NEW);
+ conf.coc_lu.loc_flags = LOC_F_NEW;
+ clob = cl_object_find(env, lu2cl_dev(site->ls_top_dev),
+ fid, &conf);
+ if (!IS_ERR(clob)) {
+ /*
+ * No locking is necessary, as new inode is
+ * locked by I_NEW bit.
+ */
+ lli->lli_clob = clob;
+ lli->lli_has_smd = lsm_has_objects(md->lsm);
+ lu_object_ref_add(&clob->co_lu, "inode", inode);
+ } else {
+ result = PTR_ERR(clob);
+ }
+ } else {
+ result = cl_conf_set(env, lli->lli_clob, &conf);
+ }
+
+ cl_env_put(env, &refcheck);
+
+ if (result != 0)
+ CERROR("Failure to initialize cl object " DFID ": %d\n",
+ PFID(fid), result);
+ return result;
+}
+
+/**
+ * Wait for others drop their references of the object at first, then we drop
+ * the last one, which will lead to the object be destroyed immediately.
+ * Must be called after cl_object_kill() against this object.
+ *
+ * The reason we want to do this is: destroying top object will wait for sub
+ * objects being destroyed first, so we can't let bottom layer (e.g. from ASTs)
+ * to initiate top object destroying which may deadlock. See bz22520.
+ */
+static void cl_object_put_last(struct lu_env *env, struct cl_object *obj)
+{
+ struct lu_object_header *header = obj->co_lu.lo_header;
+ wait_queue_t waiter;
+
+ if (unlikely(atomic_read(&header->loh_ref) != 1)) {
+ struct lu_site *site = obj->co_lu.lo_dev->ld_site;
+ struct lu_site_bkt_data *bkt;
+
+ bkt = lu_site_bkt_from_fid(site, &header->loh_fid);
+
+ init_waitqueue_entry(&waiter, current);
+ add_wait_queue(&bkt->lsb_marche_funebre, &waiter);
+
+ while (1) {
+ set_current_state(TASK_UNINTERRUPTIBLE);
+ if (atomic_read(&header->loh_ref) == 1)
+ break;
+ schedule();
+ }
+
+ set_current_state(TASK_RUNNING);
+ remove_wait_queue(&bkt->lsb_marche_funebre, &waiter);
+ }
+
+ cl_object_put(env, obj);
+}
+
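
cl_object_put_last() above parks on the site bucket's waitqueue until every other reference to the object header is gone and only then drops the final one, so that a bottom layer (an AST, say) never ends up initiating the destruction of the top object, which is the bz22520 deadlock the comment mentions. The open-coded loop is morally a wait_event() on the reference count:

    /* equivalent intent, as a sketch; the real code open-codes the loop
     * on the bucket's lsb_marche_funebre waitqueue */
    wait_event(bkt->lsb_marche_funebre,
               atomic_read(&header->loh_ref) == 1);
    cl_object_put(env, obj);
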
+void cl_inode_fini(struct inode *inode)
+{
+ struct lu_env *env;
+ struct ll_inode_info *lli = ll_i2info(inode);
+ struct cl_object *clob = lli->lli_clob;
+ int refcheck;
+ int emergency;
+
+ if (clob) {
+ void *cookie;
+
+ cookie = cl_env_reenter();
+ env = cl_env_get(&refcheck);
+ emergency = IS_ERR(env);
+ if (emergency) {
+ mutex_lock(&cl_inode_fini_guard);
+ LASSERT(cl_inode_fini_env);
+ cl_env_implant(cl_inode_fini_env, &refcheck);
+ env = cl_inode_fini_env;
+ }
+ /*
+ * cl_object cache is a slave to inode cache (which, in turn
+ * is a slave to dentry cache), don't keep cl_object in memory
+ * when its master is evicted.
+ */
+ cl_object_kill(env, clob);
+ lu_object_ref_del(&clob->co_lu, "inode", inode);
+ cl_object_put_last(env, clob);
+ lli->lli_clob = NULL;
+ if (emergency) {
+ cl_env_unplant(cl_inode_fini_env, &refcheck);
+ mutex_unlock(&cl_inode_fini_guard);
+ } else {
+ cl_env_put(env, &refcheck);
+ }
+ cl_env_reexit(cookie);
+ }
+}
+
+/**
+ * build inode number from passed @fid
+ */
+__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32)
+{
+ if (BITS_PER_LONG == 32 || api32)
+ return fid_flatten32(fid);
+ else
+ return fid_flatten(fid);
+}
+
+/**
+ * build inode generation from passed @fid. If our FID overflows the 32-bit
+ * inode number then return a non-zero generation to distinguish them.
+ */
+__u32 cl_fid_build_gen(const struct lu_fid *fid)
+{
+ __u32 gen;
+
+ if (fid_is_igif(fid)) {
+ gen = lu_igif_gen(fid);
+ return gen;
+ }
+
+ gen = fid_flatten(fid) >> 32;
+ return gen;
+}
+
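
cl_fid_build_ino() and cl_fid_build_gen(), moved here with the rest of the former lclient code, turn a 128-bit Lustre FID into the (inode number, generation) pair shown to the VFS: 32-bit or api32 clients get a 32-bit hash from fid_flatten32(), 64-bit clients get fid_flatten(), and the generation carries whatever no longer fits into a 32-bit inode number. Schematically, for a non-IGIF fid:

    /* schematic only; the exact packing is whatever fid_flatten() and
     * fid_flatten32() produce */
    __u64 flat  = fid_flatten(fid);
    __u64 ino64 = flat;                     /* 64-bit clients          */
    __u32 ino32 = fid_flatten32(fid);       /* 32-bit / api32 clients  */
    __u32 gen   = (__u32)(flat >> 32);      /* 0 unless ino overflowed */
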
+/* lsm is unreliable after hsm implementation as layout can be changed at
+ * any time. This is only to support old, non-clio-ized interfaces. It will
+ * cause deadlock if clio operations are called with this extra layout refcount
+ * because in case the layout changed during the IO, ll_layout_refresh() will
+ * have to wait for the refcount to become zero to destroy the older layout.
+ *
+ * Notice that the lsm returned by this function may not be valid unless called
+ * inside layout lock - MDS_INODELOCK_LAYOUT.
+ */
+struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode)
+{
+ return lov_lsm_get(ll_i2info(inode)->lli_clob);
+}
+
+inline void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm)
+{
+ lov_lsm_put(ll_i2info(inode)->lli_clob, lsm);
+}
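
The two FID helpers above, cl_fid_build_ino() and cl_fid_build_gen(), are what llite uses to derive a VFS-visible inode number and generation from a Lustre FID. A minimal sketch of how a caller might combine them is shown below; fill_ino_gen() is a hypothetical helper used only for illustration and is not part of this patch.

static void fill_ino_gen(struct inode *inode, const struct lu_fid *fid,
			 int api32)
{
	/* Flatten the FID into an inode number; api32 requests the 32-bit
	 * mapping (also used automatically when BITS_PER_LONG == 32). */
	inode->i_ino = cl_fid_build_ino(fid, api32);
	/* The generation disambiguates FIDs that collide after flattening. */
	inode->i_generation = cl_fid_build_gen(fid);
}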
diff --git a/drivers/staging/lustre/lustre/lclient/lcommon_misc.c b/drivers/staging/lustre/lustre/llite/lcommon_misc.c
index d80bcedd78d1..12f3e71f48c2 100644
--- a/drivers/staging/lustre/lustre/lclient/lcommon_misc.c
+++ b/drivers/staging/lustre/lustre/llite/lcommon_misc.c
@@ -41,9 +41,9 @@
#include "../include/obd_support.h"
#include "../include/obd.h"
#include "../include/cl_object.h"
-#include "../include/lclient.h"
#include "../include/lustre_lite.h"
+#include "llite_internal.h"
/* Initialize the default and maximum LOV EA and cookie sizes. This allows
* us to make MDS RPCs with large enough reply buffers to hold the
@@ -126,7 +126,7 @@ int cl_ocd_update(struct obd_device *host,
#define GROUPLOCK_SCOPE "grouplock"
int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
- struct ccc_grouplock *cg)
+ struct ll_grouplock *cg)
{
struct lu_env *env;
struct cl_io *io;
@@ -140,20 +140,22 @@ int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
if (IS_ERR(env))
return PTR_ERR(env);
- io = ccc_env_thread_io(env);
+ io = vvp_env_thread_io(env);
io->ci_obj = obj;
io->ci_ignore_layout = 1;
rc = cl_io_init(env, io, CIT_MISC, io->ci_obj);
- if (rc) {
+ if (rc != 0) {
+ cl_io_fini(env, io);
+ cl_env_put(env, &refcheck);
/* Does not make sense to take GL for released layout */
if (rc > 0)
rc = -ENOTSUPP;
- cl_env_put(env, &refcheck);
return rc;
}
- descr = &ccc_env_info(env)->cti_descr;
+ lock = vvp_env_lock(env);
+ descr = &lock->cll_descr;
descr->cld_obj = obj;
descr->cld_start = 0;
descr->cld_end = CL_PAGE_EOF;
@@ -163,38 +165,37 @@ int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
enqflags = CEF_MUST | (nonblock ? CEF_NONBLOCK : 0);
descr->cld_enq_flags = enqflags;
- lock = cl_lock_request(env, io, descr, GROUPLOCK_SCOPE, current);
- if (IS_ERR(lock)) {
+ rc = cl_lock_request(env, io, lock);
+ if (rc < 0) {
cl_io_fini(env, io);
cl_env_put(env, &refcheck);
- return PTR_ERR(lock);
+ return rc;
}
- cg->cg_env = cl_env_get(&refcheck);
- cg->cg_io = io;
- cg->cg_lock = lock;
- cg->cg_gid = gid;
- LASSERT(cg->cg_env == env);
+ cg->lg_env = cl_env_get(&refcheck);
+ cg->lg_io = io;
+ cg->lg_lock = lock;
+ cg->lg_gid = gid;
+ LASSERT(cg->lg_env == env);
cl_env_unplant(env, &refcheck);
return 0;
}
-void cl_put_grouplock(struct ccc_grouplock *cg)
+void cl_put_grouplock(struct ll_grouplock *cg)
{
- struct lu_env *env = cg->cg_env;
- struct cl_io *io = cg->cg_io;
- struct cl_lock *lock = cg->cg_lock;
+ struct lu_env *env = cg->lg_env;
+ struct cl_io *io = cg->lg_io;
+ struct cl_lock *lock = cg->lg_lock;
int refcheck;
- LASSERT(cg->cg_env);
- LASSERT(cg->cg_gid);
+ LASSERT(cg->lg_env);
+ LASSERT(cg->lg_gid);
cl_env_implant(env, &refcheck);
cl_env_put(env, &refcheck);
- cl_unuse(env, lock);
- cl_lock_release(env, lock, GROUPLOCK_SCOPE, current);
+ cl_lock_release(env, lock);
cl_io_fini(env, io);
cl_env_put(env, NULL);
}
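
The group-lock helpers above now operate on the renamed struct ll_grouplock (the real caller keeps it in ll_file_data::fd_grouplock). A minimal usage sketch follows, assuming the caller already has a cl_object and a group id; with_grouplock() is a hypothetical wrapper, not part of this patch.

static int with_grouplock(struct cl_object *obj, unsigned long gid)
{
	struct ll_grouplock cg;
	int rc;

	/* nonblock == 0: wait until the group lock can be granted. */
	rc = cl_get_grouplock(obj, gid, 0, &cg);
	if (rc)
		return rc;

	/* ... I/O covered by the group lock goes here ... */

	cl_put_grouplock(&cg);
	return 0;
}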
diff --git a/drivers/staging/lustre/lustre/llite/llite_close.c b/drivers/staging/lustre/lustre/llite/llite_close.c
index a55ac4dccd90..2df551d3ae6c 100644
--- a/drivers/staging/lustre/lustre/llite/llite_close.c
+++ b/drivers/staging/lustre/lustre/llite/llite_close.c
@@ -46,31 +46,31 @@
#include "llite_internal.h"
/** records that a write is in flight */
-void vvp_write_pending(struct ccc_object *club, struct ccc_page *page)
+void vvp_write_pending(struct vvp_object *club, struct vvp_page *page)
{
- struct ll_inode_info *lli = ll_i2info(club->cob_inode);
+ struct ll_inode_info *lli = ll_i2info(club->vob_inode);
spin_lock(&lli->lli_lock);
lli->lli_flags |= LLIF_SOM_DIRTY;
- if (page && list_empty(&page->cpg_pending_linkage))
- list_add(&page->cpg_pending_linkage, &club->cob_pending_list);
+ if (page && list_empty(&page->vpg_pending_linkage))
+ list_add(&page->vpg_pending_linkage, &club->vob_pending_list);
spin_unlock(&lli->lli_lock);
}
/** records that a write has completed */
-void vvp_write_complete(struct ccc_object *club, struct ccc_page *page)
+void vvp_write_complete(struct vvp_object *club, struct vvp_page *page)
{
- struct ll_inode_info *lli = ll_i2info(club->cob_inode);
+ struct ll_inode_info *lli = ll_i2info(club->vob_inode);
int rc = 0;
spin_lock(&lli->lli_lock);
- if (page && !list_empty(&page->cpg_pending_linkage)) {
- list_del_init(&page->cpg_pending_linkage);
+ if (page && !list_empty(&page->vpg_pending_linkage)) {
+ list_del_init(&page->vpg_pending_linkage);
rc = 1;
}
spin_unlock(&lli->lli_lock);
if (rc)
- ll_queue_done_writing(club->cob_inode, 0);
+ ll_queue_done_writing(club->vob_inode, 0);
}
/** Queues DONE_WRITING if
@@ -80,25 +80,25 @@ void vvp_write_complete(struct ccc_object *club, struct ccc_page *page)
void ll_queue_done_writing(struct inode *inode, unsigned long flags)
{
struct ll_inode_info *lli = ll_i2info(inode);
- struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);
+ struct vvp_object *club = cl2vvp(ll_i2info(inode)->lli_clob);
spin_lock(&lli->lli_lock);
lli->lli_flags |= flags;
if ((lli->lli_flags & LLIF_DONE_WRITING) &&
- list_empty(&club->cob_pending_list)) {
+ list_empty(&club->vob_pending_list)) {
struct ll_close_queue *lcq = ll_i2sbi(inode)->ll_lcq;
if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
- CWARN("ino %lu/%u(flags %u) som valid it just after recovery\n",
- inode->i_ino, inode->i_generation,
- lli->lli_flags);
+			CWARN("%s: file "DFID"(flags %u) Size-on-MDS valid, done writing allowed and no dirty pages\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(ll_inode2fid(inode)), lli->lli_flags);
/* DONE_WRITING is allowed and inode has no dirty page. */
spin_lock(&lcq->lcq_lock);
LASSERT(list_empty(&lli->lli_close_list));
- CDEBUG(D_INODE, "adding inode %lu/%u to close list\n",
- inode->i_ino, inode->i_generation);
+ CDEBUG(D_INODE, "adding inode "DFID" to close list\n",
+ PFID(ll_inode2fid(inode)));
list_add_tail(&lli->lli_close_list, &lcq->lcq_head);
/* Avoid a concurrent insertion into the close thread queue:
@@ -124,9 +124,9 @@ void ll_done_writing_attr(struct inode *inode, struct md_op_data *op_data)
op_data->op_flags |= MF_SOM_CHANGE;
/* Check if Size-on-MDS attributes are valid. */
if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
- CERROR("ino %lu/%u(flags %u) som valid it just after recovery\n",
- inode->i_ino, inode->i_generation,
- lli->lli_flags);
+ CERROR("%s: inode "DFID"(flags %u) MDS holds lock on Size-on-MDS attributes\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(ll_inode2fid(inode)), lli->lli_flags);
if (!cl_local_size(inode)) {
/* Send Size-on-MDS Attributes if valid. */
@@ -140,10 +140,10 @@ void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
struct obd_client_handle **och, unsigned long flags)
{
struct ll_inode_info *lli = ll_i2info(inode);
- struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);
+ struct vvp_object *club = cl2vvp(ll_i2info(inode)->lli_clob);
spin_lock(&lli->lli_lock);
- if (!(list_empty(&club->cob_pending_list))) {
+ if (!(list_empty(&club->vob_pending_list))) {
if (!(lli->lli_flags & LLIF_EPOCH_PENDING)) {
LASSERT(*och);
LASSERT(!lli->lli_pending_och);
@@ -198,7 +198,7 @@ void ll_ioepoch_close(struct inode *inode, struct md_op_data *op_data,
}
}
- LASSERT(list_empty(&club->cob_pending_list));
+ LASSERT(list_empty(&club->vob_pending_list));
lli->lli_flags &= ~LLIF_SOM_DIRTY;
spin_unlock(&lli->lli_lock);
ll_done_writing_attr(inode, op_data);
@@ -221,9 +221,9 @@ int ll_som_update(struct inode *inode, struct md_op_data *op_data)
LASSERT(op_data);
if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
- CERROR("ino %lu/%u(flags %u) som valid it just after recovery\n",
- inode->i_ino, inode->i_generation,
- lli->lli_flags);
+ CERROR("%s: inode "DFID"(flags %u) MDS holds lock on Size-on-MDS attributes\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(ll_inode2fid(inode)), lli->lli_flags);
oa = kmem_cache_zalloc(obdo_cachep, GFP_NOFS);
if (!oa) {
@@ -241,9 +241,9 @@ int ll_som_update(struct inode *inode, struct md_op_data *op_data)
if (rc) {
oa->o_valid = 0;
if (rc != -ENOENT)
- CERROR("inode_getattr failed (%d): unable to send a Size-on-MDS attribute update for inode %lu/%u\n",
- rc, inode->i_ino,
- inode->i_generation);
+ CERROR("%s: inode_getattr failed - unable to send a Size-on-MDS attribute update for inode "DFID": rc = %d\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(ll_inode2fid(inode)), rc);
} else {
CDEBUG(D_INODE, "Size-on-MDS update on "DFID"\n",
PFID(&lli->lli_fid));
@@ -302,9 +302,11 @@ static void ll_done_writing(struct inode *inode)
* OSTs and send setattr to back to MDS.
*/
rc = ll_som_update(inode, op_data);
- else if (rc)
- CERROR("inode %lu mdc done_writing failed: rc = %d\n",
- inode->i_ino, rc);
+ else if (rc) {
+ CERROR("%s: inode "DFID" mdc done_writing failed: rc = %d\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(ll_inode2fid(inode)), rc);
+ }
out:
ll_finish_md_op_data(op_data);
if (och) {
@@ -323,8 +325,9 @@ static struct ll_inode_info *ll_close_next_lli(struct ll_close_queue *lcq)
lli = list_entry(lcq->lcq_head.next, struct ll_inode_info,
lli_close_list);
list_del_init(&lli->lli_close_list);
- } else if (atomic_read(&lcq->lcq_stop))
+ } else if (atomic_read(&lcq->lcq_stop)) {
lli = ERR_PTR(-EALREADY);
+ }
spin_unlock(&lcq->lcq_lock);
return lli;
@@ -348,8 +351,8 @@ static int ll_close_thread(void *arg)
break;
inode = ll_info2i(lli);
- CDEBUG(D_INFO, "done_writing for inode %lu/%u\n",
- inode->i_ino, inode->i_generation);
+ CDEBUG(D_INFO, "done_writing for inode "DFID"\n",
+ PFID(ll_inode2fid(inode)));
ll_done_writing(inode);
iput(inode);
}
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index 65a6acec663b..ce1f949430f1 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -43,11 +43,11 @@
/* for struct cl_lock_descr and struct cl_io */
#include "../include/cl_object.h"
-#include "../include/lclient.h"
#include "../include/lustre_mdc.h"
#include "../include/lustre_intent.h"
#include <linux/compat.h>
#include <linux/posix_acl_xattr.h>
+#include "vvp_internal.h"
#ifndef FMODE_EXEC
#define FMODE_EXEC 0
@@ -99,6 +99,13 @@ struct ll_remote_perm {
*/
};
+struct ll_grouplock {
+ struct lu_env *lg_env;
+ struct cl_io *lg_io;
+ struct cl_lock *lg_lock;
+ unsigned long lg_gid;
+};
+
enum lli_flags {
/* MDS has an authority for the Size-on-MDS attributes. */
LLIF_MDS_SIZE_LOCK = (1 << 0),
@@ -161,7 +168,9 @@ struct ll_inode_info {
struct inode lli_vfs_inode;
/* the most recent timestamps obtained from mds */
- struct ost_lvb lli_lvb;
+ s64 lli_atime;
+ s64 lli_mtime;
+ s64 lli_ctime;
spinlock_t lli_agl_lock;
/* Try to make the d::member and f::member are aligned. Before using
@@ -328,6 +337,7 @@ enum ra_stat {
RA_STAT_EOF,
RA_STAT_MAX_IN_FLIGHT,
RA_STAT_WRONG_GRAB_PAGE,
+ RA_STAT_FAILED_REACH_END,
_NR_RA_STAT,
};
@@ -481,6 +491,12 @@ struct ll_sb_info {
struct lprocfs_stats *ll_stats; /* lprocfs stats counter */
+ /*
+	 * Used to track "unstable" pages on a client, and to maintain an
+	 * LRU list of clean pages. An "unstable" page is defined as any
+	 * page which has been sent to a server as part of a bulk request
+	 * but is not yet committed to stable storage.
+ */
struct cl_client_cache ll_cache;
struct lprocfs_stats *ll_ra_stats;
@@ -525,13 +541,6 @@ struct ll_sb_info {
struct completion ll_kobj_unregister;
};
-struct ll_ra_read {
- pgoff_t lrr_start;
- pgoff_t lrr_count;
- struct task_struct *lrr_reader;
- struct list_head lrr_linkage;
-};
-
/*
* per file-descriptor read-ahead data.
*/
@@ -590,12 +599,6 @@ struct ll_readahead_state {
*/
unsigned long ras_request_index;
/*
- * list of struct ll_ra_read's one per read(2) call current in
- * progress against this file descriptor. Used by read-ahead code,
- * protected by ->ras_lock.
- */
- struct list_head ras_read_beads;
- /*
* The following 3 items are used for detecting the stride I/O
* mode.
* In stride I/O mode,
@@ -622,7 +625,7 @@ extern struct kmem_cache *ll_file_data_slab;
struct lustre_handle;
struct ll_file_data {
struct ll_readahead_state fd_ras;
- struct ccc_grouplock fd_grouplock;
+ struct ll_grouplock fd_grouplock;
__u64 lfd_pos;
__u32 fd_flags;
fmode_t fd_omode;
@@ -663,8 +666,16 @@ static inline int ll_need_32bit_api(struct ll_sb_info *sbi)
#endif
}
-void ll_ra_read_in(struct file *f, struct ll_ra_read *rar);
-void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar);
+void ll_ras_enter(struct file *f);
+
+/* llite/lcommon_misc.c */
+int cl_init_ea_size(struct obd_export *md_exp, struct obd_export *dt_exp);
+int cl_ocd_update(struct obd_device *host,
+ struct obd_device *watched,
+ enum obd_notify_event ev, void *owner, void *data);
+int cl_get_grouplock(struct cl_object *obj, unsigned long gid, int nonblock,
+ struct ll_grouplock *cg);
+void cl_put_grouplock(struct ll_grouplock *cg);
/* llite/lproc_llite.c */
int ldebugfs_register_mountpoint(struct dentry *parent,
@@ -697,15 +708,15 @@ int ll_md_blocking_ast(struct ldlm_lock *, struct ldlm_lock_desc *,
struct dentry *ll_splice_alias(struct inode *inode, struct dentry *de);
/* llite/rw.c */
-int ll_prepare_write(struct file *, struct page *, unsigned from, unsigned to);
-int ll_commit_write(struct file *, struct page *, unsigned from, unsigned to);
int ll_writepage(struct page *page, struct writeback_control *wbc);
int ll_writepages(struct address_space *, struct writeback_control *wbc);
int ll_readpage(struct file *file, struct page *page);
void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras);
int ll_readahead(const struct lu_env *env, struct cl_io *io,
- struct ll_readahead_state *ras, struct address_space *mapping,
- struct cl_page_list *queue, int flags);
+ struct cl_page_list *queue, struct ll_readahead_state *ras,
+ bool hit);
+struct ll_cl_context *ll_cl_init(struct file *file, struct page *vmpage);
+void ll_cl_fini(struct ll_cl_context *lcc);
extern const struct address_space_operations ll_aops;
@@ -740,7 +751,7 @@ struct posix_acl *ll_get_acl(struct inode *inode, int type);
int ll_inode_permission(struct inode *inode, int mask);
int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
- int flags, struct lov_user_md *lum,
+ __u64 flags, struct lov_user_md *lum,
int lum_size);
int ll_lov_getstripe_ea_info(struct inode *inode, const char *filename,
struct lov_mds_md **lmm, int *lmm_size,
@@ -750,9 +761,9 @@ int ll_dir_setstripe(struct inode *inode, struct lov_user_md *lump,
int ll_dir_getstripe(struct inode *inode, struct lov_mds_md **lmmp,
int *lmm_size, struct ptlrpc_request **request);
int ll_fsync(struct file *file, loff_t start, loff_t end, int data);
-int ll_merge_lvb(const struct lu_env *env, struct inode *inode);
+int ll_merge_attr(const struct lu_env *env, struct inode *inode);
int ll_fid2path(struct inode *inode, void __user *arg);
-int ll_data_version(struct inode *inode, __u64 *data_version, int extent_lock);
+int ll_data_version(struct inode *inode, __u64 *data_version, int flags);
int ll_hsm_release(struct inode *inode);
/* llite/dcache.c */
@@ -824,65 +835,8 @@ struct ll_close_queue {
atomic_t lcq_stop;
};
-struct ccc_object *cl_inode2ccc(struct inode *inode);
-
-void vvp_write_pending (struct ccc_object *club, struct ccc_page *page);
-void vvp_write_complete(struct ccc_object *club, struct ccc_page *page);
-
-/* specific architecture can implement only part of this list */
-enum vvp_io_subtype {
- /** normal IO */
- IO_NORMAL,
- /** io started from splice_{read|write} */
- IO_SPLICE
-};
-
-/* IO subtypes */
-struct vvp_io {
- /** io subtype */
- enum vvp_io_subtype cui_io_subtype;
-
- union {
- struct {
- struct pipe_inode_info *cui_pipe;
- unsigned int cui_flags;
- } splice;
- struct vvp_fault_io {
- /**
- * Inode modification time that is checked across DLM
- * lock request.
- */
- time64_t ft_mtime;
- struct vm_area_struct *ft_vma;
- /**
- * locked page returned from vvp_io
- */
- struct page *ft_vmpage;
- struct vm_fault_api {
- /**
- * kernel fault info
- */
- struct vm_fault *ft_vmf;
- /**
- * fault API used bitflags for return code.
- */
- unsigned int ft_flags;
- /**
- * check that flags are from filemap_fault
- */
- bool ft_flags_valid;
- } fault;
- } fault;
- } u;
- /**
- * Read-ahead state used by read and page-fault IO contexts.
- */
- struct ll_ra_read cui_bead;
- /**
- * Set when cui_bead has been initialized.
- */
- int cui_ra_window_set;
-};
+void vvp_write_pending(struct vvp_object *club, struct vvp_page *page);
+void vvp_write_complete(struct vvp_object *club, struct vvp_page *page);
/**
* IO arguments for various VFS I/O interfaces.
@@ -911,54 +865,32 @@ struct ll_cl_context {
int lcc_refcheck;
};
-struct vvp_thread_info {
- struct vvp_io_args vti_args;
- struct ra_io_arg vti_ria;
- struct ll_cl_context vti_io_ctx;
+struct ll_thread_info {
+ struct vvp_io_args lti_args;
+ struct ra_io_arg lti_ria;
+ struct ll_cl_context lti_io_ctx;
};
-static inline struct vvp_thread_info *vvp_env_info(const struct lu_env *env)
-{
- extern struct lu_context_key vvp_key;
- struct vvp_thread_info *info;
-
- info = lu_context_key_get(&env->le_ctx, &vvp_key);
- LASSERT(info);
- return info;
-}
-
-static inline struct vvp_io_args *vvp_env_args(const struct lu_env *env,
- enum vvp_io_subtype type)
+extern struct lu_context_key ll_thread_key;
+static inline struct ll_thread_info *ll_env_info(const struct lu_env *env)
{
- struct vvp_io_args *ret = &vvp_env_info(env)->vti_args;
-
- ret->via_io_subtype = type;
+ struct ll_thread_info *lti;
- return ret;
+ lti = lu_context_key_get(&env->le_ctx, &ll_thread_key);
+ LASSERT(lti);
+ return lti;
}
-struct vvp_session {
- struct vvp_io vs_ios;
-};
-
-static inline struct vvp_session *vvp_env_session(const struct lu_env *env)
+static inline struct vvp_io_args *ll_env_args(const struct lu_env *env,
+ enum vvp_io_subtype type)
{
- extern struct lu_context_key vvp_session_key;
- struct vvp_session *ses;
+ struct vvp_io_args *via = &ll_env_info(env)->lti_args;
- ses = lu_context_key_get(env->le_ses, &vvp_session_key);
- LASSERT(ses);
- return ses;
-}
+ via->via_io_subtype = type;
-static inline struct vvp_io *vvp_env_io(const struct lu_env *env)
-{
- return &vvp_env_session(env)->vs_ios;
+ return via;
}
-int vvp_global_init(void);
-void vvp_global_fini(void);
-
void ll_queue_done_writing(struct inode *inode, unsigned long flags);
void ll_close_thread_shutdown(struct ll_close_queue *lcq);
int ll_close_thread_start(struct ll_close_queue **lcq_ret);
@@ -981,6 +913,10 @@ static inline void ll_invalidate_page(struct page *vmpage)
if (!mapping)
return;
+ /*
+ * truncate_complete_page() calls
+ * a_ops->invalidatepage()->cl_page_delete()->vvp_page_delete().
+ */
ll_teardown_mmaps(mapping, offset, offset + PAGE_SIZE);
truncate_complete_page(mapping, vmpage);
}
@@ -1055,9 +991,6 @@ void free_rmtperm_hash(struct hlist_head *hash);
int ll_update_remote_perm(struct inode *inode, struct mdt_remote_perm *perm);
int lustre_check_remote_perm(struct inode *inode, int mask);
-/* llite/llite_cl.c */
-extern struct lu_device_type vvp_device_type;
-
/**
* Common IO arguments for various VFS I/O interfaces.
*/
@@ -1069,7 +1002,7 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
struct ll_readahead_state *ras, unsigned long index,
unsigned hit);
void ll_ra_count_put(struct ll_sb_info *sbi, unsigned long len);
-void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which);
+void ll_ra_stats_inc(struct inode *inode, enum ra_stat which);
/* llite/llite_rmtacl.c */
#ifdef CONFIG_FS_POSIX_ACL
@@ -1163,6 +1096,22 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentry,
int only_unplug);
void ll_stop_statahead(struct inode *dir, void *key);
+blkcnt_t dirty_cnt(struct inode *inode);
+
+int cl_glimpse_size0(struct inode *inode, int agl);
+int cl_glimpse_lock(const struct lu_env *env, struct cl_io *io,
+ struct inode *inode, struct cl_object *clob, int agl);
+
+static inline int cl_glimpse_size(struct inode *inode)
+{
+ return cl_glimpse_size0(inode, 0);
+}
+
+static inline int cl_agl(struct inode *inode)
+{
+ return cl_glimpse_size0(inode, 1);
+}
+
static inline int ll_glimpse_size(struct inode *inode)
{
struct ll_inode_info *lli = ll_i2info(inode);
@@ -1285,43 +1234,6 @@ typedef enum llioc_iter (*llioc_callback_t)(struct inode *inode,
void *ll_iocontrol_register(llioc_callback_t cb, int count, unsigned int *cmd);
void ll_iocontrol_unregister(void *magic);
-/* lclient compat stuff */
-#define cl_inode_info ll_inode_info
-#define cl_i2info(info) ll_i2info(info)
-#define cl_inode_mode(inode) ((inode)->i_mode)
-#define cl_i2sbi ll_i2sbi
-
-static inline struct ll_file_data *cl_iattr2fd(struct inode *inode,
- const struct iattr *attr)
-{
- LASSERT(attr->ia_valid & ATTR_FILE);
- return LUSTRE_FPRIVATE(attr->ia_file);
-}
-
-static inline void cl_isize_write_nolock(struct inode *inode, loff_t kms)
-{
- LASSERT(mutex_is_locked(&ll_i2info(inode)->lli_size_mutex));
- i_size_write(inode, kms);
-}
-
-static inline void cl_isize_write(struct inode *inode, loff_t kms)
-{
- ll_inode_size_lock(inode);
- i_size_write(inode, kms);
- ll_inode_size_unlock(inode);
-}
-
-#define cl_isize_read(inode) i_size_read(inode)
-
-static inline int cl_merge_lvb(const struct lu_env *env, struct inode *inode)
-{
- return ll_merge_lvb(env, inode);
-}
-
-#define cl_inode_atime(inode) LTIME_S((inode)->i_atime)
-#define cl_inode_ctime(inode) LTIME_S((inode)->i_ctime)
-#define cl_inode_mtime(inode) LTIME_S((inode)->i_mtime)
-
int cl_sync_file_range(struct inode *inode, loff_t start, loff_t end,
enum cl_fsync_mode mode, int ignore_layout);
@@ -1350,7 +1262,7 @@ static inline void cl_stats_tally(struct cl_device *dev, enum cl_req_type crt,
int opc = (crt == CRT_READ) ? LPROC_LL_OSC_READ :
LPROC_LL_OSC_WRITE;
- ll_stats_ops_tally(ll_s2sbi(cl2ccc_dev(dev)->cdv_sb), opc, rc);
+ ll_stats_ops_tally(ll_s2sbi(cl2vvp_dev(dev)->vdv_sb), opc, rc);
}
ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
@@ -1382,18 +1294,16 @@ static inline void ll_set_lock_data(struct obd_export *exp, struct inode *inode,
*/
if (it->d.lustre.it_remote_lock_mode) {
handle.cookie = it->d.lustre.it_remote_lock_handle;
- CDEBUG(D_DLMTRACE, "setting l_data to inode %p(%lu/%u) for remote lock %#llx\n",
- inode,
- inode->i_ino, inode->i_generation,
+ CDEBUG(D_DLMTRACE, "setting l_data to inode "DFID"%p for remote lock %#llx\n",
+ PFID(ll_inode2fid(inode)), inode,
handle.cookie);
md_set_lock_data(exp, &handle.cookie, inode, NULL);
}
handle.cookie = it->d.lustre.it_lock_handle;
- CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u) for lock %#llx\n",
- inode, inode->i_ino,
- inode->i_generation, handle.cookie);
+ CDEBUG(D_DLMTRACE, "setting l_data to inode "DFID"%p for lock %#llx\n",
+ PFID(ll_inode2fid(inode)), inode, handle.cookie);
md_set_lock_data(exp, &handle.cookie, inode,
&it->d.lustre.it_lock_bits);
@@ -1471,9 +1381,25 @@ enum {
int ll_layout_conf(struct inode *inode, const struct cl_object_conf *conf);
int ll_layout_refresh(struct inode *inode, __u32 *gen);
-int ll_layout_restore(struct inode *inode);
+int ll_layout_restore(struct inode *inode, loff_t start, __u64 length);
int ll_xattr_init(void);
void ll_xattr_fini(void);
+int ll_page_sync_io(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page, enum cl_req_type crt);
+
+/* lcommon_cl.c */
+int cl_setattr_ost(struct inode *inode, const struct iattr *attr);
+
+extern struct lu_env *cl_inode_fini_env;
+extern int cl_inode_fini_refcheck;
+
+int cl_file_inode_init(struct inode *inode, struct lustre_md *md);
+void cl_inode_fini(struct inode *inode);
+int cl_local_size(struct inode *inode);
+
+__u64 cl_fid_build_ino(const struct lu_fid *fid, int api32);
+__u32 cl_fid_build_gen(const struct lu_fid *fid);
+
#endif /* LLITE_INTERNAL_H */
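
The glimpse helpers declared above are used by llite to refresh an inode's size from its data objects: cl_glimpse_size() performs a synchronous glimpse, while cl_agl() requests the asynchronous variant via cl_glimpse_size0(inode, 1). A minimal sketch of a caller that wants an up-to-date i_size is shown below; refresh_size() is hypothetical and not part of this patch.

static int refresh_size(struct inode *inode, loff_t *size)
{
	int rc;

	/* Glimpse the object so that the cached inode size is current. */
	rc = cl_glimpse_size(inode);
	if (rc)
		return rc;

	*size = i_size_read(inode);
	return 0;
}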
diff --git a/drivers/staging/lustre/lustre/llite/llite_lib.c b/drivers/staging/lustre/lustre/llite/llite_lib.c
index b57a992688a8..96c7e9fc6e5f 100644
--- a/drivers/staging/lustre/lustre/llite/llite_lib.c
+++ b/drivers/staging/lustre/lustre/llite/llite_lib.c
@@ -85,18 +85,18 @@ static struct ll_sb_info *ll_init_sbi(struct super_block *sb)
si_meminfo(&si);
pages = si.totalram - si.totalhigh;
- if (pages >> (20 - PAGE_SHIFT) < 512)
- lru_page_max = pages / 2;
- else
- lru_page_max = (pages / 4) * 3;
+ lru_page_max = pages / 2;
- /* initialize lru data */
+ /* initialize ll_cache data */
atomic_set(&sbi->ll_cache.ccc_users, 0);
sbi->ll_cache.ccc_lru_max = lru_page_max;
atomic_set(&sbi->ll_cache.ccc_lru_left, lru_page_max);
spin_lock_init(&sbi->ll_cache.ccc_lru_lock);
INIT_LIST_HEAD(&sbi->ll_cache.ccc_lru);
+ atomic_set(&sbi->ll_cache.ccc_unstable_nr, 0);
+ init_waitqueue_head(&sbi->ll_cache.ccc_unstable_waitq);
+
sbi->ll_ra_info.ra_max_pages_per_file = min(pages / 32,
SBI_DEFAULT_READAHEAD_MAX);
sbi->ll_ra_info.ra_max_pages = sbi->ll_ra_info.ra_max_pages_per_file;
@@ -169,12 +169,6 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
return -ENOMEM;
}
- if (llite_root) {
- err = ldebugfs_register_mountpoint(llite_root, sb, dt, md);
- if (err < 0)
- CERROR("could not register mount in <debugfs>/lustre/llite\n");
- }
-
/* indicate the features supported by this client */
data->ocd_connect_flags = OBD_CONNECT_IBITS | OBD_CONNECT_NODEVOH |
OBD_CONNECT_ATTRFID |
@@ -337,10 +331,8 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
else
sbi->ll_md_brw_size = PAGE_SIZE;
- if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK) {
- LCONSOLE_INFO("Layout lock feature supported.\n");
+ if (data->ocd_connect_flags & OBD_CONNECT_LAYOUTLOCK)
sbi->ll_flags |= LL_SBI_LAYOUT_LOCK;
- }
if (data->ocd_ibits_known & MDS_INODELOCK_XATTR) {
if (!(data->ocd_connect_flags & OBD_CONNECT_MAX_EASIZE)) {
@@ -453,7 +445,7 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
/* make root inode
* XXX: move this to after cbd setup?
*/
- valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS;
+ valid = OBD_MD_FLGETATTR | OBD_MD_FLBLOCKS | OBD_MD_FLMODEASIZE;
if (sbi->ll_flags & LL_SBI_RMT_CLIENT)
valid |= OBD_MD_FLRMTPERM;
else if (sbi->ll_flags & LL_SBI_ACL)
@@ -555,6 +547,15 @@ static int client_common_fill_super(struct super_block *sb, char *md, char *dt,
kfree(data);
kfree(osfs);
+ if (llite_root) {
+ err = ldebugfs_register_mountpoint(llite_root, sb, dt, md);
+ if (err < 0) {
+ CERROR("%s: could not register mount in debugfs: "
+ "rc = %d\n", ll_get_fsname(sb, NULL, 0), err);
+ err = 0;
+ }
+ }
+
return err;
out_root:
iput(root);
@@ -573,7 +574,6 @@ out_md:
out:
kfree(data);
kfree(osfs);
- ldebugfs_unregister_mountpoint(sbi);
return err;
}
@@ -897,10 +897,8 @@ int ll_fill_super(struct super_block *sb, struct vfsmount *mnt)
cfg->cfg_callback = class_config_llog_handler;
/* set up client obds */
err = lustre_process_log(sb, profilenm, cfg);
- if (err < 0) {
- CERROR("Unable to process log: %d\n", err);
+ if (err < 0)
goto out_free;
- }
/* Profile set with LCFG_MOUNTOPT so we can find our mdc and osc obds */
lprof = class_get_profile(profilenm);
@@ -947,7 +945,7 @@ void ll_put_super(struct super_block *sb)
struct lustre_sb_info *lsi = s2lsi(sb);
struct ll_sb_info *sbi = ll_s2sbi(sb);
char *profilenm = get_profile_name(sb);
- int next, force = 1;
+ int ccc_count, next, force = 1, rc = 0;
CDEBUG(D_VFSTRACE, "VFS Op: sb %p - %s\n", sb, profilenm);
@@ -963,6 +961,19 @@ void ll_put_super(struct super_block *sb)
force = obd->obd_force;
}
+ /* Wait for unstable pages to be committed to stable storage */
+ if (!force) {
+ struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
+
+ rc = l_wait_event(sbi->ll_cache.ccc_unstable_waitq,
+ !atomic_read(&sbi->ll_cache.ccc_unstable_nr),
+ &lwi);
+ }
+
+ ccc_count = atomic_read(&sbi->ll_cache.ccc_unstable_nr);
+ if (!force && rc != -EINTR)
+ LASSERTF(!ccc_count, "count: %i\n", ccc_count);
+
/* We need to set force before the lov_disconnect in
* lustre_common_put_super, since l_d cleans up osc's as well.
*/
@@ -999,6 +1010,8 @@ void ll_put_super(struct super_block *sb)
lustre_common_put_super(sb);
+ cl_env_cache_purge(~0);
+
module_put(THIS_MODULE);
} /* client_put_super */
@@ -1032,8 +1045,8 @@ void ll_clear_inode(struct inode *inode)
struct ll_inode_info *lli = ll_i2info(inode);
struct ll_sb_info *sbi = ll_i2sbi(inode);
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n", inode->i_ino,
- inode->i_generation, inode);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
+ PFID(ll_inode2fid(inode)), inode);
if (S_ISDIR(inode->i_mode)) {
/* these should have been cleared in ll_file_release */
@@ -1180,9 +1193,11 @@ static int ll_setattr_done_writing(struct inode *inode,
* from OSTs and send setattr to back to MDS.
*/
rc = ll_som_update(inode, op_data);
- else if (rc)
- CERROR("inode %lu mdc truncate failed: rc = %d\n",
- inode->i_ino, rc);
+ else if (rc) {
+ CERROR("%s: inode "DFID" mdc truncate failed: rc = %d\n",
+ ll_i2sbi(inode)->ll_md_exp->exp_obd->obd_name,
+ PFID(ll_inode2fid(inode)), rc);
+ }
return rc;
}
@@ -1210,12 +1225,9 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
bool file_is_released = false;
int rc = 0, rc1 = 0;
- CDEBUG(D_VFSTRACE,
- "%s: setattr inode %p/fid:" DFID
- " from %llu to %llu, valid %x, hsm_import %d\n",
- ll_get_fsname(inode->i_sb, NULL, 0), inode,
- PFID(&lli->lli_fid), i_size_read(inode), attr->ia_size,
- attr->ia_valid, hsm_import);
+ CDEBUG(D_VFSTRACE, "%s: setattr inode "DFID"(%p) from %llu to %llu, valid %x, hsm_import %d\n",
+ ll_get_fsname(inode->i_sb, NULL, 0), PFID(&lli->lli_fid), inode,
+ i_size_read(inode), attr->ia_size, attr->ia_valid, hsm_import);
if (attr->ia_valid & ATTR_SIZE) {
/* Check new size against VFS/VM file size limit and rlimit */
@@ -1265,14 +1277,6 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
LTIME_S(attr->ia_mtime), LTIME_S(attr->ia_ctime),
(s64)ktime_get_real_seconds());
- /* If we are changing file size, file content is modified, flag it. */
- if (attr->ia_valid & ATTR_SIZE) {
- attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
- spin_lock(&lli->lli_lock);
- lli->lli_flags |= LLIF_DATA_MODIFIED;
- spin_unlock(&lli->lli_lock);
- }
-
/* We always do an MDS RPC, even if we're only changing the size;
* only the MDS knows whether truncate() should fail with -ETXTBUSY
*/
@@ -1284,13 +1288,6 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
if (!S_ISDIR(inode->i_mode))
inode_unlock(inode);
- memcpy(&op_data->op_attr, attr, sizeof(*attr));
-
- /* Open epoch for truncate. */
- if (exp_connect_som(ll_i2mdexp(inode)) &&
- (attr->ia_valid & (ATTR_SIZE | ATTR_MTIME | ATTR_MTIME_SET)))
- op_data->op_flags = MF_EPOCH_OPEN;
-
	/* truncate on a released file must fail with -ENODATA,
* so size must not be set on MDS for released file
* but other attributes must be set
@@ -1304,29 +1301,40 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
if (lsm && lsm->lsm_pattern & LOV_PATTERN_F_RELEASED)
file_is_released = true;
ccc_inode_lsm_put(inode, lsm);
+
+ if (!hsm_import && attr->ia_valid & ATTR_SIZE) {
+ if (file_is_released) {
+ rc = ll_layout_restore(inode, 0, attr->ia_size);
+ if (rc < 0)
+ goto out;
+
+ file_is_released = false;
+ ll_layout_refresh(inode, &gen);
+ }
+
+ /*
+			 * If we are changing the file size, the file content
+			 * is modified; flag it.
+ */
+ attr->ia_valid |= MDS_OPEN_OWNEROVERRIDE;
+ spin_lock(&lli->lli_lock);
+ lli->lli_flags |= LLIF_DATA_MODIFIED;
+ spin_unlock(&lli->lli_lock);
+ op_data->op_bias |= MDS_DATA_MODIFIED;
+ }
}
- /* if not in HSM import mode, clear size attr for released file
- * we clear the attribute send to MDT in op_data, not the original
- * received from caller in attr which is used later to
- * decide return code
- */
- if (file_is_released && (attr->ia_valid & ATTR_SIZE) && !hsm_import)
- op_data->op_attr.ia_valid &= ~ATTR_SIZE;
+ memcpy(&op_data->op_attr, attr, sizeof(*attr));
+
+ /* Open epoch for truncate. */
+ if (exp_connect_som(ll_i2mdexp(inode)) && !hsm_import &&
+ (attr->ia_valid & (ATTR_SIZE | ATTR_MTIME | ATTR_MTIME_SET)))
+ op_data->op_flags = MF_EPOCH_OPEN;
rc = ll_md_setattr(dentry, op_data, &mod);
if (rc)
goto out;
- /* truncate failed (only when non HSM import), others succeed */
- if (file_is_released) {
- if ((attr->ia_valid & ATTR_SIZE) && !hsm_import)
- rc = -ENODATA;
- else
- rc = 0;
- goto out;
- }
-
/* RPC to MDT is sent, cancel data modification flag */
if (op_data->op_bias & MDS_DATA_MODIFIED) {
spin_lock(&lli->lli_lock);
@@ -1335,7 +1343,7 @@ int ll_setattr_raw(struct dentry *dentry, struct iattr *attr, bool hsm_import)
}
ll_ioepoch_open(lli, op_data->op_ioepoch);
- if (!S_ISREG(inode->i_mode)) {
+ if (!S_ISREG(inode->i_mode) || file_is_released) {
rc = 0;
goto out;
}
@@ -1552,7 +1560,7 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
if (body->valid & OBD_MD_FLATIME) {
if (body->atime > LTIME_S(inode->i_atime))
LTIME_S(inode->i_atime) = body->atime;
- lli->lli_lvb.lvb_atime = body->atime;
+ lli->lli_atime = body->atime;
}
if (body->valid & OBD_MD_FLMTIME) {
if (body->mtime > LTIME_S(inode->i_mtime)) {
@@ -1561,12 +1569,12 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
body->mtime);
LTIME_S(inode->i_mtime) = body->mtime;
}
- lli->lli_lvb.lvb_mtime = body->mtime;
+ lli->lli_mtime = body->mtime;
}
if (body->valid & OBD_MD_FLCTIME) {
if (body->ctime > LTIME_S(inode->i_ctime))
LTIME_S(inode->i_ctime) = body->ctime;
- lli->lli_lvb.lvb_ctime = body->ctime;
+ lli->lli_ctime = body->ctime;
}
if (body->valid & OBD_MD_FLMODE)
inode->i_mode = (inode->i_mode & S_IFMT)|(body->mode & ~S_IFMT);
@@ -1593,12 +1601,12 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
/* FID shouldn't be changed! */
if (fid_is_sane(&lli->lli_fid)) {
LASSERTF(lu_fid_eq(&lli->lli_fid, &body->fid1),
- "Trying to change FID "DFID
- " to the "DFID", inode %lu/%u(%p)\n",
+ "Trying to change FID "DFID" to the "DFID", inode "DFID"(%p)\n",
PFID(&lli->lli_fid), PFID(&body->fid1),
- inode->i_ino, inode->i_generation, inode);
- } else
+ PFID(ll_inode2fid(inode)), inode);
+ } else {
lli->lli_fid = body->fid1;
+ }
}
LASSERT(fid_seq(&lli->lli_fid) != 0);
@@ -1622,8 +1630,10 @@ void ll_update_inode(struct inode *inode, struct lustre_md *md)
if (lli->lli_flags & (LLIF_DONE_WRITING |
LLIF_EPOCH_PENDING |
LLIF_SOM_DIRTY)) {
- CERROR("ino %lu flags %u still has size authority! do not trust the size got from MDS\n",
- inode->i_ino, lli->lli_flags);
+			CERROR("%s: inode "DFID" flags %u still has size authority! do not trust the size returned by the MDS\n",
+ sbi->ll_md_exp->exp_obd->obd_name,
+ PFID(ll_inode2fid(inode)),
+ lli->lli_flags);
} else {
/* Use old size assignment to avoid
* deadlock bz14138 & bz14326
@@ -1699,7 +1709,7 @@ void ll_read_inode2(struct inode *inode, void *opaque)
void ll_delete_inode(struct inode *inode)
{
- struct cl_inode_info *lli = cl_i2info(inode);
+ struct ll_inode_info *lli = ll_i2info(inode);
if (S_ISREG(inode->i_mode) && lli->lli_clob)
/* discard all dirty pages before truncating them, required by
@@ -1715,8 +1725,8 @@ void ll_delete_inode(struct inode *inode)
spin_lock_irq(&inode->i_data.tree_lock);
spin_unlock_irq(&inode->i_data.tree_lock);
LASSERTF(inode->i_data.nrpages == 0,
- "inode=%lu/%u(%p) nrpages=%lu, see http://jira.whamcloud.com/browse/LU-118\n",
- inode->i_ino, inode->i_generation, inode,
+ "inode="DFID"(%p) nrpages=%lu, see http://jira.whamcloud.com/browse/LU-118\n",
+ PFID(ll_inode2fid(inode)), inode,
inode->i_data.nrpages);
}
/* Workaround end */
@@ -1747,7 +1757,9 @@ int ll_iocontrol(struct inode *inode, struct file *file,
rc = md_getattr(sbi->ll_md_exp, op_data, &req);
ll_finish_md_op_data(op_data);
if (rc) {
- CERROR("failure %d inode %lu\n", rc, inode->i_ino);
+ CERROR("%s: failure inode "DFID": rc = %d\n",
+ sbi->ll_md_exp->exp_obd->obd_name,
+ PFID(ll_inode2fid(inode)), rc);
return -abs(rc);
}
@@ -1772,7 +1784,7 @@ int ll_iocontrol(struct inode *inode, struct file *file,
if (IS_ERR(op_data))
return PTR_ERR(op_data);
- ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags = flags;
+ op_data->op_attr_flags = flags;
op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
rc = md_setattr(sbi->ll_md_exp, op_data,
NULL, 0, NULL, 0, &req, NULL);
@@ -2066,11 +2078,11 @@ int ll_obd_statfs(struct inode *inode, void __user *arg)
}
memcpy(&type, data->ioc_inlbuf1, sizeof(__u32));
- if (type & LL_STATFS_LMV)
+ if (type & LL_STATFS_LMV) {
exp = sbi->ll_md_exp;
- else if (type & LL_STATFS_LOV)
+ } else if (type & LL_STATFS_LOV) {
exp = sbi->ll_dt_exp;
- else {
+ } else {
rc = -ENODEV;
goto out_statfs;
}
@@ -2271,7 +2283,7 @@ void ll_dirty_page_discard_warn(struct page *page, int ioret)
{
char *buf, *path = NULL;
struct dentry *dentry = NULL;
- struct ccc_object *obj = cl_inode2ccc(page->mapping->host);
+ struct vvp_object *obj = cl_inode2vvp(page->mapping->host);
/* this can be called inside spin lock so use GFP_ATOMIC. */
buf = (char *)__get_free_page(GFP_ATOMIC);
@@ -2285,7 +2297,7 @@ void ll_dirty_page_discard_warn(struct page *page, int ioret)
"%s: dirty page discard: %s/fid: " DFID "/%s may get corrupted (rc %d)\n",
ll_get_fsname(page->mapping->host->i_sb, NULL, 0),
s2lsi(page->mapping->host->i_sb)->lsi_lmd->lmd_dev,
- PFID(&obj->cob_header.coh_lu.loh_fid),
+ PFID(&obj->vob_header.coh_lu.loh_fid),
(path && !IS_ERR(path)) ? path : "", ioret);
if (dentry)
diff --git a/drivers/staging/lustre/lustre/llite/llite_mmap.c b/drivers/staging/lustre/lustre/llite/llite_mmap.c
index 5b484e62ffd0..88ef1cac9e0f 100644
--- a/drivers/staging/lustre/lustre/llite/llite_mmap.c
+++ b/drivers/staging/lustre/lustre/llite/llite_mmap.c
@@ -57,10 +57,10 @@ void policy_from_vma(ldlm_policy_data_t *policy,
struct vm_area_struct *vma, unsigned long addr,
size_t count)
{
- policy->l_extent.start = ((addr - vma->vm_start) & CFS_PAGE_MASK) +
+ policy->l_extent.start = ((addr - vma->vm_start) & PAGE_MASK) +
(vma->vm_pgoff << PAGE_SHIFT);
policy->l_extent.end = (policy->l_extent.start + count - 1) |
- ~CFS_PAGE_MASK;
+ ~PAGE_MASK;
}
struct vm_area_struct *our_vma(struct mm_struct *mm, unsigned long addr,
@@ -123,7 +123,8 @@ ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret,
*env_ret = env;
- io = ccc_env_thread_io(env);
+restart:
+ io = vvp_env_thread_io(env);
io->ci_obj = ll_i2info(inode)->lli_clob;
LASSERT(io->ci_obj);
@@ -146,17 +147,20 @@ ll_fault_io_init(struct vm_area_struct *vma, struct lu_env **env_ret,
rc = cl_io_init(env, io, CIT_FAULT, io->ci_obj);
if (rc == 0) {
- struct ccc_io *cio = ccc_env_io(env);
+ struct vvp_io *vio = vvp_env_io(env);
struct ll_file_data *fd = LUSTRE_FPRIVATE(file);
- LASSERT(cio->cui_cl.cis_io == io);
+ LASSERT(vio->vui_cl.cis_io == io);
/* mmap lock must be MANDATORY it has to cache pages. */
io->ci_lockreq = CILR_MANDATORY;
- cio->cui_fd = fd;
+ vio->vui_fd = fd;
} else {
LASSERT(rc < 0);
cl_io_fini(env, io);
+ if (io->ci_need_restart)
+ goto restart;
+
cl_env_nested_put(nest, env);
io = ERR_PTR(rc);
}
@@ -200,7 +204,7 @@ static int ll_page_mkwrite0(struct vm_area_struct *vma, struct page *vmpage,
* Otherwise, we could add dirty pages into osc cache
* while truncate is on-going.
*/
- inode = ccc_object_inode(io->ci_obj);
+ inode = vvp_object_inode(io->ci_obj);
lli = ll_i2info(inode);
down_read(&lli->lli_trunc_sem);
@@ -307,17 +311,17 @@ static int ll_fault0(struct vm_area_struct *vma, struct vm_fault *vmf)
vio = vvp_env_io(env);
vio->u.fault.ft_vma = vma;
vio->u.fault.ft_vmpage = NULL;
- vio->u.fault.fault.ft_vmf = vmf;
- vio->u.fault.fault.ft_flags = 0;
- vio->u.fault.fault.ft_flags_valid = false;
+ vio->u.fault.ft_vmf = vmf;
+ vio->u.fault.ft_flags = 0;
+ vio->u.fault.ft_flags_valid = false;
result = cl_io_loop(env, io);
/* ft_flags are only valid if we reached
* the call to filemap_fault
*/
- if (vio->u.fault.fault.ft_flags_valid)
- fault_ret = vio->u.fault.fault.ft_flags;
+ if (vio->u.fault.ft_flags_valid)
+ fault_ret = vio->u.fault.ft_flags;
vmpage = vio->u.fault.ft_vmpage;
if (result != 0 && vmpage) {
@@ -390,9 +394,11 @@ static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
result = ll_page_mkwrite0(vma, vmf->page, &retry);
if (!printed && ++count > 16) {
- CWARN("app(%s): the page %lu of file %lu is under heavy contention.\n",
+ const struct dentry *de = vma->vm_file->f_path.dentry;
+
+ CWARN("app(%s): the page %lu of file "DFID" is under heavy contention\n",
current->comm, vmf->pgoff,
- file_inode(vma->vm_file)->i_ino);
+ PFID(ll_inode2fid(de->d_inode)));
printed = true;
}
} while (retry);
@@ -422,16 +428,16 @@ static int ll_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
/**
* To avoid cancel the locks covering mmapped region for lock cache pressure,
- * we track the mapped vma count in ccc_object::cob_mmap_cnt.
+ * we track the mapped vma count in vvp_object::vob_mmap_cnt.
*/
static void ll_vm_open(struct vm_area_struct *vma)
{
struct inode *inode = file_inode(vma->vm_file);
- struct ccc_object *vob = cl_inode2ccc(inode);
+ struct vvp_object *vob = cl_inode2vvp(inode);
LASSERT(vma->vm_file);
- LASSERT(atomic_read(&vob->cob_mmap_cnt) >= 0);
- atomic_inc(&vob->cob_mmap_cnt);
+ LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
+ atomic_inc(&vob->vob_mmap_cnt);
}
/**
@@ -440,11 +446,11 @@ static void ll_vm_open(struct vm_area_struct *vma)
static void ll_vm_close(struct vm_area_struct *vma)
{
struct inode *inode = file_inode(vma->vm_file);
- struct ccc_object *vob = cl_inode2ccc(inode);
+ struct vvp_object *vob = cl_inode2vvp(inode);
LASSERT(vma->vm_file);
- atomic_dec(&vob->cob_mmap_cnt);
- LASSERT(atomic_read(&vob->cob_mmap_cnt) >= 0);
+ atomic_dec(&vob->vob_mmap_cnt);
+ LASSERT(atomic_read(&vob->vob_mmap_cnt) >= 0);
}
/* XXX put nice comment here. talk about __free_pte -> dirty pages and
diff --git a/drivers/staging/lustre/lustre/llite/llite_nfs.c b/drivers/staging/lustre/lustre/llite/llite_nfs.c
index 193aab879709..c1eef6198b25 100644
--- a/drivers/staging/lustre/lustre/llite/llite_nfs.c
+++ b/drivers/staging/lustre/lustre/llite/llite_nfs.c
@@ -119,7 +119,7 @@ struct inode *search_inode_for_lustre(struct super_block *sb,
rc = md_getattr(sbi->ll_md_exp, op_data, &req);
kfree(op_data);
if (rc) {
- CERROR("can't get object attrs, fid "DFID", rc %d\n",
+ CDEBUG(D_INFO, "can't get object attrs, fid "DFID", rc %d\n",
PFID(fid), rc);
return ERR_PTR(rc);
}
@@ -191,8 +191,9 @@ static int ll_encode_fh(struct inode *inode, __u32 *fh, int *plen,
int fileid_len = sizeof(struct lustre_nfs_fid) / 4;
struct lustre_nfs_fid *nfs_fid = (void *)fh;
- CDEBUG(D_INFO, "encoding for (%lu," DFID ") maxlen=%d minlen=%d\n",
- inode->i_ino, PFID(ll_inode2fid(inode)), *plen, fileid_len);
+ CDEBUG(D_INFO, "%s: encoding for ("DFID") maxlen=%d minlen=%d\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(ll_inode2fid(inode)), *plen, fileid_len);
if (*plen < fileid_len) {
*plen = fileid_len;
@@ -298,8 +299,9 @@ static struct dentry *ll_get_parent(struct dentry *dchild)
sbi = ll_s2sbi(dir->i_sb);
- CDEBUG(D_INFO, "getting parent for (%lu," DFID ")\n",
- dir->i_ino, PFID(ll_inode2fid(dir)));
+ CDEBUG(D_INFO, "%s: getting parent for ("DFID")\n",
+ ll_get_fsname(dir->i_sb, NULL, 0),
+ PFID(ll_inode2fid(dir)));
rc = ll_get_default_mdsize(sbi, &lmmsize);
if (rc != 0)
@@ -314,15 +316,20 @@ static struct dentry *ll_get_parent(struct dentry *dchild)
rc = md_getattr_name(sbi->ll_md_exp, op_data, &req);
ll_finish_md_op_data(op_data);
if (rc) {
- CERROR("failure %d inode %lu get parent\n", rc, dir->i_ino);
+ CERROR("%s: failure inode "DFID" get parent: rc = %d\n",
+ ll_get_fsname(dir->i_sb, NULL, 0),
+ PFID(ll_inode2fid(dir)), rc);
return ERR_PTR(rc);
}
body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
- LASSERT(body->valid & OBD_MD_FLID);
-
- CDEBUG(D_INFO, "parent for " DFID " is " DFID "\n",
- PFID(ll_inode2fid(dir)), PFID(&body->fid1));
-
+ /*
+	 * LU-3952: the MDT may have lost the FID of its parent; we should
+	 * not crash the NFS server, and ll_iget_for_nfs() will handle the
+	 * error.
+ */
+ if (body->valid & OBD_MD_FLID) {
+ CDEBUG(D_INFO, "parent for " DFID " is " DFID "\n",
+ PFID(ll_inode2fid(dir)), PFID(&body->fid1));
+ }
result = ll_iget_for_nfs(dir->i_sb, &body->fid1, NULL);
ptlrpc_req_finished(req);
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c
index f169c0db63b4..813a9a354e5f 100644
--- a/drivers/staging/lustre/lustre/llite/lloop.c
+++ b/drivers/staging/lustre/lustre/llite/lloop.c
@@ -274,8 +274,9 @@ static void loop_add_bio(struct lloop_device *lo, struct bio *bio)
if (lo->lo_biotail) {
lo->lo_biotail->bi_next = bio;
lo->lo_biotail = bio;
- } else
+ } else {
lo->lo_bio = lo->lo_biotail = bio;
+ }
spin_unlock_irqrestore(&lo->lo_lock, flags);
atomic_inc(&lo->lo_pending);
diff --git a/drivers/staging/lustre/lustre/llite/lproc_llite.c b/drivers/staging/lustre/lustre/llite/lproc_llite.c
index 27ab1261400e..55d62eb11957 100644
--- a/drivers/staging/lustre/lustre/llite/lproc_llite.c
+++ b/drivers/staging/lustre/lustre/llite/lproc_llite.c
@@ -254,7 +254,6 @@ static ssize_t max_read_ahead_mb_store(struct kobject *kobj,
pages_number *= 1 << (20 - PAGE_SHIFT); /* MB -> pages */
if (pages_number > totalram_pages / 2) {
-
CERROR("can't set file readahead more than %lu MB\n",
totalram_pages >> (20 - PAGE_SHIFT + 1)); /*1/2 of RAM*/
return -ERANGE;
@@ -393,6 +392,8 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
struct super_block *sb = ((struct seq_file *)file->private_data)->private;
struct ll_sb_info *sbi = ll_s2sbi(sb);
struct cl_client_cache *cache = &sbi->ll_cache;
+ struct lu_env *env;
+ int refcheck;
int mult, rc, pages_number;
int diff = 0;
int nrpages = 0;
@@ -430,6 +431,10 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
goto out;
}
+ env = cl_env_get(&refcheck);
+ if (IS_ERR(env))
+ return 0;
+
diff = -diff;
while (diff > 0) {
int tmp;
@@ -455,19 +460,20 @@ static ssize_t ll_max_cached_mb_seq_write(struct file *file,
break;
if (!sbi->ll_dt_exp) { /* being initialized */
- rc = -ENODEV;
- break;
+ rc = 0;
+ goto out;
}
/* difficult - have to ask OSCs to drop LRU slots. */
tmp = diff << 1;
- rc = obd_set_info_async(NULL, sbi->ll_dt_exp,
+ rc = obd_set_info_async(env, sbi->ll_dt_exp,
sizeof(KEY_CACHE_LRU_SHRINK),
KEY_CACHE_LRU_SHRINK,
sizeof(tmp), &tmp, NULL);
if (rc < 0)
break;
}
+ cl_env_put(env, &refcheck);
out:
if (rc >= 0) {
@@ -818,6 +824,23 @@ static ssize_t xattr_cache_store(struct kobject *kobj,
}
LUSTRE_RW_ATTR(xattr_cache);
+static ssize_t unstable_stats_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct ll_sb_info *sbi = container_of(kobj, struct ll_sb_info,
+ ll_kobj);
+ struct cl_client_cache *cache = &sbi->ll_cache;
+ int pages, mb;
+
+ pages = atomic_read(&cache->ccc_unstable_nr);
+ mb = (pages * PAGE_SIZE) >> 20;
+
+ return sprintf(buf, "unstable_pages: %8d\n"
+ "unstable_mb: %8d\n", pages, mb);
+}
+LUSTRE_RO_ATTR(unstable_stats);
+
static struct lprocfs_vars lprocfs_llite_obd_vars[] = {
/* { "mntpt_path", ll_rd_path, 0, 0 }, */
{ "site", &ll_site_stats_fops, NULL, 0 },
@@ -853,6 +876,7 @@ static struct attribute *llite_attrs[] = {
&lustre_attr_max_easize.attr,
&lustre_attr_default_easize.attr,
&lustre_attr_xattr_cache.attr,
+ &lustre_attr_unstable_stats.attr,
NULL,
};
@@ -953,6 +977,7 @@ static const char *ra_stat_string[] = {
[RA_STAT_EOF] = "read-ahead to EOF",
[RA_STAT_MAX_IN_FLIGHT] = "hit max r-a issue",
[RA_STAT_WRONG_GRAB_PAGE] = "wrong page from grab_cache_page",
+ [RA_STAT_FAILED_REACH_END] = "failed to reach end"
};
int ldebugfs_register_mountpoint(struct dentry *parent,
diff --git a/drivers/staging/lustre/lustre/llite/namei.c b/drivers/staging/lustre/lustre/llite/namei.c
index f8f98e4e8258..5eba0ebae10f 100644
--- a/drivers/staging/lustre/lustre/llite/namei.c
+++ b/drivers/staging/lustre/lustre/llite/namei.c
@@ -128,12 +128,14 @@ struct inode *ll_iget(struct super_block *sb, ino_t hash,
if (rc != 0) {
iget_failed(inode);
inode = NULL;
- } else
+ } else {
unlock_new_inode(inode);
- } else if (!(inode->i_state & (I_FREEING | I_CLEAR)))
+ }
+ } else if (!(inode->i_state & (I_FREEING | I_CLEAR))) {
ll_update_inode(inode, md);
- CDEBUG(D_VFSTRACE, "got inode: %p for "DFID"\n",
- inode, PFID(&md->body->fid1));
+ CDEBUG(D_VFSTRACE, "got inode: "DFID"(%p)\n",
+ PFID(&md->body->fid1), inode);
+ }
}
return inode;
}
@@ -188,7 +190,7 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
break;
/* Invalidate all dentries associated with this inode */
- LASSERT(lock->l_flags & LDLM_FL_CANCELING);
+ LASSERT(ldlm_is_canceling(lock));
if (!fid_res_name_eq(ll_inode2fid(inode),
&lock->l_resource->lr_name)) {
@@ -255,8 +257,8 @@ int ll_md_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
}
if ((bits & MDS_INODELOCK_UPDATE) && S_ISDIR(inode->i_mode)) {
- CDEBUG(D_INODE, "invalidating inode %lu\n",
- inode->i_ino);
+ CDEBUG(D_INODE, "invalidating inode "DFID"\n",
+ PFID(ll_inode2fid(inode)));
truncate_inode_pages(inode->i_mapping, 0);
ll_invalidate_negative_children(inode);
}
@@ -476,9 +478,8 @@ static struct dentry *ll_lookup_it(struct inode *parent, struct dentry *dentry,
if (dentry->d_name.len > ll_i2sbi(parent)->ll_namelen)
return ERR_PTR(-ENAMETOOLONG);
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),intent=%s\n",
- dentry, parent->i_ino,
- parent->i_generation, parent, LL_IT2STR(it));
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p),intent=%s\n",
+ dentry, PFID(ll_inode2fid(parent)), parent, LL_IT2STR(it));
if (d_mountpoint(dentry))
CERROR("Tell Peter, lookup on mtpt, it %s\n", LL_IT2STR(it));
@@ -553,9 +554,8 @@ static struct dentry *ll_lookup_nd(struct inode *parent, struct dentry *dentry,
struct lookup_intent *itp, it = { .it_op = IT_GETATTR };
struct dentry *de;
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),flags=%u\n",
- dentry, parent->i_ino,
- parent->i_generation, parent, flags);
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p),flags=%u\n",
+ dentry, PFID(ll_inode2fid(parent)), parent, flags);
/* Optimize away (CREATE && !OPEN). Let .create handle the race. */
if ((flags & LOOKUP_CREATE) && !(flags & LOOKUP_OPEN))
@@ -586,10 +586,9 @@ static int ll_atomic_open(struct inode *dir, struct dentry *dentry,
long long lookup_flags = LOOKUP_OPEN;
int rc = 0;
- CDEBUG(D_VFSTRACE,
- "VFS Op:name=%pd,dir=%lu/%u(%p),file %p,open_flags %x,mode %x opened %d\n",
- dentry, dir->i_ino,
- dir->i_generation, dir, file, open_flags, mode, *opened);
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p),file %p,open_flags %x,mode %x opened %d\n",
+ dentry, PFID(ll_inode2fid(dir)), dir, file, open_flags, mode,
+ *opened);
it = kzalloc(sizeof(*it), GFP_NOFS);
if (!it)
@@ -680,8 +679,8 @@ static struct inode *ll_create_node(struct inode *dir, struct lookup_intent *it)
* lock on the inode. Since we finally have an inode pointer,
* stuff it in the lock.
*/
- CDEBUG(D_DLMTRACE, "setting l_ast_data to inode %p (%lu/%u)\n",
- inode, inode->i_ino, inode->i_generation);
+ CDEBUG(D_DLMTRACE, "setting l_ast_data to inode "DFID"(%p)\n",
+	       PFID(ll_inode2fid(inode)), inode);
ll_set_lock_data(sbi->ll_md_exp, inode, it, NULL);
out:
ptlrpc_req_finished(request);
@@ -708,9 +707,8 @@ static int ll_create_it(struct inode *dir, struct dentry *dentry, int mode,
struct inode *inode;
int rc = 0;
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),intent=%s\n",
- dentry, dir->i_ino,
- dir->i_generation, dir, LL_IT2STR(it));
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p), intent=%s\n",
+ dentry, PFID(ll_inode2fid(dir)), dir, LL_IT2STR(it));
rc = it_open_error(DISP_OPEN_CREATE, it);
if (rc)
@@ -733,8 +731,9 @@ static void ll_update_times(struct ptlrpc_request *request,
LASSERT(body);
if (body->valid & OBD_MD_FLMTIME &&
body->mtime > LTIME_S(inode->i_mtime)) {
- CDEBUG(D_INODE, "setting ino %lu mtime from %lu to %llu\n",
- inode->i_ino, LTIME_S(inode->i_mtime), body->mtime);
+ CDEBUG(D_INODE, "setting fid "DFID" mtime from %lu to %llu\n",
+ PFID(ll_inode2fid(inode)), LTIME_S(inode->i_mtime),
+ body->mtime);
LTIME_S(inode->i_mtime) = body->mtime;
}
if (body->valid & OBD_MD_FLCTIME &&
@@ -791,9 +790,9 @@ static int ll_mknod(struct inode *dir, struct dentry *dchild,
{
int err;
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p) mode %o dev %x\n",
- dchild, dir->i_ino, dir->i_generation, dir,
- mode, old_encode_dev(rdev));
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p) mode %o dev %x\n",
+ dchild, PFID(ll_inode2fid(dir)), dir, mode,
+ old_encode_dev(rdev));
if (!IS_POSIXACL(dir) || !exp_connect_umask(ll_i2mdexp(dir)))
mode &= ~current_umask();
@@ -831,9 +830,8 @@ static int ll_create_nd(struct inode *dir, struct dentry *dentry,
{
int rc;
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),flags=%u, excl=%d\n",
- dentry, dir->i_ino,
- dir->i_generation, dir, mode, want_excl);
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p), flags=%u, excl=%d\n",
+ dentry, PFID(ll_inode2fid(dir)), dir, mode, want_excl);
rc = ll_mknod(dir, dentry, mode, 0);
@@ -845,12 +843,6 @@ static int ll_create_nd(struct inode *dir, struct dentry *dentry,
return rc;
}
-static inline void ll_get_child_fid(struct dentry *child, struct lu_fid *fid)
-{
- if (d_really_is_positive(child))
- *fid = *ll_inode2fid(d_inode(child));
-}
-
int ll_objects_destroy(struct ptlrpc_request *request, struct inode *dir)
{
struct mdt_body *body;
@@ -927,23 +919,25 @@ out:
* is any lock existing. They will recycle dentries and inodes based upon locks
* too. b=20433
*/
-static int ll_unlink(struct inode *dir, struct dentry *dentry)
+static int ll_unlink(struct inode *dir, struct dentry *dchild)
{
struct ptlrpc_request *request = NULL;
struct md_op_data *op_data;
int rc;
CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p)\n",
- dentry, dir->i_ino, dir->i_generation, dir);
+ dchild, dir->i_ino, dir->i_generation, dir);
op_data = ll_prep_md_op_data(NULL, dir, NULL,
- dentry->d_name.name,
- dentry->d_name.len,
+ dchild->d_name.name,
+ dchild->d_name.len,
0, LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
return PTR_ERR(op_data);
- ll_get_child_fid(dentry, &op_data->op_fid3);
+ if (dchild && dchild->d_inode)
+ op_data->op_fid3 = *ll_inode2fid(dchild->d_inode);
+
op_data->op_fid2 = op_data->op_fid3;
rc = md_unlink(ll_i2sbi(dir)->ll_md_exp, op_data, &request);
ll_finish_md_op_data(op_data);
@@ -963,8 +957,8 @@ static int ll_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
{
int err;
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p)\n",
- dentry, dir->i_ino, dir->i_generation, dir);
+	CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p)\n",
+ dentry, PFID(ll_inode2fid(dir)), dir);
if (!IS_POSIXACL(dir) || !exp_connect_umask(ll_i2mdexp(dir)))
mode &= ~current_umask();
@@ -977,23 +971,25 @@ static int ll_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
return err;
}
-static int ll_rmdir(struct inode *dir, struct dentry *dentry)
+static int ll_rmdir(struct inode *dir, struct dentry *dchild)
{
struct ptlrpc_request *request = NULL;
struct md_op_data *op_data;
int rc;
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p)\n",
- dentry, dir->i_ino, dir->i_generation, dir);
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p)\n",
+ dchild, PFID(ll_inode2fid(dir)), dir);
op_data = ll_prep_md_op_data(NULL, dir, NULL,
- dentry->d_name.name,
- dentry->d_name.len,
+ dchild->d_name.name,
+ dchild->d_name.len,
S_IFDIR, LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
return PTR_ERR(op_data);
- ll_get_child_fid(dentry, &op_data->op_fid3);
+ if (dchild && dchild->d_inode)
+ op_data->op_fid3 = *ll_inode2fid(dchild->d_inode);
+
op_data->op_fid2 = op_data->op_fid3;
rc = md_unlink(ll_i2sbi(dir)->ll_md_exp, op_data, &request);
ll_finish_md_op_data(op_data);
@@ -1011,9 +1007,8 @@ static int ll_symlink(struct inode *dir, struct dentry *dentry,
{
int err;
- CDEBUG(D_VFSTRACE, "VFS Op:name=%pd,dir=%lu/%u(%p),target=%.*s\n",
- dentry, dir->i_ino, dir->i_generation,
- dir, 3000, oldname);
+ CDEBUG(D_VFSTRACE, "VFS Op:name=%pd, dir="DFID"(%p),target=%.*s\n",
+ dentry, PFID(ll_inode2fid(dir)), dir, 3000, oldname);
err = ll_new_node(dir, dentry, oldname, S_IFLNK | S_IRWXUGO,
0, LUSTRE_OPC_SYMLINK);
@@ -1033,10 +1028,9 @@ static int ll_link(struct dentry *old_dentry, struct inode *dir,
struct md_op_data *op_data;
int err;
- CDEBUG(D_VFSTRACE,
- "VFS Op: inode=%lu/%u(%p), dir=%lu/%u(%p), target=%pd\n",
- src->i_ino, src->i_generation, src, dir->i_ino,
- dir->i_generation, dir, new_dentry);
+ CDEBUG(D_VFSTRACE, "VFS Op: inode="DFID"(%p), dir="DFID"(%p), target=%pd\n",
+ PFID(ll_inode2fid(src)), src, PFID(ll_inode2fid(dir)), dir,
+ new_dentry);
op_data = ll_prep_md_op_data(NULL, src, dir, new_dentry->d_name.name,
new_dentry->d_name.len,
@@ -1056,42 +1050,45 @@ out:
return err;
}
-static int ll_rename(struct inode *old_dir, struct dentry *old_dentry,
- struct inode *new_dir, struct dentry *new_dentry)
+static int ll_rename(struct inode *src, struct dentry *src_dchild,
+ struct inode *tgt, struct dentry *tgt_dchild)
{
struct ptlrpc_request *request = NULL;
- struct ll_sb_info *sbi = ll_i2sbi(old_dir);
+ struct ll_sb_info *sbi = ll_i2sbi(src);
struct md_op_data *op_data;
int err;
CDEBUG(D_VFSTRACE,
- "VFS Op:oldname=%pd,src_dir=%lu/%u(%p),newname=%pd,tgt_dir=%lu/%u(%p)\n",
- old_dentry, old_dir->i_ino, old_dir->i_generation, old_dir,
- new_dentry, new_dir->i_ino, new_dir->i_generation, new_dir);
+ "VFS Op:oldname=%pd, src_dir="DFID"(%p), newname=%pd, tgt_dir="DFID"(%p)\n",
+ src_dchild, PFID(ll_inode2fid(src)), src,
+ tgt_dchild, PFID(ll_inode2fid(tgt)), tgt);
- op_data = ll_prep_md_op_data(NULL, old_dir, new_dir, NULL, 0, 0,
+ op_data = ll_prep_md_op_data(NULL, src, tgt, NULL, 0, 0,
LUSTRE_OPC_ANY, NULL);
if (IS_ERR(op_data))
return PTR_ERR(op_data);
- ll_get_child_fid(old_dentry, &op_data->op_fid3);
- ll_get_child_fid(new_dentry, &op_data->op_fid4);
+ if (src_dchild && src_dchild->d_inode)
+ op_data->op_fid3 = *ll_inode2fid(src_dchild->d_inode);
+ if (tgt_dchild && tgt_dchild->d_inode)
+ op_data->op_fid4 = *ll_inode2fid(tgt_dchild->d_inode);
+
err = md_rename(sbi->ll_md_exp, op_data,
- old_dentry->d_name.name,
- old_dentry->d_name.len,
- new_dentry->d_name.name,
- new_dentry->d_name.len, &request);
+ src_dchild->d_name.name,
+ src_dchild->d_name.len,
+ tgt_dchild->d_name.name,
+ tgt_dchild->d_name.len, &request);
ll_finish_md_op_data(op_data);
if (!err) {
- ll_update_times(request, old_dir);
- ll_update_times(request, new_dir);
+ ll_update_times(request, src);
+ ll_update_times(request, tgt);
ll_stats_ops_tally(sbi, LPROC_LL_RENAME, 1);
- err = ll_objects_destroy(request, old_dir);
+ err = ll_objects_destroy(request, src);
}
ptlrpc_req_finished(request);
if (!err)
- d_move(old_dentry, new_dentry);
+ d_move(src_dchild, tgt_dchild);
return err;
}
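
The unlink, rmdir and rename paths above all repeat the same guard before copying the child FID into the md_op_data. A minimal sketch of that pattern as a helper, purely illustrative (the patch itself open-codes it; the helper name is hypothetical and it assumes op_fid3/op_fid4 are struct lu_fid):

	/* Illustrative only: the repeated "copy the child's FID when the
	 * dentry is positive" guard seen in ll_unlink(), ll_rmdir() and
	 * ll_rename() above, written as a hypothetical helper.
	 */
	static void set_op_child_fid(struct dentry *dchild, struct lu_fid *fid)
	{
		if (dchild && dchild->d_inode)
			*fid = *ll_inode2fid(dchild->d_inode);
	}
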
diff --git a/drivers/staging/lustre/lustre/llite/rw.c b/drivers/staging/lustre/lustre/llite/rw.c
index edab6c5b7e50..336397773fbb 100644
--- a/drivers/staging/lustre/lustre/llite/rw.c
+++ b/drivers/staging/lustre/lustre/llite/rw.c
@@ -63,7 +63,7 @@
* Finalizes cl-data before exiting typical address_space operation. Dual to
* ll_cl_init().
*/
-static void ll_cl_fini(struct ll_cl_context *lcc)
+void ll_cl_fini(struct ll_cl_context *lcc)
{
struct lu_env *env = lcc->lcc_env;
struct cl_io *io = lcc->lcc_io;
@@ -84,200 +84,59 @@ static void ll_cl_fini(struct ll_cl_context *lcc)
* Initializes common cl-data at the typical address_space operation entry
* point.
*/
-static struct ll_cl_context *ll_cl_init(struct file *file,
- struct page *vmpage, int create)
+struct ll_cl_context *ll_cl_init(struct file *file, struct page *vmpage)
{
struct ll_cl_context *lcc;
struct lu_env *env;
struct cl_io *io;
struct cl_object *clob;
- struct ccc_io *cio;
+ struct vvp_io *vio;
int refcheck;
int result = 0;
- clob = ll_i2info(vmpage->mapping->host)->lli_clob;
+ clob = ll_i2info(file_inode(file))->lli_clob;
LASSERT(clob);
env = cl_env_get(&refcheck);
if (IS_ERR(env))
return ERR_CAST(env);
- lcc = &vvp_env_info(env)->vti_io_ctx;
+ lcc = &ll_env_info(env)->lti_io_ctx;
memset(lcc, 0, sizeof(*lcc));
lcc->lcc_env = env;
lcc->lcc_refcheck = refcheck;
lcc->lcc_cookie = current;
- cio = ccc_env_io(env);
- io = cio->cui_cl.cis_io;
- if (!io && create) {
- struct inode *inode = vmpage->mapping->host;
- loff_t pos;
-
- if (inode_trylock(inode)) {
- inode_unlock((inode));
-
- /* this is too bad. Someone is trying to write the
- * page w/o holding inode mutex. This means we can
- * add dirty pages into cache during truncate
- */
- CERROR("Proc %s is dirtying page w/o inode lock, this will break truncate\n",
- current->comm);
- dump_stack();
- LBUG();
- return ERR_PTR(-EIO);
- }
-
- /*
- * Loop-back driver calls ->prepare_write().
- * methods directly, bypassing file system ->write() operation,
- * so cl_io has to be created here.
- */
- io = ccc_env_thread_io(env);
- ll_io_init(io, file, 1);
-
- /* No lock at all for this kind of IO - we can't do it because
- * we have held page lock, it would cause deadlock.
- * XXX: This causes poor performance to loop device - One page
- * per RPC.
- * In order to get better performance, users should use
- * lloop driver instead.
- */
- io->ci_lockreq = CILR_NEVER;
-
- pos = vmpage->index << PAGE_SHIFT;
-
- /* Create a temp IO to serve write. */
- result = cl_io_rw_init(env, io, CIT_WRITE, pos, PAGE_SIZE);
- if (result == 0) {
- cio->cui_fd = LUSTRE_FPRIVATE(file);
- cio->cui_iter = NULL;
- result = cl_io_iter_init(env, io);
- if (result == 0) {
- result = cl_io_lock(env, io);
- if (result == 0)
- result = cl_io_start(env, io);
- }
- } else
- result = io->ci_result;
- }
-
+ vio = vvp_env_io(env);
+ io = vio->vui_cl.cis_io;
lcc->lcc_io = io;
if (!io)
result = -EIO;
- if (result == 0) {
+
+ if (result == 0 && vmpage) {
struct cl_page *page;
LASSERT(io->ci_state == CIS_IO_GOING);
- LASSERT(cio->cui_fd == LUSTRE_FPRIVATE(file));
+ LASSERT(vio->vui_fd == LUSTRE_FPRIVATE(file));
page = cl_page_find(env, clob, vmpage->index, vmpage,
CPT_CACHEABLE);
if (!IS_ERR(page)) {
lcc->lcc_page = page;
lu_ref_add(&page->cp_reference, "cl_io", io);
result = 0;
- } else
+ } else {
result = PTR_ERR(page);
+ }
}
if (result) {
ll_cl_fini(lcc);
lcc = ERR_PTR(result);
}
- CDEBUG(D_VFSTRACE, "%lu@"DFID" -> %d %p %p\n",
- vmpage->index, PFID(lu_object_fid(&clob->co_lu)), result,
- env, io);
- return lcc;
-}
-
-static struct ll_cl_context *ll_cl_get(void)
-{
- struct ll_cl_context *lcc;
- struct lu_env *env;
- int refcheck;
-
- env = cl_env_get(&refcheck);
- LASSERT(!IS_ERR(env));
- lcc = &vvp_env_info(env)->vti_io_ctx;
- LASSERT(env == lcc->lcc_env);
- LASSERT(current == lcc->lcc_cookie);
- cl_env_put(env, &refcheck);
-
- /* env has got in ll_cl_init, so it is still usable. */
return lcc;
}
-/**
- * ->prepare_write() address space operation called by generic_file_write()
- * for every page during write.
- */
-int ll_prepare_write(struct file *file, struct page *vmpage, unsigned from,
- unsigned to)
-{
- struct ll_cl_context *lcc;
- int result;
-
- lcc = ll_cl_init(file, vmpage, 1);
- if (!IS_ERR(lcc)) {
- struct lu_env *env = lcc->lcc_env;
- struct cl_io *io = lcc->lcc_io;
- struct cl_page *page = lcc->lcc_page;
-
- cl_page_assume(env, io, page);
-
- result = cl_io_prepare_write(env, io, page, from, to);
- if (result == 0) {
- /*
- * Add a reference, so that page is not evicted from
- * the cache until ->commit_write() is called.
- */
- cl_page_get(page);
- lu_ref_add(&page->cp_reference, "prepare_write",
- current);
- } else {
- cl_page_unassume(env, io, page);
- ll_cl_fini(lcc);
- }
- /* returning 0 in prepare assumes commit must be called
- * afterwards
- */
- } else {
- result = PTR_ERR(lcc);
- }
- return result;
-}
-
-int ll_commit_write(struct file *file, struct page *vmpage, unsigned from,
- unsigned to)
-{
- struct ll_cl_context *lcc;
- struct lu_env *env;
- struct cl_io *io;
- struct cl_page *page;
- int result = 0;
-
- lcc = ll_cl_get();
- env = lcc->lcc_env;
- page = lcc->lcc_page;
- io = lcc->lcc_io;
-
- LASSERT(cl_page_is_owned(page, io));
- LASSERT(from <= to);
- if (from != to) /* handle short write case. */
- result = cl_io_commit_write(env, io, page, from, to);
- if (cl_page_is_owned(page, io))
- cl_page_unassume(env, io, page);
-
- /*
- * Release reference acquired by ll_prepare_write().
- */
- lu_ref_del(&page->cp_reference, "prepare_write", current);
- cl_page_put(env, page);
- ll_cl_fini(lcc);
- return result;
-}
-
static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which);
/**
@@ -301,7 +160,7 @@ static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which);
*/
static unsigned long ll_ra_count_get(struct ll_sb_info *sbi,
struct ra_io_arg *ria,
- unsigned long pages)
+ unsigned long pages, unsigned long min)
{
struct ll_ra_info *ra = &sbi->ll_ra_info;
long ret;
@@ -341,6 +200,11 @@ static unsigned long ll_ra_count_get(struct ll_sb_info *sbi,
}
out:
+ if (ret < min) {
+ /* override ra limit for maximum performance */
+ atomic_add(min - ret, &ra->ra_cur_pages);
+ ret = min;
+ }
return ret;
}
@@ -357,9 +221,9 @@ static void ll_ra_stats_inc_sbi(struct ll_sb_info *sbi, enum ra_stat which)
lprocfs_counter_incr(sbi->ll_ra_stats, which);
}
-void ll_ra_stats_inc(struct address_space *mapping, enum ra_stat which)
+void ll_ra_stats_inc(struct inode *inode, enum ra_stat which)
{
- struct ll_sb_info *sbi = ll_i2sbi(mapping->host);
+ struct ll_sb_info *sbi = ll_i2sbi(inode);
ll_ra_stats_inc_sbi(sbi, which);
}
@@ -388,61 +252,42 @@ static int index_in_window(unsigned long index, unsigned long point,
return start <= index && index <= end;
}
-static struct ll_readahead_state *ll_ras_get(struct file *f)
+void ll_ras_enter(struct file *f)
{
- struct ll_file_data *fd;
-
- fd = LUSTRE_FPRIVATE(f);
- return &fd->fd_ras;
-}
-
-void ll_ra_read_in(struct file *f, struct ll_ra_read *rar)
-{
- struct ll_readahead_state *ras;
-
- ras = ll_ras_get(f);
+ struct ll_file_data *fd = LUSTRE_FPRIVATE(f);
+ struct ll_readahead_state *ras = &fd->fd_ras;
spin_lock(&ras->ras_lock);
ras->ras_requests++;
ras->ras_request_index = 0;
ras->ras_consecutive_requests++;
- rar->lrr_reader = current;
-
- list_add(&rar->lrr_linkage, &ras->ras_read_beads);
- spin_unlock(&ras->ras_lock);
-}
-
-void ll_ra_read_ex(struct file *f, struct ll_ra_read *rar)
-{
- struct ll_readahead_state *ras;
-
- ras = ll_ras_get(f);
-
- spin_lock(&ras->ras_lock);
- list_del_init(&rar->lrr_linkage);
spin_unlock(&ras->ras_lock);
}
static int cl_read_ahead_page(const struct lu_env *env, struct cl_io *io,
struct cl_page_list *queue, struct cl_page *page,
- struct page *vmpage)
+ struct cl_object *clob, pgoff_t *max_index)
{
- struct ccc_page *cp;
+ struct page *vmpage = page->cp_vmpage;
+ struct vvp_page *vpg;
int rc;
rc = 0;
cl_page_assume(env, io, page);
lu_ref_add(&page->cp_reference, "ra", current);
- cp = cl2ccc_page(cl_page_at(page, &vvp_device_type));
- if (!cp->cpg_defer_uptodate && !PageUptodate(vmpage)) {
- rc = cl_page_is_under_lock(env, io, page);
- if (rc == -EBUSY) {
- cp->cpg_defer_uptodate = 1;
- cp->cpg_ra_used = 0;
+ vpg = cl2vvp_page(cl_object_page_slice(clob, page));
+ if (!vpg->vpg_defer_uptodate && !PageUptodate(vmpage)) {
+ CDEBUG(D_READA, "page index %lu, max_index: %lu\n",
+ vvp_index(vpg), *max_index);
+ if (*max_index == 0 || vvp_index(vpg) > *max_index)
+ rc = cl_page_is_under_lock(env, io, page, max_index);
+ if (rc == 0) {
+ vpg->vpg_defer_uptodate = 1;
+ vpg->vpg_ra_used = 0;
cl_page_list_add(queue, page);
rc = 1;
} else {
- cl_page_delete(env, page);
+ cl_page_discard(env, io, page);
rc = -ENOLCK;
}
} else {
@@ -466,24 +311,25 @@ static int cl_read_ahead_page(const struct lu_env *env, struct cl_io *io,
*/
static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
struct cl_page_list *queue,
- pgoff_t index, struct address_space *mapping)
+ pgoff_t index, pgoff_t *max_index)
{
+ struct cl_object *clob = io->ci_obj;
+ struct inode *inode = vvp_object_inode(clob);
struct page *vmpage;
- struct cl_object *clob = ll_i2info(mapping->host)->lli_clob;
struct cl_page *page;
enum ra_stat which = _NR_RA_STAT; /* keep gcc happy */
int rc = 0;
const char *msg = NULL;
- vmpage = grab_cache_page_nowait(mapping, index);
+ vmpage = grab_cache_page_nowait(inode->i_mapping, index);
if (vmpage) {
/* Check if vmpage was truncated or reclaimed */
- if (vmpage->mapping == mapping) {
+ if (vmpage->mapping == inode->i_mapping) {
page = cl_page_find(env, clob, vmpage->index,
vmpage, CPT_CACHEABLE);
if (!IS_ERR(page)) {
rc = cl_read_ahead_page(env, io, queue,
- page, vmpage);
+ page, clob, max_index);
if (rc == -ENOLCK) {
which = RA_STAT_FAILED_MATCH;
msg = "lock match failed";
@@ -504,7 +350,7 @@ static int ll_read_ahead_page(const struct lu_env *env, struct cl_io *io,
msg = "g_c_p_n failed";
}
if (msg) {
- ll_ra_stats_inc(mapping, which);
+ ll_ra_stats_inc(inode, which);
CDEBUG(D_READA, "%s\n", msg);
}
return rc;
@@ -616,11 +462,12 @@ static int ll_read_ahead_pages(const struct lu_env *env,
struct cl_io *io, struct cl_page_list *queue,
struct ra_io_arg *ria,
unsigned long *reserved_pages,
- struct address_space *mapping,
unsigned long *ra_end)
{
- int rc, count = 0, stride_ria;
- unsigned long page_idx;
+ int rc, count = 0;
+ bool stride_ria;
+ pgoff_t page_idx;
+ pgoff_t max_index = 0;
LASSERT(ria);
RIA_DEBUG(ria);
@@ -631,12 +478,13 @@ static int ll_read_ahead_pages(const struct lu_env *env,
if (ras_inside_ra_window(page_idx, ria)) {
/* If the page is inside the read-ahead window */
rc = ll_read_ahead_page(env, io, queue,
- page_idx, mapping);
+ page_idx, &max_index);
if (rc == 1) {
(*reserved_pages)--;
count++;
- } else if (rc == -ENOLCK)
+ } else if (rc == -ENOLCK) {
break;
+ }
} else if (stride_ria) {
/* If it is not in the read-ahead window, and it is
* read-ahead mode, then check whether it should skip
@@ -666,25 +514,22 @@ static int ll_read_ahead_pages(const struct lu_env *env,
}
int ll_readahead(const struct lu_env *env, struct cl_io *io,
- struct ll_readahead_state *ras, struct address_space *mapping,
- struct cl_page_list *queue, int flags)
+ struct cl_page_list *queue, struct ll_readahead_state *ras,
+ bool hit)
{
struct vvp_io *vio = vvp_env_io(env);
- struct vvp_thread_info *vti = vvp_env_info(env);
- struct cl_attr *attr = ccc_env_thread_attr(env);
+ struct ll_thread_info *lti = ll_env_info(env);
+ struct cl_attr *attr = vvp_env_thread_attr(env);
unsigned long start = 0, end = 0, reserved;
- unsigned long ra_end, len;
+ unsigned long ra_end, len, mlen = 0;
struct inode *inode;
- struct ll_ra_read *bead;
- struct ra_io_arg *ria = &vti->vti_ria;
- struct ll_inode_info *lli;
+ struct ra_io_arg *ria = &lti->lti_ria;
struct cl_object *clob;
int ret = 0;
__u64 kms;
- inode = mapping->host;
- lli = ll_i2info(inode);
- clob = lli->lli_clob;
+ clob = io->ci_obj;
+ inode = vvp_object_inode(clob);
memset(ria, 0, sizeof(*ria));
@@ -696,22 +541,20 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
return ret;
kms = attr->cat_kms;
if (kms == 0) {
- ll_ra_stats_inc(mapping, RA_STAT_ZERO_LEN);
+ ll_ra_stats_inc(inode, RA_STAT_ZERO_LEN);
return 0;
}
spin_lock(&ras->ras_lock);
- if (vio->cui_ra_window_set)
- bead = &vio->cui_bead;
- else
- bead = NULL;
/* Enlarge the RA window to encompass the full read */
- if (bead && ras->ras_window_start + ras->ras_window_len <
- bead->lrr_start + bead->lrr_count) {
- ras->ras_window_len = bead->lrr_start + bead->lrr_count -
+ if (vio->vui_ra_valid &&
+ ras->ras_window_start + ras->ras_window_len <
+ vio->vui_ra_start + vio->vui_ra_count) {
+ ras->ras_window_len = vio->vui_ra_start + vio->vui_ra_count -
ras->ras_window_start;
}
+
/* Reserve a part of the read-ahead window that we'll be issuing */
if (ras->ras_window_len) {
start = ras->ras_next_readahead;
@@ -755,29 +598,48 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
spin_unlock(&ras->ras_lock);
if (end == 0) {
- ll_ra_stats_inc(mapping, RA_STAT_ZERO_WINDOW);
+ ll_ra_stats_inc(inode, RA_STAT_ZERO_WINDOW);
return 0;
}
len = ria_page_count(ria);
- if (len == 0)
+ if (len == 0) {
+ ll_ra_stats_inc(inode, RA_STAT_ZERO_WINDOW);
return 0;
+ }
+
+ CDEBUG(D_READA, DFID ": ria: %lu/%lu, bead: %lu/%lu, hit: %d\n",
+ PFID(lu_object_fid(&clob->co_lu)),
+ ria->ria_start, ria->ria_end,
+ vio->vui_ra_valid ? vio->vui_ra_start : 0,
+ vio->vui_ra_valid ? vio->vui_ra_count : 0,
+ hit);
+
+ /* at least extend the readahead window to cover the current read */
+ if (!hit && vio->vui_ra_valid &&
+ vio->vui_ra_start + vio->vui_ra_count > ria->ria_start) {
+ /* extend to the end of the current read window */
+ mlen = vio->vui_ra_start + vio->vui_ra_count - ria->ria_start;
+ /* trim to RPC boundary */
+ start = ria->ria_start & (PTLRPC_MAX_BRW_PAGES - 1);
+ mlen = min(mlen, PTLRPC_MAX_BRW_PAGES - start);
+ }
- reserved = ll_ra_count_get(ll_i2sbi(inode), ria, len);
+ reserved = ll_ra_count_get(ll_i2sbi(inode), ria, len, mlen);
if (reserved < len)
- ll_ra_stats_inc(mapping, RA_STAT_MAX_IN_FLIGHT);
+ ll_ra_stats_inc(inode, RA_STAT_MAX_IN_FLIGHT);
- CDEBUG(D_READA, "reserved page %lu ra_cur %d ra_max %lu\n", reserved,
+ CDEBUG(D_READA, "reserved pages %lu/%lu/%lu, ra_cur %d, ra_max %lu\n",
+ reserved, len, mlen,
atomic_read(&ll_i2sbi(inode)->ll_ra_info.ra_cur_pages),
ll_i2sbi(inode)->ll_ra_info.ra_max_pages);
- ret = ll_read_ahead_pages(env, io, queue,
- ria, &reserved, mapping, &ra_end);
+ ret = ll_read_ahead_pages(env, io, queue, ria, &reserved, &ra_end);
if (reserved != 0)
ll_ra_count_put(ll_i2sbi(inode), reserved);
if (ra_end == end + 1 && ra_end == (kms >> PAGE_SHIFT))
- ll_ra_stats_inc(mapping, RA_STAT_EOF);
+ ll_ra_stats_inc(inode, RA_STAT_EOF);
/* if we didn't get to the end of the region we reserved from
* the ras we need to go back and update the ras so that the
@@ -789,6 +651,7 @@ int ll_readahead(const struct lu_env *env, struct cl_io *io,
ra_end, end, ria->ria_end);
if (ra_end != end + 1) {
+ ll_ra_stats_inc(inode, RA_STAT_FAILED_REACH_END);
spin_lock(&ras->ras_lock);
if (ra_end < ras->ras_next_readahead &&
index_in_window(ra_end, ras->ras_window_start, 0,
@@ -836,7 +699,6 @@ void ll_readahead_init(struct inode *inode, struct ll_readahead_state *ras)
spin_lock_init(&ras->ras_lock);
ras_reset(inode, ras, 0);
ras->ras_requests = 0;
- INIT_LIST_HEAD(&ras->ras_read_beads);
}
/*
@@ -1059,15 +921,18 @@ void ras_update(struct ll_sb_info *sbi, struct inode *inode,
ras->ras_last_readpage = index;
ras_set_start(inode, ras, index);
- if (stride_io_mode(ras))
+ if (stride_io_mode(ras)) {
/* Since stride readahead is sensitive to the offset
 * of the read-ahead, we use the original offset here
 * instead of ras_window_start, which is RPC aligned.
 */
ras->ras_next_readahead = max(index, ras->ras_next_readahead);
- else
- ras->ras_next_readahead = max(ras->ras_window_start,
- ras->ras_next_readahead);
+ } else {
+ if (ras->ras_next_readahead < ras->ras_window_start)
+ ras->ras_next_readahead = ras->ras_window_start;
+ if (!hit)
+ ras->ras_next_readahead = index + 1;
+ }
RAS_CDEBUG(ras);
/* Trigger RA in the mmap case where ras_consecutive_requests
@@ -1129,7 +994,7 @@ int ll_writepage(struct page *vmpage, struct writeback_control *wbc)
clob = ll_i2info(inode)->lli_clob;
LASSERT(clob);
- io = ccc_env_thread_io(env);
+ io = vvp_env_thread_io(env);
io->ci_obj = clob;
io->ci_ignore_layout = 1;
result = cl_io_init(env, io, CIT_MISC, clob);
@@ -1240,8 +1105,9 @@ int ll_writepages(struct address_space *mapping, struct writeback_control *wbc)
if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0)) {
if (end == OBD_OBJECT_EOF)
- end = i_size_read(inode);
- mapping->writeback_index = (end >> PAGE_SHIFT) + 1;
+ mapping->writeback_index = 0;
+ else
+ mapping->writeback_index = (end >> PAGE_SHIFT) + 1;
}
return result;
}
@@ -1251,7 +1117,7 @@ int ll_readpage(struct file *file, struct page *vmpage)
struct ll_cl_context *lcc;
int result;
- lcc = ll_cl_init(file, vmpage, 0);
+ lcc = ll_cl_init(file, vmpage);
if (!IS_ERR(lcc)) {
struct lu_env *env = lcc->lcc_env;
struct cl_io *io = lcc->lcc_io;
@@ -1273,3 +1139,28 @@ int ll_readpage(struct file *file, struct page *vmpage)
}
return result;
}
+
+int ll_page_sync_io(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page, enum cl_req_type crt)
+{
+ struct cl_2queue *queue;
+ int result;
+
+ LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
+
+ queue = &io->ci_queue;
+ cl_2queue_init_page(queue, page);
+
+ result = cl_io_submit_sync(env, io, crt, queue, 0);
+ LASSERT(cl_page_is_owned(page, io));
+
+ if (crt == CRT_READ)
+ /*
+ * in the CRT_WRITE case, the page is left locked even on
+ * error.
+ */
+ cl_page_list_disown(env, io, &queue->c2_qin);
+ cl_2queue_fini(env, queue);
+
+ return result;
+}
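
Taken together, the ll_ra_count_get() change earlier in this file and the mlen computation in ll_readahead() guarantee that a read which missed the cache still gets at least its own read window read ahead, trimmed to the RPC boundary. A self-contained sketch of just that arithmetic, assuming PTLRPC_MAX_BRW_PAGES is a power of two (the helper name is illustrative, not part of the patch):

	/* Sketch of the minimum-readahead computation in ll_readahead() above. */
	static unsigned long ra_min_pages(pgoff_t ra_start, pgoff_t ra_count,
					  unsigned long ria_start)
	{
		unsigned long mlen, offset;

		if (ra_start + ra_count <= ria_start)
			return 0;	/* read window does not reach this start */
		/* extend to the end of the current read window */
		mlen = ra_start + ra_count - ria_start;
		/* trim to the RPC boundary, as the code above does */
		offset = ria_start & (PTLRPC_MAX_BRW_PAGES - 1);
		return min(mlen, PTLRPC_MAX_BRW_PAGES - offset);
	}

ll_ra_count_get() then bumps ra_cur_pages by whatever is missing so that at least this many pages are reserved even when the global readahead budget is exhausted.
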
diff --git a/drivers/staging/lustre/lustre/llite/rw26.c b/drivers/staging/lustre/lustre/llite/rw26.c
index 0c3459c1a518..c12a048fce59 100644
--- a/drivers/staging/lustre/lustre/llite/rw26.c
+++ b/drivers/staging/lustre/lustre/llite/rw26.c
@@ -95,15 +95,12 @@ static void ll_invalidatepage(struct page *vmpage, unsigned int offset,
if (obj) {
page = cl_vmpage_page(vmpage, obj);
if (page) {
- lu_ref_add(&page->cp_reference,
- "delete", vmpage);
cl_page_delete(env, page);
- lu_ref_del(&page->cp_reference,
- "delete", vmpage);
cl_page_put(env, page);
}
- } else
+ } else {
LASSERT(vmpage->private == 0);
+ }
cl_env_put(env, &refcheck);
}
}
@@ -111,12 +108,12 @@ static void ll_invalidatepage(struct page *vmpage, unsigned int offset,
static int ll_releasepage(struct page *vmpage, gfp_t gfp_mask)
{
- struct cl_env_nest nest;
struct lu_env *env;
+ void *cookie;
struct cl_object *obj;
struct cl_page *page;
struct address_space *mapping;
- int result;
+ int result = 0;
LASSERT(PageLocked(vmpage));
if (PageWriteback(vmpage) || PageDirty(vmpage))
@@ -130,53 +127,42 @@ static int ll_releasepage(struct page *vmpage, gfp_t gfp_mask)
if (!obj)
return 1;
- /* 1 for page allocator, 1 for cl_page and 1 for page cache */
+ /* 1 for caller, 1 for cl_page and 1 for page cache */
if (page_count(vmpage) > 3)
return 0;
- /* TODO: determine what gfp should be used by @gfp_mask. */
- env = cl_env_nested_get(&nest);
- if (IS_ERR(env))
- /* If we can't allocate an env we won't call cl_page_put()
- * later on which further means it's impossible to drop
- * page refcount by cl_page, so ask kernel to not free
- * this page.
- */
- return 0;
-
page = cl_vmpage_page(vmpage, obj);
- result = !page;
- if (page) {
- if (!cl_page_in_use(page)) {
- result = 1;
- cl_page_delete(env, page);
- }
- cl_page_put(env, page);
- }
- cl_env_nested_put(&nest, env);
- return result;
-}
+ if (!page)
+ return 1;
-static int ll_set_page_dirty(struct page *vmpage)
-{
-#if 0
- struct cl_page *page = vvp_vmpage_page_transient(vmpage);
- struct vvp_object *obj = cl_inode2vvp(vmpage->mapping->host);
- struct vvp_page *cpg;
+ cookie = cl_env_reenter();
+ env = cl_env_percpu_get();
+ LASSERT(!IS_ERR(env));
- /*
- * XXX should page method be called here?
- */
- LASSERT(&obj->co_cl == page->cp_obj);
- cpg = cl2vvp_page(cl_page_at(page, &vvp_device_type));
- /*
- * XXX cannot do much here, because page is possibly not locked:
- * sys_munmap()->...
- * ->unmap_page_range()->zap_pte_range()->set_page_dirty().
+ if (!cl_page_in_use(page)) {
+ result = 1;
+ cl_page_delete(env, page);
+ }
+
+ /* To use the percpu env array, the call path cannot be rescheduled;
+ * otherwise the percpu array will be corrupted if ll_releasepage() is
+ * called again on the same CPU.
+ *
+ * If this page holds the last reference of the cl_object, the following
+ * call path may cause a reschedule:
+ * cl_page_put -> cl_page_free -> cl_object_put ->
+ * lu_object_put -> lu_object_free -> lov_delete_raid0.
+ *
+ * However, the kernel can't get rid of this inode until all pages have
+ * been cleaned up. Since we hold the page lock here, it is safe to
+ * assume we won't enter the object delete path.
*/
- vvp_write_pending(obj, cpg);
-#endif
- return __set_page_dirty_nobuffers(vmpage);
+ LASSERT(cl_object_refc(obj) > 1);
+ cl_page_put(env, page);
+
+ cl_env_percpu_put(env);
+ cl_env_reexit(cookie);
+ return result;
}
#define MAX_DIRECTIO_SIZE (2*1024*1024*1024UL)
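
The rewritten ll_releasepage() above relies on the per-CPU environment array instead of allocating a nested env. A minimal sketch of just that bracketing pattern, detached from the page logic; the function name is hypothetical and it assumes, as the comment above does, that the page does not hold the last reference of its object, so nothing here can reschedule:

	/* Illustrative only: the cl_env_reenter()/cl_env_percpu_get() pairing
	 * used by ll_releasepage() above. The body must not sleep, otherwise
	 * another caller on the same CPU could reuse the env underneath us.
	 */
	static void vvp_percpu_env_put_page(struct cl_page *page)
	{
		struct lu_env *env;
		void *cookie;

		cookie = cl_env_reenter();
		env = cl_env_percpu_get();
		LASSERT(!IS_ERR(env));

		cl_page_put(env, page);	/* non-sleeping work only */

		cl_env_percpu_put(env);
		cl_env_reexit(cookie);
	}
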
@@ -266,7 +252,7 @@ ssize_t ll_direct_rw_pages(const struct lu_env *env, struct cl_io *io,
* write directly
*/
if (clp->cp_type == CPT_CACHEABLE) {
- struct page *vmpage = cl_page_vmpage(env, clp);
+ struct page *vmpage = cl_page_vmpage(clp);
struct page *src_page;
struct page *dst_page;
void *src;
@@ -364,7 +350,7 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter)
struct cl_io *io;
struct file *file = iocb->ki_filp;
struct inode *inode = file->f_mapping->host;
- struct ccc_object *obj = cl_inode2ccc(inode);
+ struct vvp_object *obj = cl_inode2vvp(inode);
loff_t file_offset = iocb->ki_pos;
ssize_t count = iov_iter_count(iter);
ssize_t tot_bytes = 0, result = 0;
@@ -376,22 +362,21 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter)
return -EBADF;
/* FIXME: io smaller than PAGE_SIZE is broken on ia64 ??? */
- if ((file_offset & ~CFS_PAGE_MASK) || (count & ~CFS_PAGE_MASK))
+ if ((file_offset & ~PAGE_MASK) || (count & ~PAGE_MASK))
return -EINVAL;
- CDEBUG(D_VFSTRACE,
- "VFS Op:inode=%lu/%u(%p), size=%zd (max %lu), offset=%lld=%llx, pages %zd (max %lu)\n",
- inode->i_ino, inode->i_generation, inode, count, MAX_DIO_SIZE,
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), size=%zd (max %lu), offset=%lld=%llx, pages %zd (max %lu)\n",
+ PFID(ll_inode2fid(inode)), inode, count, MAX_DIO_SIZE,
file_offset, file_offset, count >> PAGE_SHIFT,
MAX_DIO_SIZE >> PAGE_SHIFT);
/* Check that all user buffers are aligned as well */
- if (iov_iter_alignment(iter) & ~CFS_PAGE_MASK)
+ if (iov_iter_alignment(iter) & ~PAGE_MASK)
return -EINVAL;
env = cl_env_get(&refcheck);
LASSERT(!IS_ERR(env));
- io = ccc_env_io(env)->cui_cl.cis_io;
+ io = vvp_env_io(env)->vui_cl.cis_io;
LASSERT(io);
/* 0. Need locking between buffered and direct access, and race with
@@ -401,7 +386,7 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter)
if (iov_iter_rw(iter) == READ)
inode_lock(inode);
- LASSERT(obj->cob_transient_pages == 0);
+ LASSERT(obj->vob_transient_pages == 0);
while (iov_iter_count(iter)) {
struct page **pages;
size_t offs;
@@ -435,8 +420,8 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter)
size > (PAGE_SIZE / sizeof(*pages)) *
PAGE_SIZE) {
size = ((((size / 2) - 1) |
- ~CFS_PAGE_MASK) + 1) &
- CFS_PAGE_MASK;
+ ~PAGE_MASK) + 1) &
+ PAGE_MASK;
CDEBUG(D_VFSTRACE, "DIO size now %lu\n",
size);
continue;
@@ -449,62 +434,213 @@ static ssize_t ll_direct_IO_26(struct kiocb *iocb, struct iov_iter *iter)
file_offset += result;
}
out:
- LASSERT(obj->cob_transient_pages == 0);
+ LASSERT(obj->vob_transient_pages == 0);
if (iov_iter_rw(iter) == READ)
inode_unlock(inode);
if (tot_bytes > 0) {
- if (iov_iter_rw(iter) == WRITE) {
- struct lov_stripe_md *lsm;
-
- lsm = ccc_inode_lsm_get(inode);
- LASSERT(lsm);
- lov_stripe_lock(lsm);
- obd_adjust_kms(ll_i2dtexp(inode), lsm, file_offset, 0);
- lov_stripe_unlock(lsm);
- ccc_inode_lsm_put(inode, lsm);
- }
+ struct vvp_io *vio = vvp_env_io(env);
+
+ /* no commit async for direct IO */
+ vio->u.write.vui_written += tot_bytes;
}
cl_env_put(env, &refcheck);
- return tot_bytes ? : result;
+ return tot_bytes ? tot_bytes : result;
+}
+
+/**
+ * Prepare partially written-to page for a write.
+ */
+static int ll_prepare_partial_page(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *pg)
+{
+ struct cl_attr *attr = vvp_env_thread_attr(env);
+ struct cl_object *obj = io->ci_obj;
+ struct vvp_page *vpg = cl_object_page_slice(obj, pg);
+ loff_t offset = cl_offset(obj, vvp_index(vpg));
+ int result;
+
+ cl_object_attr_lock(obj);
+ result = cl_object_attr_get(env, obj, attr);
+ cl_object_attr_unlock(obj);
+ if (result == 0) {
+ /*
+ * If we are writing to a new page, there is no need to read old data.
+ * The extent locking will have updated the KMS, and for our
+ * purposes here we can treat it like i_size.
+ */
+ if (attr->cat_kms <= offset) {
+ char *kaddr = kmap_atomic(vpg->vpg_page);
+
+ memset(kaddr, 0, cl_page_size(obj));
+ kunmap_atomic(kaddr);
+ } else if (vpg->vpg_defer_uptodate) {
+ vpg->vpg_ra_used = 1;
+ } else {
+ result = ll_page_sync_io(env, io, pg, CRT_READ);
+ }
+ }
+ return result;
}
static int ll_write_begin(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned flags,
struct page **pagep, void **fsdata)
{
+ struct ll_cl_context *lcc;
+ struct lu_env *env;
+ struct cl_io *io;
+ struct cl_page *page;
+ struct cl_object *clob = ll_i2info(mapping->host)->lli_clob;
pgoff_t index = pos >> PAGE_SHIFT;
- struct page *page;
- int rc;
- unsigned from = pos & (PAGE_SIZE - 1);
+ struct page *vmpage = NULL;
+ unsigned int from = pos & (PAGE_SIZE - 1);
+ unsigned int to = from + len;
+ int result = 0;
- page = grab_cache_page_write_begin(mapping, index, flags);
- if (!page)
- return -ENOMEM;
+ CDEBUG(D_VFSTRACE, "Writing %lu of %d to %d bytes\n", index, from, len);
- *pagep = page;
+ lcc = ll_cl_init(file, NULL);
+ if (IS_ERR(lcc)) {
+ result = PTR_ERR(lcc);
+ goto out;
+ }
- rc = ll_prepare_write(file, page, from, from + len);
- if (rc) {
- unlock_page(page);
- put_page(page);
+ env = lcc->lcc_env;
+ io = lcc->lcc_io;
+
+ /* To avoid deadlock, try to lock page first. */
+ vmpage = grab_cache_page_nowait(mapping, index);
+ if (unlikely(!vmpage || PageDirty(vmpage) || PageWriteback(vmpage))) {
+ struct vvp_io *vio = vvp_env_io(env);
+ struct cl_page_list *plist = &vio->u.write.vui_queue;
+
+ /* If the page is already in the dirty cache, we have to commit
+ * the queued pages right now; otherwise this may deadlock,
+ * because we would hold the page lock of a dirty page while
+ * requesting more grants. It's okay for the dirty page to be the
+ * first one in the commit page list, though.
+ */
+ if (vmpage && plist->pl_nr > 0) {
+ unlock_page(vmpage);
+ put_page(vmpage);
+ vmpage = NULL;
+ }
+
+ /* commit pages and then wait for page lock */
+ result = vvp_io_write_commit(env, io);
+ if (result < 0)
+ goto out;
+
+ if (!vmpage) {
+ vmpage = grab_cache_page_write_begin(mapping, index,
+ flags);
+ if (!vmpage) {
+ result = -ENOMEM;
+ goto out;
+ }
+ }
}
- return rc;
+
+ page = cl_page_find(env, clob, vmpage->index, vmpage, CPT_CACHEABLE);
+ if (IS_ERR(page)) {
+ result = PTR_ERR(page);
+ goto out;
+ }
+
+ lcc->lcc_page = page;
+ lu_ref_add(&page->cp_reference, "cl_io", io);
+
+ cl_page_assume(env, io, page);
+ if (!PageUptodate(vmpage)) {
+ /*
+ * We're completely overwriting an existing page,
+ * so _don't_ set it up to date until commit_write
+ */
+ if (from == 0 && to == PAGE_SIZE) {
+ CL_PAGE_HEADER(D_PAGE, env, page, "full page write\n");
+ POISON_PAGE(vmpage, 0x11);
+ } else {
+ /* TODO: can be optimized at OSC layer to check if it
+ * is a lockless IO. In that case, it's not necessary
+ * to read the data.
+ */
+ result = ll_prepare_partial_page(env, io, page);
+ if (result == 0)
+ SetPageUptodate(vmpage);
+ }
+ }
+ if (result < 0)
+ cl_page_unassume(env, io, page);
+out:
+ if (result < 0) {
+ if (vmpage) {
+ unlock_page(vmpage);
+ put_page(vmpage);
+ }
+ if (!IS_ERR(lcc))
+ ll_cl_fini(lcc);
+ } else {
+ *pagep = vmpage;
+ *fsdata = lcc;
+ }
+ return result;
}
static int ll_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ struct page *vmpage, void *fsdata)
{
+ struct ll_cl_context *lcc = fsdata;
+ struct lu_env *env;
+ struct cl_io *io;
+ struct vvp_io *vio;
+ struct cl_page *page;
unsigned from = pos & (PAGE_SIZE - 1);
- int rc;
+ bool unplug = false;
+ int result = 0;
+
+ put_page(vmpage);
+
+ env = lcc->lcc_env;
+ page = lcc->lcc_page;
+ io = lcc->lcc_io;
+ vio = vvp_env_io(env);
+
+ LASSERT(cl_page_is_owned(page, io));
+ if (copied > 0) {
+ struct cl_page_list *plist = &vio->u.write.vui_queue;
+
+ lcc->lcc_page = NULL; /* page will be queued */
+
+ /* Add it into write queue */
+ cl_page_list_add(plist, page);
+ if (plist->pl_nr == 1) /* first page */
+ vio->u.write.vui_from = from;
+ else
+ LASSERT(from == 0);
+ vio->u.write.vui_to = from + copied;
+
+ /* We may have one full RPC, commit it soon */
+ if (plist->pl_nr >= PTLRPC_MAX_BRW_PAGES)
+ unplug = true;
+
+ CL_PAGE_DEBUG(D_VFSTRACE, env, page,
+ "queued page: %d.\n", plist->pl_nr);
+ } else {
+ cl_page_disown(env, io, page);
+
+ /* the page list is no longer contiguous, commit it now */
+ unplug = true;
+ }
- rc = ll_commit_write(file, page, from, from + copied);
- unlock_page(page);
- put_page(page);
+ if (unplug ||
+ file->f_flags & O_SYNC || IS_SYNC(file_inode(file)))
+ result = vvp_io_write_commit(env, io);
- return rc ?: copied;
+ ll_cl_fini(lcc);
+ return result >= 0 ? copied : result;
}
#ifdef CONFIG_MIGRATION
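
The new ll_write_end() either queues the copied page for a later bulk commit or flushes the queue immediately. A boiled-down sketch of that decision only, under the same assumptions the code above makes (PTLRPC_MAX_BRW_PAGES pages fill one RPC); the helper name is illustrative, not part of the patch:

	/* Illustrative only: the commit decision made at the end of
	 * ll_write_end() above.
	 */
	static bool ll_write_end_need_commit(struct cl_page_list *plist,
					     struct file *file,
					     unsigned int copied)
	{
		if (copied == 0)
			return true;	/* page list is no longer contiguous */
		if (plist->pl_nr >= PTLRPC_MAX_BRW_PAGES)
			return true;	/* one full RPC accumulated */
		return (file->f_flags & O_SYNC) || IS_SYNC(file_inode(file));
	}

Deferring the commit this way lets consecutive small writes be merged into a single RPC instead of issuing one RPC per page.
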
@@ -523,7 +659,7 @@ const struct address_space_operations ll_aops = {
.direct_IO = ll_direct_IO_26,
.writepage = ll_writepage,
.writepages = ll_writepages,
- .set_page_dirty = ll_set_page_dirty,
+ .set_page_dirty = __set_page_dirty_nobuffers,
.write_begin = ll_write_begin,
.write_end = ll_write_end,
.invalidatepage = ll_invalidatepage,
diff --git a/drivers/staging/lustre/lustre/llite/statahead.c b/drivers/staging/lustre/lustre/llite/statahead.c
index 99ffd1589df8..6322f88661e8 100644
--- a/drivers/staging/lustre/lustre/llite/statahead.c
+++ b/drivers/staging/lustre/lustre/llite/statahead.c
@@ -661,8 +661,9 @@ static void ll_post_statahead(struct ll_statahead_info *sai)
if (rc)
goto out;
- CDEBUG(D_DLMTRACE, "setting l_data to inode %p (%lu/%u)\n",
- child, child->i_ino, child->i_generation);
+ CDEBUG(D_DLMTRACE, "%s: setting l_data to inode "DFID"%p\n",
+ ll_get_fsname(child->i_sb, NULL, 0),
+ PFID(ll_inode2fid(child)), child);
ll_set_lock_data(ll_i2sbi(dir)->ll_md_exp, child, it, NULL);
entry->se_inode = child;
@@ -1591,13 +1592,11 @@ int do_statahead_enter(struct inode *dir, struct dentry **dentryp,
*dentryp = alias;
} else if (d_inode(*dentryp) != inode) {
/* revalidate, but inode is recreated */
- CDEBUG(D_READA,
- "stale dentry %pd inode %lu/%u, statahead inode %lu/%u\n",
- *dentryp,
- d_inode(*dentryp)->i_ino,
- d_inode(*dentryp)->i_generation,
- inode->i_ino,
- inode->i_generation);
+ CDEBUG(D_READA, "%s: stale dentry %pd inode "DFID", statahead inode "DFID"\n",
+ ll_get_fsname(d_inode(*dentryp)->i_sb, NULL, 0),
+ *dentryp,
+ PFID(ll_inode2fid(d_inode(*dentryp))),
+ PFID(ll_inode2fid(inode)));
ll_sai_unplug(sai, entry);
return -ESTALE;
} else {
diff --git a/drivers/staging/lustre/lustre/llite/super25.c b/drivers/staging/lustre/lustre/llite/super25.c
index 61856d37afc5..415750b0bff4 100644
--- a/drivers/staging/lustre/lustre/llite/super25.c
+++ b/drivers/staging/lustre/lustre/llite/super25.c
@@ -164,9 +164,18 @@ static int __init lustre_init(void)
if (rc != 0)
goto out_sysfs;
+ cl_inode_fini_env = cl_env_alloc(&cl_inode_fini_refcheck,
+ LCT_REMEMBER | LCT_NOREF);
+ if (IS_ERR(cl_inode_fini_env)) {
+ rc = PTR_ERR(cl_inode_fini_env);
+ goto out_vvp;
+ }
+
+ cl_inode_fini_env->le_ctx.lc_cookie = 0x4;
+
rc = ll_xattr_init();
if (rc != 0)
- goto out_vvp;
+ goto out_inode_fini_env;
lustre_register_client_fill_super(ll_fill_super);
lustre_register_kill_super_cb(ll_kill_super);
@@ -174,6 +183,8 @@ static int __init lustre_init(void)
return 0;
+out_inode_fini_env:
+ cl_env_put(cl_inode_fini_env, &cl_inode_fini_refcheck);
out_vvp:
vvp_global_fini();
out_sysfs:
@@ -198,6 +209,7 @@ static void __exit lustre_exit(void)
kset_unregister(llite_kset);
ll_xattr_fini();
+ cl_env_put(cl_inode_fini_env, &cl_inode_fini_refcheck);
vvp_global_fini();
kmem_cache_destroy(ll_inode_cachep);
diff --git a/drivers/staging/lustre/lustre/llite/symlink.c b/drivers/staging/lustre/lustre/llite/symlink.c
index 46d03ea48352..3fc736ccf85e 100644
--- a/drivers/staging/lustre/lustre/llite/symlink.c
+++ b/drivers/staging/lustre/lustre/llite/symlink.c
@@ -77,7 +77,9 @@ static int ll_readlink_internal(struct inode *inode,
ll_finish_md_op_data(op_data);
if (rc) {
if (rc != -ENOENT)
- CERROR("inode %lu: rc = %d\n", inode->i_ino, rc);
+ CERROR("%s: inode "DFID": rc = %d\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(ll_inode2fid(inode)), rc);
goto failed;
}
@@ -90,8 +92,10 @@ static int ll_readlink_internal(struct inode *inode,
LASSERT(symlen != 0);
if (body->eadatasize != symlen) {
- CERROR("inode %lu: symlink length %d not expected %d\n",
- inode->i_ino, body->eadatasize - 1, symlen - 1);
+ CERROR("%s: inode "DFID": symlink length %d not expected %d\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(ll_inode2fid(inode)), body->eadatasize - 1,
+ symlen - 1);
rc = -EPROTO;
goto failed;
}
diff --git a/drivers/staging/lustre/lustre/llite/vvp_dev.c b/drivers/staging/lustre/lustre/llite/vvp_dev.c
index 282b70b776da..47101de1c020 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_dev.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_dev.c
@@ -36,6 +36,7 @@
* cl_device and cl_device_type implementation for VVP layer.
*
* Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@intel.com>
*/
#define DEBUG_SUBSYSTEM S_LLITE
@@ -56,13 +57,33 @@
* "llite_" (var. "ll_") prefix.
*/
-static struct kmem_cache *vvp_thread_kmem;
+static struct kmem_cache *ll_thread_kmem;
+struct kmem_cache *vvp_lock_kmem;
+struct kmem_cache *vvp_object_kmem;
+struct kmem_cache *vvp_req_kmem;
static struct kmem_cache *vvp_session_kmem;
+static struct kmem_cache *vvp_thread_kmem;
+
static struct lu_kmem_descr vvp_caches[] = {
{
- .ckd_cache = &vvp_thread_kmem,
- .ckd_name = "vvp_thread_kmem",
- .ckd_size = sizeof(struct vvp_thread_info),
+ .ckd_cache = &ll_thread_kmem,
+ .ckd_name = "ll_thread_kmem",
+ .ckd_size = sizeof(struct ll_thread_info),
+ },
+ {
+ .ckd_cache = &vvp_lock_kmem,
+ .ckd_name = "vvp_lock_kmem",
+ .ckd_size = sizeof(struct vvp_lock),
+ },
+ {
+ .ckd_cache = &vvp_object_kmem,
+ .ckd_name = "vvp_object_kmem",
+ .ckd_size = sizeof(struct vvp_object),
+ },
+ {
+ .ckd_cache = &vvp_req_kmem,
+ .ckd_name = "vvp_req_kmem",
+ .ckd_size = sizeof(struct vvp_req),
},
{
.ckd_cache = &vvp_session_kmem,
@@ -70,29 +91,40 @@ static struct lu_kmem_descr vvp_caches[] = {
.ckd_size = sizeof(struct vvp_session)
},
{
+ .ckd_cache = &vvp_thread_kmem,
+ .ckd_name = "vvp_thread_kmem",
+ .ckd_size = sizeof(struct vvp_thread_info),
+ },
+ {
.ckd_cache = NULL
}
};
-static void *vvp_key_init(const struct lu_context *ctx,
- struct lu_context_key *key)
+static void *ll_thread_key_init(const struct lu_context *ctx,
+ struct lu_context_key *key)
{
struct vvp_thread_info *info;
- info = kmem_cache_zalloc(vvp_thread_kmem, GFP_NOFS);
+ info = kmem_cache_zalloc(ll_thread_kmem, GFP_NOFS);
if (!info)
info = ERR_PTR(-ENOMEM);
return info;
}
-static void vvp_key_fini(const struct lu_context *ctx,
- struct lu_context_key *key, void *data)
+static void ll_thread_key_fini(const struct lu_context *ctx,
+ struct lu_context_key *key, void *data)
{
struct vvp_thread_info *info = data;
- kmem_cache_free(vvp_thread_kmem, info);
+ kmem_cache_free(ll_thread_kmem, info);
}
+struct lu_context_key ll_thread_key = {
+ .lct_tags = LCT_CL_THREAD,
+ .lct_init = ll_thread_key_init,
+ .lct_fini = ll_thread_key_fini
+};
+
static void *vvp_session_key_init(const struct lu_context *ctx,
struct lu_context_key *key)
{
@@ -112,34 +144,127 @@ static void vvp_session_key_fini(const struct lu_context *ctx,
kmem_cache_free(vvp_session_kmem, session);
}
-struct lu_context_key vvp_key = {
- .lct_tags = LCT_CL_THREAD,
- .lct_init = vvp_key_init,
- .lct_fini = vvp_key_fini
-};
-
struct lu_context_key vvp_session_key = {
.lct_tags = LCT_SESSION,
.lct_init = vvp_session_key_init,
.lct_fini = vvp_session_key_fini
};
+void *vvp_thread_key_init(const struct lu_context *ctx,
+ struct lu_context_key *key)
+{
+ struct vvp_thread_info *vti;
+
+ vti = kmem_cache_zalloc(vvp_thread_kmem, GFP_NOFS);
+ if (!vti)
+ vti = ERR_PTR(-ENOMEM);
+ return vti;
+}
+
+void vvp_thread_key_fini(const struct lu_context *ctx,
+ struct lu_context_key *key, void *data)
+{
+ struct vvp_thread_info *vti = data;
+
+ kmem_cache_free(vvp_thread_kmem, vti);
+}
+
+struct lu_context_key vvp_thread_key = {
+ .lct_tags = LCT_CL_THREAD,
+ .lct_init = vvp_thread_key_init,
+ .lct_fini = vvp_thread_key_fini
+};
+
/* type constructor/destructor: vvp_type_{init,fini,start,stop}(). */
-LU_TYPE_INIT_FINI(vvp, &ccc_key, &ccc_session_key, &vvp_key, &vvp_session_key);
+LU_TYPE_INIT_FINI(vvp, &vvp_thread_key, &ll_thread_key, &vvp_session_key);
static const struct lu_device_operations vvp_lu_ops = {
.ldo_object_alloc = vvp_object_alloc
};
static const struct cl_device_operations vvp_cl_ops = {
- .cdo_req_init = ccc_req_init
+ .cdo_req_init = vvp_req_init
};
+static struct lu_device *vvp_device_free(const struct lu_env *env,
+ struct lu_device *d)
+{
+ struct vvp_device *vdv = lu2vvp_dev(d);
+ struct cl_site *site = lu2cl_site(d->ld_site);
+ struct lu_device *next = cl2lu_dev(vdv->vdv_next);
+
+ if (d->ld_site) {
+ cl_site_fini(site);
+ kfree(site);
+ }
+ cl_device_fini(lu2cl_dev(d));
+ kfree(vdv);
+ return next;
+}
+
static struct lu_device *vvp_device_alloc(const struct lu_env *env,
struct lu_device_type *t,
struct lustre_cfg *cfg)
{
- return ccc_device_alloc(env, t, cfg, &vvp_lu_ops, &vvp_cl_ops);
+ struct vvp_device *vdv;
+ struct lu_device *lud;
+ struct cl_site *site;
+ int rc;
+
+ vdv = kzalloc(sizeof(*vdv), GFP_NOFS);
+ if (!vdv)
+ return ERR_PTR(-ENOMEM);
+
+ lud = &vdv->vdv_cl.cd_lu_dev;
+ cl_device_init(&vdv->vdv_cl, t);
+ vvp2lu_dev(vdv)->ld_ops = &vvp_lu_ops;
+ vdv->vdv_cl.cd_ops = &vvp_cl_ops;
+
+ site = kzalloc(sizeof(*site), GFP_NOFS);
+ if (site) {
+ rc = cl_site_init(site, &vdv->vdv_cl);
+ if (rc == 0) {
+ rc = lu_site_init_finish(&site->cs_lu);
+ } else {
+ LASSERT(!lud->ld_site);
+ CERROR("Cannot init lu_site, rc %d.\n", rc);
+ kfree(site);
+ }
+ } else {
+ rc = -ENOMEM;
+ }
+ if (rc != 0) {
+ vvp_device_free(env, lud);
+ lud = ERR_PTR(rc);
+ }
+ return lud;
+}
+
+static int vvp_device_init(const struct lu_env *env, struct lu_device *d,
+ const char *name, struct lu_device *next)
+{
+ struct vvp_device *vdv;
+ int rc;
+
+ vdv = lu2vvp_dev(d);
+ vdv->vdv_next = lu2cl_dev(next);
+
+ LASSERT(d->ld_site && next->ld_type);
+ next->ld_site = d->ld_site;
+ rc = next->ld_type->ldt_ops->ldto_device_init(env, next,
+ next->ld_type->ldt_name,
+ NULL);
+ if (rc == 0) {
+ lu_device_get(next);
+ lu_ref_add(&next->ld_reference, "lu-stack", &lu_site_init);
+ }
+ return rc;
+}
+
+static struct lu_device *vvp_device_fini(const struct lu_env *env,
+ struct lu_device *d)
+{
+ return cl2lu_dev(lu2vvp_dev(d)->vdv_next);
}
static const struct lu_device_type_operations vvp_device_type_ops = {
@@ -150,9 +275,9 @@ static const struct lu_device_type_operations vvp_device_type_ops = {
.ldto_stop = vvp_type_stop,
.ldto_device_alloc = vvp_device_alloc,
- .ldto_device_free = ccc_device_free,
- .ldto_device_init = ccc_device_init,
- .ldto_device_fini = ccc_device_fini
+ .ldto_device_free = vvp_device_free,
+ .ldto_device_init = vvp_device_init,
+ .ldto_device_fini = vvp_device_fini,
};
struct lu_device_type vvp_device_type = {
@@ -168,20 +293,27 @@ struct lu_device_type vvp_device_type = {
*/
int vvp_global_init(void)
{
- int result;
+ int rc;
- result = lu_kmem_init(vvp_caches);
- if (result == 0) {
- result = ccc_global_init(&vvp_device_type);
- if (result != 0)
- lu_kmem_fini(vvp_caches);
- }
- return result;
+ rc = lu_kmem_init(vvp_caches);
+ if (rc != 0)
+ return rc;
+
+ rc = lu_device_type_init(&vvp_device_type);
+ if (rc != 0)
+ goto out_kmem;
+
+ return 0;
+
+out_kmem:
+ lu_kmem_fini(vvp_caches);
+
+ return rc;
}
void vvp_global_fini(void)
{
- ccc_global_fini(&vvp_device_type);
+ lu_device_type_fini(&vvp_device_type);
lu_kmem_fini(vvp_caches);
}
@@ -205,13 +337,14 @@ int cl_sb_init(struct super_block *sb)
cl = cl_type_setup(env, NULL, &vvp_device_type,
sbi->ll_dt_exp->exp_obd->obd_lu_dev);
if (!IS_ERR(cl)) {
- cl2ccc_dev(cl)->cdv_sb = sb;
+ cl2vvp_dev(cl)->vdv_sb = sb;
sbi->ll_cl = cl;
sbi->ll_site = cl2lu_dev(cl)->ld_site;
}
cl_env_put(env, &refcheck);
- } else
+ } else {
rc = PTR_ERR(env);
+ }
return rc;
}
@@ -356,23 +489,18 @@ static loff_t vvp_pgcache_find(const struct lu_env *env,
return ~0ULL;
clob = vvp_pgcache_obj(env, dev, &id);
if (clob) {
- struct cl_object_header *hdr;
- int nr;
- struct cl_page *pg;
-
- /* got an object. Find next page. */
- hdr = cl_object_header(clob);
+ struct inode *inode = vvp_object_inode(clob);
+ struct page *vmpage;
+ int nr;
- spin_lock(&hdr->coh_page_guard);
- nr = radix_tree_gang_lookup(&hdr->coh_tree,
- (void **)&pg,
- id.vpi_index, 1);
+ nr = find_get_pages_contig(inode->i_mapping,
+ id.vpi_index, 1, &vmpage);
if (nr > 0) {
- id.vpi_index = pg->cp_index;
+ id.vpi_index = vmpage->index;
/* Can't support files over 16T */
- nr = !(pg->cp_index > 0xffffffff);
+ nr = !(vmpage->index > 0xffffffff);
+ put_page(vmpage);
}
- spin_unlock(&hdr->coh_page_guard);
lu_object_ref_del(&clob->co_lu, "dump", current);
cl_object_put(env, clob);
@@ -398,21 +526,20 @@ static loff_t vvp_pgcache_find(const struct lu_env *env,
static void vvp_pgcache_page_show(const struct lu_env *env,
struct seq_file *seq, struct cl_page *page)
{
- struct ccc_page *cpg;
+ struct vvp_page *vpg;
struct page *vmpage;
int has_flags;
- cpg = cl2ccc_page(cl_page_at(page, &vvp_device_type));
- vmpage = cpg->cpg_page;
- seq_printf(seq, " %5i | %p %p %s %s %s %s | %p %lu/%u(%p) %lu %u [",
+ vpg = cl2vvp_page(cl_page_at(page, &vvp_device_type));
+ vmpage = vpg->vpg_page;
+ seq_printf(seq, " %5i | %p %p %s %s %s %s | %p "DFID"(%p) %lu %u [",
0 /* gen */,
- cpg, page,
+ vpg, page,
"none",
- cpg->cpg_write_queued ? "wq" : "- ",
- cpg->cpg_defer_uptodate ? "du" : "- ",
+ vpg->vpg_write_queued ? "wq" : "- ",
+ vpg->vpg_defer_uptodate ? "du" : "- ",
PageWriteback(vmpage) ? "wb" : "-",
- vmpage, vmpage->mapping->host->i_ino,
- vmpage->mapping->host->i_generation,
+ vmpage, PFID(ll_inode2fid(vmpage->mapping->host)),
vmpage->mapping->host, vmpage->index,
page_count(vmpage));
has_flags = 0;
@@ -431,8 +558,6 @@ static int vvp_pgcache_show(struct seq_file *f, void *v)
struct ll_sb_info *sbi;
struct cl_object *clob;
struct lu_env *env;
- struct cl_page *page;
- struct cl_object_header *hdr;
struct vvp_pgcache_id id;
int refcheck;
int result;
@@ -444,27 +569,38 @@ static int vvp_pgcache_show(struct seq_file *f, void *v)
sbi = f->private;
clob = vvp_pgcache_obj(env, &sbi->ll_cl->cd_lu_dev, &id);
if (clob) {
- hdr = cl_object_header(clob);
-
- spin_lock(&hdr->coh_page_guard);
- page = cl_page_lookup(hdr, id.vpi_index);
- spin_unlock(&hdr->coh_page_guard);
+ struct inode *inode = vvp_object_inode(clob);
+ struct cl_page *page = NULL;
+ struct page *vmpage;
+
+ result = find_get_pages_contig(inode->i_mapping,
+ id.vpi_index, 1,
+ &vmpage);
+ if (result > 0) {
+ lock_page(vmpage);
+ page = cl_vmpage_page(vmpage, clob);
+ unlock_page(vmpage);
+ put_page(vmpage);
+ }
- seq_printf(f, "%8x@"DFID": ",
- id.vpi_index, PFID(&hdr->coh_lu.loh_fid));
+ seq_printf(f, "%8x@" DFID ": ", id.vpi_index,
+ PFID(lu_object_fid(&clob->co_lu)));
if (page) {
vvp_pgcache_page_show(env, f, page);
cl_page_put(env, page);
- } else
+ } else {
seq_puts(f, "missing\n");
+ }
lu_object_ref_del(&clob->co_lu, "dump", current);
cl_object_put(env, clob);
- } else
+ } else {
seq_printf(f, "%llx missing\n", pos);
+ }
cl_env_put(env, &refcheck);
result = 0;
- } else
+ } else {
result = PTR_ERR(env);
+ }
return result;
}
diff --git a/drivers/staging/lustre/lustre/llite/vvp_internal.h b/drivers/staging/lustre/lustre/llite/vvp_internal.h
index bb393378c9bb..27b9b0a01f32 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_internal.h
+++ b/drivers/staging/lustre/lustre/llite/vvp_internal.h
@@ -41,21 +41,337 @@
#ifndef VVP_INTERNAL_H
#define VVP_INTERNAL_H
+#include "../include/lustre/lustre_idl.h"
#include "../include/cl_object.h"
-#include "llite_internal.h"
-int vvp_io_init(const struct lu_env *env,
- struct cl_object *obj, struct cl_io *io);
-int vvp_lock_init(const struct lu_env *env,
- struct cl_object *obj, struct cl_lock *lock,
- const struct cl_io *io);
+enum obd_notify_event;
+struct inode;
+struct lov_stripe_md;
+struct lustre_md;
+struct obd_capa;
+struct obd_device;
+struct obd_export;
+struct page;
+
+/* a specific architecture can implement only part of this list */
+enum vvp_io_subtype {
+ /** normal IO */
+ IO_NORMAL,
+ /** io started from splice_{read|write} */
+ IO_SPLICE
+};
+
+/**
+ * IO state private to the VVP layer.
+ */
+struct vvp_io {
+ /** super class */
+ struct cl_io_slice vui_cl;
+ struct cl_io_lock_link vui_link;
+ /**
+ * I/O vector information to or from which read/write is going.
+ */
+ struct iov_iter *vui_iter;
+ /**
+ * Total size of the remaining IO.
+ */
+ size_t vui_tot_count;
+
+ union {
+ struct vvp_fault_io {
+ /**
+ * Inode modification time that is checked across DLM
+ * lock request.
+ */
+ time64_t ft_mtime;
+ struct vm_area_struct *ft_vma;
+ /**
+ * locked page returned from vvp_io
+ */
+ struct page *ft_vmpage;
+ /**
+ * kernel fault info
+ */
+ struct vm_fault *ft_vmf;
+ /**
+ * bitflags used by the fault API for the return code.
+ */
+ unsigned int ft_flags;
+ /**
+ * check that flags are from filemap_fault
+ */
+ bool ft_flags_valid;
+ } fault;
+ struct {
+ struct pipe_inode_info *vui_pipe;
+ unsigned int vui_flags;
+ } splice;
+ struct {
+ struct cl_page_list vui_queue;
+ unsigned long vui_written;
+ int vui_from;
+ int vui_to;
+ } write;
+ } u;
+
+ enum vvp_io_subtype vui_io_subtype;
+
+ /**
+ * Layout version when this IO is initialized
+ */
+ __u32 vui_layout_gen;
+ /**
+ * File descriptor against which IO is done.
+ */
+ struct ll_file_data *vui_fd;
+ struct kiocb *vui_iocb;
+
+ /* Readahead state. */
+ pgoff_t vui_ra_start;
+ pgoff_t vui_ra_count;
+ /* Set when vui_ra_{start,count} have been initialized. */
+ bool vui_ra_valid;
+};
+
+extern struct lu_device_type vvp_device_type;
+
+extern struct lu_context_key vvp_session_key;
+extern struct lu_context_key vvp_thread_key;
+
+extern struct kmem_cache *vvp_lock_kmem;
+extern struct kmem_cache *vvp_object_kmem;
+extern struct kmem_cache *vvp_req_kmem;
+
+struct vvp_thread_info {
+ struct cl_lock vti_lock;
+ struct cl_lock_descr vti_descr;
+ struct cl_io vti_io;
+ struct cl_attr vti_attr;
+};
+
+static inline struct vvp_thread_info *vvp_env_info(const struct lu_env *env)
+{
+ struct vvp_thread_info *vti;
+
+ vti = lu_context_key_get(&env->le_ctx, &vvp_thread_key);
+ LASSERT(vti);
+
+ return vti;
+}
+
+static inline struct cl_lock *vvp_env_lock(const struct lu_env *env)
+{
+ struct cl_lock *lock = &vvp_env_info(env)->vti_lock;
+
+ memset(lock, 0, sizeof(*lock));
+ return lock;
+}
+
+static inline struct cl_attr *vvp_env_thread_attr(const struct lu_env *env)
+{
+ struct cl_attr *attr = &vvp_env_info(env)->vti_attr;
+
+ memset(attr, 0, sizeof(*attr));
+
+ return attr;
+}
+
+static inline struct cl_io *vvp_env_thread_io(const struct lu_env *env)
+{
+ struct cl_io *io = &vvp_env_info(env)->vti_io;
+
+ memset(io, 0, sizeof(*io));
+
+ return io;
+}
+
+struct vvp_session {
+ struct vvp_io cs_ios;
+};
+
+static inline struct vvp_session *vvp_env_session(const struct lu_env *env)
+{
+ struct vvp_session *ses;
+
+ ses = lu_context_key_get(env->le_ses, &vvp_session_key);
+ LASSERT(ses);
+
+ return ses;
+}
+
+static inline struct vvp_io *vvp_env_io(const struct lu_env *env)
+{
+ return &vvp_env_session(env)->cs_ios;
+}
+
+/**
+ * VVP-private object state.
+ */
+struct vvp_object {
+ struct cl_object_header vob_header;
+ struct cl_object vob_cl;
+ struct inode *vob_inode;
+
+ /**
+ * A list of dirty pages pending IO in the cache. Used by
+ * SOM. Protected by ll_inode_info::lli_lock.
+ *
+ * \see vvp_page::vpg_pending_linkage
+ */
+ struct list_head vob_pending_list;
+
+ /**
+ * Access this counter is protected by inode->i_sem. Now that
+ * the lifetime of transient pages must be covered by inode sem,
+ * we don't need to hold any lock..
+ */
+ int vob_transient_pages;
+ /**
+ * Number of outstanding mmaps on this file.
+ *
+ * \see ll_vm_open(), ll_vm_close().
+ */
+ atomic_t vob_mmap_cnt;
+
+ /**
+ * various flags
+ * vob_discard_page_warned
+ * if pages belonging to this object are discarded when a client
+ * is evicted, some debug info will be printed; this flag is set
+ * while processing the first discarded page, to avoid flooding the
+ * debug log with messages for the remaining discarded pages.
+ *
+ * \see ll_dirty_page_discard_warn.
+ */
+ unsigned int vob_discard_page_warned:1;
+};
+
+/**
+ * VVP-private page state.
+ */
+struct vvp_page {
+ struct cl_page_slice vpg_cl;
+ int vpg_defer_uptodate;
+ int vpg_ra_used;
+ int vpg_write_queued;
+ /**
+ * Non-empty iff this page is already counted in
+ * vvp_object::vob_pending_list. This list is only used as a flag,
+ * that is, never iterated through, only checked for list_empty(), but
+ * having a list is useful for debugging.
+ */
+ struct list_head vpg_pending_linkage;
+ /** VM page */
+ struct page *vpg_page;
+};
+
+static inline struct vvp_page *cl2vvp_page(const struct cl_page_slice *slice)
+{
+ return container_of(slice, struct vvp_page, vpg_cl);
+}
+
+static inline pgoff_t vvp_index(struct vvp_page *vvp)
+{
+ return vvp->vpg_cl.cpl_index;
+}
+
+struct vvp_device {
+ struct cl_device vdv_cl;
+ struct super_block *vdv_sb;
+ struct cl_device *vdv_next;
+};
+
+struct vvp_lock {
+ struct cl_lock_slice vlk_cl;
+};
+
+struct vvp_req {
+ struct cl_req_slice vrq_cl;
+};
+
+void *ccc_key_init(const struct lu_context *ctx,
+ struct lu_context_key *key);
+void ccc_key_fini(const struct lu_context *ctx,
+ struct lu_context_key *key, void *data);
+
+void ccc_umount(const struct lu_env *env, struct cl_device *dev);
+
+static inline struct lu_device *vvp2lu_dev(struct vvp_device *vdv)
+{
+ return &vdv->vdv_cl.cd_lu_dev;
+}
+
+static inline struct vvp_device *lu2vvp_dev(const struct lu_device *d)
+{
+ return container_of0(d, struct vvp_device, vdv_cl.cd_lu_dev);
+}
+
+static inline struct vvp_device *cl2vvp_dev(const struct cl_device *d)
+{
+ return container_of0(d, struct vvp_device, vdv_cl);
+}
+
+static inline struct vvp_object *cl2vvp(const struct cl_object *obj)
+{
+ return container_of0(obj, struct vvp_object, vob_cl);
+}
+
+static inline struct vvp_object *lu2vvp(const struct lu_object *obj)
+{
+ return container_of0(obj, struct vvp_object, vob_cl.co_lu);
+}
+
+static inline struct inode *vvp_object_inode(const struct cl_object *obj)
+{
+ return cl2vvp(obj)->vob_inode;
+}
+
+int vvp_object_invariant(const struct cl_object *obj);
+struct vvp_object *cl_inode2vvp(struct inode *inode);
+
+static inline struct page *cl2vm_page(const struct cl_page_slice *slice)
+{
+ return cl2vvp_page(slice)->vpg_page;
+}
+
+static inline struct vvp_lock *cl2vvp_lock(const struct cl_lock_slice *slice)
+{
+ return container_of(slice, struct vvp_lock, vlk_cl);
+}
+
+# define CLOBINVRNT(env, clob, expr) \
+ ((void)sizeof(env), (void)sizeof(clob), (void)sizeof(!!(expr)))
+
+/**
+ * New interfaces to get and put lov_stripe_md from the lov layer. This
+ * violates layering, because lov_stripe_md is supposed to be private data
+ * of the lov layer.
+ *
+ * NB: If you find you have to use these interfaces for your new code, please
+ * think about it again. These interfaces may be removed in the future for
+ * better layering.
+ */
+struct lov_stripe_md *lov_lsm_get(struct cl_object *clobj);
+void lov_lsm_put(struct cl_object *clobj, struct lov_stripe_md *lsm);
+int lov_read_and_clear_async_rc(struct cl_object *clob);
+
+struct lov_stripe_md *ccc_inode_lsm_get(struct inode *inode);
+void ccc_inode_lsm_put(struct inode *inode, struct lov_stripe_md *lsm);
+
+int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_io *io);
+int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io);
+int vvp_lock_init(const struct lu_env *env, struct cl_object *obj,
+ struct cl_lock *lock, const struct cl_io *io);
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage);
+ struct cl_page *page, pgoff_t index);
+int vvp_req_init(const struct lu_env *env, struct cl_device *dev,
+ struct cl_req *req);
struct lu_object *vvp_object_alloc(const struct lu_env *env,
const struct lu_object_header *hdr,
struct lu_device *dev);
-struct ccc_object *cl_inode2ccc(struct inode *inode);
+int vvp_global_init(void);
+void vvp_global_fini(void);
extern const struct file_operations vvp_dump_pgcache_file_ops;
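
The accessors declared above (vvp_env_io(), vvp_env_thread_io(), vvp_env_thread_attr(), ...) are the intended way to reach per-thread scratch state. A small illustrative sketch, not taken from the patch (the function name is hypothetical), of how a caller could use them to set up a CIT_MISC io the way ll_writepage() does:

	/* Illustration only: fetch the thread-local cl_io slot via the
	 * accessor above and initialize it for a CIT_MISC io.
	 */
	static int vvp_example_misc_io(const struct lu_env *env,
				       struct cl_object *clob)
	{
		struct cl_io *io = vvp_env_thread_io(env);

		io->ci_obj = clob;
		io->ci_ignore_layout = 1;
		return cl_io_init(env, io, CIT_MISC, clob);
	}
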
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index 85a835976174..5bf9592ae5d2 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -44,21 +44,30 @@
#include "../include/obd.h"
#include "../include/lustre_lite.h"
+#include "llite_internal.h"
#include "vvp_internal.h"
-static struct vvp_io *cl2vvp_io(const struct lu_env *env,
- const struct cl_io_slice *slice);
+struct vvp_io *cl2vvp_io(const struct lu_env *env,
+ const struct cl_io_slice *slice)
+{
+ struct vvp_io *vio;
+
+ vio = container_of(slice, struct vvp_io, vui_cl);
+ LASSERT(vio == vvp_env_io(env));
+
+ return vio;
+}
/**
* True, if \a io is a normal io, False for splice_{read,write}
*/
-int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
+static int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
{
struct vvp_io *vio = vvp_env_io(env);
LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
- return vio->cui_io_subtype == IO_NORMAL;
+ return vio->vui_io_subtype == IO_NORMAL;
}
/**
@@ -71,7 +80,7 @@ static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
struct inode *inode)
{
struct ll_inode_info *lli = ll_i2info(inode);
- struct ccc_io *cio = ccc_env_io(env);
+ struct vvp_io *vio = vvp_env_io(env);
bool rc = true;
switch (io->ci_type) {
@@ -80,7 +89,7 @@ static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
/* don't need lock here to check lli_layout_gen as we have held
* extent lock and GROUP lock has to hold to swap layout
*/
- if (ll_layout_version_get(lli) != cio->cui_layout_gen) {
+ if (ll_layout_version_get(lli) != vio->vui_layout_gen) {
io->ci_need_restart = 1;
/* this will return application a short read/write */
io->ci_continue = 0;
@@ -95,20 +104,187 @@ static bool can_populate_pages(const struct lu_env *env, struct cl_io *io,
return rc;
}
+static void vvp_object_size_lock(struct cl_object *obj)
+{
+ struct inode *inode = vvp_object_inode(obj);
+
+ ll_inode_size_lock(inode);
+ cl_object_attr_lock(obj);
+}
+
+static void vvp_object_size_unlock(struct cl_object *obj)
+{
+ struct inode *inode = vvp_object_inode(obj);
+
+ cl_object_attr_unlock(obj);
+ ll_inode_size_unlock(inode);
+}
+
+/**
+ * Helper function that, if necessary, adjusts the file size (inode->i_size)
+ * when the position at offset \a pos is accessed. The file size can be
+ * arbitrarily stale on a Lustre client, but the client at least knows the
+ * KMS. If the accessed area is inside [0, KMS], set the file size to KMS;
+ * otherwise glimpse the file size.
+ *
+ * Locking: cl_isize_lock is used to serialize changes to inode size and to
+ * protect consistency between inode size and cl_object
+ * attributes. cl_object_size_lock() protects consistency between cl_attr's of
+ * top-object and sub-objects.
+ */
+static int vvp_prep_size(const struct lu_env *env, struct cl_object *obj,
+ struct cl_io *io, loff_t start, size_t count,
+ int *exceed)
+{
+ struct cl_attr *attr = vvp_env_thread_attr(env);
+ struct inode *inode = vvp_object_inode(obj);
+ loff_t pos = start + count - 1;
+ loff_t kms;
+ int result;
+
+ /*
+ * Consistency guarantees: following possibilities exist for the
+ * relation between region being accessed and real file size at this
+ * moment:
+ *
+ * (A): the region is completely inside of the file;
+ *
+ * (B-x): x bytes of region are inside of the file, the rest is
+ * outside;
+ *
+ * (C): the region is completely outside of the file.
+ *
+ * This classification is stable under the DLM lock already acquired by
+ * the caller, because to change the class another client has to take a
+ * DLM lock conflicting with ours. Also, any updates to ->i_size
+ * by other threads on this client are serialized by
+ * ll_inode_size_lock(). This guarantees that short reads are handled
+ * correctly in the face of concurrent writes and truncates.
+ */
+ vvp_object_size_lock(obj);
+ result = cl_object_attr_get(env, obj, attr);
+ if (result == 0) {
+ kms = attr->cat_kms;
+ if (pos > kms) {
+ /*
+ * A glimpse is necessary to determine whether we
+ * return a short read (B) or some zeroes at the end
+ * of the buffer (C)
+ */
+ vvp_object_size_unlock(obj);
+ result = cl_glimpse_lock(env, io, inode, obj, 0);
+ if (result == 0 && exceed) {
+ /* If the target page index exceeds the
+ * end-of-file page index, return directly.
+ * Do not expect the kernel to handle such a
+ * case correctly; linux-2.6.18-128.1.1 fails
+ * to do so. --bug 17336
+ */
+ loff_t size = i_size_read(inode);
+ loff_t cur_index = start >> PAGE_SHIFT;
+ loff_t size_index = (size - 1) >> PAGE_SHIFT;
+
+ if ((size == 0 && cur_index != 0) ||
+ size_index < cur_index)
+ *exceed = 1;
+ }
+ return result;
+ }
+ /*
+ * region is within kms and, hence, within real file
+ * size (A). We need to increase i_size to cover the
+ * read region so that generic_file_read() will do its
+ * job, but that doesn't mean the kms size is
+ * _correct_, it is only the _minimum_ size. If
+ * someone does a stat they will get the correct size
+ * which will always be >= the kms value here.
+ * b=11081
+ */
+ if (i_size_read(inode) < kms) {
+ i_size_write(inode, kms);
+ CDEBUG(D_VFSTRACE, DFID " updating i_size %llu\n",
+ PFID(lu_object_fid(&obj->co_lu)),
+ (__u64)i_size_read(inode));
+ }
+ }
+
+ vvp_object_size_unlock(obj);
+
+ return result;
+}
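As a reading aid, here is a minimal user-space model of the decision vvp_prep_size() makes: if the accessed range ends at or below the known minimum size (KMS), the cached i_size can safely be raised to KMS; otherwise the real size must be fetched (a glimpse, stubbed out here). All names and the stub return value are assumptions for illustration only.

#include <stdio.h>

typedef long long off_model_t;

/* stand-in for cl_glimpse_lock(): pretend the server reported this size */
static off_model_t glimpse_size(void)
{
	return 8192;
}

static off_model_t prep_size(off_model_t i_size, off_model_t kms,
			     off_model_t start, off_model_t count)
{
	off_model_t pos = start + count - 1;

	if (pos > kms)
		return glimpse_size();	/* range may be past EOF: ask the server */
	if (i_size < kms)
		i_size = kms;		/* range within KMS: KMS is a safe lower bound */
	return i_size;
}

int main(void)
{
	printf("i_size -> %lld\n", prep_size(0, 4096, 0, 1024));	/* 4096 */
	printf("i_size -> %lld\n", prep_size(0, 4096, 0, 65536));	/* 8192 */
	return 0;
}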
+
/*****************************************************************************
*
* io operations.
*
*/
+static int vvp_io_one_lock_index(const struct lu_env *env, struct cl_io *io,
+ __u32 enqflags, enum cl_lock_mode mode,
+ pgoff_t start, pgoff_t end)
+{
+ struct vvp_io *vio = vvp_env_io(env);
+ struct cl_lock_descr *descr = &vio->vui_link.cill_descr;
+ struct cl_object *obj = io->ci_obj;
+
+ CLOBINVRNT(env, obj, vvp_object_invariant(obj));
+
+ CDEBUG(D_VFSTRACE, "lock: %d [%lu, %lu]\n", mode, start, end);
+
+ memset(&vio->vui_link, 0, sizeof(vio->vui_link));
+
+ if (vio->vui_fd && (vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
+ descr->cld_mode = CLM_GROUP;
+ descr->cld_gid = vio->vui_fd->fd_grouplock.lg_gid;
+ } else {
+ descr->cld_mode = mode;
+ }
+ descr->cld_obj = obj;
+ descr->cld_start = start;
+ descr->cld_end = end;
+ descr->cld_enq_flags = enqflags;
+
+ cl_io_lock_add(env, io, &vio->vui_link);
+ return 0;
+}
+
+static int vvp_io_one_lock(const struct lu_env *env, struct cl_io *io,
+ __u32 enqflags, enum cl_lock_mode mode,
+ loff_t start, loff_t end)
+{
+ struct cl_object *obj = io->ci_obj;
+
+ return vvp_io_one_lock_index(env, io, enqflags, mode,
+ cl_index(obj, start), cl_index(obj, end));
+}
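The helper above reduces a byte extent to a page-index extent; a tiny sketch of that mapping, assuming 4 KiB pages (cl_index() is essentially offset >> PAGE_SHIFT at this layer), with hypothetical names:

#include <stdio.h>

#define MODEL_PAGE_SHIFT 12	/* assume 4 KiB pages */

static unsigned long byte_to_index(long long offset)
{
	return (unsigned long)(offset >> MODEL_PAGE_SHIFT);
}

int main(void)
{
	long long start = 5000, end = 20000;

	/* a [5000, 20000] byte lock spans page indexes [1, 4] */
	printf("lock pages [%lu, %lu]\n",
	       byte_to_index(start), byte_to_index(end));
	return 0;
}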
+
+static int vvp_io_write_iter_init(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct vvp_io *vio = cl2vvp_io(env, ios);
+
+ cl_page_list_init(&vio->u.write.vui_queue);
+ vio->u.write.vui_written = 0;
+ vio->u.write.vui_from = 0;
+ vio->u.write.vui_to = PAGE_SIZE;
+
+ return 0;
+}
+
+static void vvp_io_write_iter_fini(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct vvp_io *vio = cl2vvp_io(env, ios);
+
+ LASSERT(vio->u.write.vui_queue.pl_nr == 0);
+}
+
static int vvp_io_fault_iter_init(const struct lu_env *env,
const struct cl_io_slice *ios)
{
struct vvp_io *vio = cl2vvp_io(env, ios);
- struct inode *inode = ccc_object_inode(ios->cis_obj);
+ struct inode *inode = vvp_object_inode(ios->cis_obj);
- LASSERT(inode ==
- file_inode(cl2ccc_io(env, ios)->cui_fd->fd_file));
+ LASSERT(inode == file_inode(vio->vui_fd->fd_file));
vio->u.fault.ft_mtime = inode->i_mtime.tv_sec;
return 0;
}
@@ -117,15 +293,16 @@ static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
{
struct cl_io *io = ios->cis_io;
struct cl_object *obj = io->ci_obj;
- struct ccc_io *cio = cl2ccc_io(env, ios);
+ struct vvp_io *vio = cl2vvp_io(env, ios);
+ struct inode *inode = vvp_object_inode(obj);
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
+ CLOBINVRNT(env, obj, vvp_object_invariant(obj));
CDEBUG(D_VFSTRACE, DFID
" ignore/verify layout %d/%d, layout version %d restore needed %d\n",
PFID(lu_object_fid(&obj->co_lu)),
io->ci_ignore_layout, io->ci_verify_layout,
- cio->cui_layout_gen, io->ci_restore_needed);
+ vio->vui_layout_gen, io->ci_restore_needed);
if (io->ci_restore_needed == 1) {
int rc;
@@ -133,7 +310,7 @@ static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
/* the file was detected to have been released, we need to restore it
* before finishing the io
*/
- rc = ll_layout_restore(ccc_object_inode(obj));
+ rc = ll_layout_restore(inode, 0, OBD_OBJECT_EOF);
/* if restore registration failed, no restart,
* we will return -ENODATA
*/
@@ -159,16 +336,16 @@ static void vvp_io_fini(const struct lu_env *env, const struct cl_io_slice *ios)
__u32 gen = 0;
/* check layout version */
- ll_layout_refresh(ccc_object_inode(obj), &gen);
- io->ci_need_restart = cio->cui_layout_gen != gen;
+ ll_layout_refresh(inode, &gen);
+ io->ci_need_restart = vio->vui_layout_gen != gen;
if (io->ci_need_restart) {
CDEBUG(D_VFSTRACE,
DFID" layout changed from %d to %d.\n",
PFID(lu_object_fid(&obj->co_lu)),
- cio->cui_layout_gen, gen);
+ vio->vui_layout_gen, gen);
/* today successful restore is the only possible case */
/* restore was done, clear restoring state */
- ll_i2info(ccc_object_inode(obj))->lli_flags &=
+ ll_i2info(vvp_object_inode(obj))->lli_flags &=
~LLIF_FILE_RESTORING;
}
}
@@ -180,7 +357,7 @@ static void vvp_io_fault_fini(const struct lu_env *env,
struct cl_io *io = ios->cis_io;
struct cl_page *page = io->u.ci_fault.ft_page;
- CLOBINVRNT(env, io->ci_obj, ccc_object_invariant(io->ci_obj));
+ CLOBINVRNT(env, io->ci_obj, vvp_object_invariant(io->ci_obj));
if (page) {
lu_ref_del(&page->cp_reference, "fault", io);
@@ -203,16 +380,16 @@ static enum cl_lock_mode vvp_mode_from_vma(struct vm_area_struct *vma)
}
static int vvp_mmap_locks(const struct lu_env *env,
- struct ccc_io *vio, struct cl_io *io)
+ struct vvp_io *vio, struct cl_io *io)
{
- struct ccc_thread_info *cti = ccc_env_info(env);
+ struct vvp_thread_info *cti = vvp_env_info(env);
struct mm_struct *mm = current->mm;
struct vm_area_struct *vma;
- struct cl_lock_descr *descr = &cti->cti_descr;
+ struct cl_lock_descr *descr = &cti->vti_descr;
ldlm_policy_data_t policy;
unsigned long addr;
ssize_t count;
- int result;
+ int result = 0;
struct iov_iter i;
struct iovec iov;
@@ -221,21 +398,21 @@ static int vvp_mmap_locks(const struct lu_env *env,
if (!cl_is_normalio(env, io))
return 0;
- if (!vio->cui_iter) /* nfs or loop back device write */
+ if (!vio->vui_iter) /* nfs or loopback device write */
return 0;
/* No MM (e.g. NFS)? No vmas too. */
if (!mm)
return 0;
- iov_for_each(iov, i, *(vio->cui_iter)) {
+ iov_for_each(iov, i, *vio->vui_iter) {
addr = (unsigned long)iov.iov_base;
count = iov.iov_len;
if (count == 0)
continue;
- count += addr & (~CFS_PAGE_MASK);
- addr &= CFS_PAGE_MASK;
+ count += addr & (~PAGE_MASK);
+ addr &= PAGE_MASK;
down_read(&mm->mmap_sem);
while ((vma = our_vma(mm, addr, count)) != NULL) {
@@ -244,10 +421,10 @@ static int vvp_mmap_locks(const struct lu_env *env,
if (ll_file_nolock(vma->vm_file)) {
/*
- * For no lock case, a lockless lock will be
- * generated.
+ * The no-lock case is not allowed for mmap I/O
*/
- flags = CEF_NEVER;
+ result = -EINVAL;
+ break;
}
/*
@@ -269,10 +446,8 @@ static int vvp_mmap_locks(const struct lu_env *env,
descr->cld_mode, descr->cld_start,
descr->cld_end);
- if (result < 0) {
- up_read(&mm->mmap_sem);
- return result;
- }
+ if (result < 0)
+ break;
if (vma->vm_end - addr >= count)
break;
@@ -281,26 +456,55 @@ static int vvp_mmap_locks(const struct lu_env *env,
addr = vma->vm_end;
}
up_read(&mm->mmap_sem);
+ if (result < 0)
+ break;
}
- return 0;
+ return result;
+}
+
+static void vvp_io_advance(const struct lu_env *env,
+ const struct cl_io_slice *ios,
+ size_t nob)
+{
+ struct vvp_io *vio = cl2vvp_io(env, ios);
+ struct cl_io *io = ios->cis_io;
+ struct cl_object *obj = ios->cis_io->ci_obj;
+
+ CLOBINVRNT(env, obj, vvp_object_invariant(obj));
+
+ if (!cl_is_normalio(env, io))
+ return;
+
+ iov_iter_reexpand(vio->vui_iter, vio->vui_tot_count -= nob);
+}
+
+static void vvp_io_update_iov(const struct lu_env *env,
+ struct vvp_io *vio, struct cl_io *io)
+{
+ size_t size = io->u.ci_rw.crw_count;
+
+ if (!cl_is_normalio(env, io) || !vio->vui_iter)
+ return;
+
+ iov_iter_truncate(vio->vui_iter, size);
}
static int vvp_io_rw_lock(const struct lu_env *env, struct cl_io *io,
enum cl_lock_mode mode, loff_t start, loff_t end)
{
- struct ccc_io *cio = ccc_env_io(env);
+ struct vvp_io *vio = vvp_env_io(env);
int result;
int ast_flags = 0;
LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
- ccc_io_update_iov(env, cio, io);
+ vvp_io_update_iov(env, vio, io);
if (io->u.ci_rw.crw_nonblock)
ast_flags |= CEF_NONBLOCK;
- result = vvp_mmap_locks(env, cio, io);
+ result = vvp_mmap_locks(env, vio, io);
if (result == 0)
- result = ccc_io_one_lock(env, io, ast_flags, mode, start, end);
+ result = vvp_io_one_lock(env, io, ast_flags, mode, start, end);
return result;
}
@@ -325,9 +529,11 @@ static int vvp_io_fault_lock(const struct lu_env *env,
/*
* XXX LDLM_FL_CBPENDING
*/
- return ccc_io_one_lock_index
- (env, io, 0, vvp_mode_from_vma(vio->u.fault.ft_vma),
- io->u.ci_fault.ft_index, io->u.ci_fault.ft_index);
+ return vvp_io_one_lock_index(env,
+ io, 0,
+ vvp_mode_from_vma(vio->u.fault.ft_vma),
+ io->u.ci_fault.ft_index,
+ io->u.ci_fault.ft_index);
}
static int vvp_io_write_lock(const struct lu_env *env,
@@ -354,14 +560,13 @@ static int vvp_io_setattr_iter_init(const struct lu_env *env,
}
/**
- * Implementation of cl_io_operations::cio_lock() method for CIT_SETATTR io.
+ * Implementation of cl_io_operations::cio_lock() method for CIT_SETATTR io.
*
* Handles "lockless io" mode when extent locking is done by server.
*/
static int vvp_io_setattr_lock(const struct lu_env *env,
const struct cl_io_slice *ios)
{
- struct ccc_io *cio = ccc_env_io(env);
struct cl_io *io = ios->cis_io;
__u64 new_size;
__u32 enqflags = 0;
@@ -378,8 +583,8 @@ static int vvp_io_setattr_lock(const struct lu_env *env,
return 0;
new_size = 0;
}
- cio->u.setattr.cui_local_lock = SETATTR_EXTENT_LOCK;
- return ccc_io_one_lock(env, io, enqflags, CLM_WRITE,
+
+ return vvp_io_one_lock(env, io, enqflags, CLM_WRITE,
new_size, OBD_OBJECT_EOF);
}
@@ -413,7 +618,7 @@ static int vvp_io_setattr_time(const struct lu_env *env,
{
struct cl_io *io = ios->cis_io;
struct cl_object *obj = io->ci_obj;
- struct cl_attr *attr = ccc_env_thread_attr(env);
+ struct cl_attr *attr = vvp_env_thread_attr(env);
int result;
unsigned valid = CAT_CTIME;
@@ -437,7 +642,7 @@ static int vvp_io_setattr_start(const struct lu_env *env,
const struct cl_io_slice *ios)
{
struct cl_io *io = ios->cis_io;
- struct inode *inode = ccc_object_inode(io->ci_obj);
+ struct inode *inode = vvp_object_inode(io->ci_obj);
int result = 0;
inode_lock(inode);
@@ -453,7 +658,7 @@ static void vvp_io_setattr_end(const struct lu_env *env,
const struct cl_io_slice *ios)
{
struct cl_io *io = ios->cis_io;
- struct inode *inode = ccc_object_inode(io->ci_obj);
+ struct inode *inode = vvp_object_inode(io->ci_obj);
if (cl_io_is_trunc(io))
/* Truncate in memory pages - they must be clean pages
@@ -474,27 +679,25 @@ static int vvp_io_read_start(const struct lu_env *env,
const struct cl_io_slice *ios)
{
struct vvp_io *vio = cl2vvp_io(env, ios);
- struct ccc_io *cio = cl2ccc_io(env, ios);
struct cl_io *io = ios->cis_io;
struct cl_object *obj = io->ci_obj;
- struct inode *inode = ccc_object_inode(obj);
- struct ll_ra_read *bead = &vio->cui_bead;
- struct file *file = cio->cui_fd->fd_file;
+ struct inode *inode = vvp_object_inode(obj);
+ struct file *file = vio->vui_fd->fd_file;
int result;
loff_t pos = io->u.ci_rd.rd.crw_pos;
long cnt = io->u.ci_rd.rd.crw_count;
- long tot = cio->cui_tot_count;
+ long tot = vio->vui_tot_count;
int exceed = 0;
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
+ CLOBINVRNT(env, obj, vvp_object_invariant(obj));
CDEBUG(D_VFSTRACE, "read: -> [%lli, %lli)\n", pos, pos + cnt);
if (!can_populate_pages(env, io, inode))
return 0;
- result = ccc_prep_size(env, obj, io, pos, tot, &exceed);
+ result = vvp_prep_size(env, obj, io, pos, tot, &exceed);
if (result != 0)
return result;
else if (exceed != 0)
@@ -505,30 +708,27 @@ static int vvp_io_read_start(const struct lu_env *env,
inode->i_ino, cnt, pos, i_size_read(inode));
/* turn off the kernel's read-ahead */
- cio->cui_fd->fd_file->f_ra.ra_pages = 0;
+ vio->vui_fd->fd_file->f_ra.ra_pages = 0;
/* initialize read-ahead window once per syscall */
- if (!vio->cui_ra_window_set) {
- vio->cui_ra_window_set = 1;
- bead->lrr_start = cl_index(obj, pos);
- /*
- * XXX: explicit PAGE_SIZE
- */
- bead->lrr_count = cl_index(obj, tot + PAGE_SIZE - 1);
- ll_ra_read_in(file, bead);
+ if (!vio->vui_ra_valid) {
+ vio->vui_ra_valid = true;
+ vio->vui_ra_start = cl_index(obj, pos);
+ vio->vui_ra_count = cl_index(obj, tot + PAGE_SIZE - 1);
+ ll_ras_enter(file);
}
/* BUG: 5972 */
file_accessed(file);
- switch (vio->cui_io_subtype) {
+ switch (vio->vui_io_subtype) {
case IO_NORMAL:
- LASSERT(cio->cui_iocb->ki_pos == pos);
- result = generic_file_read_iter(cio->cui_iocb, cio->cui_iter);
+ LASSERT(vio->vui_iocb->ki_pos == pos);
+ result = generic_file_read_iter(vio->vui_iocb, vio->vui_iter);
break;
case IO_SPLICE:
result = generic_file_splice_read(file, &pos,
- vio->u.splice.cui_pipe, cnt,
- vio->u.splice.cui_flags);
+ vio->u.splice.vui_pipe, cnt,
+ vio->u.splice.vui_flags);
/* LU-1109: do splice read stripe by stripe, otherwise it
* may make nfsd stuck if this read occupies all internal pipe
* buffers.
@@ -536,7 +736,7 @@ static int vvp_io_read_start(const struct lu_env *env,
io->ci_continue = 0;
break;
default:
- CERROR("Wrong IO type %u\n", vio->cui_io_subtype);
+ CERROR("Wrong IO type %u\n", vio->vui_io_subtype);
LBUG();
}
@@ -546,30 +746,201 @@ out:
io->ci_continue = 0;
io->ci_nob += result;
ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
- cio->cui_fd, pos, result, READ);
+ vio->vui_fd, pos, result, READ);
result = 0;
}
return result;
}
-static void vvp_io_read_fini(const struct lu_env *env, const struct cl_io_slice *ios)
+static int vvp_io_commit_sync(const struct lu_env *env, struct cl_io *io,
+ struct cl_page_list *plist, int from, int to)
{
- struct vvp_io *vio = cl2vvp_io(env, ios);
- struct ccc_io *cio = cl2ccc_io(env, ios);
+ struct cl_2queue *queue = &io->ci_queue;
+ struct cl_page *page;
+ unsigned int bytes = 0;
+ int rc = 0;
- if (vio->cui_ra_window_set)
- ll_ra_read_ex(cio->cui_fd->fd_file, &vio->cui_bead);
+ if (plist->pl_nr == 0)
+ return 0;
- vvp_io_fini(env, ios);
+ if (from > 0 || to != PAGE_SIZE) {
+ page = cl_page_list_first(plist);
+ if (plist->pl_nr == 1) {
+ cl_page_clip(env, page, from, to);
+ } else {
+ if (from > 0)
+ cl_page_clip(env, page, from, PAGE_SIZE);
+ if (to != PAGE_SIZE) {
+ page = cl_page_list_last(plist);
+ cl_page_clip(env, page, 0, to);
+ }
+ }
+ }
+
+ cl_2queue_init(queue);
+ cl_page_list_splice(plist, &queue->c2_qin);
+ rc = cl_io_submit_sync(env, io, CRT_WRITE, queue, 0);
+
+ /* plist is not sorted any more */
+ cl_page_list_splice(&queue->c2_qin, plist);
+ cl_page_list_splice(&queue->c2_qout, plist);
+ cl_2queue_fini(env, queue);
+
+ if (rc == 0) {
+ /* calculate bytes */
+ bytes = plist->pl_nr << PAGE_SHIFT;
+ bytes -= from + PAGE_SIZE - to;
+
+ while (plist->pl_nr > 0) {
+ page = cl_page_list_first(plist);
+ cl_page_list_del(env, plist, page);
+
+ cl_page_clip(env, page, 0, PAGE_SIZE);
+
+ SetPageUptodate(cl_page_vmpage(page));
+ cl_page_disown(env, io, page);
+
+ /* held in ll_cl_init() */
+ lu_ref_del(&page->cp_reference, "cl_io", io);
+ cl_page_put(env, page);
+ }
+ }
+
+ return bytes > 0 ? bytes : rc;
+}
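The byte accounting at the end of vvp_io_commit_sync() is easy to get wrong, so here is a worked stand-alone example: with N pages where only [from, PAGE_SIZE) of the first page and [0, to) of the last page carry payload, the total is N * PAGE_SIZE - from - (PAGE_SIZE - to). The 4 KiB page size and the helper name are assumptions.

#include <stdio.h>

#define MODEL_PAGE_SIZE 4096u

static unsigned int committed_bytes(unsigned int nr_pages,
				    unsigned int from, unsigned int to)
{
	unsigned int bytes = nr_pages * MODEL_PAGE_SIZE;

	/* trim the uncovered head of the first page and tail of the last */
	bytes -= from + MODEL_PAGE_SIZE - to;
	return bytes;
}

int main(void)
{
	/* 3 pages; write starts 100 bytes into page 0, ends 300 bytes into
	 * page 2: 3996 + 4096 + 300 = 8392
	 */
	printf("%u bytes\n", committed_bytes(3, 100, 300));
	return 0;
}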
+
+static void write_commit_callback(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page)
+{
+ struct vvp_page *vpg;
+ struct page *vmpage = page->cp_vmpage;
+ struct cl_object *clob = cl_io_top(io)->ci_obj;
+
+ SetPageUptodate(vmpage);
+ set_page_dirty(vmpage);
+
+ vpg = cl2vvp_page(cl_object_page_slice(clob, page));
+ vvp_write_pending(cl2vvp(clob), vpg);
+
+ cl_page_disown(env, io, page);
+
+ /* held in ll_cl_init() */
+ lu_ref_del(&page->cp_reference, "cl_io", io);
+ cl_page_put(env, page);
+}
+
+/* make sure the page list is contiguous */
+static bool page_list_sanity_check(struct cl_object *obj,
+ struct cl_page_list *plist)
+{
+ struct cl_page *page;
+ pgoff_t index = CL_PAGE_EOF;
+
+ cl_page_list_for_each(page, plist) {
+ struct vvp_page *vpg = cl_object_page_slice(obj, page);
+
+ if (index == CL_PAGE_EOF) {
+ index = vvp_index(vpg);
+ continue;
+ }
+
+ ++index;
+ if (index == vvp_index(vpg))
+ continue;
+
+ return false;
+ }
+ return true;
+}
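page_list_sanity_check() only asserts that page indexes increase by exactly one; below is a trivial stand-alone version of the same check, with an array standing in for the cl_page_list (names are illustrative):

#include <stdbool.h>
#include <stdio.h>

static bool indexes_contiguous(const unsigned long *idx, int n)
{
	int i;

	for (i = 1; i < n; i++)
		if (idx[i] != idx[i - 1] + 1)
			return false;
	return true;
}

int main(void)
{
	unsigned long ok[] = { 7, 8, 9 };
	unsigned long bad[] = { 7, 9, 10 };

	printf("%d %d\n", indexes_contiguous(ok, 3), indexes_contiguous(bad, 3));
	return 0;
}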
+
+/* Return how many bytes have been queued or written */
+int vvp_io_write_commit(const struct lu_env *env, struct cl_io *io)
+{
+ struct cl_object *obj = io->ci_obj;
+ struct inode *inode = vvp_object_inode(obj);
+ struct vvp_io *vio = vvp_env_io(env);
+ struct cl_page_list *queue = &vio->u.write.vui_queue;
+ struct cl_page *page;
+ int rc = 0;
+ int bytes = 0;
+ unsigned int npages = vio->u.write.vui_queue.pl_nr;
+
+ if (npages == 0)
+ return 0;
+
+ CDEBUG(D_VFSTRACE, "commit async pages: %d, from %d, to %d\n",
+ npages, vio->u.write.vui_from, vio->u.write.vui_to);
+
+ LASSERT(page_list_sanity_check(obj, queue));
+
+ /* submit IO with async write */
+ rc = cl_io_commit_async(env, io, queue,
+ vio->u.write.vui_from, vio->u.write.vui_to,
+ write_commit_callback);
+ npages -= queue->pl_nr; /* already committed pages */
+ if (npages > 0) {
+ /* calculate how many bytes were written */
+ bytes = npages << PAGE_SHIFT;
+
+ /* first page */
+ bytes -= vio->u.write.vui_from;
+ if (queue->pl_nr == 0) /* last page */
+ bytes -= PAGE_SIZE - vio->u.write.vui_to;
+ LASSERTF(bytes > 0, "bytes = %d, pages = %d\n", bytes, npages);
+
+ vio->u.write.vui_written += bytes;
+
+ CDEBUG(D_VFSTRACE, "Committed %d pages %d bytes, tot: %ld\n",
+ npages, bytes, vio->u.write.vui_written);
+
+ /* the first page must have been written. */
+ vio->u.write.vui_from = 0;
+ }
+ LASSERT(page_list_sanity_check(obj, queue));
+ LASSERT(ergo(rc == 0, queue->pl_nr == 0));
+
+ /* out of quota, try sync write */
+ if (rc == -EDQUOT && !cl_io_is_mkwrite(io)) {
+ rc = vvp_io_commit_sync(env, io, queue,
+ vio->u.write.vui_from,
+ vio->u.write.vui_to);
+ if (rc > 0) {
+ vio->u.write.vui_written += rc;
+ rc = 0;
+ }
+ }
+
+ /* update inode size */
+ ll_merge_attr(env, inode);
+
+ /* The pages remaining in the queue failed to commit; discard them
+ * unless they were dirtied before.
+ */
+ while (queue->pl_nr > 0) {
+ page = cl_page_list_first(queue);
+ cl_page_list_del(env, queue, page);
+
+ if (!PageDirty(cl_page_vmpage(page)))
+ cl_page_discard(env, io, page);
+
+ cl_page_disown(env, io, page);
+
+ /* held in ll_cl_init() */
+ lu_ref_del(&page->cp_reference, "cl_io", io);
+ cl_page_put(env, page);
+ }
+ cl_page_list_fini(env, queue);
+
+ return rc;
}
static int vvp_io_write_start(const struct lu_env *env,
const struct cl_io_slice *ios)
{
- struct ccc_io *cio = cl2ccc_io(env, ios);
+ struct vvp_io *vio = cl2vvp_io(env, ios);
struct cl_io *io = ios->cis_io;
struct cl_object *obj = io->ci_obj;
- struct inode *inode = ccc_object_inode(obj);
+ struct inode *inode = vvp_object_inode(obj);
ssize_t result = 0;
loff_t pos = io->u.ci_wr.wr.crw_pos;
size_t cnt = io->u.ci_wr.wr.crw_count;
@@ -582,25 +953,41 @@ static int vvp_io_write_start(const struct lu_env *env,
* PARALLEL IO This has to be changed for parallel IO doing
* out-of-order writes.
*/
+ ll_merge_attr(env, inode);
pos = io->u.ci_wr.wr.crw_pos = i_size_read(inode);
- cio->cui_iocb->ki_pos = pos;
+ vio->vui_iocb->ki_pos = pos;
} else {
- LASSERT(cio->cui_iocb->ki_pos == pos);
+ LASSERT(vio->vui_iocb->ki_pos == pos);
}
CDEBUG(D_VFSTRACE, "write: [%lli, %lli)\n", pos, pos + (long long)cnt);
- if (!cio->cui_iter) /* from a temp io in ll_cl_init(). */
+ if (!vio->vui_iter) /* from a temp io in ll_cl_init(). */
result = 0;
else
- result = generic_file_write_iter(cio->cui_iocb, cio->cui_iter);
+ result = generic_file_write_iter(vio->vui_iocb, vio->vui_iter);
+
+ if (result > 0) {
+ result = vvp_io_write_commit(env, io);
+ if (vio->u.write.vui_written > 0) {
+ result = vio->u.write.vui_written;
+ io->ci_nob += result;
+ CDEBUG(D_VFSTRACE, "write: nob %zd, result: %zd\n",
+ io->ci_nob, result);
+ }
+ }
if (result > 0) {
+ struct ll_inode_info *lli = ll_i2info(inode);
+
+ spin_lock(&lli->lli_lock);
+ lli->lli_flags |= LLIF_DATA_MODIFIED;
+ spin_unlock(&lli->lli_lock);
+
if (result < cnt)
io->ci_continue = 0;
- io->ci_nob += result;
ll_rw_stats_tally(ll_i2sbi(inode), current->pid,
- cio->cui_fd, pos, result, WRITE);
+ vio->vui_fd, pos, result, WRITE);
result = 0;
}
return result;
@@ -608,10 +995,10 @@ static int vvp_io_write_start(const struct lu_env *env,
static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
{
- struct vm_fault *vmf = cfio->fault.ft_vmf;
+ struct vm_fault *vmf = cfio->ft_vmf;
- cfio->fault.ft_flags = filemap_fault(cfio->ft_vma, vmf);
- cfio->fault.ft_flags_valid = 1;
+ cfio->ft_flags = filemap_fault(cfio->ft_vma, vmf);
+ cfio->ft_flags_valid = 1;
if (vmf->page) {
CDEBUG(D_PAGE,
@@ -619,39 +1006,51 @@ static int vvp_io_kernel_fault(struct vvp_fault_io *cfio)
vmf->page, vmf->page->mapping, vmf->page->index,
(long)vmf->page->flags, page_count(vmf->page),
page_private(vmf->page), vmf->virtual_address);
- if (unlikely(!(cfio->fault.ft_flags & VM_FAULT_LOCKED))) {
+ if (unlikely(!(cfio->ft_flags & VM_FAULT_LOCKED))) {
lock_page(vmf->page);
- cfio->fault.ft_flags |= VM_FAULT_LOCKED;
+ cfio->ft_flags |= VM_FAULT_LOCKED;
}
cfio->ft_vmpage = vmf->page;
return 0;
}
- if (cfio->fault.ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
+ if (cfio->ft_flags & (VM_FAULT_SIGBUS | VM_FAULT_SIGSEGV)) {
CDEBUG(D_PAGE, "got addr %p - SIGBUS\n", vmf->virtual_address);
return -EFAULT;
}
- if (cfio->fault.ft_flags & VM_FAULT_OOM) {
+ if (cfio->ft_flags & VM_FAULT_OOM) {
CDEBUG(D_PAGE, "got addr %p - OOM\n", vmf->virtual_address);
return -ENOMEM;
}
- if (cfio->fault.ft_flags & VM_FAULT_RETRY)
+ if (cfio->ft_flags & VM_FAULT_RETRY)
return -EAGAIN;
- CERROR("Unknown error in page fault %d!\n", cfio->fault.ft_flags);
+ CERROR("Unknown error in page fault %d!\n", cfio->ft_flags);
return -EINVAL;
}
+static void mkwrite_commit_callback(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page)
+{
+ struct vvp_page *vpg;
+ struct cl_object *clob = cl_io_top(io)->ci_obj;
+
+ set_page_dirty(page->cp_vmpage);
+
+ vpg = cl2vvp_page(cl_object_page_slice(clob, page));
+ vvp_write_pending(cl2vvp(clob), vpg);
+}
+
static int vvp_io_fault_start(const struct lu_env *env,
const struct cl_io_slice *ios)
{
struct vvp_io *vio = cl2vvp_io(env, ios);
struct cl_io *io = ios->cis_io;
struct cl_object *obj = io->ci_obj;
- struct inode *inode = ccc_object_inode(obj);
+ struct inode *inode = vvp_object_inode(obj);
struct cl_fault_io *fio = &io->u.ci_fault;
struct vvp_fault_io *cfio = &vio->u.fault;
loff_t offset;
@@ -659,7 +1058,7 @@ static int vvp_io_fault_start(const struct lu_env *env,
struct page *vmpage = NULL;
struct cl_page *page;
loff_t size;
- pgoff_t last; /* last page in a file data region */
+ pgoff_t last_index;
if (fio->ft_executable &&
inode->i_mtime.tv_sec != vio->u.fault.ft_mtime)
@@ -670,7 +1069,7 @@ static int vvp_io_fault_start(const struct lu_env *env,
/* offset of the last byte on the page */
offset = cl_offset(obj, fio->ft_index + 1) - 1;
LASSERT(cl_index(obj, offset) == fio->ft_index);
- result = ccc_prep_size(env, obj, io, 0, offset + 1, NULL);
+ result = vvp_prep_size(env, obj, io, 0, offset + 1, NULL);
if (result != 0)
return result;
@@ -705,15 +1104,15 @@ static int vvp_io_fault_start(const struct lu_env *env,
goto out;
}
+ last_index = cl_index(obj, size - 1);
+
if (fio->ft_mkwrite) {
- pgoff_t last_index;
/*
* Capture the size while holding the lli_trunc_sem from above;
* we want to make sure that we complete the mkwrite action
* while holding this lock. We need to make sure that we are
* not past the end of the file.
*/
- last_index = cl_index(obj, size - 1);
if (last_index < fio->ft_index) {
CDEBUG(D_PAGE,
"llite: mkwrite and truncate race happened: %p: 0x%lx 0x%lx\n",
@@ -745,25 +1144,32 @@ static int vvp_io_fault_start(const struct lu_env *env,
*/
if (fio->ft_mkwrite) {
wait_on_page_writeback(vmpage);
- if (set_page_dirty(vmpage)) {
- struct ccc_page *cp;
+ if (!PageDirty(vmpage)) {
+ struct cl_page_list *plist = &io->ci_queue.c2_qin;
+ struct vvp_page *vpg = cl_object_page_slice(obj, page);
+ int to = PAGE_SIZE;
/* vvp_page_assume() calls wait_on_page_writeback(). */
cl_page_assume(env, io, page);
- cp = cl2ccc_page(cl_page_at(page, &vvp_device_type));
- vvp_write_pending(cl2ccc(obj), cp);
+ cl_page_list_init(plist);
+ cl_page_list_add(plist, page);
+
+ /* size fixup */
+ if (last_index == vvp_index(vpg))
+ to = size & ~PAGE_MASK;
/* Do not set Dirty bit here so that in case IO is
* started before the page is really made dirty, we
* still have chance to detect it.
*/
- result = cl_page_cache_add(env, io, page, CRT_WRITE);
+ result = cl_io_commit_async(env, io, plist, 0, to,
+ mkwrite_commit_callback);
LASSERT(cl_page_is_owned(page, io));
+ cl_page_list_fini(env, plist);
vmpage = NULL;
if (result < 0) {
- cl_page_unmap(env, io, page);
cl_page_discard(env, io, page);
cl_page_disown(env, io, page);
@@ -773,20 +1179,20 @@ static int vvp_io_fault_start(const struct lu_env *env,
if (result == -EDQUOT)
result = -ENOSPC;
goto out;
- } else
+ } else {
cl_page_disown(env, io, page);
+ }
}
}
- last = cl_index(obj, size - 1);
/*
* The ft_index is only used in the case of
* a mkwrite action. We need to check
* our assertions are correct, since
* we should have caught this above
*/
- LASSERT(!fio->ft_mkwrite || fio->ft_index <= last);
- if (fio->ft_index == last)
+ LASSERT(!fio->ft_mkwrite || fio->ft_index <= last_index);
+ if (fio->ft_index == last_index)
/*
* Last page is mapped partially.
*/
@@ -801,7 +1207,9 @@ out:
/* return unlocked vmpage to avoid deadlocking */
if (vmpage)
unlock_page(vmpage);
- cfio->fault.ft_flags &= ~VM_FAULT_LOCKED;
+
+ cfio->ft_flags &= ~VM_FAULT_LOCKED;
+
return result;
}
@@ -820,293 +1228,58 @@ static int vvp_io_read_page(const struct lu_env *env,
const struct cl_page_slice *slice)
{
struct cl_io *io = ios->cis_io;
- struct cl_object *obj = slice->cpl_obj;
- struct ccc_page *cp = cl2ccc_page(slice);
+ struct vvp_page *vpg = cl2vvp_page(slice);
struct cl_page *page = slice->cpl_page;
- struct inode *inode = ccc_object_inode(obj);
+ struct inode *inode = vvp_object_inode(slice->cpl_obj);
struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ll_file_data *fd = cl2ccc_io(env, ios)->cui_fd;
+ struct ll_file_data *fd = cl2vvp_io(env, ios)->vui_fd;
struct ll_readahead_state *ras = &fd->fd_ras;
- struct page *vmpage = cp->cpg_page;
struct cl_2queue *queue = &io->ci_queue;
- int rc;
-
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
- LASSERT(slice->cpl_obj == obj);
if (sbi->ll_ra_info.ra_max_pages_per_file &&
sbi->ll_ra_info.ra_max_pages)
- ras_update(sbi, inode, ras, page->cp_index,
- cp->cpg_defer_uptodate);
-
- /* Sanity check whether the page is protected by a lock. */
- rc = cl_page_is_under_lock(env, io, page);
- if (rc != -EBUSY) {
- CL_PAGE_HEADER(D_WARNING, env, page, "%s: %d\n",
- rc == -ENODATA ? "without a lock" :
- "match failed", rc);
- if (rc != -ENODATA)
- return rc;
- }
+ ras_update(sbi, inode, ras, vvp_index(vpg),
+ vpg->vpg_defer_uptodate);
- if (cp->cpg_defer_uptodate) {
- cp->cpg_ra_used = 1;
+ if (vpg->vpg_defer_uptodate) {
+ vpg->vpg_ra_used = 1;
cl_page_export(env, page, 1);
}
/*
* Add page into the queue even when it is marked uptodate above.
* this will unlock it automatically as part of cl_page_list_disown().
*/
+
cl_page_list_add(&queue->c2_qin, page);
if (sbi->ll_ra_info.ra_max_pages_per_file &&
sbi->ll_ra_info.ra_max_pages)
- ll_readahead(env, io, ras,
- vmpage->mapping, &queue->c2_qin, fd->fd_flags);
+ ll_readahead(env, io, &queue->c2_qin, ras,
+ vpg->vpg_defer_uptodate);
return 0;
}
-static int vvp_page_sync_io(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, struct ccc_page *cp,
- enum cl_req_type crt)
+void vvp_io_end(const struct lu_env *env, const struct cl_io_slice *ios)
{
- struct cl_2queue *queue;
- int result;
-
- LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
-
- queue = &io->ci_queue;
- cl_2queue_init_page(queue, page);
-
- result = cl_io_submit_sync(env, io, crt, queue, 0);
- LASSERT(cl_page_is_owned(page, io));
-
- if (crt == CRT_READ)
- /*
- * in CRT_WRITE case page is left locked even in case of
- * error.
- */
- cl_page_list_disown(env, io, &queue->c2_qin);
- cl_2queue_fini(env, queue);
-
- return result;
-}
-
-/**
- * Prepare partially written-to page for a write.
- */
-static int vvp_io_prepare_partial(const struct lu_env *env, struct cl_io *io,
- struct cl_object *obj, struct cl_page *pg,
- struct ccc_page *cp,
- unsigned from, unsigned to)
-{
- struct cl_attr *attr = ccc_env_thread_attr(env);
- loff_t offset = cl_offset(obj, pg->cp_index);
- int result;
-
- cl_object_attr_lock(obj);
- result = cl_object_attr_get(env, obj, attr);
- cl_object_attr_unlock(obj);
- if (result == 0) {
- /*
- * If are writing to a new page, no need to read old data.
- * The extent locking will have updated the KMS, and for our
- * purposes here we can treat it like i_size.
- */
- if (attr->cat_kms <= offset) {
- char *kaddr = kmap_atomic(cp->cpg_page);
-
- memset(kaddr, 0, cl_page_size(obj));
- kunmap_atomic(kaddr);
- } else if (cp->cpg_defer_uptodate)
- cp->cpg_ra_used = 1;
- else
- result = vvp_page_sync_io(env, io, pg, cp, CRT_READ);
- /*
- * In older implementations, obdo_refresh_inode is called here
- * to update the inode because the write might modify the
- * object info at OST. However, this has been proven useless,
- * since LVB functions will be called when user space program
- * tries to retrieve inode attribute. Also, see bug 15909 for
- * details. -jay
- */
- if (result == 0)
- cl_page_export(env, pg, 1);
- }
- return result;
-}
-
-static int vvp_io_prepare_write(const struct lu_env *env,
- const struct cl_io_slice *ios,
- const struct cl_page_slice *slice,
- unsigned from, unsigned to)
-{
- struct cl_object *obj = slice->cpl_obj;
- struct ccc_page *cp = cl2ccc_page(slice);
- struct cl_page *pg = slice->cpl_page;
- struct page *vmpage = cp->cpg_page;
-
- int result;
-
- LINVRNT(cl_page_is_vmlocked(env, pg));
- LASSERT(vmpage->mapping->host == ccc_object_inode(obj));
-
- result = 0;
-
- CL_PAGE_HEADER(D_PAGE, env, pg, "preparing: [%d, %d]\n", from, to);
- if (!PageUptodate(vmpage)) {
- /*
- * We're completely overwriting an existing page, so _don't_
- * set it up to date until commit_write
- */
- if (from == 0 && to == PAGE_SIZE) {
- CL_PAGE_HEADER(D_PAGE, env, pg, "full page write\n");
- POISON_PAGE(page, 0x11);
- } else
- result = vvp_io_prepare_partial(env, ios->cis_io, obj,
- pg, cp, from, to);
- } else
- CL_PAGE_HEADER(D_PAGE, env, pg, "uptodate\n");
- return result;
-}
-
-static int vvp_io_commit_write(const struct lu_env *env,
- const struct cl_io_slice *ios,
- const struct cl_page_slice *slice,
- unsigned from, unsigned to)
-{
- struct cl_object *obj = slice->cpl_obj;
- struct cl_io *io = ios->cis_io;
- struct ccc_page *cp = cl2ccc_page(slice);
- struct cl_page *pg = slice->cpl_page;
- struct inode *inode = ccc_object_inode(obj);
- struct ll_sb_info *sbi = ll_i2sbi(inode);
- struct ll_inode_info *lli = ll_i2info(inode);
- struct page *vmpage = cp->cpg_page;
-
- int result;
- int tallyop;
- loff_t size;
-
- LINVRNT(cl_page_is_vmlocked(env, pg));
- LASSERT(vmpage->mapping->host == inode);
-
- LU_OBJECT_HEADER(D_INODE, env, &obj->co_lu, "committing page write\n");
- CL_PAGE_HEADER(D_PAGE, env, pg, "committing: [%d, %d]\n", from, to);
-
- /*
- * queue a write for some time in the future the first time we
- * dirty the page.
- *
- * This is different from what other file systems do: they usually
- * just mark page (and some of its buffers) dirty and rely on
- * balance_dirty_pages() to start a write-back. Lustre wants write-back
- * to be started earlier for the following reasons:
- *
- * (1) with a large number of clients we need to limit the amount
- * of cached data on the clients a lot;
- *
- * (2) large compute jobs generally want compute-only then io-only
- * and the IO should complete as quickly as possible;
- *
- * (3) IO is batched up to the RPC size and is async until the
- * client max cache is hit
- * (/sys/fs/lustre/osc/OSC.../max_dirty_mb)
- *
- */
- if (!PageDirty(vmpage)) {
- tallyop = LPROC_LL_DIRTY_MISSES;
- result = cl_page_cache_add(env, io, pg, CRT_WRITE);
- if (result == 0) {
- /* page was added into cache successfully. */
- set_page_dirty(vmpage);
- vvp_write_pending(cl2ccc(obj), cp);
- } else if (result == -EDQUOT) {
- pgoff_t last_index = i_size_read(inode) >> PAGE_SHIFT;
- bool need_clip = true;
-
- /*
- * Client ran out of disk space grant. Possible
- * strategies are:
- *
- * (a) do a sync write, renewing grant;
- *
- * (b) stop writing on this stripe, switch to the
- * next one.
- *
- * (b) is a part of "parallel io" design that is the
- * ultimate goal. (a) is what "old" client did, and
- * what the new code continues to do for the time
- * being.
- */
- if (last_index > pg->cp_index) {
- to = PAGE_SIZE;
- need_clip = false;
- } else if (last_index == pg->cp_index) {
- int size_to = i_size_read(inode) & ~CFS_PAGE_MASK;
-
- if (to < size_to)
- to = size_to;
- }
- if (need_clip)
- cl_page_clip(env, pg, 0, to);
- result = vvp_page_sync_io(env, io, pg, cp, CRT_WRITE);
- if (result)
- CERROR("Write page %lu of inode %p failed %d\n",
- pg->cp_index, inode, result);
- }
- } else {
- tallyop = LPROC_LL_DIRTY_HITS;
- result = 0;
- }
- ll_stats_ops_tally(sbi, tallyop, 1);
-
- /* Inode should be marked DIRTY even if no new page was marked DIRTY
- * because page could have been not flushed between 2 modifications.
- * It is important the file is marked DIRTY as soon as the I/O is done
- * Indeed, when cache is flushed, file could be already closed and it
- * is too late to warn the MDT.
- * It is acceptable that file is marked DIRTY even if I/O is dropped
- * for some reasons before being flushed to OST.
- */
- if (result == 0) {
- spin_lock(&lli->lli_lock);
- lli->lli_flags |= LLIF_DATA_MODIFIED;
- spin_unlock(&lli->lli_lock);
- }
-
- size = cl_offset(obj, pg->cp_index) + to;
-
- ll_inode_size_lock(inode);
- if (result == 0) {
- if (size > i_size_read(inode)) {
- cl_isize_write_nolock(inode, size);
- CDEBUG(D_VFSTRACE, DFID" updating i_size %lu\n",
- PFID(lu_object_fid(&obj->co_lu)),
- (unsigned long)size);
- }
- cl_page_export(env, pg, 1);
- } else {
- if (size > i_size_read(inode))
- cl_page_discard(env, io, pg);
- }
- ll_inode_size_unlock(inode);
- return result;
+ CLOBINVRNT(env, ios->cis_io->ci_obj,
+ vvp_object_invariant(ios->cis_io->ci_obj));
}
static const struct cl_io_operations vvp_io_ops = {
.op = {
[CIT_READ] = {
- .cio_fini = vvp_io_read_fini,
+ .cio_fini = vvp_io_fini,
.cio_lock = vvp_io_read_lock,
.cio_start = vvp_io_read_start,
- .cio_advance = ccc_io_advance
+ .cio_advance = vvp_io_advance,
},
[CIT_WRITE] = {
.cio_fini = vvp_io_fini,
+ .cio_iter_init = vvp_io_write_iter_init,
+ .cio_iter_fini = vvp_io_write_iter_fini,
.cio_lock = vvp_io_write_lock,
.cio_start = vvp_io_write_start,
- .cio_advance = ccc_io_advance
+ .cio_advance = vvp_io_advance,
},
[CIT_SETATTR] = {
.cio_fini = vvp_io_setattr_fini,
@@ -1120,7 +1293,7 @@ static const struct cl_io_operations vvp_io_ops = {
.cio_iter_init = vvp_io_fault_iter_init,
.cio_lock = vvp_io_fault_lock,
.cio_start = vvp_io_fault_start,
- .cio_end = ccc_io_end
+ .cio_end = vvp_io_end,
},
[CIT_FSYNC] = {
.cio_start = vvp_io_fsync_start,
@@ -1131,29 +1304,26 @@ static const struct cl_io_operations vvp_io_ops = {
}
},
.cio_read_page = vvp_io_read_page,
- .cio_prepare_write = vvp_io_prepare_write,
- .cio_commit_write = vvp_io_commit_write
};
int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
struct cl_io *io)
{
struct vvp_io *vio = vvp_env_io(env);
- struct ccc_io *cio = ccc_env_io(env);
- struct inode *inode = ccc_object_inode(obj);
+ struct inode *inode = vvp_object_inode(obj);
int result;
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
+ CLOBINVRNT(env, obj, vvp_object_invariant(obj));
CDEBUG(D_VFSTRACE, DFID
" ignore/verify layout %d/%d, layout version %d restore needed %d\n",
PFID(lu_object_fid(&obj->co_lu)),
io->ci_ignore_layout, io->ci_verify_layout,
- cio->cui_layout_gen, io->ci_restore_needed);
+ vio->vui_layout_gen, io->ci_restore_needed);
- CL_IO_SLICE_CLEAN(cio, cui_cl);
- cl_io_slice_add(io, &cio->cui_cl, obj, &vvp_io_ops);
- vio->cui_ra_window_set = 0;
+ CL_IO_SLICE_CLEAN(vio, vui_cl);
+ cl_io_slice_add(io, &vio->vui_cl, obj, &vvp_io_ops);
+ vio->vui_ra_valid = false;
result = 0;
if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE) {
size_t count;
@@ -1166,7 +1336,7 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
if (count == 0)
result = 1;
else
- cio->cui_tot_count = count;
+ vio->vui_tot_count = count;
/* for read/write, we store the jobid in the inode, and
* it'll be fetched by osc when building RPC.
@@ -1192,7 +1362,7 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
* because it might not grant layout lock in IT_OPEN.
*/
if (result == 0 && !io->ci_ignore_layout) {
- result = ll_layout_refresh(inode, &cio->cui_layout_gen);
+ result = ll_layout_refresh(inode, &vio->vui_layout_gen);
if (result == -ENOENT)
/* If the inode on MDS has been removed, but the objects
* on OSTs haven't been destroyed (async unlink), layout
@@ -1208,11 +1378,3 @@ int vvp_io_init(const struct lu_env *env, struct cl_object *obj,
return result;
}
-
-static struct vvp_io *cl2vvp_io(const struct lu_env *env,
- const struct cl_io_slice *slice)
-{
- /* Calling just for assertion */
- cl2ccc_io(env, slice);
- return vvp_env_io(env);
-}
diff --git a/drivers/staging/lustre/lustre/llite/vvp_lock.c b/drivers/staging/lustre/lustre/llite/vvp_lock.c
index ff0948043c7a..f5bd6c22e112 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_lock.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_lock.c
@@ -40,7 +40,7 @@
#define DEBUG_SUBSYSTEM S_LLITE
-#include "../include/obd.h"
+#include "../include/obd_support.h"
#include "../include/lustre_lite.h"
#include "vvp_internal.h"
@@ -51,36 +51,41 @@
*
*/
-/**
- * Estimates lock value for the purpose of managing the lock cache during
- * memory shortages.
- *
- * Locks for memory mapped files are almost infinitely precious, others are
- * junk. "Mapped locks" are heavy, but not infinitely heavy, so that they are
- * ordered within themselves by weights assigned from other layers.
- */
-static unsigned long vvp_lock_weigh(const struct lu_env *env,
- const struct cl_lock_slice *slice)
+static void vvp_lock_fini(const struct lu_env *env, struct cl_lock_slice *slice)
+{
+ struct vvp_lock *vlk = cl2vvp_lock(slice);
+
+ kmem_cache_free(vvp_lock_kmem, vlk);
+}
+
+static int vvp_lock_enqueue(const struct lu_env *env,
+ const struct cl_lock_slice *slice,
+ struct cl_io *unused, struct cl_sync_io *anchor)
{
- struct ccc_object *cob = cl2ccc(slice->cls_obj);
+ CLOBINVRNT(env, slice->cls_obj, vvp_object_invariant(slice->cls_obj));
- return atomic_read(&cob->cob_mmap_cnt) > 0 ? ~0UL >> 2 : 0;
+ return 0;
}
static const struct cl_lock_operations vvp_lock_ops = {
- .clo_delete = ccc_lock_delete,
- .clo_fini = ccc_lock_fini,
- .clo_enqueue = ccc_lock_enqueue,
- .clo_wait = ccc_lock_wait,
- .clo_use = ccc_lock_use,
- .clo_unuse = ccc_lock_unuse,
- .clo_fits_into = ccc_lock_fits_into,
- .clo_state = ccc_lock_state,
- .clo_weigh = vvp_lock_weigh
+ .clo_fini = vvp_lock_fini,
+ .clo_enqueue = vvp_lock_enqueue,
};
int vvp_lock_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_lock *lock, const struct cl_io *io)
+ struct cl_lock *lock, const struct cl_io *unused)
{
- return ccc_lock_init(env, obj, lock, io, &vvp_lock_ops);
+ struct vvp_lock *vlk;
+ int result;
+
+ CLOBINVRNT(env, obj, vvp_object_invariant(obj));
+
+ vlk = kmem_cache_zalloc(vvp_lock_kmem, GFP_NOFS);
+ if (vlk) {
+ cl_lock_slice_add(lock, &vlk->vlk_cl, obj, &vvp_lock_ops);
+ result = 0;
+ } else {
+ result = -ENOMEM;
+ }
+ return result;
}
diff --git a/drivers/staging/lustre/lustre/llite/vvp_object.c b/drivers/staging/lustre/lustre/llite/vvp_object.c
index 03c887d8ed83..18c9df7ebdda 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_object.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_object.c
@@ -45,6 +45,7 @@
#include "../include/obd.h"
#include "../include/lustre_lite.h"
+#include "llite_internal.h"
#include "vvp_internal.h"
/*****************************************************************************
@@ -53,16 +54,25 @@
*
*/
+int vvp_object_invariant(const struct cl_object *obj)
+{
+ struct inode *inode = vvp_object_inode(obj);
+ struct ll_inode_info *lli = ll_i2info(inode);
+
+ return (S_ISREG(inode->i_mode) || inode->i_mode == 0) &&
+ lli->lli_clob == obj;
+}
+
static int vvp_object_print(const struct lu_env *env, void *cookie,
lu_printer_t p, const struct lu_object *o)
{
- struct ccc_object *obj = lu2ccc(o);
- struct inode *inode = obj->cob_inode;
+ struct vvp_object *obj = lu2vvp(o);
+ struct inode *inode = obj->vob_inode;
struct ll_inode_info *lli;
(*p)(env, cookie, "(%s %d %d) inode: %p ",
- list_empty(&obj->cob_pending_list) ? "-" : "+",
- obj->cob_transient_pages, atomic_read(&obj->cob_mmap_cnt),
+ list_empty(&obj->vob_pending_list) ? "-" : "+",
+ obj->vob_transient_pages, atomic_read(&obj->vob_mmap_cnt),
inode);
if (inode) {
lli = ll_i2info(inode);
@@ -77,7 +87,7 @@ static int vvp_object_print(const struct lu_env *env, void *cookie,
static int vvp_attr_get(const struct lu_env *env, struct cl_object *obj,
struct cl_attr *attr)
{
- struct inode *inode = ccc_object_inode(obj);
+ struct inode *inode = vvp_object_inode(obj);
/*
* lov overwrites most of these fields in
@@ -99,7 +109,7 @@ static int vvp_attr_get(const struct lu_env *env, struct cl_object *obj,
static int vvp_attr_set(const struct lu_env *env, struct cl_object *obj,
const struct cl_attr *attr, unsigned valid)
{
- struct inode *inode = ccc_object_inode(obj);
+ struct inode *inode = vvp_object_inode(obj);
if (valid & CAT_UID)
inode->i_uid = make_kuid(&init_user_ns, attr->cat_uid);
@@ -112,7 +122,7 @@ static int vvp_attr_set(const struct lu_env *env, struct cl_object *obj,
if (valid & CAT_CTIME)
inode->i_ctime.tv_sec = attr->cat_ctime;
if (0 && valid & CAT_SIZE)
- cl_isize_write_nolock(inode, attr->cat_size);
+ i_size_write(inode, attr->cat_size);
/* not currently necessary */
if (0 && valid & (CAT_UID|CAT_GID|CAT_SIZE))
mark_inode_dirty(inode);
@@ -165,6 +175,40 @@ static int vvp_conf_set(const struct lu_env *env, struct cl_object *obj,
return 0;
}
+static int vvp_prune(const struct lu_env *env, struct cl_object *obj)
+{
+ struct inode *inode = vvp_object_inode(obj);
+ int rc;
+
+ rc = cl_sync_file_range(inode, 0, OBD_OBJECT_EOF, CL_FSYNC_LOCAL, 1);
+ if (rc < 0) {
+ CDEBUG(D_VFSTRACE, DFID ": writeback failed: %d\n",
+ PFID(lu_object_fid(&obj->co_lu)), rc);
+ return rc;
+ }
+
+ truncate_inode_pages(inode->i_mapping, 0);
+ return 0;
+}
+
+static int vvp_object_glimpse(const struct lu_env *env,
+ const struct cl_object *obj, struct ost_lvb *lvb)
+{
+ struct inode *inode = vvp_object_inode(obj);
+
+ lvb->lvb_mtime = LTIME_S(inode->i_mtime);
+ lvb->lvb_atime = LTIME_S(inode->i_atime);
+ lvb->lvb_ctime = LTIME_S(inode->i_ctime);
+ /*
+ * LU-417: Add the dirty page block count lest i_blocks report 0;
+ * otherwise "cp" or "tar" on a remote node may think it's a completely
+ * sparse file and skip it.
+ */
+ if (lvb->lvb_size > 0 && lvb->lvb_blocks == 0)
+ lvb->lvb_blocks = dirty_cnt(inode);
+ return 0;
+}
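The LU-417 fix-up in vvp_object_glimpse() can be modelled in a few lines: if the file has a non-zero size but no blocks reported yet, advertise the client's dirty page count as blocks so a remote "cp" or "tar" does not treat the file as entirely sparse. Struct and function names below are illustrative only.

#include <stdio.h>

struct lvb_model {
	long long size;
	long long blocks;
};

static void fixup_blocks(struct lvb_model *lvb, long long dirty_pages)
{
	if (lvb->size > 0 && lvb->blocks == 0)
		lvb->blocks = dirty_pages;
}

int main(void)
{
	struct lvb_model lvb = { .size = 1 << 20, .blocks = 0 };

	fixup_blocks(&lvb, 256);	/* 256 dirty pages still cached on the client */
	printf("blocks reported: %lld\n", lvb.blocks);
	return 0;
}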
+
static const struct cl_object_operations vvp_ops = {
.coo_page_init = vvp_page_init,
.coo_lock_init = vvp_lock_init,
@@ -172,29 +216,94 @@ static const struct cl_object_operations vvp_ops = {
.coo_attr_get = vvp_attr_get,
.coo_attr_set = vvp_attr_set,
.coo_conf_set = vvp_conf_set,
- .coo_glimpse = ccc_object_glimpse
+ .coo_prune = vvp_prune,
+ .coo_glimpse = vvp_object_glimpse
};
+static int vvp_object_init0(const struct lu_env *env,
+ struct vvp_object *vob,
+ const struct cl_object_conf *conf)
+{
+ vob->vob_inode = conf->coc_inode;
+ vob->vob_transient_pages = 0;
+ cl_object_page_init(&vob->vob_cl, sizeof(struct vvp_page));
+ return 0;
+}
+
+static int vvp_object_init(const struct lu_env *env, struct lu_object *obj,
+ const struct lu_object_conf *conf)
+{
+ struct vvp_device *dev = lu2vvp_dev(obj->lo_dev);
+ struct vvp_object *vob = lu2vvp(obj);
+ struct lu_object *below;
+ struct lu_device *under;
+ int result;
+
+ under = &dev->vdv_next->cd_lu_dev;
+ below = under->ld_ops->ldo_object_alloc(env, obj->lo_header, under);
+ if (below) {
+ const struct cl_object_conf *cconf;
+
+ cconf = lu2cl_conf(conf);
+ INIT_LIST_HEAD(&vob->vob_pending_list);
+ lu_object_add(obj, below);
+ result = vvp_object_init0(env, vob, cconf);
+ } else {
+ result = -ENOMEM;
+ }
+
+ return result;
+}
+
+static void vvp_object_free(const struct lu_env *env, struct lu_object *obj)
+{
+ struct vvp_object *vob = lu2vvp(obj);
+
+ lu_object_fini(obj);
+ lu_object_header_fini(obj->lo_header);
+ kmem_cache_free(vvp_object_kmem, vob);
+}
+
static const struct lu_object_operations vvp_lu_obj_ops = {
- .loo_object_init = ccc_object_init,
- .loo_object_free = ccc_object_free,
- .loo_object_print = vvp_object_print
+ .loo_object_init = vvp_object_init,
+ .loo_object_free = vvp_object_free,
+ .loo_object_print = vvp_object_print,
};
-struct ccc_object *cl_inode2ccc(struct inode *inode)
+struct vvp_object *cl_inode2vvp(struct inode *inode)
{
- struct cl_inode_info *lli = cl_i2info(inode);
+ struct ll_inode_info *lli = ll_i2info(inode);
struct cl_object *obj = lli->lli_clob;
struct lu_object *lu;
lu = lu_object_locate(obj->co_lu.lo_header, &vvp_device_type);
LASSERT(lu);
- return lu2ccc(lu);
+ return lu2vvp(lu);
}
struct lu_object *vvp_object_alloc(const struct lu_env *env,
- const struct lu_object_header *hdr,
+ const struct lu_object_header *unused,
struct lu_device *dev)
{
- return ccc_object_alloc(env, hdr, dev, &vvp_ops, &vvp_lu_obj_ops);
+ struct vvp_object *vob;
+ struct lu_object *obj;
+
+ vob = kmem_cache_zalloc(vvp_object_kmem, GFP_NOFS);
+ if (vob) {
+ struct cl_object_header *hdr;
+
+ obj = &vob->vob_cl.co_lu;
+ hdr = &vob->vob_header;
+ cl_object_header_init(hdr);
+ hdr->coh_page_bufsize = cfs_size_round(sizeof(struct cl_page));
+
+ lu_object_init(obj, &hdr->coh_lu, dev);
+ lu_object_add_top(&hdr->coh_lu, obj);
+
+ vob->vob_cl.co_ops = &vvp_ops;
+ obj->lo_ops = &vvp_lu_obj_ops;
+ } else {
+ obj = NULL;
+ }
+ return obj;
}
diff --git a/drivers/staging/lustre/lustre/llite/vvp_page.c b/drivers/staging/lustre/lustre/llite/vvp_page.c
index 33ca3eb34965..6cd2af7a958f 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_page.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_page.c
@@ -41,9 +41,16 @@
#define DEBUG_SUBSYSTEM S_LLITE
-#include "../include/obd.h"
+#include <linux/atomic.h>
+#include <linux/bitops.h>
+#include <linux/mm.h>
+#include <linux/mutex.h>
+#include <linux/page-flags.h>
+#include <linux/pagemap.h>
+
#include "../include/lustre_lite.h"
+#include "llite_internal.h"
#include "vvp_internal.h"
/*****************************************************************************
@@ -52,9 +59,9 @@
*
*/
-static void vvp_page_fini_common(struct ccc_page *cp)
+static void vvp_page_fini_common(struct vvp_page *vpg)
{
- struct page *vmpage = cp->cpg_page;
+ struct page *vmpage = vpg->vpg_page;
LASSERT(vmpage);
put_page(vmpage);
@@ -63,23 +70,23 @@ static void vvp_page_fini_common(struct ccc_page *cp)
static void vvp_page_fini(const struct lu_env *env,
struct cl_page_slice *slice)
{
- struct ccc_page *cp = cl2ccc_page(slice);
- struct page *vmpage = cp->cpg_page;
+ struct vvp_page *vpg = cl2vvp_page(slice);
+ struct page *vmpage = vpg->vpg_page;
/*
* vmpage->private was already cleared when page was moved into
* VPG_FREEING state.
*/
LASSERT((struct cl_page *)vmpage->private != slice->cpl_page);
- vvp_page_fini_common(cp);
+ vvp_page_fini_common(vpg);
}
static int vvp_page_own(const struct lu_env *env,
const struct cl_page_slice *slice, struct cl_io *io,
int nonblock)
{
- struct ccc_page *vpg = cl2ccc_page(slice);
- struct page *vmpage = vpg->cpg_page;
+ struct vvp_page *vpg = cl2vvp_page(slice);
+ struct page *vmpage = vpg->vpg_page;
LASSERT(vmpage);
if (nonblock) {
@@ -96,6 +103,7 @@ static int vvp_page_own(const struct lu_env *env,
lock_page(vmpage);
wait_on_page_writeback(vmpage);
+
return 0;
}
@@ -136,41 +144,15 @@ static void vvp_page_discard(const struct lu_env *env,
struct cl_io *unused)
{
struct page *vmpage = cl2vm_page(slice);
- struct address_space *mapping;
- struct ccc_page *cpg = cl2ccc_page(slice);
+ struct vvp_page *vpg = cl2vvp_page(slice);
LASSERT(vmpage);
LASSERT(PageLocked(vmpage));
- mapping = vmpage->mapping;
-
- if (cpg->cpg_defer_uptodate && !cpg->cpg_ra_used)
- ll_ra_stats_inc(mapping, RA_STAT_DISCARDED);
+ if (vpg->vpg_defer_uptodate && !vpg->vpg_ra_used)
+ ll_ra_stats_inc(vmpage->mapping->host, RA_STAT_DISCARDED);
- /*
- * truncate_complete_page() calls
- * a_ops->invalidatepage()->cl_page_delete()->vvp_page_delete().
- */
- truncate_complete_page(mapping, vmpage);
-}
-
-static int vvp_page_unmap(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- struct page *vmpage = cl2vm_page(slice);
- __u64 offset;
-
- LASSERT(vmpage);
- LASSERT(PageLocked(vmpage));
-
- offset = vmpage->index << PAGE_SHIFT;
-
- /*
- * XXX is it safe to call this with the page lock held?
- */
- ll_teardown_mmaps(vmpage->mapping, offset, offset + PAGE_SIZE);
- return 0;
+ ll_invalidate_page(vmpage);
}
static void vvp_page_delete(const struct lu_env *env,
@@ -179,12 +161,20 @@ static void vvp_page_delete(const struct lu_env *env,
struct page *vmpage = cl2vm_page(slice);
struct inode *inode = vmpage->mapping->host;
struct cl_object *obj = slice->cpl_obj;
+ struct cl_page *page = slice->cpl_page;
+ int refc;
LASSERT(PageLocked(vmpage));
- LASSERT((struct cl_page *)vmpage->private == slice->cpl_page);
- LASSERT(inode == ccc_object_inode(obj));
+ LASSERT((struct cl_page *)vmpage->private == page);
+ LASSERT(inode == vvp_object_inode(obj));
- vvp_write_complete(cl2ccc(obj), cl2ccc_page(slice));
+ vvp_write_complete(cl2vvp(obj), cl2vvp_page(slice));
+
+ /* Drop the reference count held in vvp_page_init */
+ refc = atomic_dec_return(&page->cp_ref);
+ LASSERTF(refc >= 1, "page = %p, refc = %d\n", page, refc);
+
+ ClearPageUptodate(vmpage);
ClearPagePrivate(vmpage);
vmpage->private = 0;
/*
@@ -237,7 +227,7 @@ static int vvp_page_prep_write(const struct lu_env *env,
if (!pg->cp_sync_io)
set_page_writeback(vmpage);
- vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice));
+ vvp_write_pending(cl2vvp(slice->cpl_obj), cl2vvp_page(slice));
return 0;
}
@@ -250,11 +240,11 @@ static int vvp_page_prep_write(const struct lu_env *env,
*/
static void vvp_vmpage_error(struct inode *inode, struct page *vmpage, int ioret)
{
- struct ccc_object *obj = cl_inode2ccc(inode);
+ struct vvp_object *obj = cl_inode2vvp(inode);
if (ioret == 0) {
ClearPageError(vmpage);
- obj->cob_discard_page_warned = 0;
+ obj->vob_discard_page_warned = 0;
} else {
SetPageError(vmpage);
if (ioret == -ENOSPC)
@@ -263,8 +253,8 @@ static void vvp_vmpage_error(struct inode *inode, struct page *vmpage, int ioret
set_bit(AS_EIO, &inode->i_mapping->flags);
if ((ioret == -ESHUTDOWN || ioret == -EINTR) &&
- obj->cob_discard_page_warned == 0) {
- obj->cob_discard_page_warned = 1;
+ obj->vob_discard_page_warned == 0) {
+ obj->vob_discard_page_warned = 1;
ll_dirty_page_discard_warn(vmpage, ioret);
}
}
@@ -274,22 +264,23 @@ static void vvp_page_completion_read(const struct lu_env *env,
const struct cl_page_slice *slice,
int ioret)
{
- struct ccc_page *cp = cl2ccc_page(slice);
- struct page *vmpage = cp->cpg_page;
- struct cl_page *page = cl_page_top(slice->cpl_page);
- struct inode *inode = ccc_object_inode(page->cp_obj);
+ struct vvp_page *vpg = cl2vvp_page(slice);
+ struct page *vmpage = vpg->vpg_page;
+ struct cl_page *page = slice->cpl_page;
+ struct inode *inode = vvp_object_inode(page->cp_obj);
LASSERT(PageLocked(vmpage));
CL_PAGE_HEADER(D_PAGE, env, page, "completing READ with %d\n", ioret);
- if (cp->cpg_defer_uptodate)
+ if (vpg->vpg_defer_uptodate)
ll_ra_count_put(ll_i2sbi(inode), 1);
if (ioret == 0) {
- if (!cp->cpg_defer_uptodate)
+ if (!vpg->vpg_defer_uptodate)
cl_page_export(env, page, 1);
- } else
- cp->cpg_defer_uptodate = 0;
+ } else {
+ vpg->vpg_defer_uptodate = 0;
+ }
if (!page->cp_sync_io)
unlock_page(vmpage);
@@ -299,9 +290,9 @@ static void vvp_page_completion_write(const struct lu_env *env,
const struct cl_page_slice *slice,
int ioret)
{
- struct ccc_page *cp = cl2ccc_page(slice);
+ struct vvp_page *vpg = cl2vvp_page(slice);
struct cl_page *pg = slice->cpl_page;
- struct page *vmpage = cp->cpg_page;
+ struct page *vmpage = vpg->vpg_page;
CL_PAGE_HEADER(D_PAGE, env, pg, "completing WRITE with %d\n", ioret);
@@ -315,8 +306,8 @@ static void vvp_page_completion_write(const struct lu_env *env,
* and then re-add the page into pending transfer queue. -jay
*/
- cp->cpg_write_queued = 0;
- vvp_write_complete(cl2ccc(slice->cpl_obj), cp);
+ vpg->vpg_write_queued = 0;
+ vvp_write_complete(cl2vvp(slice->cpl_obj), vpg);
if (pg->cp_sync_io) {
LASSERT(PageLocked(vmpage));
@@ -327,7 +318,7 @@ static void vvp_page_completion_write(const struct lu_env *env,
* Only mark the page error only when it's an async write
* because applications won't wait for IO to finish.
*/
- vvp_vmpage_error(ccc_object_inode(pg->cp_obj), vmpage, ioret);
+ vvp_vmpage_error(vvp_object_inode(pg->cp_obj), vmpage, ioret);
end_page_writeback(vmpage);
}
@@ -359,7 +350,7 @@ static int vvp_page_make_ready(const struct lu_env *env,
LASSERT(pg->cp_state == CPS_CACHED);
/* This actually clears the dirty bit in the radix tree. */
set_page_writeback(vmpage);
- vvp_write_pending(cl2ccc(slice->cpl_obj), cl2ccc_page(slice));
+ vvp_write_pending(cl2vvp(slice->cpl_obj), cl2vvp_page(slice));
CL_PAGE_HEADER(D_PAGE, env, pg, "readied\n");
} else if (pg->cp_state == CPS_PAGEOUT) {
/* is it possible for osc_flush_async_page() to already
@@ -375,24 +366,51 @@ static int vvp_page_make_ready(const struct lu_env *env,
return result;
}
+static int vvp_page_is_under_lock(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *io, pgoff_t *max_index)
+{
+ if (io->ci_type == CIT_READ || io->ci_type == CIT_WRITE ||
+ io->ci_type == CIT_FAULT) {
+ struct vvp_io *vio = vvp_env_io(env);
+
+ if (unlikely(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED))
+ *max_index = CL_PAGE_EOF;
+ }
+ return 0;
+}
+
static int vvp_page_print(const struct lu_env *env,
const struct cl_page_slice *slice,
void *cookie, lu_printer_t printer)
{
- struct ccc_page *vp = cl2ccc_page(slice);
- struct page *vmpage = vp->cpg_page;
+ struct vvp_page *vpg = cl2vvp_page(slice);
+ struct page *vmpage = vpg->vpg_page;
(*printer)(env, cookie, LUSTRE_VVP_NAME "-page@%p(%d:%d:%d) vm@%p ",
- vp, vp->cpg_defer_uptodate, vp->cpg_ra_used,
- vp->cpg_write_queued, vmpage);
+ vpg, vpg->vpg_defer_uptodate, vpg->vpg_ra_used,
+ vpg->vpg_write_queued, vmpage);
if (vmpage) {
(*printer)(env, cookie, "%lx %d:%d %lx %lu %slru",
(long)vmpage->flags, page_count(vmpage),
page_mapcount(vmpage), vmpage->private,
- page_index(vmpage),
+ vmpage->index,
list_empty(&vmpage->lru) ? "not-" : "");
}
+
(*printer)(env, cookie, "\n");
+
+ return 0;
+}
+
+static int vvp_page_fail(const struct lu_env *env,
+ const struct cl_page_slice *slice)
+{
+ /*
+ * Cached read?
+ */
+ LBUG();
+
return 0;
}
@@ -401,32 +419,38 @@ static const struct cl_page_operations vvp_page_ops = {
.cpo_assume = vvp_page_assume,
.cpo_unassume = vvp_page_unassume,
.cpo_disown = vvp_page_disown,
- .cpo_vmpage = ccc_page_vmpage,
.cpo_discard = vvp_page_discard,
.cpo_delete = vvp_page_delete,
- .cpo_unmap = vvp_page_unmap,
.cpo_export = vvp_page_export,
.cpo_is_vmlocked = vvp_page_is_vmlocked,
.cpo_fini = vvp_page_fini,
.cpo_print = vvp_page_print,
- .cpo_is_under_lock = ccc_page_is_under_lock,
+ .cpo_is_under_lock = vvp_page_is_under_lock,
.io = {
[CRT_READ] = {
.cpo_prep = vvp_page_prep_read,
.cpo_completion = vvp_page_completion_read,
- .cpo_make_ready = ccc_fail,
+ .cpo_make_ready = vvp_page_fail,
},
[CRT_WRITE] = {
.cpo_prep = vvp_page_prep_write,
.cpo_completion = vvp_page_completion_write,
.cpo_make_ready = vvp_page_make_ready,
- }
- }
+ },
+ },
};
+static int vvp_transient_page_prep(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *unused)
+{
+ /* transient page should always be sent. */
+ return 0;
+}
+
static void vvp_transient_page_verify(const struct cl_page *page)
{
- struct inode *inode = ccc_object_inode(page->cp_obj);
+ struct inode *inode = vvp_object_inode(page->cp_obj);
LASSERT(!inode_trylock(inode));
}
@@ -477,7 +501,7 @@ static void vvp_transient_page_discard(const struct lu_env *env,
static int vvp_transient_page_is_vmlocked(const struct lu_env *env,
const struct cl_page_slice *slice)
{
- struct inode *inode = ccc_object_inode(slice->cpl_obj);
+ struct inode *inode = vvp_object_inode(slice->cpl_obj);
int locked;
locked = !inode_trylock(inode);
@@ -497,13 +521,13 @@ vvp_transient_page_completion(const struct lu_env *env,
static void vvp_transient_page_fini(const struct lu_env *env,
struct cl_page_slice *slice)
{
- struct ccc_page *cp = cl2ccc_page(slice);
+ struct vvp_page *vpg = cl2vvp_page(slice);
struct cl_page *clp = slice->cpl_page;
- struct ccc_object *clobj = cl2ccc(clp->cp_obj);
+ struct vvp_object *clobj = cl2vvp(clp->cp_obj);
- vvp_page_fini_common(cp);
- LASSERT(!inode_trylock(clobj->cob_inode));
- clobj->cob_transient_pages--;
+ vvp_page_fini_common(vpg);
+ LASSERT(!inode_trylock(clobj->vob_inode));
+ clobj->vob_transient_pages--;
}
static const struct cl_page_operations vvp_transient_page_ops = {
@@ -512,45 +536,48 @@ static const struct cl_page_operations vvp_transient_page_ops = {
.cpo_unassume = vvp_transient_page_unassume,
.cpo_disown = vvp_transient_page_disown,
.cpo_discard = vvp_transient_page_discard,
- .cpo_vmpage = ccc_page_vmpage,
.cpo_fini = vvp_transient_page_fini,
.cpo_is_vmlocked = vvp_transient_page_is_vmlocked,
.cpo_print = vvp_page_print,
- .cpo_is_under_lock = ccc_page_is_under_lock,
+ .cpo_is_under_lock = vvp_page_is_under_lock,
.io = {
[CRT_READ] = {
- .cpo_prep = ccc_transient_page_prep,
+ .cpo_prep = vvp_transient_page_prep,
.cpo_completion = vvp_transient_page_completion,
},
[CRT_WRITE] = {
- .cpo_prep = ccc_transient_page_prep,
+ .cpo_prep = vvp_transient_page_prep,
.cpo_completion = vvp_transient_page_completion,
}
}
};
int vvp_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage)
+ struct cl_page *page, pgoff_t index)
{
- struct ccc_page *cpg = cl_object_page_slice(obj, page);
+ struct vvp_page *vpg = cl_object_page_slice(obj, page);
+ struct page *vmpage = page->cp_vmpage;
- CLOBINVRNT(env, obj, ccc_object_invariant(obj));
+ CLOBINVRNT(env, obj, vvp_object_invariant(obj));
- cpg->cpg_page = vmpage;
+ vpg->vpg_page = vmpage;
get_page(vmpage);
- INIT_LIST_HEAD(&cpg->cpg_pending_linkage);
+ INIT_LIST_HEAD(&vpg->vpg_pending_linkage);
if (page->cp_type == CPT_CACHEABLE) {
+ /* in cache, decref in vvp_page_delete */
+ atomic_inc(&page->cp_ref);
SetPagePrivate(vmpage);
vmpage->private = (unsigned long)page;
- cl_page_slice_add(page, &cpg->cpg_cl, obj, &vvp_page_ops);
+ cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
+ &vvp_page_ops);
} else {
- struct ccc_object *clobj = cl2ccc(obj);
+ struct vvp_object *clobj = cl2vvp(obj);
- LASSERT(!inode_trylock(clobj->cob_inode));
- cl_page_slice_add(page, &cpg->cpg_cl, obj,
+ LASSERT(!inode_trylock(clobj->vob_inode));
+ cl_page_slice_add(page, &vpg->vpg_cl, obj, index,
&vvp_transient_page_ops);
- clobj->cob_transient_pages++;
+ clobj->vob_transient_pages++;
}
return 0;
}
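
Aside (not part of the patch): vvp_page_init() above attaches one of two operation tables at slice-init time, vvp_page_ops for CPT_CACHEABLE pages and vvp_transient_page_ops otherwise. A minimal user-space sketch of that "pick the vtable at init" idea, with made-up names (page_kind, page_ops, page_init), might look like this:

#include <stdio.h>

enum page_kind { PAGE_CACHEABLE, PAGE_TRANSIENT };

struct page_ops {
	const char *(*describe)(void);
};

static const char *describe_cacheable(void) { return "cacheable"; }
static const char *describe_transient(void) { return "transient"; }

static const struct page_ops cacheable_ops = { .describe = describe_cacheable };
static const struct page_ops transient_ops = { .describe = describe_transient };

struct page_slice {
	const struct page_ops *ops;	/* chosen once, at init */
};

static void page_init(struct page_slice *slice, enum page_kind kind)
{
	/* same shape as the CPT_CACHEABLE branch in vvp_page_init() */
	slice->ops = (kind == PAGE_CACHEABLE) ? &cacheable_ops : &transient_ops;
}

int main(void)
{
	struct page_slice s;

	page_init(&s, PAGE_TRANSIENT);
	printf("page is %s\n", s.ops->describe());
	return 0;
}
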
diff --git a/drivers/staging/lustre/lustre/llite/vvp_req.c b/drivers/staging/lustre/lustre/llite/vvp_req.c
new file mode 100644
index 000000000000..fb886291a4e2
--- /dev/null
+++ b/drivers/staging/lustre/lustre/llite/vvp_req.c
@@ -0,0 +1,121 @@
+/*
+ * GPL HEADER START
+ *
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.gnu.org/licenses/gpl-2.0.html
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2014, Intel Corporation.
+ */
+
+#define DEBUG_SUBSYSTEM S_LLITE
+
+#include "../include/lustre/lustre_idl.h"
+#include "../include/cl_object.h"
+#include "../include/obd.h"
+#include "../include/obd_support.h"
+#include "../include/lustre_lite.h"
+#include "llite_internal.h"
+#include "vvp_internal.h"
+
+static inline struct vvp_req *cl2vvp_req(const struct cl_req_slice *slice)
+{
+ return container_of0(slice, struct vvp_req, vrq_cl);
+}
+
+/**
+ * Implementation of struct cl_req_operations::cro_attr_set() for VVP
+ * layer. VVP is responsible for
+ *
+ * - o_[mac]time
+ *
+ * - o_mode
+ *
+ * - o_parent_seq
+ *
+ * - o_[ug]id
+ *
+ * - o_parent_oid
+ *
+ * - o_parent_ver
+ *
+ * - o_ioepoch
+ *
+ */
+void vvp_req_attr_set(const struct lu_env *env,
+ const struct cl_req_slice *slice,
+ const struct cl_object *obj,
+ struct cl_req_attr *attr, u64 flags)
+{
+ struct inode *inode;
+ struct obdo *oa;
+ u32 valid_flags;
+
+ oa = attr->cra_oa;
+ inode = vvp_object_inode(obj);
+ valid_flags = OBD_MD_FLTYPE;
+
+ if (slice->crs_req->crq_type == CRT_WRITE) {
+ if (flags & OBD_MD_FLEPOCH) {
+ oa->o_valid |= OBD_MD_FLEPOCH;
+ oa->o_ioepoch = ll_i2info(inode)->lli_ioepoch;
+ valid_flags |= OBD_MD_FLMTIME | OBD_MD_FLCTIME |
+ OBD_MD_FLUID | OBD_MD_FLGID;
+ }
+ }
+ obdo_from_inode(oa, inode, valid_flags & flags);
+ obdo_set_parent_fid(oa, &ll_i2info(inode)->lli_fid);
+ memcpy(attr->cra_jobid, ll_i2info(inode)->lli_jobid,
+ JOBSTATS_JOBID_SIZE);
+}
+
+void vvp_req_completion(const struct lu_env *env,
+ const struct cl_req_slice *slice, int ioret)
+{
+ struct vvp_req *vrq;
+
+ if (ioret > 0)
+ cl_stats_tally(slice->crs_dev, slice->crs_req->crq_type, ioret);
+
+ vrq = cl2vvp_req(slice);
+ kmem_cache_free(vvp_req_kmem, vrq);
+}
+
+static const struct cl_req_operations vvp_req_ops = {
+ .cro_attr_set = vvp_req_attr_set,
+ .cro_completion = vvp_req_completion
+};
+
+int vvp_req_init(const struct lu_env *env, struct cl_device *dev,
+ struct cl_req *req)
+{
+ struct vvp_req *vrq;
+ int result;
+
+ vrq = kmem_cache_zalloc(vvp_req_kmem, GFP_NOFS);
+ if (vrq) {
+ cl_req_slice_add(req, &vrq->vrq_cl, dev, &vvp_req_ops);
+ result = 0;
+ } else {
+ result = -ENOMEM;
+ }
+ return result;
+}
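
Aside (not part of the patch): cl2vvp_req() and vvp_req_init() above follow the usual cl-object layering idiom — the layer embeds a generic slice in its private struct, hands the slice to the core, and later recovers its container via container_of0(). A self-contained sketch of that idiom with hypothetical names (req_slice, layer_req) and plain container_of():

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct req_slice {			/* what the generic core tracks */
	const char *rs_layer_name;
};

struct layer_req {			/* the layer's private container */
	int		 lr_state;
	struct req_slice lr_slice;	/* embedded slice */
};

/* counterpart of cl2vvp_req(): slice -> enclosing layer_req */
static struct layer_req *slice2layer(struct req_slice *slice)
{
	return container_of(slice, struct layer_req, lr_slice);
}

int main(void)
{
	struct layer_req *lr = calloc(1, sizeof(*lr));	/* like vvp_req_init() */
	struct req_slice *slice;

	if (!lr)
		return 1;			/* the -ENOMEM branch */
	lr->lr_state = 7;
	lr->lr_slice.rs_layer_name = "vvp-like layer";

	/* the core only ever sees &lr->lr_slice ... */
	slice = &lr->lr_slice;

	/* ... and on completion the layer gets its container back */
	printf("state = %d (%s)\n", slice2layer(slice)->lr_state,
	       slice->rs_layer_name);
	free(slice2layer(slice));		/* like vvp_req_completion() */
	return 0;
}
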
diff --git a/drivers/staging/lustre/lustre/llite/xattr.c b/drivers/staging/lustre/lustre/llite/xattr.c
index c671f221c28c..ed4de04381c3 100644
--- a/drivers/staging/lustre/lustre/llite/xattr.c
+++ b/drivers/staging/lustre/lustre/llite/xattr.c
@@ -181,8 +181,9 @@ int ll_setxattr_common(struct inode *inode, const char *name,
size = rc;
pv = (const char *)new_value;
- } else
+ } else {
return -EOPNOTSUPP;
+ }
valid |= rce_ops2valid(rce->rce_ops);
}
@@ -218,8 +219,8 @@ int ll_setxattr(struct dentry *dentry, const char *name,
LASSERT(inode);
LASSERT(name);
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), xattr %s\n",
- inode->i_ino, inode->i_generation, inode, name);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), xattr %s\n",
+ PFID(ll_inode2fid(inode)), inode, name);
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_SETXATTR, 1);
@@ -243,12 +244,12 @@ int ll_setxattr(struct dentry *dentry, const char *name,
lump->lmm_stripe_offset = -1;
if (lump && S_ISREG(inode->i_mode)) {
- int flags = FMODE_WRITE;
+ __u64 it_flags = FMODE_WRITE;
int lum_size = (lump->lmm_magic == LOV_USER_MAGIC_V1) ?
sizeof(*lump) : sizeof(struct lov_user_md_v3);
- rc = ll_lov_setstripe_ea_info(inode, dentry, flags, lump,
- lum_size);
+ rc = ll_lov_setstripe_ea_info(inode, dentry, it_flags,
+ lump, lum_size);
/* b10667: rc is always 0 here for now */
rc = 0;
} else if (S_ISDIR(inode->i_mode)) {
@@ -272,8 +273,8 @@ int ll_removexattr(struct dentry *dentry, const char *name)
LASSERT(inode);
LASSERT(name);
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), xattr %s\n",
- inode->i_ino, inode->i_generation, inode, name);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), xattr %s\n",
+ PFID(ll_inode2fid(inode)), inode, name);
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_REMOVEXATTR, 1);
return ll_setxattr_common(inode, name, NULL, 0, 0,
@@ -292,8 +293,8 @@ int ll_getxattr_common(struct inode *inode, const char *name,
struct rmtacl_ctl_entry *rce = NULL;
struct ll_inode_info *lli = ll_i2info(inode);
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n",
- inode->i_ino, inode->i_generation, inode);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
+ PFID(ll_inode2fid(inode)), inode);
/* listxattr has slightly different behavior from that of ext3:
* without 'user_xattr' ext3 will list all xattr names but
@@ -338,7 +339,6 @@ int ll_getxattr_common(struct inode *inode, const char *name,
*/
if (xattr_type == XATTR_ACL_ACCESS_T &&
!(sbi->ll_flags & LL_SBI_RMT_CLIENT)) {
-
struct posix_acl *acl;
spin_lock(&lli->lli_lock);
@@ -423,8 +423,7 @@ getxattr_nocache:
if (rce && rce->rce_ops == RMT_LSETFACL) {
ext_acl_xattr_header *acl;
- acl = lustre_posix_acl_xattr_2ext(
- (posix_acl_xattr_header *)buffer, rc);
+ acl = lustre_posix_acl_xattr_2ext(buffer, rc);
if (IS_ERR(acl)) {
rc = PTR_ERR(acl);
goto out;
@@ -457,8 +456,8 @@ ssize_t ll_getxattr(struct dentry *dentry, struct inode *inode,
LASSERT(inode);
LASSERT(name);
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p), xattr %s\n",
- inode->i_ino, inode->i_generation, inode, name);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p), xattr %s\n",
+ PFID(ll_inode2fid(inode)), inode, name);
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_GETXATTR, 1);
@@ -552,8 +551,8 @@ ssize_t ll_listxattr(struct dentry *dentry, char *buffer, size_t size)
LASSERT(inode);
- CDEBUG(D_VFSTRACE, "VFS Op:inode=%lu/%u(%p)\n",
- inode->i_ino, inode->i_generation, inode);
+ CDEBUG(D_VFSTRACE, "VFS Op:inode="DFID"(%p)\n",
+ PFID(ll_inode2fid(inode)), inode);
ll_stats_ops_tally(ll_i2sbi(inode), LPROC_LL_LISTXATTR, 1);
diff --git a/drivers/staging/lustre/lustre/llite/xattr_cache.c b/drivers/staging/lustre/lustre/llite/xattr_cache.c
index 3480ce2bb3cc..d7e17abbe361 100644
--- a/drivers/staging/lustre/lustre/llite/xattr_cache.c
+++ b/drivers/staging/lustre/lustre/llite/xattr_cache.c
@@ -229,7 +229,6 @@ static int ll_xattr_cache_valid(struct ll_inode_info *lli)
*/
static int ll_xattr_cache_destroy_locked(struct ll_inode_info *lli)
{
-
if (!ll_xattr_cache_valid(lli))
return 0;
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_internal.h b/drivers/staging/lustre/lustre/lmv/lmv_internal.h
index 8a0087190e23..7007e4c48035 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_internal.h
+++ b/drivers/staging/lustre/lustre/lmv/lmv_internal.h
@@ -42,9 +42,6 @@
#define LMV_MAX_TGT_COUNT 128
-#define lmv_init_lock(lmv) mutex_lock(&lmv->init_mutex)
-#define lmv_init_unlock(lmv) mutex_unlock(&lmv->init_mutex)
-
#define LL_IT2STR(it) \
((it) ? ldlm_it2str((it)->it_op) : "0")
diff --git a/drivers/staging/lustre/lustre/lmv/lmv_obd.c b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
index 9abb7c2b9231..9e31f6b03f9e 100644
--- a/drivers/staging/lustre/lustre/lmv/lmv_obd.c
+++ b/drivers/staging/lustre/lustre/lmv/lmv_obd.c
@@ -132,8 +132,9 @@ static int lmv_set_mdc_active(struct lmv_obd *lmv, struct obd_uuid *uuid,
static struct obd_uuid *lmv_get_uuid(struct obd_export *exp)
{
struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
+ struct lmv_tgt_desc *tgt = lmv->tgts[0];
- return obd_get_uuid(lmv->tgts[0]->ltd_exp);
+ return tgt ? obd_get_uuid(tgt->ltd_exp) : NULL;
}
static int lmv_notify(struct obd_device *obd, struct obd_device *watched,
@@ -249,7 +250,6 @@ static int lmv_connect(const struct lu_env *env,
static void lmv_set_timeouts(struct obd_device *obd)
{
- struct lmv_tgt_desc *tgt;
struct lmv_obd *lmv;
int i;
@@ -261,8 +261,10 @@ static void lmv_set_timeouts(struct obd_device *obd)
return;
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
+ struct lmv_tgt_desc *tgt = lmv->tgts[i];
+
tgt = lmv->tgts[i];
- if (!tgt || !tgt->ltd_exp || tgt->ltd_active == 0)
+ if (!tgt || !tgt->ltd_exp || !tgt->ltd_active)
continue;
obd_set_info_async(NULL, tgt->ltd_exp, sizeof(KEY_INTERMDS),
@@ -302,13 +304,14 @@ static int lmv_init_ea_size(struct obd_export *exp, int easize,
return 0;
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp ||
- lmv->tgts[i]->ltd_active == 0) {
+ struct lmv_tgt_desc *tgt = lmv->tgts[i];
+
+ if (!tgt || !tgt->ltd_exp || !tgt->ltd_active) {
CWARN("%s: NULL export for %d\n", obd->obd_name, i);
continue;
}
- rc = md_init_ea_size(lmv->tgts[i]->ltd_exp, easize, def_easize,
+ rc = md_init_ea_size(tgt->ltd_exp, easize, def_easize,
cookiesize, def_cookiesize);
if (rc) {
CERROR("%s: obd_init_ea_size() failed on MDT target %d: rc = %d\n",
@@ -425,7 +428,7 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
CDEBUG(D_CONFIG, "Target uuid: %s. index %d\n", uuidp->uuid, index);
- lmv_init_lock(lmv);
+ mutex_lock(&lmv->lmv_init_mutex);
if (lmv->desc.ld_tgt_count == 0) {
struct obd_device *mdc_obd;
@@ -433,7 +436,7 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
mdc_obd = class_find_client_obd(uuidp, LUSTRE_MDC_NAME,
&obd->obd_uuid);
if (!mdc_obd) {
- lmv_init_unlock(lmv);
+ mutex_unlock(&lmv->lmv_init_mutex);
CERROR("%s: Target %s not attached: rc = %d\n",
obd->obd_name, uuidp->uuid, -EINVAL);
return -EINVAL;
@@ -445,7 +448,7 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
CERROR("%s: UUID %s already assigned at LOV target index %d: rc = %d\n",
obd->obd_name,
obd_uuid2str(&tgt->ltd_uuid), index, -EEXIST);
- lmv_init_unlock(lmv);
+ mutex_unlock(&lmv->lmv_init_mutex);
return -EEXIST;
}
@@ -459,7 +462,7 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
newsize <<= 1;
newtgts = kcalloc(newsize, sizeof(*newtgts), GFP_NOFS);
if (!newtgts) {
- lmv_init_unlock(lmv);
+ mutex_unlock(&lmv->lmv_init_mutex);
return -ENOMEM;
}
@@ -481,7 +484,7 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
tgt = kzalloc(sizeof(*tgt), GFP_NOFS);
if (!tgt) {
- lmv_init_unlock(lmv);
+ mutex_unlock(&lmv->lmv_init_mutex);
return -ENOMEM;
}
@@ -507,7 +510,7 @@ static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
}
}
- lmv_init_unlock(lmv);
+ mutex_unlock(&lmv->lmv_init_mutex);
return rc;
}
@@ -522,18 +525,27 @@ int lmv_check_connect(struct obd_device *obd)
if (lmv->connected)
return 0;
- lmv_init_lock(lmv);
+ mutex_lock(&lmv->lmv_init_mutex);
if (lmv->connected) {
- lmv_init_unlock(lmv);
+ mutex_unlock(&lmv->lmv_init_mutex);
return 0;
}
if (lmv->desc.ld_tgt_count == 0) {
- lmv_init_unlock(lmv);
+ mutex_unlock(&lmv->lmv_init_mutex);
CERROR("%s: no targets configured.\n", obd->obd_name);
return -EINVAL;
}
+ LASSERT(lmv->tgts);
+
+ if (!lmv->tgts[0]) {
+ mutex_unlock(&lmv->lmv_init_mutex);
+ CERROR("%s: no target configured for index 0.\n",
+ obd->obd_name);
+ return -EINVAL;
+ }
+
CDEBUG(D_CONFIG, "Time to connect %s to %s\n",
lmv->cluuid.uuid, obd->obd_name);
@@ -551,7 +563,7 @@ int lmv_check_connect(struct obd_device *obd)
lmv->connected = 1;
easize = lmv_get_easize(lmv);
lmv_init_ea_size(obd->obd_self_export, easize, 0, 0, 0);
- lmv_init_unlock(lmv);
+ mutex_unlock(&lmv->lmv_init_mutex);
return 0;
out_disc:
@@ -572,7 +584,7 @@ int lmv_check_connect(struct obd_device *obd)
}
}
class_disconnect(lmv->exp);
- lmv_init_unlock(lmv);
+ mutex_unlock(&lmv->lmv_init_mutex);
return rc;
}
@@ -796,6 +808,11 @@ static int lmv_hsm_ct_unregister(struct lmv_obd *lmv, unsigned int cmd, int len,
/* unregister request (call from llapi_hsm_copytool_fini) */
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
+ struct lmv_tgt_desc *tgt = lmv->tgts[i];
+
+ if (!tgt || !tgt->ltd_exp)
+ continue;
+
/* best effort: try to clean as much as possible
* (continue on error)
*/
@@ -825,20 +842,28 @@ static int lmv_hsm_ct_register(struct lmv_obd *lmv, unsigned int cmd, int len,
* except if it is because of an inactive target.
*/
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- err = obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp, len, lk, uarg);
+ struct lmv_tgt_desc *tgt = lmv->tgts[i];
+
+ if (!tgt || !tgt->ltd_exp)
+ continue;
+
+ err = obd_iocontrol(cmd, tgt->ltd_exp, len, lk, uarg);
if (err) {
- if (lmv->tgts[i]->ltd_active) {
+ if (tgt->ltd_active) {
/* permanent error */
CERROR("error: iocontrol MDC %s on MDTidx %d cmd %x: err = %d\n",
- lmv->tgts[i]->ltd_uuid.uuid,
- i, cmd, err);
+ tgt->ltd_uuid.uuid, i, cmd, err);
rc = err;
lk->lk_flags |= LK_FLG_STOP;
/* unregister from previous MDS */
- for (j = 0; j < i; j++)
- obd_iocontrol(cmd,
- lmv->tgts[j]->ltd_exp,
- len, lk, uarg);
+ for (j = 0; j < i; j++) {
+ tgt = lmv->tgts[j];
+
+ if (!tgt || !tgt->ltd_exp)
+ continue;
+ obd_iocontrol(cmd, tgt->ltd_exp, len,
+ lk, uarg);
+ }
return rc;
}
/* else: transient error.
@@ -877,6 +902,7 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
{
struct obd_device *obddev = class_exp2obd(exp);
struct lmv_obd *lmv = &obddev->u.lmv;
+ struct lmv_tgt_desc *tgt = NULL;
int i = 0;
int rc = 0;
int set = 0;
@@ -896,10 +922,11 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
if (index >= count)
return -ENODEV;
- if (!lmv->tgts[index] || lmv->tgts[index]->ltd_active == 0)
+ tgt = lmv->tgts[index];
+ if (!tgt || !tgt->ltd_active)
return -ENODATA;
- mdc_obd = class_exp2obd(lmv->tgts[index]->ltd_exp);
+ mdc_obd = class_exp2obd(tgt->ltd_exp);
if (!mdc_obd)
return -EINVAL;
@@ -909,7 +936,7 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
(int)sizeof(struct obd_uuid))))
return -EFAULT;
- rc = obd_statfs(NULL, lmv->tgts[index]->ltd_exp, &stat_buf,
+ rc = obd_statfs(NULL, tgt->ltd_exp, &stat_buf,
cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
0);
if (rc)
@@ -922,11 +949,10 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
}
case OBD_IOC_QUOTACTL: {
struct if_quotactl *qctl = karg;
- struct lmv_tgt_desc *tgt = NULL;
struct obd_quotactl *oqctl;
if (qctl->qc_valid == QC_MDTIDX) {
- if (qctl->qc_idx < 0 || count <= qctl->qc_idx)
+ if (count <= qctl->qc_idx)
return -EINVAL;
tgt = lmv->tgts[qctl->qc_idx];
@@ -975,18 +1001,18 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
if (icc->icc_mdtindex >= count)
return -ENODEV;
- if (!lmv->tgts[icc->icc_mdtindex] ||
- !lmv->tgts[icc->icc_mdtindex]->ltd_exp ||
- lmv->tgts[icc->icc_mdtindex]->ltd_active == 0)
+ tgt = lmv->tgts[icc->icc_mdtindex];
+ if (!tgt || !tgt->ltd_exp || !tgt->ltd_active)
return -ENODEV;
- rc = obd_iocontrol(cmd, lmv->tgts[icc->icc_mdtindex]->ltd_exp,
- sizeof(*icc), icc, NULL);
+ rc = obd_iocontrol(cmd, tgt->ltd_exp, sizeof(*icc), icc, NULL);
break;
}
case LL_IOC_GET_CONNECT_FLAGS: {
- if (!lmv->tgts[0])
+ tgt = lmv->tgts[0];
+
+ if (!tgt || !tgt->ltd_exp)
return -ENODATA;
- rc = obd_iocontrol(cmd, lmv->tgts[0]->ltd_exp, len, karg, uarg);
+ rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
break;
}
case OBD_IOC_FID2PATH: {
@@ -997,7 +1023,6 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
case LL_IOC_HSM_STATE_SET:
case LL_IOC_HSM_ACTION: {
struct md_op_data *op_data = karg;
- struct lmv_tgt_desc *tgt;
tgt = lmv_find_target(lmv, &op_data->op_fid1);
if (IS_ERR(tgt))
@@ -1011,7 +1036,6 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
}
case LL_IOC_HSM_PROGRESS: {
const struct hsm_progress_kernel *hpk = karg;
- struct lmv_tgt_desc *tgt;
tgt = lmv_find_target(lmv, &hpk->hpk_fid);
if (IS_ERR(tgt))
@@ -1021,7 +1045,6 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
}
case LL_IOC_HSM_REQUEST: {
struct hsm_user_request *hur = karg;
- struct lmv_tgt_desc *tgt;
unsigned int reqcount = hur->hur_request.hr_itemcount;
if (reqcount == 0)
@@ -1044,7 +1067,11 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
int rc1;
struct hsm_user_request *req;
- nr = lmv_hsm_req_count(lmv, hur, lmv->tgts[i]);
+ tgt = lmv->tgts[i];
+ if (!tgt || !tgt->ltd_exp)
+ continue;
+
+ nr = lmv_hsm_req_count(lmv, hur, tgt);
if (nr == 0) /* nothing for this MDS */
continue;
@@ -1056,10 +1083,10 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
if (!req)
return -ENOMEM;
- lmv_hsm_req_build(lmv, hur, lmv->tgts[i], req);
+ lmv_hsm_req_build(lmv, hur, tgt, req);
- rc1 = obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp,
- reqlen, req, uarg);
+ rc1 = obd_iocontrol(cmd, tgt->ltd_exp, reqlen,
+ req, uarg);
if (rc1 != 0 && rc == 0)
rc = rc1;
kvfree(req);
@@ -1103,27 +1130,27 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
struct obd_device *mdc_obd;
int err;
- if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp)
+ tgt = lmv->tgts[i];
+ if (!tgt || !tgt->ltd_exp)
continue;
/* ll_umount_begin() sets force flag but for lmv, not
* mdc. Let's pass it through
*/
- mdc_obd = class_exp2obd(lmv->tgts[i]->ltd_exp);
+ mdc_obd = class_exp2obd(tgt->ltd_exp);
mdc_obd->obd_force = obddev->obd_force;
- err = obd_iocontrol(cmd, lmv->tgts[i]->ltd_exp, len,
- karg, uarg);
+ err = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
if (err == -ENODATA && cmd == OBD_IOC_POLL_QUOTACHECK) {
return err;
} else if (err) {
- if (lmv->tgts[i]->ltd_active) {
+ if (tgt->ltd_active) {
CERROR("error: iocontrol MDC %s on MDTidx %d cmd %x: err = %d\n",
- lmv->tgts[i]->ltd_uuid.uuid,
- i, cmd, err);
+ tgt->ltd_uuid.uuid, i, cmd, err);
if (!rc)
rc = err;
}
- } else
+ } else {
set = 1;
+ }
}
if (!set && !rc)
rc = -EIO;
@@ -1269,7 +1296,7 @@ static int lmv_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
lmv->lmv_placement = PLACEMENT_CHAR_POLICY;
spin_lock_init(&lmv->lmv_lock);
- mutex_init(&lmv->init_mutex);
+ mutex_init(&lmv->lmv_init_mutex);
lprocfs_lmv_init_vars(&lvars);
@@ -2071,7 +2098,7 @@ static void lmv_adjust_dirpages(struct page **pages, int ncfspgs, int nlupgs)
dp = (struct lu_dirpage *)((char *)dp + LU_PAGE_SIZE);
/* Check if we've reached the end of the CFS_PAGE. */
- if (!((unsigned long)dp & ~CFS_PAGE_MASK))
+ if (!((unsigned long)dp & ~PAGE_MASK))
break;
/* Save the hash and flags of this lu_dirpage. */
@@ -2268,7 +2295,6 @@ static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
lmv = &obd->u.lmv;
if (keylen >= strlen("remote_flag") && !strcmp(key, "remote_flag")) {
- struct lmv_tgt_desc *tgt;
int i;
rc = lmv_check_connect(obd);
@@ -2277,7 +2303,8 @@ static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
LASSERT(*vallen == sizeof(__u32));
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- tgt = lmv->tgts[i];
+ struct lmv_tgt_desc *tgt = lmv->tgts[i];
+
/*
* All tgts should be connected when this gets called.
*/
@@ -2466,12 +2493,13 @@ static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
LASSERT(fid);
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp ||
- lmv->tgts[i]->ltd_active == 0)
+ struct lmv_tgt_desc *tgt = lmv->tgts[i];
+
+ if (!tgt || !tgt->ltd_exp || !tgt->ltd_active)
continue;
- err = md_cancel_unused(lmv->tgts[i]->ltd_exp, fid,
- policy, mode, flags, opaque);
+ err = md_cancel_unused(tgt->ltd_exp, fid, policy, mode, flags,
+ opaque);
if (!rc)
rc = err;
}
@@ -2482,9 +2510,13 @@ static int lmv_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data,
__u64 *bits)
{
struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
+ struct lmv_tgt_desc *tgt = lmv->tgts[0];
int rc;
- rc = md_set_lock_data(lmv->tgts[0]->ltd_exp, lockh, data, bits);
+ if (!tgt || !tgt->ltd_exp)
+ return -EINVAL;
+
+ rc = md_set_lock_data(tgt->ltd_exp, lockh, data, bits);
return rc;
}
@@ -2509,12 +2541,13 @@ static enum ldlm_mode lmv_lock_match(struct obd_export *exp, __u64 flags,
* one fid was created in.
*/
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- if (!lmv->tgts[i] || !lmv->tgts[i]->ltd_exp ||
- lmv->tgts[i]->ltd_active == 0)
+ struct lmv_tgt_desc *tgt = lmv->tgts[i];
+
+ if (!tgt || !tgt->ltd_exp || !tgt->ltd_active)
continue;
- rc = md_lock_match(lmv->tgts[i]->ltd_exp, flags, fid,
- type, policy, mode, lockh);
+ rc = md_lock_match(tgt->ltd_exp, flags, fid, type, policy, mode,
+ lockh);
if (rc)
return rc;
}
@@ -2529,18 +2562,24 @@ static int lmv_get_lustre_md(struct obd_export *exp,
struct lustre_md *md)
{
struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
+ struct lmv_tgt_desc *tgt = lmv->tgts[0];
- return md_get_lustre_md(lmv->tgts[0]->ltd_exp, req, dt_exp, md_exp, md);
+ if (!tgt || !tgt->ltd_exp)
+ return -EINVAL;
+ return md_get_lustre_md(tgt->ltd_exp, req, dt_exp, md_exp, md);
}
static int lmv_free_lustre_md(struct obd_export *exp, struct lustre_md *md)
{
struct obd_device *obd = exp->exp_obd;
struct lmv_obd *lmv = &obd->u.lmv;
+ struct lmv_tgt_desc *tgt = lmv->tgts[0];
if (md->mea)
obd_free_memmd(exp, (void *)&md->mea);
- return md_free_lustre_md(lmv->tgts[0]->ltd_exp, md);
+ if (!tgt || !tgt->ltd_exp)
+ return -EINVAL;
+ return md_free_lustre_md(tgt->ltd_exp, md);
}
static int lmv_set_open_replay_data(struct obd_export *exp,
@@ -2649,7 +2688,8 @@ static int lmv_quotactl(struct obd_device *unused, struct obd_export *exp,
int rc = 0, i;
__u64 curspace, curinodes;
- if (!lmv->desc.ld_tgt_count || !tgt->ltd_active) {
+ if (!tgt || !tgt->ltd_exp || !tgt->ltd_active ||
+ !lmv->desc.ld_tgt_count) {
CERROR("master lmv inactive\n");
return -EIO;
}
@@ -2665,12 +2705,8 @@ static int lmv_quotactl(struct obd_device *unused, struct obd_export *exp,
tgt = lmv->tgts[i];
- if (!tgt || !tgt->ltd_exp || tgt->ltd_active == 0)
+ if (!tgt || !tgt->ltd_exp || !tgt->ltd_active)
continue;
- if (!tgt->ltd_active) {
- CDEBUG(D_HA, "mdt %d is inactive.\n", i);
- continue;
- }
err = obd_quotactl(tgt->ltd_exp, oqctl);
if (err) {
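
Aside (not part of the patch): most of the lmv_obd.c hunks above apply one recurring pattern — read lmv->tgts[i] into a local tgt once, then skip NULL, unconnected, or inactive slots before dereferencing it. A condensed user-space sketch of that defensive iteration (tgt_desc and its fields are invented for the example):

#include <stdio.h>
#include <stddef.h>

struct tgt_desc {
	int ltd_active;			/* mirrors tgt->ltd_active */
	int ltd_connected;		/* stands in for tgt->ltd_exp != NULL */
};

static void for_each_live_target(struct tgt_desc **tgts, int count)
{
	int i;

	for (i = 0; i < count; i++) {
		struct tgt_desc *tgt = tgts[i];	/* read the slot once */

		if (!tgt || !tgt->ltd_connected || !tgt->ltd_active)
			continue;		/* sparse or stale slot */
		printf("target %d is usable\n", i);
	}
}

int main(void)
{
	struct tgt_desc live = { .ltd_active = 1, .ltd_connected = 1 };
	struct tgt_desc *tgts[] = { NULL, &live, NULL };

	for_each_live_target(tgts, 3);
	return 0;
}
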
diff --git a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
index 7dd3162b51e9..ac9744e887ae 100644
--- a/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_cl_internal.h
@@ -73,19 +73,6 @@
* - top-page keeps a reference to its sub-page, and destroys it when it
* is destroyed.
*
- * - sub-lock keep a reference to its top-locks. Top-lock keeps a
- * reference (and a hold, see cl_lock_hold()) on its sub-locks when it
- * actively using them (that is, in cl_lock_state::CLS_QUEUING,
- * cl_lock_state::CLS_ENQUEUED, cl_lock_state::CLS_HELD states). When
- * moving into cl_lock_state::CLS_CACHED state, top-lock releases a
- * hold. From this moment top-lock has only a 'weak' reference to its
- * sub-locks. This reference is protected by top-lock
- * cl_lock::cll_guard, and will be automatically cleared by the sub-lock
- * when the latter is destroyed. When a sub-lock is canceled, a
- * reference to it is removed from the top-lock array, and top-lock is
- * moved into CLS_NEW state. It is guaranteed that all sub-locks exist
- * while their top-lock is in CLS_HELD or CLS_CACHED states.
- *
* - IO's are not reference counted.
*
* To implement a connection between top and sub entities, lov layer is split
@@ -281,24 +268,17 @@ struct lov_object {
};
/**
- * Flags that top-lock can set on each of its sub-locks.
- */
-enum lov_sub_flags {
- /** Top-lock acquired a hold (cl_lock_hold()) on a sub-lock. */
- LSF_HELD = 1 << 0
-};
-
-/**
* State lov_lock keeps for each sub-lock.
*/
struct lov_lock_sub {
/** sub-lock itself */
- struct lovsub_lock *sub_lock;
- /** An array of per-sub-lock flags, taken from enum lov_sub_flags */
- unsigned sub_flags;
+ struct cl_lock sub_lock;
+ /** Set if the sublock has ever been enqueued, meaning it may
+ * hold resources of underlying layers
+ */
+ unsigned int sub_is_enqueued:1,
+ sub_initialized:1;
int sub_stripe;
- struct cl_lock_descr sub_descr;
- struct cl_lock_descr sub_got;
};
/**
@@ -308,59 +288,8 @@ struct lov_lock {
struct cl_lock_slice lls_cl;
/** Number of sub-locks in this lock */
int lls_nr;
- /**
- * Number of existing sub-locks.
- */
- unsigned lls_nr_filled;
- /**
- * Set when sub-lock was canceled, while top-lock was being
- * used, or unused.
- */
- unsigned int lls_cancel_race:1;
- /**
- * An array of sub-locks
- *
- * There are two issues with managing sub-locks:
- *
- * - sub-locks are concurrently canceled, and
- *
- * - sub-locks are shared with other top-locks.
- *
- * To manage cancellation, top-lock acquires a hold on a sublock
- * (lov_sublock_adopt()) when the latter is inserted into
- * lov_lock::lls_sub[]. This hold is released (lov_sublock_release())
- * when top-lock is going into CLS_CACHED state or destroyed. Hold
- * prevents sub-lock from cancellation.
- *
- * Sub-lock sharing means, among other things, that top-lock that is
- * in the process of creation (i.e., not yet inserted into lock list)
- * is already accessible to other threads once at least one of its
- * sub-locks is created, see lov_lock_sub_init().
- *
- * Sub-lock can be in one of the following states:
- *
- * - doesn't exist, lov_lock::lls_sub[]::sub_lock == NULL. Such
- * sub-lock was either never created (top-lock is in CLS_NEW
- * state), or it was created, then canceled, then destroyed
- * (lov_lock_unlink() cleared sub-lock pointer in the top-lock).
- *
- * - sub-lock exists and is on
- * hold. (lov_lock::lls_sub[]::sub_flags & LSF_HELD). This is a
- * normal state of a sub-lock in CLS_HELD and CLS_CACHED states
- * of a top-lock.
- *
- * - sub-lock exists, but is not held by the top-lock. This
- * happens after top-lock released a hold on sub-locks before
- * going into cache (lov_lock_unuse()).
- *
- * \todo To support wide-striping, array has to be replaced with a set
- * of queues to avoid scanning.
- */
- struct lov_lock_sub *lls_sub;
- /**
- * Original description with which lock was enqueued.
- */
- struct cl_lock_descr lls_orig;
+ /** sublock array */
+ struct lov_lock_sub lls_sub[0];
};
struct lov_page {
@@ -444,8 +373,9 @@ struct lov_thread_info {
struct cl_lock_descr lti_ldescr;
struct ost_lvb lti_lvb;
struct cl_2queue lti_cl2q;
- struct cl_lock_closure lti_closure;
+ struct cl_page_list lti_plist;
wait_queue_t lti_waiter;
+ struct cl_attr lti_attr;
};
/**
@@ -611,14 +541,13 @@ int lov_sublock_modify(const struct lu_env *env, struct lov_lock *lov,
const struct cl_lock_descr *d, int idx);
int lov_page_init(const struct lu_env *env, struct cl_object *ob,
- struct cl_page *page, struct page *vmpage);
+ struct cl_page *page, pgoff_t index);
int lovsub_page_init(const struct lu_env *env, struct cl_object *ob,
- struct cl_page *page, struct page *vmpage);
-
+ struct cl_page *page, pgoff_t index);
int lov_page_init_empty(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage);
+ struct cl_page *page, pgoff_t index);
int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage);
+ struct cl_page *page, pgoff_t index);
struct lu_object *lov_object_alloc(const struct lu_env *env,
const struct lu_object_header *hdr,
struct lu_device *dev);
@@ -631,6 +560,7 @@ struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
struct lovsub_lock *sub);
struct lov_io_sub *lov_page_subio(const struct lu_env *env, struct lov_io *lio,
const struct cl_page_slice *slice);
+int lov_page_stripe(const struct cl_page *page);
#define lov_foreach_target(lov, var) \
for (var = 0; var < lov_targets_nr(lov); ++var)
@@ -789,11 +719,6 @@ static inline struct lovsub_req *cl2lovsub_req(const struct cl_req_slice *slice)
return container_of0(slice, struct lovsub_req, lsrq_cl);
}
-static inline struct cl_page *lov_sub_page(const struct cl_page_slice *slice)
-{
- return slice->cpl_page->cp_child;
-}
-
static inline struct lov_io *cl2lov_io(const struct lu_env *env,
const struct cl_io_slice *ios)
{
diff --git a/drivers/staging/lustre/lustre/lov/lov_dev.c b/drivers/staging/lustre/lustre/lov/lov_dev.c
index 532ef87dfb44..dae8e89bcf6d 100644
--- a/drivers/staging/lustre/lustre/lov/lov_dev.c
+++ b/drivers/staging/lustre/lustre/lov/lov_dev.c
@@ -143,9 +143,7 @@ static void *lov_key_init(const struct lu_context *ctx,
struct lov_thread_info *info;
info = kmem_cache_zalloc(lov_thread_kmem, GFP_NOFS);
- if (info)
- INIT_LIST_HEAD(&info->lti_closure.clc_list);
- else
+ if (!info)
info = ERR_PTR(-ENOMEM);
return info;
}
@@ -155,7 +153,6 @@ static void lov_key_fini(const struct lu_context *ctx,
{
struct lov_thread_info *info = data;
- LINVRNT(list_empty(&info->lti_closure.clc_list));
kmem_cache_free(lov_thread_kmem, info);
}
@@ -265,8 +262,9 @@ static int lov_req_init(const struct lu_env *env, struct cl_device *dev,
if (lr) {
cl_req_slice_add(req, &lr->lr_cl, dev, &lov_req_ops);
result = 0;
- } else
+ } else {
result = -ENOMEM;
+ }
return result;
}
@@ -335,14 +333,15 @@ static struct lov_device_emerg **lov_emerg_alloc(int nr)
cl_page_list_init(&em->emrg_page_list);
em->emrg_env = cl_env_alloc(&em->emrg_refcheck,
LCT_REMEMBER | LCT_NOREF);
- if (!IS_ERR(em->emrg_env))
+ if (!IS_ERR(em->emrg_env)) {
em->emrg_env->le_ctx.lc_cookie = 0x2;
- else {
+ } else {
result = PTR_ERR(em->emrg_env);
em->emrg_env = NULL;
}
- } else
+ } else {
result = -ENOMEM;
+ }
}
if (result != 0) {
lov_emerg_free(emerg, nr);
diff --git a/drivers/staging/lustre/lustre/lov/lov_ea.c b/drivers/staging/lustre/lustre/lov/lov_ea.c
index b6529401c713..460f0fa5e6b1 100644
--- a/drivers/staging/lustre/lustre/lov/lov_ea.c
+++ b/drivers/staging/lustre/lustre/lov/lov_ea.c
@@ -48,11 +48,6 @@
#include "lov_internal.h"
-struct lovea_unpack_args {
- struct lov_stripe_md *lsm;
- int cursor;
-};
-
static int lsm_lmm_verify_common(struct lov_mds_md *lmm, int lmm_bytes,
__u16 stripe_count)
{
diff --git a/drivers/staging/lustre/lustre/lov/lov_internal.h b/drivers/staging/lustre/lustre/lov/lov_internal.h
index 590f9326af37..eef9afac8467 100644
--- a/drivers/staging/lustre/lustre/lov/lov_internal.h
+++ b/drivers/staging/lustre/lustre/lov/lov_internal.h
@@ -72,6 +72,21 @@
})
#endif
+#define pool_tgt_size(p) ((p)->pool_obds.op_size)
+#define pool_tgt_count(p) ((p)->pool_obds.op_count)
+#define pool_tgt_array(p) ((p)->pool_obds.op_array)
+#define pool_tgt_rw_sem(p) ((p)->pool_obds.op_rw_sem)
+
+struct pool_desc {
+ char pool_name[LOV_MAXPOOLNAME + 1];
+ struct ost_pool pool_obds;
+ atomic_t pool_refcount;
+ struct hlist_node pool_hash; /* access by poolname */
+ struct list_head pool_list; /* serial access */
+ struct dentry *pool_debugfs_entry; /* file in debugfs */
+ struct obd_device *pool_lobd; /* owner */
+};
+
struct lov_request {
struct obd_info rq_oi;
struct lov_request_set *rq_rqset;
@@ -88,7 +103,6 @@ struct lov_request {
};
struct lov_request_set {
- struct ldlm_enqueue_info *set_ei;
struct obd_info *set_oi;
atomic_t set_refcount;
struct obd_export *set_exp;
@@ -102,10 +116,8 @@ struct lov_request_set {
atomic_t set_finish_checked;
struct llog_cookie *set_cookies;
int set_cookie_sent;
- struct obd_trans_info *set_oti;
struct list_head set_list;
wait_queue_head_t set_waitq;
- spinlock_t set_lock;
};
extern struct kmem_cache *lov_oinfo_slab;
@@ -114,12 +126,6 @@ extern struct lu_kmem_descr lov_caches[];
void lov_finish_set(struct lov_request_set *set);
-static inline void lov_get_reqset(struct lov_request_set *set)
-{
- LASSERT(atomic_read(&set->set_refcount) > 0);
- atomic_inc(&set->set_refcount);
-}
-
static inline void lov_put_reqset(struct lov_request_set *set)
{
if (atomic_dec_and_test(&set->set_refcount))
@@ -146,10 +152,8 @@ int lov_stripe_intersects(struct lov_stripe_md *lsm, int stripeno,
u64 start, u64 end,
u64 *obd_start, u64 *obd_end);
int lov_stripe_number(struct lov_stripe_md *lsm, u64 lov_off);
-
-/* lov_qos.c */
-#define LOV_USES_ASSIGNED_STRIPE 0
-#define LOV_USES_DEFAULT_STRIPE 1
+pgoff_t lov_stripe_pgoff(struct lov_stripe_md *lsm, pgoff_t stripe_index,
+ int stripe);
/* lov_request.c */
int lov_update_common_set(struct lov_request_set *set,
@@ -176,6 +180,8 @@ int lov_fini_statfs_set(struct lov_request_set *set);
int lov_statfs_interpret(struct ptlrpc_request_set *rqset, void *data, int rc);
/* lov_obd.c */
+void lov_stripe_lock(struct lov_stripe_md *md);
+void lov_stripe_unlock(struct lov_stripe_md *md);
void lov_fix_desc(struct lov_desc *desc);
void lov_fix_desc_stripe_size(__u64 *val);
void lov_fix_desc_stripe_count(__u32 *val);
@@ -231,8 +237,6 @@ int lov_pool_new(struct obd_device *obd, char *poolname);
int lov_pool_del(struct obd_device *obd, char *poolname);
int lov_pool_add(struct obd_device *obd, char *poolname, char *ostname);
int lov_pool_remove(struct obd_device *obd, char *poolname, char *ostname);
-struct pool_desc *lov_find_pool(struct lov_obd *lov, char *poolname);
-int lov_check_index_in_pool(__u32 idx, struct pool_desc *pool);
void lov_pool_putref(struct pool_desc *pool);
static inline struct lov_stripe_md *lsm_addref(struct lov_stripe_md *lsm)
diff --git a/drivers/staging/lustre/lustre/lov/lov_io.c b/drivers/staging/lustre/lustre/lov/lov_io.c
index 4296aacd84fc..86cb3f8f9246 100644
--- a/drivers/staging/lustre/lustre/lov/lov_io.c
+++ b/drivers/staging/lustre/lustre/lov/lov_io.c
@@ -225,8 +225,9 @@ struct lov_io_sub *lov_sub_get(const struct lu_env *env,
if (!sub->sub_io_initialized) {
sub->sub_stripe = stripe;
rc = lov_io_sub_init(env, lio, sub);
- } else
+ } else {
rc = 0;
+ }
if (rc == 0)
lov_sub_enter(sub);
else
@@ -245,13 +246,15 @@ void lov_sub_put(struct lov_io_sub *sub)
*
*/
-static int lov_page_stripe(const struct cl_page *page)
+int lov_page_stripe(const struct cl_page *page)
{
struct lovsub_object *subobj;
+ const struct cl_page_slice *slice;
+
+ slice = cl_page_at(page, &lovsub_device_type);
+ LASSERT(slice->cpl_obj);
- subobj = lu2lovsub(
- lu_object_locate(page->cp_child->cp_obj->co_lu.lo_header,
- &lovsub_device_type));
+ subobj = cl2lovsub(slice->cpl_obj);
return subobj->lso_index;
}
@@ -274,10 +277,11 @@ struct lov_io_sub *lov_page_subio(const struct lu_env *env, struct lov_io *lio,
static int lov_io_subio_init(const struct lu_env *env, struct lov_io *lio,
struct cl_io *io)
{
- struct lov_stripe_md *lsm = lio->lis_object->lo_lsm;
+ struct lov_stripe_md *lsm;
int result;
LASSERT(lio->lis_object);
+ lsm = lio->lis_object->lo_lsm;
/*
* Need to be optimized, we can't afford to allocate a piece of memory
@@ -292,8 +296,9 @@ static int lov_io_subio_init(const struct lu_env *env, struct lov_io *lio,
lio->lis_single_subio_index = -1;
lio->lis_active_subios = 0;
result = 0;
- } else
+ } else {
result = -ENOMEM;
+ }
return result;
}
@@ -411,8 +416,9 @@ static int lov_io_iter_init(const struct lu_env *env,
lov_sub_put(sub);
CDEBUG(D_VFSTRACE, "shrink: %d [%llu, %llu)\n",
stripe, start, end);
- } else
+ } else {
rc = PTR_ERR(sub);
+ }
if (!rc)
list_add_tail(&sub->sub_linkage, &lio->lis_active);
@@ -436,7 +442,6 @@ static int lov_io_rw_iter_init(const struct lu_env *env,
/* fast path for common case. */
if (lio->lis_nr_subios != 1 && !cl_io_is_append(io)) {
-
lov_do_div64(start, ssize);
next = (start + 1) * ssize;
if (next <= start * ssize)
@@ -543,13 +548,6 @@ static void lov_io_unlock(const struct lu_env *env,
LASSERT(rc == 0);
}
-static struct cl_page_list *lov_io_submit_qin(struct lov_device *ld,
- struct cl_page_list *qin,
- int idx, int alloc)
-{
- return alloc ? &qin[idx] : &ld->ld_emrg[idx]->emrg_page_list;
-}
-
/**
* lov implementation of cl_operations::cio_submit() method. It takes a list
* of pages in \a queue, splits it into per-stripe sub-lists, invokes
@@ -569,25 +567,17 @@ static int lov_io_submit(const struct lu_env *env,
const struct cl_io_slice *ios,
enum cl_req_type crt, struct cl_2queue *queue)
{
- struct lov_io *lio = cl2lov_io(env, ios);
- struct lov_object *obj = lio->lis_object;
- struct lov_device *ld = lu2lov_dev(lov2cl(obj)->co_lu.lo_dev);
- struct cl_page_list *qin = &queue->c2_qin;
- struct cl_2queue *cl2q = &lov_env_info(env)->lti_cl2q;
- struct cl_page_list *stripes_qin = NULL;
+ struct cl_page_list *qin = &queue->c2_qin;
+ struct lov_io *lio = cl2lov_io(env, ios);
+ struct lov_io_sub *sub;
+ struct cl_page_list *plist = &lov_env_info(env)->lti_plist;
struct cl_page *page;
- struct cl_page *tmp;
int stripe;
-#define QIN(stripe) lov_io_submit_qin(ld, stripes_qin, stripe, alloc)
-
int rc = 0;
- int alloc =
- !(current->flags & PF_MEMALLOC);
if (lio->lis_active_subios == 1) {
int idx = lio->lis_single_subio_index;
- struct lov_io_sub *sub;
LASSERT(idx < lio->lis_nr_subios);
sub = lov_sub_get(env, lio, idx);
@@ -600,119 +590,120 @@ static int lov_io_submit(const struct lu_env *env,
}
LASSERT(lio->lis_subs);
- if (alloc) {
- stripes_qin =
- libcfs_kvzalloc(sizeof(*stripes_qin) *
- lio->lis_nr_subios,
- GFP_NOFS);
- if (!stripes_qin)
- return -ENOMEM;
-
- for (stripe = 0; stripe < lio->lis_nr_subios; stripe++)
- cl_page_list_init(&stripes_qin[stripe]);
- } else {
- /*
- * If we get here, it means pageout & swap doesn't help.
- * In order to not make things worse, even don't try to
- * allocate the memory with __GFP_NOWARN. -jay
- */
- mutex_lock(&ld->ld_mutex);
- lio->lis_mem_frozen = 1;
- }
- cl_2queue_init(cl2q);
- cl_page_list_for_each_safe(page, tmp, qin) {
- stripe = lov_page_stripe(page);
- cl_page_list_move(QIN(stripe), qin, page);
- }
+ cl_page_list_init(plist);
+ while (qin->pl_nr > 0) {
+ struct cl_2queue *cl2q = &lov_env_info(env)->lti_cl2q;
- for (stripe = 0; stripe < lio->lis_nr_subios; stripe++) {
- struct lov_io_sub *sub;
- struct cl_page_list *sub_qin = QIN(stripe);
+ cl_2queue_init(cl2q);
- if (list_empty(&sub_qin->pl_pages))
- continue;
+ page = cl_page_list_first(qin);
+ cl_page_list_move(&cl2q->c2_qin, qin, page);
+
+ stripe = lov_page_stripe(page);
+ while (qin->pl_nr > 0) {
+ page = cl_page_list_first(qin);
+ if (stripe != lov_page_stripe(page))
+ break;
+
+ cl_page_list_move(&cl2q->c2_qin, qin, page);
+ }
- cl_page_list_splice(sub_qin, &cl2q->c2_qin);
sub = lov_sub_get(env, lio, stripe);
if (!IS_ERR(sub)) {
rc = cl_io_submit_rw(sub->sub_env, sub->sub_io,
crt, cl2q);
lov_sub_put(sub);
- } else
+ } else {
rc = PTR_ERR(sub);
- cl_page_list_splice(&cl2q->c2_qin, &queue->c2_qin);
+ }
+
+ cl_page_list_splice(&cl2q->c2_qin, plist);
cl_page_list_splice(&cl2q->c2_qout, &queue->c2_qout);
+ cl_2queue_fini(env, cl2q);
+
if (rc != 0)
break;
}
- for (stripe = 0; stripe < lio->lis_nr_subios; stripe++) {
- struct cl_page_list *sub_qin = QIN(stripe);
+ cl_page_list_splice(plist, qin);
+ cl_page_list_fini(env, plist);
- if (list_empty(&sub_qin->pl_pages))
- continue;
+ return rc;
+}
+
+static int lov_io_commit_async(const struct lu_env *env,
+ const struct cl_io_slice *ios,
+ struct cl_page_list *queue, int from, int to,
+ cl_commit_cbt cb)
+{
+ struct cl_page_list *plist = &lov_env_info(env)->lti_plist;
+ struct lov_io *lio = cl2lov_io(env, ios);
+ struct lov_io_sub *sub;
+ struct cl_page *page;
+ int rc = 0;
+
+ if (lio->lis_active_subios == 1) {
+ int idx = lio->lis_single_subio_index;
- cl_page_list_splice(sub_qin, qin);
+ LASSERT(idx < lio->lis_nr_subios);
+ sub = lov_sub_get(env, lio, idx);
+ LASSERT(!IS_ERR(sub));
+ LASSERT(sub->sub_io == &lio->lis_single_subio);
+ rc = cl_io_commit_async(sub->sub_env, sub->sub_io, queue,
+ from, to, cb);
+ lov_sub_put(sub);
+ return rc;
}
- if (alloc) {
- kvfree(stripes_qin);
- } else {
- int i;
+ LASSERT(lio->lis_subs);
- for (i = 0; i < lio->lis_nr_subios; i++) {
- struct cl_io *cio = lio->lis_subs[i].sub_io;
+ cl_page_list_init(plist);
+ while (queue->pl_nr > 0) {
+ int stripe_to = to;
+ int stripe;
- if (cio && cio == &ld->ld_emrg[i]->emrg_subio)
- lov_io_sub_fini(env, lio, &lio->lis_subs[i]);
+ LASSERT(plist->pl_nr == 0);
+ page = cl_page_list_first(queue);
+ cl_page_list_move(plist, queue, page);
+
+ stripe = lov_page_stripe(page);
+ while (queue->pl_nr > 0) {
+ page = cl_page_list_first(queue);
+ if (stripe != lov_page_stripe(page))
+ break;
+
+ cl_page_list_move(plist, queue, page);
}
- lio->lis_mem_frozen = 0;
- mutex_unlock(&ld->ld_mutex);
- }
- return rc;
-#undef QIN
-}
+ if (queue->pl_nr > 0) /* still has more pages */
+ stripe_to = PAGE_SIZE;
-static int lov_io_prepare_write(const struct lu_env *env,
- const struct cl_io_slice *ios,
- const struct cl_page_slice *slice,
- unsigned from, unsigned to)
-{
- struct lov_io *lio = cl2lov_io(env, ios);
- struct cl_page *sub_page = lov_sub_page(slice);
- struct lov_io_sub *sub;
- int result;
+ sub = lov_sub_get(env, lio, stripe);
+ if (!IS_ERR(sub)) {
+ rc = cl_io_commit_async(sub->sub_env, sub->sub_io,
+ plist, from, stripe_to, cb);
+ lov_sub_put(sub);
+ } else {
+ rc = PTR_ERR(sub);
+ break;
+ }
- sub = lov_page_subio(env, lio, slice);
- if (!IS_ERR(sub)) {
- result = cl_io_prepare_write(sub->sub_env, sub->sub_io,
- sub_page, from, to);
- lov_sub_put(sub);
- } else
- result = PTR_ERR(sub);
- return result;
-}
+ if (plist->pl_nr > 0) /* short write */
+ break;
-static int lov_io_commit_write(const struct lu_env *env,
- const struct cl_io_slice *ios,
- const struct cl_page_slice *slice,
- unsigned from, unsigned to)
-{
- struct lov_io *lio = cl2lov_io(env, ios);
- struct cl_page *sub_page = lov_sub_page(slice);
- struct lov_io_sub *sub;
- int result;
+ from = 0;
+ }
- sub = lov_page_subio(env, lio, slice);
- if (!IS_ERR(sub)) {
- result = cl_io_commit_write(sub->sub_env, sub->sub_io,
- sub_page, from, to);
- lov_sub_put(sub);
- } else
- result = PTR_ERR(sub);
- return result;
+ /* for error case, add the page back into the qin list */
+ LASSERT(ergo(rc == 0, plist->pl_nr == 0));
+ while (plist->pl_nr > 0) {
+ /* error occurred, add the uncommitted pages back into queue */
+ page = cl_page_list_last(plist);
+ cl_page_list_move_head(queue, plist, page);
+ }
+
+ return rc;
}
static int lov_io_fault_start(const struct lu_env *env,
@@ -803,16 +794,8 @@ static const struct cl_io_operations lov_io_ops = {
.cio_fini = lov_io_fini
}
},
- .req_op = {
- [CRT_READ] = {
- .cio_submit = lov_io_submit
- },
- [CRT_WRITE] = {
- .cio_submit = lov_io_submit
- }
- },
- .cio_prepare_write = lov_io_prepare_write,
- .cio_commit_write = lov_io_commit_write
+ .cio_submit = lov_io_submit,
+ .cio_commit_async = lov_io_commit_async,
};
/*****************************************************************************
@@ -880,15 +863,8 @@ static const struct cl_io_operations lov_empty_io_ops = {
.cio_fini = lov_empty_io_fini
}
},
- .req_op = {
- [CRT_READ] = {
- .cio_submit = LOV_EMPTY_IMPOSSIBLE
- },
- [CRT_WRITE] = {
- .cio_submit = LOV_EMPTY_IMPOSSIBLE
- }
- },
- .cio_commit_write = LOV_EMPTY_IMPOSSIBLE
+ .cio_submit = LOV_EMPTY_IMPOSSIBLE,
+ .cio_commit_async = LOV_EMPTY_IMPOSSIBLE
};
int lov_io_init_raid0(const struct lu_env *env, struct cl_object *obj,
@@ -943,7 +919,7 @@ int lov_io_init_empty(const struct lu_env *env, struct cl_object *obj,
}
io->ci_result = result < 0 ? result : 0;
- return result != 0;
+ return result;
}
int lov_io_init_released(const struct lu_env *env, struct cl_object *obj,
@@ -986,7 +962,7 @@ int lov_io_init_released(const struct lu_env *env, struct cl_object *obj,
}
io->ci_result = result < 0 ? result : 0;
- return result != 0;
+ return result;
}
/** @} lov */
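
Aside (not part of the patch): the rewritten lov_io_submit() and lov_io_commit_async() above both peel runs of consecutive pages that land on the same stripe off the head of the queue and hand each run to that stripe's sub-IO. The core of that grouping loop, reduced to an int array with a hypothetical page_stripe() stand-in:

#include <stdio.h>

static int page_stripe(int index)	/* stand-in for lov_page_stripe() */
{
	return index / 4;		/* pretend 4 pages per stripe */
}

static void submit_by_stripe(const int *pages, int nr)
{
	int i = 0;

	while (i < nr) {
		int stripe = page_stripe(pages[i]);
		int first = i;

		/* extend the run while pages stay on the same stripe */
		while (i < nr && page_stripe(pages[i]) == stripe)
			i++;
		printf("stripe %d: submit %d page(s) [%d..%d]\n",
		       stripe, i - first, pages[first], pages[i - 1]);
	}
}

int main(void)
{
	int pages[] = { 0, 1, 2, 3, 4, 5, 8, 9 };

	submit_by_stripe(pages, (int)(sizeof(pages) / sizeof(pages[0])));
	return 0;
}
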
diff --git a/drivers/staging/lustre/lustre/lov/lov_lock.c b/drivers/staging/lustre/lustre/lov/lov_lock.c
index ae854bc25dbe..1b203d18c6e9 100644
--- a/drivers/staging/lustre/lustre/lov/lov_lock.c
+++ b/drivers/staging/lustre/lustre/lov/lov_lock.c
@@ -46,11 +46,6 @@
* @{
*/
-static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
- struct cl_lock *parent);
-
-static int lov_lock_unuse(const struct lu_env *env,
- const struct cl_lock_slice *slice);
/*****************************************************************************
*
* Lov lock operations.
@@ -58,7 +53,7 @@ static int lov_lock_unuse(const struct lu_env *env,
*/
static struct lov_sublock_env *lov_sublock_env_get(const struct lu_env *env,
- struct cl_lock *parent,
+ const struct cl_lock *parent,
struct lov_lock_sub *lls)
{
struct lov_sublock_env *subenv;
@@ -100,185 +95,26 @@ static void lov_sublock_env_put(struct lov_sublock_env *subenv)
lov_sub_put(subenv->lse_sub);
}
-static void lov_sublock_adopt(const struct lu_env *env, struct lov_lock *lck,
- struct cl_lock *sublock, int idx,
- struct lov_lock_link *link)
+static int lov_sublock_init(const struct lu_env *env,
+ const struct cl_lock *parent,
+ struct lov_lock_sub *lls)
{
- struct lovsub_lock *lsl;
- struct cl_lock *parent = lck->lls_cl.cls_lock;
- int rc;
-
- LASSERT(cl_lock_is_mutexed(parent));
- LASSERT(cl_lock_is_mutexed(sublock));
-
- lsl = cl2sub_lock(sublock);
- /*
- * check that sub-lock doesn't have lock link to this top-lock.
- */
- LASSERT(!lov_lock_link_find(env, lck, lsl));
- LASSERT(idx < lck->lls_nr);
-
- lck->lls_sub[idx].sub_lock = lsl;
- lck->lls_nr_filled++;
- LASSERT(lck->lls_nr_filled <= lck->lls_nr);
- list_add_tail(&link->lll_list, &lsl->lss_parents);
- link->lll_idx = idx;
- link->lll_super = lck;
- cl_lock_get(parent);
- lu_ref_add(&parent->cll_reference, "lov-child", sublock);
- lck->lls_sub[idx].sub_flags |= LSF_HELD;
- cl_lock_user_add(env, sublock);
-
- rc = lov_sublock_modify(env, lck, lsl, &sublock->cll_descr, idx);
- LASSERT(rc == 0); /* there is no way this can fail, currently */
-}
-
-static struct cl_lock *lov_sublock_alloc(const struct lu_env *env,
- const struct cl_io *io,
- struct lov_lock *lck,
- int idx, struct lov_lock_link **out)
-{
- struct cl_lock *sublock;
- struct cl_lock *parent;
- struct lov_lock_link *link;
-
- LASSERT(idx < lck->lls_nr);
-
- link = kmem_cache_zalloc(lov_lock_link_kmem, GFP_NOFS);
- if (link) {
- struct lov_sublock_env *subenv;
- struct lov_lock_sub *lls;
- struct cl_lock_descr *descr;
-
- parent = lck->lls_cl.cls_lock;
- lls = &lck->lls_sub[idx];
- descr = &lls->sub_got;
-
- subenv = lov_sublock_env_get(env, parent, lls);
- if (!IS_ERR(subenv)) {
- /* CAVEAT: Don't try to add a field in lov_lock_sub
- * to remember the subio. This is because lock is able
- * to be cached, but this is not true for IO. This
- * further means a sublock might be referenced in
- * different io context. -jay
- */
-
- sublock = cl_lock_hold(subenv->lse_env, subenv->lse_io,
- descr, "lov-parent", parent);
- lov_sublock_env_put(subenv);
- } else {
- /* error occurs. */
- sublock = (void *)subenv;
- }
-
- if (!IS_ERR(sublock))
- *out = link;
- else
- kmem_cache_free(lov_lock_link_kmem, link);
- } else
- sublock = ERR_PTR(-ENOMEM);
- return sublock;
-}
-
-static void lov_sublock_unlock(const struct lu_env *env,
- struct lovsub_lock *lsl,
- struct cl_lock_closure *closure,
- struct lov_sublock_env *subenv)
-{
- lov_sublock_env_put(subenv);
- lsl->lss_active = NULL;
- cl_lock_disclosure(env, closure);
-}
-
-static int lov_sublock_lock(const struct lu_env *env,
- struct lov_lock *lck,
- struct lov_lock_sub *lls,
- struct cl_lock_closure *closure,
- struct lov_sublock_env **lsep)
-{
- struct lovsub_lock *sublock;
- struct cl_lock *child;
- int result = 0;
-
- LASSERT(list_empty(&closure->clc_list));
-
- sublock = lls->sub_lock;
- child = sublock->lss_cl.cls_lock;
- result = cl_lock_closure_build(env, child, closure);
- if (result == 0) {
- struct cl_lock *parent = closure->clc_origin;
-
- LASSERT(cl_lock_is_mutexed(child));
- sublock->lss_active = parent;
-
- if (unlikely((child->cll_state == CLS_FREEING) ||
- (child->cll_flags & CLF_CANCELLED))) {
- struct lov_lock_link *link;
- /*
- * we could race with lock deletion which temporarily
- * put the lock in freeing state, bug 19080.
- */
- LASSERT(!(lls->sub_flags & LSF_HELD));
-
- link = lov_lock_link_find(env, lck, sublock);
- LASSERT(link);
- lov_lock_unlink(env, link, sublock);
- lov_sublock_unlock(env, sublock, closure, NULL);
- lck->lls_cancel_race = 1;
- result = CLO_REPEAT;
- } else if (lsep) {
- struct lov_sublock_env *subenv;
+ struct lov_sublock_env *subenv;
+ int result;
- subenv = lov_sublock_env_get(env, parent, lls);
- if (IS_ERR(subenv)) {
- lov_sublock_unlock(env, sublock,
- closure, NULL);
- result = PTR_ERR(subenv);
- } else {
- *lsep = subenv;
- }
- }
+ subenv = lov_sublock_env_get(env, parent, lls);
+ if (!IS_ERR(subenv)) {
+ result = cl_lock_init(subenv->lse_env, &lls->sub_lock,
+ subenv->lse_io);
+ lov_sublock_env_put(subenv);
+ } else {
+ /* error occurs. */
+ result = PTR_ERR(subenv);
}
return result;
}
/**
- * Updates the result of a top-lock operation from a result of sub-lock
- * sub-operations. Top-operations like lov_lock_{enqueue,use,unuse}() iterate
- * over sub-locks and lov_subresult() is used to calculate return value of a
- * top-operation. To this end, possible return values of sub-operations are
- * ordered as
- *
- * - 0 success
- * - CLO_WAIT wait for event
- * - CLO_REPEAT repeat top-operation
- * - -ne fundamental error
- *
- * Top-level return code can only go down through this list. CLO_REPEAT
- * overwrites CLO_WAIT, because lock mutex was released and sleeping condition
- * has to be rechecked by the upper layer.
- */
-static int lov_subresult(int result, int rc)
-{
- int result_rank;
- int rc_rank;
-
- LASSERTF(result <= 0 || result == CLO_REPEAT || result == CLO_WAIT,
- "result = %d\n", result);
- LASSERTF(rc <= 0 || rc == CLO_REPEAT || rc == CLO_WAIT,
- "rc = %d\n", rc);
- CLASSERT(CLO_WAIT < CLO_REPEAT);
-
- /* calculate ranks in the ordering above */
- result_rank = result < 0 ? 1 + CLO_REPEAT : result;
- rc_rank = rc < 0 ? 1 + CLO_REPEAT : rc;
-
- if (result_rank < rc_rank)
- result = rc;
- return result;
-}
-
-/**
* Creates sub-locks for a given lov_lock for the first time.
*
* Goes through all sub-objects of top-object, and creates sub-locks on every
@@ -286,8 +122,9 @@ static int lov_subresult(int result, int rc)
* fact that top-lock (that is being created) can be accessed concurrently
* through already created sub-locks (possibly shared with other top-locks).
*/
-static int lov_lock_sub_init(const struct lu_env *env,
- struct lov_lock *lck, const struct cl_io *io)
+static struct lov_lock *lov_lock_sub_init(const struct lu_env *env,
+ const struct cl_object *obj,
+ struct cl_lock *lock)
{
int result = 0;
int i;
@@ -297,241 +134,86 @@ static int lov_lock_sub_init(const struct lu_env *env,
u64 file_start;
u64 file_end;
- struct lov_object *loo = cl2lov(lck->lls_cl.cls_obj);
+ struct lov_object *loo = cl2lov(obj);
struct lov_layout_raid0 *r0 = lov_r0(loo);
- struct cl_lock *parent = lck->lls_cl.cls_lock;
+ struct lov_lock *lovlck;
- lck->lls_orig = parent->cll_descr;
- file_start = cl_offset(lov2cl(loo), parent->cll_descr.cld_start);
- file_end = cl_offset(lov2cl(loo), parent->cll_descr.cld_end + 1) - 1;
+ file_start = cl_offset(lov2cl(loo), lock->cll_descr.cld_start);
+ file_end = cl_offset(lov2cl(loo), lock->cll_descr.cld_end + 1) - 1;
for (i = 0, nr = 0; i < r0->lo_nr; i++) {
/*
* XXX for wide striping a smarter algorithm is desirable,
* breaking out of the loop early.
*/
- if (likely(r0->lo_sub[i]) &&
+ if (likely(r0->lo_sub[i]) && /* spare layout */
lov_stripe_intersects(loo->lo_lsm, i,
file_start, file_end, &start, &end))
nr++;
}
LASSERT(nr > 0);
- lck->lls_sub = libcfs_kvzalloc(nr * sizeof(lck->lls_sub[0]), GFP_NOFS);
- if (!lck->lls_sub)
- return -ENOMEM;
+ lovlck = libcfs_kvzalloc(offsetof(struct lov_lock, lls_sub[nr]),
+ GFP_NOFS);
+ if (!lovlck)
+ return ERR_PTR(-ENOMEM);
- lck->lls_nr = nr;
- /*
- * First, fill in sub-lock descriptions in
- * lck->lls_sub[].sub_descr. They are used by lov_sublock_alloc()
- * (called below in this function, and by lov_lock_enqueue()) to
- * create sub-locks. At this moment, no other thread can access
- * top-lock.
- */
+ lovlck->lls_nr = nr;
for (i = 0, nr = 0; i < r0->lo_nr; ++i) {
if (likely(r0->lo_sub[i]) &&
lov_stripe_intersects(loo->lo_lsm, i,
file_start, file_end, &start, &end)) {
+ struct lov_lock_sub *lls = &lovlck->lls_sub[nr];
struct cl_lock_descr *descr;
- descr = &lck->lls_sub[nr].sub_descr;
+ descr = &lls->sub_lock.cll_descr;
LASSERT(!descr->cld_obj);
descr->cld_obj = lovsub2cl(r0->lo_sub[i]);
descr->cld_start = cl_index(descr->cld_obj, start);
descr->cld_end = cl_index(descr->cld_obj, end);
- descr->cld_mode = parent->cll_descr.cld_mode;
- descr->cld_gid = parent->cll_descr.cld_gid;
- descr->cld_enq_flags = parent->cll_descr.cld_enq_flags;
- /* XXX has no effect */
- lck->lls_sub[nr].sub_got = *descr;
- lck->lls_sub[nr].sub_stripe = i;
+ descr->cld_mode = lock->cll_descr.cld_mode;
+ descr->cld_gid = lock->cll_descr.cld_gid;
+ descr->cld_enq_flags = lock->cll_descr.cld_enq_flags;
+ lls->sub_stripe = i;
+
+ /* initialize sub lock */
+ result = lov_sublock_init(env, lock, lls);
+ if (result < 0)
+ break;
+
+ lls->sub_initialized = 1;
nr++;
}
}
- LASSERT(nr == lck->lls_nr);
-
- /*
- * Some sub-locks can be missing at this point. This is not a problem,
- * because enqueue will create them anyway. Main duty of this function
- * is to fill in sub-lock descriptions in a race free manner.
- */
- return result;
-}
+ LASSERT(ergo(result == 0, nr == lovlck->lls_nr));
-static int lov_sublock_release(const struct lu_env *env, struct lov_lock *lck,
- int i, int deluser, int rc)
-{
- struct cl_lock *parent = lck->lls_cl.cls_lock;
-
- LASSERT(cl_lock_is_mutexed(parent));
-
- if (lck->lls_sub[i].sub_flags & LSF_HELD) {
- struct cl_lock *sublock;
- int dying;
-
- sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
- LASSERT(cl_lock_is_mutexed(sublock));
+ if (result != 0) {
+ for (i = 0; i < nr; ++i) {
+ if (!lovlck->lls_sub[i].sub_initialized)
+ break;
- lck->lls_sub[i].sub_flags &= ~LSF_HELD;
- if (deluser)
- cl_lock_user_del(env, sublock);
- /*
- * If the last hold is released, and cancellation is pending
- * for a sub-lock, release parent mutex, to avoid keeping it
- * while sub-lock is being paged out.
- */
- dying = (sublock->cll_descr.cld_mode == CLM_PHANTOM ||
- sublock->cll_descr.cld_mode == CLM_GROUP ||
- (sublock->cll_flags & (CLF_CANCELPEND|CLF_DOOMED))) &&
- sublock->cll_holds == 1;
- if (dying)
- cl_lock_mutex_put(env, parent);
- cl_lock_unhold(env, sublock, "lov-parent", parent);
- if (dying) {
- cl_lock_mutex_get(env, parent);
- rc = lov_subresult(rc, CLO_REPEAT);
+ cl_lock_fini(env, &lovlck->lls_sub[i].sub_lock);
}
- /*
- * From now on lck->lls_sub[i].sub_lock is a "weak" pointer,
- * not backed by a reference on a
- * sub-lock. lovsub_lock_delete() will clear
- * lck->lls_sub[i].sub_lock under semaphores, just before
- * sub-lock is destroyed.
- */
+ kvfree(lovlck);
+ lovlck = ERR_PTR(result);
}
- return rc;
-}
-
-static void lov_sublock_hold(const struct lu_env *env, struct lov_lock *lck,
- int i)
-{
- struct cl_lock *parent = lck->lls_cl.cls_lock;
-
- LASSERT(cl_lock_is_mutexed(parent));
-
- if (!(lck->lls_sub[i].sub_flags & LSF_HELD)) {
- struct cl_lock *sublock;
-
- sublock = lck->lls_sub[i].sub_lock->lss_cl.cls_lock;
- LASSERT(cl_lock_is_mutexed(sublock));
- LASSERT(sublock->cll_state != CLS_FREEING);
- lck->lls_sub[i].sub_flags |= LSF_HELD;
-
- cl_lock_get_trust(sublock);
- cl_lock_hold_add(env, sublock, "lov-parent", parent);
- cl_lock_user_add(env, sublock);
- cl_lock_put(env, sublock);
- }
+ return lovlck;
}
static void lov_lock_fini(const struct lu_env *env,
struct cl_lock_slice *slice)
{
- struct lov_lock *lck;
+ struct lov_lock *lovlck;
int i;
- lck = cl2lov_lock(slice);
- LASSERT(lck->lls_nr_filled == 0);
- if (lck->lls_sub) {
- for (i = 0; i < lck->lls_nr; ++i)
- /*
- * No sub-locks exists at this point, as sub-lock has
- * a reference on its parent.
- */
- LASSERT(!lck->lls_sub[i].sub_lock);
- kvfree(lck->lls_sub);
+ lovlck = cl2lov_lock(slice);
+ for (i = 0; i < lovlck->lls_nr; ++i) {
+ LASSERT(!lovlck->lls_sub[i].sub_is_enqueued);
+ if (lovlck->lls_sub[i].sub_initialized)
+ cl_lock_fini(env, &lovlck->lls_sub[i].sub_lock);
}
- kmem_cache_free(lov_lock_kmem, lck);
-}
-
-static int lov_lock_enqueue_wait(const struct lu_env *env,
- struct lov_lock *lck,
- struct cl_lock *sublock)
-{
- struct cl_lock *lock = lck->lls_cl.cls_lock;
- int result;
-
- LASSERT(cl_lock_is_mutexed(lock));
-
- cl_lock_mutex_put(env, lock);
- result = cl_lock_enqueue_wait(env, sublock, 0);
- cl_lock_mutex_get(env, lock);
- return result ?: CLO_REPEAT;
-}
-
-/**
- * Tries to advance a state machine of a given sub-lock toward enqueuing of
- * the top-lock.
- *
- * \retval 0 if state-transition can proceed
- * \retval -ve otherwise.
- */
-static int lov_lock_enqueue_one(const struct lu_env *env, struct lov_lock *lck,
- struct cl_lock *sublock,
- struct cl_io *io, __u32 enqflags, int last)
-{
- int result;
-
- /* first, try to enqueue a sub-lock ... */
- result = cl_enqueue_try(env, sublock, io, enqflags);
- if ((sublock->cll_state == CLS_ENQUEUED) && !(enqflags & CEF_AGL)) {
- /* if it is enqueued, try to `wait' on it---maybe it's already
- * granted
- */
- result = cl_wait_try(env, sublock);
- if (result == CLO_REENQUEUED)
- result = CLO_WAIT;
- }
- /*
- * If CEF_ASYNC flag is set, then all sub-locks can be enqueued in
- * parallel, otherwise---enqueue has to wait until sub-lock is granted
- * before proceeding to the next one.
- */
- if ((result == CLO_WAIT) && (sublock->cll_state <= CLS_HELD) &&
- (enqflags & CEF_ASYNC) && (!last || (enqflags & CEF_AGL)))
- result = 0;
- return result;
-}
-
-/**
- * Helper function for lov_lock_enqueue() that creates missing sub-lock.
- */
-static int lov_sublock_fill(const struct lu_env *env, struct cl_lock *parent,
- struct cl_io *io, struct lov_lock *lck, int idx)
-{
- struct lov_lock_link *link = NULL;
- struct cl_lock *sublock;
- int result;
-
- LASSERT(parent->cll_depth == 1);
- cl_lock_mutex_put(env, parent);
- sublock = lov_sublock_alloc(env, io, lck, idx, &link);
- if (!IS_ERR(sublock))
- cl_lock_mutex_get(env, sublock);
- cl_lock_mutex_get(env, parent);
-
- if (!IS_ERR(sublock)) {
- cl_lock_get_trust(sublock);
- if (parent->cll_state == CLS_QUEUING &&
- !lck->lls_sub[idx].sub_lock) {
- lov_sublock_adopt(env, lck, sublock, idx, link);
- } else {
- kmem_cache_free(lov_lock_link_kmem, link);
- /* other thread allocated sub-lock, or enqueue is no
- * longer going on
- */
- cl_lock_mutex_put(env, parent);
- cl_lock_unhold(env, sublock, "lov-parent", parent);
- cl_lock_mutex_get(env, parent);
- }
- cl_lock_mutex_put(env, sublock);
- cl_lock_put(env, sublock);
- result = CLO_REPEAT;
- } else
- result = PTR_ERR(sublock);
- return result;
+ kvfree(lovlck);
}
/**
@@ -543,529 +225,59 @@ static int lov_sublock_fill(const struct lu_env *env, struct cl_lock *parent,
*/
static int lov_lock_enqueue(const struct lu_env *env,
const struct cl_lock_slice *slice,
- struct cl_io *io, __u32 enqflags)
+ struct cl_io *io, struct cl_sync_io *anchor)
{
- struct cl_lock *lock = slice->cls_lock;
- struct lov_lock *lck = cl2lov_lock(slice);
- struct cl_lock_closure *closure = lov_closure_get(env, lock);
+ struct cl_lock *lock = slice->cls_lock;
+ struct lov_lock *lovlck = cl2lov_lock(slice);
int i;
- int result;
- enum cl_lock_state minstate;
+ int rc = 0;
- for (result = 0, minstate = CLS_FREEING, i = 0; i < lck->lls_nr; ++i) {
- int rc;
- struct lovsub_lock *sub;
- struct lov_lock_sub *lls;
- struct cl_lock *sublock;
+ for (i = 0; i < lovlck->lls_nr; ++i) {
+ struct lov_lock_sub *lls = &lovlck->lls_sub[i];
struct lov_sublock_env *subenv;
- if (lock->cll_state != CLS_QUEUING) {
- /*
- * Lock might have left QUEUING state if previous
- * iteration released its mutex. Stop enqueing in this
- * case and let the upper layer to decide what to do.
- */
- LASSERT(i > 0 && result != 0);
- break;
- }
-
- lls = &lck->lls_sub[i];
- sub = lls->sub_lock;
- /*
- * Sub-lock might have been canceled, while top-lock was
- * cached.
- */
- if (!sub) {
- result = lov_sublock_fill(env, lock, io, lck, i);
- /* lov_sublock_fill() released @lock mutex,
- * restart.
- */
+ subenv = lov_sublock_env_get(env, lock, lls);
+ if (IS_ERR(subenv)) {
+ rc = PTR_ERR(subenv);
break;
}
- sublock = sub->lss_cl.cls_lock;
- rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
- if (rc == 0) {
- lov_sublock_hold(env, lck, i);
- rc = lov_lock_enqueue_one(subenv->lse_env, lck, sublock,
- subenv->lse_io, enqflags,
- i == lck->lls_nr - 1);
- minstate = min(minstate, sublock->cll_state);
- if (rc == CLO_WAIT) {
- switch (sublock->cll_state) {
- case CLS_QUEUING:
- /* take recursive mutex, the lock is
- * released in lov_lock_enqueue_wait.
- */
- cl_lock_mutex_get(env, sublock);
- lov_sublock_unlock(env, sub, closure,
- subenv);
- rc = lov_lock_enqueue_wait(env, lck,
- sublock);
- break;
- case CLS_CACHED:
- cl_lock_get(sublock);
- /* take recursive mutex of sublock */
- cl_lock_mutex_get(env, sublock);
- /* need to release all locks in closure
- * otherwise it may deadlock. LU-2683.
- */
- lov_sublock_unlock(env, sub, closure,
- subenv);
- /* sublock and parent are held. */
- rc = lov_sublock_release(env, lck, i,
- 1, rc);
- cl_lock_mutex_put(env, sublock);
- cl_lock_put(env, sublock);
- break;
- default:
- lov_sublock_unlock(env, sub, closure,
- subenv);
- break;
- }
- } else {
- LASSERT(!sublock->cll_conflict);
- lov_sublock_unlock(env, sub, closure, subenv);
- }
- }
- result = lov_subresult(result, rc);
- if (result != 0)
+ rc = cl_lock_enqueue(subenv->lse_env, subenv->lse_io,
+ &lls->sub_lock, anchor);
+ lov_sublock_env_put(subenv);
+ if (rc != 0)
break;
- }
- cl_lock_closure_fini(closure);
- return result ?: minstate >= CLS_ENQUEUED ? 0 : CLO_WAIT;
-}
-
-static int lov_lock_unuse(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct lov_lock *lck = cl2lov_lock(slice);
- struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
- int i;
- int result;
-
- for (result = 0, i = 0; i < lck->lls_nr; ++i) {
- int rc;
- struct lovsub_lock *sub;
- struct cl_lock *sublock;
- struct lov_lock_sub *lls;
- struct lov_sublock_env *subenv;
- /* top-lock state cannot change concurrently, because single
- * thread (one that released the last hold) carries unlocking
- * to the completion.
- */
- LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
- lls = &lck->lls_sub[i];
- sub = lls->sub_lock;
- if (!sub)
- continue;
-
- sublock = sub->lss_cl.cls_lock;
- rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
- if (rc == 0) {
- if (lls->sub_flags & LSF_HELD) {
- LASSERT(sublock->cll_state == CLS_HELD ||
- sublock->cll_state == CLS_ENQUEUED);
- rc = cl_unuse_try(subenv->lse_env, sublock);
- rc = lov_sublock_release(env, lck, i, 0, rc);
- }
- lov_sublock_unlock(env, sub, closure, subenv);
- }
- result = lov_subresult(result, rc);
+ lls->sub_is_enqueued = 1;
}
-
- if (result == 0 && lck->lls_cancel_race) {
- lck->lls_cancel_race = 0;
- result = -ESTALE;
- }
- cl_lock_closure_fini(closure);
- return result;
+ return rc;
}
static void lov_lock_cancel(const struct lu_env *env,
const struct cl_lock_slice *slice)
{
- struct lov_lock *lck = cl2lov_lock(slice);
- struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
+ struct cl_lock *lock = slice->cls_lock;
+ struct lov_lock *lovlck = cl2lov_lock(slice);
int i;
- int result;
- for (result = 0, i = 0; i < lck->lls_nr; ++i) {
- int rc;
- struct lovsub_lock *sub;
- struct cl_lock *sublock;
- struct lov_lock_sub *lls;
+ for (i = 0; i < lovlck->lls_nr; ++i) {
+ struct lov_lock_sub *lls = &lovlck->lls_sub[i];
+ struct cl_lock *sublock = &lls->sub_lock;
struct lov_sublock_env *subenv;
- /* top-lock state cannot change concurrently, because single
- * thread (one that released the last hold) carries unlocking
- * to the completion.
- */
- lls = &lck->lls_sub[i];
- sub = lls->sub_lock;
- if (!sub)
- continue;
-
- sublock = sub->lss_cl.cls_lock;
- rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
- if (rc == 0) {
- if (!(lls->sub_flags & LSF_HELD)) {
- lov_sublock_unlock(env, sub, closure, subenv);
- continue;
- }
-
- switch (sublock->cll_state) {
- case CLS_HELD:
- rc = cl_unuse_try(subenv->lse_env, sublock);
- lov_sublock_release(env, lck, i, 0, 0);
- break;
- default:
- lov_sublock_release(env, lck, i, 1, 0);
- break;
- }
- lov_sublock_unlock(env, sub, closure, subenv);
- }
-
- if (rc == CLO_REPEAT) {
- --i;
- continue;
- }
-
- result = lov_subresult(result, rc);
- }
-
- if (result)
- CL_LOCK_DEBUG(D_ERROR, env, slice->cls_lock,
- "lov_lock_cancel fails with %d.\n", result);
-
- cl_lock_closure_fini(closure);
-}
-
-static int lov_lock_wait(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct lov_lock *lck = cl2lov_lock(slice);
- struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
- enum cl_lock_state minstate;
- int reenqueued;
- int result;
- int i;
-
-again:
- for (result = 0, minstate = CLS_FREEING, i = 0, reenqueued = 0;
- i < lck->lls_nr; ++i) {
- int rc;
- struct lovsub_lock *sub;
- struct cl_lock *sublock;
- struct lov_lock_sub *lls;
- struct lov_sublock_env *subenv;
-
- lls = &lck->lls_sub[i];
- sub = lls->sub_lock;
- sublock = sub->lss_cl.cls_lock;
- rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
- if (rc == 0) {
- LASSERT(sublock->cll_state >= CLS_ENQUEUED);
- if (sublock->cll_state < CLS_HELD)
- rc = cl_wait_try(env, sublock);
-
- minstate = min(minstate, sublock->cll_state);
- lov_sublock_unlock(env, sub, closure, subenv);
- }
- if (rc == CLO_REENQUEUED) {
- reenqueued++;
- rc = 0;
- }
- result = lov_subresult(result, rc);
- if (result != 0)
- break;
- }
- /* Each sublock only can be reenqueued once, so will not loop
- * forever.
- */
- if (result == 0 && reenqueued != 0)
- goto again;
- cl_lock_closure_fini(closure);
- return result ?: minstate >= CLS_HELD ? 0 : CLO_WAIT;
-}
-
-static int lov_lock_use(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct lov_lock *lck = cl2lov_lock(slice);
- struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
- int result;
- int i;
-
- LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
-
- for (result = 0, i = 0; i < lck->lls_nr; ++i) {
- int rc;
- struct lovsub_lock *sub;
- struct cl_lock *sublock;
- struct lov_lock_sub *lls;
- struct lov_sublock_env *subenv;
-
- LASSERT(slice->cls_lock->cll_state == CLS_INTRANSIT);
-
- lls = &lck->lls_sub[i];
- sub = lls->sub_lock;
- if (!sub) {
- /*
- * Sub-lock might have been canceled, while top-lock was
- * cached.
- */
- result = -ESTALE;
- break;
- }
-
- sublock = sub->lss_cl.cls_lock;
- rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
- if (rc == 0) {
- LASSERT(sublock->cll_state != CLS_FREEING);
- lov_sublock_hold(env, lck, i);
- if (sublock->cll_state == CLS_CACHED) {
- rc = cl_use_try(subenv->lse_env, sublock, 0);
- if (rc != 0)
- rc = lov_sublock_release(env, lck,
- i, 1, rc);
- } else if (sublock->cll_state == CLS_NEW) {
- /* Sub-lock might have been canceled, while
- * top-lock was cached.
- */
- result = -ESTALE;
- lov_sublock_release(env, lck, i, 1, result);
- }
- lov_sublock_unlock(env, sub, closure, subenv);
- }
- result = lov_subresult(result, rc);
- if (result != 0)
- break;
- }
-
- if (lck->lls_cancel_race) {
- /*
- * If there is unlocking happened at the same time, then
- * sublock_lock state should be FREEING, and lov_sublock_lock
- * should return CLO_REPEAT. In this case, it should return
- * ESTALE, and up layer should reset the lock state to be NEW.
- */
- lck->lls_cancel_race = 0;
- LASSERT(result != 0);
- result = -ESTALE;
- }
- cl_lock_closure_fini(closure);
- return result;
-}
-
-/**
- * Check if the extent region \a descr is covered by \a child against the
- * specific \a stripe.
- */
-static int lov_lock_stripe_is_matching(const struct lu_env *env,
- struct lov_object *lov, int stripe,
- const struct cl_lock_descr *child,
- const struct cl_lock_descr *descr)
-{
- struct lov_stripe_md *lsm = lov->lo_lsm;
- u64 start;
- u64 end;
- int result;
-
- if (lov_r0(lov)->lo_nr == 1)
- return cl_lock_ext_match(child, descr);
-
- /*
- * For a multi-stripes object:
- * - make sure the descr only covers child's stripe, and
- * - check if extent is matching.
- */
- start = cl_offset(&lov->lo_cl, descr->cld_start);
- end = cl_offset(&lov->lo_cl, descr->cld_end + 1) - 1;
- result = 0;
- /* glimpse should work on the object with LOV EA hole. */
- if (end - start <= lsm->lsm_stripe_size) {
- int idx;
-
- idx = lov_stripe_number(lsm, start);
- if (idx == stripe ||
- unlikely(!lov_r0(lov)->lo_sub[idx])) {
- idx = lov_stripe_number(lsm, end);
- if (idx == stripe ||
- unlikely(!lov_r0(lov)->lo_sub[idx]))
- result = 1;
- }
- }
-
- if (result != 0) {
- struct cl_lock_descr *subd = &lov_env_info(env)->lti_ldescr;
- u64 sub_start;
- u64 sub_end;
-
- subd->cld_obj = NULL; /* don't need sub object at all */
- subd->cld_mode = descr->cld_mode;
- subd->cld_gid = descr->cld_gid;
- result = lov_stripe_intersects(lsm, stripe, start, end,
- &sub_start, &sub_end);
- LASSERT(result);
- subd->cld_start = cl_index(child->cld_obj, sub_start);
- subd->cld_end = cl_index(child->cld_obj, sub_end);
- result = cl_lock_ext_match(child, subd);
- }
- return result;
-}
-
-/**
- * An implementation of cl_lock_operations::clo_fits_into() method.
- *
- * Checks whether a lock (given by \a slice) is suitable for \a
- * io. Multi-stripe locks can be used only for "quick" io, like truncate, or
- * O_APPEND write.
- *
- * \see ccc_lock_fits_into().
- */
-static int lov_lock_fits_into(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- const struct cl_lock_descr *need,
- const struct cl_io *io)
-{
- struct lov_lock *lov = cl2lov_lock(slice);
- struct lov_object *obj = cl2lov(slice->cls_obj);
- int result;
-
- LASSERT(cl_object_same(need->cld_obj, slice->cls_obj));
- LASSERT(lov->lls_nr > 0);
-
- /* for top lock, it's necessary to match enq flags otherwise it will
- * run into problem if a sublock is missing and reenqueue.
- */
- if (need->cld_enq_flags != lov->lls_orig.cld_enq_flags)
- return 0;
-
- if (need->cld_mode == CLM_GROUP)
- /*
- * always allow to match group lock.
- */
- result = cl_lock_ext_match(&lov->lls_orig, need);
- else if (lov->lls_nr == 1) {
- struct cl_lock_descr *got = &lov->lls_sub[0].sub_got;
-
- result = lov_lock_stripe_is_matching(env,
- cl2lov(slice->cls_obj),
- lov->lls_sub[0].sub_stripe,
- got, need);
- } else if (io->ci_type != CIT_SETATTR && io->ci_type != CIT_MISC &&
- !cl_io_is_append(io) && need->cld_mode != CLM_PHANTOM)
- /*
- * Multi-stripe locks are only suitable for `quick' IO and for
- * glimpse.
- */
- result = 0;
- else
- /*
- * Most general case: multi-stripe existing lock, and
- * (potentially) multi-stripe @need lock. Check that @need is
- * covered by @lov's sub-locks.
- *
- * For now, ignore lock expansions made by the server, and
- * match against original lock extent.
- */
- result = cl_lock_ext_match(&lov->lls_orig, need);
- CDEBUG(D_DLMTRACE, DDESCR"/"DDESCR" %d %d/%d: %d\n",
- PDESCR(&lov->lls_orig), PDESCR(&lov->lls_sub[0].sub_got),
- lov->lls_sub[0].sub_stripe, lov->lls_nr, lov_r0(obj)->lo_nr,
- result);
- return result;
-}
-
-void lov_lock_unlink(const struct lu_env *env,
- struct lov_lock_link *link, struct lovsub_lock *sub)
-{
- struct lov_lock *lck = link->lll_super;
- struct cl_lock *parent = lck->lls_cl.cls_lock;
-
- LASSERT(cl_lock_is_mutexed(parent));
- LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
-
- list_del_init(&link->lll_list);
- LASSERT(lck->lls_sub[link->lll_idx].sub_lock == sub);
- /* yank this sub-lock from parent's array */
- lck->lls_sub[link->lll_idx].sub_lock = NULL;
- LASSERT(lck->lls_nr_filled > 0);
- lck->lls_nr_filled--;
- lu_ref_del(&parent->cll_reference, "lov-child", sub->lss_cl.cls_lock);
- cl_lock_put(env, parent);
- kmem_cache_free(lov_lock_link_kmem, link);
-}
-
-struct lov_lock_link *lov_lock_link_find(const struct lu_env *env,
- struct lov_lock *lck,
- struct lovsub_lock *sub)
-{
- struct lov_lock_link *scan;
-
- LASSERT(cl_lock_is_mutexed(sub->lss_cl.cls_lock));
-
- list_for_each_entry(scan, &sub->lss_parents, lll_list) {
- if (scan->lll_super == lck)
- return scan;
- }
- return NULL;
-}
-
-/**
- * An implementation of cl_lock_operations::clo_delete() method. This is
- * invoked for "top-to-bottom" delete, when lock destruction starts from the
- * top-lock, e.g., as a result of inode destruction.
- *
- * Unlinks top-lock from all its sub-locks. Sub-locks are not deleted there:
- * this is done separately elsewhere:
- *
- * - for inode destruction, lov_object_delete() calls cl_object_kill() for
- * each sub-object, purging its locks;
- *
- * - in other cases (e.g., a fatal error with a top-lock) sub-locks are
- * left in the cache.
- */
-static void lov_lock_delete(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct lov_lock *lck = cl2lov_lock(slice);
- struct cl_lock_closure *closure = lov_closure_get(env, slice->cls_lock);
- struct lov_lock_link *link;
- int rc;
- int i;
-
- LASSERT(slice->cls_lock->cll_state == CLS_FREEING);
-
- for (i = 0; i < lck->lls_nr; ++i) {
- struct lov_lock_sub *lls = &lck->lls_sub[i];
- struct lovsub_lock *lsl = lls->sub_lock;
-
- if (!lsl) /* already removed */
+ if (!lls->sub_is_enqueued)
continue;
- rc = lov_sublock_lock(env, lck, lls, closure, NULL);
- if (rc == CLO_REPEAT) {
- --i;
- continue;
+ lls->sub_is_enqueued = 0;
+ subenv = lov_sublock_env_get(env, lock, lls);
+ if (!IS_ERR(subenv)) {
+ cl_lock_cancel(subenv->lse_env, sublock);
+ lov_sublock_env_put(subenv);
+ } else {
+ CL_LOCK_DEBUG(D_ERROR, env, slice->cls_lock,
+ "lov_lock_cancel fails with %ld.\n",
+ PTR_ERR(subenv));
}
-
- LASSERT(rc == 0);
- LASSERT(lsl->lss_cl.cls_lock->cll_state < CLS_FREEING);
-
- if (lls->sub_flags & LSF_HELD)
- lov_sublock_release(env, lck, i, 1, 0);
-
- link = lov_lock_link_find(env, lck, lsl);
- LASSERT(link);
- lov_lock_unlink(env, link, lsl);
- LASSERT(!lck->lls_sub[i].sub_lock);
-
- lov_sublock_unlock(env, lsl, closure, NULL);
}
-
- cl_lock_closure_fini(closure);
}
static int lov_lock_print(const struct lu_env *env, void *cookie,
@@ -1079,12 +291,8 @@ static int lov_lock_print(const struct lu_env *env, void *cookie,
struct lov_lock_sub *sub;
sub = &lck->lls_sub[i];
- (*p)(env, cookie, " %d %x: ", i, sub->sub_flags);
- if (sub->sub_lock)
- cl_lock_print(env, cookie, p,
- sub->sub_lock->lss_cl.cls_lock);
- else
- (*p)(env, cookie, "---\n");
+ (*p)(env, cookie, " %d %x: ", i, sub->sub_is_enqueued);
+ cl_lock_print(env, cookie, p, &sub->sub_lock);
}
return 0;
}
@@ -1092,12 +300,7 @@ static int lov_lock_print(const struct lu_env *env, void *cookie,
static const struct cl_lock_operations lov_lock_ops = {
.clo_fini = lov_lock_fini,
.clo_enqueue = lov_lock_enqueue,
- .clo_wait = lov_lock_wait,
- .clo_use = lov_lock_use,
- .clo_unuse = lov_lock_unuse,
.clo_cancel = lov_lock_cancel,
- .clo_fits_into = lov_lock_fits_into,
- .clo_delete = lov_lock_delete,
.clo_print = lov_lock_print
};
@@ -1105,14 +308,13 @@ int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj,
struct cl_lock *lock, const struct cl_io *io)
{
struct lov_lock *lck;
- int result;
+ int result = 0;
- lck = kmem_cache_zalloc(lov_lock_kmem, GFP_NOFS);
- if (lck) {
+ lck = lov_lock_sub_init(env, obj, lock);
+ if (!IS_ERR(lck))
cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
- result = lov_lock_sub_init(env, lck, io);
- } else
- result = -ENOMEM;
+ else
+ result = PTR_ERR(lck);
return result;
}
@@ -1147,21 +349,9 @@ int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj,
lck = kmem_cache_zalloc(lov_lock_kmem, GFP_NOFS);
if (lck) {
cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_empty_lock_ops);
- lck->lls_orig = lock->cll_descr;
result = 0;
}
return result;
}
-static struct cl_lock_closure *lov_closure_get(const struct lu_env *env,
- struct cl_lock *parent)
-{
- struct cl_lock_closure *closure;
-
- closure = &lov_env_info(env)->lti_closure;
- LASSERT(list_empty(&closure->clc_list));
- cl_lock_closure_init(env, closure, parent, 1);
- return closure;
-}
-
/** @} lov */
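The reworked lov_lock_sub_init() above counts the intersecting stripes and then sizes a single allocation with offsetof(struct lov_lock, lls_sub[nr]), so the top-lock and its per-stripe sub-lock slots live in one block that lov_lock_fini() later frees with a single kvfree(). A minimal standalone sketch of that flexible-array allocation pattern, using hypothetical toy types rather than the real Lustre structures:

#include <stdlib.h>
#include <stddef.h>
#include <stdio.h>

/* toy stand-ins for struct lov_lock / struct lov_lock_sub */
struct toy_sub {
	int sub_stripe;
	int sub_is_enqueued;
};

struct toy_lock {
	int lls_nr;
	struct toy_sub lls_sub[];	/* flexible array member */
};

static struct toy_lock *toy_lock_alloc(int nr)
{
	/* one allocation covers the header plus nr sub-lock slots,
	 * mirroring the offsetof(..., lls_sub[nr]) idiom in the patch */
	struct toy_lock *lck = calloc(1, offsetof(struct toy_lock, lls_sub[nr]));

	if (!lck)
		return NULL;
	lck->lls_nr = nr;
	return lck;
}

int main(void)
{
	struct toy_lock *lck = toy_lock_alloc(4);
	int i;

	if (!lck)
		return 1;
	for (i = 0; i < lck->lls_nr; i++)
		lck->lls_sub[i].sub_stripe = i;
	printf("allocated %d sub-lock slots in one block\n", lck->lls_nr);
	free(lck);
	return 0;
}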
diff --git a/drivers/staging/lustre/lustre/lov/lov_merge.c b/drivers/staging/lustre/lustre/lov/lov_merge.c
index 029cd4d62796..56ef41d17ad7 100644
--- a/drivers/staging/lustre/lustre/lov/lov_merge.c
+++ b/drivers/staging/lustre/lustre/lov/lov_merge.c
@@ -154,6 +154,7 @@ void lov_merge_attrs(struct obdo *tgt, struct obdo *src, u64 valid,
valid &= src->o_valid;
if (*set) {
+ tgt->o_valid &= valid;
if (valid & OBD_MD_FLSIZE) {
/* this handles sparse files properly */
u64 lov_size;
@@ -172,12 +173,22 @@ void lov_merge_attrs(struct obdo *tgt, struct obdo *src, u64 valid,
tgt->o_mtime = src->o_mtime;
if (valid & OBD_MD_FLDATAVERSION)
tgt->o_data_version += src->o_data_version;
+
+ /* handle flags */
+ if (valid & OBD_MD_FLFLAGS)
+ tgt->o_flags &= src->o_flags;
+ else
+ tgt->o_flags = 0;
} else {
memcpy(tgt, src, sizeof(*tgt));
tgt->o_oi = lsm->lsm_oi;
+ tgt->o_valid = valid;
if (valid & OBD_MD_FLSIZE)
tgt->o_size = lov_stripe_size(lsm, src->o_size,
stripeno);
+ tgt->o_flags = 0;
+ if (valid & OBD_MD_FLFLAGS)
+ tgt->o_flags = src->o_flags;
}
/* data_version needs to be valid on all stripes to be correct! */
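The lov_merge_attrs() hunk above keeps only attributes that every stripe reports: o_valid is AND-ed across stripes, and o_flags is intersected, or cleared as soon as one stripe does not carry OBD_MD_FLFLAGS. A small self-contained model of that merge rule, with toy fields standing in for the real obdo layout:

#include <stdint.h>
#include <stdio.h>

#define TOY_FLFLAGS 0x1ULL	/* hypothetical "flags are valid" bit */

struct toy_attr {
	uint64_t valid;
	uint64_t flags;
};

/* Merge one stripe's attributes into the running result. */
static void toy_merge(struct toy_attr *tgt, const struct toy_attr *src, int *set)
{
	uint64_t valid = src->valid;

	if (*set) {
		/* keep only bits that every stripe agrees on */
		tgt->valid &= valid;
		if (valid & TOY_FLFLAGS)
			tgt->flags &= src->flags;
		else
			tgt->flags = 0;
	} else {
		/* first stripe: take its attributes as the starting point */
		*tgt = *src;
		tgt->flags = (valid & TOY_FLFLAGS) ? src->flags : 0;
		*set = 1;
	}
}

int main(void)
{
	struct toy_attr stripes[2] = {
		{ .valid = TOY_FLFLAGS, .flags = 0x6 },
		{ .valid = TOY_FLFLAGS, .flags = 0x2 },
	};
	struct toy_attr tgt = { 0 };
	int set = 0, i;

	for (i = 0; i < 2; i++)
		toy_merge(&tgt, &stripes[i], &set);
	printf("merged flags: 0x%llx\n", (unsigned long long)tgt.flags); /* 0x2 */
	return 0;
}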
diff --git a/drivers/staging/lustre/lustre/lov/lov_obd.c b/drivers/staging/lustre/lustre/lov/lov_obd.c
index 5daa7faf4dda..e15ef2ece893 100644
--- a/drivers/staging/lustre/lustre/lov/lov_obd.c
+++ b/drivers/staging/lustre/lustre/lov/lov_obd.c
@@ -54,7 +54,6 @@
#include "../include/lprocfs_status.h"
#include "../include/lustre_param.h"
#include "../include/cl_object.h"
-#include "../include/lclient.h" /* for cl_client_lru */
#include "../include/lustre/ll_fiemap.h"
#include "../include/lustre_fid.h"
@@ -124,7 +123,6 @@ static int lov_set_osc_active(struct obd_device *obd, struct obd_uuid *uuid,
static int lov_notify(struct obd_device *obd, struct obd_device *watched,
enum obd_notify_event ev, void *data);
-#define MAX_STRING_SIZE 128
int lov_connect_obd(struct obd_device *obd, __u32 index, int activate,
struct obd_connect_data *data)
{
@@ -965,7 +963,6 @@ int lov_process_config_base(struct obd_device *obd, struct lustre_cfg *lcfg,
CERROR("Unknown command: %d\n", lcfg->lcfg_command);
rc = -EINVAL;
goto out;
-
}
}
out:
@@ -1734,6 +1731,27 @@ static int lov_fiemap(struct lov_obd *lov, __u32 keylen, void *key,
unsigned int buffer_size = FIEMAP_BUFFER_SIZE;
if (!lsm_has_objects(lsm)) {
+ if (lsm && lsm_is_released(lsm) && (fm_key->fiemap.fm_start <
+ fm_key->oa.o_size)) {
+ /*
+			 * For a released file, return a minimal FIEMAP if the
+			 * request fits within the file size.
+ */
+ fiemap->fm_mapped_extents = 1;
+ fiemap->fm_extents[0].fe_logical =
+ fm_key->fiemap.fm_start;
+ if (fm_key->fiemap.fm_start + fm_key->fiemap.fm_length <
+ fm_key->oa.o_size) {
+ fiemap->fm_extents[0].fe_length =
+ fm_key->fiemap.fm_length;
+ } else {
+ fiemap->fm_extents[0].fe_length =
+ fm_key->oa.o_size - fm_key->fiemap.fm_start;
+ fiemap->fm_extents[0].fe_flags |=
+ (FIEMAP_EXTENT_UNKNOWN |
+ FIEMAP_EXTENT_LAST);
+ }
+ }
rc = 0;
goto out;
}
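For a released file the hunk above synthesizes a single FIEMAP extent: it starts at fm_start, its length is clamped to the file size, and the UNKNOWN and LAST flags are set once the request runs past end of file. A compact sketch of just that clamping decision, using plain integers instead of the real fiemap structures:

#include <stdio.h>
#include <stdint.h>

struct toy_extent {
	uint64_t logical;
	uint64_t length;
	int last;	/* stands in for FIEMAP_EXTENT_UNKNOWN | FIEMAP_EXTENT_LAST */
};

/* Returns 1 and fills *ext if the request overlaps the (released) file. */
static int toy_released_fiemap(uint64_t start, uint64_t len, uint64_t file_size,
			       struct toy_extent *ext)
{
	if (start >= file_size)
		return 0;

	ext->logical = start;
	if (start + len < file_size) {
		ext->length = len;
		ext->last = 0;
	} else {
		/* clamp to EOF and mark the extent as the last one */
		ext->length = file_size - start;
		ext->last = 1;
	}
	return 1;
}

int main(void)
{
	struct toy_extent ext;

	if (toy_released_fiemap(4096, 1 << 20, 65536, &ext))
		printf("logical=%llu length=%llu last=%d\n",
		       (unsigned long long)ext.logical,
		       (unsigned long long)ext.length, ext.last);
	return 0;
}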
@@ -2173,7 +2191,6 @@ void lov_stripe_lock(struct lov_stripe_md *md)
LASSERT(md->lsm_lock_owner == 0);
md->lsm_lock_owner = current_pid();
}
-EXPORT_SYMBOL(lov_stripe_lock);
void lov_stripe_unlock(struct lov_stripe_md *md)
__releases(&md->lsm_lock)
@@ -2182,7 +2199,6 @@ void lov_stripe_unlock(struct lov_stripe_md *md)
md->lsm_lock_owner = 0;
spin_unlock(&md->lsm_lock);
}
-EXPORT_SYMBOL(lov_stripe_unlock);
static int lov_quotactl(struct obd_device *obd, struct obd_export *exp,
struct obd_quotactl *oqctl)
diff --git a/drivers/staging/lustre/lustre/lov/lov_object.c b/drivers/staging/lustre/lustre/lov/lov_object.c
index 1f8ed95a6d89..561d493b2cdf 100644
--- a/drivers/staging/lustre/lustre/lov/lov_object.c
+++ b/drivers/staging/lustre/lustre/lov/lov_object.c
@@ -67,7 +67,7 @@ struct lov_layout_operations {
int (*llo_print)(const struct lu_env *env, void *cookie,
lu_printer_t p, const struct lu_object *o);
int (*llo_page_init)(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage);
+ struct cl_page *page, pgoff_t index);
int (*llo_lock_init)(const struct lu_env *env,
struct cl_object *obj, struct cl_lock *lock,
const struct cl_io *io);
@@ -193,6 +193,18 @@ static int lov_init_sub(const struct lu_env *env, struct lov_object *lov,
return result;
}
+static int lov_page_slice_fixup(struct lov_object *lov,
+ struct cl_object *stripe)
+{
+ struct cl_object_header *hdr = cl_object_header(&lov->lo_cl);
+ struct cl_object *o;
+
+ cl_object_for_each(o, stripe)
+ o->co_slice_off += hdr->coh_page_bufsize;
+
+ return cl_object_header(stripe)->coh_page_bufsize;
+}
+
static int lov_init_raid0(const struct lu_env *env,
struct lov_device *dev, struct lov_object *lov,
const struct cl_object_conf *conf,
@@ -222,6 +234,8 @@ static int lov_init_raid0(const struct lu_env *env,
r0->lo_sub = libcfs_kvzalloc(r0->lo_nr * sizeof(r0->lo_sub[0]),
GFP_NOFS);
if (r0->lo_sub) {
+ int psz = 0;
+
result = 0;
subconf->coc_inode = conf->coc_inode;
spin_lock_init(&r0->lo_sub_lock);
@@ -254,13 +268,24 @@ static int lov_init_raid0(const struct lu_env *env,
if (result == -EAGAIN) { /* try again */
--i;
result = 0;
+ continue;
}
} else {
result = PTR_ERR(stripe);
}
+
+ if (result == 0) {
+ int sz = lov_page_slice_fixup(lov, stripe);
+
+ LASSERT(ergo(psz > 0, psz == sz));
+ psz = sz;
+ }
}
- } else
+ if (result == 0)
+ cl_object_header(&lov->lo_cl)->coh_page_bufsize += psz;
+ } else {
result = -ENOMEM;
+ }
out:
return result;
}
@@ -286,8 +311,6 @@ static int lov_delete_empty(const struct lu_env *env, struct lov_object *lov,
LASSERT(lov->lo_type == LLT_EMPTY || lov->lo_type == LLT_RELEASED);
lov_layout_wait(env, lov);
-
- cl_object_prune(env, &lov->lo_cl);
return 0;
}
@@ -355,7 +378,7 @@ static int lov_delete_raid0(const struct lu_env *env, struct lov_object *lov,
struct lovsub_object *los = r0->lo_sub[i];
if (los) {
- cl_locks_prune(env, &los->lso_cl, 1);
+ cl_object_prune(env, &los->lso_cl);
/*
* If top-level object is to be evicted from
* the cache, so are its sub-objects.
@@ -364,7 +387,6 @@ static int lov_delete_raid0(const struct lu_env *env, struct lov_object *lov,
}
}
}
- cl_object_prune(env, &lov->lo_cl);
return 0;
}
@@ -666,7 +688,6 @@ static int lov_layout_change(const struct lu_env *unused,
const struct lov_layout_operations *old_ops;
const struct lov_layout_operations *new_ops;
- struct cl_object_header *hdr = cl_object_header(&lov->lo_cl);
void *cookie;
struct lu_env *env;
int refcheck;
@@ -691,13 +712,15 @@ static int lov_layout_change(const struct lu_env *unused,
old_ops = &lov_dispatch[lov->lo_type];
new_ops = &lov_dispatch[llt];
+ result = cl_object_prune(env, &lov->lo_cl);
+ if (result != 0)
+ goto out;
+
result = old_ops->llo_delete(env, lov, &lov->u);
if (result == 0) {
old_ops->llo_fini(env, lov, &lov->u);
LASSERT(atomic_read(&lov->lo_active_ios) == 0);
- LASSERT(!hdr->coh_tree.rnode);
- LASSERT(hdr->coh_pages == 0);
lov->lo_type = LLT_EMPTY;
result = new_ops->llo_init(env,
@@ -713,6 +736,7 @@ static int lov_layout_change(const struct lu_env *unused,
}
}
+out:
cl_env_put(env, &refcheck);
cl_env_reexit(cookie);
return result;
@@ -793,7 +817,8 @@ static int lov_conf_set(const struct lu_env *env, struct cl_object *obj,
goto out;
}
- lov->lo_layout_invalid = lov_layout_change(env, lov, conf);
+ result = lov_layout_change(env, lov, conf);
+ lov->lo_layout_invalid = result != 0;
out:
lov_conf_unlock(lov);
@@ -825,10 +850,10 @@ static int lov_object_print(const struct lu_env *env, void *cookie,
}
int lov_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage)
+ struct cl_page *page, pgoff_t index)
{
- return LOV_2DISPATCH_NOLOCK(cl2lov(obj),
- llo_page_init, env, obj, page, vmpage);
+ return LOV_2DISPATCH_NOLOCK(cl2lov(obj), llo_page_init, env, obj, page,
+ index);
}
/**
@@ -911,8 +936,9 @@ struct lu_object *lov_object_alloc(const struct lu_env *env,
* for object with different layouts.
*/
obj->lo_ops = &lov_lu_obj_ops;
- } else
+ } else {
obj = NULL;
+ }
return obj;
}
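lov_page_slice_fixup() above shifts every slice of a stripe object by the parent's current page buffer size, so one top-level page buffer can hold the LOV slice followed by the stripe's slices, and lov_init_raid0() then grows coh_page_bufsize by the stripe's contribution. A toy model of that offset stacking, with made-up slice sizes rather than the real cl_object layout:

#include <stdio.h>

struct toy_layer {
	const char *name;
	unsigned int slice_size;
	unsigned int slice_off;	/* offset of this layer's slice in the page buffer */
};

/* Lay the stripe's layers out after the parent's slice; return the total size. */
static unsigned int toy_fixup(struct toy_layer *layers, int nr,
			      unsigned int parent_bufsize)
{
	unsigned int off = parent_bufsize;
	int i;

	for (i = 0; i < nr; i++) {
		layers[i].slice_off = off;
		off += layers[i].slice_size;
	}
	return off;
}

int main(void)
{
	struct toy_layer stripe_layers[] = {
		{ "lovsub", 24, 0 },
		{ "osc",    64, 0 },
	};
	unsigned int total = toy_fixup(stripe_layers, 2, 32 /* parent (lov) slice */);
	int i;

	for (i = 0; i < 2; i++)
		printf("%s slice at offset %u\n",
		       stripe_layers[i].name, stripe_layers[i].slice_off);
	printf("page buffer size: %u\n", total);
	return 0;
}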
diff --git a/drivers/staging/lustre/lustre/lov/lov_offset.c b/drivers/staging/lustre/lustre/lov/lov_offset.c
index ae83eb0f6f36..9302f06c34ef 100644
--- a/drivers/staging/lustre/lustre/lov/lov_offset.c
+++ b/drivers/staging/lustre/lustre/lov/lov_offset.c
@@ -66,6 +66,18 @@ u64 lov_stripe_size(struct lov_stripe_md *lsm, u64 ost_size, int stripeno)
return lov_size;
}
+/**
+ * Compute file level page index by stripe level page offset
+ */
+pgoff_t lov_stripe_pgoff(struct lov_stripe_md *lsm, pgoff_t stripe_index,
+ int stripe)
+{
+ loff_t offset;
+
+ offset = lov_stripe_size(lsm, stripe_index << PAGE_SHIFT, stripe);
+ return offset >> PAGE_SHIFT;
+}
+
/* we have an offset in file backed by an lov and want to find out where
* that offset lands in our given stripe of the file. for the easy
* case where the offset is within the stripe, we just have to scale the
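The new lov_stripe_pgoff() converts a stripe-level page index to a byte offset, maps it to a file-level offset via lov_stripe_size(), and shifts back to a page index. A self-contained sketch of the underlying RAID0 arithmetic, assuming equal-size stripes and no LOV EA holes (the real lov_stripe_size() also handles size/offset edge cases):

#include <stdio.h>
#include <stdint.h>

#define TOY_PAGE_SHIFT 12	/* 4 KiB pages, for illustration only */

/*
 * Map an offset inside one stripe to the corresponding file offset for a
 * simple RAID0 layout (equal stripe sizes, no holes).
 */
static uint64_t toy_stripe_to_file(uint64_t stripe_off, uint64_t stripe_size,
				   unsigned int stripe_count, unsigned int stripe)
{
	uint64_t swidth = stripe_size * stripe_count;

	return (stripe_off / stripe_size) * swidth +
	       (uint64_t)stripe * stripe_size +
	       stripe_off % stripe_size;
}

/* stripe-level page index -> file-level page index, as lov_stripe_pgoff() does */
static uint64_t toy_stripe_pgoff(uint64_t stripe_index, uint64_t stripe_size,
				 unsigned int stripe_count, unsigned int stripe)
{
	uint64_t off = toy_stripe_to_file(stripe_index << TOY_PAGE_SHIFT,
					  stripe_size, stripe_count, stripe);

	return off >> TOY_PAGE_SHIFT;
}

int main(void)
{
	/* 1 MiB stripes over 4 stripes: page 300 of stripe 2 maps to file page 1580 */
	printf("file page index: %llu\n",
	       (unsigned long long)toy_stripe_pgoff(300, 1 << 20, 4, 2));
	return 0;
}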
diff --git a/drivers/staging/lustre/lustre/lov/lov_pack.c b/drivers/staging/lustre/lustre/lov/lov_pack.c
index 3925633a99ec..0215ea54df8d 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pack.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pack.c
@@ -136,7 +136,6 @@ int lov_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
CERROR("bad mem LOV MAGIC: 0x%08X != 0x%08X nor 0x%08X\n",
lmm_magic, LOV_MAGIC_V1, LOV_MAGIC_V3);
return -EINVAL;
-
}
if (lsm) {
@@ -444,8 +443,7 @@ int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm,
if (lum.lmm_magic == LOV_USER_MAGIC) {
/* User request for v1, we need skip lmm_pool_name */
if (lmmk->lmm_magic == LOV_MAGIC_V3) {
- memmove((char *)(&lmmk->lmm_stripe_count) +
- sizeof(lmmk->lmm_stripe_count),
+ memmove(((struct lov_mds_md_v1 *)lmmk)->lmm_objects,
((struct lov_mds_md_v3 *)lmmk)->lmm_objects,
lmmk->lmm_stripe_count *
sizeof(struct lov_ost_data_v1));
@@ -457,9 +455,9 @@ int lov_getstripe(struct obd_export *exp, struct lov_stripe_md *lsm,
}
/* User wasn't expecting this many OST entries */
- if (lum.lmm_stripe_count == 0)
+ if (lum.lmm_stripe_count == 0) {
lmm_size = lum_size;
- else if (lum.lmm_stripe_count < lmmk->lmm_stripe_count) {
+ } else if (lum.lmm_stripe_count < lmmk->lmm_stripe_count) {
rc = -EOVERFLOW;
goto out_set;
}
diff --git a/drivers/staging/lustre/lustre/lov/lov_page.c b/drivers/staging/lustre/lustre/lov/lov_page.c
index fdcaf8047ad8..0306f00c3f33 100644
--- a/drivers/staging/lustre/lustre/lov/lov_page.c
+++ b/drivers/staging/lustre/lustre/lov/lov_page.c
@@ -36,6 +36,7 @@
* Implementation of cl_page for LOV layer.
*
* Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@intel.com>
*/
#define DEBUG_SUBSYSTEM S_LOV
@@ -52,116 +53,66 @@
*
*/
-static int lov_page_invariant(const struct cl_page_slice *slice)
+/**
+ * Adjust the stripe index by layout of raid0. @max_index is the maximum
+ * page index covered by an underlying DLM lock.
+ * This function converts max_index from stripe level to file level, and makes
+ * sure it does not go beyond one stripe.
+ */
+static int lov_raid0_page_is_under_lock(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ struct cl_io *unused,
+ pgoff_t *max_index)
{
- const struct cl_page *page = slice->cpl_page;
- const struct cl_page *sub = lov_sub_page(slice);
-
- return ergo(sub,
- page->cp_child == sub &&
- sub->cp_parent == page &&
- page->cp_state == sub->cp_state);
-}
+ struct lov_object *loo = cl2lov(slice->cpl_obj);
+ struct lov_layout_raid0 *r0 = lov_r0(loo);
+ pgoff_t index = *max_index;
+ unsigned int pps; /* pages per stripe */
-static void lov_page_fini(const struct lu_env *env,
- struct cl_page_slice *slice)
-{
- struct cl_page *sub = lov_sub_page(slice);
+ CDEBUG(D_READA, "*max_index = %lu, nr = %d\n", index, r0->lo_nr);
+ if (index == 0) /* the page is not covered by any lock */
+ return 0;
- LINVRNT(lov_page_invariant(slice));
+ if (r0->lo_nr == 1) /* single stripe file */
+ return 0;
- if (sub) {
- LASSERT(sub->cp_state == CPS_FREEING);
- lu_ref_del(&sub->cp_reference, "lov", sub->cp_parent);
- sub->cp_parent = NULL;
- slice->cpl_page->cp_child = NULL;
- cl_page_put(env, sub);
+	/* max_index is at stripe level; convert it to file level */
+ if (index != CL_PAGE_EOF) {
+ int stripeno = lov_page_stripe(slice->cpl_page);
+ *max_index = lov_stripe_pgoff(loo->lo_lsm, index, stripeno);
}
-}
-
-static int lov_page_own(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io,
- int nonblock)
-{
- struct lov_io *lio = lov_env_io(env);
- struct lov_io_sub *sub;
- LINVRNT(lov_page_invariant(slice));
- LINVRNT(!cl2lov_page(slice)->lps_invalid);
+ /* calculate the end of current stripe */
+ pps = loo->lo_lsm->lsm_stripe_size >> PAGE_SHIFT;
+ index = ((slice->cpl_index + pps) & ~(pps - 1)) - 1;
- sub = lov_page_subio(env, lio, slice);
- if (!IS_ERR(sub)) {
- lov_sub_page(slice)->cp_owner = sub->sub_io;
- lov_sub_put(sub);
- } else
- LBUG(); /* Arrgh */
+ /* never exceed the end of the stripe */
+ *max_index = min_t(pgoff_t, *max_index, index);
return 0;
}
-static void lov_page_assume(const struct lu_env *env,
- const struct cl_page_slice *slice, struct cl_io *io)
-{
- lov_page_own(env, slice, io, 0);
-}
-
-static int lov_page_cache_add(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io)
-{
- struct lov_io *lio = lov_env_io(env);
- struct lov_io_sub *sub;
- int rc = 0;
-
- LINVRNT(lov_page_invariant(slice));
- LINVRNT(!cl2lov_page(slice)->lps_invalid);
-
- sub = lov_page_subio(env, lio, slice);
- if (!IS_ERR(sub)) {
- rc = cl_page_cache_add(sub->sub_env, sub->sub_io,
- slice->cpl_page->cp_child, CRT_WRITE);
- lov_sub_put(sub);
- } else {
- rc = PTR_ERR(sub);
- CL_PAGE_DEBUG(D_ERROR, env, slice->cpl_page, "rc = %d\n", rc);
- }
- return rc;
-}
-
-static int lov_page_print(const struct lu_env *env,
- const struct cl_page_slice *slice,
- void *cookie, lu_printer_t printer)
+static int lov_raid0_page_print(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ void *cookie, lu_printer_t printer)
{
struct lov_page *lp = cl2lov_page(slice);
- return (*printer)(env, cookie, LUSTRE_LOV_NAME"-page@%p\n", lp);
+ return (*printer)(env, cookie, LUSTRE_LOV_NAME "-page@%p, raid0\n", lp);
}
-static const struct cl_page_operations lov_page_ops = {
- .cpo_fini = lov_page_fini,
- .cpo_own = lov_page_own,
- .cpo_assume = lov_page_assume,
- .io = {
- [CRT_WRITE] = {
- .cpo_cache_add = lov_page_cache_add
- }
- },
- .cpo_print = lov_page_print
+static const struct cl_page_operations lov_raid0_page_ops = {
+ .cpo_is_under_lock = lov_raid0_page_is_under_lock,
+ .cpo_print = lov_raid0_page_print
};
-static void lov_empty_page_fini(const struct lu_env *env,
- struct cl_page_slice *slice)
-{
- LASSERT(!slice->cpl_page->cp_child);
-}
-
int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage)
+ struct cl_page *page, pgoff_t index)
{
struct lov_object *loo = cl2lov(obj);
struct lov_layout_raid0 *r0 = lov_r0(loo);
struct lov_io *lio = lov_env_io(env);
- struct cl_page *subpage;
struct cl_object *subobj;
+ struct cl_object *o;
struct lov_io_sub *sub;
struct lov_page *lpg = cl_object_page_slice(obj, page);
loff_t offset;
@@ -169,59 +120,57 @@ int lov_page_init_raid0(const struct lu_env *env, struct cl_object *obj,
int stripe;
int rc;
- offset = cl_offset(obj, page->cp_index);
+ offset = cl_offset(obj, index);
stripe = lov_stripe_number(loo->lo_lsm, offset);
LASSERT(stripe < r0->lo_nr);
rc = lov_stripe_offset(loo->lo_lsm, offset, stripe, &suboff);
LASSERT(rc == 0);
- lpg->lps_invalid = 1;
- cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_page_ops);
+ cl_page_slice_add(page, &lpg->lps_cl, obj, index, &lov_raid0_page_ops);
sub = lov_sub_get(env, lio, stripe);
- if (IS_ERR(sub)) {
- rc = PTR_ERR(sub);
- goto out;
- }
+ if (IS_ERR(sub))
+ return PTR_ERR(sub);
subobj = lovsub2cl(r0->lo_sub[stripe]);
- subpage = cl_page_find_sub(sub->sub_env, subobj,
- cl_index(subobj, suboff), vmpage, page);
- lov_sub_put(sub);
- if (IS_ERR(subpage)) {
- rc = PTR_ERR(subpage);
- goto out;
- }
-
- if (likely(subpage->cp_parent == page)) {
- lu_ref_add(&subpage->cp_reference, "lov", page);
- lpg->lps_invalid = 0;
- rc = 0;
- } else {
- CL_PAGE_DEBUG(D_ERROR, env, page, "parent page\n");
- CL_PAGE_DEBUG(D_ERROR, env, subpage, "child page\n");
- LASSERT(0);
+ list_for_each_entry(o, &subobj->co_lu.lo_header->loh_layers,
+ co_lu.lo_linkage) {
+ if (o->co_ops->coo_page_init) {
+ rc = o->co_ops->coo_page_init(sub->sub_env, o, page,
+ cl_index(subobj, suboff));
+ if (rc != 0)
+ break;
+ }
}
+ lov_sub_put(sub);
-out:
return rc;
}
+static int lov_empty_page_print(const struct lu_env *env,
+ const struct cl_page_slice *slice,
+ void *cookie, lu_printer_t printer)
+{
+ struct lov_page *lp = cl2lov_page(slice);
+
+ return (*printer)(env, cookie, LUSTRE_LOV_NAME "-page@%p, empty.\n",
+ lp);
+}
+
static const struct cl_page_operations lov_empty_page_ops = {
- .cpo_fini = lov_empty_page_fini,
- .cpo_print = lov_page_print
+ .cpo_print = lov_empty_page_print
};
int lov_page_init_empty(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage)
+ struct cl_page *page, pgoff_t index)
{
struct lov_page *lpg = cl_object_page_slice(obj, page);
void *addr;
- cl_page_slice_add(page, &lpg->lps_cl, obj, &lov_empty_page_ops);
- addr = kmap(vmpage);
+ cl_page_slice_add(page, &lpg->lps_cl, obj, index, &lov_empty_page_ops);
+ addr = kmap(page->cp_vmpage);
memset(addr, 0, cl_page_size(obj));
- kunmap(vmpage);
+ kunmap(page->cp_vmpage);
cl_page_export(env, page, 1);
return 0;
}
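lov_raid0_page_is_under_lock() above rounds the page index up to the end of its stripe chunk with ((index + pps) & ~(pps - 1)) - 1, which relies on pps (pages per stripe) being a power of two, and then clamps *max_index to that boundary. A tiny demonstration of the rounding and the clamp, using made-up numbers:

#include <stdio.h>

/* Last page index of the stripe chunk containing @index; pps must be a power of two. */
static unsigned long end_of_stripe(unsigned long index, unsigned long pps)
{
	return ((index + pps) & ~(pps - 1)) - 1;
}

static unsigned long clamp_max_index(unsigned long max_index,
				     unsigned long index, unsigned long pps)
{
	unsigned long end = end_of_stripe(index, pps);

	return max_index < end ? max_index : end;	/* never exceed the stripe end */
}

int main(void)
{
	unsigned long pps = 256;	/* e.g. 1 MiB stripes with 4 KiB pages */

	printf("end of stripe for page 300: %lu\n", end_of_stripe(300, pps));	/* 511 */
	printf("clamped max_index: %lu\n", clamp_max_index(10000, 300, pps));	/* 511 */
	return 0;
}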
diff --git a/drivers/staging/lustre/lustre/lov/lov_pool.c b/drivers/staging/lustre/lustre/lov/lov_pool.c
index 9ae1d6f42d6e..690292ecebdc 100644
--- a/drivers/staging/lustre/lustre/lov/lov_pool.c
+++ b/drivers/staging/lustre/lustre/lov/lov_pool.c
@@ -65,7 +65,6 @@ void lov_pool_putref(struct pool_desc *pool)
LASSERT(hlist_unhashed(&pool->pool_hash));
LASSERT(list_empty(&pool->pool_list));
LASSERT(!pool->pool_debugfs_entry);
- lov_ost_pool_free(&(pool->pool_rr.lqr_pool));
lov_ost_pool_free(&(pool->pool_obds));
kfree(pool);
}
@@ -424,11 +423,6 @@ int lov_pool_new(struct obd_device *obd, char *poolname)
if (rc)
goto out_err;
- memset(&(new_pool->pool_rr), 0, sizeof(struct lov_qos_rr));
- rc = lov_ost_pool_init(&new_pool->pool_rr.lqr_pool, 0);
- if (rc)
- goto out_free_pool_obds;
-
INIT_HLIST_NODE(&new_pool->pool_hash);
/* get ref for debugfs file */
@@ -469,13 +463,10 @@ out_err:
list_del_init(&new_pool->pool_list);
lov->lov_pool_count--;
spin_unlock(&obd->obd_dev_lock);
-
ldebugfs_remove(&new_pool->pool_debugfs_entry);
-
- lov_ost_pool_free(&new_pool->pool_rr.lqr_pool);
-out_free_pool_obds:
lov_ost_pool_free(&new_pool->pool_obds);
kfree(new_pool);
+
return rc;
}
@@ -543,8 +534,6 @@ int lov_pool_add(struct obd_device *obd, char *poolname, char *ostname)
if (rc)
goto out;
- pool->pool_rr.lqr_dirty = 1;
-
CDEBUG(D_CONFIG, "Added %s to "LOV_POOLNAMEF" as member %d\n",
ostname, poolname, pool_tgt_count(pool));
@@ -589,8 +578,6 @@ int lov_pool_remove(struct obd_device *obd, char *poolname, char *ostname)
lov_ost_pool_remove(&pool->pool_obds, lov_idx);
- pool->pool_rr.lqr_dirty = 1;
-
CDEBUG(D_CONFIG, "%s removed from "LOV_POOLNAMEF"\n", ostname,
poolname);
@@ -599,50 +586,3 @@ out:
lov_pool_putref(pool);
return rc;
}
-
-int lov_check_index_in_pool(__u32 idx, struct pool_desc *pool)
-{
- int i, rc;
-
- /* caller may no have a ref on pool if it got the pool
- * without calling lov_find_pool() (e.g. go through the lov pool
- * list)
- */
- lov_pool_getref(pool);
-
- down_read(&pool_tgt_rw_sem(pool));
-
- for (i = 0; i < pool_tgt_count(pool); i++) {
- if (pool_tgt_array(pool)[i] == idx) {
- rc = 0;
- goto out;
- }
- }
- rc = -ENOENT;
-out:
- up_read(&pool_tgt_rw_sem(pool));
-
- lov_pool_putref(pool);
- return rc;
-}
-
-struct pool_desc *lov_find_pool(struct lov_obd *lov, char *poolname)
-{
- struct pool_desc *pool;
-
- pool = NULL;
- if (poolname[0] != '\0') {
- pool = cfs_hash_lookup(lov->lov_pools_hash_body, poolname);
- if (!pool)
- CWARN("Request for an unknown pool ("LOV_POOLNAMEF")\n",
- poolname);
- if (pool && (pool_tgt_count(pool) == 0)) {
- CWARN("Request for an empty pool ("LOV_POOLNAMEF")\n",
- poolname);
- /* pool is ignored, so we remove ref on it */
- lov_pool_putref(pool);
- pool = NULL;
- }
- }
- return pool;
-}
diff --git a/drivers/staging/lustre/lustre/lov/lov_request.c b/drivers/staging/lustre/lustre/lov/lov_request.c
index 7178a02d6267..1be4b921c01f 100644
--- a/drivers/staging/lustre/lustre/lov/lov_request.c
+++ b/drivers/staging/lustre/lustre/lov/lov_request.c
@@ -52,7 +52,6 @@ static void lov_init_set(struct lov_request_set *set)
INIT_LIST_HEAD(&set->set_list);
atomic_set(&set->set_refcount, 1);
init_waitqueue_head(&set->set_waitq);
- spin_lock_init(&set->set_lock);
}
void lov_finish_set(struct lov_request_set *set)
@@ -235,7 +234,6 @@ out:
if (tmp_oa)
kmem_cache_free(obdo_cachep, tmp_oa);
return rc;
-
}
int lov_fini_getattr_set(struct lov_request_set *set)
@@ -363,7 +361,6 @@ int lov_prep_destroy_set(struct obd_export *exp, struct obd_info *oinfo,
set->set_oi = oinfo;
set->set_oi->oi_md = lsm;
set->set_oi->oi_oa = src_oa;
- set->set_oti = oti;
if (oti && src_oa->o_valid & OBD_MD_FLCOOKIE)
set->set_cookies = oti->oti_logcookies;
@@ -480,7 +477,6 @@ int lov_prep_setattr_set(struct obd_export *exp, struct obd_info *oinfo,
lov_init_set(set);
set->set_exp = exp;
- set->set_oti = oti;
set->set_oi = oinfo;
if (oti && oinfo->oi_oa->o_valid & OBD_MD_FLCOOKIE)
set->set_cookies = oti->oti_logcookies;
@@ -716,12 +712,15 @@ int lov_prep_statfs_set(struct obd_device *obd, struct obd_info *oinfo,
struct lov_request *req;
if (!lov->lov_tgts[i] ||
- (!lov_check_and_wait_active(lov, i) &&
- (oinfo->oi_flags & OBD_STATFS_NODELAY))) {
+ (oinfo->oi_flags & OBD_STATFS_NODELAY &&
+ !lov->lov_tgts[i]->ltd_active)) {
CDEBUG(D_HA, "lov idx %d inactive\n", i);
continue;
}
+ if (!lov->lov_tgts[i]->ltd_active)
+ lov_check_and_wait_active(lov, i);
+
/* skip targets that have been explicitly disabled by the
* administrator
*/
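The statfs hunk above changes the skip condition: with OBD_STATFS_NODELAY an inactive target is skipped immediately, otherwise the caller first waits for it to become active again before issuing the request. A minimal sketch of that decision, with boolean stand-ins for the real lov_tgt_desc state:

#include <stdio.h>
#include <stdbool.h>

/* stand-ins for the real target state and flags */
struct toy_tgt {
	bool present;
	bool active;
};

static void toy_wait_for_active(struct toy_tgt *tgt)
{
	/* in the driver this would block until recovery finishes */
	tgt->active = true;
}

/* Returns true if a statfs request should be sent to this target. */
static bool toy_should_query(struct toy_tgt *tgt, bool nodelay)
{
	if (!tgt->present || (nodelay && !tgt->active))
		return false;	/* skip missing or (with NODELAY) inactive targets */
	if (!tgt->active)
		toy_wait_for_active(tgt);
	return true;
}

int main(void)
{
	struct toy_tgt t = { .present = true, .active = false };
	bool nodelay_result = toy_should_query(&t, true);	/* skipped: inactive */
	bool blocking_result = toy_should_query(&t, false);	/* waits, then queried */

	printf("NODELAY: %d, blocking: %d\n", nodelay_result, blocking_result);
	return 0;
}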
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_dev.c b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
index c335c020f4f4..35f6b1d66ff4 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_dev.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_dev.c
@@ -151,8 +151,9 @@ static int lovsub_req_init(const struct lu_env *env, struct cl_device *dev,
if (lsr) {
cl_req_slice_add(req, &lsr->lsrq_cl, dev, &lovsub_req_ops);
result = 0;
- } else
+ } else {
result = -ENOMEM;
+ }
return result;
}
@@ -182,10 +183,12 @@ static struct lu_device *lovsub_device_alloc(const struct lu_env *env,
d = lovsub2lu_dev(lsd);
d->ld_ops = &lovsub_lu_ops;
lsd->acid_cl.cd_ops = &lovsub_cl_ops;
- } else
+ } else {
d = ERR_PTR(result);
- } else
+ }
+ } else {
d = ERR_PTR(-ENOMEM);
+ }
return d;
}
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_lock.c b/drivers/staging/lustre/lustre/lov/lovsub_lock.c
index 3bb0c9068a90..e92edfb618b7 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_lock.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_lock.c
@@ -62,391 +62,8 @@ static void lovsub_lock_fini(const struct lu_env *env,
kmem_cache_free(lovsub_lock_kmem, lsl);
}
-static void lovsub_parent_lock(const struct lu_env *env, struct lov_lock *lov)
-{
- struct cl_lock *parent;
-
- parent = lov->lls_cl.cls_lock;
- cl_lock_get(parent);
- lu_ref_add(&parent->cll_reference, "lovsub-parent", current);
- cl_lock_mutex_get(env, parent);
-}
-
-static void lovsub_parent_unlock(const struct lu_env *env, struct lov_lock *lov)
-{
- struct cl_lock *parent;
-
- parent = lov->lls_cl.cls_lock;
- cl_lock_mutex_put(env, lov->lls_cl.cls_lock);
- lu_ref_del(&parent->cll_reference, "lovsub-parent", current);
- cl_lock_put(env, parent);
-}
-
-/**
- * Implements cl_lock_operations::clo_state() method for lovsub layer, which
- * method is called whenever sub-lock state changes. Propagates state change
- * to the top-locks.
- */
-static void lovsub_lock_state(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- enum cl_lock_state state)
-{
- struct lovsub_lock *sub = cl2lovsub_lock(slice);
- struct lov_lock_link *scan;
-
- LASSERT(cl_lock_is_mutexed(slice->cls_lock));
-
- list_for_each_entry(scan, &sub->lss_parents, lll_list) {
- struct lov_lock *lov = scan->lll_super;
- struct cl_lock *parent = lov->lls_cl.cls_lock;
-
- if (sub->lss_active != parent) {
- lovsub_parent_lock(env, lov);
- cl_lock_signal(env, parent);
- lovsub_parent_unlock(env, lov);
- }
- }
-}
-
-/**
- * Implementation of cl_lock_operation::clo_weigh() estimating lock weight by
- * asking parent lock.
- */
-static unsigned long lovsub_lock_weigh(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct lovsub_lock *lock = cl2lovsub_lock(slice);
- struct lov_lock *lov;
- unsigned long dumbbell;
-
- LASSERT(cl_lock_is_mutexed(slice->cls_lock));
-
- if (!list_empty(&lock->lss_parents)) {
- /*
- * It is not clear whether all parents have to be asked and
- * their estimations summed, or it is enough to ask one. For
- * the current usages, one is always enough.
- */
- lov = container_of(lock->lss_parents.next,
- struct lov_lock_link, lll_list)->lll_super;
-
- lovsub_parent_lock(env, lov);
- dumbbell = cl_lock_weigh(env, lov->lls_cl.cls_lock);
- lovsub_parent_unlock(env, lov);
- } else
- dumbbell = 0;
-
- return dumbbell;
-}
-
-/**
- * Maps start/end offsets within a stripe, to offsets within a file.
- */
-static void lovsub_lock_descr_map(const struct cl_lock_descr *in,
- struct lov_object *lov,
- int stripe, struct cl_lock_descr *out)
-{
- pgoff_t size; /* stripe size in pages */
- pgoff_t skip; /* how many pages in every stripe are occupied by
- * "other" stripes
- */
- pgoff_t start;
- pgoff_t end;
-
- start = in->cld_start;
- end = in->cld_end;
-
- if (lov->lo_lsm->lsm_stripe_count > 1) {
- size = cl_index(lov2cl(lov), lov->lo_lsm->lsm_stripe_size);
- skip = (lov->lo_lsm->lsm_stripe_count - 1) * size;
-
- /* XXX overflow check here? */
- start += start/size * skip + stripe * size;
-
- if (end != CL_PAGE_EOF) {
- end += end/size * skip + stripe * size;
- /*
- * And check for overflow...
- */
- if (end < in->cld_end)
- end = CL_PAGE_EOF;
- }
- }
- out->cld_start = start;
- out->cld_end = end;
-}
-
-/**
- * Adjusts parent lock extent when a sub-lock is attached to a parent. This is
- * called in two ways:
- *
- * - as part of receive call-back, when server returns granted extent to
- * the client, and
- *
- * - when top-lock finds existing sub-lock in the cache.
- *
- * Note, that lock mode is not propagated to the parent: i.e., if CLM_READ
- * top-lock matches CLM_WRITE sub-lock, top-lock is still CLM_READ.
- */
-int lov_sublock_modify(const struct lu_env *env, struct lov_lock *lov,
- struct lovsub_lock *sublock,
- const struct cl_lock_descr *d, int idx)
-{
- struct cl_lock *parent;
- struct lovsub_object *subobj;
- struct cl_lock_descr *pd;
- struct cl_lock_descr *parent_descr;
- int result;
-
- parent = lov->lls_cl.cls_lock;
- parent_descr = &parent->cll_descr;
- LASSERT(cl_lock_mode_match(d->cld_mode, parent_descr->cld_mode));
-
- subobj = cl2lovsub(sublock->lss_cl.cls_obj);
- pd = &lov_env_info(env)->lti_ldescr;
-
- pd->cld_obj = parent_descr->cld_obj;
- pd->cld_mode = parent_descr->cld_mode;
- pd->cld_gid = parent_descr->cld_gid;
- lovsub_lock_descr_map(d, subobj->lso_super, subobj->lso_index, pd);
- lov->lls_sub[idx].sub_got = *d;
- /*
- * Notify top-lock about modification, if lock description changes
- * materially.
- */
- if (!cl_lock_ext_match(parent_descr, pd))
- result = cl_lock_modify(env, parent, pd);
- else
- result = 0;
- return result;
-}
-
-static int lovsub_lock_modify(const struct lu_env *env,
- const struct cl_lock_slice *s,
- const struct cl_lock_descr *d)
-{
- struct lovsub_lock *lock = cl2lovsub_lock(s);
- struct lov_lock_link *scan;
- struct lov_lock *lov;
- int result = 0;
-
- LASSERT(cl_lock_mode_match(d->cld_mode,
- s->cls_lock->cll_descr.cld_mode));
- list_for_each_entry(scan, &lock->lss_parents, lll_list) {
- int rc;
-
- lov = scan->lll_super;
- lovsub_parent_lock(env, lov);
- rc = lov_sublock_modify(env, lov, lock, d, scan->lll_idx);
- lovsub_parent_unlock(env, lov);
- result = result ?: rc;
- }
- return result;
-}
-
-static int lovsub_lock_closure(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- struct cl_lock_closure *closure)
-{
- struct lovsub_lock *sub;
- struct cl_lock *parent;
- struct lov_lock_link *scan;
- int result;
-
- LASSERT(cl_lock_is_mutexed(slice->cls_lock));
-
- sub = cl2lovsub_lock(slice);
- result = 0;
-
- list_for_each_entry(scan, &sub->lss_parents, lll_list) {
- parent = scan->lll_super->lls_cl.cls_lock;
- result = cl_lock_closure_build(env, parent, closure);
- if (result != 0)
- break;
- }
- return result;
-}
-
-/**
- * A helper function for lovsub_lock_delete() that deals with a given parent
- * top-lock.
- */
-static int lovsub_lock_delete_one(const struct lu_env *env,
- struct cl_lock *child, struct lov_lock *lov)
-{
- struct cl_lock *parent;
- int result;
-
- parent = lov->lls_cl.cls_lock;
- if (parent->cll_error)
- return 0;
-
- result = 0;
- switch (parent->cll_state) {
- case CLS_ENQUEUED:
- /* See LU-1355 for the case that a glimpse lock is
- * interrupted by signal
- */
- LASSERT(parent->cll_flags & CLF_CANCELLED);
- break;
- case CLS_QUEUING:
- case CLS_FREEING:
- cl_lock_signal(env, parent);
- break;
- case CLS_INTRANSIT:
- /*
- * Here lies a problem: a sub-lock is canceled while top-lock
- * is being unlocked. Top-lock cannot be moved into CLS_NEW
- * state, because unlocking has to succeed eventually by
- * placing lock into CLS_CACHED (or failing it), see
- * cl_unuse_try(). Nor can top-lock be left in CLS_CACHED
- * state, because lov maintains an invariant that all
- * sub-locks exist in CLS_CACHED (this allows cached top-lock
- * to be reused immediately). Nor can we wait for top-lock
- * state to change, because this can be synchronous to the
- * current thread.
- *
- * We know for sure that lov_lock_unuse() will be called at
- * least one more time to finish un-using, so leave a mark on
- * the top-lock, that will be seen by the next call to
- * lov_lock_unuse().
- */
- if (cl_lock_is_intransit(parent))
- lov->lls_cancel_race = 1;
- break;
- case CLS_CACHED:
- /*
- * if a sub-lock is canceled move its top-lock into CLS_NEW
- * state to preserve an invariant that a top-lock in
- * CLS_CACHED is immediately ready for re-use (i.e., has all
- * sub-locks), and so that next attempt to re-use the top-lock
- * enqueues missing sub-lock.
- */
- cl_lock_state_set(env, parent, CLS_NEW);
- /* fall through */
- case CLS_NEW:
- /*
- * if last sub-lock is canceled, destroy the top-lock (which
- * is now `empty') proactively.
- */
- if (lov->lls_nr_filled == 0) {
- /* ... but unfortunately, this cannot be done easily,
- * as cancellation of a top-lock might acquire mutices
- * of its other sub-locks, violating lock ordering,
- * see cl_lock_{cancel,delete}() preconditions.
- *
- * To work around this, the mutex of this sub-lock is
- * released, top-lock is destroyed, and sub-lock mutex
- * acquired again. The list of parents has to be
- * re-scanned from the beginning after this.
- *
- * Only do this if no mutices other than on @child and
- * @parent are held by the current thread.
- *
- * TODO: The lock modal here is too complex, because
- * the lock may be canceled and deleted by voluntarily:
- * cl_lock_request
- * -> osc_lock_enqueue_wait
- * -> osc_lock_cancel_wait
- * -> cl_lock_delete
- * -> lovsub_lock_delete
- * -> cl_lock_cancel/delete
- * -> ...
- *
- * The better choice is to spawn a kernel thread for
- * this purpose. -jay
- */
- if (cl_lock_nr_mutexed(env) == 2) {
- cl_lock_mutex_put(env, child);
- cl_lock_cancel(env, parent);
- cl_lock_delete(env, parent);
- result = 1;
- }
- }
- break;
- case CLS_HELD:
- CL_LOCK_DEBUG(D_ERROR, env, parent, "Delete CLS_HELD lock\n");
- default:
- CERROR("Impossible state: %d\n", parent->cll_state);
- LBUG();
- break;
- }
-
- return result;
-}
-
-/**
- * An implementation of cl_lock_operations::clo_delete() method. This is
- * invoked in "bottom-to-top" delete, when lock destruction starts from the
- * sub-lock (e.g, as a result of ldlm lock LRU policy).
- */
-static void lovsub_lock_delete(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct cl_lock *child = slice->cls_lock;
- struct lovsub_lock *sub = cl2lovsub_lock(slice);
- int restart;
-
- LASSERT(cl_lock_is_mutexed(child));
-
- /*
- * Destruction of a sub-lock might take multiple iterations, because
- * when the last sub-lock of a given top-lock is deleted, top-lock is
- * canceled proactively, and this requires to release sub-lock
- * mutex. Once sub-lock mutex has been released, list of its parents
- * has to be re-scanned from the beginning.
- */
- do {
- struct lov_lock *lov;
- struct lov_lock_link *scan;
- struct lov_lock_link *temp;
- struct lov_lock_sub *subdata;
-
- restart = 0;
- list_for_each_entry_safe(scan, temp,
- &sub->lss_parents, lll_list) {
- lov = scan->lll_super;
- subdata = &lov->lls_sub[scan->lll_idx];
- lovsub_parent_lock(env, lov);
- subdata->sub_got = subdata->sub_descr;
- lov_lock_unlink(env, scan, sub);
- restart = lovsub_lock_delete_one(env, child, lov);
- lovsub_parent_unlock(env, lov);
-
- if (restart) {
- cl_lock_mutex_get(env, child);
- break;
- }
- }
- } while (restart);
-}
-
-static int lovsub_lock_print(const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct cl_lock_slice *slice)
-{
- struct lovsub_lock *sub = cl2lovsub_lock(slice);
- struct lov_lock *lov;
- struct lov_lock_link *scan;
-
- list_for_each_entry(scan, &sub->lss_parents, lll_list) {
- lov = scan->lll_super;
- (*p)(env, cookie, "[%d %p ", scan->lll_idx, lov);
- if (lov)
- cl_lock_descr_print(env, cookie, p,
- &lov->lls_cl.cls_lock->cll_descr);
- (*p)(env, cookie, "] ");
- }
- return 0;
-}
-
static const struct cl_lock_operations lovsub_lock_ops = {
.clo_fini = lovsub_lock_fini,
- .clo_state = lovsub_lock_state,
- .clo_delete = lovsub_lock_delete,
- .clo_modify = lovsub_lock_modify,
- .clo_closure = lovsub_lock_closure,
- .clo_weigh = lovsub_lock_weigh,
- .clo_print = lovsub_lock_print
};
int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj,
@@ -460,8 +77,9 @@ int lovsub_lock_init(const struct lu_env *env, struct cl_object *obj,
INIT_LIST_HEAD(&lsk->lss_parents);
cl_lock_slice_add(lock, &lsk->lss_cl, obj, &lovsub_lock_ops);
result = 0;
- } else
+ } else {
result = -ENOMEM;
+ }
return result;
}
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_object.c b/drivers/staging/lustre/lustre/lov/lovsub_object.c
index 6c5430d938d0..bcaae1e5b840 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_object.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_object.c
@@ -67,10 +67,10 @@ int lovsub_object_init(const struct lu_env *env, struct lu_object *obj,
lu_object_add(obj, below);
cl_object_page_init(lu2cl(obj), sizeof(struct lovsub_page));
result = 0;
- } else
+ } else {
result = -ENOMEM;
+ }
return result;
-
}
static void lovsub_object_free(const struct lu_env *env, struct lu_object *obj)
@@ -154,8 +154,9 @@ struct lu_object *lovsub_object_alloc(const struct lu_env *env,
lu_object_add_top(&hdr->coh_lu, obj);
los->lso_cl.co_ops = &lovsub_ops;
obj->lo_ops = &lovsub_lu_obj_ops;
- } else
+ } else {
obj = NULL;
+ }
return obj;
}
diff --git a/drivers/staging/lustre/lustre/lov/lovsub_page.c b/drivers/staging/lustre/lustre/lov/lovsub_page.c
index 2d945532b78e..9badedcce2bf 100644
--- a/drivers/staging/lustre/lustre/lov/lovsub_page.c
+++ b/drivers/staging/lustre/lustre/lov/lovsub_page.c
@@ -60,11 +60,11 @@ static const struct cl_page_operations lovsub_page_ops = {
};
int lovsub_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *unused)
+ struct cl_page *page, pgoff_t index)
{
struct lovsub_page *lsb = cl_object_page_slice(obj, page);
- cl_page_slice_add(page, &lsb->lsb_cl, obj, &lovsub_page_ops);
+ cl_page_slice_add(page, &lsb->lsb_cl, obj, index, &lovsub_page_ops);
return 0;
}
diff --git a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
index 38f267a60f59..5c7a15dd7bd2 100644
--- a/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
+++ b/drivers/staging/lustre/lustre/mdc/lproc_mdc.c
@@ -49,9 +49,9 @@ static ssize_t max_rpcs_in_flight_show(struct kobject *kobj,
obd_kobj);
struct client_obd *cli = &dev->u.cli;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
len = sprintf(buf, "%u\n", cli->cl_max_rpcs_in_flight);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return len;
}
@@ -74,9 +74,9 @@ static ssize_t max_rpcs_in_flight_store(struct kobject *kobj,
if (val < 1 || val > MDC_MAX_RIF_MAX)
return -ERANGE;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
cli->cl_max_rpcs_in_flight = val;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return count;
}
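For readers unfamiliar with the old wrappers: client_obd_list_lock()/unlock() were thin veneers over a spinlock, so the handlers above now simply take cli->cl_loi_list_lock directly. Below is a minimal userspace sketch of the same read-under-spinlock pattern, using POSIX spinlocks and made-up struct/field names rather than the Lustre ones (compile with -pthread):

#include <pthread.h>
#include <stdio.h>

struct client_state {
	pthread_spinlock_t lock;	/* stands in for cl_loi_list_lock */
	unsigned int max_rpcs_in_flight;
};

/* Read the counter while holding the spinlock, mirroring the show handler. */
static int state_show(struct client_state *cli, char *buf, size_t len)
{
	int n;

	pthread_spin_lock(&cli->lock);
	n = snprintf(buf, len, "%u\n", cli->max_rpcs_in_flight);
	pthread_spin_unlock(&cli->lock);
	return n;
}

int main(void)
{
	struct client_state cli = { .max_rpcs_in_flight = 8 };
	char buf[16];

	pthread_spin_init(&cli.lock, PTHREAD_PROCESS_PRIVATE);
	state_show(&cli, buf, sizeof(buf));
	fputs(buf, stdout);
	pthread_spin_destroy(&cli.lock);
	return 0;
}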
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_lib.c b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
index b3bfdcb73670..856c54e03b6b 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_lib.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_lib.c
@@ -279,8 +279,7 @@ static void mdc_setattr_pack_rec(struct mdt_rec_setattr *rec,
rec->sa_atime = LTIME_S(op_data->op_attr.ia_atime);
rec->sa_mtime = LTIME_S(op_data->op_attr.ia_mtime);
rec->sa_ctime = LTIME_S(op_data->op_attr.ia_ctime);
- rec->sa_attr_flags =
- ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags;
+ rec->sa_attr_flags = op_data->op_attr_flags;
if ((op_data->op_attr.ia_valid & ATTR_GID) &&
in_group_p(op_data->op_attr.ia_gid))
rec->sa_suppgid =
@@ -439,7 +438,6 @@ void mdc_getattr_pack(struct ptlrpc_request *req, __u64 valid, int flags,
char *tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
LOGL0(op_data->op_name, op_data->op_namelen, tmp);
-
}
}
@@ -455,7 +453,7 @@ static void mdc_hsm_release_pack(struct ptlrpc_request *req,
lock = ldlm_handle2lock(&op_data->op_lease_handle);
if (lock) {
data->cd_handle = lock->l_remote_handle;
- ldlm_lock_put(lock);
+ LDLM_LOCK_PUT(lock);
}
ldlm_cli_cancel(&op_data->op_lease_handle, LCF_LOCAL);
@@ -481,9 +479,9 @@ static int mdc_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw)
{
int rc;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
rc = list_empty(&mcw->mcw_entry);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return rc;
};
@@ -497,23 +495,23 @@ int mdc_enter_request(struct client_obd *cli)
struct mdc_cache_waiter mcw;
struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters);
init_waitqueue_head(&mcw.mcw_waitq);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
rc = l_wait_event(mcw.mcw_waitq, mdc_req_avail(cli, &mcw),
&lwi);
if (rc) {
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
if (list_empty(&mcw.mcw_entry))
cli->cl_r_in_flight--;
list_del_init(&mcw.mcw_entry);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
}
} else {
cli->cl_r_in_flight++;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
}
return rc;
}
@@ -523,7 +521,7 @@ void mdc_exit_request(struct client_obd *cli)
struct list_head *l, *tmp;
struct mdc_cache_waiter *mcw;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
cli->cl_r_in_flight--;
list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
@@ -538,5 +536,5 @@ void mdc_exit_request(struct client_obd *cli)
}
/* Empty waiting list? Decrease reqs in-flight number */
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
}
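mdc_enter_request()/mdc_exit_request() above act as an admission gate: a sender sleeps while cl_r_in_flight has reached cl_max_rpcs_in_flight, and each completion wakes a waiter. A rough condition-variable sketch of that gate is below; the names are illustrative, and the interruptible wait and explicit waiter list of the real code are deliberately omitted:

#include <pthread.h>

struct rpc_gate {
	pthread_mutex_t lock;
	pthread_cond_t  free_slot;
	int in_flight;
	int max_in_flight;
};

/* Block until an RPC slot is available, then claim it. */
static void gate_enter(struct rpc_gate *g)
{
	pthread_mutex_lock(&g->lock);
	while (g->in_flight >= g->max_in_flight)
		pthread_cond_wait(&g->free_slot, &g->lock);
	g->in_flight++;
	pthread_mutex_unlock(&g->lock);
}

/* Release a slot and wake one waiter, as mdc_exit_request() does. */
static void gate_exit(struct rpc_gate *g)
{
	pthread_mutex_lock(&g->lock);
	g->in_flight--;
	pthread_cond_signal(&g->free_slot);
	pthread_mutex_unlock(&g->lock);
}

int main(void)
{
	struct rpc_gate g = { PTHREAD_MUTEX_INITIALIZER,
			      PTHREAD_COND_INITIALIZER, 0, 8 };

	gate_enter(&g);		/* claims one of the 8 slots */
	gate_exit(&g);
	return 0;
}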
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_locks.c b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
index 958a164f620d..3b1bc9111b93 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_locks.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_locks.c
@@ -869,7 +869,9 @@ resend:
* (explicit ones or ones automatically generated by the kernel to clean
* current FLocks upon exit) that can't be trashed
*/
- if ((rc == -EINTR) || (rc == -ETIMEDOUT))
+ if (((rc == -EINTR) || (rc == -ETIMEDOUT)) &&
+ (einfo->ei_type == LDLM_FLOCK) &&
+ (einfo->ei_mode == LCK_NL))
goto resend;
return rc;
}
@@ -963,7 +965,6 @@ static int mdc_finish_intent_lock(struct obd_export *exp,
if (fid_is_sane(&op_data->op_fid2) &&
it->it_create_mode & M_CHECK_STALE &&
it->it_op != IT_GETATTR) {
-
/* Also: did we find the same inode? */
/* server can return one of two fids:
* op_fid2 - newly allocated fid - if file is created.
diff --git a/drivers/staging/lustre/lustre/mdc/mdc_request.c b/drivers/staging/lustre/lustre/mdc/mdc_request.c
index b91d3ff18b02..86b7445365f4 100644
--- a/drivers/staging/lustre/lustre/mdc/mdc_request.c
+++ b/drivers/staging/lustre/lustre/mdc/mdc_request.c
@@ -142,9 +142,8 @@ static int mdc_getattr_common(struct obd_export *exp,
CDEBUG(D_NET, "mode: %o\n", body->mode);
+ mdc_update_max_ea_from_body(exp, body);
if (body->eadatasize != 0) {
- mdc_update_max_ea_from_body(exp, body);
-
eadata = req_capsule_server_sized_get(pill, &RMF_MDT_MD,
body->eadatasize);
if (!eadata)
@@ -1169,7 +1168,7 @@ static int mdc_ioc_hsm_progress(struct obd_export *exp,
goto out;
}
- mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, 0, 0);
+ mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, -1, 0);
/* Copy hsm_progress struct */
req_hpk = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_PROGRESS);
@@ -1203,7 +1202,7 @@ static int mdc_ioc_hsm_ct_register(struct obd_import *imp, __u32 archives)
goto out;
}
- mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, 0, 0);
+ mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, -1, 0);
/* Copy hsm_progress struct */
archive_mask = req_capsule_client_get(&req->rq_pill,
@@ -1278,7 +1277,7 @@ static int mdc_ioc_hsm_ct_unregister(struct obd_import *imp)
goto out;
}
- mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, 0, 0);
+ mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, -1, 0);
ptlrpc_request_set_replen(req);
@@ -1395,7 +1394,7 @@ static int mdc_ioc_hsm_request(struct obd_export *exp,
return rc;
}
- mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, 0, 0);
+ mdc_pack_body(req, NULL, OBD_MD_FLRMTPERM, 0, -1, 0);
/* Copy hsm_request struct */
req_hr = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_REQUEST);
@@ -1952,7 +1951,7 @@ static void lustre_swab_hal(struct hsm_action_list *h)
__swab32s(&h->hal_count);
__swab32s(&h->hal_archive_id);
__swab64s(&h->hal_flags);
- hai = hai_zero(h);
+ hai = hai_first(h);
for (i = 0; i < h->hal_count; i++, hai = hai_next(hai))
lustre_swab_hai(hai);
}
@@ -2249,7 +2248,7 @@ static struct obd_uuid *mdc_get_uuid(struct obd_export *exp)
* recovery; a non-zero value is returned if the lock can be canceled,
* or zero if it cannot be
*/
-static int mdc_cancel_for_recovery(struct ldlm_lock *lock)
+static int mdc_cancel_weight(struct ldlm_lock *lock)
{
if (lock->l_resource->lr_type != LDLM_IBITS)
return 0;
@@ -2314,12 +2313,14 @@ static int mdc_setup(struct obd_device *obd, struct lustre_cfg *cfg)
return -ENOMEM;
mdc_init_rpc_lock(cli->cl_rpc_lock);
- ptlrpcd_addref();
+ rc = ptlrpcd_addref();
+ if (rc < 0)
+ goto err_rpc_lock;
cli->cl_close_lock = kzalloc(sizeof(*cli->cl_close_lock), GFP_NOFS);
if (!cli->cl_close_lock) {
rc = -ENOMEM;
- goto err_rpc_lock;
+ goto err_ptlrpcd_decref;
}
mdc_init_rpc_lock(cli->cl_close_lock);
@@ -2331,7 +2332,7 @@ static int mdc_setup(struct obd_device *obd, struct lustre_cfg *cfg)
sptlrpc_lprocfs_cliobd_attach(obd);
ptlrpc_lprocfs_register_obd(obd);
- ns_register_cancel(obd->obd_namespace, mdc_cancel_for_recovery);
+ ns_register_cancel(obd->obd_namespace, mdc_cancel_weight);
obd->obd_namespace->ns_lvbo = &inode_lvbo;
@@ -2345,9 +2346,10 @@ static int mdc_setup(struct obd_device *obd, struct lustre_cfg *cfg)
err_close_lock:
kfree(cli->cl_close_lock);
+err_ptlrpcd_decref:
+ ptlrpcd_decref();
err_rpc_lock:
kfree(cli->cl_rpc_lock);
- ptlrpcd_decref();
return rc;
}
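The mdc_setup() change above is the usual goto-unwind pattern: once ptlrpcd_addref() can fail, a new err_ptlrpcd_decref label has to sit between the later and earlier cleanup labels so resources are undone in reverse order of acquisition. A self-contained sketch of that shape, with stand-in resource functions rather than the real Lustre calls:

#include <stdio.h>

/* Stand-ins for the resources mdc_setup() acquires (names made up). */
static int acquire_rpc_lock(void)   { return 0; }
static int take_ptlrpcd_ref(void)   { return 0; }
static int acquire_close_lock(void) { return -1; /* pretend this fails */ }
static void put_ptlrpcd_ref(void)   { puts("dropped ptlrpcd ref"); }
static void release_rpc_lock(void)  { puts("freed rpc lock"); }

static int setup_example(void)
{
	int rc = acquire_rpc_lock();

	if (rc)
		return rc;
	rc = take_ptlrpcd_ref();
	if (rc)
		goto err_rpc_lock;
	rc = acquire_close_lock();
	if (rc)
		goto err_ptlrpcd_decref;
	return 0;

err_ptlrpcd_decref:
	put_ptlrpcd_ref();	/* new label: undo the ref before the lock */
err_rpc_lock:
	release_rpc_lock();
	return rc;
}

int main(void)
{
	return setup_example() ? 1 : 0;
}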
diff --git a/drivers/staging/lustre/lustre/mgc/mgc_request.c b/drivers/staging/lustre/lustre/mgc/mgc_request.c
index 3924b095bfb0..2311a437c441 100644
--- a/drivers/staging/lustre/lustre/mgc/mgc_request.c
+++ b/drivers/staging/lustre/lustre/mgc/mgc_request.c
@@ -502,8 +502,12 @@ static void do_requeue(struct config_llog_data *cld)
*/
down_read(&cld->cld_mgcexp->exp_obd->u.cli.cl_sem);
if (cld->cld_mgcexp->exp_obd->u.cli.cl_conn_count != 0) {
+ int rc;
+
CDEBUG(D_MGC, "updating log %s\n", cld->cld_logname);
- mgc_process_log(cld->cld_mgcexp->exp_obd, cld);
+ rc = mgc_process_log(cld->cld_mgcexp->exp_obd, cld);
+ if (rc && rc != -ENOENT)
+ CERROR("failed processing log: %d\n", rc);
} else {
CDEBUG(D_MGC, "disconnecting, won't update log %s\n",
cld->cld_logname);
@@ -734,7 +738,9 @@ static int mgc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
struct task_struct *task;
int rc;
- ptlrpcd_addref();
+ rc = ptlrpcd_addref();
+ if (rc < 0)
+ goto err_noref;
rc = client_obd_setup(obd, lcfg);
if (rc)
@@ -773,6 +779,7 @@ err_cleanup:
client_obd_cleanup(obd);
err_decref:
ptlrpcd_decref();
+err_noref:
return rc;
}
@@ -1720,7 +1727,6 @@ static int mgc_process_config(struct obd_device *obd, u32 len, void *buf)
CERROR("Unknown command: %d\n", lcfg->lcfg_command);
rc = -EINVAL;
goto out;
-
}
}
out:
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_io.c b/drivers/staging/lustre/lustre/obdclass/cl_io.c
index f5128b4f176f..583fb5f33889 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_io.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_io.c
@@ -36,6 +36,7 @@
* Client IO.
*
* Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@intel.com>
*/
#define DEBUG_SUBSYSTEM S_CLASS
@@ -132,6 +133,7 @@ void cl_io_fini(const struct lu_env *env, struct cl_io *io)
case CIT_WRITE:
break;
case CIT_FAULT:
+ break;
case CIT_FSYNC:
LASSERT(!io->ci_need_restart);
break;
@@ -159,7 +161,6 @@ static int cl_io_init0(const struct lu_env *env, struct cl_io *io,
io->ci_type = iot;
INIT_LIST_HEAD(&io->ci_lockset.cls_todo);
- INIT_LIST_HEAD(&io->ci_lockset.cls_curr);
INIT_LIST_HEAD(&io->ci_lockset.cls_done);
INIT_LIST_HEAD(&io->ci_layers);
@@ -241,37 +242,7 @@ static int cl_lock_descr_sort(const struct cl_lock_descr *d0,
const struct cl_lock_descr *d1)
{
return lu_fid_cmp(lu_object_fid(&d0->cld_obj->co_lu),
- lu_object_fid(&d1->cld_obj->co_lu)) ?:
- __diff_normalize(d0->cld_start, d1->cld_start);
-}
-
-static int cl_lock_descr_cmp(const struct cl_lock_descr *d0,
- const struct cl_lock_descr *d1)
-{
- int ret;
-
- ret = lu_fid_cmp(lu_object_fid(&d0->cld_obj->co_lu),
- lu_object_fid(&d1->cld_obj->co_lu));
- if (ret)
- return ret;
- if (d0->cld_end < d1->cld_start)
- return -1;
- if (d0->cld_start > d0->cld_end)
- return 1;
- return 0;
-}
-
-static void cl_lock_descr_merge(struct cl_lock_descr *d0,
- const struct cl_lock_descr *d1)
-{
- d0->cld_start = min(d0->cld_start, d1->cld_start);
- d0->cld_end = max(d0->cld_end, d1->cld_end);
-
- if (d1->cld_mode == CLM_WRITE && d0->cld_mode != CLM_WRITE)
- d0->cld_mode = CLM_WRITE;
-
- if (d1->cld_mode == CLM_GROUP && d0->cld_mode != CLM_GROUP)
- d0->cld_mode = CLM_GROUP;
+ lu_object_fid(&d1->cld_obj->co_lu));
}
/*
@@ -320,33 +291,35 @@ static void cl_io_locks_sort(struct cl_io *io)
} while (!done);
}
-/**
- * Check whether \a queue contains locks matching \a need.
- *
- * \retval +ve there is a matching lock in the \a queue
- * \retval 0 there are no matching locks in the \a queue
- */
-int cl_queue_match(const struct list_head *queue,
- const struct cl_lock_descr *need)
+static void cl_lock_descr_merge(struct cl_lock_descr *d0,
+ const struct cl_lock_descr *d1)
{
- struct cl_io_lock_link *scan;
+ d0->cld_start = min(d0->cld_start, d1->cld_start);
+ d0->cld_end = max(d0->cld_end, d1->cld_end);
- list_for_each_entry(scan, queue, cill_linkage) {
- if (cl_lock_descr_match(&scan->cill_descr, need))
- return 1;
- }
- return 0;
+ if (d1->cld_mode == CLM_WRITE && d0->cld_mode != CLM_WRITE)
+ d0->cld_mode = CLM_WRITE;
+
+ if (d1->cld_mode == CLM_GROUP && d0->cld_mode != CLM_GROUP)
+ d0->cld_mode = CLM_GROUP;
}
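cl_lock_descr_merge() above widens one lock descriptor so it also covers another: the extent becomes the union of the two ranges, and the mode is promoted to write or group when either side needs it. A standalone rendering of that merge rule, with simplified types (the real descriptor also carries the object pointer and group id):

#include <stdio.h>

enum lock_mode { MODE_READ, MODE_WRITE, MODE_GROUP };

struct lock_descr {
	unsigned long  start;	/* first page covered */
	unsigned long  end;	/* last page covered  */
	enum lock_mode mode;
};

/* Widen d0 so it also satisfies d1, as cl_lock_descr_merge() does. */
static void descr_merge(struct lock_descr *d0, const struct lock_descr *d1)
{
	if (d1->start < d0->start)
		d0->start = d1->start;
	if (d1->end > d0->end)
		d0->end = d1->end;
	if (d1->mode == MODE_WRITE && d0->mode != MODE_WRITE)
		d0->mode = MODE_WRITE;
	if (d1->mode == MODE_GROUP && d0->mode != MODE_GROUP)
		d0->mode = MODE_GROUP;
}

int main(void)
{
	struct lock_descr a = { 0, 15, MODE_READ };
	struct lock_descr b = { 8, 255, MODE_WRITE };

	descr_merge(&a, &b);
	printf("[%lu, %lu] mode %d\n", a.start, a.end, a.mode);
	return 0;
}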
-EXPORT_SYMBOL(cl_queue_match);
-static int cl_queue_merge(const struct list_head *queue,
- const struct cl_lock_descr *need)
+static int cl_lockset_merge(const struct cl_lockset *set,
+ const struct cl_lock_descr *need)
{
struct cl_io_lock_link *scan;
- list_for_each_entry(scan, queue, cill_linkage) {
- if (cl_lock_descr_cmp(&scan->cill_descr, need))
+ list_for_each_entry(scan, &set->cls_todo, cill_linkage) {
+ if (!cl_object_same(scan->cill_descr.cld_obj, need->cld_obj))
continue;
+
+ /* Merge locks for the same object because the ldlm lock server
+ * may expand the lock extent; otherwise there is a deadlock
+ * case if two conflicting locks are queued for the same object
+ * and the lock server expands one lock to overlap the other.
+ * The side effect is that this can generate a multi-stripe lock
+ * that may cause cascading problems.
+ */
cl_lock_descr_merge(&scan->cill_descr, need);
CDEBUG(D_VFSTRACE, "lock: %d: [%lu, %lu]\n",
scan->cill_descr.cld_mode, scan->cill_descr.cld_start,
@@ -356,87 +329,20 @@ static int cl_queue_merge(const struct list_head *queue,
return 0;
}
-static int cl_lockset_match(const struct cl_lockset *set,
- const struct cl_lock_descr *need)
-{
- return cl_queue_match(&set->cls_curr, need) ||
- cl_queue_match(&set->cls_done, need);
-}
-
-static int cl_lockset_merge(const struct cl_lockset *set,
- const struct cl_lock_descr *need)
-{
- return cl_queue_merge(&set->cls_todo, need) ||
- cl_lockset_match(set, need);
-}
-
-static int cl_lockset_lock_one(const struct lu_env *env,
- struct cl_io *io, struct cl_lockset *set,
- struct cl_io_lock_link *link)
-{
- struct cl_lock *lock;
- int result;
-
- lock = cl_lock_request(env, io, &link->cill_descr, "io", io);
-
- if (!IS_ERR(lock)) {
- link->cill_lock = lock;
- list_move(&link->cill_linkage, &set->cls_curr);
- if (!(link->cill_descr.cld_enq_flags & CEF_ASYNC)) {
- result = cl_wait(env, lock);
- if (result == 0)
- list_move(&link->cill_linkage, &set->cls_done);
- } else
- result = 0;
- } else
- result = PTR_ERR(lock);
- return result;
-}
-
-static void cl_lock_link_fini(const struct lu_env *env, struct cl_io *io,
- struct cl_io_lock_link *link)
-{
- struct cl_lock *lock = link->cill_lock;
-
- list_del_init(&link->cill_linkage);
- if (lock) {
- cl_lock_release(env, lock, "io", io);
- link->cill_lock = NULL;
- }
- if (link->cill_fini)
- link->cill_fini(env, link);
-}
-
static int cl_lockset_lock(const struct lu_env *env, struct cl_io *io,
struct cl_lockset *set)
{
struct cl_io_lock_link *link;
struct cl_io_lock_link *temp;
- struct cl_lock *lock;
int result;
result = 0;
list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
- if (!cl_lockset_match(set, &link->cill_descr)) {
- /* XXX some locking to guarantee that locks aren't
- * expanded in between.
- */
- result = cl_lockset_lock_one(env, io, set, link);
- if (result != 0)
- break;
- } else
- cl_lock_link_fini(env, io, link);
- }
- if (result == 0) {
- list_for_each_entry_safe(link, temp,
- &set->cls_curr, cill_linkage) {
- lock = link->cill_lock;
- result = cl_wait(env, lock);
- if (result == 0)
- list_move(&link->cill_linkage, &set->cls_done);
- else
- break;
- }
+ result = cl_lock_request(env, io, &link->cill_lock);
+ if (result < 0)
+ break;
+
+ list_move(&link->cill_linkage, &set->cls_done);
}
return result;
}
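With the cl_lock rewrite, cl_lockset_lock() above reduces to "request each lock on the todo list, move it to done, stop at the first error"; cl_io_unlock() later releases whatever reached the done list. An array-based sketch of that acquire/cleanup split, with placeholder request and release calls:

#include <stdio.h>

struct io_lock {
	int granted;	/* stands in for being on the "done" list */
};

static int lock_request(struct io_lock *l)  { l->granted = 1; return 0; }
static void lock_release(struct io_lock *l) { l->granted = 0; }

/* Acquire every lock in order; stop at the first failure. */
static int lockset_lock(struct io_lock *set, int nr)
{
	int i, rc = 0;

	for (i = 0; i < nr; i++) {
		rc = lock_request(&set[i]);
		if (rc < 0)
			break;
	}
	return rc;
}

/* Release only the locks that were actually granted. */
static void lockset_unlock(struct io_lock *set, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		if (set[i].granted)
			lock_release(&set[i]);
}

int main(void)
{
	struct io_lock set[3] = { {0}, {0}, {0} };

	if (lockset_lock(set, 3) == 0)
		puts("all locks granted");
	lockset_unlock(set, 3);
	return 0;
}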
@@ -492,16 +398,19 @@ void cl_io_unlock(const struct lu_env *env, struct cl_io *io)
set = &io->ci_lockset;
- list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage)
- cl_lock_link_fini(env, io, link);
-
- list_for_each_entry_safe(link, temp, &set->cls_curr, cill_linkage)
- cl_lock_link_fini(env, io, link);
+ list_for_each_entry_safe(link, temp, &set->cls_todo, cill_linkage) {
+ list_del_init(&link->cill_linkage);
+ if (link->cill_fini)
+ link->cill_fini(env, link);
+ }
list_for_each_entry_safe(link, temp, &set->cls_done, cill_linkage) {
- cl_unuse(env, link->cill_lock);
- cl_lock_link_fini(env, io, link);
+ list_del_init(&link->cill_linkage);
+ cl_lock_release(env, &link->cill_lock);
+ if (link->cill_fini)
+ link->cill_fini(env, link);
}
+
cl_io_for_each_reverse(scan, io) {
if (scan->cis_iop->op[io->ci_type].cio_unlock)
scan->cis_iop->op[io->ci_type].cio_unlock(env, scan);
@@ -595,9 +504,9 @@ int cl_io_lock_add(const struct lu_env *env, struct cl_io *io,
{
int result;
- if (cl_lockset_merge(&io->ci_lockset, &link->cill_descr))
+ if (cl_lockset_merge(&io->ci_lockset, &link->cill_descr)) {
result = 1;
- else {
+ } else {
list_add(&link->cill_linkage, &io->ci_lockset.cls_todo);
result = 0;
}
@@ -627,8 +536,9 @@ int cl_io_lock_alloc_add(const struct lu_env *env, struct cl_io *io,
result = cl_io_lock_add(env, io, link);
if (result) /* lock match */
link->cill_fini(env, link);
- } else
+ } else {
result = -ENOMEM;
+ }
return result;
}
@@ -692,42 +602,6 @@ cl_io_slice_page(const struct cl_io_slice *ios, struct cl_page *page)
}
/**
- * True iff \a page is within \a io range.
- */
-static int cl_page_in_io(const struct cl_page *page, const struct cl_io *io)
-{
- int result = 1;
- loff_t start;
- loff_t end;
- pgoff_t idx;
-
- idx = page->cp_index;
- switch (io->ci_type) {
- case CIT_READ:
- case CIT_WRITE:
- /*
- * check that [start, end) and [pos, pos + count) extents
- * overlap.
- */
- if (!cl_io_is_append(io)) {
- const struct cl_io_rw_common *crw = &(io->u.ci_rw);
-
- start = cl_offset(page->cp_obj, idx);
- end = cl_offset(page->cp_obj, idx + 1);
- result = crw->crw_pos < end &&
- start < crw->crw_pos + crw->crw_count;
- }
- break;
- case CIT_FAULT:
- result = io->u.ci_fault.ft_index == idx;
- break;
- default:
- LBUG();
- }
- return result;
-}
-
-/**
* Called by read io, when page has to be read from the server.
*
* \see cl_io_operations::cio_read_page()
@@ -742,7 +616,6 @@ int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
LINVRNT(io->ci_type == CIT_READ || io->ci_type == CIT_FAULT);
LINVRNT(cl_page_is_owned(page, io));
LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
- LINVRNT(cl_page_in_io(page, io));
LINVRNT(cl_io_invariant(io));
queue = &io->ci_queue;
@@ -769,7 +642,7 @@ int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
break;
}
}
- if (result == 0)
+ if (result == 0 && queue->c2_qin.pl_nr > 0)
result = cl_io_submit_rw(env, io, CRT_READ, queue);
/*
* Unlock unsent pages in case of error.
@@ -781,77 +654,29 @@ int cl_io_read_page(const struct lu_env *env, struct cl_io *io,
EXPORT_SYMBOL(cl_io_read_page);
/**
- * Called by write io to prepare page to receive data from user buffer.
+ * Commit a list of contiguous pages into writeback cache.
*
- * \see cl_io_operations::cio_prepare_write()
+ * \returns 0 if all pages committed, or errcode if error occurred.
+ * \see cl_io_operations::cio_commit_async()
*/
-int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, unsigned from, unsigned to)
+int cl_io_commit_async(const struct lu_env *env, struct cl_io *io,
+ struct cl_page_list *queue, int from, int to,
+ cl_commit_cbt cb)
{
const struct cl_io_slice *scan;
int result = 0;
- LINVRNT(io->ci_type == CIT_WRITE);
- LINVRNT(cl_page_is_owned(page, io));
- LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
- LINVRNT(cl_io_invariant(io));
- LASSERT(cl_page_in_io(page, io));
-
- cl_io_for_each_reverse(scan, io) {
- if (scan->cis_iop->cio_prepare_write) {
- const struct cl_page_slice *slice;
-
- slice = cl_io_slice_page(scan, page);
- result = scan->cis_iop->cio_prepare_write(env, scan,
- slice,
- from, to);
- if (result != 0)
- break;
- }
- }
- return result;
-}
-EXPORT_SYMBOL(cl_io_prepare_write);
-
-/**
- * Called by write io after user data were copied into a page.
- *
- * \see cl_io_operations::cio_commit_write()
- */
-int cl_io_commit_write(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, unsigned from, unsigned to)
-{
- const struct cl_io_slice *scan;
- int result = 0;
-
- LINVRNT(io->ci_type == CIT_WRITE);
- LINVRNT(io->ci_state == CIS_IO_GOING || io->ci_state == CIS_LOCKED);
- LINVRNT(cl_io_invariant(io));
- /*
- * XXX Uh... not nice. Top level cl_io_commit_write() call (vvp->lov)
- * already called cl_page_cache_add(), moving page into CPS_CACHED
- * state. Better (and more general) way of dealing with such situation
- * is needed.
- */
- LASSERT(cl_page_is_owned(page, io) || page->cp_parent);
- LASSERT(cl_page_in_io(page, io));
-
cl_io_for_each(scan, io) {
- if (scan->cis_iop->cio_commit_write) {
- const struct cl_page_slice *slice;
-
- slice = cl_io_slice_page(scan, page);
- result = scan->cis_iop->cio_commit_write(env, scan,
- slice,
- from, to);
- if (result != 0)
- break;
- }
+ if (!scan->cis_iop->cio_commit_async)
+ continue;
+ result = scan->cis_iop->cio_commit_async(env, scan, queue,
+ from, to, cb);
+ if (result != 0)
+ break;
}
- LINVRNT(result <= 0);
return result;
}
-EXPORT_SYMBOL(cl_io_commit_write);
+EXPORT_SYMBOL(cl_io_commit_async);
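cl_io_commit_async() above is a plain top-to-bottom walk of the IO layer stack, skipping layers that do not implement the hook and stopping at the first failure. A compact sketch of that dispatch shape, using a hypothetical callback table instead of the real cl_io_slice machinery:

#include <stdio.h>

/* Hypothetical per-layer callback, analogous to cio_commit_async. */
typedef int (*commit_cb_t)(void *layer_data);

static int vvp_commit(void *d) { puts("vvp layer committed"); return 0; }
static int lov_commit(void *d) { puts("lov layer committed"); return 0; }
static int osc_commit(void *d) { puts("osc layer committed"); return 0; }

/* Walk the layer stack top to bottom; stop at the first failure,
 * mirroring the cl_io_for_each() loop above. */
static int commit_async(commit_cb_t *layers, int nr, void *data)
{
	int i, rc = 0;

	for (i = 0; i < nr; i++) {
		if (!layers[i])
			continue;	/* layer does not implement the hook */
		rc = layers[i](data);
		if (rc != 0)
			break;
	}
	return rc;
}

int main(void)
{
	commit_cb_t stack[] = { vvp_commit, lov_commit, osc_commit };

	return commit_async(stack, 3, NULL);
}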
/**
* Submits a list of pages for immediate io.
@@ -869,13 +694,10 @@ int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
const struct cl_io_slice *scan;
int result = 0;
- LINVRNT(crt < ARRAY_SIZE(scan->cis_iop->req_op));
-
cl_io_for_each(scan, io) {
- if (!scan->cis_iop->req_op[crt].cio_submit)
+ if (!scan->cis_iop->cio_submit)
continue;
- result = scan->cis_iop->req_op[crt].cio_submit(env, scan, crt,
- queue);
+ result = scan->cis_iop->cio_submit(env, scan, crt, queue);
if (result != 0)
break;
}
@@ -887,6 +709,9 @@ int cl_io_submit_rw(const struct lu_env *env, struct cl_io *io,
}
EXPORT_SYMBOL(cl_io_submit_rw);
+static void cl_page_list_assume(const struct lu_env *env,
+ struct cl_io *io, struct cl_page_list *plist);
+
/**
* Submit a sync_io and wait for the IO to be finished, or error happens.
* If \a timeout is zero, it means to wait for the IO unconditionally.
@@ -904,7 +729,7 @@ int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
pg->cp_sync_io = anchor;
}
- cl_sync_io_init(anchor, queue->c2_qin.pl_nr);
+ cl_sync_io_init(anchor, queue->c2_qin.pl_nr, &cl_sync_io_end);
rc = cl_io_submit_rw(env, io, iot, queue);
if (rc == 0) {
/*
@@ -915,12 +740,12 @@ int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
*/
cl_page_list_for_each(pg, &queue->c2_qin) {
pg->cp_sync_io = NULL;
- cl_sync_io_note(anchor, 1);
+ cl_sync_io_note(env, anchor, 1);
}
/* wait for the IO to be finished. */
- rc = cl_sync_io_wait(env, io, &queue->c2_qout,
- anchor, timeout);
+ rc = cl_sync_io_wait(env, anchor, timeout);
+ cl_page_list_assume(env, io, &queue->c2_qout);
} else {
LASSERT(list_empty(&queue->c2_qout.pl_pages));
cl_page_list_for_each(pg, &queue->c2_qin)
@@ -931,26 +756,6 @@ int cl_io_submit_sync(const struct lu_env *env, struct cl_io *io,
EXPORT_SYMBOL(cl_io_submit_sync);
/**
- * Cancel an IO which has been submitted by cl_io_submit_rw.
- */
-static int cl_io_cancel(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *queue)
-{
- struct cl_page *page;
- int result = 0;
-
- CERROR("Canceling ongoing page transmission\n");
- cl_page_list_for_each(page, queue) {
- int rc;
-
- LINVRNT(cl_page_in_io(page, io));
- rc = cl_page_cancel(env, page);
- result = result ?: rc;
- }
- return result;
-}
-
-/**
* Main io loop.
*
* Pumps io through iterations calling
@@ -1072,8 +877,8 @@ EXPORT_SYMBOL(cl_page_list_add);
/**
* Removes a page from a page list.
*/
-static void cl_page_list_del(const struct lu_env *env,
- struct cl_page_list *plist, struct cl_page *page)
+void cl_page_list_del(const struct lu_env *env, struct cl_page_list *plist,
+ struct cl_page *page)
{
LASSERT(plist->pl_nr > 0);
LINVRNT(plist->pl_owner == current);
@@ -1086,6 +891,7 @@ static void cl_page_list_del(const struct lu_env *env,
lu_ref_del_at(&page->cp_reference, &page->cp_queue_ref, "queue", plist);
cl_page_put(env, page);
}
+EXPORT_SYMBOL(cl_page_list_del);
/**
* Moves a page from one page list to another.
@@ -1106,6 +912,24 @@ void cl_page_list_move(struct cl_page_list *dst, struct cl_page_list *src,
EXPORT_SYMBOL(cl_page_list_move);
/**
+ * Moves a page from one page list to the head of another list.
+ */
+void cl_page_list_move_head(struct cl_page_list *dst, struct cl_page_list *src,
+ struct cl_page *page)
+{
+ LASSERT(src->pl_nr > 0);
+ LINVRNT(dst->pl_owner == current);
+ LINVRNT(src->pl_owner == current);
+
+ list_move(&page->cp_batch, &dst->pl_pages);
+ --src->pl_nr;
+ ++dst->pl_nr;
+ lu_ref_set_at(&page->cp_reference, &page->cp_queue_ref, "queue",
+ src, dst);
+}
+EXPORT_SYMBOL(cl_page_list_move_head);
+
+/**
* splice the cl_page_list, just as list head does
*/
void cl_page_list_splice(struct cl_page_list *list, struct cl_page_list *head)
@@ -1162,8 +986,7 @@ EXPORT_SYMBOL(cl_page_list_disown);
/**
* Releases pages from queue.
*/
-static void cl_page_list_fini(const struct lu_env *env,
- struct cl_page_list *plist)
+void cl_page_list_fini(const struct lu_env *env, struct cl_page_list *plist)
{
struct cl_page *page;
struct cl_page *temp;
@@ -1174,6 +997,7 @@ static void cl_page_list_fini(const struct lu_env *env,
cl_page_list_del(env, plist, page);
LASSERT(plist->pl_nr == 0);
}
+EXPORT_SYMBOL(cl_page_list_fini);
/**
* Assumes all pages in a queue.
@@ -1260,7 +1084,7 @@ EXPORT_SYMBOL(cl_2queue_init_page);
/**
* Returns top-level io.
*
- * \see cl_object_top(), cl_page_top().
+ * \see cl_object_top()
*/
struct cl_io *cl_io_top(struct cl_io *io)
{
@@ -1323,19 +1147,14 @@ static int cl_req_init(const struct lu_env *env, struct cl_req *req,
int result;
result = 0;
- page = cl_page_top(page);
- do {
- list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
- dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
- if (dev->cd_ops->cdo_req_init) {
- result = dev->cd_ops->cdo_req_init(env,
- dev, req);
- if (result != 0)
- break;
- }
+ list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
+ dev = lu2cl_dev(slice->cpl_obj->co_lu.lo_dev);
+ if (dev->cd_ops->cdo_req_init) {
+ result = dev->cd_ops->cdo_req_init(env, dev, req);
+ if (result != 0)
+ break;
}
- page = page->cp_child;
- } while (page && result == 0);
+ }
return result;
}
@@ -1384,14 +1203,16 @@ struct cl_req *cl_req_alloc(const struct lu_env *env, struct cl_page *page,
if (req->crq_o) {
req->crq_nrobjs = nr_objects;
result = cl_req_init(env, req, page);
- } else
+ } else {
result = -ENOMEM;
+ }
if (result != 0) {
cl_req_completion(env, req, result);
req = ERR_PTR(result);
}
- } else
+ } else {
req = ERR_PTR(-ENOMEM);
+ }
return req;
}
EXPORT_SYMBOL(cl_req_alloc);
@@ -1406,8 +1227,6 @@ void cl_req_page_add(const struct lu_env *env,
struct cl_req_obj *rqo;
int i;
- page = cl_page_top(page);
-
LASSERT(list_empty(&page->cp_flight));
LASSERT(!page->cp_req);
@@ -1438,8 +1257,6 @@ void cl_req_page_done(const struct lu_env *env, struct cl_page *page)
{
struct cl_req *req = page->cp_req;
- page = cl_page_top(page);
-
LASSERT(!list_empty(&page->cp_flight));
LASSERT(req->crq_nrpages > 0);
@@ -1511,25 +1328,39 @@ void cl_req_attr_set(const struct lu_env *env, struct cl_req *req,
}
EXPORT_SYMBOL(cl_req_attr_set);
+/* cl_sync_io_callback assumes the caller must call cl_sync_io_wait() to
+ * wait for the IO to finish.
+ */
+void cl_sync_io_end(const struct lu_env *env, struct cl_sync_io *anchor)
+{
+ wake_up_all(&anchor->csi_waitq);
+
+ /* it's safe to nuke or reuse anchor now */
+ atomic_set(&anchor->csi_barrier, 0);
+}
+EXPORT_SYMBOL(cl_sync_io_end);
/**
- * Initialize synchronous io wait anchor, for transfer of \a nrpages pages.
+ * Initialize synchronous io wait anchor
*/
-void cl_sync_io_init(struct cl_sync_io *anchor, int nrpages)
+void cl_sync_io_init(struct cl_sync_io *anchor, int nr,
+ void (*end)(const struct lu_env *, struct cl_sync_io *))
{
+ memset(anchor, 0, sizeof(*anchor));
init_waitqueue_head(&anchor->csi_waitq);
- atomic_set(&anchor->csi_sync_nr, nrpages);
- atomic_set(&anchor->csi_barrier, nrpages > 0);
+ atomic_set(&anchor->csi_sync_nr, nr);
+ atomic_set(&anchor->csi_barrier, nr > 0);
anchor->csi_sync_rc = 0;
+ anchor->csi_end_io = end;
+ LASSERT(end);
}
EXPORT_SYMBOL(cl_sync_io_init);
/**
- * Wait until all transfer completes. Transfer completion routine has to call
- * cl_sync_io_note() for every page.
+ * Wait until all IO completes. Transfer completion routine has to call
+ * cl_sync_io_note() for every entity.
*/
-int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
- struct cl_page_list *queue, struct cl_sync_io *anchor,
+int cl_sync_io_wait(const struct lu_env *env, struct cl_sync_io *anchor,
long timeout)
{
struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(timeout),
@@ -1542,11 +1373,9 @@ int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
atomic_read(&anchor->csi_sync_nr) == 0,
&lwi);
if (rc < 0) {
- CERROR("SYNC IO failed with error: %d, try to cancel %d remaining pages\n",
+ CERROR("IO failed: %d, still wait for %d remaining entries\n",
rc, atomic_read(&anchor->csi_sync_nr));
- (void)cl_io_cancel(env, io, queue);
-
lwi = (struct l_wait_info) { 0 };
(void)l_wait_event(anchor->csi_waitq,
atomic_read(&anchor->csi_sync_nr) == 0,
@@ -1555,14 +1384,12 @@ int cl_sync_io_wait(const struct lu_env *env, struct cl_io *io,
rc = anchor->csi_sync_rc;
}
LASSERT(atomic_read(&anchor->csi_sync_nr) == 0);
- cl_page_list_assume(env, io, queue);
/* wait until cl_sync_io_note() has done wakeup */
while (unlikely(atomic_read(&anchor->csi_barrier) != 0)) {
cpu_relax();
}
- POISON(anchor, 0x5a, sizeof(*anchor));
return rc;
}
EXPORT_SYMBOL(cl_sync_io_wait);
@@ -1570,7 +1397,8 @@ EXPORT_SYMBOL(cl_sync_io_wait);
/**
* Indicate that transfer of a single page completed.
*/
-void cl_sync_io_note(struct cl_sync_io *anchor, int ioret)
+void cl_sync_io_note(const struct lu_env *env, struct cl_sync_io *anchor,
+ int ioret)
{
if (anchor->csi_sync_rc == 0 && ioret < 0)
anchor->csi_sync_rc = ioret;
@@ -1581,9 +1409,9 @@ void cl_sync_io_note(struct cl_sync_io *anchor, int ioret)
*/
LASSERT(atomic_read(&anchor->csi_sync_nr) > 0);
if (atomic_dec_and_test(&anchor->csi_sync_nr)) {
- wake_up_all(&anchor->csi_waitq);
- /* it's safe to nuke or reuse anchor now */
- atomic_set(&anchor->csi_barrier, 0);
+ LASSERT(anchor->csi_end_io);
+ anchor->csi_end_io(env, anchor);
+ /* Can't access anchor any more */
}
}
EXPORT_SYMBOL(cl_sync_io_note);
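The reworked cl_sync_io above is a counted completion anchor: cl_sync_io_init() records how many transfers are outstanding and which end-callback to run, cl_sync_io_note() decrements the count and fires the callback on the last completion, and cl_sync_io_wait() blocks until the count reaches zero. A userspace approximation with a mutex and condition variable (the kernel version uses atomics, l_wait_event() with a timeout, and a barrier flag instead):

#include <pthread.h>

struct sync_anchor {
	pthread_mutex_t lock;
	pthread_cond_t  done;
	int		nr;	/* outstanding transfers */
	int		rc;	/* first error, if any   */
	void		(*end)(struct sync_anchor *);
};

/* Default end-callback: wake the waiters (like cl_sync_io_end()). */
static void anchor_end(struct sync_anchor *a)
{
	pthread_cond_broadcast(&a->done);
}

static void anchor_init(struct sync_anchor *a, int nr,
			void (*end)(struct sync_anchor *))
{
	pthread_mutex_init(&a->lock, NULL);
	pthread_cond_init(&a->done, NULL);
	a->nr = nr;
	a->rc = 0;
	a->end = end;
}

/* One transfer finished; the last one runs the end-callback. */
static void anchor_note(struct sync_anchor *a, int ioret)
{
	pthread_mutex_lock(&a->lock);
	if (a->rc == 0 && ioret < 0)
		a->rc = ioret;
	if (--a->nr == 0)
		a->end(a);
	pthread_mutex_unlock(&a->lock);
}

static int anchor_wait(struct sync_anchor *a)
{
	int rc;

	pthread_mutex_lock(&a->lock);
	while (a->nr > 0)
		pthread_cond_wait(&a->done, &a->lock);
	rc = a->rc;
	pthread_mutex_unlock(&a->lock);
	return rc;
}

int main(void)
{
	struct sync_anchor a;

	anchor_init(&a, 2, anchor_end);
	anchor_note(&a, 0);	/* first transfer done          */
	anchor_note(&a, 0);	/* last one fires the callback  */
	return anchor_wait(&a);
}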
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_lock.c b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
index aec644eb4db9..26a576b63a72 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_lock.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_lock.c
@@ -36,6 +36,7 @@
* Client Extent Lock.
*
* Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@intel.com>
*/
#define DEBUG_SUBSYSTEM S_CLASS
@@ -47,138 +48,18 @@
#include "../include/cl_object.h"
#include "cl_internal.h"
-/** Lock class of cl_lock::cll_guard */
-static struct lock_class_key cl_lock_guard_class;
-static struct kmem_cache *cl_lock_kmem;
-
-static struct lu_kmem_descr cl_lock_caches[] = {
- {
- .ckd_cache = &cl_lock_kmem,
- .ckd_name = "cl_lock_kmem",
- .ckd_size = sizeof (struct cl_lock)
- },
- {
- .ckd_cache = NULL
- }
-};
-
-#define CS_LOCK_INC(o, item)
-#define CS_LOCK_DEC(o, item)
-#define CS_LOCKSTATE_INC(o, state)
-#define CS_LOCKSTATE_DEC(o, state)
-
-/**
- * Basic lock invariant that is maintained at all times. Caller either has a
- * reference to \a lock, or somehow assures that \a lock cannot be freed.
- *
- * \see cl_lock_invariant()
- */
-static int cl_lock_invariant_trusted(const struct lu_env *env,
- const struct cl_lock *lock)
-{
- return ergo(lock->cll_state == CLS_FREEING, lock->cll_holds == 0) &&
- atomic_read(&lock->cll_ref) >= lock->cll_holds &&
- lock->cll_holds >= lock->cll_users &&
- lock->cll_holds >= 0 &&
- lock->cll_users >= 0 &&
- lock->cll_depth >= 0;
-}
-
-/**
- * Stronger lock invariant, checking that caller has a reference on a lock.
- *
- * \see cl_lock_invariant_trusted()
- */
-static int cl_lock_invariant(const struct lu_env *env,
- const struct cl_lock *lock)
-{
- int result;
-
- result = atomic_read(&lock->cll_ref) > 0 &&
- cl_lock_invariant_trusted(env, lock);
- if (!result && env)
- CL_LOCK_DEBUG(D_ERROR, env, lock, "invariant broken\n");
- return result;
-}
-
-/**
- * Returns lock "nesting": 0 for a top-lock and 1 for a sub-lock.
- */
-static enum clt_nesting_level cl_lock_nesting(const struct cl_lock *lock)
-{
- return cl_object_header(lock->cll_descr.cld_obj)->coh_nesting;
-}
-
-/**
- * Returns a set of counters for this lock, depending on a lock nesting.
- */
-static struct cl_thread_counters *cl_lock_counters(const struct lu_env *env,
- const struct cl_lock *lock)
-{
- struct cl_thread_info *info;
- enum clt_nesting_level nesting;
-
- info = cl_env_info(env);
- nesting = cl_lock_nesting(lock);
- LASSERT(nesting < ARRAY_SIZE(info->clt_counters));
- return &info->clt_counters[nesting];
-}
-
static void cl_lock_trace0(int level, const struct lu_env *env,
const char *prefix, const struct cl_lock *lock,
const char *func, const int line)
{
struct cl_object_header *h = cl_object_header(lock->cll_descr.cld_obj);
- CDEBUG(level, "%s: %p@(%d %p %d %d %d %d %d %lx)(%p/%d/%d) at %s():%d\n",
- prefix, lock, atomic_read(&lock->cll_ref),
- lock->cll_guarder, lock->cll_depth,
- lock->cll_state, lock->cll_error, lock->cll_holds,
- lock->cll_users, lock->cll_flags,
- env, h->coh_nesting, cl_lock_nr_mutexed(env),
- func, line);
+ CDEBUG(level, "%s: %p (%p/%d) at %s():%d\n",
+ prefix, lock, env, h->coh_nesting, func, line);
}
-
-#define cl_lock_trace(level, env, prefix, lock) \
+#define cl_lock_trace(level, env, prefix, lock) \
cl_lock_trace0(level, env, prefix, lock, __func__, __LINE__)
-#define RETIP ((unsigned long)__builtin_return_address(0))
-
-#ifdef CONFIG_LOCKDEP
-static struct lock_class_key cl_lock_key;
-
-static void cl_lock_lockdep_init(struct cl_lock *lock)
-{
- lockdep_set_class_and_name(lock, &cl_lock_key, "EXT");
-}
-
-static void cl_lock_lockdep_acquire(const struct lu_env *env,
- struct cl_lock *lock, __u32 enqflags)
-{
- cl_lock_counters(env, lock)->ctc_nr_locks_acquired++;
- lock_map_acquire(&lock->dep_map);
-}
-
-static void cl_lock_lockdep_release(const struct lu_env *env,
- struct cl_lock *lock)
-{
- cl_lock_counters(env, lock)->ctc_nr_locks_acquired--;
- lock_release(&lock->dep_map, 0, RETIP);
-}
-
-#else /* !CONFIG_LOCKDEP */
-
-static void cl_lock_lockdep_init(struct cl_lock *lock)
-{}
-static void cl_lock_lockdep_acquire(const struct lu_env *env,
- struct cl_lock *lock, __u32 enqflags)
-{}
-static void cl_lock_lockdep_release(const struct lu_env *env,
- struct cl_lock *lock)
-{}
-
-#endif /* !CONFIG_LOCKDEP */
-
/**
* Adds lock slice to the compound lock.
*
@@ -199,62 +80,10 @@ void cl_lock_slice_add(struct cl_lock *lock, struct cl_lock_slice *slice,
}
EXPORT_SYMBOL(cl_lock_slice_add);
-/**
- * Returns true iff a lock with the mode \a has provides at least the same
- * guarantees as a lock with the mode \a need.
- */
-int cl_lock_mode_match(enum cl_lock_mode has, enum cl_lock_mode need)
-{
- LINVRNT(need == CLM_READ || need == CLM_WRITE ||
- need == CLM_PHANTOM || need == CLM_GROUP);
- LINVRNT(has == CLM_READ || has == CLM_WRITE ||
- has == CLM_PHANTOM || has == CLM_GROUP);
- CLASSERT(CLM_PHANTOM < CLM_READ);
- CLASSERT(CLM_READ < CLM_WRITE);
- CLASSERT(CLM_WRITE < CLM_GROUP);
-
- if (has != CLM_GROUP)
- return need <= has;
- else
- return need == has;
-}
-EXPORT_SYMBOL(cl_lock_mode_match);
-
-/**
- * Returns true iff extent portions of lock descriptions match.
- */
-int cl_lock_ext_match(const struct cl_lock_descr *has,
- const struct cl_lock_descr *need)
-{
- return
- has->cld_start <= need->cld_start &&
- has->cld_end >= need->cld_end &&
- cl_lock_mode_match(has->cld_mode, need->cld_mode) &&
- (has->cld_mode != CLM_GROUP || has->cld_gid == need->cld_gid);
-}
-EXPORT_SYMBOL(cl_lock_ext_match);
-
-/**
- * Returns true iff a lock with the description \a has provides at least the
- * same guarantees as a lock with the description \a need.
- */
-int cl_lock_descr_match(const struct cl_lock_descr *has,
- const struct cl_lock_descr *need)
+void cl_lock_fini(const struct lu_env *env, struct cl_lock *lock)
{
- return
- cl_object_same(has->cld_obj, need->cld_obj) &&
- cl_lock_ext_match(has, need);
-}
-EXPORT_SYMBOL(cl_lock_descr_match);
+ cl_lock_trace(D_DLMTRACE, env, "destroy lock", lock);
-static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
-{
- struct cl_object *obj = lock->cll_descr.cld_obj;
-
- LINVRNT(!cl_lock_is_mutexed(lock));
-
- cl_lock_trace(D_DLMTRACE, env, "free lock", lock);
- might_sleep();
while (!list_empty(&lock->cll_layers)) {
struct cl_lock_slice *slice;
@@ -263,350 +92,36 @@ static void cl_lock_free(const struct lu_env *env, struct cl_lock *lock)
list_del_init(lock->cll_layers.next);
slice->cls_ops->clo_fini(env, slice);
}
- CS_LOCK_DEC(obj, total);
- CS_LOCKSTATE_DEC(obj, lock->cll_state);
- lu_object_ref_del_at(&obj->co_lu, &lock->cll_obj_ref, "cl_lock", lock);
- cl_object_put(env, obj);
- lu_ref_fini(&lock->cll_reference);
- lu_ref_fini(&lock->cll_holders);
- mutex_destroy(&lock->cll_guard);
- kmem_cache_free(cl_lock_kmem, lock);
-}
-
-/**
- * Releases a reference on a lock.
- *
- * When last reference is released, lock is returned to the cache, unless it
- * is in cl_lock_state::CLS_FREEING state, in which case it is destroyed
- * immediately.
- *
- * \see cl_object_put(), cl_page_put()
- */
-void cl_lock_put(const struct lu_env *env, struct cl_lock *lock)
-{
- struct cl_object *obj;
-
- LINVRNT(cl_lock_invariant(env, lock));
- obj = lock->cll_descr.cld_obj;
- LINVRNT(obj);
-
- CDEBUG(D_TRACE, "releasing reference: %d %p %lu\n",
- atomic_read(&lock->cll_ref), lock, RETIP);
-
- if (atomic_dec_and_test(&lock->cll_ref)) {
- if (lock->cll_state == CLS_FREEING) {
- LASSERT(list_empty(&lock->cll_linkage));
- cl_lock_free(env, lock);
- }
- CS_LOCK_DEC(obj, busy);
- }
-}
-EXPORT_SYMBOL(cl_lock_put);
-
-/**
- * Acquires an additional reference to a lock.
- *
- * This can be called only by caller already possessing a reference to \a
- * lock.
- *
- * \see cl_object_get(), cl_page_get()
- */
-void cl_lock_get(struct cl_lock *lock)
-{
- LINVRNT(cl_lock_invariant(NULL, lock));
- CDEBUG(D_TRACE, "acquiring reference: %d %p %lu\n",
- atomic_read(&lock->cll_ref), lock, RETIP);
- atomic_inc(&lock->cll_ref);
-}
-EXPORT_SYMBOL(cl_lock_get);
-
-/**
- * Acquires a reference to a lock.
- *
- * This is much like cl_lock_get(), except that this function can be used to
- * acquire initial reference to the cached lock. Caller has to deal with all
- * possible races. Use with care!
- *
- * \see cl_page_get_trust()
- */
-void cl_lock_get_trust(struct cl_lock *lock)
-{
- CDEBUG(D_TRACE, "acquiring trusted reference: %d %p %lu\n",
- atomic_read(&lock->cll_ref), lock, RETIP);
- if (atomic_inc_return(&lock->cll_ref) == 1)
- CS_LOCK_INC(lock->cll_descr.cld_obj, busy);
-}
-EXPORT_SYMBOL(cl_lock_get_trust);
-
-/**
- * Helper function destroying the lock that wasn't completely initialized.
- *
- * Other threads can acquire references to the top-lock through its
- * sub-locks. Hence, it cannot be cl_lock_free()-ed immediately.
- */
-static void cl_lock_finish(const struct lu_env *env, struct cl_lock *lock)
-{
- cl_lock_mutex_get(env, lock);
- cl_lock_cancel(env, lock);
- cl_lock_delete(env, lock);
- cl_lock_mutex_put(env, lock);
- cl_lock_put(env, lock);
-}
-
-static struct cl_lock *cl_lock_alloc(const struct lu_env *env,
- struct cl_object *obj,
- const struct cl_io *io,
- const struct cl_lock_descr *descr)
-{
- struct cl_lock *lock;
- struct lu_object_header *head;
-
- lock = kmem_cache_zalloc(cl_lock_kmem, GFP_NOFS);
- if (lock) {
- atomic_set(&lock->cll_ref, 1);
- lock->cll_descr = *descr;
- lock->cll_state = CLS_NEW;
- cl_object_get(obj);
- lu_object_ref_add_at(&obj->co_lu, &lock->cll_obj_ref, "cl_lock",
- lock);
- INIT_LIST_HEAD(&lock->cll_layers);
- INIT_LIST_HEAD(&lock->cll_linkage);
- INIT_LIST_HEAD(&lock->cll_inclosure);
- lu_ref_init(&lock->cll_reference);
- lu_ref_init(&lock->cll_holders);
- mutex_init(&lock->cll_guard);
- lockdep_set_class(&lock->cll_guard, &cl_lock_guard_class);
- init_waitqueue_head(&lock->cll_wq);
- head = obj->co_lu.lo_header;
- CS_LOCKSTATE_INC(obj, CLS_NEW);
- CS_LOCK_INC(obj, total);
- CS_LOCK_INC(obj, create);
- cl_lock_lockdep_init(lock);
- list_for_each_entry(obj, &head->loh_layers, co_lu.lo_linkage) {
- int err;
-
- err = obj->co_ops->coo_lock_init(env, obj, lock, io);
- if (err != 0) {
- cl_lock_finish(env, lock);
- lock = ERR_PTR(err);
- break;
- }
- }
- } else
- lock = ERR_PTR(-ENOMEM);
- return lock;
-}
-
-/**
- * Transfer the lock into INTRANSIT state and return the original state.
- *
- * \pre state: CLS_CACHED, CLS_HELD or CLS_ENQUEUED
- * \post state: CLS_INTRANSIT
- * \see CLS_INTRANSIT
- */
-static enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
- struct cl_lock *lock)
-{
- enum cl_lock_state state = lock->cll_state;
-
- LASSERT(cl_lock_is_mutexed(lock));
- LASSERT(state != CLS_INTRANSIT);
- LASSERTF(state >= CLS_ENQUEUED && state <= CLS_CACHED,
- "Malformed lock state %d.\n", state);
-
- cl_lock_state_set(env, lock, CLS_INTRANSIT);
- lock->cll_intransit_owner = current;
- cl_lock_hold_add(env, lock, "intransit", current);
- return state;
-}
-
-/**
- * Exit the intransit state and restore the lock state to the original state
- */
-static void cl_lock_extransit(const struct lu_env *env, struct cl_lock *lock,
- enum cl_lock_state state)
-{
- LASSERT(cl_lock_is_mutexed(lock));
- LASSERT(lock->cll_state == CLS_INTRANSIT);
- LASSERT(state != CLS_INTRANSIT);
- LASSERT(lock->cll_intransit_owner == current);
-
- lock->cll_intransit_owner = NULL;
- cl_lock_state_set(env, lock, state);
- cl_lock_unhold(env, lock, "intransit", current);
-}
-
-/**
- * Checking whether the lock is intransit state
- */
-int cl_lock_is_intransit(struct cl_lock *lock)
-{
- LASSERT(cl_lock_is_mutexed(lock));
- return lock->cll_state == CLS_INTRANSIT &&
- lock->cll_intransit_owner != current;
-}
-EXPORT_SYMBOL(cl_lock_is_intransit);
-/**
- * Returns true iff lock is "suitable" for given io. E.g., locks acquired by
- * truncate and O_APPEND cannot be reused for read/non-append-write, as they
- * cover multiple stripes and can trigger cascading timeouts.
- */
-static int cl_lock_fits_into(const struct lu_env *env,
- const struct cl_lock *lock,
- const struct cl_lock_descr *need,
- const struct cl_io *io)
-{
- const struct cl_lock_slice *slice;
-
- LINVRNT(cl_lock_invariant_trusted(env, lock));
- list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_fits_into &&
- !slice->cls_ops->clo_fits_into(env, slice, need, io))
- return 0;
- }
- return 1;
+ POISON(lock, 0x5a, sizeof(*lock));
}
+EXPORT_SYMBOL(cl_lock_fini);
-static struct cl_lock *cl_lock_lookup(const struct lu_env *env,
- struct cl_object *obj,
- const struct cl_io *io,
- const struct cl_lock_descr *need)
+int cl_lock_init(const struct lu_env *env, struct cl_lock *lock,
+ const struct cl_io *io)
{
- struct cl_lock *lock;
- struct cl_object_header *head;
-
- head = cl_object_header(obj);
- assert_spin_locked(&head->coh_lock_guard);
- CS_LOCK_INC(obj, lookup);
- list_for_each_entry(lock, &head->coh_locks, cll_linkage) {
- int matched;
-
- matched = cl_lock_ext_match(&lock->cll_descr, need) &&
- lock->cll_state < CLS_FREEING &&
- lock->cll_error == 0 &&
- !(lock->cll_flags & CLF_CANCELLED) &&
- cl_lock_fits_into(env, lock, need, io);
- CDEBUG(D_DLMTRACE, "has: "DDESCR"(%d) need: "DDESCR": %d\n",
- PDESCR(&lock->cll_descr), lock->cll_state, PDESCR(need),
- matched);
- if (matched) {
- cl_lock_get_trust(lock);
- CS_LOCK_INC(obj, hit);
- return lock;
- }
- }
- return NULL;
-}
-
-/**
- * Returns a lock matching description \a need.
- *
- * This is the main entry point into the cl_lock caching interface. First, a
- * cache (implemented as a per-object linked list) is consulted. If lock is
- * found there, it is returned immediately. Otherwise new lock is allocated
- * and returned. In any case, additional reference to lock is acquired.
- *
- * \see cl_object_find(), cl_page_find()
- */
-static struct cl_lock *cl_lock_find(const struct lu_env *env,
- const struct cl_io *io,
- const struct cl_lock_descr *need)
-{
- struct cl_object_header *head;
- struct cl_object *obj;
- struct cl_lock *lock;
-
- obj = need->cld_obj;
- head = cl_object_header(obj);
-
- spin_lock(&head->coh_lock_guard);
- lock = cl_lock_lookup(env, obj, io, need);
- spin_unlock(&head->coh_lock_guard);
+ struct cl_object *obj = lock->cll_descr.cld_obj;
+ struct cl_object *scan;
+ int result = 0;
- if (!lock) {
- lock = cl_lock_alloc(env, obj, io, need);
- if (!IS_ERR(lock)) {
- struct cl_lock *ghost;
+ /* Make sure cl_lock::cll_descr is initialized. */
+ LASSERT(obj);
- spin_lock(&head->coh_lock_guard);
- ghost = cl_lock_lookup(env, obj, io, need);
- if (!ghost) {
- cl_lock_get_trust(lock);
- list_add_tail(&lock->cll_linkage,
- &head->coh_locks);
- spin_unlock(&head->coh_lock_guard);
- CS_LOCK_INC(obj, busy);
- } else {
- spin_unlock(&head->coh_lock_guard);
- /*
- * Other threads can acquire references to the
- * top-lock through its sub-locks. Hence, it
- * cannot be cl_lock_free()-ed immediately.
- */
- cl_lock_finish(env, lock);
- lock = ghost;
- }
+ INIT_LIST_HEAD(&lock->cll_layers);
+ list_for_each_entry(scan, &obj->co_lu.lo_header->loh_layers,
+ co_lu.lo_linkage) {
+ result = scan->co_ops->coo_lock_init(env, scan, lock, io);
+ if (result != 0) {
+ cl_lock_fini(env, lock);
+ break;
}
}
- return lock;
-}
-/**
- * Returns existing lock matching given description. This is similar to
- * cl_lock_find() except that no new lock is created, and returned lock is
- * guaranteed to be in enum cl_lock_state::CLS_HELD state.
- */
-struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
- const struct cl_lock_descr *need,
- const char *scope, const void *source)
-{
- struct cl_object_header *head;
- struct cl_object *obj;
- struct cl_lock *lock;
-
- obj = need->cld_obj;
- head = cl_object_header(obj);
-
- do {
- spin_lock(&head->coh_lock_guard);
- lock = cl_lock_lookup(env, obj, io, need);
- spin_unlock(&head->coh_lock_guard);
- if (!lock)
- return NULL;
-
- cl_lock_mutex_get(env, lock);
- if (lock->cll_state == CLS_INTRANSIT)
- /* Don't care return value. */
- cl_lock_state_wait(env, lock);
- if (lock->cll_state == CLS_FREEING) {
- cl_lock_mutex_put(env, lock);
- cl_lock_put(env, lock);
- lock = NULL;
- }
- } while (!lock);
-
- cl_lock_hold_add(env, lock, scope, source);
- cl_lock_user_add(env, lock);
- if (lock->cll_state == CLS_CACHED)
- cl_use_try(env, lock, 1);
- if (lock->cll_state == CLS_HELD) {
- cl_lock_mutex_put(env, lock);
- cl_lock_lockdep_acquire(env, lock, 0);
- cl_lock_put(env, lock);
- } else {
- cl_unuse_try(env, lock);
- cl_lock_unhold(env, lock, scope, source);
- cl_lock_mutex_put(env, lock);
- cl_lock_put(env, lock);
- lock = NULL;
- }
-
- return lock;
+ return result;
}
-EXPORT_SYMBOL(cl_lock_peek);
+EXPORT_SYMBOL(cl_lock_init);
/**
- * Returns a slice within a lock, corresponding to the given layer in the
+ * Returns a slice with a lock, corresponding to the given layer in the
* device stack.
*
* \see cl_page_at()
@@ -616,8 +131,6 @@ const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
{
const struct cl_lock_slice *slice;
- LINVRNT(cl_lock_invariant_trusted(NULL, lock));
-
list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
if (slice->cls_obj->co_lu.lo_dev->ld_type == dtype)
return slice;
@@ -626,1537 +139,96 @@ const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
}
EXPORT_SYMBOL(cl_lock_at);
-static void cl_lock_mutex_tail(const struct lu_env *env, struct cl_lock *lock)
-{
- struct cl_thread_counters *counters;
-
- counters = cl_lock_counters(env, lock);
- lock->cll_depth++;
- counters->ctc_nr_locks_locked++;
- lu_ref_add(&counters->ctc_locks_locked, "cll_guard", lock);
- cl_lock_trace(D_TRACE, env, "got mutex", lock);
-}
-
-/**
- * Locks cl_lock object.
- *
- * This is used to manipulate cl_lock fields, and to serialize state
- * transitions in the lock state machine.
- *
- * \post cl_lock_is_mutexed(lock)
- *
- * \see cl_lock_mutex_put()
- */
-void cl_lock_mutex_get(const struct lu_env *env, struct cl_lock *lock)
-{
- LINVRNT(cl_lock_invariant(env, lock));
-
- if (lock->cll_guarder == current) {
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(lock->cll_depth > 0);
- } else {
- struct cl_object_header *hdr;
- struct cl_thread_info *info;
- int i;
-
- LINVRNT(lock->cll_guarder != current);
- hdr = cl_object_header(lock->cll_descr.cld_obj);
- /*
- * Check that mutices are taken in the bottom-to-top order.
- */
- info = cl_env_info(env);
- for (i = 0; i < hdr->coh_nesting; ++i)
- LASSERT(info->clt_counters[i].ctc_nr_locks_locked == 0);
- mutex_lock_nested(&lock->cll_guard, hdr->coh_nesting);
- lock->cll_guarder = current;
- LINVRNT(lock->cll_depth == 0);
- }
- cl_lock_mutex_tail(env, lock);
-}
-EXPORT_SYMBOL(cl_lock_mutex_get);
-
-/**
- * Try-locks cl_lock object.
- *
- * \retval 0 \a lock was successfully locked
- *
- * \retval -EBUSY \a lock cannot be locked right now
- *
- * \post ergo(result == 0, cl_lock_is_mutexed(lock))
- *
- * \see cl_lock_mutex_get()
- */
-static int cl_lock_mutex_try(const struct lu_env *env, struct cl_lock *lock)
-{
- int result;
-
- LINVRNT(cl_lock_invariant_trusted(env, lock));
-
- result = 0;
- if (lock->cll_guarder == current) {
- LINVRNT(lock->cll_depth > 0);
- cl_lock_mutex_tail(env, lock);
- } else if (mutex_trylock(&lock->cll_guard)) {
- LINVRNT(lock->cll_depth == 0);
- lock->cll_guarder = current;
- cl_lock_mutex_tail(env, lock);
- } else
- result = -EBUSY;
- return result;
-}
-
-/**
- {* Unlocks cl_lock object.
- *
- * \pre cl_lock_is_mutexed(lock)
- *
- * \see cl_lock_mutex_get()
- */
-void cl_lock_mutex_put(const struct lu_env *env, struct cl_lock *lock)
-{
- struct cl_thread_counters *counters;
-
- LINVRNT(cl_lock_invariant(env, lock));
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(lock->cll_guarder == current);
- LINVRNT(lock->cll_depth > 0);
-
- counters = cl_lock_counters(env, lock);
- LINVRNT(counters->ctc_nr_locks_locked > 0);
-
- cl_lock_trace(D_TRACE, env, "put mutex", lock);
- lu_ref_del(&counters->ctc_locks_locked, "cll_guard", lock);
- counters->ctc_nr_locks_locked--;
- if (--lock->cll_depth == 0) {
- lock->cll_guarder = NULL;
- mutex_unlock(&lock->cll_guard);
- }
-}
-EXPORT_SYMBOL(cl_lock_mutex_put);
-
-/**
- * Returns true iff lock's mutex is owned by the current thread.
- */
-int cl_lock_is_mutexed(struct cl_lock *lock)
-{
- return lock->cll_guarder == current;
-}
-EXPORT_SYMBOL(cl_lock_is_mutexed);
-
-/**
- * Returns number of cl_lock mutices held by the current thread (environment).
- */
-int cl_lock_nr_mutexed(const struct lu_env *env)
-{
- struct cl_thread_info *info;
- int i;
- int locked;
-
- /*
- * NOTE: if summation across all nesting levels (currently 2) proves
- * too expensive, a summary counter can be added to
- * struct cl_thread_info.
- */
- info = cl_env_info(env);
- for (i = 0, locked = 0; i < ARRAY_SIZE(info->clt_counters); ++i)
- locked += info->clt_counters[i].ctc_nr_locks_locked;
- return locked;
-}
-EXPORT_SYMBOL(cl_lock_nr_mutexed);
-
-static void cl_lock_cancel0(const struct lu_env *env, struct cl_lock *lock)
-{
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- if (!(lock->cll_flags & CLF_CANCELLED)) {
- const struct cl_lock_slice *slice;
-
- lock->cll_flags |= CLF_CANCELLED;
- list_for_each_entry_reverse(slice, &lock->cll_layers,
- cls_linkage) {
- if (slice->cls_ops->clo_cancel)
- slice->cls_ops->clo_cancel(env, slice);
- }
- }
-}
-
-static void cl_lock_delete0(const struct lu_env *env, struct cl_lock *lock)
-{
- struct cl_object_header *head;
- const struct cl_lock_slice *slice;
-
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
-
- if (lock->cll_state < CLS_FREEING) {
- bool in_cache;
-
- LASSERT(lock->cll_state != CLS_INTRANSIT);
- cl_lock_state_set(env, lock, CLS_FREEING);
-
- head = cl_object_header(lock->cll_descr.cld_obj);
-
- spin_lock(&head->coh_lock_guard);
- in_cache = !list_empty(&lock->cll_linkage);
- if (in_cache)
- list_del_init(&lock->cll_linkage);
- spin_unlock(&head->coh_lock_guard);
-
- if (in_cache) /* coh_locks cache holds a refcount. */
- cl_lock_put(env, lock);
-
- /*
- * From now on, no new references to this lock can be acquired
- * by cl_lock_lookup().
- */
- list_for_each_entry_reverse(slice, &lock->cll_layers,
- cls_linkage) {
- if (slice->cls_ops->clo_delete)
- slice->cls_ops->clo_delete(env, slice);
- }
- /*
- * From now on, no new references to this lock can be acquired
- * by layer-specific means (like a pointer from struct
- * ldlm_lock in osc, or a pointer from top-lock to sub-lock in
- * lov).
- *
- * Lock will be finally freed in cl_lock_put() when last of
- * existing references goes away.
- */
- }
-}
-
-/**
- * Mod(ifie)s cl_lock::cll_holds counter for a given lock. Also, for a
- * top-lock (nesting == 0) accounts for this modification in the per-thread
- * debugging counters. Sub-lock holds can be released by a thread different
- * from one that acquired it.
- */
-static void cl_lock_hold_mod(const struct lu_env *env, struct cl_lock *lock,
- int delta)
-{
- struct cl_thread_counters *counters;
- enum clt_nesting_level nesting;
-
- lock->cll_holds += delta;
- nesting = cl_lock_nesting(lock);
- if (nesting == CNL_TOP) {
- counters = &cl_env_info(env)->clt_counters[CNL_TOP];
- counters->ctc_nr_held += delta;
- LASSERT(counters->ctc_nr_held >= 0);
- }
-}
-
-/**
- * Mod(ifie)s cl_lock::cll_users counter for a given lock. See
- * cl_lock_hold_mod() for the explanation of the debugging code.
- */
-static void cl_lock_used_mod(const struct lu_env *env, struct cl_lock *lock,
- int delta)
-{
- struct cl_thread_counters *counters;
- enum clt_nesting_level nesting;
-
- lock->cll_users += delta;
- nesting = cl_lock_nesting(lock);
- if (nesting == CNL_TOP) {
- counters = &cl_env_info(env)->clt_counters[CNL_TOP];
- counters->ctc_nr_used += delta;
- LASSERT(counters->ctc_nr_used >= 0);
- }
-}
-
-void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source)
-{
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERT(lock->cll_holds > 0);
-
- cl_lock_trace(D_DLMTRACE, env, "hold release lock", lock);
- lu_ref_del(&lock->cll_holders, scope, source);
- cl_lock_hold_mod(env, lock, -1);
- if (lock->cll_holds == 0) {
- CL_LOCK_ASSERT(lock->cll_state != CLS_HELD, env, lock);
- if (lock->cll_descr.cld_mode == CLM_PHANTOM ||
- lock->cll_descr.cld_mode == CLM_GROUP ||
- lock->cll_state != CLS_CACHED)
- /*
- * If lock is still phantom or grouplock when user is
- * done with it---destroy the lock.
- */
- lock->cll_flags |= CLF_CANCELPEND|CLF_DOOMED;
- if (lock->cll_flags & CLF_CANCELPEND) {
- lock->cll_flags &= ~CLF_CANCELPEND;
- cl_lock_cancel0(env, lock);
- }
- if (lock->cll_flags & CLF_DOOMED) {
- /* no longer doomed: it's dead... Jim. */
- lock->cll_flags &= ~CLF_DOOMED;
- cl_lock_delete0(env, lock);
- }
- }
-}
-EXPORT_SYMBOL(cl_lock_hold_release);
-
-/**
- * Waits until lock state is changed.
- *
- * This function is called with cl_lock mutex locked, atomically releases
- * mutex and goes to sleep, waiting for a lock state change (signaled by
- * cl_lock_signal()), and re-acquires the mutex before return.
- *
- * This function is used to wait until lock state machine makes some progress
- * and to emulate synchronous operations on top of asynchronous lock
- * interface.
- *
- * \retval -EINTR wait was interrupted
- *
- * \retval 0 wait wasn't interrupted
- *
- * \pre cl_lock_is_mutexed(lock)
- *
- * \see cl_lock_signal()
- */
-int cl_lock_state_wait(const struct lu_env *env, struct cl_lock *lock)
-{
- wait_queue_t waiter;
- sigset_t blocked;
- int result;
-
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERT(lock->cll_depth == 1);
- LASSERT(lock->cll_state != CLS_FREEING); /* too late to wait */
-
- cl_lock_trace(D_DLMTRACE, env, "state wait lock", lock);
- result = lock->cll_error;
- if (result == 0) {
- /* To avoid being interrupted by the 'non-fatal' signals
- * (SIGCHLD, for instance), we'd block them temporarily.
- * LU-305
- */
- blocked = cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
-
- init_waitqueue_entry(&waiter, current);
- add_wait_queue(&lock->cll_wq, &waiter);
- set_current_state(TASK_INTERRUPTIBLE);
- cl_lock_mutex_put(env, lock);
-
- LASSERT(cl_lock_nr_mutexed(env) == 0);
-
- /* Returning ERESTARTSYS instead of EINTR so syscalls
- * can be restarted if signals are pending here
- */
- result = -ERESTARTSYS;
- if (likely(!OBD_FAIL_CHECK(OBD_FAIL_LOCK_STATE_WAIT_INTR))) {
- schedule();
- if (!cfs_signal_pending())
- result = 0;
- }
-
- cl_lock_mutex_get(env, lock);
- set_current_state(TASK_RUNNING);
- remove_wait_queue(&lock->cll_wq, &waiter);
-
- /* Restore old blocked signals */
- cfs_restore_sigs(blocked);
- }
- return result;
-}
-EXPORT_SYMBOL(cl_lock_state_wait);
-
-static void cl_lock_state_signal(const struct lu_env *env, struct cl_lock *lock,
- enum cl_lock_state state)
-{
- const struct cl_lock_slice *slice;
-
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
-
- list_for_each_entry(slice, &lock->cll_layers, cls_linkage)
- if (slice->cls_ops->clo_state)
- slice->cls_ops->clo_state(env, slice, state);
- wake_up_all(&lock->cll_wq);
-}
-
-/**
- * Notifies waiters that lock state changed.
- *
- * Wakes up all waiters sleeping in cl_lock_state_wait(), also notifies all
- * layers about state change by calling cl_lock_operations::clo_state()
- * top-to-bottom.
- */
-void cl_lock_signal(const struct lu_env *env, struct cl_lock *lock)
-{
- cl_lock_trace(D_DLMTRACE, env, "state signal lock", lock);
- cl_lock_state_signal(env, lock, lock->cll_state);
-}
-EXPORT_SYMBOL(cl_lock_signal);
-
-/**
- * Changes lock state.
- *
- * This function is invoked to notify layers that lock state changed, possible
- * as a result of an asynchronous event such as call-back reception.
- *
- * \post lock->cll_state == state
- *
- * \see cl_lock_operations::clo_state()
- */
-void cl_lock_state_set(const struct lu_env *env, struct cl_lock *lock,
- enum cl_lock_state state)
-{
- LASSERT(lock->cll_state <= state ||
- (lock->cll_state == CLS_CACHED &&
- (state == CLS_HELD || /* lock found in cache */
- state == CLS_NEW || /* sub-lock canceled */
- state == CLS_INTRANSIT)) ||
- /* lock is in transit state */
- lock->cll_state == CLS_INTRANSIT);
-
- if (lock->cll_state != state) {
- CS_LOCKSTATE_DEC(lock->cll_descr.cld_obj, lock->cll_state);
- CS_LOCKSTATE_INC(lock->cll_descr.cld_obj, state);
-
- cl_lock_state_signal(env, lock, state);
- lock->cll_state = state;
- }
-}
-EXPORT_SYMBOL(cl_lock_state_set);
-
-static int cl_unuse_try_internal(const struct lu_env *env, struct cl_lock *lock)
-{
- const struct cl_lock_slice *slice;
- int result;
-
- do {
- result = 0;
-
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERT(lock->cll_state == CLS_INTRANSIT);
-
- result = -ENOSYS;
- list_for_each_entry_reverse(slice, &lock->cll_layers,
- cls_linkage) {
- if (slice->cls_ops->clo_unuse) {
- result = slice->cls_ops->clo_unuse(env, slice);
- if (result != 0)
- break;
- }
- }
- LASSERT(result != -ENOSYS);
- } while (result == CLO_REPEAT);
-
- return result;
-}
-
-/**
- * Yanks lock from the cache (cl_lock_state::CLS_CACHED state) by calling
- * cl_lock_operations::clo_use() top-to-bottom to notify layers.
- * @atomic = 1, it must unuse the lock to recovery the lock to keep the
- * use process atomic
- */
-int cl_use_try(const struct lu_env *env, struct cl_lock *lock, int atomic)
+void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock)
{
const struct cl_lock_slice *slice;
- int result;
- enum cl_lock_state state;
-
- cl_lock_trace(D_DLMTRACE, env, "use lock", lock);
-
- LASSERT(lock->cll_state == CLS_CACHED);
- if (lock->cll_error)
- return lock->cll_error;
-
- result = -ENOSYS;
- state = cl_lock_intransit(env, lock);
- list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_use) {
- result = slice->cls_ops->clo_use(env, slice);
- if (result != 0)
- break;
- }
- }
- LASSERT(result != -ENOSYS);
-
- LASSERTF(lock->cll_state == CLS_INTRANSIT, "Wrong state %d.\n",
- lock->cll_state);
-
- if (result == 0) {
- state = CLS_HELD;
- } else {
- if (result == -ESTALE) {
- /*
- * ESTALE means sublock being cancelled
- * at this time, and set lock state to
- * be NEW here and ask the caller to repeat.
- */
- state = CLS_NEW;
- result = CLO_REPEAT;
- }
-
- /* @atomic means back-off-on-failure. */
- if (atomic) {
- int rc;
-
- rc = cl_unuse_try_internal(env, lock);
- /* Vet the results. */
- if (rc < 0 && result > 0)
- result = rc;
- }
+ cl_lock_trace(D_DLMTRACE, env, "cancel lock", lock);
+ list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
+ if (slice->cls_ops->clo_cancel)
+ slice->cls_ops->clo_cancel(env, slice);
}
- cl_lock_extransit(env, lock, state);
- return result;
}
-EXPORT_SYMBOL(cl_use_try);
+EXPORT_SYMBOL(cl_lock_cancel);
/**
- * Helper for cl_enqueue_try() that calls ->clo_enqueue() across all layers
- * top-to-bottom.
+ * Enqueue a lock.
+ * \param anchor: if we need to wait for resources before getting the lock,
+ * use @anchor for the purpose.
+ * \retval 0 enqueue succeeded
+ * \retval <0 error code
*/
-static int cl_enqueue_kick(const struct lu_env *env,
- struct cl_lock *lock,
- struct cl_io *io, __u32 flags)
+int cl_lock_enqueue(const struct lu_env *env, struct cl_io *io,
+ struct cl_lock *lock, struct cl_sync_io *anchor)
{
- int result;
const struct cl_lock_slice *slice;
+ int rc = -ENOSYS;
- result = -ENOSYS;
list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_enqueue) {
- result = slice->cls_ops->clo_enqueue(env,
- slice, io, flags);
- if (result != 0)
- break;
- }
- }
- LASSERT(result != -ENOSYS);
- return result;
-}
-
-/**
- * Tries to enqueue a lock.
- *
- * This function is called repeatedly by cl_enqueue() until either lock is
- * enqueued, or error occurs. This function does not block waiting for
- * networking communication to complete.
- *
- * \post ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
- * lock->cll_state == CLS_HELD)
- *
- * \see cl_enqueue() cl_lock_operations::clo_enqueue()
- * \see cl_lock_state::CLS_ENQUEUED
- */
-int cl_enqueue_try(const struct lu_env *env, struct cl_lock *lock,
- struct cl_io *io, __u32 flags)
-{
- int result;
-
- cl_lock_trace(D_DLMTRACE, env, "enqueue lock", lock);
- do {
- LINVRNT(cl_lock_is_mutexed(lock));
-
- result = lock->cll_error;
- if (result != 0)
- break;
-
- switch (lock->cll_state) {
- case CLS_NEW:
- cl_lock_state_set(env, lock, CLS_QUEUING);
- /* fall-through */
- case CLS_QUEUING:
- /* kick layers. */
- result = cl_enqueue_kick(env, lock, io, flags);
- /* For AGL case, the cl_lock::cll_state may
- * become CLS_HELD already.
- */
- if (result == 0 && lock->cll_state == CLS_QUEUING)
- cl_lock_state_set(env, lock, CLS_ENQUEUED);
- break;
- case CLS_INTRANSIT:
- LASSERT(cl_lock_is_intransit(lock));
- result = CLO_WAIT;
- break;
- case CLS_CACHED:
- /* yank lock from the cache. */
- result = cl_use_try(env, lock, 0);
- break;
- case CLS_ENQUEUED:
- case CLS_HELD:
- result = 0;
- break;
- default:
- case CLS_FREEING:
- /*
- * impossible, only held locks with increased
- * ->cll_holds can be enqueued, and they cannot be
- * freed.
- */
- LBUG();
- }
- } while (result == CLO_REPEAT);
- return result;
-}
-EXPORT_SYMBOL(cl_enqueue_try);
-
-/**
- * Cancel the conflicting lock found during previous enqueue.
- *
- * \retval 0 conflicting lock has been canceled.
- * \retval -ve error code.
- */
-int cl_lock_enqueue_wait(const struct lu_env *env,
- struct cl_lock *lock,
- int keep_mutex)
-{
- struct cl_lock *conflict;
- int rc = 0;
-
- LASSERT(cl_lock_is_mutexed(lock));
- LASSERT(lock->cll_state == CLS_QUEUING);
- LASSERT(lock->cll_conflict);
-
- conflict = lock->cll_conflict;
- lock->cll_conflict = NULL;
+ if (!slice->cls_ops->clo_enqueue)
+ continue;
- cl_lock_mutex_put(env, lock);
- LASSERT(cl_lock_nr_mutexed(env) == 0);
-
- cl_lock_mutex_get(env, conflict);
- cl_lock_trace(D_DLMTRACE, env, "enqueue wait", conflict);
- cl_lock_cancel(env, conflict);
- cl_lock_delete(env, conflict);
-
- while (conflict->cll_state != CLS_FREEING) {
- rc = cl_lock_state_wait(env, conflict);
+ rc = slice->cls_ops->clo_enqueue(env, slice, io, anchor);
if (rc != 0)
break;
- }
- cl_lock_mutex_put(env, conflict);
- lu_ref_del(&conflict->cll_reference, "cancel-wait", lock);
- cl_lock_put(env, conflict);
-
- if (keep_mutex)
- cl_lock_mutex_get(env, lock);
-
- LASSERT(rc <= 0);
- return rc;
-}
-EXPORT_SYMBOL(cl_lock_enqueue_wait);
-
-static int cl_enqueue_locked(const struct lu_env *env, struct cl_lock *lock,
- struct cl_io *io, __u32 enqflags)
-{
- int result;
-
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERT(lock->cll_holds > 0);
-
- cl_lock_user_add(env, lock);
- do {
- result = cl_enqueue_try(env, lock, io, enqflags);
- if (result == CLO_WAIT) {
- if (lock->cll_conflict)
- result = cl_lock_enqueue_wait(env, lock, 1);
- else
- result = cl_lock_state_wait(env, lock);
- if (result == 0)
- continue;
- }
- break;
- } while (1);
- if (result != 0)
- cl_unuse_try(env, lock);
- LASSERT(ergo(result == 0 && !(enqflags & CEF_AGL),
- lock->cll_state == CLS_ENQUEUED ||
- lock->cll_state == CLS_HELD));
- return result;
-}
-
-/**
- * Tries to unlock a lock.
- *
- * This function is called to release underlying resource:
- * 1. for top lock, the resource is sublocks it held;
- * 2. for sublock, the resource is the reference to dlmlock.
- *
- * cl_unuse_try is a one-shot operation, so it must NOT return CLO_WAIT.
- *
- * \see cl_unuse() cl_lock_operations::clo_unuse()
- * \see cl_lock_state::CLS_CACHED
- */
-int cl_unuse_try(const struct lu_env *env, struct cl_lock *lock)
-{
- int result;
- enum cl_lock_state state = CLS_NEW;
-
- cl_lock_trace(D_DLMTRACE, env, "unuse lock", lock);
-
- if (lock->cll_users > 1) {
- cl_lock_user_del(env, lock);
- return 0;
- }
-
- /* Only if the lock is in CLS_HELD or CLS_ENQUEUED state, it can hold
- * underlying resources.
- */
- if (!(lock->cll_state == CLS_HELD || lock->cll_state == CLS_ENQUEUED)) {
- cl_lock_user_del(env, lock);
- return 0;
- }
-
- /*
- * New lock users (->cll_users) are not protecting unlocking
- * from proceeding. From this point, lock eventually reaches
- * CLS_CACHED, is reinitialized to CLS_NEW or fails into
- * CLS_FREEING.
- */
- state = cl_lock_intransit(env, lock);
-
- result = cl_unuse_try_internal(env, lock);
- LASSERT(lock->cll_state == CLS_INTRANSIT);
- LASSERT(result != CLO_WAIT);
- cl_lock_user_del(env, lock);
- if (result == 0 || result == -ESTALE) {
- /*
- * Return lock back to the cache. This is the only
- * place where lock is moved into CLS_CACHED state.
- *
- * If one of ->clo_unuse() methods returned -ESTALE, lock
- * cannot be placed into cache and has to be
- * re-initialized. This happens e.g., when a sub-lock was
- * canceled while unlocking was in progress.
- */
- if (state == CLS_HELD && result == 0)
- state = CLS_CACHED;
- else
- state = CLS_NEW;
- cl_lock_extransit(env, lock, state);
-
- /*
- * Hide -ESTALE error.
- * If the lock is a glimpse lock, and it has multiple
- * stripes. Assuming that one of its sublock returned -ENAVAIL,
- * and other sublocks are matched write locks. In this case,
- * we can't set this lock to error because otherwise some of
- * its sublocks may not be canceled. This causes some dirty
- * pages won't be written to OSTs. -jay
- */
- result = 0;
- } else {
- CERROR("result = %d, this is unlikely!\n", result);
- state = CLS_NEW;
- cl_lock_extransit(env, lock, state);
- }
- return result ?: lock->cll_error;
-}
-EXPORT_SYMBOL(cl_unuse_try);
-
-static void cl_unuse_locked(const struct lu_env *env, struct cl_lock *lock)
-{
- int result;
-
- result = cl_unuse_try(env, lock);
- if (result)
- CL_LOCK_DEBUG(D_ERROR, env, lock, "unuse return %d\n", result);
-}
-
-/**
- * Unlocks a lock.
- */
-void cl_unuse(const struct lu_env *env, struct cl_lock *lock)
-{
- cl_lock_mutex_get(env, lock);
- cl_unuse_locked(env, lock);
- cl_lock_mutex_put(env, lock);
- cl_lock_lockdep_release(env, lock);
-}
-EXPORT_SYMBOL(cl_unuse);
-
-/**
- * Tries to wait for a lock.
- *
- * This function is called repeatedly by cl_wait() until either lock is
- * granted, or error occurs. This function does not block waiting for network
- * communication to complete.
- *
- * \see cl_wait() cl_lock_operations::clo_wait()
- * \see cl_lock_state::CLS_HELD
- */
-int cl_wait_try(const struct lu_env *env, struct cl_lock *lock)
-{
- const struct cl_lock_slice *slice;
- int result;
-
- cl_lock_trace(D_DLMTRACE, env, "wait lock try", lock);
- do {
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERTF(lock->cll_state == CLS_QUEUING ||
- lock->cll_state == CLS_ENQUEUED ||
- lock->cll_state == CLS_HELD ||
- lock->cll_state == CLS_INTRANSIT,
- "lock state: %d\n", lock->cll_state);
- LASSERT(lock->cll_users > 0);
- LASSERT(lock->cll_holds > 0);
-
- result = lock->cll_error;
- if (result != 0)
- break;
-
- if (cl_lock_is_intransit(lock)) {
- result = CLO_WAIT;
- break;
- }
-
- if (lock->cll_state == CLS_HELD)
- /* nothing to do */
- break;
-
- result = -ENOSYS;
- list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_wait) {
- result = slice->cls_ops->clo_wait(env, slice);
- if (result != 0)
- break;
- }
- }
- LASSERT(result != -ENOSYS);
- if (result == 0) {
- LASSERT(lock->cll_state != CLS_INTRANSIT);
- cl_lock_state_set(env, lock, CLS_HELD);
- }
- } while (result == CLO_REPEAT);
- return result;
-}
-EXPORT_SYMBOL(cl_wait_try);
-
-/**
- * Waits until enqueued lock is granted.
- *
- * \pre current thread or io owns a hold on the lock
- * \pre ergo(result == 0, lock->cll_state == CLS_ENQUEUED ||
- * lock->cll_state == CLS_HELD)
- *
- * \post ergo(result == 0, lock->cll_state == CLS_HELD)
- */
-int cl_wait(const struct lu_env *env, struct cl_lock *lock)
-{
- int result;
-
- cl_lock_mutex_get(env, lock);
-
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERTF(lock->cll_state == CLS_ENQUEUED || lock->cll_state == CLS_HELD,
- "Wrong state %d\n", lock->cll_state);
- LASSERT(lock->cll_holds > 0);
-
- do {
- result = cl_wait_try(env, lock);
- if (result == CLO_WAIT) {
- result = cl_lock_state_wait(env, lock);
- if (result == 0)
- continue;
- }
- break;
- } while (1);
- if (result < 0) {
- cl_unuse_try(env, lock);
- cl_lock_lockdep_release(env, lock);
- }
- cl_lock_trace(D_DLMTRACE, env, "wait lock", lock);
- cl_lock_mutex_put(env, lock);
- LASSERT(ergo(result == 0, lock->cll_state == CLS_HELD));
- return result;
-}
-EXPORT_SYMBOL(cl_wait);
-
-/**
- * Executes cl_lock_operations::clo_weigh(), and sums results to estimate lock
- * value.
- */
-unsigned long cl_lock_weigh(const struct lu_env *env, struct cl_lock *lock)
-{
- const struct cl_lock_slice *slice;
- unsigned long pound;
- unsigned long ounce;
-
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
-
- pound = 0;
- list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_weigh) {
- ounce = slice->cls_ops->clo_weigh(env, slice);
- pound += ounce;
- if (pound < ounce) /* over-weight^Wflow */
- pound = ~0UL;
- }
- }
- return pound;
-}
-EXPORT_SYMBOL(cl_lock_weigh);
-
-/**
- * Notifies layers that lock description changed.
- *
- * The server can grant client a lock different from one that was requested
- * (e.g., larger in extent). This method is called when actually granted lock
- * description becomes known to let layers to accommodate for changed lock
- * description.
- *
- * \see cl_lock_operations::clo_modify()
- */
-int cl_lock_modify(const struct lu_env *env, struct cl_lock *lock,
- const struct cl_lock_descr *desc)
-{
- const struct cl_lock_slice *slice;
- struct cl_object *obj = lock->cll_descr.cld_obj;
- struct cl_object_header *hdr = cl_object_header(obj);
- int result;
-
- cl_lock_trace(D_DLMTRACE, env, "modify lock", lock);
- /* don't allow object to change */
- LASSERT(obj == desc->cld_obj);
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
-
- list_for_each_entry_reverse(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_modify) {
- result = slice->cls_ops->clo_modify(env, slice, desc);
- if (result != 0)
- return result;
- }
- }
- CL_LOCK_DEBUG(D_DLMTRACE, env, lock, " -> "DDESCR"@"DFID"\n",
- PDESCR(desc), PFID(lu_object_fid(&desc->cld_obj->co_lu)));
- /*
- * Just replace description in place. Nothing more is needed for
- * now. If locks were indexed according to their extent and/or mode,
- * that index would have to be updated here.
- */
- spin_lock(&hdr->coh_lock_guard);
- lock->cll_descr = *desc;
- spin_unlock(&hdr->coh_lock_guard);
- return 0;
-}
-EXPORT_SYMBOL(cl_lock_modify);
-
-/**
- * Initializes lock closure with a given origin.
- *
- * \see cl_lock_closure
- */
-void cl_lock_closure_init(const struct lu_env *env,
- struct cl_lock_closure *closure,
- struct cl_lock *origin, int wait)
-{
- LINVRNT(cl_lock_is_mutexed(origin));
- LINVRNT(cl_lock_invariant(env, origin));
-
- INIT_LIST_HEAD(&closure->clc_list);
- closure->clc_origin = origin;
- closure->clc_wait = wait;
- closure->clc_nr = 0;
-}
-EXPORT_SYMBOL(cl_lock_closure_init);
-
-/**
- * Builds a closure of \a lock.
- *
- * Building of a closure consists of adding initial lock (\a lock) into it,
- * and calling cl_lock_operations::clo_closure() methods of \a lock. These
- * methods might call cl_lock_closure_build() recursively again, adding more
- * locks to the closure, etc.
- *
- * \see cl_lock_closure
- */
-int cl_lock_closure_build(const struct lu_env *env, struct cl_lock *lock,
- struct cl_lock_closure *closure)
-{
- const struct cl_lock_slice *slice;
- int result;
-
- LINVRNT(cl_lock_is_mutexed(closure->clc_origin));
- LINVRNT(cl_lock_invariant(env, closure->clc_origin));
-
- result = cl_lock_enclosure(env, lock, closure);
- if (result == 0) {
- list_for_each_entry(slice, &lock->cll_layers, cls_linkage) {
- if (slice->cls_ops->clo_closure) {
- result = slice->cls_ops->clo_closure(env, slice,
- closure);
- if (result != 0)
- break;
- }
- }
- }
- if (result != 0)
- cl_lock_disclosure(env, closure);
- return result;
-}
-EXPORT_SYMBOL(cl_lock_closure_build);
-
-/**
- * Adds new lock to a closure.
- *
- * Try-locks \a lock and if succeeded, adds it to the closure (never more than
- * once). If try-lock failed, returns CLO_REPEAT, after optionally waiting
- * until next try-lock is likely to succeed.
- */
-int cl_lock_enclosure(const struct lu_env *env, struct cl_lock *lock,
- struct cl_lock_closure *closure)
-{
- int result = 0;
-
- cl_lock_trace(D_DLMTRACE, env, "enclosure lock", lock);
- if (!cl_lock_mutex_try(env, lock)) {
- /*
- * If lock->cll_inclosure is not empty, lock is already in
- * this closure.
- */
- if (list_empty(&lock->cll_inclosure)) {
- cl_lock_get_trust(lock);
- lu_ref_add(&lock->cll_reference, "closure", closure);
- list_add(&lock->cll_inclosure, &closure->clc_list);
- closure->clc_nr++;
- } else
- cl_lock_mutex_put(env, lock);
- result = 0;
- } else {
- cl_lock_disclosure(env, closure);
- if (closure->clc_wait) {
- cl_lock_get_trust(lock);
- lu_ref_add(&lock->cll_reference, "closure-w", closure);
- cl_lock_mutex_put(env, closure->clc_origin);
-
- LASSERT(cl_lock_nr_mutexed(env) == 0);
- cl_lock_mutex_get(env, lock);
- cl_lock_mutex_put(env, lock);
-
- cl_lock_mutex_get(env, closure->clc_origin);
- lu_ref_del(&lock->cll_reference, "closure-w", closure);
- cl_lock_put(env, lock);
- }
- result = CLO_REPEAT;
- }
- return result;
-}
-EXPORT_SYMBOL(cl_lock_enclosure);
-
-/** Releases mutices of enclosed locks. */
-void cl_lock_disclosure(const struct lu_env *env,
- struct cl_lock_closure *closure)
-{
- struct cl_lock *scan;
- struct cl_lock *temp;
-
- cl_lock_trace(D_DLMTRACE, env, "disclosure lock", closure->clc_origin);
- list_for_each_entry_safe(scan, temp, &closure->clc_list,
- cll_inclosure) {
- list_del_init(&scan->cll_inclosure);
- cl_lock_mutex_put(env, scan);
- lu_ref_del(&scan->cll_reference, "closure", closure);
- cl_lock_put(env, scan);
- closure->clc_nr--;
- }
- LASSERT(closure->clc_nr == 0);
-}
-EXPORT_SYMBOL(cl_lock_disclosure);
-
-/** Finalizes a closure. */
-void cl_lock_closure_fini(struct cl_lock_closure *closure)
-{
- LASSERT(closure->clc_nr == 0);
- LASSERT(list_empty(&closure->clc_list));
-}
-EXPORT_SYMBOL(cl_lock_closure_fini);
-
-/**
- * Destroys this lock. Notifies layers (bottom-to-top) that lock is being
- * destroyed, then destroy the lock. If there are holds on the lock, postpone
- * destruction until all holds are released. This is called when a decision is
- * made to destroy the lock in the future. E.g., when a blocking AST is
- * received on it, or fatal communication error happens.
- *
- * Caller must have a reference on this lock to prevent a situation, when
- * deleted lock lingers in memory for indefinite time, because nobody calls
- * cl_lock_put() to finish it.
- *
- * \pre atomic_read(&lock->cll_ref) > 0
- * \pre ergo(cl_lock_nesting(lock) == CNL_TOP,
- * cl_lock_nr_mutexed(env) == 1)
- * [i.e., if a top-lock is deleted, mutices of no other locks can be
- * held, as deletion of sub-locks might require releasing a top-lock
- * mutex]
- *
- * \see cl_lock_operations::clo_delete()
- * \see cl_lock::cll_holds
- */
-void cl_lock_delete(const struct lu_env *env, struct cl_lock *lock)
-{
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERT(ergo(cl_lock_nesting(lock) == CNL_TOP,
- cl_lock_nr_mutexed(env) == 1));
-
- cl_lock_trace(D_DLMTRACE, env, "delete lock", lock);
- if (lock->cll_holds == 0)
- cl_lock_delete0(env, lock);
- else
- lock->cll_flags |= CLF_DOOMED;
-}
-EXPORT_SYMBOL(cl_lock_delete);
-
-/**
- * Mark lock as irrecoverably failed, and mark it for destruction. This
- * happens when, e.g., server fails to grant a lock to us, or networking
- * time-out happens.
- *
- * \pre atomic_read(&lock->cll_ref) > 0
- *
- * \see clo_lock_delete()
- * \see cl_lock::cll_holds
- */
-void cl_lock_error(const struct lu_env *env, struct cl_lock *lock, int error)
-{
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
-
- if (lock->cll_error == 0 && error != 0) {
- cl_lock_trace(D_DLMTRACE, env, "set lock error", lock);
- lock->cll_error = error;
- cl_lock_signal(env, lock);
- cl_lock_cancel(env, lock);
- cl_lock_delete(env, lock);
- }
-}
-EXPORT_SYMBOL(cl_lock_error);
-
-/**
- * Cancels this lock. Notifies layers
- * (bottom-to-top) that lock is being cancelled, then destroy the lock. If
- * there are holds on the lock, postpone cancellation until
- * all holds are released.
- *
- * Cancellation notification is delivered to layers at most once.
- *
- * \see cl_lock_operations::clo_cancel()
- * \see cl_lock::cll_holds
- */
-void cl_lock_cancel(const struct lu_env *env, struct cl_lock *lock)
-{
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
-
- cl_lock_trace(D_DLMTRACE, env, "cancel lock", lock);
- if (lock->cll_holds == 0)
- cl_lock_cancel0(env, lock);
- else
- lock->cll_flags |= CLF_CANCELPEND;
-}
-EXPORT_SYMBOL(cl_lock_cancel);
-
-/**
- * Finds an existing lock covering given index and optionally different from a
- * given \a except lock.
- */
-struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
- struct cl_object *obj, pgoff_t index,
- struct cl_lock *except,
- int pending, int canceld)
-{
- struct cl_object_header *head;
- struct cl_lock *scan;
- struct cl_lock *lock;
- struct cl_lock_descr *need;
-
- head = cl_object_header(obj);
- need = &cl_env_info(env)->clt_descr;
- lock = NULL;
-
- need->cld_mode = CLM_READ; /* CLM_READ matches both READ & WRITE, but
- * not PHANTOM
- */
- need->cld_start = need->cld_end = index;
- need->cld_enq_flags = 0;
-
- spin_lock(&head->coh_lock_guard);
- /* It is fine to match any group lock since there could be only one
- * with a uniq gid and it conflicts with all other lock modes too
- */
- list_for_each_entry(scan, &head->coh_locks, cll_linkage) {
- if (scan != except &&
- (scan->cll_descr.cld_mode == CLM_GROUP ||
- cl_lock_ext_match(&scan->cll_descr, need)) &&
- scan->cll_state >= CLS_HELD &&
- scan->cll_state < CLS_FREEING &&
- /*
- * This check is racy as the lock can be canceled right
- * after it is done, but this is fine, because page exists
- * already.
- */
- (canceld || !(scan->cll_flags & CLF_CANCELLED)) &&
- (pending || !(scan->cll_flags & CLF_CANCELPEND))) {
- /* Don't increase cs_hit here since this
- * is just a helper function.
- */
- cl_lock_get_trust(scan);
- lock = scan;
- break;
}
- }
- spin_unlock(&head->coh_lock_guard);
- return lock;
-}
-EXPORT_SYMBOL(cl_lock_at_pgoff);
-
-/**
- * Calculate the page offset at the layer of @lock.
- * At the time of this writing, @page is top page and @lock is sub lock.
- */
-static pgoff_t pgoff_at_lock(struct cl_page *page, struct cl_lock *lock)
-{
- struct lu_device_type *dtype;
- const struct cl_page_slice *slice;
-
- dtype = lock->cll_descr.cld_obj->co_lu.lo_dev->ld_type;
- slice = cl_page_at(page, dtype);
- return slice->cpl_page->cp_index;
+ return rc;
}
+EXPORT_SYMBOL(cl_lock_enqueue);
/**
- * Check if page @page is covered by an extra lock or discard it.
+ * Main high-level entry point of cl_lock interface that finds existing or
+ * enqueues new lock matching given description.
*/
-static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, void *cbdata)
+int cl_lock_request(const struct lu_env *env, struct cl_io *io,
+ struct cl_lock *lock)
{
- struct cl_thread_info *info = cl_env_info(env);
- struct cl_lock *lock = cbdata;
- pgoff_t index = pgoff_at_lock(page, lock);
+ struct cl_sync_io *anchor = NULL;
+ __u32 enq_flags = lock->cll_descr.cld_enq_flags;
+ int rc;
- if (index >= info->clt_fn_index) {
- struct cl_lock *tmp;
+ rc = cl_lock_init(env, lock, io);
+ if (rc < 0)
+ return rc;
- /* refresh non-overlapped index */
- tmp = cl_lock_at_pgoff(env, lock->cll_descr.cld_obj, index,
- lock, 1, 0);
- if (tmp) {
- /* Cache the first-non-overlapped index so as to skip
- * all pages within [index, clt_fn_index). This
- * is safe because if tmp lock is canceled, it will
- * discard these pages.
- */
- info->clt_fn_index = tmp->cll_descr.cld_end + 1;
- if (tmp->cll_descr.cld_end == CL_PAGE_EOF)
- info->clt_fn_index = CL_PAGE_EOF;
- cl_lock_put(env, tmp);
- } else if (cl_page_own(env, io, page) == 0) {
- /* discard the page */
- cl_page_unmap(env, io, page);
- cl_page_discard(env, io, page);
- cl_page_disown(env, io, page);
- } else {
- LASSERT(page->cp_state == CPS_FREEING);
- }
+ if ((enq_flags & CEF_ASYNC) && !(enq_flags & CEF_AGL)) {
+ anchor = &cl_env_info(env)->clt_anchor;
+ cl_sync_io_init(anchor, 1, cl_sync_io_end);
}
- info->clt_next_index = index + 1;
- return CLP_GANG_OKAY;
-}
+ rc = cl_lock_enqueue(env, io, lock, anchor);
-static int discard_cb(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, void *cbdata)
-{
- struct cl_thread_info *info = cl_env_info(env);
- struct cl_lock *lock = cbdata;
+ if (anchor) {
+ int rc2;
- LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE);
- KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
- !PageWriteback(cl_page_vmpage(env, page))));
- KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
- !PageDirty(cl_page_vmpage(env, page))));
-
- info->clt_next_index = pgoff_at_lock(page, lock) + 1;
- if (cl_page_own(env, io, page) == 0) {
- /* discard the page */
- cl_page_unmap(env, io, page);
- cl_page_discard(env, io, page);
- cl_page_disown(env, io, page);
- } else {
- LASSERT(page->cp_state == CPS_FREEING);
+ /* drop the reference count held at initialization time */
+ cl_sync_io_note(env, anchor, 0);
+ rc2 = cl_sync_io_wait(env, anchor, 0);
+ if (rc2 < 0 && rc == 0)
+ rc = rc2;
}
- return CLP_GANG_OKAY;
-}
+ if (rc < 0)
+ cl_lock_release(env, lock);
-/**
- * Discard pages protected by the given lock. This function traverses radix
- * tree to find all covering pages and discard them. If a page is being covered
- * by other locks, it should remain in cache.
- *
- * If error happens on any step, the process continues anyway (the reasoning
- * behind this being that lock cancellation cannot be delayed indefinitely).
- */
-int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock)
-{
- struct cl_thread_info *info = cl_env_info(env);
- struct cl_io *io = &info->clt_io;
- struct cl_lock_descr *descr = &lock->cll_descr;
- cl_page_gang_cb_t cb;
- int res;
- int result;
-
- LINVRNT(cl_lock_invariant(env, lock));
-
- io->ci_obj = cl_object_top(descr->cld_obj);
- io->ci_ignore_layout = 1;
- result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
- if (result != 0)
- goto out;
-
- cb = descr->cld_mode == CLM_READ ? check_and_discard_cb : discard_cb;
- info->clt_fn_index = info->clt_next_index = descr->cld_start;
- do {
- res = cl_page_gang_lookup(env, descr->cld_obj, io,
- info->clt_next_index, descr->cld_end,
- cb, (void *)lock);
- if (info->clt_next_index > descr->cld_end)
- break;
-
- if (res == CLP_GANG_RESCHED)
- cond_resched();
- } while (res != CLP_GANG_OKAY);
-out:
- cl_io_fini(env, io);
- return result;
-}
-EXPORT_SYMBOL(cl_lock_discard_pages);
-
-/**
- * Eliminate all locks for a given object.
- *
- * Caller has to guarantee that no lock is in active use.
- *
- * \param cancel when this is set, cl_locks_prune() cancels locks before
- * destroying.
- */
-void cl_locks_prune(const struct lu_env *env, struct cl_object *obj, int cancel)
-{
- struct cl_object_header *head;
- struct cl_lock *lock;
-
- head = cl_object_header(obj);
- /*
- * If locks are destroyed without cancellation, all pages must be
- * already destroyed (as otherwise they will be left unprotected).
- */
- LASSERT(ergo(!cancel,
- !head->coh_tree.rnode && head->coh_pages == 0));
-
- spin_lock(&head->coh_lock_guard);
- while (!list_empty(&head->coh_locks)) {
- lock = container_of(head->coh_locks.next,
- struct cl_lock, cll_linkage);
- cl_lock_get_trust(lock);
- spin_unlock(&head->coh_lock_guard);
- lu_ref_add(&lock->cll_reference, "prune", current);
-
-again:
- cl_lock_mutex_get(env, lock);
- if (lock->cll_state < CLS_FREEING) {
- LASSERT(lock->cll_users <= 1);
- if (unlikely(lock->cll_users == 1)) {
- struct l_wait_info lwi = { 0 };
-
- cl_lock_mutex_put(env, lock);
- l_wait_event(lock->cll_wq,
- lock->cll_users == 0,
- &lwi);
- goto again;
- }
-
- if (cancel)
- cl_lock_cancel(env, lock);
- cl_lock_delete(env, lock);
- }
- cl_lock_mutex_put(env, lock);
- lu_ref_del(&lock->cll_reference, "prune", current);
- cl_lock_put(env, lock);
- spin_lock(&head->coh_lock_guard);
- }
- spin_unlock(&head->coh_lock_guard);
-}
-EXPORT_SYMBOL(cl_locks_prune);
-
-static struct cl_lock *cl_lock_hold_mutex(const struct lu_env *env,
- const struct cl_io *io,
- const struct cl_lock_descr *need,
- const char *scope, const void *source)
-{
- struct cl_lock *lock;
-
- while (1) {
- lock = cl_lock_find(env, io, need);
- if (IS_ERR(lock))
- break;
- cl_lock_mutex_get(env, lock);
- if (lock->cll_state < CLS_FREEING &&
- !(lock->cll_flags & CLF_CANCELLED)) {
- cl_lock_hold_mod(env, lock, 1);
- lu_ref_add(&lock->cll_holders, scope, source);
- lu_ref_add(&lock->cll_reference, scope, source);
- break;
- }
- cl_lock_mutex_put(env, lock);
- cl_lock_put(env, lock);
- }
- return lock;
-}
-
-/**
- * Returns a lock matching \a need description with a reference and a hold on
- * it.
- *
- * This is much like cl_lock_find(), except that cl_lock_hold() additionally
- * guarantees that lock is not in the CLS_FREEING state on return.
- */
-struct cl_lock *cl_lock_hold(const struct lu_env *env, const struct cl_io *io,
- const struct cl_lock_descr *need,
- const char *scope, const void *source)
-{
- struct cl_lock *lock;
-
- lock = cl_lock_hold_mutex(env, io, need, scope, source);
- if (!IS_ERR(lock))
- cl_lock_mutex_put(env, lock);
- return lock;
-}
-EXPORT_SYMBOL(cl_lock_hold);
-
-/**
- * Main high-level entry point of cl_lock interface that finds existing or
- * enqueues new lock matching given description.
- */
-struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
- const struct cl_lock_descr *need,
- const char *scope, const void *source)
-{
- struct cl_lock *lock;
- int rc;
- __u32 enqflags = need->cld_enq_flags;
-
- do {
- lock = cl_lock_hold_mutex(env, io, need, scope, source);
- if (IS_ERR(lock))
- break;
-
- rc = cl_enqueue_locked(env, lock, io, enqflags);
- if (rc == 0) {
- if (cl_lock_fits_into(env, lock, need, io)) {
- if (!(enqflags & CEF_AGL)) {
- cl_lock_mutex_put(env, lock);
- cl_lock_lockdep_acquire(env, lock,
- enqflags);
- break;
- }
- rc = 1;
- }
- cl_unuse_locked(env, lock);
- }
- cl_lock_trace(D_DLMTRACE, env,
- rc <= 0 ? "enqueue failed" : "agl succeed", lock);
- cl_lock_hold_release(env, lock, scope, source);
- cl_lock_mutex_put(env, lock);
- lu_ref_del(&lock->cll_reference, scope, source);
- cl_lock_put(env, lock);
- if (rc > 0) {
- LASSERT(enqflags & CEF_AGL);
- lock = NULL;
- } else if (rc != 0) {
- lock = ERR_PTR(rc);
- }
- } while (rc == 0);
- return lock;
+ return rc;
}
EXPORT_SYMBOL(cl_lock_request);
/**
- * Adds a hold to a known lock.
- */
-void cl_lock_hold_add(const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source)
-{
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERT(lock->cll_state != CLS_FREEING);
-
- cl_lock_hold_mod(env, lock, 1);
- cl_lock_get(lock);
- lu_ref_add(&lock->cll_holders, scope, source);
- lu_ref_add(&lock->cll_reference, scope, source);
-}
-EXPORT_SYMBOL(cl_lock_hold_add);
-
-/**
- * Releases a hold and a reference on a lock, on which caller acquired a
- * mutex.
- */
-void cl_lock_unhold(const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source)
-{
- LINVRNT(cl_lock_invariant(env, lock));
- cl_lock_hold_release(env, lock, scope, source);
- lu_ref_del(&lock->cll_reference, scope, source);
- cl_lock_put(env, lock);
-}
-EXPORT_SYMBOL(cl_lock_unhold);
-
-/**
* Releases a hold and a reference on a lock, obtained by cl_lock_hold().
*/
-void cl_lock_release(const struct lu_env *env, struct cl_lock *lock,
- const char *scope, const void *source)
+void cl_lock_release(const struct lu_env *env, struct cl_lock *lock)
{
- LINVRNT(cl_lock_invariant(env, lock));
cl_lock_trace(D_DLMTRACE, env, "release lock", lock);
- cl_lock_mutex_get(env, lock);
- cl_lock_hold_release(env, lock, scope, source);
- cl_lock_mutex_put(env, lock);
- lu_ref_del(&lock->cll_reference, scope, source);
- cl_lock_put(env, lock);
+ cl_lock_cancel(env, lock);
+ cl_lock_fini(env, lock);
}
EXPORT_SYMBOL(cl_lock_release);
-void cl_lock_user_add(const struct lu_env *env, struct cl_lock *lock)
-{
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
-
- cl_lock_used_mod(env, lock, 1);
-}
-EXPORT_SYMBOL(cl_lock_user_add);
-
-void cl_lock_user_del(const struct lu_env *env, struct cl_lock *lock)
-{
- LINVRNT(cl_lock_is_mutexed(lock));
- LINVRNT(cl_lock_invariant(env, lock));
- LASSERT(lock->cll_users > 0);
-
- cl_lock_used_mod(env, lock, -1);
- if (lock->cll_users == 0)
- wake_up_all(&lock->cll_wq);
-}
-EXPORT_SYMBOL(cl_lock_user_del);
-
const char *cl_lock_mode_name(const enum cl_lock_mode mode)
{
static const char *names[] = {
- [CLM_PHANTOM] = "P",
[CLM_READ] = "R",
[CLM_WRITE] = "W",
[CLM_GROUP] = "G"
@@ -2189,10 +261,8 @@ void cl_lock_print(const struct lu_env *env, void *cookie,
lu_printer_t printer, const struct cl_lock *lock)
{
const struct cl_lock_slice *slice;
- (*printer)(env, cookie, "lock@%p[%d %d %d %d %d %08lx] ",
- lock, atomic_read(&lock->cll_ref),
- lock->cll_state, lock->cll_error, lock->cll_holds,
- lock->cll_users, lock->cll_flags);
+
+ (*printer)(env, cookie, "lock@%p", lock);
cl_lock_descr_print(env, cookie, printer, &lock->cll_descr);
(*printer)(env, cookie, " {\n");
@@ -2207,13 +277,3 @@ void cl_lock_print(const struct lu_env *env, void *cookie,
(*printer)(env, cookie, "} lock@%p\n", lock);
}
EXPORT_SYMBOL(cl_lock_print);
-
-int cl_lock_init(void)
-{
- return lu_kmem_init(cl_lock_caches);
-}
-
-void cl_lock_fini(void)
-{
- lu_kmem_fini(cl_lock_caches);
-}
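With the hold/user counters, the per-lock mutex and the old state machine removed, the caller-visible sequence for taking a client lock collapses to cl_lock_request() followed by cl_lock_release(). The sketch below is illustrative only and is not part of the patch; it uses just the entry points and descriptor fields visible in the hunks above, and the helper name is invented:

static int example_lock_extent(const struct lu_env *env, struct cl_io *io,
			       struct cl_object *obj, pgoff_t start,
			       pgoff_t end, struct cl_lock *lock)
{
	int rc;

	/* Describe the extent to lock; cld_enq_flags is left at 0 here. */
	memset(lock, 0, sizeof(*lock));
	lock->cll_descr.cld_obj   = obj;
	lock->cll_descr.cld_start = start;
	lock->cll_descr.cld_end   = end;
	lock->cll_descr.cld_mode  = CLM_READ;

	/*
	 * cl_lock_request() runs cl_lock_init() and cl_lock_enqueue(),
	 * waits on a cl_sync_io anchor when CEF_ASYNC is set without
	 * CEF_AGL, and releases the lock itself on error.
	 */
	rc = cl_lock_request(env, io, lock);
	if (rc < 0)
		return rc;

	/* ... I/O covered by the lock ... */

	/* Cancels every layer, then finalizes the lock. */
	cl_lock_release(env, lock);
	return 0;
}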
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_object.c b/drivers/staging/lustre/lustre/obdclass/cl_object.c
index 43e299d4d416..5940f30318ec 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_object.c
@@ -36,6 +36,7 @@
* Client Lustre Object.
*
* Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@intel.com>
*/
/*
@@ -43,8 +44,6 @@
*
* i_mutex
* PG_locked
- * ->coh_page_guard
- * ->coh_lock_guard
* ->coh_attr_guard
* ->ls_guard
*/
@@ -63,10 +62,6 @@
static struct kmem_cache *cl_env_kmem;
-/** Lock class of cl_object_header::coh_page_guard */
-static struct lock_class_key cl_page_guard_class;
-/** Lock class of cl_object_header::coh_lock_guard */
-static struct lock_class_key cl_lock_guard_class;
/** Lock class of cl_object_header::coh_attr_guard */
static struct lock_class_key cl_attr_guard_class;
@@ -81,17 +76,9 @@ int cl_object_header_init(struct cl_object_header *h)
result = lu_object_header_init(&h->coh_lu);
if (result == 0) {
- spin_lock_init(&h->coh_page_guard);
- spin_lock_init(&h->coh_lock_guard);
spin_lock_init(&h->coh_attr_guard);
- lockdep_set_class(&h->coh_page_guard, &cl_page_guard_class);
- lockdep_set_class(&h->coh_lock_guard, &cl_lock_guard_class);
lockdep_set_class(&h->coh_attr_guard, &cl_attr_guard_class);
- h->coh_pages = 0;
- /* XXX hard coded GFP_* mask. */
- INIT_RADIX_TREE(&h->coh_tree, GFP_ATOMIC);
- INIT_LIST_HEAD(&h->coh_locks);
- h->coh_page_bufsize = ALIGN(sizeof(struct cl_page), 8);
+ h->coh_page_bufsize = 0;
}
return result;
}
@@ -145,7 +132,7 @@ EXPORT_SYMBOL(cl_object_get);
/**
* Returns the top-object for a given \a o.
*
- * \see cl_page_top(), cl_io_top()
+ * \see cl_io_top()
*/
struct cl_object *cl_object_top(struct cl_object *o)
{
@@ -315,6 +302,29 @@ int cl_conf_set(const struct lu_env *env, struct cl_object *obj,
EXPORT_SYMBOL(cl_conf_set);
/**
+ * Prunes caches of pages and locks for this object.
+ */
+int cl_object_prune(const struct lu_env *env, struct cl_object *obj)
+{
+ struct lu_object_header *top;
+ struct cl_object *o;
+ int result;
+
+ top = obj->co_lu.lo_header;
+ result = 0;
+ list_for_each_entry(o, &top->loh_layers, co_lu.lo_linkage) {
+ if (o->co_ops->coo_prune) {
+ result = o->co_ops->coo_prune(env, o);
+ if (result != 0)
+ break;
+ }
+ }
+
+ return result;
+}
+EXPORT_SYMBOL(cl_object_prune);
+
+/**
* Helper function removing all object locks, and marking object for
* deletion. All object pages must have been deleted at this point.
*
@@ -323,34 +333,12 @@ EXPORT_SYMBOL(cl_conf_set);
*/
void cl_object_kill(const struct lu_env *env, struct cl_object *obj)
{
- struct cl_object_header *hdr;
-
- hdr = cl_object_header(obj);
- LASSERT(!hdr->coh_tree.rnode);
- LASSERT(hdr->coh_pages == 0);
+ struct cl_object_header *hdr = cl_object_header(obj);
set_bit(LU_OBJECT_HEARD_BANSHEE, &hdr->coh_lu.loh_flags);
- /*
- * Destroy all locks. Object destruction (including cl_inode_fini())
- * cannot cancel the locks, because in the case of a local client,
- * where client and server share the same thread running
- * prune_icache(), this can dead-lock with ldlm_cancel_handler()
- * waiting on __wait_on_freeing_inode().
- */
- cl_locks_prune(env, obj, 0);
}
EXPORT_SYMBOL(cl_object_kill);
-/**
- * Prunes caches of pages and locks for this object.
- */
-void cl_object_prune(const struct lu_env *env, struct cl_object *obj)
-{
- cl_pages_prune(env, obj);
- cl_locks_prune(env, obj, 1);
-}
-EXPORT_SYMBOL(cl_object_prune);
-
void cache_stats_init(struct cache_stats *cs, const char *name)
{
int i;
@@ -383,6 +371,8 @@ static int cache_stats_print(const struct cache_stats *cs,
return 0;
}
+static void cl_env_percpu_refill(void);
+
/**
* Initialize client site.
*
@@ -397,11 +387,9 @@ int cl_site_init(struct cl_site *s, struct cl_device *d)
result = lu_site_init(&s->cs_lu, &d->cd_lu_dev);
if (result == 0) {
cache_stats_init(&s->cs_pages, "pages");
- cache_stats_init(&s->cs_locks, "locks");
for (i = 0; i < ARRAY_SIZE(s->cs_pages_state); ++i)
atomic_set(&s->cs_pages_state[0], 0);
- for (i = 0; i < ARRAY_SIZE(s->cs_locks_state); ++i)
- atomic_set(&s->cs_locks_state[i], 0);
+ cl_env_percpu_refill();
}
return result;
}
@@ -435,15 +423,6 @@ int cl_site_stats_print(const struct cl_site *site, struct seq_file *m)
[CPS_PAGEIN] = "r",
[CPS_FREEING] = "f"
};
- static const char *lstate[] = {
- [CLS_NEW] = "n",
- [CLS_QUEUING] = "q",
- [CLS_ENQUEUED] = "e",
- [CLS_HELD] = "h",
- [CLS_INTRANSIT] = "t",
- [CLS_CACHED] = "c",
- [CLS_FREEING] = "f"
- };
/*
lookup hit total busy create
pages: ...... ...... ...... ...... ...... [...... ...... ...... ......]
@@ -457,12 +436,6 @@ locks: ...... ...... ...... ...... ...... [...... ...... ...... ...... ......]
seq_printf(m, "%s: %u ", pstate[i],
atomic_read(&site->cs_pages_state[i]));
seq_printf(m, "]\n");
- cache_stats_print(&site->cs_locks, m, 0);
- seq_printf(m, " [");
- for (i = 0; i < ARRAY_SIZE(site->cs_locks_state); ++i)
- seq_printf(m, "%s: %u ", lstate[i],
- atomic_read(&site->cs_locks_state[i]));
- seq_printf(m, "]\n");
cache_stats_print(&cl_env_stats, m, 0);
seq_printf(m, "\n");
return 0;
@@ -492,6 +465,13 @@ EXPORT_SYMBOL(cl_site_stats_print);
* bz20044, bz22683.
*/
+static LIST_HEAD(cl_envs);
+static unsigned int cl_envs_cached_nr;
+static unsigned int cl_envs_cached_max = 128; /* XXX: prototype: arbitrary limit
+ * for now.
+ */
+static DEFINE_SPINLOCK(cl_envs_guard);
+
struct cl_env {
void *ce_magic;
struct lu_env ce_lu;
@@ -674,8 +654,9 @@ static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
lu_context_enter(&cle->ce_ses);
env->le_ses = &cle->ce_ses;
cl_env_init0(cle, debug);
- } else
+ } else {
lu_env_fini(env);
+ }
}
if (rc != 0) {
kmem_cache_free(cl_env_kmem, cle);
@@ -684,8 +665,9 @@ static struct lu_env *cl_env_new(__u32 ctx_tags, __u32 ses_tags, void *debug)
CL_ENV_INC(create);
CL_ENV_INC(total);
}
- } else
+ } else {
env = ERR_PTR(-ENOMEM);
+ }
return env;
}
@@ -697,6 +679,39 @@ static void cl_env_fini(struct cl_env *cle)
kmem_cache_free(cl_env_kmem, cle);
}
+static struct lu_env *cl_env_obtain(void *debug)
+{
+ struct cl_env *cle;
+ struct lu_env *env;
+
+ spin_lock(&cl_envs_guard);
+ LASSERT(equi(cl_envs_cached_nr == 0, list_empty(&cl_envs)));
+ if (cl_envs_cached_nr > 0) {
+ int rc;
+
+ cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
+ list_del_init(&cle->ce_linkage);
+ cl_envs_cached_nr--;
+ spin_unlock(&cl_envs_guard);
+
+ env = &cle->ce_lu;
+ rc = lu_env_refill(env);
+ if (rc == 0) {
+ cl_env_init0(cle, debug);
+ lu_context_enter(&env->le_ctx);
+ lu_context_enter(&cle->ce_ses);
+ } else {
+ cl_env_fini(cle);
+ env = ERR_PTR(rc);
+ }
+ } else {
+ spin_unlock(&cl_envs_guard);
+ env = cl_env_new(lu_context_tags_default,
+ lu_session_tags_default, debug);
+ }
+ return env;
+}
+
static inline struct cl_env *cl_env_container(struct lu_env *env)
{
return container_of(env, struct cl_env, ce_lu);
@@ -727,6 +742,8 @@ static struct lu_env *cl_env_peek(int *refcheck)
* Returns lu_env: if there already is an environment associated with the
* current thread, it is returned, otherwise, new environment is allocated.
*
+ * Allocations are amortized through the global cache of environments.
+ *
* \param refcheck pointer to a counter used to detect environment leaks. In
* the usual case cl_env_get() and cl_env_put() are called in the same lexical
* scope and pointer to the same integer is passed as \a refcheck. This is
@@ -740,10 +757,7 @@ struct lu_env *cl_env_get(int *refcheck)
env = cl_env_peek(refcheck);
if (!env) {
- env = cl_env_new(lu_context_tags_default,
- lu_session_tags_default,
- __builtin_return_address(0));
-
+ env = cl_env_obtain(__builtin_return_address(0));
if (!IS_ERR(env)) {
struct cl_env *cle;
@@ -787,6 +801,32 @@ static void cl_env_exit(struct cl_env *cle)
}
/**
+ * Finalizes and frees a given number of cached environments. This is done to
+ * (1) free some memory (not currently hooked into VM), or (2) release
+ * references to modules.
+ */
+unsigned int cl_env_cache_purge(unsigned int nr)
+{
+ struct cl_env *cle;
+
+ spin_lock(&cl_envs_guard);
+ for (; !list_empty(&cl_envs) && nr > 0; --nr) {
+ cle = container_of(cl_envs.next, struct cl_env, ce_linkage);
+ list_del_init(&cle->ce_linkage);
+ LASSERT(cl_envs_cached_nr > 0);
+ cl_envs_cached_nr--;
+ spin_unlock(&cl_envs_guard);
+
+ cl_env_fini(cle);
+ spin_lock(&cl_envs_guard);
+ }
+ LASSERT(equi(cl_envs_cached_nr == 0, list_empty(&cl_envs)));
+ spin_unlock(&cl_envs_guard);
+ return nr;
+}
+EXPORT_SYMBOL(cl_env_cache_purge);
+
+/**
* Release an environment.
*
* Decrement \a env reference counter. When counter drops to 0, nothing in
@@ -808,7 +848,22 @@ void cl_env_put(struct lu_env *env, int *refcheck)
cl_env_detach(cle);
cle->ce_debug = NULL;
cl_env_exit(cle);
- cl_env_fini(cle);
+ /*
+ * Don't bother to take a lock here.
+ *
+ * Return environment to the cache only when it was allocated
+ * with the standard tags.
+ */
+ if (cl_envs_cached_nr < cl_envs_cached_max &&
+ (env->le_ctx.lc_tags & ~LCT_HAS_EXIT) == LCT_CL_THREAD &&
+ (env->le_ses->lc_tags & ~LCT_HAS_EXIT) == LCT_SESSION) {
+ spin_lock(&cl_envs_guard);
+ list_add(&cle->ce_linkage, &cl_envs);
+ cl_envs_cached_nr++;
+ spin_unlock(&cl_envs_guard);
+ } else {
+ cl_env_fini(cle);
+ }
}
}
EXPORT_SYMBOL(cl_env_put);
@@ -914,6 +969,104 @@ void cl_lvb2attr(struct cl_attr *attr, const struct ost_lvb *lvb)
}
EXPORT_SYMBOL(cl_lvb2attr);
+static struct cl_env cl_env_percpu[NR_CPUS];
+
+static int cl_env_percpu_init(void)
+{
+ struct cl_env *cle;
+ int tags = LCT_REMEMBER | LCT_NOREF;
+ int i, j;
+ int rc = 0;
+
+ for_each_possible_cpu(i) {
+ struct lu_env *env;
+
+ cle = &cl_env_percpu[i];
+ env = &cle->ce_lu;
+
+ INIT_LIST_HEAD(&cle->ce_linkage);
+ cle->ce_magic = &cl_env_init0;
+ rc = lu_env_init(env, LCT_CL_THREAD | tags);
+ if (rc == 0) {
+ rc = lu_context_init(&cle->ce_ses, LCT_SESSION | tags);
+ if (rc == 0) {
+ lu_context_enter(&cle->ce_ses);
+ env->le_ses = &cle->ce_ses;
+ } else {
+ lu_env_fini(env);
+ }
+ }
+ if (rc != 0)
+ break;
+ }
+ if (rc != 0) {
+ /* Indices 0 to i (excluding i) were correctly initialized,
+ * thus we must uninitialize up to i, the rest are undefined.
+ */
+ for (j = 0; j < i; j++) {
+			cle = &cl_env_percpu[j];
+ lu_context_exit(&cle->ce_ses);
+ lu_context_fini(&cle->ce_ses);
+ lu_env_fini(&cle->ce_lu);
+ }
+ }
+
+ return rc;
+}
+
+static void cl_env_percpu_fini(void)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ struct cl_env *cle = &cl_env_percpu[i];
+
+ lu_context_exit(&cle->ce_ses);
+ lu_context_fini(&cle->ce_ses);
+ lu_env_fini(&cle->ce_lu);
+ }
+}
+
+static void cl_env_percpu_refill(void)
+{
+ int i;
+
+ for_each_possible_cpu(i)
+ lu_env_refill(&cl_env_percpu[i].ce_lu);
+}
+
+void cl_env_percpu_put(struct lu_env *env)
+{
+ struct cl_env *cle;
+ int cpu;
+
+ cpu = smp_processor_id();
+ cle = cl_env_container(env);
+ LASSERT(cle == &cl_env_percpu[cpu]);
+
+ cle->ce_ref--;
+ LASSERT(cle->ce_ref == 0);
+
+ CL_ENV_DEC(busy);
+ cl_env_detach(cle);
+ cle->ce_debug = NULL;
+
+ put_cpu();
+}
+EXPORT_SYMBOL(cl_env_percpu_put);
+
+struct lu_env *cl_env_percpu_get(void)
+{
+ struct cl_env *cle;
+
+ cle = &cl_env_percpu[get_cpu()];
+ cl_env_init0(cle, __builtin_return_address(0));
+
+ cl_env_attach(cle);
+ return &cle->ce_lu;
+}
+EXPORT_SYMBOL(cl_env_percpu_get);
+
/*****************************************************************************
*
* Temporary prototype thing: mirror obd-devices into cl devices.
@@ -944,8 +1097,9 @@ struct cl_device *cl_type_setup(const struct lu_env *env, struct lu_site *site,
CERROR("can't init device '%s', %d\n", typename, rc);
d = ERR_PTR(rc);
}
- } else
+ } else {
CERROR("Cannot allocate device: '%s'\n", typename);
+ }
return lu2cl_dev(d);
}
EXPORT_SYMBOL(cl_type_setup);
@@ -959,12 +1113,6 @@ void cl_stack_fini(const struct lu_env *env, struct cl_device *cl)
}
EXPORT_SYMBOL(cl_stack_fini);
-int cl_lock_init(void);
-void cl_lock_fini(void);
-
-int cl_page_init(void);
-void cl_page_fini(void);
-
static struct lu_context_key cl_key;
struct cl_thread_info *cl_env_info(const struct lu_env *env)
@@ -1059,17 +1207,13 @@ int cl_global_init(void)
if (result)
goto out_kmem;
- result = cl_lock_init();
+ result = cl_env_percpu_init();
if (result)
+ /* no cl_env_percpu_fini on error */
goto out_context;
- result = cl_page_init();
- if (result)
- goto out_lock;
-
return 0;
-out_lock:
- cl_lock_fini();
+
out_context:
lu_context_key_degister(&cl_key);
out_kmem:
@@ -1084,8 +1228,7 @@ out_store:
*/
void cl_global_fini(void)
{
- cl_lock_fini();
- cl_page_fini();
+ cl_env_percpu_fini();
lu_context_key_degister(&cl_key);
lu_kmem_fini(cl_object_caches);
cl_env_store_fini();
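The per-CPU environments added here sit alongside the existing cached-environment pool and hand out a pre-initialized struct lu_env per CPU. The calling pattern below is an illustrative sketch, not part of the patch, inferred only from cl_env_percpu_get() and cl_env_percpu_put() as added above; the function name is invented:

static void example_percpu_env_user(void)
{
	struct lu_env *env;

	/*
	 * cl_env_percpu_get() pins the caller to the current CPU via
	 * get_cpu(), so the section below must not sleep.
	 */
	env = cl_env_percpu_get();

	/* ... short, non-blocking work that needs a struct lu_env ... */

	/* Drops the last reference and calls put_cpu(). */
	cl_env_percpu_put(env);
}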
diff --git a/drivers/staging/lustre/lustre/obdclass/cl_page.c b/drivers/staging/lustre/lustre/obdclass/cl_page.c
index 394580016638..b754f516e557 100644
--- a/drivers/staging/lustre/lustre/obdclass/cl_page.c
+++ b/drivers/staging/lustre/lustre/obdclass/cl_page.c
@@ -36,6 +36,7 @@
* Client Lustre Page.
*
* Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@intel.com>
*/
#define DEBUG_SUBSYSTEM S_CLASS
@@ -48,8 +49,7 @@
#include "../include/cl_object.h"
#include "cl_internal.h"
-static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
- int radix);
+static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg);
# define PASSERT(env, page, expr) \
do { \
@@ -63,24 +63,11 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
((void)sizeof(env), (void)sizeof(page), (void)sizeof !!(exp))
/**
- * Internal version of cl_page_top, it should be called if the page is
- * known to be not freed, says with page referenced, or radix tree lock held,
- * or page owned.
- */
-static struct cl_page *cl_page_top_trusted(struct cl_page *page)
-{
- while (page->cp_parent)
- page = page->cp_parent;
- return page;
-}
-
-/**
* Internal version of cl_page_get().
*
* This function can be used to obtain initial reference to previously
* unreferenced cached object. It can be called only if concurrent page
- * reclamation is somehow prevented, e.g., by locking page radix-tree
- * (cl_object_header::hdr->coh_page_guard), or by keeping a lock on a VM page,
+ * reclamation is somehow prevented, e.g., by keeping a lock on a VM page,
* associated with \a page.
*
* Use with care! Not exported.
@@ -103,142 +90,12 @@ cl_page_at_trusted(const struct cl_page *page,
{
const struct cl_page_slice *slice;
- page = cl_page_top_trusted((struct cl_page *)page);
- do {
- list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
- if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
- return slice;
- }
- page = page->cp_child;
- } while (page);
- return NULL;
-}
-
-/**
- * Returns a page with given index in the given object, or NULL if no page is
- * found. Acquires a reference on \a page.
- *
- * Locking: called under cl_object_header::coh_page_guard spin-lock.
- */
-struct cl_page *cl_page_lookup(struct cl_object_header *hdr, pgoff_t index)
-{
- struct cl_page *page;
-
- assert_spin_locked(&hdr->coh_page_guard);
-
- page = radix_tree_lookup(&hdr->coh_tree, index);
- if (page)
- cl_page_get_trust(page);
- return page;
-}
-EXPORT_SYMBOL(cl_page_lookup);
-
-/**
- * Returns a list of pages by a given [start, end] of \a obj.
- *
- * \param resched If not NULL, then we give up before hogging CPU for too
- * long and set *resched = 1, in that case caller should implement a retry
- * logic.
- *
- * Gang tree lookup (radix_tree_gang_lookup()) optimization is absolutely
- * crucial in the face of [offset, EOF] locks.
- *
- * Return at least one page in @queue unless there is no covered page.
- */
-int cl_page_gang_lookup(const struct lu_env *env, struct cl_object *obj,
- struct cl_io *io, pgoff_t start, pgoff_t end,
- cl_page_gang_cb_t cb, void *cbdata)
-{
- struct cl_object_header *hdr;
- struct cl_page *page;
- struct cl_page **pvec;
- const struct cl_page_slice *slice;
- const struct lu_device_type *dtype;
- pgoff_t idx;
- unsigned int nr;
- unsigned int i;
- unsigned int j;
- int res = CLP_GANG_OKAY;
- int tree_lock = 1;
-
- idx = start;
- hdr = cl_object_header(obj);
- pvec = cl_env_info(env)->clt_pvec;
- dtype = cl_object_top(obj)->co_lu.lo_dev->ld_type;
- spin_lock(&hdr->coh_page_guard);
- while ((nr = radix_tree_gang_lookup(&hdr->coh_tree, (void **)pvec,
- idx, CLT_PVEC_SIZE)) > 0) {
- int end_of_region = 0;
-
- idx = pvec[nr - 1]->cp_index + 1;
- for (i = 0, j = 0; i < nr; ++i) {
- page = pvec[i];
- pvec[i] = NULL;
-
- LASSERT(page->cp_type == CPT_CACHEABLE);
- if (page->cp_index > end) {
- end_of_region = 1;
- break;
- }
- if (page->cp_state == CPS_FREEING)
- continue;
-
- slice = cl_page_at_trusted(page, dtype);
- /*
- * Pages for lsm-less file has no underneath sub-page
- * for osc, in case of ...
- */
- PASSERT(env, page, slice);
-
- page = slice->cpl_page;
- /*
- * Can safely call cl_page_get_trust() under
- * radix-tree spin-lock.
- *
- * XXX not true, because @page is from object another
- * than @hdr and protected by different tree lock.
- */
- cl_page_get_trust(page);
- lu_ref_add_atomic(&page->cp_reference,
- "gang_lookup", current);
- pvec[j++] = page;
- }
-
- /*
- * Here a delicate locking dance is performed. Current thread
- * holds a reference to a page, but has to own it before it
- * can be placed into queue. Owning implies waiting, so
- * radix-tree lock is to be released. After a wait one has to
- * check that pages weren't truncated (cl_page_own() returns
- * error in the latter case).
- */
- spin_unlock(&hdr->coh_page_guard);
- tree_lock = 0;
-
- for (i = 0; i < j; ++i) {
- page = pvec[i];
- if (res == CLP_GANG_OKAY)
- res = (*cb)(env, io, page, cbdata);
- lu_ref_del(&page->cp_reference,
- "gang_lookup", current);
- cl_page_put(env, page);
- }
- if (nr < CLT_PVEC_SIZE || end_of_region)
- break;
-
- if (res == CLP_GANG_OKAY && need_resched())
- res = CLP_GANG_RESCHED;
- if (res != CLP_GANG_OKAY)
- break;
-
- spin_lock(&hdr->coh_page_guard);
- tree_lock = 1;
+ list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
+ if (slice->cpl_obj->co_lu.lo_dev->ld_type == dtype)
+ return slice;
}
- if (tree_lock)
- spin_unlock(&hdr->coh_page_guard);
- return res;
+ return NULL;
}
-EXPORT_SYMBOL(cl_page_gang_lookup);
static void cl_page_free(const struct lu_env *env, struct cl_page *page)
{
@@ -247,17 +104,16 @@ static void cl_page_free(const struct lu_env *env, struct cl_page *page)
PASSERT(env, page, list_empty(&page->cp_batch));
PASSERT(env, page, !page->cp_owner);
PASSERT(env, page, !page->cp_req);
- PASSERT(env, page, !page->cp_parent);
PASSERT(env, page, page->cp_state == CPS_FREEING);
- might_sleep();
while (!list_empty(&page->cp_layers)) {
struct cl_page_slice *slice;
slice = list_entry(page->cp_layers.next,
struct cl_page_slice, cpl_linkage);
list_del_init(page->cp_layers.next);
- slice->cpl_ops->cpo_fini(env, slice);
+ if (unlikely(slice->cpl_ops->cpo_fini))
+ slice->cpl_ops->cpo_fini(env, slice);
}
lu_object_ref_del_at(&obj->co_lu, &page->cp_obj_ref, "cl_page", page);
cl_object_put(env, obj);
@@ -276,10 +132,10 @@ static inline void cl_page_state_set_trust(struct cl_page *page,
*(enum cl_page_state *)&page->cp_state = state;
}
-static struct cl_page *cl_page_alloc(const struct lu_env *env,
- struct cl_object *o, pgoff_t ind,
- struct page *vmpage,
- enum cl_page_type type)
+struct cl_page *cl_page_alloc(const struct lu_env *env,
+ struct cl_object *o, pgoff_t ind,
+ struct page *vmpage,
+ enum cl_page_type type)
{
struct cl_page *page;
struct lu_object_header *head;
@@ -289,13 +145,11 @@ static struct cl_page *cl_page_alloc(const struct lu_env *env,
int result = 0;
atomic_set(&page->cp_ref, 1);
- if (type == CPT_CACHEABLE) /* for radix tree */
- atomic_inc(&page->cp_ref);
page->cp_obj = o;
cl_object_get(o);
lu_object_ref_add_at(&o->co_lu, &page->cp_obj_ref, "cl_page",
page);
- page->cp_index = ind;
+ page->cp_vmpage = vmpage;
cl_page_state_set_trust(page, CPS_CACHED);
page->cp_type = type;
INIT_LIST_HEAD(&page->cp_layers);
@@ -306,10 +160,10 @@ static struct cl_page *cl_page_alloc(const struct lu_env *env,
head = o->co_lu.lo_header;
list_for_each_entry(o, &head->loh_layers, co_lu.lo_linkage) {
if (o->co_ops->coo_page_init) {
- result = o->co_ops->coo_page_init(env, o,
- page, vmpage);
+ result = o->co_ops->coo_page_init(env, o, page,
+ ind);
if (result != 0) {
- cl_page_delete0(env, page, 0);
+ cl_page_delete0(env, page);
cl_page_free(env, page);
page = ERR_PTR(result);
break;
@@ -321,6 +175,7 @@ static struct cl_page *cl_page_alloc(const struct lu_env *env,
}
return page;
}
+EXPORT_SYMBOL(cl_page_alloc);
/**
* Returns a cl_page with index \a idx at the object \a o, and associated with
@@ -333,16 +188,13 @@ static struct cl_page *cl_page_alloc(const struct lu_env *env,
*
* \see cl_object_find(), cl_lock_find()
*/
-static struct cl_page *cl_page_find0(const struct lu_env *env,
- struct cl_object *o,
- pgoff_t idx, struct page *vmpage,
- enum cl_page_type type,
- struct cl_page *parent)
+struct cl_page *cl_page_find(const struct lu_env *env,
+ struct cl_object *o,
+ pgoff_t idx, struct page *vmpage,
+ enum cl_page_type type)
{
struct cl_page *page = NULL;
- struct cl_page *ghost = NULL;
struct cl_object_header *hdr;
- int err;
LASSERT(type == CPT_CACHEABLE || type == CPT_TRANSIENT);
might_sleep();
@@ -368,120 +220,25 @@ static struct cl_page *cl_page_find0(const struct lu_env *env,
* reference on it.
*/
page = cl_vmpage_page(vmpage, o);
- PINVRNT(env, page,
- ergo(page,
- cl_page_vmpage(env, page) == vmpage &&
- (void *)radix_tree_lookup(&hdr->coh_tree,
- idx) == page));
- }
- if (page)
- return page;
+ if (page)
+ return page;
+ }
/* allocate and initialize cl_page */
page = cl_page_alloc(env, o, idx, vmpage, type);
- if (IS_ERR(page))
- return page;
-
- if (type == CPT_TRANSIENT) {
- if (parent) {
- LASSERT(!page->cp_parent);
- page->cp_parent = parent;
- parent->cp_child = page;
- }
- return page;
- }
-
- /*
- * XXX optimization: use radix_tree_preload() here, and change tree
- * gfp mask to GFP_KERNEL in cl_object_header_init().
- */
- spin_lock(&hdr->coh_page_guard);
- err = radix_tree_insert(&hdr->coh_tree, idx, page);
- if (err != 0) {
- ghost = page;
- /*
- * Noted by Jay: a lock on \a vmpage protects cl_page_find()
- * from this race, but
- *
- * 0. it's better to have cl_page interface "locally
- * consistent" so that its correctness can be reasoned
- * about without appealing to the (obscure world of) VM
- * locking.
- *
- * 1. handling this race allows ->coh_tree to remain
- * consistent even when VM locking is somehow busted,
- * which is very useful during diagnosing and debugging.
- */
- page = ERR_PTR(err);
- CL_PAGE_DEBUG(D_ERROR, env, ghost,
- "fail to insert into radix tree: %d\n", err);
- } else {
- if (parent) {
- LASSERT(!page->cp_parent);
- page->cp_parent = parent;
- parent->cp_child = page;
- }
- hdr->coh_pages++;
- }
- spin_unlock(&hdr->coh_page_guard);
-
- if (unlikely(ghost)) {
- cl_page_delete0(env, ghost, 0);
- cl_page_free(env, ghost);
- }
return page;
}
-
-struct cl_page *cl_page_find(const struct lu_env *env, struct cl_object *o,
- pgoff_t idx, struct page *vmpage,
- enum cl_page_type type)
-{
- return cl_page_find0(env, o, idx, vmpage, type, NULL);
-}
EXPORT_SYMBOL(cl_page_find);
-struct cl_page *cl_page_find_sub(const struct lu_env *env, struct cl_object *o,
- pgoff_t idx, struct page *vmpage,
- struct cl_page *parent)
-{
- return cl_page_find0(env, o, idx, vmpage, parent->cp_type, parent);
-}
-EXPORT_SYMBOL(cl_page_find_sub);
-
static inline int cl_page_invariant(const struct cl_page *pg)
{
- struct cl_object_header *header;
- struct cl_page *parent;
- struct cl_page *child;
- struct cl_io *owner;
-
/*
* Page invariant is protected by a VM lock.
*/
LINVRNT(cl_page_is_vmlocked(NULL, pg));
- header = cl_object_header(pg->cp_obj);
- parent = pg->cp_parent;
- child = pg->cp_child;
- owner = pg->cp_owner;
-
- return cl_page_in_use(pg) &&
- ergo(parent, parent->cp_child == pg) &&
- ergo(child, child->cp_parent == pg) &&
- ergo(child, pg->cp_obj != child->cp_obj) &&
- ergo(parent, pg->cp_obj != parent->cp_obj) &&
- ergo(owner && parent,
- parent->cp_owner == pg->cp_owner->ci_parent) &&
- ergo(owner && child, child->cp_owner->ci_parent == owner) &&
- /*
- * Either page is early in initialization (has neither child
- * nor parent yet), or it is in the object radix tree.
- */
- ergo(pg->cp_state < CPS_FREEING && pg->cp_type == CPT_CACHEABLE,
- (void *)radix_tree_lookup(&header->coh_tree,
- pg->cp_index) == pg ||
- (!child && !parent));
+ return cl_page_in_use_noref(pg);
}
static void cl_page_state_set0(const struct lu_env *env,
@@ -534,13 +291,9 @@ static void cl_page_state_set0(const struct lu_env *env,
old = page->cp_state;
PASSERT(env, page, allowed_transitions[old][state]);
CL_PAGE_HEADER(D_TRACE, env, page, "%d -> %d\n", old, state);
- for (; page; page = page->cp_child) {
- PASSERT(env, page, page->cp_state == old);
- PASSERT(env, page,
- equi(state == CPS_OWNED, page->cp_owner));
-
- cl_page_state_set_trust(page, state);
- }
+ PASSERT(env, page, page->cp_state == old);
+ PASSERT(env, page, equi(state == CPS_OWNED, page->cp_owner));
+ cl_page_state_set_trust(page, state);
}
static void cl_page_state_set(const struct lu_env *env,
@@ -574,8 +327,6 @@ EXPORT_SYMBOL(cl_page_get);
*/
void cl_page_put(const struct lu_env *env, struct cl_page *page)
{
- PASSERT(env, page, atomic_read(&page->cp_ref) > !!page->cp_parent);
-
CL_PAGE_HEADER(D_TRACE, env, page, "%d\n",
atomic_read(&page->cp_ref));
@@ -595,34 +346,10 @@ void cl_page_put(const struct lu_env *env, struct cl_page *page)
EXPORT_SYMBOL(cl_page_put);
/**
- * Returns a VM page associated with a given cl_page.
- */
-struct page *cl_page_vmpage(const struct lu_env *env, struct cl_page *page)
-{
- const struct cl_page_slice *slice;
-
- /*
- * Find uppermost layer with ->cpo_vmpage() method, and return its
- * result.
- */
- page = cl_page_top(page);
- do {
- list_for_each_entry(slice, &page->cp_layers, cpl_linkage) {
- if (slice->cpl_ops->cpo_vmpage)
- return slice->cpl_ops->cpo_vmpage(env, slice);
- }
- page = page->cp_child;
- } while (page);
- LBUG(); /* ->cpo_vmpage() has to be defined somewhere in the stack */
-}
-EXPORT_SYMBOL(cl_page_vmpage);
-
-/**
* Returns a cl_page associated with a VM page, and given cl_object.
*/
struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj)
{
- struct cl_page *top;
struct cl_page *page;
KLASSERT(PageLocked(vmpage));
@@ -633,36 +360,15 @@ struct cl_page *cl_vmpage_page(struct page *vmpage, struct cl_object *obj)
* bottom-to-top pass.
*/
- /*
- * This loop assumes that ->private points to the top-most page. This
- * can be rectified easily.
- */
- top = (struct cl_page *)vmpage->private;
- if (!top)
- return NULL;
-
- for (page = top; page; page = page->cp_child) {
- if (cl_object_same(page->cp_obj, obj)) {
- cl_page_get_trust(page);
- break;
- }
+ page = (struct cl_page *)vmpage->private;
+ if (page) {
+ cl_page_get_trust(page);
+ LASSERT(page->cp_type == CPT_CACHEABLE);
}
- LASSERT(ergo(page, page->cp_type == CPT_CACHEABLE));
return page;
}
EXPORT_SYMBOL(cl_vmpage_page);
-/**
- * Returns the top-page for a given page.
- *
- * \see cl_object_top(), cl_io_top()
- */
-struct cl_page *cl_page_top(struct cl_page *page)
-{
- return cl_page_top_trusted(page);
-}
-EXPORT_SYMBOL(cl_page_top);
-
const struct cl_page_slice *cl_page_at(const struct cl_page *page,
const struct lu_device_type *dtype)
{
@@ -682,26 +388,43 @@ EXPORT_SYMBOL(cl_page_at);
int (*__method)_proto; \
\
__result = 0; \
- __page = cl_page_top(__page); \
- do { \
- list_for_each_entry(__scan, &__page->cp_layers, \
- cpl_linkage) { \
- __method = *(void **)((char *)__scan->cpl_ops + \
- __op); \
- if (__method) { \
- __result = (*__method)(__env, __scan, \
- ## __VA_ARGS__); \
- if (__result != 0) \
- break; \
- } \
- } \
- __page = __page->cp_child; \
- } while (__page && __result == 0); \
+ list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) { \
+ __method = *(void **)((char *)__scan->cpl_ops + __op); \
+ if (__method) { \
+ __result = (*__method)(__env, __scan, ## __VA_ARGS__); \
+ if (__result != 0) \
+ break; \
+ } \
+ } \
if (__result > 0) \
__result = 0; \
__result; \
})
+#define CL_PAGE_INVOKE_REVERSE(_env, _page, _op, _proto, ...) \
+({ \
+ const struct lu_env *__env = (_env); \
+ struct cl_page *__page = (_page); \
+ const struct cl_page_slice *__scan; \
+ int __result; \
+ ptrdiff_t __op = (_op); \
+ int (*__method)_proto; \
+ \
+ __result = 0; \
+ list_for_each_entry_reverse(__scan, &__page->cp_layers, \
+ cpl_linkage) { \
+ __method = *(void **)((char *)__scan->cpl_ops + __op); \
+ if (__method) { \
+ __result = (*__method)(__env, __scan, ## __VA_ARGS__); \
+ if (__result != 0) \
+ break; \
+ } \
+ } \
+ if (__result > 0) \
+ __result = 0; \
+ __result; \
+})
+
#define CL_PAGE_INVOID(_env, _page, _op, _proto, ...) \
do { \
const struct lu_env *__env = (_env); \
@@ -710,18 +433,11 @@ do { \
ptrdiff_t __op = (_op); \
void (*__method)_proto; \
\
- __page = cl_page_top(__page); \
- do { \
- list_for_each_entry(__scan, &__page->cp_layers, \
- cpl_linkage) { \
- __method = *(void **)((char *)__scan->cpl_ops + \
- __op); \
- if (__method) \
- (*__method)(__env, __scan, \
- ## __VA_ARGS__); \
- } \
- __page = __page->cp_child; \
- } while (__page); \
+ list_for_each_entry(__scan, &__page->cp_layers, cpl_linkage) { \
+ __method = *(void **)((char *)__scan->cpl_ops + __op); \
+ if (__method) \
+ (*__method)(__env, __scan, ## __VA_ARGS__); \
+ } \
} while (0)
#define CL_PAGE_INVOID_REVERSE(_env, _page, _op, _proto, ...) \
@@ -732,20 +448,11 @@ do { \
ptrdiff_t __op = (_op); \
void (*__method)_proto; \
\
- /* get to the bottom page. */ \
- while (__page->cp_child) \
- __page = __page->cp_child; \
- do { \
- list_for_each_entry_reverse(__scan, &__page->cp_layers, \
- cpl_linkage) { \
- __method = *(void **)((char *)__scan->cpl_ops + \
- __op); \
- if (__method) \
- (*__method)(__env, __scan, \
- ## __VA_ARGS__); \
- } \
- __page = __page->cp_parent; \
- } while (__page); \
+ list_for_each_entry_reverse(__scan, &__page->cp_layers, cpl_linkage) { \
+ __method = *(void **)((char *)__scan->cpl_ops + __op); \
+ if (__method) \
+ (*__method)(__env, __scan, ## __VA_ARGS__); \
+ } \
} while (0)
static int cl_page_invoke(const struct lu_env *env,
@@ -771,20 +478,17 @@ static void cl_page_invoid(const struct lu_env *env,
static void cl_page_owner_clear(struct cl_page *page)
{
- for (page = cl_page_top(page); page; page = page->cp_child) {
- if (page->cp_owner) {
- LASSERT(page->cp_owner->ci_owned_nr > 0);
- page->cp_owner->ci_owned_nr--;
- page->cp_owner = NULL;
- page->cp_task = NULL;
- }
+ if (page->cp_owner) {
+ LASSERT(page->cp_owner->ci_owned_nr > 0);
+ page->cp_owner->ci_owned_nr--;
+ page->cp_owner = NULL;
+ page->cp_task = NULL;
}
}
static void cl_page_owner_set(struct cl_page *page)
{
- for (page = cl_page_top(page); page; page = page->cp_child)
- page->cp_owner->ci_owned_nr++;
+ page->cp_owner->ci_owned_nr++;
}
void cl_page_disown0(const struct lu_env *env,
@@ -794,7 +498,7 @@ void cl_page_disown0(const struct lu_env *env,
state = pg->cp_state;
PINVRNT(env, pg, state == CPS_OWNED || state == CPS_FREEING);
- PINVRNT(env, pg, cl_page_invariant(pg));
+ PINVRNT(env, pg, cl_page_invariant(pg) || state == CPS_FREEING);
cl_page_owner_clear(pg);
if (state == CPS_OWNED)
@@ -815,8 +519,9 @@ void cl_page_disown0(const struct lu_env *env,
*/
int cl_page_is_owned(const struct cl_page *pg, const struct cl_io *io)
{
+ struct cl_io *top = cl_io_top((struct cl_io *)io);
LINVRNT(cl_object_same(pg->cp_obj, io->ci_obj));
- return pg->cp_state == CPS_OWNED && pg->cp_owner == io;
+ return pg->cp_state == CPS_OWNED && pg->cp_owner == top;
}
EXPORT_SYMBOL(cl_page_is_owned);
@@ -847,7 +552,6 @@ static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
PINVRNT(env, pg, !cl_page_is_owned(pg, io));
- pg = cl_page_top(pg);
io = cl_io_top(io);
if (pg->cp_state == CPS_FREEING) {
@@ -861,7 +565,7 @@ static int cl_page_own0(const struct lu_env *env, struct cl_io *io,
if (result == 0) {
PASSERT(env, pg, !pg->cp_owner);
PASSERT(env, pg, !pg->cp_req);
- pg->cp_owner = io;
+ pg->cp_owner = cl_io_top(io);
pg->cp_task = current;
cl_page_owner_set(pg);
if (pg->cp_state != CPS_FREEING) {
@@ -914,12 +618,11 @@ void cl_page_assume(const struct lu_env *env,
{
PINVRNT(env, pg, cl_object_same(pg->cp_obj, io->ci_obj));
- pg = cl_page_top(pg);
io = cl_io_top(io);
cl_page_invoid(env, io, pg, CL_PAGE_OP(cpo_assume));
PASSERT(env, pg, !pg->cp_owner);
- pg->cp_owner = io;
+ pg->cp_owner = cl_io_top(io);
pg->cp_task = current;
cl_page_owner_set(pg);
cl_page_state_set(env, pg, CPS_OWNED);
@@ -943,7 +646,6 @@ void cl_page_unassume(const struct lu_env *env,
PINVRNT(env, pg, cl_page_is_owned(pg, io));
PINVRNT(env, pg, cl_page_invariant(pg));
- pg = cl_page_top(pg);
io = cl_io_top(io);
cl_page_owner_clear(pg);
cl_page_state_set(env, pg, CPS_CACHED);
@@ -968,9 +670,9 @@ EXPORT_SYMBOL(cl_page_unassume);
void cl_page_disown(const struct lu_env *env,
struct cl_io *io, struct cl_page *pg)
{
- PINVRNT(env, pg, cl_page_is_owned(pg, io));
+ PINVRNT(env, pg, cl_page_is_owned(pg, io) ||
+ pg->cp_state == CPS_FREEING);
- pg = cl_page_top(pg);
io = cl_io_top(io);
cl_page_disown0(env, io, pg);
}
@@ -1001,12 +703,8 @@ EXPORT_SYMBOL(cl_page_discard);
 * pages, e.g., in an error handling cl_page_find()->cl_page_delete0()
* path. Doesn't check page invariant.
*/
-static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
- int radix)
+static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg)
{
- struct cl_page *tmp = pg;
-
- PASSERT(env, pg, pg == cl_page_top(pg));
PASSERT(env, pg, pg->cp_state != CPS_FREEING);
/*
@@ -1014,41 +712,11 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
*/
cl_page_owner_clear(pg);
- /*
- * unexport the page firstly before freeing it so that
- * the page content is considered to be invalid.
- * We have to do this because a CPS_FREEING cl_page may
- * be NOT under the protection of a cl_lock.
- * Afterwards, if this page is found by other threads, then this
- * page will be forced to reread.
- */
- cl_page_export(env, pg, 0);
cl_page_state_set0(env, pg, CPS_FREEING);
- CL_PAGE_INVOID(env, pg, CL_PAGE_OP(cpo_delete),
- (const struct lu_env *, const struct cl_page_slice *));
-
- if (tmp->cp_type == CPT_CACHEABLE) {
- if (!radix)
- /* !radix means that @pg is not yet in the radix tree,
- * skip removing it.
- */
- tmp = pg->cp_child;
- for (; tmp; tmp = tmp->cp_child) {
- void *value;
- struct cl_object_header *hdr;
-
- hdr = cl_object_header(tmp->cp_obj);
- spin_lock(&hdr->coh_page_guard);
- value = radix_tree_delete(&hdr->coh_tree,
- tmp->cp_index);
- PASSERT(env, tmp, value == tmp);
- PASSERT(env, tmp, hdr->coh_pages > 0);
- hdr->coh_pages--;
- spin_unlock(&hdr->coh_page_guard);
- cl_page_put(env, tmp);
- }
- }
+ CL_PAGE_INVOID_REVERSE(env, pg, CL_PAGE_OP(cpo_delete),
+ (const struct lu_env *,
+ const struct cl_page_slice *));
}
/**
@@ -1070,7 +738,6 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
* Once page reaches cl_page_state::CPS_FREEING, all remaining references will
* drain after some time, at which point page will be recycled.
*
- * \pre pg == cl_page_top(pg)
* \pre VM page is locked
* \post pg->cp_state == CPS_FREEING
*
@@ -1079,30 +746,11 @@ static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg,
void cl_page_delete(const struct lu_env *env, struct cl_page *pg)
{
PINVRNT(env, pg, cl_page_invariant(pg));
- cl_page_delete0(env, pg, 1);
+ cl_page_delete0(env, pg);
}
EXPORT_SYMBOL(cl_page_delete);
/**
- * Unmaps page from user virtual memory.
- *
- * Calls cl_page_operations::cpo_unmap() through all layers top-to-bottom. The
- * layer responsible for VM interaction has to unmap page from user space
- * virtual memory.
- *
- * \see cl_page_operations::cpo_unmap()
- */
-int cl_page_unmap(const struct lu_env *env,
- struct cl_io *io, struct cl_page *pg)
-{
- PINVRNT(env, pg, cl_page_is_owned(pg, io));
- PINVRNT(env, pg, cl_page_invariant(pg));
-
- return cl_page_invoke(env, io, pg, CL_PAGE_OP(cpo_unmap));
-}
-EXPORT_SYMBOL(cl_page_unmap);
-
-/**
* Marks page up-to-date.
*
* Call cl_page_operations::cpo_export() through all layers top-to-bottom. The
@@ -1129,7 +777,6 @@ int cl_page_is_vmlocked(const struct lu_env *env, const struct cl_page *pg)
int result;
const struct cl_page_slice *slice;
- pg = cl_page_top_trusted((struct cl_page *)pg);
slice = container_of(pg->cp_layers.next,
const struct cl_page_slice, cpl_linkage);
PASSERT(env, pg, slice->cpl_ops->cpo_is_vmlocked);
@@ -1241,7 +888,7 @@ void cl_page_completion(const struct lu_env *env,
cl_page_put(env, pg);
if (anchor)
- cl_sync_io_note(anchor, ioret);
+ cl_sync_io_note(env, anchor, ioret);
}
EXPORT_SYMBOL(cl_page_completion);
@@ -1276,44 +923,6 @@ int cl_page_make_ready(const struct lu_env *env, struct cl_page *pg,
EXPORT_SYMBOL(cl_page_make_ready);
/**
- * Notify layers that high level io decided to place this page into a cache
- * for future transfer.
- *
- * The layer implementing transfer engine (osc) has to register this page in
- * its queues.
- *
- * \pre cl_page_is_owned(pg, io)
- * \post cl_page_is_owned(pg, io)
- *
- * \see cl_page_operations::cpo_cache_add()
- */
-int cl_page_cache_add(const struct lu_env *env, struct cl_io *io,
- struct cl_page *pg, enum cl_req_type crt)
-{
- const struct cl_page_slice *scan;
- int result = 0;
-
- PINVRNT(env, pg, crt < CRT_NR);
- PINVRNT(env, pg, cl_page_is_owned(pg, io));
- PINVRNT(env, pg, cl_page_invariant(pg));
-
- if (crt >= CRT_NR)
- return -EINVAL;
-
- list_for_each_entry(scan, &pg->cp_layers, cpl_linkage) {
- if (!scan->cpl_ops->io[crt].cpo_cache_add)
- continue;
-
- result = scan->cpl_ops->io[crt].cpo_cache_add(env, scan, io);
- if (result != 0)
- break;
- }
- CL_PAGE_HEADER(D_TRACE, env, pg, "%d %d\n", crt, result);
- return result;
-}
-EXPORT_SYMBOL(cl_page_cache_add);
-
-/**
 * Called if a page is being written back by the kernel.
*
* \pre cl_page_is_owned(pg, io)
@@ -1344,68 +953,21 @@ EXPORT_SYMBOL(cl_page_flush);
* \see cl_page_operations::cpo_is_under_lock()
*/
int cl_page_is_under_lock(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page)
+ struct cl_page *page, pgoff_t *max_index)
{
int rc;
PINVRNT(env, page, cl_page_invariant(page));
- rc = CL_PAGE_INVOKE(env, page, CL_PAGE_OP(cpo_is_under_lock),
- (const struct lu_env *,
- const struct cl_page_slice *, struct cl_io *),
- io);
- PASSERT(env, page, rc != 0);
+ rc = CL_PAGE_INVOKE_REVERSE(env, page, CL_PAGE_OP(cpo_is_under_lock),
+ (const struct lu_env *,
+ const struct cl_page_slice *,
+ struct cl_io *, pgoff_t *),
+ io, max_index);
return rc;
}
EXPORT_SYMBOL(cl_page_is_under_lock);
-static int page_prune_cb(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, void *cbdata)
-{
- cl_page_own(env, io, page);
- cl_page_unmap(env, io, page);
- cl_page_discard(env, io, page);
- cl_page_disown(env, io, page);
- return CLP_GANG_OKAY;
-}
-
-/**
- * Purges all cached pages belonging to the object \a obj.
- */
-int cl_pages_prune(const struct lu_env *env, struct cl_object *clobj)
-{
- struct cl_thread_info *info;
- struct cl_object *obj = cl_object_top(clobj);
- struct cl_io *io;
- int result;
-
- info = cl_env_info(env);
- io = &info->clt_io;
-
- /*
- * initialize the io. This is ugly since we never do IO in this
- * function, we just make cl_page_list functions happy. -jay
- */
- io->ci_obj = obj;
- io->ci_ignore_layout = 1;
- result = cl_io_init(env, io, CIT_MISC, obj);
- if (result != 0) {
- cl_io_fini(env, io);
- return io->ci_result;
- }
-
- do {
- result = cl_page_gang_lookup(env, obj, io, 0, CL_PAGE_EOF,
- page_prune_cb, NULL);
- if (result == CLP_GANG_RESCHED)
- cond_resched();
- } while (result != CLP_GANG_OKAY);
-
- cl_io_fini(env, io);
- return result;
-}
-EXPORT_SYMBOL(cl_pages_prune);
-
/**
* Tells transfer engine that only part of a page is to be transmitted.
*
@@ -1431,9 +993,8 @@ void cl_page_header_print(const struct lu_env *env, void *cookie,
lu_printer_t printer, const struct cl_page *pg)
{
(*printer)(env, cookie,
- "page@%p[%d %p:%lu ^%p_%p %d %d %d %p %p %#x]\n",
+ "page@%p[%d %p %d %d %d %p %p %#x]\n",
pg, atomic_read(&pg->cp_ref), pg->cp_obj,
- pg->cp_index, pg->cp_parent, pg->cp_child,
pg->cp_state, pg->cp_error, pg->cp_type,
pg->cp_owner, pg->cp_req, pg->cp_flags);
}
@@ -1445,11 +1006,7 @@ EXPORT_SYMBOL(cl_page_header_print);
void cl_page_print(const struct lu_env *env, void *cookie,
lu_printer_t printer, const struct cl_page *pg)
{
- struct cl_page *scan;
-
- for (scan = cl_page_top((struct cl_page *)pg); scan;
- scan = scan->cp_child)
- cl_page_header_print(env, cookie, printer, scan);
+ cl_page_header_print(env, cookie, printer, pg);
CL_PAGE_INVOKE(env, (struct cl_page *)pg, CL_PAGE_OP(cpo_print),
(const struct lu_env *env,
const struct cl_page_slice *slice,
@@ -1509,21 +1066,13 @@ EXPORT_SYMBOL(cl_page_size);
* \see cl_lock_slice_add(), cl_req_slice_add(), cl_io_slice_add()
*/
void cl_page_slice_add(struct cl_page *page, struct cl_page_slice *slice,
- struct cl_object *obj,
+ struct cl_object *obj, pgoff_t index,
const struct cl_page_operations *ops)
{
list_add_tail(&slice->cpl_linkage, &page->cp_layers);
slice->cpl_obj = obj;
+ slice->cpl_index = index;
slice->cpl_ops = ops;
slice->cpl_page = page;
}
EXPORT_SYMBOL(cl_page_slice_add);
-
-int cl_page_init(void)
-{
- return 0;
-}
-
-void cl_page_fini(void)
-{
-}
diff --git a/drivers/staging/lustre/lustre/obdclass/class_obd.c b/drivers/staging/lustre/lustre/obdclass/class_obd.c
index c2cf015962dd..f48816af8be7 100644
--- a/drivers/staging/lustre/lustre/obdclass/class_obd.c
+++ b/drivers/staging/lustre/lustre/obdclass/class_obd.c
@@ -60,6 +60,8 @@ unsigned int obd_dump_on_eviction;
EXPORT_SYMBOL(obd_dump_on_eviction);
unsigned int obd_max_dirty_pages = 256;
EXPORT_SYMBOL(obd_max_dirty_pages);
+atomic_t obd_unstable_pages;
+EXPORT_SYMBOL(obd_unstable_pages);
atomic_t obd_dirty_pages;
EXPORT_SYMBOL(obd_dirty_pages);
unsigned int obd_timeout = OBD_TIMEOUT_DEFAULT; /* seconds */
@@ -335,7 +337,6 @@ int class_handle_ioctl(unsigned int cmd, unsigned long arg)
err = 0;
goto out;
}
-
}
if (data->ioc_dev == OBD_DEV_BY_DEVNAME) {
@@ -461,7 +462,7 @@ static int obd_init_checks(void)
CWARN("LPD64 wrong length! strlen(%s)=%d != 2\n", buf, len);
ret = -EINVAL;
}
- if ((u64val & ~CFS_PAGE_MASK) >= PAGE_SIZE) {
+ if ((u64val & ~PAGE_MASK) >= PAGE_SIZE) {
CWARN("mask failed: u64val %llu >= %llu\n", u64val,
(__u64)PAGE_SIZE);
ret = -EINVAL;
diff --git a/drivers/staging/lustre/lustre/obdclass/debug.c b/drivers/staging/lustre/lustre/obdclass/debug.c
index 43a7f7a79b35..e4edfb2c0a20 100644
--- a/drivers/staging/lustre/lustre/obdclass/debug.c
+++ b/drivers/staging/lustre/lustre/obdclass/debug.c
@@ -68,8 +68,8 @@ int block_debug_check(char *who, void *addr, int end, __u64 off, __u64 id)
LASSERT(addr);
- ne_off = le64_to_cpu (off);
- id = le64_to_cpu (id);
+ ne_off = le64_to_cpu(off);
+ id = le64_to_cpu(id);
if (memcmp(addr, (char *)&ne_off, LPDS)) {
CDEBUG(D_ERROR, "%s: id %#llx offset %llu off: %#llx != %#llx\n",
who, id, off, *(__u64 *)addr, ne_off);
diff --git a/drivers/staging/lustre/lustre/obdclass/genops.c b/drivers/staging/lustre/lustre/obdclass/genops.c
index cf97b8f06764..d95f11d62a32 100644
--- a/drivers/staging/lustre/lustre/obdclass/genops.c
+++ b/drivers/staging/lustre/lustre/obdclass/genops.c
@@ -604,7 +604,6 @@ int obd_init_caches(void)
out:
obd_cleanup_caches();
return -ENOMEM;
-
}
/* map connection to client */
diff --git a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
index 8eddf206f1ed..2cd4522462d9 100644
--- a/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
+++ b/drivers/staging/lustre/lustre/obdclass/linux/linux-module.c
@@ -158,9 +158,7 @@ int obd_ioctl_popdata(void __user *arg, void *data, int len)
{
int err;
- err = copy_to_user(arg, data, len);
- if (err)
- err = -EFAULT;
+ err = copy_to_user(arg, data, len) ? -EFAULT : 0;
return err;
}
EXPORT_SYMBOL(obd_ioctl_popdata);
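
The one-line form relies on copy_to_user() returning the number of bytes it could not copy (never a negative errno), so any nonzero result can be collapsed straight to -EFAULT. copy_from_user() follows the same convention, so the mirror-image helper looks identical; a minimal sketch with a hypothetical name, not a function from this driver:

#include <linux/uaccess.h>

static int obd_sketch_pulldata(void *data, const void __user *arg, int len)
{
	/* copy_from_user() returns 0 on success, else the bytes left uncopied */
	return copy_from_user(data, arg, len) ? -EFAULT : 0;
}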
diff --git a/drivers/staging/lustre/lustre/obdclass/llog.c b/drivers/staging/lustre/lustre/obdclass/llog.c
index 992573eae1b1..79194d8cb587 100644
--- a/drivers/staging/lustre/lustre/obdclass/llog.c
+++ b/drivers/staging/lustre/lustre/obdclass/llog.c
@@ -265,7 +265,6 @@ repeat:
for (rec = (struct llog_rec_hdr *)buf;
(char *)rec < buf + LLOG_CHUNK_SIZE;
rec = (struct llog_rec_hdr *)((char *)rec + rec->lrh_len)) {
-
CDEBUG(D_OTHER, "processing rec 0x%p type %#x\n",
rec, rec->lrh_type);
diff --git a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
index d93f42fee420..5a1eae1de2ec 100644
--- a/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
+++ b/drivers/staging/lustre/lustre/obdclass/lprocfs_status.c
@@ -49,7 +49,7 @@
static const char * const obd_connect_names[] = {
"read_only",
"lov_index",
- "unused",
+ "connect_from_mds",
"write_grant",
"server_lock",
"version",
@@ -122,6 +122,56 @@ int obd_connect_flags2str(char *page, int count, __u64 flags, char *sep)
}
EXPORT_SYMBOL(obd_connect_flags2str);
+static void obd_connect_data_seqprint(struct seq_file *m,
+ struct obd_connect_data *ocd)
+{
+ int flags;
+
+ LASSERT(ocd);
+ flags = ocd->ocd_connect_flags;
+
+ seq_printf(m, " connect_data:\n"
+ " flags: %llx\n"
+ " instance: %u\n",
+ ocd->ocd_connect_flags,
+ ocd->ocd_instance);
+ if (flags & OBD_CONNECT_VERSION)
+ seq_printf(m, " target_version: %u.%u.%u.%u\n",
+ OBD_OCD_VERSION_MAJOR(ocd->ocd_version),
+ OBD_OCD_VERSION_MINOR(ocd->ocd_version),
+ OBD_OCD_VERSION_PATCH(ocd->ocd_version),
+ OBD_OCD_VERSION_FIX(ocd->ocd_version));
+ if (flags & OBD_CONNECT_MDS)
+ seq_printf(m, " mdt_index: %d\n", ocd->ocd_group);
+ if (flags & OBD_CONNECT_GRANT)
+ seq_printf(m, " initial_grant: %d\n", ocd->ocd_grant);
+ if (flags & OBD_CONNECT_INDEX)
+ seq_printf(m, " target_index: %u\n", ocd->ocd_index);
+ if (flags & OBD_CONNECT_BRW_SIZE)
+ seq_printf(m, " max_brw_size: %d\n", ocd->ocd_brw_size);
+ if (flags & OBD_CONNECT_IBITS)
+ seq_printf(m, " ibits_known: %llx\n",
+ ocd->ocd_ibits_known);
+ if (flags & OBD_CONNECT_GRANT_PARAM)
+ seq_printf(m, " grant_block_size: %d\n"
+ " grant_inode_size: %d\n"
+ " grant_extent_overhead: %d\n",
+ ocd->ocd_blocksize,
+ ocd->ocd_inodespace,
+ ocd->ocd_grant_extent);
+ if (flags & OBD_CONNECT_TRANSNO)
+ seq_printf(m, " first_transno: %llx\n",
+ ocd->ocd_transno);
+ if (flags & OBD_CONNECT_CKSUM)
+ seq_printf(m, " cksum_types: %#x\n",
+ ocd->ocd_cksum_types);
+ if (flags & OBD_CONNECT_MAX_EASIZE)
+ seq_printf(m, " max_easize: %d\n", ocd->ocd_max_easize);
+ if (flags & OBD_CONNECT_MAXBYTES)
+ seq_printf(m, " max_object_bytes: %llx\n",
+ ocd->ocd_maxbytes);
+}
+
int lprocfs_read_frac_helper(char *buffer, unsigned long count, long val,
int mult)
{
@@ -624,6 +674,7 @@ int lprocfs_rd_import(struct seq_file *m, void *data)
struct obd_device *obd = data;
struct obd_import *imp;
struct obd_import_conn *conn;
+ struct obd_connect_data *ocd;
int j;
int k;
int rw = 0;
@@ -635,9 +686,9 @@ int lprocfs_rd_import(struct seq_file *m, void *data)
return rc;
imp = obd->u.cli.cl_import;
+ ocd = &imp->imp_connect_data;
- seq_printf(m,
- "import:\n"
+ seq_printf(m, "import:\n"
" name: %s\n"
" target: %s\n"
" state: %s\n"
@@ -649,9 +700,9 @@ int lprocfs_rd_import(struct seq_file *m, void *data)
imp->imp_connect_data.ocd_instance);
obd_connect_seq_flags2str(m, imp->imp_connect_data.ocd_connect_flags,
", ");
- seq_printf(m,
- " ]\n"
- " import_flags: [ ");
+ seq_printf(m, " ]\n");
+ obd_connect_data_seqprint(m, ocd);
+ seq_printf(m, " import_flags: [ ");
obd_import_flags2str(imp, m);
seq_printf(m,
@@ -694,8 +745,9 @@ int lprocfs_rd_import(struct seq_file *m, void *data)
do_div(sum, ret.lc_count);
ret.lc_sum = sum;
- } else
+ } else {
ret.lc_sum = 0;
+ }
seq_printf(m,
" rpcs:\n"
" inflight: %u\n"
@@ -1471,10 +1523,10 @@ EXPORT_SYMBOL(lprocfs_oh_tally);
void lprocfs_oh_tally_log2(struct obd_histogram *oh, unsigned int value)
{
- unsigned int val;
+ unsigned int val = 0;
- for (val = 0; ((1 << val) < value) && (val <= OBD_HIST_MAX); val++)
- ;
+ if (likely(value != 0))
+ val = min(fls(value - 1), OBD_HIST_MAX);
lprocfs_oh_tally(oh, val);
}
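
For value >= 1, fls(value - 1) is the smallest val with (1 << val) >= value, i.e. exactly the histogram bucket the removed linear search computed, so the two forms agree while the new one runs in constant time. A small user-space sketch (fls() reimplemented here purely for illustration) that checks them against each other:

#include <stdio.h>

static int fls_sketch(unsigned int x)	/* like the kernel's fls(): fls(0) == 0 */
{
	int bit = 0;

	while (x) {
		bit++;
		x >>= 1;
	}
	return bit;
}

int main(void)
{
	unsigned int value;

	for (value = 1; value <= 10; value++) {
		int val = 0;

		while ((1u << val) < value)	/* the old linear search */
			val++;
		printf("%2u: loop=%d fls(value - 1)=%d\n",
		       value, val, fls_sketch(value - 1));
	}
	return 0;
}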
diff --git a/drivers/staging/lustre/lustre/obdclass/lu_object.c b/drivers/staging/lustre/lustre/obdclass/lu_object.c
index 978568ada8e9..e04385760f21 100644
--- a/drivers/staging/lustre/lustre/obdclass/lu_object.c
+++ b/drivers/staging/lustre/lustre/obdclass/lu_object.c
@@ -55,6 +55,7 @@
#include "../include/lustre_disk.h"
#include "../include/lustre_fid.h"
#include "../include/lu_object.h"
+#include "../include/cl_object.h"
#include "../include/lu_ref.h"
#include <linux/list.h>
@@ -103,7 +104,6 @@ void lu_object_put(const struct lu_env *env, struct lu_object *o)
if (!cfs_hash_bd_dec_and_lock(site->ls_obj_hash, &bd, &top->loh_ref)) {
if (lu_object_is_dying(top)) {
-
/*
* somebody may be waiting for this, currently only
* used for cl_object, see cl_object_put_last().
@@ -357,7 +357,6 @@ int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
if (count > 0 && --count == 0)
break;
-
}
cfs_hash_bd_unlock(s->ls_obj_hash, &bd, 1);
cond_resched();
@@ -715,8 +714,9 @@ struct lu_object *lu_object_find_slice(const struct lu_env *env,
obj = lu_object_locate(top->lo_header, dev->ld_type);
if (!obj)
lu_object_put(env, top);
- } else
+ } else {
obj = top;
+ }
return obj;
}
EXPORT_SYMBOL(lu_object_find_slice);
@@ -935,7 +935,7 @@ static void lu_dev_add_linkage(struct lu_site *s, struct lu_device *d)
* Initialize site \a s, with \a d as the top level device.
*/
#define LU_SITE_BITS_MIN 12
-#define LU_SITE_BITS_MAX 24
+#define LU_SITE_BITS_MAX 19
/**
* total 256 buckets, we don't want too many buckets because:
* - consume too much memory
@@ -1468,6 +1468,7 @@ void lu_context_key_quiesce(struct lu_context_key *key)
/*
* XXX layering violation.
*/
+ cl_env_cache_purge(~0);
key->lct_tags |= LCT_QUIESCENT;
/*
* XXX memory barrier has to go here.
diff --git a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
index 5f812460b3ea..b1abe023bb35 100644
--- a/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
+++ b/drivers/staging/lustre/lustre/obdclass/lustre_peer.c
@@ -163,8 +163,9 @@ int class_del_uuid(const char *uuid)
break;
}
}
- } else
+ } else {
list_splice_init(&g_uuid_list, &deathrow);
+ }
spin_unlock(&g_uuid_lock);
if (uuid && list_empty(&deathrow)) {
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_config.c b/drivers/staging/lustre/lustre/obdclass/obd_config.c
index 5395e994deab..cb1d65c3d95d 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_config.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_config.c
@@ -606,7 +606,7 @@ static int class_del_conn(struct obd_device *obd, struct lustre_cfg *lcfg)
return rc;
}
-LIST_HEAD(lustre_profile_list);
+static LIST_HEAD(lustre_profile_list);
struct lustre_profile *class_get_profile(const char *prof)
{
@@ -961,7 +961,6 @@ int class_process_config(struct lustre_cfg *lcfg)
default: {
err = obd_process_config(obd, sizeof(*lcfg), lcfg);
goto out;
-
}
}
out:
@@ -1001,7 +1000,13 @@ int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars,
for (i = 1; i < lcfg->lcfg_bufcount; i++) {
key = lustre_cfg_buf(lcfg, i);
/* Strip off prefix */
- class_match_param(key, prefix, &key);
+ if (class_match_param(key, prefix, &key)) {
+ /*
+ * If the prefix doesn't match, return error so we
+ * can pass it down the stack
+ */
+ return -ENOSYS;
+ }
sval = strchr(key, '=');
if (!sval || (*(sval + 1) == 0)) {
CERROR("Can't parse param %s (missing '=')\n", key);
@@ -1034,18 +1039,14 @@ int class_process_proc_param(char *prefix, struct lprocfs_vars *lvars,
j++;
}
if (!matched) {
- /* If the prefix doesn't match, return error so we
- * can pass it down the stack
- */
- if (strnchr(key, keylen, '.'))
- return -ENOSYS;
- CERROR("%s: unknown param %s\n",
+ CERROR("%.*s: %s unknown param %s\n",
+ (int)strlen(prefix) - 1, prefix,
(char *)lustre_cfg_string(lcfg, 0), key);
/* rc = -EINVAL; continue parsing other params */
skip++;
} else if (rc < 0) {
- CERROR("writing proc entry %s err %d\n",
- var->name, rc);
+ CERROR("%s: error writing proc entry '%s': rc = %d\n",
+ prefix, var->name, rc);
rc = 0;
} else {
CDEBUG(D_CONFIG, "%s.%.*s: Set parameter %.*s=%s\n",
@@ -1350,6 +1351,7 @@ static int class_config_parse_rec(struct llog_rec_hdr *rec, char *buf,
lustre_cfg_string(lcfg, i));
}
}
+ ptr += snprintf(ptr, end - ptr, "\n");
/* return consumed bytes */
rc = ptr - buf;
return rc;
@@ -1368,7 +1370,7 @@ int class_config_dump_handler(const struct lu_env *env,
if (rec->lrh_type == OBD_CFG_REC) {
class_config_parse_rec(rec, outstr, 256);
- LCONSOLE(D_WARNING, " %s\n", outstr);
+ LCONSOLE(D_WARNING, " %s", outstr);
} else {
LCONSOLE(D_WARNING, "unhandled lrh_type: %#x\n", rec->lrh_type);
rc = -EINVAL;
diff --git a/drivers/staging/lustre/lustre/obdclass/obd_mount.c b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
index d3e28a389ac1..e0c90adc72a7 100644
--- a/drivers/staging/lustre/lustre/obdclass/obd_mount.c
+++ b/drivers/staging/lustre/lustre/obdclass/obd_mount.c
@@ -102,7 +102,7 @@ int lustre_process_log(struct super_block *sb, char *logname,
LCONSOLE_ERROR_MSG(0x15b, "%s: The configuration from log '%s' failed from the MGS (%d). Make sure this client and the MGS are running compatible versions of Lustre.\n",
mgc->obd_name, logname, rc);
- if (rc)
+ else if (rc)
LCONSOLE_ERROR_MSG(0x15c, "%s: The configuration from log '%s' failed (%d). This may be the result of communication errors between this node and the MGS, a bad configuration, or other errors. See the syslog for more information.\n",
mgc->obd_name, logname,
rc);
@@ -307,7 +307,8 @@ int lustre_start_mgc(struct super_block *sb)
while (class_parse_nid(ptr, &nid, &ptr) == 0) {
rc = do_lcfg(mgcname, nid,
LCFG_ADD_UUID, niduuid, NULL, NULL, NULL);
- i++;
+ if (!rc)
+ i++;
/* Stop at the first failover nid */
if (*ptr == ':')
break;
@@ -345,16 +346,18 @@ int lustre_start_mgc(struct super_block *sb)
sprintf(niduuid, "%s_%x", mgcname, i);
j = 0;
while (class_parse_nid_quiet(ptr, &nid, &ptr) == 0) {
- j++;
- rc = do_lcfg(mgcname, nid,
- LCFG_ADD_UUID, niduuid, NULL, NULL, NULL);
+ rc = do_lcfg(mgcname, nid, LCFG_ADD_UUID, niduuid,
+ NULL, NULL, NULL);
+ if (!rc)
+ ++j;
if (*ptr == ':')
break;
}
if (j > 0) {
rc = do_lcfg(mgcname, 0, LCFG_ADD_CONN,
niduuid, NULL, NULL, NULL);
- i++;
+ if (!rc)
+ i++;
} else {
/* at ":/fsname" */
break;
diff --git a/drivers/staging/lustre/lustre/obdclass/obdo.c b/drivers/staging/lustre/lustre/obdclass/obdo.c
index e6436cb4ac62..748e33f017d5 100644
--- a/drivers/staging/lustre/lustre/obdclass/obdo.c
+++ b/drivers/staging/lustre/lustre/obdclass/obdo.c
@@ -185,8 +185,7 @@ void md_from_obdo(struct md_op_data *op_data, struct obdo *oa, u32 valid)
op_data->op_attr.ia_valid |= ATTR_BLOCKS;
}
if (valid & OBD_MD_FLFLAGS) {
- ((struct ll_iattr *)&op_data->op_attr)->ia_attr_flags =
- oa->o_flags;
+ op_data->op_attr_flags = oa->o_flags;
op_data->op_attr.ia_valid |= ATTR_ATTR_FLAG;
}
}
diff --git a/drivers/staging/lustre/lustre/obdecho/echo_client.c b/drivers/staging/lustre/lustre/obdecho/echo_client.c
index 1e83669c204d..91ef06f17934 100644
--- a/drivers/staging/lustre/lustre/obdecho/echo_client.c
+++ b/drivers/staging/lustre/lustre/obdecho/echo_client.c
@@ -81,7 +81,6 @@ struct echo_object_conf {
struct echo_page {
struct cl_page_slice ep_cl;
struct mutex ep_lock;
- struct page *ep_vmpage;
};
struct echo_lock {
@@ -164,15 +163,13 @@ static int cl_echo_object_put(struct echo_object *eco);
static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
struct page **pages, int npages, int async);
-static struct echo_thread_info *echo_env_info(const struct lu_env *env);
-
struct echo_thread_info {
struct echo_object_conf eti_conf;
struct lustre_md eti_md;
struct cl_2queue eti_queue;
struct cl_io eti_io;
- struct cl_lock_descr eti_descr;
+ struct cl_lock eti_lock;
struct lu_fid eti_fid;
struct lu_fid eti_fid2;
};
@@ -219,12 +216,6 @@ static struct lu_kmem_descr echo_caches[] = {
*
* @{
*/
-static struct page *echo_page_vmpage(const struct lu_env *env,
- const struct cl_page_slice *slice)
-{
- return cl2echo_page(slice)->ep_vmpage;
-}
-
static int echo_page_own(const struct lu_env *env,
const struct cl_page_slice *slice,
struct cl_io *io, int nonblock)
@@ -273,12 +264,10 @@ static void echo_page_completion(const struct lu_env *env,
static void echo_page_fini(const struct lu_env *env,
struct cl_page_slice *slice)
{
- struct echo_page *ep = cl2echo_page(slice);
struct echo_object *eco = cl2echo_obj(slice->cpl_obj);
- struct page *vmpage = ep->ep_vmpage;
atomic_dec(&eco->eo_npages);
- put_page(vmpage);
+ put_page(slice->cpl_page->cp_vmpage);
}
static int echo_page_prep(const struct lu_env *env,
@@ -295,7 +284,8 @@ static int echo_page_print(const struct lu_env *env,
struct echo_page *ep = cl2echo_page(slice);
(*printer)(env, cookie, LUSTRE_ECHO_CLIENT_NAME"-page@%p %d vm@%p\n",
- ep, mutex_is_locked(&ep->ep_lock), ep->ep_vmpage);
+ ep, mutex_is_locked(&ep->ep_lock),
+ slice->cpl_page->cp_vmpage);
return 0;
}
@@ -303,7 +293,6 @@ static const struct cl_page_operations echo_page_ops = {
.cpo_own = echo_page_own,
.cpo_disown = echo_page_disown,
.cpo_discard = echo_page_discard,
- .cpo_vmpage = echo_page_vmpage,
.cpo_fini = echo_page_fini,
.cpo_print = echo_page_print,
.cpo_is_vmlocked = echo_page_is_vmlocked,
@@ -336,26 +325,8 @@ static void echo_lock_fini(const struct lu_env *env,
kmem_cache_free(echo_lock_kmem, ecl);
}
-static void echo_lock_delete(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct echo_lock *ecl = cl2echo_lock(slice);
-
- LASSERT(list_empty(&ecl->el_chain));
-}
-
-static int echo_lock_fits_into(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- const struct cl_lock_descr *need,
- const struct cl_io *unused)
-{
- return 1;
-}
-
static struct cl_lock_operations echo_lock_ops = {
.clo_fini = echo_lock_fini,
- .clo_delete = echo_lock_delete,
- .clo_fits_into = echo_lock_fits_into
};
/** @} echo_lock */
@@ -367,15 +338,14 @@ static struct cl_lock_operations echo_lock_ops = {
* @{
*/
static int echo_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage)
+ struct cl_page *page, pgoff_t index)
{
struct echo_page *ep = cl_object_page_slice(obj, page);
struct echo_object *eco = cl2echo_obj(obj);
- ep->ep_vmpage = vmpage;
- get_page(vmpage);
+ get_page(page->cp_vmpage);
mutex_init(&ep->ep_lock);
- cl_page_slice_add(page, &ep->ep_cl, obj, &echo_page_ops);
+ cl_page_slice_add(page, &ep->ep_cl, obj, index, &echo_page_ops);
atomic_inc(&eco->eo_npages);
return 0;
}
@@ -568,6 +538,8 @@ static struct lu_object *echo_object_alloc(const struct lu_env *env,
obj = &echo_obj2cl(eco)->co_lu;
cl_object_header_init(hdr);
+ hdr->coh_page_bufsize = cfs_size_round(sizeof(struct cl_page));
+
lu_object_init(obj, &hdr->coh_lu, dev);
lu_object_add_top(&hdr->coh_lu, obj);
@@ -694,8 +666,7 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
struct obd_device *obd = NULL; /* to keep compiler happy */
struct obd_device *tgt;
const char *tgt_type_name;
- int rc;
- int cleanup = 0;
+ int rc, err;
ed = kzalloc(sizeof(*ed), GFP_NOFS);
if (!ed) {
@@ -703,16 +674,14 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
goto out;
}
- cleanup = 1;
cd = &ed->ed_cl;
rc = cl_device_init(cd, t);
if (rc)
- goto out;
+ goto out_free;
cd->cd_lu_dev.ld_ops = &echo_device_lu_ops;
cd->cd_ops = &echo_device_cl_ops;
- cleanup = 2;
obd = class_name2obd(lustre_cfg_string(cfg, 0));
LASSERT(obd);
LASSERT(env);
@@ -722,28 +691,25 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
CERROR("Can not find tgt device %s\n",
lustre_cfg_string(cfg, 1));
rc = -ENODEV;
- goto out;
+ goto out_device_fini;
}
next = tgt->obd_lu_dev;
if (!strcmp(tgt->obd_type->typ_name, LUSTRE_MDT_NAME)) {
CERROR("echo MDT client must be run on server\n");
rc = -EOPNOTSUPP;
- goto out;
+ goto out_device_fini;
}
rc = echo_site_init(env, ed);
if (rc)
- goto out;
-
- cleanup = 3;
+ goto out_device_fini;
rc = echo_client_setup(env, obd, cfg);
if (rc)
- goto out;
+ goto out_site_fini;
ed->ed_ec = &obd->u.echo_client;
- cleanup = 4;
/* if echo client is to be stacked upon ost device, the next is
* NULL since ost is not a clio device so far
@@ -755,7 +721,7 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
if (next) {
if (next->ld_site) {
rc = -EBUSY;
- goto out;
+ goto out_cleanup;
}
next->ld_site = &ed->ed_site->cs_lu;
@@ -763,7 +729,7 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
next->ld_type->ldt_name,
NULL);
if (rc)
- goto out;
+ goto out_cleanup;
} else {
LASSERT(strcmp(tgt_type_name, LUSTRE_OST_NAME) == 0);
@@ -771,27 +737,19 @@ static struct lu_device *echo_device_alloc(const struct lu_env *env,
ed->ed_next = next;
return &cd->cd_lu_dev;
-out:
- switch (cleanup) {
- case 4: {
- int rc2;
-
- rc2 = echo_client_cleanup(obd);
- if (rc2)
- CERROR("Cleanup obd device %s error(%d)\n",
- obd->obd_name, rc2);
- }
- case 3:
- echo_site_fini(env, ed);
- case 2:
- cl_device_fini(&ed->ed_cl);
- case 1:
- kfree(ed);
- case 0:
- default:
- break;
- }
+out_cleanup:
+ err = echo_client_cleanup(obd);
+ if (err)
+ CERROR("Cleanup obd device %s error(%d)\n",
+ obd->obd_name, err);
+out_site_fini:
+ echo_site_fini(env, ed);
+out_device_fini:
+ cl_device_fini(&ed->ed_cl);
+out_free:
+ kfree(ed);
+out:
return ERR_PTR(rc);
}
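
The rework above trades the counter-plus-switch teardown for the kernel's usual goto-unwind style: each failure jumps to the first label whose cleanup is still required, and the labels undo the setup steps in reverse order. A self-contained sketch of the shape (names are illustrative, not taken from the driver):

#include <errno.h>
#include <stdlib.h>

struct ctx {
	void *a;
	void *b;
};

static int ctx_setup(struct ctx *c)
{
	int rc;

	c->a = malloc(16);
	if (!c->a) {
		rc = -ENOMEM;
		goto out;		/* nothing allocated yet */
	}

	c->b = malloc(16);
	if (!c->b) {
		rc = -ENOMEM;
		goto out_free_a;	/* undo only the step that succeeded */
	}

	return 0;

out_free_a:
	free(c->a);
out:
	return rc;
}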
@@ -819,16 +777,7 @@ static void echo_lock_release(const struct lu_env *env,
{
struct cl_lock *clk = echo_lock2cl(ecl);
- cl_lock_get(clk);
- cl_unuse(env, clk);
- cl_lock_release(env, clk, "ec enqueue", ecl->el_object);
- if (!still_used) {
- cl_lock_mutex_get(env, clk);
- cl_lock_cancel(env, clk);
- cl_lock_delete(env, clk);
- cl_lock_mutex_put(env, clk);
- }
- cl_lock_put(env, clk);
+ cl_lock_release(env, clk);
}
static struct lu_device *echo_device_free(const struct lu_env *env,
@@ -1022,9 +971,11 @@ static int cl_echo_enqueue0(struct lu_env *env, struct echo_object *eco,
info = echo_env_info(env);
io = &info->eti_io;
- descr = &info->eti_descr;
+ lck = &info->eti_lock;
obj = echo_obj2cl(eco);
+ memset(lck, 0, sizeof(*lck));
+ descr = &lck->cll_descr;
descr->cld_obj = obj;
descr->cld_start = cl_index(obj, start);
descr->cld_end = cl_index(obj, end);
@@ -1032,25 +983,20 @@ static int cl_echo_enqueue0(struct lu_env *env, struct echo_object *eco,
descr->cld_enq_flags = enqflags;
io->ci_obj = obj;
- lck = cl_lock_request(env, io, descr, "ec enqueue", eco);
- if (lck) {
+ rc = cl_lock_request(env, io, lck);
+ if (rc == 0) {
struct echo_client_obd *ec = eco->eo_dev->ed_ec;
struct echo_lock *el;
- rc = cl_wait(env, lck);
- if (rc == 0) {
- el = cl2echo_lock(cl_lock_at(lck, &echo_device_type));
- spin_lock(&ec->ec_lock);
- if (list_empty(&el->el_chain)) {
- list_add(&el->el_chain, &ec->ec_locks);
- el->el_cookie = ++ec->ec_unique;
- }
- atomic_inc(&el->el_refcount);
- *cookie = el->el_cookie;
- spin_unlock(&ec->ec_lock);
- } else {
- cl_lock_release(env, lck, "ec enqueue", current);
+ el = cl2echo_lock(cl_lock_at(lck, &echo_device_type));
+ spin_lock(&ec->ec_lock);
+ if (list_empty(&el->el_chain)) {
+ list_add(&el->el_chain, &ec->ec_locks);
+ el->el_cookie = ++ec->ec_unique;
}
+ atomic_inc(&el->el_refcount);
+ *cookie = el->el_cookie;
+ spin_unlock(&ec->ec_lock);
}
return rc;
}
@@ -1085,22 +1031,17 @@ static int cl_echo_cancel0(struct lu_env *env, struct echo_device *ed,
return 0;
}
-static int cl_echo_async_brw(const struct lu_env *env, struct cl_io *io,
- enum cl_req_type unused, struct cl_2queue *queue)
+static void echo_commit_callback(const struct lu_env *env, struct cl_io *io,
+ struct cl_page *page)
{
- struct cl_page *clp;
- struct cl_page *temp;
- int result = 0;
+ struct echo_thread_info *info;
+ struct cl_2queue *queue;
- cl_page_list_for_each_safe(clp, temp, &queue->c2_qin) {
- int rc;
+ info = echo_env_info(env);
+ LASSERT(io == &info->eti_io);
- rc = cl_page_cache_add(env, io, clp, CRT_WRITE);
- if (rc == 0)
- continue;
- result = result ?: rc;
- }
- return result;
+ queue = &info->eti_queue;
+ cl_page_list_add(&queue->c2_qout, page);
}
static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
@@ -1119,7 +1060,7 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
int rc;
int i;
- LASSERT((offset & ~CFS_PAGE_MASK) == 0);
+ LASSERT((offset & ~PAGE_MASK) == 0);
LASSERT(ed->ed_next);
env = cl_env_get(&refcheck);
if (IS_ERR(env))
@@ -1179,7 +1120,9 @@ static int cl_echo_object_brw(struct echo_object *eco, int rw, u64 offset,
async = async && (typ == CRT_WRITE);
if (async)
- rc = cl_echo_async_brw(env, io, typ, queue);
+ rc = cl_io_commit_async(env, io, &queue->c2_qin,
+ 0, PAGE_SIZE,
+ echo_commit_callback);
else
rc = cl_io_submit_sync(env, io, typ, queue, 0);
CDEBUG(D_INFO, "echo_client %s write returns %d\n",
@@ -1387,7 +1330,7 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
LASSERT(rw == OBD_BRW_WRITE || rw == OBD_BRW_READ);
if (count <= 0 ||
- (count & (~CFS_PAGE_MASK)) != 0)
+ (count & (~PAGE_MASK)) != 0)
return -EINVAL;
/* XXX think again with misaligned I/O */
@@ -1409,7 +1352,6 @@ static int echo_client_kbrw(struct echo_device *ed, int rw, struct obdo *oa,
for (i = 0, pgp = pga, off = offset;
i < npages;
i++, pgp++, off += PAGE_SIZE) {
-
LASSERT(!pgp->pg); /* for cleanup */
rc = -ENOMEM;
@@ -1470,7 +1412,7 @@ static int echo_client_prep_commit(const struct lu_env *env,
u64 npages, tot_pages;
int i, ret = 0, brw_flags = 0;
- if (count <= 0 || (count & (~CFS_PAGE_MASK)) != 0)
+ if (count <= 0 || (count & (~PAGE_MASK)) != 0)
return -EINVAL;
npages = batch >> PAGE_SHIFT;
@@ -1886,7 +1828,6 @@ static int __init obdecho_init(void)
static void /*__exit*/ obdecho_exit(void)
{
echo_client_exit();
-
}
MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
diff --git a/drivers/staging/lustre/lustre/osc/lproc_osc.c b/drivers/staging/lustre/lustre/osc/lproc_osc.c
index a3358c39b2f1..33a113213bf5 100644
--- a/drivers/staging/lustre/lustre/osc/lproc_osc.c
+++ b/drivers/staging/lustre/lustre/osc/lproc_osc.c
@@ -121,9 +121,9 @@ static ssize_t max_rpcs_in_flight_store(struct kobject *kobj,
atomic_add(added, &osc_pool_req_count);
}
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
cli->cl_max_rpcs_in_flight = val;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return count;
}
@@ -139,9 +139,9 @@ static ssize_t max_dirty_mb_show(struct kobject *kobj,
long val;
int mult;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
val = cli->cl_dirty_max;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
mult = 1 << 20;
return lprocfs_read_frac_helper(buf, PAGE_SIZE, val, mult);
@@ -169,10 +169,10 @@ static ssize_t max_dirty_mb_store(struct kobject *kobj,
pages_number > totalram_pages / 4) /* 1/4 of RAM */
return -ERANGE;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
cli->cl_dirty_max = (u32)(pages_number << PAGE_SHIFT);
osc_wake_cache_waiters(cli);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return count;
}
@@ -222,8 +222,16 @@ static ssize_t osc_cached_mb_seq_write(struct file *file,
return -ERANGE;
rc = atomic_read(&cli->cl_lru_in_list) - pages_number;
- if (rc > 0)
- (void)osc_lru_shrink(cli, rc);
+ if (rc > 0) {
+ struct lu_env *env;
+ int refcheck;
+
+ env = cl_env_get(&refcheck);
+ if (!IS_ERR(env)) {
+ (void)osc_lru_shrink(env, cli, rc, true);
+ cl_env_put(env, &refcheck);
+ }
+ }
return count;
}
@@ -239,9 +247,9 @@ static ssize_t cur_dirty_bytes_show(struct kobject *kobj,
struct client_obd *cli = &dev->u.cli;
int len;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
len = sprintf(buf, "%lu\n", cli->cl_dirty);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return len;
}
@@ -256,9 +264,9 @@ static ssize_t cur_grant_bytes_show(struct kobject *kobj,
struct client_obd *cli = &dev->u.cli;
int len;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
len = sprintf(buf, "%lu\n", cli->cl_avail_grant);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return len;
}
@@ -279,12 +287,12 @@ static ssize_t cur_grant_bytes_store(struct kobject *kobj,
return rc;
/* this is only for shrinking grant */
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
if (val >= cli->cl_avail_grant) {
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return -EINVAL;
}
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
if (cli->cl_import->imp_state == LUSTRE_IMP_FULL)
rc = osc_shrink_grant_to_target(cli, val);
@@ -303,9 +311,9 @@ static ssize_t cur_lost_grant_bytes_show(struct kobject *kobj,
struct client_obd *cli = &dev->u.cli;
int len;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
len = sprintf(buf, "%lu\n", cli->cl_lost_grant);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return len;
}
@@ -577,14 +585,31 @@ static ssize_t max_pages_per_rpc_store(struct kobject *kobj,
if (val == 0 || val > ocd->ocd_brw_size >> PAGE_SHIFT) {
return -ERANGE;
}
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
cli->cl_max_pages_per_rpc = val;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return count;
}
LUSTRE_RW_ATTR(max_pages_per_rpc);
+static ssize_t unstable_stats_show(struct kobject *kobj,
+ struct attribute *attr,
+ char *buf)
+{
+ struct obd_device *dev = container_of(kobj, struct obd_device,
+ obd_kobj);
+ struct client_obd *cli = &dev->u.cli;
+ int pages, mb;
+
+ pages = atomic_read(&cli->cl_unstable_count);
+ mb = (pages * PAGE_SIZE) >> 20;
+
+ return sprintf(buf, "unstable_pages: %8d\n"
+ "unstable_mb: %8d\n", pages, mb);
+}
+LUSTRE_RO_ATTR(unstable_stats);
+
LPROC_SEQ_FOPS_RO_TYPE(osc, connect_flags);
LPROC_SEQ_FOPS_RO_TYPE(osc, server_uuid);
LPROC_SEQ_FOPS_RO_TYPE(osc, conn_uuid);
@@ -623,7 +648,7 @@ static int osc_rpc_stats_seq_show(struct seq_file *seq, void *v)
ktime_get_real_ts64(&now);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
seq_printf(seq, "snapshot_time: %llu.%9lu (secs.usecs)\n",
(s64)now.tv_sec, (unsigned long)now.tv_nsec);
@@ -707,7 +732,7 @@ static int osc_rpc_stats_seq_show(struct seq_file *seq, void *v)
break;
}
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return 0;
}
@@ -794,6 +819,7 @@ static struct attribute *osc_attrs[] = {
&lustre_attr_max_pages_per_rpc.attr,
&lustre_attr_max_rpcs_in_flight.attr,
&lustre_attr_resend_count.attr,
+ &lustre_attr_unstable_stats.attr,
NULL,
};
diff --git a/drivers/staging/lustre/lustre/osc/osc_cache.c b/drivers/staging/lustre/lustre/osc/osc_cache.c
index 5f25bf83dcfc..5a14bea961b4 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cache.c
+++ b/drivers/staging/lustre/lustre/osc/osc_cache.c
@@ -76,6 +76,8 @@ static inline char *ext_flags(struct osc_extent *ext, char *flags)
*buf++ = ext->oe_rw ? 'r' : 'w';
if (ext->oe_intree)
*buf++ = 'i';
+ if (ext->oe_sync)
+ *buf++ = 'S';
if (ext->oe_srvlock)
*buf++ = 's';
if (ext->oe_hp)
@@ -121,9 +123,13 @@ static const char *oes_strings[] = {
__ext->oe_grants, __ext->oe_nr_pages, \
list_empty_marker(&__ext->oe_pages), \
waitqueue_active(&__ext->oe_waitq) ? '+' : '-', \
- __ext->oe_osclock, __ext->oe_mppr, __ext->oe_owner, \
+ __ext->oe_dlmlock, __ext->oe_mppr, __ext->oe_owner, \
/* ----- part 4 ----- */ \
## __VA_ARGS__); \
+ if (lvl == D_ERROR && __ext->oe_dlmlock) \
+ LDLM_ERROR(__ext->oe_dlmlock, "extent: %p\n", __ext); \
+ else \
+ LDLM_DEBUG(__ext->oe_dlmlock, "extent: %p\n", __ext); \
} while (0)
#undef EASSERTF
@@ -240,20 +246,25 @@ static int osc_extent_sanity_check0(struct osc_extent *ext,
goto out;
}
- if (!ext->oe_osclock && ext->oe_grants > 0) {
+ if (ext->oe_sync && ext->oe_grants > 0) {
rc = 90;
goto out;
}
- if (ext->oe_osclock) {
- struct cl_lock_descr *descr;
+ if (ext->oe_dlmlock) {
+ struct ldlm_extent *extent;
- descr = &ext->oe_osclock->cll_descr;
- if (!(descr->cld_start <= ext->oe_start &&
- descr->cld_end >= ext->oe_max_end)) {
+ extent = &ext->oe_dlmlock->l_policy_data.l_extent;
+ if (!(extent->start <= cl_offset(osc2cl(obj), ext->oe_start) &&
+ extent->end >= cl_offset(osc2cl(obj), ext->oe_max_end))) {
rc = 100;
goto out;
}
+
+ if (!(ext->oe_dlmlock->l_granted_mode & (LCK_PW | LCK_GROUP))) {
+ rc = 102;
+ goto out;
+ }
}
if (ext->oe_nr_pages > ext->oe_mppr) {
@@ -276,7 +287,7 @@ static int osc_extent_sanity_check0(struct osc_extent *ext,
page_count = 0;
list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
- pgoff_t index = oap2cl_page(oap)->cp_index;
+ pgoff_t index = osc_index(oap2osc(oap));
++page_count;
if (index > ext->oe_end || index < ext->oe_start) {
rc = 110;
@@ -359,7 +370,7 @@ static struct osc_extent *osc_extent_alloc(struct osc_object *obj)
ext->oe_state = OES_INV;
INIT_LIST_HEAD(&ext->oe_pages);
init_waitqueue_head(&ext->oe_waitq);
- ext->oe_osclock = NULL;
+ ext->oe_dlmlock = NULL;
return ext;
}
@@ -385,9 +396,11 @@ static void osc_extent_put(const struct lu_env *env, struct osc_extent *ext)
LASSERT(ext->oe_state == OES_INV);
LASSERT(!ext->oe_intree);
- if (ext->oe_osclock) {
- cl_lock_put(env, ext->oe_osclock);
- ext->oe_osclock = NULL;
+ if (ext->oe_dlmlock) {
+ lu_ref_add(&ext->oe_dlmlock->l_reference,
+ "osc_extent", ext);
+ LDLM_LOCK_PUT(ext->oe_dlmlock);
+ ext->oe_dlmlock = NULL;
}
osc_extent_free(ext);
}
@@ -543,7 +556,7 @@ static int osc_extent_merge(const struct lu_env *env, struct osc_extent *cur,
if (cur->oe_max_end != victim->oe_max_end)
return -ERANGE;
- LASSERT(cur->oe_osclock == victim->oe_osclock);
+ LASSERT(cur->oe_dlmlock == victim->oe_dlmlock);
ppc_bits = osc_cli(obj)->cl_chunkbits - PAGE_SHIFT;
chunk_start = cur->oe_start >> ppc_bits;
chunk_end = cur->oe_end >> ppc_bits;
@@ -624,10 +637,10 @@ static inline int overlapped(struct osc_extent *ex1, struct osc_extent *ex2)
static struct osc_extent *osc_extent_find(const struct lu_env *env,
struct osc_object *obj, pgoff_t index,
int *grants)
-
{
struct client_obd *cli = osc_cli(obj);
- struct cl_lock *lock;
+ struct osc_lock *olck;
+ struct cl_lock_descr *descr;
struct osc_extent *cur;
struct osc_extent *ext;
struct osc_extent *conflict = NULL;
@@ -644,8 +657,12 @@ static struct osc_extent *osc_extent_find(const struct lu_env *env,
if (!cur)
return ERR_PTR(-ENOMEM);
- lock = cl_lock_at_pgoff(env, osc2cl(obj), index, NULL, 1, 0);
- LASSERT(lock->cll_descr.cld_mode >= CLM_WRITE);
+ olck = osc_env_io(env)->oi_write_osclock;
+ LASSERTF(olck, "page %lu is not covered by lock\n", index);
+ LASSERT(olck->ols_state == OLS_GRANTED);
+
+ descr = &olck->ols_cl.cls_lock->cll_descr;
+ LASSERT(descr->cld_mode >= CLM_WRITE);
LASSERT(cli->cl_chunkbits >= PAGE_SHIFT);
ppc_bits = cli->cl_chunkbits - PAGE_SHIFT;
@@ -657,19 +674,23 @@ static struct osc_extent *osc_extent_find(const struct lu_env *env,
max_pages = cli->cl_max_pages_per_rpc;
LASSERT((max_pages & ~chunk_mask) == 0);
max_end = index - (index % max_pages) + max_pages - 1;
- max_end = min_t(pgoff_t, max_end, lock->cll_descr.cld_end);
+ max_end = min_t(pgoff_t, max_end, descr->cld_end);
/* initialize new extent by parameters so far */
cur->oe_max_end = max_end;
cur->oe_start = index & chunk_mask;
cur->oe_end = ((index + ~chunk_mask + 1) & chunk_mask) - 1;
- if (cur->oe_start < lock->cll_descr.cld_start)
- cur->oe_start = lock->cll_descr.cld_start;
+ if (cur->oe_start < descr->cld_start)
+ cur->oe_start = descr->cld_start;
if (cur->oe_end > max_end)
cur->oe_end = max_end;
- cur->oe_osclock = lock;
cur->oe_grants = 0;
cur->oe_mppr = max_pages;
+ if (olck->ols_dlmlock) {
+ LASSERT(olck->ols_hold);
+ cur->oe_dlmlock = LDLM_LOCK_GET(olck->ols_dlmlock);
+ lu_ref_add(&olck->ols_dlmlock->l_reference, "osc_extent", cur);
+ }
/* grants has been allocated by caller */
LASSERTF(*grants >= chunksize + cli->cl_extent_tax,
@@ -691,7 +712,7 @@ restart:
break;
/* if covering by different locks, no chance to match */
- if (lock != ext->oe_osclock) {
+ if (olck->ols_dlmlock != ext->oe_dlmlock) {
EASSERTF(!overlapped(ext, cur), ext,
EXTSTR"\n", EXTPARA(cur));
@@ -795,7 +816,7 @@ restart:
if (found) {
LASSERT(!conflict);
if (!IS_ERR(found)) {
- LASSERT(found->oe_osclock == cur->oe_osclock);
+ LASSERT(found->oe_dlmlock == cur->oe_dlmlock);
OSC_EXTENT_DUMP(D_CACHE, found,
"found caching ext for %lu.\n", index);
}
@@ -810,7 +831,7 @@ restart:
found = osc_extent_hold(cur);
osc_extent_insert(obj, cur);
OSC_EXTENT_DUMP(D_CACHE, cur, "add into tree %lu/%lu.\n",
- index, lock->cll_descr.cld_end);
+ index, descr->cld_end);
}
osc_object_unlock(obj);
@@ -856,6 +877,8 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
ext->oe_rc = rc ?: ext->oe_nr_pages;
EASSERT(ergo(rc == 0, ext->oe_state == OES_RPC), ext);
+
+ osc_lru_add_batch(cli, &ext->oe_pages);
list_for_each_entry_safe(oap, tmp, &ext->oe_pages, oap_pending_item) {
list_del_init(&oap->oap_rpc_item);
list_del_init(&oap->oap_pending_item);
@@ -877,10 +900,9 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
* span a whole chunk on the OST side, or our accounting goes
* wrong. Should match the code in filter_grant_check.
*/
- int offset = oap->oap_page_off & ~CFS_PAGE_MASK;
- int count = oap->oap_count + (offset & (blocksize - 1));
- int end = (offset + oap->oap_count) & (blocksize - 1);
-
+ int offset = last_off & ~PAGE_MASK;
+ int count = last_count + (offset & (blocksize - 1));
+ int end = (offset + last_count) & (blocksize - 1);
if (end)
count += blocksize - end;
@@ -943,7 +965,7 @@ static int osc_extent_wait(const struct lu_env *env, struct osc_extent *ext,
"%s: wait ext to %d timedout, recovery in progress?\n",
osc_export(obj)->exp_obd->obd_name, state);
- lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
+ lwi = LWI_INTR(NULL, NULL);
rc = l_wait_event(ext->oe_waitq, extent_wait_cb(ext, state),
&lwi);
}
@@ -990,19 +1012,19 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
/* discard all pages with index greater than trunc_index */
list_for_each_entry_safe(oap, tmp, &ext->oe_pages, oap_pending_item) {
- struct cl_page *sub = oap2cl_page(oap);
- struct cl_page *page = cl_page_top(sub);
+ pgoff_t index = osc_index(oap2osc(oap));
+ struct cl_page *page = oap2cl_page(oap);
LASSERT(list_empty(&oap->oap_rpc_item));
/* only discard the pages with their index greater than
* trunc_index, and ...
*/
- if (sub->cp_index < trunc_index ||
- (sub->cp_index == trunc_index && partial)) {
+ if (index < trunc_index ||
+ (index == trunc_index && partial)) {
/* accounting how many pages remaining in the chunk
* so that we can calculate grants correctly. */
- if (sub->cp_index >> ppc_bits == trunc_chunk)
+ if (index >> ppc_bits == trunc_chunk)
++pages_in_chunk;
continue;
}
@@ -1013,7 +1035,6 @@ static int osc_extent_truncate(struct osc_extent *ext, pgoff_t trunc_index,
lu_ref_add(&page->cp_reference, "truncate", current);
if (cl_page_own(env, io, page) == 0) {
- cl_page_unmap(env, io, page);
cl_page_discard(env, io, page);
cl_page_disown(env, io, page);
} else {
@@ -1126,7 +1147,9 @@ static int osc_extent_make_ready(const struct lu_env *env,
last->oap_count = osc_refresh_count(env, last, OBD_BRW_WRITE);
LASSERT(last->oap_count > 0);
LASSERT(last->oap_page_off + last->oap_count <= PAGE_SIZE);
+ spin_lock(&last->oap_lock);
last->oap_async_flags |= ASYNC_COUNT_STABLE;
+ spin_unlock(&last->oap_lock);
}
/* for the rest of pages, we don't need to call osf_refresh_count()
@@ -1135,7 +1158,9 @@ static int osc_extent_make_ready(const struct lu_env *env,
list_for_each_entry(oap, &ext->oe_pages, oap_pending_item) {
if (!(oap->oap_async_flags & ASYNC_COUNT_STABLE)) {
oap->oap_count = PAGE_SIZE - oap->oap_page_off;
+ spin_lock(&last->oap_lock);
oap->oap_async_flags |= ASYNC_COUNT_STABLE;
+ spin_unlock(&last->oap_lock);
}
}
@@ -1256,7 +1281,7 @@ static int osc_make_ready(const struct lu_env *env, struct osc_async_page *oap,
int cmd)
{
struct osc_page *opg = oap2osc_page(oap);
- struct cl_page *page = cl_page_top(oap2cl_page(oap));
+ struct cl_page *page = oap2cl_page(oap);
int result;
LASSERT(cmd == OBD_BRW_WRITE); /* no cached reads */
@@ -1271,7 +1296,7 @@ static int osc_refresh_count(const struct lu_env *env,
struct osc_async_page *oap, int cmd)
{
struct osc_page *opg = oap2osc_page(oap);
- struct cl_page *page = oap2cl_page(oap);
+ pgoff_t index = osc_index(oap2osc(oap));
struct cl_object *obj;
struct cl_attr *attr = &osc_env_info(env)->oti_attr;
@@ -1288,10 +1313,10 @@ static int osc_refresh_count(const struct lu_env *env,
if (result < 0)
return result;
kms = attr->cat_kms;
- if (cl_offset(obj, page->cp_index) >= kms)
+ if (cl_offset(obj, index) >= kms)
/* catch race with truncate */
return 0;
- else if (cl_offset(obj, page->cp_index + 1) > kms)
+ else if (cl_offset(obj, index + 1) > kms)
/* catch sub-page write at end of file */
return kms % PAGE_SIZE;
else
@@ -1302,14 +1327,16 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
int cmd, int rc)
{
struct osc_page *opg = oap2osc_page(oap);
- struct cl_page *page = cl_page_top(oap2cl_page(oap));
+ struct cl_page *page = oap2cl_page(oap);
struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
enum cl_req_type crt;
int srvlock;
cmd &= ~OBD_BRW_NOQUOTA;
- LASSERT(equi(page->cp_state == CPS_PAGEIN, cmd == OBD_BRW_READ));
- LASSERT(equi(page->cp_state == CPS_PAGEOUT, cmd == OBD_BRW_WRITE));
+ LASSERTF(equi(page->cp_state == CPS_PAGEIN, cmd == OBD_BRW_READ),
+ "cp_state:%u, cmd:%d\n", page->cp_state, cmd);
+ LASSERTF(equi(page->cp_state == CPS_PAGEOUT, cmd == OBD_BRW_WRITE),
+ "cp_state:%u, cmd:%d\n", page->cp_state, cmd);
LASSERT(opg->ops_transfer_pinned);
/*
@@ -1358,22 +1385,28 @@ static int osc_completion(const struct lu_env *env, struct osc_async_page *oap,
return 0;
}
-#define OSC_DUMP_GRANT(cli, fmt, args...) do { \
+#define OSC_DUMP_GRANT(lvl, cli, fmt, args...) do { \
struct client_obd *__tmp = (cli); \
- CDEBUG(D_CACHE, "%s: { dirty: %ld/%ld dirty_pages: %d/%d " \
- "dropped: %ld avail: %ld, reserved: %ld, flight: %d } " fmt, \
+ CDEBUG(lvl, "%s: grant { dirty: %ld/%ld dirty_pages: %d/%d " \
+ "unstable_pages: %d/%d dropped: %ld avail: %ld, " \
+ "reserved: %ld, flight: %d } lru {in list: %d, " \
+ "left: %d, waiters: %d }" fmt, \
__tmp->cl_import->imp_obd->obd_name, \
__tmp->cl_dirty, __tmp->cl_dirty_max, \
atomic_read(&obd_dirty_pages), obd_max_dirty_pages, \
+ atomic_read(&obd_unstable_pages), obd_max_dirty_pages, \
__tmp->cl_lost_grant, __tmp->cl_avail_grant, \
- __tmp->cl_reserved_grant, __tmp->cl_w_in_flight, ##args); \
+ __tmp->cl_reserved_grant, __tmp->cl_w_in_flight, \
+ atomic_read(&__tmp->cl_lru_in_list), \
+ atomic_read(&__tmp->cl_lru_busy), \
+ atomic_read(&__tmp->cl_lru_shrinkers), ##args); \
} while (0)
/* caller must hold loi_list_lock */
static void osc_consume_write_grant(struct client_obd *cli,
struct brw_page *pga)
{
- assert_spin_locked(&cli->cl_loi_list_lock.lock);
+ assert_spin_locked(&cli->cl_loi_list_lock);
LASSERT(!(pga->flag & OBD_BRW_FROM_GRANT));
atomic_inc(&obd_dirty_pages);
cli->cl_dirty += PAGE_SIZE;
@@ -1389,7 +1422,7 @@ static void osc_consume_write_grant(struct client_obd *cli,
static void osc_release_write_grant(struct client_obd *cli,
struct brw_page *pga)
{
- assert_spin_locked(&cli->cl_loi_list_lock.lock);
+ assert_spin_locked(&cli->cl_loi_list_lock);
if (!(pga->flag & OBD_BRW_FROM_GRANT)) {
return;
}
@@ -1408,7 +1441,7 @@ static void osc_release_write_grant(struct client_obd *cli,
* To avoid sleeping with the object lock held, it's good for us to allocate enough
* grants before entering the critical section.
*
- * client_obd_list_lock held by caller
+ * spin_lock held by caller
*/
static int osc_reserve_grant(struct client_obd *cli, unsigned int bytes)
{
@@ -1442,11 +1475,11 @@ static void __osc_unreserve_grant(struct client_obd *cli,
static void osc_unreserve_grant(struct client_obd *cli,
unsigned int reserved, unsigned int unused)
{
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
__osc_unreserve_grant(cli, reserved, unused);
if (unused > 0)
osc_wake_cache_waiters(cli);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
}
/**
@@ -1467,7 +1500,7 @@ static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
{
int grant = (1 << cli->cl_chunkbits) + cli->cl_extent_tax;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
atomic_sub(nr_pages, &obd_dirty_pages);
cli->cl_dirty -= nr_pages << PAGE_SHIFT;
cli->cl_lost_grant += lost_grant;
@@ -1479,7 +1512,7 @@ static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
cli->cl_avail_grant += grant;
}
osc_wake_cache_waiters(cli);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
CDEBUG(D_CACHE, "lost %u grant: %lu avail: %lu dirty: %lu\n",
lost_grant, cli->cl_lost_grant,
cli->cl_avail_grant, cli->cl_dirty);
@@ -1491,9 +1524,9 @@ static void osc_free_grant(struct client_obd *cli, unsigned int nr_pages,
*/
static void osc_exit_cache(struct client_obd *cli, struct osc_async_page *oap)
{
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
osc_release_write_grant(cli, &oap->oap_brw_page);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
}
/**
@@ -1506,14 +1539,15 @@ static int osc_enter_cache_try(struct client_obd *cli,
{
int rc;
- OSC_DUMP_GRANT(cli, "need:%d.\n", bytes);
+ OSC_DUMP_GRANT(D_CACHE, cli, "need:%d.\n", bytes);
rc = osc_reserve_grant(cli, bytes);
if (rc < 0)
return 0;
if (cli->cl_dirty + PAGE_SIZE <= cli->cl_dirty_max &&
- atomic_read(&obd_dirty_pages) + 1 <= obd_max_dirty_pages) {
+ atomic_read(&obd_unstable_pages) + 1 +
+ atomic_read(&obd_dirty_pages) <= obd_max_dirty_pages) {
osc_consume_write_grant(cli, &oap->oap_brw_page);
if (transient) {
cli->cl_dirty_transit += PAGE_SIZE;
@@ -1532,9 +1566,9 @@ static int ocw_granted(struct client_obd *cli, struct osc_cache_waiter *ocw)
{
int rc;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
rc = list_empty(&ocw->ocw_entry);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return rc;
}
@@ -1551,12 +1585,13 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
struct osc_object *osc = oap->oap_obj;
struct lov_oinfo *loi = osc->oo_oinfo;
struct osc_cache_waiter ocw;
- struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
+ struct l_wait_info lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(600), NULL,
+ LWI_ON_SIGNAL_NOOP, NULL);
int rc = -EDQUOT;
- OSC_DUMP_GRANT(cli, "need:%d.\n", bytes);
+ OSC_DUMP_GRANT(D_CACHE, cli, "need:%d.\n", bytes);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
/* force the caller to try sync io. this can jump the list
* of queued writes and create a discontiguous rpc stream
@@ -1587,7 +1622,7 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
while (cli->cl_dirty > 0 || cli->cl_w_in_flight > 0) {
list_add_tail(&ocw.ocw_entry, &cli->cl_cache_waiters);
ocw.ocw_rc = 0;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
osc_io_unplug_async(env, cli, NULL);
@@ -1596,10 +1631,17 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
rc = l_wait_event(ocw.ocw_waitq, ocw_granted(cli, &ocw), &lwi);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
- /* l_wait_event is interrupted by signal */
+ /* l_wait_event was interrupted by a signal, or timed out */
if (rc < 0) {
+ if (rc == -ETIMEDOUT) {
+ OSC_DUMP_GRANT(D_ERROR, cli,
+ "try to reserve %d.\n", bytes);
+ osc_extent_tree_dump(D_ERROR, osc);
+ rc = -EDQUOT;
+ }
+
list_del_init(&ocw.ocw_entry);
goto out;
}
@@ -1615,8 +1657,8 @@ static int osc_enter_cache(const struct lu_env *env, struct client_obd *cli,
}
}
out:
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- OSC_DUMP_GRANT(cli, "returned %d.\n", rc);
+ spin_unlock(&cli->cl_loi_list_lock);
+ OSC_DUMP_GRANT(D_CACHE, cli, "returned %d.\n", rc);
return rc;
}
@@ -1633,8 +1675,8 @@ void osc_wake_cache_waiters(struct client_obd *cli)
ocw->ocw_rc = -EDQUOT;
/* we can't dirty more */
if ((cli->cl_dirty + PAGE_SIZE > cli->cl_dirty_max) ||
- (atomic_read(&obd_dirty_pages) + 1 >
- obd_max_dirty_pages)) {
+ (atomic_read(&obd_unstable_pages) + 1 +
+ atomic_read(&obd_dirty_pages) > obd_max_dirty_pages)) {
CDEBUG(D_CACHE, "no dirty room: dirty: %ld osc max %ld, sys max %d\n",
cli->cl_dirty,
cli->cl_dirty_max, obd_max_dirty_pages);
@@ -1776,9 +1818,9 @@ static int osc_list_maint(struct client_obd *cli, struct osc_object *osc)
{
int is_ready;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
is_ready = __osc_list_maint(cli, osc);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return is_ready;
}
@@ -1799,13 +1841,101 @@ static void osc_process_ar(struct osc_async_rc *ar, __u64 xid,
ar->ar_force_sync = 1;
ar->ar_min_xid = ptlrpc_sample_next_xid();
return;
-
}
if (ar->ar_force_sync && (xid >= ar->ar_min_xid))
ar->ar_force_sync = 0;
}
+/**
+ * Performs "unstable" page accounting. This function balances the
+ * increment operations performed in osc_inc_unstable_pages. It is
+ * registered as the RPC request callback, and is executed when the
+ * bulk RPC is committed on the server. Thus at this point, the pages
+ * involved in the bulk transfer are no longer considered unstable.
+ */
+void osc_dec_unstable_pages(struct ptlrpc_request *req)
+{
+ struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
+ struct ptlrpc_bulk_desc *desc = req->rq_bulk;
+ int page_count = desc->bd_iov_count;
+ int i;
+
+ /* No unstable page tracking */
+ if (!cli->cl_cache)
+ return;
+
+ LASSERT(page_count >= 0);
+
+ for (i = 0; i < page_count; i++)
+ dec_zone_page_state(desc->bd_iov[i].kiov_page, NR_UNSTABLE_NFS);
+
+ atomic_sub(page_count, &cli->cl_cache->ccc_unstable_nr);
+ LASSERT(atomic_read(&cli->cl_cache->ccc_unstable_nr) >= 0);
+
+ atomic_sub(page_count, &cli->cl_unstable_count);
+ LASSERT(atomic_read(&cli->cl_unstable_count) >= 0);
+
+ atomic_sub(page_count, &obd_unstable_pages);
+ LASSERT(atomic_read(&obd_unstable_pages) >= 0);
+
+ spin_lock(&req->rq_lock);
+ req->rq_committed = 1;
+ req->rq_unstable = 0;
+ spin_unlock(&req->rq_lock);
+
+ wake_up_all(&cli->cl_cache->ccc_unstable_waitq);
+}
+
+/* "unstable" page accounting. See: osc_dec_unstable_pages. */
+void osc_inc_unstable_pages(struct ptlrpc_request *req)
+{
+ struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
+ struct ptlrpc_bulk_desc *desc = req->rq_bulk;
+ long page_count = desc->bd_iov_count;
+ int i;
+
+ /* No unstable page tracking */
+ if (!cli->cl_cache)
+ return;
+
+ LASSERT(page_count >= 0);
+
+ for (i = 0; i < page_count; i++)
+ inc_zone_page_state(desc->bd_iov[i].kiov_page, NR_UNSTABLE_NFS);
+
+ LASSERT(atomic_read(&cli->cl_cache->ccc_unstable_nr) >= 0);
+ atomic_add(page_count, &cli->cl_cache->ccc_unstable_nr);
+
+ LASSERT(atomic_read(&cli->cl_unstable_count) >= 0);
+ atomic_add(page_count, &cli->cl_unstable_count);
+
+ LASSERT(atomic_read(&obd_unstable_pages) >= 0);
+ atomic_add(page_count, &obd_unstable_pages);
+
+ spin_lock(&req->rq_lock);
+
+ /*
+ * If the request has already been committed (i.e. brw_commit
+ * called via rq_commit_cb), we need to undo the unstable page
+ * increments we just performed because rq_commit_cb won't be
+ * called again. Otherwise, just set the commit callback so the
+ * unstable page accounting is properly updated when the request
+ * is committed.
+ */
+ if (req->rq_committed) {
+ /* Drop lock before calling osc_dec_unstable_pages */
+ spin_unlock(&req->rq_lock);
+ osc_dec_unstable_pages(req);
+ spin_lock(&req->rq_lock);
+ } else {
+ req->rq_unstable = 1;
+ req->rq_commit_cb = osc_dec_unstable_pages;
+ }
+
+ spin_unlock(&req->rq_lock);
+}
+
/* this must be called holding the loi list lock to give coverage to exit_cache,
* async_flag maintenance, and oap_request
*/
@@ -1817,6 +1947,9 @@ static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli,
__u64 xid = 0;
if (oap->oap_request) {
+ if (!rc)
+ osc_inc_unstable_pages(oap->oap_request);
+
xid = ptlrpc_req_xid(oap->oap_request);
ptlrpc_req_finished(oap->oap_request);
oap->oap_request = NULL;
@@ -1829,10 +1962,10 @@ static void osc_ap_completion(const struct lu_env *env, struct client_obd *cli,
oap->oap_interrupted = 0;
if (oap->oap_cmd & OBD_BRW_WRITE && xid > 0) {
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
osc_process_ar(&cli->cl_ar, xid, rc);
osc_process_ar(&loi->loi_ar, xid, rc);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
}
rc = osc_completion(env, oap, oap->oap_cmd, rc);
@@ -2133,9 +2266,8 @@ static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli)
}
cl_object_get(obj);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- lu_object_ref_add_at(&obj->co_lu, &link, "check",
- current);
+ spin_unlock(&cli->cl_loi_list_lock);
+ lu_object_ref_add_at(&obj->co_lu, &link, "check", current);
/* attempt some read/write balancing by alternating between
* reads and writes in an object. The makes_rpc checks here
@@ -2178,11 +2310,10 @@ static void osc_check_rpcs(const struct lu_env *env, struct client_obd *cli)
osc_object_unlock(osc);
osc_list_maint(cli, osc);
- lu_object_ref_del_at(&obj->co_lu, &link, "check",
- current);
+ lu_object_ref_del_at(&obj->co_lu, &link, "check", current);
cl_object_put(env, obj);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
}
}
@@ -2199,9 +2330,9 @@ static int osc_io_unplug0(const struct lu_env *env, struct client_obd *cli,
* potential stack overrun problem. LU-2859
*/
atomic_inc(&cli->cl_lru_shrinkers);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
osc_check_rpcs(env, cli);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
atomic_dec(&cli->cl_lru_shrinkers);
} else {
CDEBUG(D_CACHE, "Queue writeback work for client %p.\n", cli);
@@ -2238,7 +2369,7 @@ int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
oap->oap_page = page;
oap->oap_obj_off = offset;
- LASSERT(!(offset & ~CFS_PAGE_MASK));
+ LASSERT(!(offset & ~PAGE_MASK));
if (!client_is_remote(exp) && capable(CFS_CAP_SYS_RESOURCE))
oap->oap_brw_flags = OBD_BRW_NOQUOTA;
@@ -2306,16 +2437,23 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
return rc;
}
+ if (osc_over_unstable_soft_limit(cli))
+ brw_flags |= OBD_BRW_SOFT_SYNC;
+
oap->oap_cmd = cmd;
oap->oap_page_off = ops->ops_from;
oap->oap_count = ops->ops_to - ops->ops_from;
+ /*
+ * No need to hold a lock here,
+ * since this page is not in any list yet.
+ */
oap->oap_async_flags = 0;
oap->oap_brw_flags = brw_flags;
OSC_IO_DEBUG(osc, "oap %p page %p added for cmd %d\n",
oap, oap->oap_page, oap->oap_cmd & OBD_BRW_RWMASK);
- index = oap2cl_page(oap)->cp_index;
+ index = osc_index(oap2osc(oap));
/* Add this page into extent by the following steps:
* 1. if there exists an active extent for this IO, mostly this page
@@ -2334,9 +2472,9 @@ int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
grants = 0;
/* it doesn't need any grant to dirty this page */
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
rc = osc_enter_cache_try(cli, oap, grants, 0);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
if (rc == 0) { /* try failed */
grants = 0;
need_release = 1;
@@ -2427,21 +2565,21 @@ int osc_teardown_async_page(const struct lu_env *env,
LASSERT(oap->oap_magic == OAP_MAGIC);
CDEBUG(D_INFO, "teardown oap %p page %p at index %lu.\n",
- oap, ops, oap2cl_page(oap)->cp_index);
+ oap, ops, osc_index(oap2osc(oap)));
osc_object_lock(obj);
if (!list_empty(&oap->oap_rpc_item)) {
CDEBUG(D_CACHE, "oap %p is not in cache.\n", oap);
rc = -EBUSY;
} else if (!list_empty(&oap->oap_pending_item)) {
- ext = osc_extent_lookup(obj, oap2cl_page(oap)->cp_index);
+ ext = osc_extent_lookup(obj, osc_index(oap2osc(oap)));
/* only truncated pages are allowed to be taken out.
* See osc_extent_truncate() and osc_cache_truncate_start()
* for details.
*/
if (ext && ext->oe_state != OES_TRUNC) {
OSC_EXTENT_DUMP(D_ERROR, ext, "trunc at %lu.\n",
- oap2cl_page(oap)->cp_index);
+ osc_index(oap2osc(oap)));
rc = -EBUSY;
}
}
@@ -2464,7 +2602,7 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
struct osc_extent *ext = NULL;
struct osc_object *obj = cl2osc(ops->ops_cl.cpl_obj);
struct cl_page *cp = ops->ops_cl.cpl_page;
- pgoff_t index = cp->cp_index;
+ pgoff_t index = osc_index(ops);
struct osc_async_page *oap = &ops->ops_oap;
bool unplug = false;
int rc = 0;
@@ -2479,8 +2617,7 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
switch (ext->oe_state) {
case OES_RPC:
case OES_LOCK_DONE:
- CL_PAGE_DEBUG(D_ERROR, env, cl_page_top(cp),
- "flush an in-rpc page?\n");
+ CL_PAGE_DEBUG(D_ERROR, env, cp, "flush an in-rpc page?\n");
LASSERT(0);
break;
case OES_LOCKING:
@@ -2506,7 +2643,7 @@ int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
break;
}
- rc = cl_page_prep(env, io, cl_page_top(cp), CRT_WRITE);
+ rc = cl_page_prep(env, io, cp, CRT_WRITE);
if (rc)
goto out;
@@ -2550,7 +2687,7 @@ int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops)
struct osc_extent *ext;
struct osc_extent *found = NULL;
struct list_head *plist;
- pgoff_t index = oap2cl_page(oap)->cp_index;
+ pgoff_t index = osc_index(ops);
int rc = -EBUSY;
int cmd;
@@ -2613,12 +2750,12 @@ int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
pgoff_t end = 0;
list_for_each_entry(oap, list, oap_pending_item) {
- struct cl_page *cp = oap2cl_page(oap);
+ pgoff_t index = osc_index(oap2osc(oap));
- if (cp->cp_index > end)
- end = cp->cp_index;
- if (cp->cp_index < start)
- start = cp->cp_index;
+ if (index > end)
+ end = index;
+ if (index < start)
+ start = index;
++page_count;
mppr <<= (page_count > mppr);
}
@@ -2633,6 +2770,7 @@ int osc_queue_sync_pages(const struct lu_env *env, struct osc_object *obj,
}
ext->oe_rw = !!(cmd & OBD_BRW_READ);
+ ext->oe_sync = 1;
ext->oe_urgent = 1;
ext->oe_start = start;
ext->oe_end = ext->oe_max_end = end;
@@ -2988,7 +3126,200 @@ int osc_cache_writeback_range(const struct lu_env *env, struct osc_object *obj,
result = rc;
}
- OSC_IO_DEBUG(obj, "cache page out.\n");
+ OSC_IO_DEBUG(obj, "pageout [%lu, %lu], %d.\n", start, end, result);
+ return result;
+}
+
+/**
+ * Returns a list of pages by a given [start, end] of \a obj.
+ *
+ * \param resched If not NULL, then we give up before hogging the CPU for too
+ * long and set *resched = 1; in that case the caller should implement
+ * retry logic.
+ *
+ * Gang tree lookup (radix_tree_gang_lookup()) optimization is absolutely
+ * crucial in the face of [offset, EOF] locks.
+ *
+ * Return at least one page in @queue unless there is no covered page.
+ */
+int osc_page_gang_lookup(const struct lu_env *env, struct cl_io *io,
+ struct osc_object *osc, pgoff_t start, pgoff_t end,
+ osc_page_gang_cbt cb, void *cbdata)
+{
+ struct osc_page *ops;
+ void **pvec;
+ pgoff_t idx;
+ unsigned int nr;
+ unsigned int i;
+ unsigned int j;
+ int res = CLP_GANG_OKAY;
+ bool tree_lock = true;
+
+ idx = start;
+ pvec = osc_env_info(env)->oti_pvec;
+ spin_lock(&osc->oo_tree_lock);
+ while ((nr = radix_tree_gang_lookup(&osc->oo_tree, pvec,
+ idx, OTI_PVEC_SIZE)) > 0) {
+ struct cl_page *page;
+ bool end_of_region = false;
+
+ for (i = 0, j = 0; i < nr; ++i) {
+ ops = pvec[i];
+ pvec[i] = NULL;
+
+ idx = osc_index(ops);
+ if (idx > end) {
+ end_of_region = true;
+ break;
+ }
+
+ page = ops->ops_cl.cpl_page;
+ LASSERT(page->cp_type == CPT_CACHEABLE);
+ if (page->cp_state == CPS_FREEING)
+ continue;
+
+ cl_page_get(page);
+ lu_ref_add_atomic(&page->cp_reference,
+ "gang_lookup", current);
+ pvec[j++] = ops;
+ }
+ ++idx;
+
+ /*
+ * Here a delicate locking dance is performed. Current thread
+ * holds a reference to a page, but has to own it before it
+ * can be placed into queue. Owning implies waiting, so
+ * radix-tree lock is to be released. After a wait one has to
+ * check that pages weren't truncated (cl_page_own() returns
+ * error in the latter case).
+ */
+ spin_unlock(&osc->oo_tree_lock);
+ tree_lock = false;
+
+ for (i = 0; i < j; ++i) {
+ ops = pvec[i];
+ if (res == CLP_GANG_OKAY)
+ res = (*cb)(env, io, ops, cbdata);
+
+ page = ops->ops_cl.cpl_page;
+ lu_ref_del(&page->cp_reference, "gang_lookup", current);
+ cl_page_put(env, page);
+ }
+ if (nr < OTI_PVEC_SIZE || end_of_region)
+ break;
+
+ if (res == CLP_GANG_OKAY && need_resched())
+ res = CLP_GANG_RESCHED;
+ if (res != CLP_GANG_OKAY)
+ break;
+
+ spin_lock(&osc->oo_tree_lock);
+ tree_lock = true;
+ }
+ if (tree_lock)
+ spin_unlock(&osc->oo_tree_lock);
+ return res;
+}
+
+/**
+ * Check if page @page is covered by an extra lock or discard it.
+ */
+static int check_and_discard_cb(const struct lu_env *env, struct cl_io *io,
+ struct osc_page *ops, void *cbdata)
+{
+ struct osc_thread_info *info = osc_env_info(env);
+ struct osc_object *osc = cbdata;
+ pgoff_t index;
+
+ index = osc_index(ops);
+ if (index >= info->oti_fn_index) {
+ struct ldlm_lock *tmp;
+ struct cl_page *page = ops->ops_cl.cpl_page;
+
+ /* refresh non-overlapped index */
+ tmp = osc_dlmlock_at_pgoff(env, osc, index, 0, 0);
+ if (tmp) {
+ __u64 end = tmp->l_policy_data.l_extent.end;
+ /* Cache the first-non-overlapped index so as to skip
+ * all pages within [index, oti_fn_index). This is safe
+ * because if tmp lock is canceled, it will discard
+ * these pages.
+ */
+ info->oti_fn_index = cl_index(osc2cl(osc), end + 1);
+ if (end == OBD_OBJECT_EOF)
+ info->oti_fn_index = CL_PAGE_EOF;
+ LDLM_LOCK_PUT(tmp);
+ } else if (cl_page_own(env, io, page) == 0) {
+ /* discard the page */
+ cl_page_discard(env, io, page);
+ cl_page_disown(env, io, page);
+ } else {
+ LASSERT(page->cp_state == CPS_FREEING);
+ }
+ }
+
+ info->oti_next_index = index + 1;
+ return CLP_GANG_OKAY;
+}
+
+static int discard_cb(const struct lu_env *env, struct cl_io *io,
+ struct osc_page *ops, void *cbdata)
+{
+ struct osc_thread_info *info = osc_env_info(env);
+ struct cl_page *page = ops->ops_cl.cpl_page;
+
+ /* page is top page. */
+ info->oti_next_index = osc_index(ops) + 1;
+ if (cl_page_own(env, io, page) == 0) {
+ KLASSERT(ergo(page->cp_type == CPT_CACHEABLE,
+ !PageDirty(cl_page_vmpage(page))));
+
+ /* discard the page */
+ cl_page_discard(env, io, page);
+ cl_page_disown(env, io, page);
+ } else {
+ LASSERT(page->cp_state == CPS_FREEING);
+ }
+
+ return CLP_GANG_OKAY;
+}
+
+/**
+ * Discard pages protected by the given lock. This function traverses radix
+ * tree to find all covering pages and discard them. If a page is being covered
+ * by other locks, it should remain in cache.
+ *
+ * If error happens on any step, the process continues anyway (the reasoning
+ * behind this being that lock cancellation cannot be delayed indefinitely).
+ */
+int osc_lock_discard_pages(const struct lu_env *env, struct osc_object *osc,
+ pgoff_t start, pgoff_t end, enum cl_lock_mode mode)
+{
+ struct osc_thread_info *info = osc_env_info(env);
+ struct cl_io *io = &info->oti_io;
+ osc_page_gang_cbt cb;
+ int res;
+ int result;
+
+ io->ci_obj = cl_object_top(osc2cl(osc));
+ io->ci_ignore_layout = 1;
+ result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
+ if (result != 0)
+ goto out;
+
+ cb = mode == CLM_READ ? check_and_discard_cb : discard_cb;
+ info->oti_fn_index = info->oti_next_index = start;
+ do {
+ res = osc_page_gang_lookup(env, io, osc,
+ info->oti_next_index, end, cb, osc);
+ if (info->oti_next_index > end)
+ break;
+
+ if (res == CLP_GANG_RESCHED)
+ cond_resched();
+ } while (res != CLP_GANG_OKAY);
+out:
+ cl_io_fini(env, io);
return result;
}
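
The retry contract of the new gang lookup above is easy to miss: CLP_GANG_RESCHED means the walk gave up the CPU early and the caller must resume from where it stopped, as osc_lock_discard_pages() does. The following is a minimal sketch, not part of the patch, of a hypothetical consumer of osc_page_gang_lookup(); the callback and helper names are invented, and the declarations are assumed to come from the osc_cl_internal.h changes shown below.

/*
 * Illustrative sketch only, not patch content: a hypothetical consumer of
 * osc_page_gang_lookup(). The resume-and-retry handling of CLP_GANG_RESCHED
 * mirrors osc_lock_discard_pages() above.
 */
struct count_pages_data {
	pgoff_t		cpd_next;	/* index to resume from after a break */
	unsigned long	cpd_count;	/* pages seen so far */
};

static int count_pages_cb(const struct lu_env *env, struct cl_io *io,
			  struct osc_page *ops, void *cbdata)
{
	struct count_pages_data *cpd = cbdata;

	/* osc_index() replaces the old cl_page::cp_index access */
	cpd->cpd_next = osc_index(ops) + 1;
	cpd->cpd_count++;
	return CLP_GANG_OKAY;
}

static unsigned long count_cached_pages(const struct lu_env *env,
					struct cl_io *io,
					struct osc_object *osc,
					pgoff_t start, pgoff_t end)
{
	struct count_pages_data cpd = { .cpd_next = start, .cpd_count = 0 };
	int res;

	do {
		res = osc_page_gang_lookup(env, io, osc, cpd.cpd_next, end,
					   count_pages_cb, &cpd);
		if (cpd.cpd_next > end)
			break;
		if (res == CLP_GANG_RESCHED)
			cond_resched();	/* yield the CPU, then resume */
	} while (res != CLP_GANG_OKAY);

	return cpd.cpd_count;
}
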
diff --git a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
index d55d04d0428b..ae19d396b537 100644
--- a/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_cl_internal.h
@@ -51,7 +51,6 @@
#include "../include/obd.h"
/* osc_build_res_name() */
#include "../include/cl_object.h"
-#include "../include/lclient.h"
#include "osc_internal.h"
/** \defgroup osc osc
@@ -68,6 +67,9 @@ struct osc_io {
struct cl_io_slice oi_cl;
/** true if this io is lockless. */
int oi_lockless;
+ /** how many LRU pages are reserved for this IO */
+ int oi_lru_reserved;
+
/** active extents, we know how many bytes is going to be written,
* so having an active extent will prevent it from being fragmented
*/
@@ -77,6 +79,8 @@ struct osc_io {
*/
struct osc_extent *oi_trunc;
+ /** write osc_lock for this IO, used by osc_extent_find(). */
+ struct osc_lock *oi_write_osclock;
struct obd_info oi_info;
struct obdo oi_oa;
struct osc_async_cbargs {
@@ -100,7 +104,7 @@ struct osc_session {
struct osc_io os_io;
};
-#define OTI_PVEC_SIZE 64
+#define OTI_PVEC_SIZE 256
struct osc_thread_info {
struct ldlm_res_id oti_resname;
ldlm_policy_data_t oti_policy;
@@ -109,7 +113,13 @@ struct osc_thread_info {
struct lustre_handle oti_handle;
struct cl_page_list oti_plist;
struct cl_io oti_io;
- struct cl_page *oti_pvec[OTI_PVEC_SIZE];
+ void *oti_pvec[OTI_PVEC_SIZE];
+ /**
+ * Fields used by cl_lock_discard_pages().
+ */
+ pgoff_t oti_next_index;
+ pgoff_t oti_fn_index; /* first non-overlapped index */
+ struct cl_sync_io oti_anchor;
};
struct osc_object {
@@ -125,7 +135,7 @@ struct osc_object {
*/
struct list_head oo_inflight[CRT_NR];
/**
- * Lock, protecting ccc_object::cob_inflight, because a seat-belt is
+ * Lock, protecting osc_page::ops_inflight, because a seat-belt is
* locked during take-off and landing.
*/
spinlock_t oo_seatbelt;
@@ -159,6 +169,17 @@ struct osc_object {
* oo_{read|write}_pages soon.
*/
spinlock_t oo_lock;
+
+ /**
+ * Radix tree for caching pages
+ */
+ struct radix_tree_root oo_tree;
+ spinlock_t oo_tree_lock;
+ unsigned long oo_npages;
+
+ /* Protects the list of osc_locks this osc_object has */
+ spinlock_t oo_ol_spin;
+ struct list_head oo_ol_list;
};
static inline void osc_object_lock(struct osc_object *obj)
@@ -198,8 +219,6 @@ enum osc_lock_state {
OLS_ENQUEUED,
OLS_UPCALL_RECEIVED,
OLS_GRANTED,
- OLS_RELEASED,
- OLS_BLOCKED,
OLS_CANCELLED
};
@@ -208,10 +227,8 @@ enum osc_lock_state {
*
* Interaction with DLM.
*
- * CLIO enqueues all DLM locks through ptlrpcd (that is, in "async" mode).
- *
* Once receive upcall is invoked, osc_lock remembers a handle of DLM lock in
- * osc_lock::ols_handle and a pointer to that lock in osc_lock::ols_lock.
+ * osc_lock::ols_handle and a pointer to that lock in osc_lock::ols_dlmlock.
*
* This pointer is protected through a reference, acquired by
* osc_lock_upcall0(). Also, an additional reference is acquired by
@@ -249,26 +266,27 @@ enum osc_lock_state {
*/
struct osc_lock {
struct cl_lock_slice ols_cl;
+ /** Internal lock to protect states, etc. */
+ spinlock_t ols_lock;
+ /** Owner sleeps on this channel for state change */
+ struct cl_sync_io *ols_owner;
+ /** waiting list for this lock to be cancelled */
+ struct list_head ols_waiting_list;
+ /** wait entry of ols_waiting_list */
+ struct list_head ols_wait_entry;
+ /** list entry for osc_object::oo_ol_list */
+ struct list_head ols_nextlock_oscobj;
+
/** underlying DLM lock */
- struct ldlm_lock *ols_lock;
- /** lock value block */
- struct ost_lvb ols_lvb;
+ struct ldlm_lock *ols_dlmlock;
/** DLM flags with which osc_lock::ols_lock was enqueued */
__u64 ols_flags;
/** osc_lock::ols_lock handle */
struct lustre_handle ols_handle;
struct ldlm_enqueue_info ols_einfo;
enum osc_lock_state ols_state;
-
- /**
- * How many pages are using this lock for io, currently only used by
- * read-ahead. If non-zero, the underlying dlm lock won't be cancelled
- * during recovery to avoid deadlock. see bz16774.
- *
- * \see osc_page::ops_lock
- * \see osc_page_addref_lock(), osc_page_putref_lock()
- */
- atomic_t ols_pageref;
+ /** lock value block */
+ struct ost_lvb ols_lvb;
/**
* true, if ldlm_lock_addref() was called against
@@ -299,16 +317,6 @@ struct osc_lock {
*/
ols_locklessable:1,
/**
- * set by osc_lock_use() to wait until blocking AST enters into
- * osc_ldlm_blocking_ast0(), so that cl_lock mutex can be used for
- * further synchronization.
- */
- ols_ast_wait:1,
- /**
- * If the data of this lock has been flushed to server side.
- */
- ols_flush:1,
- /**
* if set, the osc_lock is a glimpse lock. For glimpse locks, we treat
* the EVAVAIL error as tolerable, this will make upper logic happy
* to wait all glimpse locks to each OSTs to be completed.
@@ -321,15 +329,6 @@ struct osc_lock {
* For async glimpse lock.
*/
ols_agl:1;
- /**
- * IO that owns this lock. This field is used for a dead-lock
- * avoidance by osc_lock_enqueue_wait().
- *
- * XXX: unfortunately, the owner of a osc_lock is not unique,
- * the lock may have multiple users, if the lock is granted and
- * then matched.
- */
- struct osc_io *ols_owner;
};
/**
@@ -369,18 +368,15 @@ struct osc_page {
* Set if the page must be transferred with OBD_BRW_SRVLOCK.
*/
ops_srvlock:1;
- union {
- /**
- * lru page list. ops_inflight and ops_lru are exclusive so
- * that they can share the same data.
- */
- struct list_head ops_lru;
- /**
- * Linkage into a per-osc_object list of pages in flight. For
- * debugging.
- */
- struct list_head ops_inflight;
- };
+ /**
+ * lru page list. See osc_lru_{del|use}() in osc_page.c for usage.
+ */
+ struct list_head ops_lru;
+ /**
+ * Linkage into a per-osc_object list of pages in flight. For
+ * debugging.
+ */
+ struct list_head ops_inflight;
/**
* Thread that submitted this page for transfer. For debugging.
*/
@@ -389,16 +385,6 @@ struct osc_page {
* Submit time - the time when the page is starting RPC. For debugging.
*/
unsigned long ops_submit_time;
-
- /**
- * A lock of which we hold a reference covers this page. Only used by
- * read-ahead: for a readahead page, we hold it's covering lock to
- * prevent it from being canceled during recovery.
- *
- * \see osc_lock::ols_pageref
- * \see osc_page_addref_lock(), osc_page_putref_lock().
- */
- struct cl_lock *ops_lock;
};
extern struct kmem_cache *osc_lock_kmem;
@@ -417,21 +403,22 @@ extern struct lu_context_key osc_session_key;
int osc_lock_init(const struct lu_env *env,
struct cl_object *obj, struct cl_lock *lock,
const struct cl_io *io);
-int osc_io_init (const struct lu_env *env,
- struct cl_object *obj, struct cl_io *io);
-int osc_req_init (const struct lu_env *env, struct cl_device *dev,
- struct cl_req *req);
+int osc_io_init(const struct lu_env *env,
+ struct cl_object *obj, struct cl_io *io);
+int osc_req_init(const struct lu_env *env, struct cl_device *dev,
+ struct cl_req *req);
struct lu_object *osc_object_alloc(const struct lu_env *env,
const struct lu_object_header *hdr,
struct lu_device *dev);
int osc_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage);
+ struct cl_page *page, pgoff_t ind);
-void osc_index2policy (ldlm_policy_data_t *policy, const struct cl_object *obj,
- pgoff_t start, pgoff_t end);
-int osc_lvb_print (const struct lu_env *env, void *cookie,
- lu_printer_t p, const struct ost_lvb *lvb);
+void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
+ pgoff_t start, pgoff_t end);
+int osc_lvb_print(const struct lu_env *env, void *cookie,
+ lu_printer_t p, const struct ost_lvb *lvb);
+void osc_lru_add_batch(struct client_obd *cli, struct list_head *list);
void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
enum cl_req_type crt, int brw_flags);
int osc_cancel_async_page(const struct lu_env *env, struct osc_page *ops);
@@ -441,6 +428,8 @@ int osc_prep_async_page(struct osc_object *osc, struct osc_page *ops,
struct page *page, loff_t offset);
int osc_queue_async_io(const struct lu_env *env, struct cl_io *io,
struct osc_page *ops);
+int osc_page_cache_add(const struct lu_env *env,
+ const struct cl_page_slice *slice, struct cl_io *io);
int osc_teardown_async_page(const struct lu_env *env, struct osc_object *obj,
struct osc_page *ops);
int osc_flush_async_page(const struct lu_env *env, struct cl_io *io,
@@ -457,12 +446,13 @@ int osc_cache_wait_range(const struct lu_env *env, struct osc_object *obj,
pgoff_t start, pgoff_t end);
void osc_io_unplug(const struct lu_env *env, struct client_obd *cli,
struct osc_object *osc);
+int lru_queue_work(const struct lu_env *env, void *data);
-void osc_object_set_contended (struct osc_object *obj);
+void osc_object_set_contended(struct osc_object *obj);
void osc_object_clear_contended(struct osc_object *obj);
-int osc_object_is_contended (struct osc_object *obj);
+int osc_object_is_contended(struct osc_object *obj);
-int osc_lock_is_lockless (const struct osc_lock *olck);
+int osc_lock_is_lockless(const struct osc_lock *olck);
/*****************************************************************************
*
@@ -558,6 +548,11 @@ static inline struct osc_page *oap2osc(struct osc_async_page *oap)
return container_of0(oap, struct osc_page, ops_oap);
}
+static inline pgoff_t osc_index(struct osc_page *opg)
+{
+ return opg->ops_cl.cpl_index;
+}
+
static inline struct cl_page *oap2cl_page(struct osc_async_page *oap)
{
return oap2osc(oap)->ops_cl.cpl_page;
@@ -608,7 +603,7 @@ enum osc_extent_state {
*
* LOCKING ORDER
* =============
- * page lock -> client_obd_list_lock -> object lock(osc_object::oo_lock)
+ * page lock -> cl_loi_list_lock -> object lock(osc_object::oo_lock)
*/
struct osc_extent {
/** red-black tree node */
@@ -627,6 +622,8 @@ struct osc_extent {
unsigned int oe_intree:1,
/** 0 is write, 1 is read */
oe_rw:1,
+ /** sync extent, queued by osc_queue_sync_pages() */
+ oe_sync:1,
oe_srvlock:1,
oe_memalloc:1,
/** an ACTIVE extent is going to be truncated, so when this extent
@@ -675,7 +672,7 @@ struct osc_extent {
*/
wait_queue_head_t oe_waitq;
/** lock covering this extent */
- struct cl_lock *oe_osclock;
+ struct ldlm_lock *oe_dlmlock;
/** terminator of this extent. Must be true if this extent is in IO. */
struct task_struct *oe_owner;
/** return value of writeback. If somebody is waiting for this extent,
@@ -690,6 +687,14 @@ int osc_extent_finish(const struct lu_env *env, struct osc_extent *ext,
int sent, int rc);
void osc_extent_release(const struct lu_env *env, struct osc_extent *ext);
+int osc_lock_discard_pages(const struct lu_env *env, struct osc_object *osc,
+ pgoff_t start, pgoff_t end, enum cl_lock_mode mode);
+
+typedef int (*osc_page_gang_cbt)(const struct lu_env *, struct cl_io *,
+ struct osc_page *, void *);
+int osc_page_gang_lookup(const struct lu_env *env, struct cl_io *io,
+ struct osc_object *osc, pgoff_t start, pgoff_t end,
+ osc_page_gang_cbt cb, void *cbdata);
/** @} osc */
#endif /* OSC_CL_INTERNAL_H */
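
The header above adds a per-object radix tree (oo_tree, oo_tree_lock, oo_npages) that osc_page_gang_lookup() walks. The insertion side lives in osc_page.c and is not shown in this hunk; the sketch below is only an assumption of what that bookkeeping plausibly looks like, keyed by osc_index(). Both function names are hypothetical.

/*
 * Sketch under assumptions, not code from this patch: the per-object radix
 * tree is keyed by page index, so the insert/remove side presumably follows
 * the usual preload + locked-insert pattern.
 */
static int example_cache_page(struct osc_object *obj, struct osc_page *opg)
{
	int rc;

	rc = radix_tree_preload(GFP_NOFS);
	if (rc)
		return rc;

	spin_lock(&obj->oo_tree_lock);
	rc = radix_tree_insert(&obj->oo_tree, osc_index(opg), opg);
	if (rc == 0)
		++obj->oo_npages;
	spin_unlock(&obj->oo_tree_lock);

	radix_tree_preload_end();
	return rc;
}

static void example_uncache_page(struct osc_object *obj, struct osc_page *opg)
{
	spin_lock(&obj->oo_tree_lock);
	if (radix_tree_delete(&obj->oo_tree, osc_index(opg)))
		--obj->oo_npages;
	spin_unlock(&obj->oo_tree_lock);
}
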
diff --git a/drivers/staging/lustre/lustre/osc/osc_internal.h b/drivers/staging/lustre/lustre/osc/osc_internal.h
index ea695c2099ee..7fad8278150f 100644
--- a/drivers/staging/lustre/lustre/osc/osc_internal.h
+++ b/drivers/staging/lustre/lustre/osc/osc_internal.h
@@ -83,6 +83,12 @@ struct osc_async_page {
#define oap_count oap_brw_page.count
#define oap_brw_flags oap_brw_page.flag
+static inline struct osc_async_page *brw_page2oap(struct brw_page *pga)
+{
+ return (struct osc_async_page *)container_of(pga, struct osc_async_page,
+ oap_brw_page);
+}
+
struct osc_cache_waiter {
struct list_head ocw_entry;
wait_queue_head_t ocw_waitq;
@@ -102,12 +108,14 @@ void osc_update_next_shrink(struct client_obd *cli);
extern struct ptlrpc_request_set *PTLRPCD_SET;
+typedef int (*osc_enqueue_upcall_f)(void *cookie, struct lustre_handle *lockh,
+ int rc);
+
int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
__u64 *flags, ldlm_policy_data_t *policy,
struct ost_lvb *lvb, int kms_valid,
- obd_enqueue_update_f upcall,
+ osc_enqueue_upcall_f upcall,
void *cookie, struct ldlm_enqueue_info *einfo,
- struct lustre_handle *lockh,
struct ptlrpc_request_set *rqset, int async, int agl);
int osc_cancel_base(struct lustre_handle *lockh, __u32 mode);
@@ -130,9 +138,11 @@ int osc_sync_base(struct obd_export *exp, struct obd_info *oinfo,
int osc_process_config_base(struct obd_device *obd, struct lustre_cfg *cfg);
int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
struct list_head *ext_list, int cmd);
-int osc_lru_shrink(struct client_obd *cli, int target);
+int osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
+ int target, bool force);
+int osc_lru_reclaim(struct client_obd *cli);
-extern spinlock_t osc_ast_guard;
+unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock);
int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg);
@@ -173,8 +183,6 @@ static inline struct osc_device *obd2osc_dev(const struct obd_device *d)
return container_of0(d->obd_lu_dev, struct osc_device, od_cl.cd_lu_dev);
}
-int osc_dlm_lock_pageref(struct ldlm_lock *dlm);
-
extern struct kmem_cache *osc_quota_kmem;
struct osc_quota_info {
/** linkage for quota hash table */
@@ -192,5 +200,12 @@ int osc_quotactl(struct obd_device *unused, struct obd_export *exp,
int osc_quotacheck(struct obd_device *unused, struct obd_export *exp,
struct obd_quotactl *oqctl);
int osc_quota_poll_check(struct obd_export *exp, struct if_quotacheck *qchk);
+void osc_inc_unstable_pages(struct ptlrpc_request *req);
+void osc_dec_unstable_pages(struct ptlrpc_request *req);
+int osc_over_unstable_soft_limit(struct client_obd *cli);
+
+struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env,
+ struct osc_object *obj, pgoff_t index,
+ int pending, int canceling);
#endif /* OSC_INTERNAL_H */
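
osc_enqueue_base() now takes an osc_enqueue_upcall_f and no longer a lustre_handle pointer, so the granted handle is delivered to the upcall instead of being written back through the caller. A rough, hypothetical example of a conforming callback is sketched below; the real osc_lock_upcall() in osc_lock.c does considerably more reference and state handling, and the function name and cookie type here are assumptions.

/*
 * Illustrative sketch, not patch content: a minimal callback matching the
 * new osc_enqueue_upcall_f signature. It would be passed as the "upcall"
 * argument to osc_enqueue_base(), with the osc_lock as its cookie.
 */
static int example_enqueue_upcall(void *cookie, struct lustre_handle *lockh,
				  int rc)
{
	struct osc_lock *oscl = cookie;	/* assumed cookie type */

	if (rc == 0)
		/* remember the granted DLM lock by its handle */
		oscl->ols_handle = *lockh;
	return rc;
}
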
diff --git a/drivers/staging/lustre/lustre/osc/osc_io.c b/drivers/staging/lustre/lustre/osc/osc_io.c
index 6bd0a45d8b06..d534b0e0edf6 100644
--- a/drivers/staging/lustre/lustre/osc/osc_io.c
+++ b/drivers/staging/lustre/lustre/osc/osc_io.c
@@ -68,11 +68,15 @@ static struct osc_io *cl2osc_io(const struct lu_env *env,
return oio;
}
-static struct osc_page *osc_cl_page_osc(struct cl_page *page)
+static struct osc_page *osc_cl_page_osc(struct cl_page *page,
+ struct osc_object *osc)
{
const struct cl_page_slice *slice;
- slice = cl_page_at(page, &osc_device_type);
+ if (osc)
+ slice = cl_object_page_slice(&osc->oo_cl, page);
+ else
+ slice = cl_page_at(page, &osc_device_type);
LASSERT(slice);
return cl2osc_page(slice);
@@ -137,7 +141,7 @@ static int osc_io_submit(const struct lu_env *env,
io = page->cp_owner;
LASSERT(io);
- opg = osc_cl_page_osc(page);
+ opg = osc_cl_page_osc(page, osc);
oap = &opg->ops_oap;
LASSERT(osc == oap->oap_obj);
@@ -164,8 +168,10 @@ static int osc_io_submit(const struct lu_env *env,
}
cl_page_list_move(qout, qin, page);
+ spin_lock(&oap->oap_lock);
oap->oap_async_flags = ASYNC_URGENT|ASYNC_READY;
oap->oap_async_flags |= ASYNC_COUNT_STABLE;
+ spin_unlock(&oap->oap_lock);
osc_page_submit(env, opg, crt, brw_flags);
list_add_tail(&oap->oap_pending_item, &list);
@@ -185,6 +191,13 @@ static int osc_io_submit(const struct lu_env *env,
return qout->pl_nr > 0 ? 0 : result;
}
+/**
+ * This is called when a page is accessed within a file in a way that creates
+ * a new page, if one was missing (i.e., if there was a hole at that place in
+ * the file, or the accessed page is beyond the current file size).
+ *
+ * Expand stripe KMS if necessary.
+ */
static void osc_page_touch_at(const struct lu_env *env,
struct cl_object *obj, pgoff_t idx, unsigned to)
{
@@ -208,7 +221,8 @@ static void osc_page_touch_at(const struct lu_env *env,
kms > loi->loi_kms ? "" : "not ", loi->loi_kms, kms,
loi->loi_lvb.lvb_size);
- valid = 0;
+ attr->cat_mtime = attr->cat_ctime = LTIME_S(CURRENT_TIME);
+ valid = CAT_MTIME | CAT_CTIME;
if (kms > loi->loi_kms) {
attr->cat_kms = kms;
valid |= CAT_KMS;
@@ -221,91 +235,128 @@ static void osc_page_touch_at(const struct lu_env *env,
cl_object_attr_unlock(obj);
}
-/**
- * This is called when a page is accessed within file in a way that creates
- * new page, if one were missing (i.e., if there were a hole at that place in
- * the file, or accessed page is beyond the current file size). Examples:
- * ->commit_write() and ->nopage() methods.
- *
- * Expand stripe KMS if necessary.
- */
-static void osc_page_touch(const struct lu_env *env,
- struct osc_page *opage, unsigned to)
-{
- struct cl_page *page = opage->ops_cl.cpl_page;
- struct cl_object *obj = opage->ops_cl.cpl_obj;
-
- osc_page_touch_at(env, obj, page->cp_index, to);
-}
-
-/**
- * Implements cl_io_operations::cio_prepare_write() method for osc layer.
- *
- * \retval -EIO transfer initiated against this osc will most likely fail
- * \retval 0 transfer initiated against this osc will most likely succeed.
- *
- * The reason for this check is to immediately return an error to the caller
- * in the case of a deactivated import. Note, that import can be deactivated
- * later, while pages, dirtied by this IO, are still in the cache, but this is
- * irrelevant, because that would still return an error to the application (if
- * it does fsync), but many applications don't do fsync because of performance
- * issues, and we wanted to return an -EIO at write time to notify the
- * application.
- */
-static int osc_io_prepare_write(const struct lu_env *env,
- const struct cl_io_slice *ios,
- const struct cl_page_slice *slice,
- unsigned from, unsigned to)
+static int osc_io_commit_async(const struct lu_env *env,
+ const struct cl_io_slice *ios,
+ struct cl_page_list *qin, int from, int to,
+ cl_commit_cbt cb)
{
- struct osc_device *dev = lu2osc_dev(slice->cpl_obj->co_lu.lo_dev);
- struct obd_import *imp = class_exp2cliimp(dev->od_exp);
+ struct cl_io *io = ios->cis_io;
struct osc_io *oio = cl2osc_io(env, ios);
+ struct osc_object *osc = cl2osc(ios->cis_obj);
+ struct cl_page *page;
+ struct cl_page *last_page;
+ struct osc_page *opg;
int result = 0;
- /*
- * This implements OBD_BRW_CHECK logic from old client.
- */
+ LASSERT(qin->pl_nr > 0);
+
+ /* Handle partial page cases */
+ last_page = cl_page_list_last(qin);
+ if (oio->oi_lockless) {
+ page = cl_page_list_first(qin);
+ if (page == last_page) {
+ cl_page_clip(env, page, from, to);
+ } else {
+ if (from != 0)
+ cl_page_clip(env, page, from, PAGE_SIZE);
+ if (to != PAGE_SIZE)
+ cl_page_clip(env, last_page, 0, to);
+ }
+ }
+
+ while (qin->pl_nr > 0) {
+ struct osc_async_page *oap;
+
+ page = cl_page_list_first(qin);
+ opg = osc_cl_page_osc(page, osc);
+ oap = &opg->ops_oap;
+
+ if (!list_empty(&oap->oap_rpc_item)) {
+ CDEBUG(D_CACHE, "Busy oap %p page %p for submit.\n",
+ oap, opg);
+ result = -EBUSY;
+ break;
+ }
+
+ /* The page may be already in dirty cache. */
+ if (list_empty(&oap->oap_pending_item)) {
+ result = osc_page_cache_add(env, &opg->ops_cl, io);
+ if (result != 0)
+ break;
+ }
+
+ osc_page_touch_at(env, osc2cl(osc), osc_index(opg),
+ page == last_page ? to : PAGE_SIZE);
+
+ cl_page_list_del(env, qin, page);
- if (!imp || imp->imp_invalid)
- result = -EIO;
- if (result == 0 && oio->oi_lockless)
- /* this page contains `invalid' data, but who cares?
- * nobody can access the invalid data.
- * in osc_io_commit_write(), we're going to write exact
- * [from, to) bytes of this page to OST. -jay
+ (*cb)(env, io, page);
+ /* Can't access page any more. Page can be in transfer and
+ * complete at any time.
*/
- cl_page_export(env, slice->cpl_page, 1);
+ }
+ /* for a sync write, the kernel will wait for this page to be flushed before
+ * osc_io_end() is called, so release it earlier.
+ * for mkwrite(), it's known there are no further pages.
+ */
+ if (cl_io_is_sync_write(io) && oio->oi_active) {
+ osc_extent_release(env, oio->oi_active);
+ oio->oi_active = NULL;
+ }
+
+ CDEBUG(D_INFO, "%d %d\n", qin->pl_nr, result);
return result;
}
-static int osc_io_commit_write(const struct lu_env *env,
- const struct cl_io_slice *ios,
- const struct cl_page_slice *slice,
- unsigned from, unsigned to)
+static int osc_io_rw_iter_init(const struct lu_env *env,
+ const struct cl_io_slice *ios)
{
- struct osc_io *oio = cl2osc_io(env, ios);
- struct osc_page *opg = cl2osc_page(slice);
- struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
- struct osc_async_page *oap = &opg->ops_oap;
+ struct cl_io *io = ios->cis_io;
+ struct osc_io *oio = osc_env_io(env);
+ struct osc_object *osc = cl2osc(ios->cis_obj);
+ struct client_obd *cli = osc_cli(osc);
+ unsigned long c;
+ unsigned int npages;
+ unsigned int max_pages;
+
+ if (cl_io_is_append(io))
+ return 0;
+
+ npages = io->u.ci_rw.crw_count >> PAGE_SHIFT;
+ if (io->u.ci_rw.crw_pos & ~PAGE_MASK)
+ ++npages;
+
+ max_pages = cli->cl_max_pages_per_rpc * cli->cl_max_rpcs_in_flight;
+ if (npages > max_pages)
+ npages = max_pages;
+
+ c = atomic_read(cli->cl_lru_left);
+ if (c < npages && osc_lru_reclaim(cli) > 0)
+ c = atomic_read(cli->cl_lru_left);
+ while (c >= npages) {
+ if (c == atomic_cmpxchg(cli->cl_lru_left, c, c - npages)) {
+ oio->oi_lru_reserved = npages;
+ break;
+ }
+ c = atomic_read(cli->cl_lru_left);
+ }
- LASSERT(to > 0);
- /*
- * XXX instead of calling osc_page_touch() here and in
- * osc_io_fault_start() it might be more logical to introduce
- * cl_page_touch() method, that generic cl_io_commit_write() and page
- * fault code calls.
- */
- osc_page_touch(env, cl2osc_page(slice), to);
- if (!client_is_remote(osc_export(obj)) &&
- capable(CFS_CAP_SYS_RESOURCE))
- oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
+ return 0;
+}
- if (oio->oi_lockless)
- /* see osc_io_prepare_write() for lockless io handling. */
- cl_page_clip(env, slice->cpl_page, from, to);
+static void osc_io_rw_iter_fini(const struct lu_env *env,
+ const struct cl_io_slice *ios)
+{
+ struct osc_io *oio = osc_env_io(env);
+ struct osc_object *osc = cl2osc(ios->cis_obj);
+ struct client_obd *cli = osc_cli(osc);
- return 0;
+ if (oio->oi_lru_reserved > 0) {
+ atomic_add(oio->oi_lru_reserved, cli->cl_lru_left);
+ oio->oi_lru_reserved = 0;
+ }
+ oio->oi_write_osclock = NULL;
}
static int osc_io_fault_start(const struct lu_env *env,
@@ -342,31 +393,21 @@ static int osc_async_upcall(void *a, int rc)
* Checks that there are no pages being written in the extent being truncated.
*/
static int trunc_check_cb(const struct lu_env *env, struct cl_io *io,
- struct cl_page *page, void *cbdata)
+ struct osc_page *ops, void *cbdata)
{
- const struct cl_page_slice *slice;
- struct osc_page *ops;
+ struct cl_page *page = ops->ops_cl.cpl_page;
struct osc_async_page *oap;
__u64 start = *(__u64 *)cbdata;
- slice = cl_page_at(page, &osc_device_type);
- LASSERT(slice);
- ops = cl2osc_page(slice);
oap = &ops->ops_oap;
-
if (oap->oap_cmd & OBD_BRW_WRITE &&
!list_empty(&oap->oap_pending_item))
CL_PAGE_DEBUG(D_ERROR, env, page, "exists %llu/%s.\n",
start, current->comm);
- {
- struct page *vmpage = cl_page_vmpage(env, page);
-
- if (PageLocked(vmpage))
- CDEBUG(D_CACHE, "page %p index %lu locked for %d.\n",
- ops, page->cp_index,
- (oap->oap_cmd & OBD_BRW_RWMASK));
- }
+ if (PageLocked(page->cp_vmpage))
+ CDEBUG(D_CACHE, "page %p index %lu locked for %d.\n",
+ ops, osc_index(ops), oap->oap_cmd & OBD_BRW_RWMASK);
return CLP_GANG_OKAY;
}
@@ -385,8 +426,9 @@ static void osc_trunc_check(const struct lu_env *env, struct cl_io *io,
/*
* Complain if there are pages in the truncated region.
*/
- cl_page_gang_lookup(env, clob, io, start + partial, CL_PAGE_EOF,
- trunc_check_cb, (void *)&size);
+ osc_page_gang_lookup(env, io, cl2osc(clob),
+ start + partial, CL_PAGE_EOF,
+ trunc_check_cb, (void *)&size);
}
static int osc_io_setattr_start(const struct lu_env *env,
@@ -650,6 +692,8 @@ static const struct cl_io_operations osc_io_ops = {
.cio_fini = osc_io_fini
},
[CIT_WRITE] = {
+ .cio_iter_init = osc_io_rw_iter_init,
+ .cio_iter_fini = osc_io_rw_iter_fini,
.cio_start = osc_io_write_start,
.cio_end = osc_io_end,
.cio_fini = osc_io_fini
@@ -672,16 +716,8 @@ static const struct cl_io_operations osc_io_ops = {
.cio_fini = osc_io_fini
}
},
- .req_op = {
- [CRT_READ] = {
- .cio_submit = osc_io_submit
- },
- [CRT_WRITE] = {
- .cio_submit = osc_io_submit
- }
- },
- .cio_prepare_write = osc_io_prepare_write,
- .cio_commit_write = osc_io_commit_write
+ .cio_submit = osc_io_submit,
+ .cio_commit_async = osc_io_commit_async
};
/*****************************************************************************
@@ -718,8 +754,7 @@ static void osc_req_attr_set(const struct lu_env *env,
struct lov_oinfo *oinfo;
struct cl_req *clerq;
struct cl_page *apage; /* _some_ page in @clerq */
- struct cl_lock *lock; /* _some_ lock protecting @apage */
- struct osc_lock *olck;
+ struct ldlm_lock *lock; /* _some_ lock protecting @apage */
struct osc_page *opg;
struct obdo *oa;
struct ost_lvb *lvb;
@@ -753,31 +788,32 @@ static void osc_req_attr_set(const struct lu_env *env,
LASSERT(!list_empty(&clerq->crq_pages));
apage = container_of(clerq->crq_pages.next,
struct cl_page, cp_flight);
- opg = osc_cl_page_osc(apage);
- apage = opg->ops_cl.cpl_page; /* now apage is a sub-page */
- lock = cl_lock_at_page(env, apage->cp_obj, apage, NULL, 1, 1);
- if (!lock) {
- struct cl_object_header *head;
- struct cl_lock *scan;
-
- head = cl_object_header(apage->cp_obj);
- list_for_each_entry(scan, &head->coh_locks, cll_linkage)
- CL_LOCK_DEBUG(D_ERROR, env, scan,
- "no cover page!\n");
- CL_PAGE_DEBUG(D_ERROR, env, apage,
- "dump uncover page!\n");
+ opg = osc_cl_page_osc(apage, NULL);
+ lock = osc_dlmlock_at_pgoff(env, cl2osc(obj), osc_index(opg),
+ 1, 1);
+ if (!lock && !opg->ops_srvlock) {
+ struct ldlm_resource *res;
+ struct ldlm_res_id *resname;
+
+ CL_PAGE_DEBUG(D_ERROR, env, apage, "uncovered page!\n");
+
+ resname = &osc_env_info(env)->oti_resname;
+ ostid_build_res_name(&oinfo->loi_oi, resname);
+ res = ldlm_resource_get(
+ osc_export(cl2osc(obj))->exp_obd->obd_namespace,
+ NULL, resname, LDLM_EXTENT, 0);
+ ldlm_resource_dump(D_ERROR, res);
+
dump_stack();
LBUG();
}
- olck = osc_lock_at(lock);
- LASSERT(ergo(opg->ops_srvlock, !olck->ols_lock));
/* check for lockless io. */
- if (olck->ols_lock) {
- oa->o_handle = olck->ols_lock->l_remote_handle;
+ if (lock) {
+ oa->o_handle = lock->l_remote_handle;
oa->o_valid |= OBD_MD_FLHANDLE;
+ LDLM_LOCK_PUT(lock);
}
- cl_lock_put(env, lock);
}
}
@@ -807,8 +843,9 @@ int osc_req_init(const struct lu_env *env, struct cl_device *dev,
if (or) {
cl_req_slice_add(req, &or->or_cl, dev, &osc_req_ops);
result = 0;
- } else
+ } else {
result = -ENOMEM;
+ }
return result;
}
diff --git a/drivers/staging/lustre/lustre/osc/osc_lock.c b/drivers/staging/lustre/lustre/osc/osc_lock.c
index 013df9787f3e..16f9cd9d3b12 100644
--- a/drivers/staging/lustre/lustre/osc/osc_lock.c
+++ b/drivers/staging/lustre/lustre/osc/osc_lock.c
@@ -36,6 +36,7 @@
* Implementation of cl_lock for OSC layer.
*
* Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@intel.com>
*/
#define DEBUG_SUBSYSTEM S_OSC
@@ -50,8 +51,6 @@
* @{
*/
-#define _PAGEREF_MAGIC (-10000000)
-
/*****************************************************************************
*
* Type conversions.
@@ -62,7 +61,6 @@ static const struct cl_lock_operations osc_lock_ops;
static const struct cl_lock_operations osc_lock_lockless_ops;
static void osc_lock_to_lockless(const struct lu_env *env,
struct osc_lock *ols, int force);
-static int osc_lock_has_pages(struct osc_lock *olck);
int osc_lock_is_lockless(const struct osc_lock *olck)
{
@@ -90,11 +88,11 @@ static struct ldlm_lock *osc_handle_ptr(struct lustre_handle *handle)
static int osc_lock_invariant(struct osc_lock *ols)
{
struct ldlm_lock *lock = osc_handle_ptr(&ols->ols_handle);
- struct ldlm_lock *olock = ols->ols_lock;
+ struct ldlm_lock *olock = ols->ols_dlmlock;
int handle_used = lustre_handle_is_used(&ols->ols_handle);
if (ergo(osc_lock_is_lockless(ols),
- ols->ols_locklessable && !ols->ols_lock))
+ ols->ols_locklessable && !ols->ols_dlmlock))
return 1;
/*
@@ -111,7 +109,7 @@ static int osc_lock_invariant(struct osc_lock *ols)
ergo(!lock, !olock)))
return 0;
/*
- * Check that ->ols_handle and ->ols_lock are consistent, but
+ * Check that ->ols_handle and ->ols_dlmlock are consistent, but
* take into account that they are set at the different time.
*/
if (!ergo(ols->ols_state == OLS_CANCELLED,
@@ -122,7 +120,7 @@ static int osc_lock_invariant(struct osc_lock *ols)
* ast.
*/
if (!ergo(olock && ols->ols_state < OLS_CANCELLED,
- ((olock->l_flags & LDLM_FL_DESTROYED) == 0)))
+ !ldlm_is_destroyed(olock)))
return 0;
if (!ergo(ols->ols_state == OLS_GRANTED,
@@ -138,117 +136,13 @@ static int osc_lock_invariant(struct osc_lock *ols)
*
*/
-/**
- * Breaks a link between osc_lock and dlm_lock.
- */
-static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
-{
- struct ldlm_lock *dlmlock;
-
- spin_lock(&osc_ast_guard);
- dlmlock = olck->ols_lock;
- if (!dlmlock) {
- spin_unlock(&osc_ast_guard);
- return;
- }
-
- olck->ols_lock = NULL;
- /* wb(); --- for all who checks (ols->ols_lock != NULL) before
- * call to osc_lock_detach()
- */
- dlmlock->l_ast_data = NULL;
- olck->ols_handle.cookie = 0ULL;
- spin_unlock(&osc_ast_guard);
-
- lock_res_and_lock(dlmlock);
- if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
- struct cl_object *obj = olck->ols_cl.cls_obj;
- struct cl_attr *attr = &osc_env_info(env)->oti_attr;
- __u64 old_kms;
-
- cl_object_attr_lock(obj);
- /* Must get the value under the lock to avoid possible races. */
- old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
- /* Update the kms. Need to loop all granted locks.
- * Not a problem for the client
- */
- attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
-
- cl_object_attr_set(env, obj, attr, CAT_KMS);
- cl_object_attr_unlock(obj);
- }
- unlock_res_and_lock(dlmlock);
-
- /* release a reference taken in osc_lock_upcall0(). */
- LASSERT(olck->ols_has_ref);
- lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
- LDLM_LOCK_RELEASE(dlmlock);
- olck->ols_has_ref = 0;
-}
-
-static int osc_lock_unhold(struct osc_lock *ols)
-{
- int result = 0;
-
- if (ols->ols_hold) {
- ols->ols_hold = 0;
- result = osc_cancel_base(&ols->ols_handle,
- ols->ols_einfo.ei_mode);
- }
- return result;
-}
-
-static int osc_lock_unuse(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct osc_lock *ols = cl2osc_lock(slice);
-
- LINVRNT(osc_lock_invariant(ols));
-
- switch (ols->ols_state) {
- case OLS_NEW:
- LASSERT(!ols->ols_hold);
- LASSERT(ols->ols_agl);
- return 0;
- case OLS_UPCALL_RECEIVED:
- osc_lock_unhold(ols);
- case OLS_ENQUEUED:
- LASSERT(!ols->ols_hold);
- osc_lock_detach(env, ols);
- ols->ols_state = OLS_NEW;
- return 0;
- case OLS_GRANTED:
- LASSERT(!ols->ols_glimpse);
- LASSERT(ols->ols_hold);
- /*
- * Move lock into OLS_RELEASED state before calling
- * osc_cancel_base() so that possible synchronous cancellation
- * sees that lock is released.
- */
- ols->ols_state = OLS_RELEASED;
- return osc_lock_unhold(ols);
- default:
- CERROR("Impossible state: %d\n", ols->ols_state);
- LBUG();
- }
-}
-
static void osc_lock_fini(const struct lu_env *env,
struct cl_lock_slice *slice)
{
struct osc_lock *ols = cl2osc_lock(slice);
LINVRNT(osc_lock_invariant(ols));
- /*
- * ->ols_hold can still be true at this point if, for example, a
- * thread that requested a lock was killed (and released a reference
- * to the lock), before reply from a server was received. In this case
- * lock is destroyed immediately after upcall.
- */
- osc_lock_unhold(ols);
- LASSERT(!ols->ols_lock);
- LASSERT(atomic_read(&ols->ols_pageref) == 0 ||
- atomic_read(&ols->ols_pageref) == _PAGEREF_MAGIC);
+ LASSERT(!ols->ols_dlmlock);
kmem_cache_free(osc_lock_kmem, ols);
}
@@ -275,55 +169,12 @@ static __u64 osc_enq2ldlm_flags(__u32 enqflags)
result |= LDLM_FL_HAS_INTENT;
if (enqflags & CEF_DISCARD_DATA)
result |= LDLM_FL_AST_DISCARD_DATA;
+ if (enqflags & CEF_PEEK)
+ result |= LDLM_FL_TEST_LOCK;
return result;
}
/**
- * Global spin-lock protecting consistency of ldlm_lock::l_ast_data
- * pointers. Initialized in osc_init().
- */
-spinlock_t osc_ast_guard;
-
-static struct osc_lock *osc_ast_data_get(struct ldlm_lock *dlm_lock)
-{
- struct osc_lock *olck;
-
- lock_res_and_lock(dlm_lock);
- spin_lock(&osc_ast_guard);
- olck = dlm_lock->l_ast_data;
- if (olck) {
- struct cl_lock *lock = olck->ols_cl.cls_lock;
- /*
- * If osc_lock holds a reference on ldlm lock, return it even
- * when cl_lock is in CLS_FREEING state. This way
- *
- * osc_ast_data_get(dlmlock) == NULL
- *
- * guarantees that all osc references on dlmlock were
- * released. osc_dlm_blocking_ast0() relies on that.
- */
- if (lock->cll_state < CLS_FREEING || olck->ols_has_ref) {
- cl_lock_get_trust(lock);
- lu_ref_add_atomic(&lock->cll_reference,
- "ast", current);
- } else
- olck = NULL;
- }
- spin_unlock(&osc_ast_guard);
- unlock_res_and_lock(dlm_lock);
- return olck;
-}
-
-static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck)
-{
- struct cl_lock *lock;
-
- lock = olck->ols_cl.cls_lock;
- lu_ref_del(&lock->cll_reference, "ast", current);
- cl_lock_put(env, lock);
-}
-
-/**
* Updates object attributes from a lock value block (lvb) received together
* with the DLM lock reply from the server. Copy of osc_update_enqueue()
* logic.
@@ -333,35 +184,30 @@ static void osc_ast_data_put(const struct lu_env *env, struct osc_lock *olck)
*
* Called under lock and resource spin-locks.
*/
-static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck,
- int rc)
+static void osc_lock_lvb_update(const struct lu_env *env,
+ struct osc_object *osc,
+ struct ldlm_lock *dlmlock,
+ struct ost_lvb *lvb)
{
- struct ost_lvb *lvb;
- struct cl_object *obj;
- struct lov_oinfo *oinfo;
- struct cl_attr *attr;
+ struct cl_object *obj = osc2cl(osc);
+ struct lov_oinfo *oinfo = osc->oo_oinfo;
+ struct cl_attr *attr = &osc_env_info(env)->oti_attr;
unsigned valid;
- if (!(olck->ols_flags & LDLM_FL_LVB_READY))
- return;
-
- lvb = &olck->ols_lvb;
- obj = olck->ols_cl.cls_obj;
- oinfo = cl2osc(obj)->oo_oinfo;
- attr = &osc_env_info(env)->oti_attr;
valid = CAT_BLOCKS | CAT_ATIME | CAT_CTIME | CAT_MTIME | CAT_SIZE;
+ if (!lvb)
+ lvb = dlmlock->l_lvb_data;
+
cl_lvb2attr(attr, lvb);
cl_object_attr_lock(obj);
- if (rc == 0) {
- struct ldlm_lock *dlmlock;
+ if (dlmlock) {
__u64 size;
- dlmlock = olck->ols_lock;
-
- /* re-grab LVB from a dlm lock under DLM spin-locks. */
- *lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
+ check_res_locked(dlmlock->l_resource);
+ LASSERT(lvb == dlmlock->l_lvb_data);
size = lvb->lvb_size;
+
/* Extend KMS up to the end of this lock and no further
* A lock on [x,y] means a KMS of up to y + 1 bytes!
*/
@@ -378,102 +224,67 @@ static void osc_lock_lvb_update(const struct lu_env *env, struct osc_lock *olck,
dlmlock->l_policy_data.l_extent.end);
}
ldlm_lock_allow_match_locked(dlmlock);
- } else if (rc == -ENAVAIL && olck->ols_glimpse) {
- CDEBUG(D_INODE, "glimpsed, setting rss=%llu; leaving kms=%llu\n",
- lvb->lvb_size, oinfo->loi_kms);
- } else
- valid = 0;
-
- if (valid != 0)
- cl_object_attr_set(env, obj, attr, valid);
+ }
+ cl_object_attr_set(env, obj, attr, valid);
cl_object_attr_unlock(obj);
}
-/**
- * Called when a lock is granted, from an upcall (when server returned a
- * granted lock), or from completion AST, when server returned a blocked lock.
- *
- * Called under lock and resource spin-locks, that are released temporarily
- * here.
- */
-static void osc_lock_granted(const struct lu_env *env, struct osc_lock *olck,
- struct ldlm_lock *dlmlock, int rc)
+static void osc_lock_granted(const struct lu_env *env, struct osc_lock *oscl,
+ struct lustre_handle *lockh, bool lvb_update)
{
- struct ldlm_extent *ext;
- struct cl_lock *lock;
- struct cl_lock_descr *descr;
+ struct ldlm_lock *dlmlock;
- LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
+ dlmlock = ldlm_handle2lock_long(lockh, 0);
+ LASSERT(dlmlock);
- if (olck->ols_state < OLS_GRANTED) {
- lock = olck->ols_cl.cls_lock;
- ext = &dlmlock->l_policy_data.l_extent;
- descr = &osc_env_info(env)->oti_descr;
- descr->cld_obj = lock->cll_descr.cld_obj;
+ /* lock reference taken by ldlm_handle2lock_long() is
+ * owned by osc_lock and released in osc_lock_detach()
+ */
+ lu_ref_add(&dlmlock->l_reference, "osc_lock", oscl);
+ oscl->ols_has_ref = 1;
- /* XXX check that ->l_granted_mode is valid. */
- descr->cld_mode = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
- descr->cld_start = cl_index(descr->cld_obj, ext->start);
- descr->cld_end = cl_index(descr->cld_obj, ext->end);
- descr->cld_gid = ext->gid;
- /*
- * tell upper layers the extent of the lock that was actually
- * granted
- */
- olck->ols_state = OLS_GRANTED;
- osc_lock_lvb_update(env, olck, rc);
-
- /* release DLM spin-locks to allow cl_lock_{modify,signal}()
- * to take a semaphore on a parent lock. This is safe, because
- * spin-locks are needed to protect consistency of
- * dlmlock->l_*_mode and LVB, and we have finished processing
- * them.
+ LASSERT(!oscl->ols_dlmlock);
+ oscl->ols_dlmlock = dlmlock;
+
+ /* This may be a matched lock for glimpse request, do not hold
+ * lock reference in that case.
+ */
+ if (!oscl->ols_glimpse) {
+ /* hold a refc for non glimpse lock which will
+ * be released in osc_lock_cancel()
*/
- unlock_res_and_lock(dlmlock);
- cl_lock_modify(env, lock, descr);
- cl_lock_signal(env, lock);
- LINVRNT(osc_lock_invariant(olck));
- lock_res_and_lock(dlmlock);
+ lustre_handle_copy(&oscl->ols_handle, lockh);
+ ldlm_lock_addref(lockh, oscl->ols_einfo.ei_mode);
+ oscl->ols_hold = 1;
}
-}
-
-static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
-
-{
- struct ldlm_lock *dlmlock;
-
- dlmlock = ldlm_handle2lock_long(&olck->ols_handle, 0);
- LASSERT(dlmlock);
+ /* Lock must have been granted. */
lock_res_and_lock(dlmlock);
- spin_lock(&osc_ast_guard);
- LASSERT(dlmlock->l_ast_data == olck);
- LASSERT(!olck->ols_lock);
- olck->ols_lock = dlmlock;
- spin_unlock(&osc_ast_guard);
+ if (dlmlock->l_granted_mode == dlmlock->l_req_mode) {
+ struct ldlm_extent *ext = &dlmlock->l_policy_data.l_extent;
+ struct cl_lock_descr *descr = &oscl->ols_cl.cls_lock->cll_descr;
- /*
- * Lock might be not yet granted. In this case, completion ast
- * (osc_ldlm_completion_ast()) comes later and finishes lock
- * granting.
- */
- if (dlmlock->l_granted_mode == dlmlock->l_req_mode)
- osc_lock_granted(env, olck, dlmlock, 0);
- unlock_res_and_lock(dlmlock);
+ /* extend the lock extent, otherwise it will have problem when
+ * we decide whether to grant a lockless lock.
+ */
+ descr->cld_mode = osc_ldlm2cl_lock(dlmlock->l_granted_mode);
+ descr->cld_start = cl_index(descr->cld_obj, ext->start);
+ descr->cld_end = cl_index(descr->cld_obj, ext->end);
+ descr->cld_gid = ext->gid;
- /*
- * osc_enqueue_interpret() decrefs asynchronous locks, counter
- * this.
- */
- ldlm_lock_addref(&olck->ols_handle, olck->ols_einfo.ei_mode);
- olck->ols_hold = 1;
+ /* no lvb update for matched lock */
+ if (lvb_update) {
+ LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY);
+ osc_lock_lvb_update(env, cl2osc(oscl->ols_cl.cls_obj),
+ dlmlock, NULL);
+ }
+ LINVRNT(osc_lock_invariant(oscl));
+ }
+ unlock_res_and_lock(dlmlock);
- /* lock reference taken by ldlm_handle2lock_long() is owned by
- * osc_lock and released in osc_lock_detach()
- */
- lu_ref_add(&dlmlock->l_reference, "osc_lock", olck);
- olck->ols_has_ref = 1;
+ LASSERT(oscl->ols_state != OLS_GRANTED);
+ oscl->ols_state = OLS_GRANTED;
}
/**
@@ -481,143 +292,124 @@ static void osc_lock_upcall0(const struct lu_env *env, struct osc_lock *olck)
* received from a server, or after osc_enqueue_base() matched a local DLM
* lock.
*/
-static int osc_lock_upcall(void *cookie, int errcode)
+static int osc_lock_upcall(void *cookie, struct lustre_handle *lockh,
+ int errcode)
{
- struct osc_lock *olck = cookie;
- struct cl_lock_slice *slice = &olck->ols_cl;
- struct cl_lock *lock = slice->cls_lock;
+ struct osc_lock *oscl = cookie;
+ struct cl_lock_slice *slice = &oscl->ols_cl;
struct lu_env *env;
struct cl_env_nest nest;
+ int rc;
env = cl_env_nested_get(&nest);
- if (!IS_ERR(env)) {
- int rc;
+ /* should never happen, similar to osc_ldlm_blocking_ast(). */
+ LASSERT(!IS_ERR(env));
+
+ rc = ldlm_error2errno(errcode);
+ if (oscl->ols_state == OLS_ENQUEUED) {
+ oscl->ols_state = OLS_UPCALL_RECEIVED;
+ } else if (oscl->ols_state == OLS_CANCELLED) {
+ rc = -EIO;
+ } else {
+ CERROR("Impossible state: %d\n", oscl->ols_state);
+ LBUG();
+ }
- cl_lock_mutex_get(env, lock);
+ if (rc == 0)
+ osc_lock_granted(env, oscl, lockh, errcode == ELDLM_OK);
- LASSERT(lock->cll_state >= CLS_QUEUING);
- if (olck->ols_state == OLS_ENQUEUED) {
- olck->ols_state = OLS_UPCALL_RECEIVED;
- rc = ldlm_error2errno(errcode);
- } else if (olck->ols_state == OLS_CANCELLED) {
- rc = -EIO;
- } else {
- CERROR("Impossible state: %d\n", olck->ols_state);
- LBUG();
- }
- if (rc) {
- struct ldlm_lock *dlmlock;
-
- dlmlock = ldlm_handle2lock(&olck->ols_handle);
- if (dlmlock) {
- lock_res_and_lock(dlmlock);
- spin_lock(&osc_ast_guard);
- LASSERT(!olck->ols_lock);
- dlmlock->l_ast_data = NULL;
- olck->ols_handle.cookie = 0ULL;
- spin_unlock(&osc_ast_guard);
- ldlm_lock_fail_match_locked(dlmlock);
- unlock_res_and_lock(dlmlock);
- LDLM_LOCK_PUT(dlmlock);
- }
- } else {
- if (olck->ols_glimpse)
- olck->ols_glimpse = 0;
- osc_lock_upcall0(env, olck);
- }
+ /* Error handling, some errors are tolerable. */
+ if (oscl->ols_locklessable && rc == -EUSERS) {
+ /* This is a tolerable error, turn this lock into
+ * lockless lock.
+ */
+ osc_object_set_contended(cl2osc(slice->cls_obj));
+ LASSERT(slice->cls_ops == &osc_lock_ops);
+
+ /* Change this lock to ldlmlock-less lock. */
+ osc_lock_to_lockless(env, oscl, 1);
+ oscl->ols_state = OLS_GRANTED;
+ rc = 0;
+ } else if (oscl->ols_glimpse && rc == -ENAVAIL) {
+ LASSERT(oscl->ols_flags & LDLM_FL_LVB_READY);
+ osc_lock_lvb_update(env, cl2osc(slice->cls_obj),
+ NULL, &oscl->ols_lvb);
+ /* Hide the error. */
+ rc = 0;
+ }
- /* Error handling, some errors are tolerable. */
- if (olck->ols_locklessable && rc == -EUSERS) {
- /* This is a tolerable error, turn this lock into
- * lockless lock.
- */
- osc_object_set_contended(cl2osc(slice->cls_obj));
- LASSERT(slice->cls_ops == &osc_lock_ops);
+ if (oscl->ols_owner)
+ cl_sync_io_note(env, oscl->ols_owner, rc);
+ cl_env_nested_put(&nest, env);
- /* Change this lock to ldlmlock-less lock. */
- osc_lock_to_lockless(env, olck, 1);
- olck->ols_state = OLS_GRANTED;
- rc = 0;
- } else if (olck->ols_glimpse && rc == -ENAVAIL) {
- osc_lock_lvb_update(env, olck, rc);
- cl_lock_delete(env, lock);
- /* Hide the error. */
- rc = 0;
- }
-
- if (rc == 0) {
- /* For AGL case, the RPC sponsor may exits the cl_lock
- * processing without wait() called before related OSC
- * lock upcall(). So update the lock status according
- * to the enqueue result inside AGL upcall().
- */
- if (olck->ols_agl) {
- lock->cll_flags |= CLF_FROM_UPCALL;
- cl_wait_try(env, lock);
- lock->cll_flags &= ~CLF_FROM_UPCALL;
- if (!olck->ols_glimpse)
- olck->ols_agl = 0;
- }
- cl_lock_signal(env, lock);
- /* del user for lock upcall cookie */
- cl_unuse_try(env, lock);
- } else {
- /* del user for lock upcall cookie */
- cl_lock_user_del(env, lock);
- cl_lock_error(env, lock, rc);
- }
+ return rc;
+}
- /* release cookie reference, acquired by osc_lock_enqueue() */
- cl_lock_hold_release(env, lock, "upcall", lock);
- cl_lock_mutex_put(env, lock);
+static int osc_lock_upcall_agl(void *cookie, struct lustre_handle *lockh,
+ int errcode)
+{
+ struct osc_object *osc = cookie;
+ struct ldlm_lock *dlmlock;
+ struct lu_env *env;
+ struct cl_env_nest nest;
- lu_ref_del(&lock->cll_reference, "upcall", lock);
- /* This maybe the last reference, so must be called after
- * cl_lock_mutex_put().
- */
- cl_lock_put(env, lock);
+ env = cl_env_nested_get(&nest);
+ LASSERT(!IS_ERR(env));
- cl_env_nested_put(&nest, env);
- } else {
- /* should never happen, similar to osc_ldlm_blocking_ast(). */
- LBUG();
+ if (errcode == ELDLM_LOCK_MATCHED) {
+ errcode = ELDLM_OK;
+ goto out;
}
- return errcode;
+
+ if (errcode != ELDLM_OK)
+ goto out;
+
+ dlmlock = ldlm_handle2lock(lockh);
+ LASSERT(dlmlock);
+
+ lock_res_and_lock(dlmlock);
+ LASSERT(dlmlock->l_granted_mode == dlmlock->l_req_mode);
+
+ /* there is no osc_lock associated with AGL lock */
+ osc_lock_lvb_update(env, osc, dlmlock, NULL);
+
+ unlock_res_and_lock(dlmlock);
+ LDLM_LOCK_PUT(dlmlock);
+
+out:
+ cl_object_put(env, osc2cl(osc));
+ cl_env_nested_put(&nest, env);
+ return ldlm_error2errno(errcode);
}
-/**
- * Core of osc_dlm_blocking_ast() logic.
- */
-static void osc_lock_blocking(const struct lu_env *env,
- struct ldlm_lock *dlmlock,
- struct osc_lock *olck, int blocking)
+static int osc_lock_flush(struct osc_object *obj, pgoff_t start, pgoff_t end,
+ enum cl_lock_mode mode, int discard)
{
- struct cl_lock *lock = olck->ols_cl.cls_lock;
+ struct lu_env *env;
+ struct cl_env_nest nest;
+ int rc = 0;
+ int rc2 = 0;
- LASSERT(olck->ols_lock == dlmlock);
- CLASSERT(OLS_BLOCKED < OLS_CANCELLED);
- LASSERT(!osc_lock_is_lockless(olck));
+ env = cl_env_nested_get(&nest);
+ if (IS_ERR(env))
+ return PTR_ERR(env);
+
+ if (mode == CLM_WRITE) {
+ rc = osc_cache_writeback_range(env, obj, start, end, 1,
+ discard);
+ CDEBUG(D_CACHE, "object %p: [%lu -> %lu] %d pages were %s.\n",
+ obj, start, end, rc,
+ discard ? "discarded" : "written back");
+ if (rc > 0)
+ rc = 0;
+ }
- /*
- * Lock might be still addref-ed here, if e.g., blocking ast
- * is sent for a failed lock.
- */
- osc_lock_unhold(olck);
+ rc2 = osc_lock_discard_pages(env, obj, start, end, mode);
+ if (rc == 0 && rc2 < 0)
+ rc = rc2;
- if (blocking && olck->ols_state < OLS_BLOCKED)
- /*
- * Move osc_lock into OLS_BLOCKED before canceling the lock,
- * because it recursively re-enters osc_lock_blocking(), with
- * the state set to OLS_CANCELLED.
- */
- olck->ols_state = OLS_BLOCKED;
- /*
- * cancel and destroy lock at least once no matter how blocking ast is
- * entered (see comment above osc_ldlm_blocking_ast() for use
- * cases). cl_lock_cancel() and cl_lock_delete() are idempotent.
- */
- cl_lock_cancel(env, lock);
- cl_lock_delete(env, lock);
+ cl_env_nested_put(&nest, env);
+ return rc;
}
/**
@@ -628,65 +420,63 @@ static int osc_dlm_blocking_ast0(const struct lu_env *env,
struct ldlm_lock *dlmlock,
void *data, int flag)
{
- struct osc_lock *olck;
- struct cl_lock *lock;
- int result;
- int cancel;
-
- LASSERT(flag == LDLM_CB_BLOCKING || flag == LDLM_CB_CANCELING);
-
- cancel = 0;
- olck = osc_ast_data_get(dlmlock);
- if (olck) {
- lock = olck->ols_cl.cls_lock;
- cl_lock_mutex_get(env, lock);
- LINVRNT(osc_lock_invariant(olck));
- if (olck->ols_ast_wait) {
- /* wake up osc_lock_use() */
- cl_lock_signal(env, lock);
- olck->ols_ast_wait = 0;
- }
- /*
- * Lock might have been canceled while this thread was
- * sleeping for lock mutex, but olck is pinned in memory.
- */
- if (olck == dlmlock->l_ast_data) {
- /*
- * NOTE: DLM sends blocking AST's for failed locks
- * (that are still in pre-OLS_GRANTED state)
- * too, and they have to be canceled otherwise
- * DLM lock is never destroyed and stuck in
- * the memory.
- *
- * Alternatively, ldlm_cli_cancel() can be
- * called here directly for osc_locks with
- * ols_state < OLS_GRANTED to maintain an
- * invariant that ->clo_cancel() is only called
- * for locks that were granted.
- */
- LASSERT(data == olck);
- osc_lock_blocking(env, dlmlock,
- olck, flag == LDLM_CB_BLOCKING);
- } else
- cancel = 1;
- cl_lock_mutex_put(env, lock);
- osc_ast_data_put(env, olck);
- } else
- /*
- * DLM lock exists, but there is no cl_lock attached to it.
- * This is a `normal' race. cl_object and its cl_lock's can be
- * removed by memory pressure, together with all pages.
+ struct cl_object *obj = NULL;
+ int result = 0;
+ int discard;
+ enum cl_lock_mode mode = CLM_READ;
+
+ LASSERT(flag == LDLM_CB_CANCELING);
+
+ lock_res_and_lock(dlmlock);
+ if (dlmlock->l_granted_mode != dlmlock->l_req_mode) {
+ dlmlock->l_ast_data = NULL;
+ unlock_res_and_lock(dlmlock);
+ return 0;
+ }
+
+ discard = ldlm_is_discard_data(dlmlock);
+ if (dlmlock->l_granted_mode & (LCK_PW | LCK_GROUP))
+ mode = CLM_WRITE;
+
+ if (dlmlock->l_ast_data) {
+ obj = osc2cl(dlmlock->l_ast_data);
+ dlmlock->l_ast_data = NULL;
+
+ cl_object_get(obj);
+ }
+
+ unlock_res_and_lock(dlmlock);
+
+ /* if l_ast_data is NULL, the dlmlock was enqueued by AGL or
+ * the object has been destroyed.
+ */
+ if (obj) {
+ struct ldlm_extent *extent = &dlmlock->l_policy_data.l_extent;
+ struct cl_attr *attr = &osc_env_info(env)->oti_attr;
+ __u64 old_kms;
+
+ /* Destroy pages covered by the extent of the DLM lock */
+ result = osc_lock_flush(cl2osc(obj),
+ cl_index(obj, extent->start),
+ cl_index(obj, extent->end),
+ mode, discard);
+
+ /* losing a lock, update kms */
+ lock_res_and_lock(dlmlock);
+ cl_object_attr_lock(obj);
+ /* Must get the value under the lock to avoid race. */
+ old_kms = cl2osc(obj)->oo_oinfo->loi_kms;
+ /* Update the kms. Need to loop all granted locks.
+ * Not a problem for the client
*/
- cancel = (flag == LDLM_CB_BLOCKING);
+ attr->cat_kms = ldlm_extent_shift_kms(dlmlock, old_kms);
- if (cancel) {
- struct lustre_handle *lockh;
+ cl_object_attr_set(env, obj, attr, CAT_KMS);
+ cl_object_attr_unlock(obj);
+ unlock_res_and_lock(dlmlock);
- lockh = &osc_env_info(env)->oti_handle;
- ldlm_lock2handle(dlmlock, lockh);
- result = ldlm_cli_cancel(lockh, LCF_ASYNC);
- } else
- result = 0;
+ cl_object_put(env, obj);
+ }
return result;
}
@@ -736,107 +526,52 @@ static int osc_ldlm_blocking_ast(struct ldlm_lock *dlmlock,
struct ldlm_lock_desc *new, void *data,
int flag)
{
- struct lu_env *env;
- struct cl_env_nest nest;
- int result;
+ int result = 0;
- /*
- * This can be called in the context of outer IO, e.g.,
- *
- * cl_enqueue()->...
- * ->osc_enqueue_base()->...
- * ->ldlm_prep_elc_req()->...
- * ->ldlm_cancel_callback()->...
- * ->osc_ldlm_blocking_ast()
- *
- * new environment has to be created to not corrupt outer context.
- */
- env = cl_env_nested_get(&nest);
- if (!IS_ERR(env)) {
- result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
- cl_env_nested_put(&nest, env);
- } else {
- result = PTR_ERR(env);
- /*
- * XXX This should never happen, as cl_lock is
- * stuck. Pre-allocated environment a la vvp_inode_fini_env
- * should be used.
- */
- LBUG();
- }
- if (result != 0) {
+ switch (flag) {
+ case LDLM_CB_BLOCKING: {
+ struct lustre_handle lockh;
+
+ ldlm_lock2handle(dlmlock, &lockh);
+ result = ldlm_cli_cancel(&lockh, LCF_ASYNC);
if (result == -ENODATA)
result = 0;
- else
- CERROR("BAST failed: %d\n", result);
+ break;
}
- return result;
-}
+ case LDLM_CB_CANCELING: {
+ struct lu_env *env;
+ struct cl_env_nest nest;
-static int osc_ldlm_completion_ast(struct ldlm_lock *dlmlock,
- __u64 flags, void *data)
-{
- struct cl_env_nest nest;
- struct lu_env *env;
- struct osc_lock *olck;
- struct cl_lock *lock;
- int result;
- int dlmrc;
-
- /* first, do dlm part of the work */
- dlmrc = ldlm_completion_ast_async(dlmlock, flags, data);
- /* then, notify cl_lock */
- env = cl_env_nested_get(&nest);
- if (!IS_ERR(env)) {
- olck = osc_ast_data_get(dlmlock);
- if (olck) {
- lock = olck->ols_cl.cls_lock;
- cl_lock_mutex_get(env, lock);
- /*
- * ldlm_handle_cp_callback() copied LVB from request
- * to lock->l_lvb_data, store it in osc_lock.
- */
- LASSERT(dlmlock->l_lvb_data);
- lock_res_and_lock(dlmlock);
- olck->ols_lvb = *(struct ost_lvb *)dlmlock->l_lvb_data;
- if (!olck->ols_lock) {
- /*
- * upcall (osc_lock_upcall()) hasn't yet been
- * called. Do nothing now, upcall will bind
- * olck to dlmlock and signal the waiters.
- *
- * This maintains an invariant that osc_lock
- * and ldlm_lock are always bound when
- * osc_lock is in OLS_GRANTED state.
- */
- } else if (dlmlock->l_granted_mode ==
- dlmlock->l_req_mode) {
- osc_lock_granted(env, olck, dlmlock, dlmrc);
- }
- unlock_res_and_lock(dlmlock);
+ /*
+ * This can be called in the context of outer IO, e.g.,
+ *
+ * osc_enqueue_base()->...
+ * ->ldlm_prep_elc_req()->...
+ * ->ldlm_cancel_callback()->...
+ * ->osc_ldlm_blocking_ast()
+ *
+ * new environment has to be created to not corrupt outer
+ * context.
+ */
+ env = cl_env_nested_get(&nest);
+ if (IS_ERR(env)) {
+ result = PTR_ERR(env);
+ break;
+ }
- if (dlmrc != 0) {
- CL_LOCK_DEBUG(D_ERROR, env, lock,
- "dlmlock returned %d\n", dlmrc);
- cl_lock_error(env, lock, dlmrc);
- }
- cl_lock_mutex_put(env, lock);
- osc_ast_data_put(env, olck);
- result = 0;
- } else
- result = -ELDLM_NO_LOCK_DATA;
+ result = osc_dlm_blocking_ast0(env, dlmlock, data, flag);
cl_env_nested_put(&nest, env);
- } else
- result = PTR_ERR(env);
- return dlmrc ?: result;
+ break;
+ }
+ default:
+ LBUG();
+ }
+ return result;
}
static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
{
struct ptlrpc_request *req = data;
- struct osc_lock *olck;
- struct cl_lock *lock;
- struct cl_object *obj;
struct cl_env_nest nest;
struct lu_env *env;
struct ost_lvb *lvb;
@@ -847,14 +582,16 @@ static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
env = cl_env_nested_get(&nest);
if (!IS_ERR(env)) {
- /* osc_ast_data_get() has to go after environment is
- * allocated, because osc_ast_data() acquires a
- * reference to a lock, and it can only be released in
- * environment.
- */
- olck = osc_ast_data_get(dlmlock);
- if (olck) {
- lock = olck->ols_cl.cls_lock;
+ struct cl_object *obj = NULL;
+
+ lock_res_and_lock(dlmlock);
+ if (dlmlock->l_ast_data) {
+ obj = osc2cl(dlmlock->l_ast_data);
+ cl_object_get(obj);
+ }
+ unlock_res_and_lock(dlmlock);
+
+ if (obj) {
/* Do not grab the mutex of cl_lock for glimpse.
* See LU-1274 for details.
* BTW, it's okay for cl_lock to be cancelled during
@@ -869,7 +606,6 @@ static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
result = req_capsule_server_pack(cap);
if (result == 0) {
lvb = req_capsule_server_get(cap, &RMF_DLM_LVB);
- obj = lock->cll_descr.cld_obj;
result = cl_object_glimpse(env, obj, lvb);
}
if (!exp_connect_lvb_type(req->rq_export))
@@ -877,7 +613,7 @@ static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
&RMF_DLM_LVB,
sizeof(struct ost_lvb_v1),
RCL_SERVER);
- osc_ast_data_put(env, olck);
+ cl_object_put(env, obj);
} else {
/*
* These errors are normal races, so we don't want to
@@ -888,44 +624,123 @@ static int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data)
result = -ELDLM_NO_LOCK_DATA;
}
cl_env_nested_put(&nest, env);
- } else
+ } else {
result = PTR_ERR(env);
+ }
req->rq_status = result;
return result;
}
-static unsigned long osc_lock_weigh(const struct lu_env *env,
- const struct cl_lock_slice *slice)
+static int weigh_cb(const struct lu_env *env, struct cl_io *io,
+ struct osc_page *ops, void *cbdata)
{
- /*
- * don't need to grab coh_page_guard since we don't care the exact #
- * of pages..
- */
- return cl_object_header(slice->cls_obj)->coh_pages;
+ struct cl_page *page = ops->ops_cl.cpl_page;
+
+ if (cl_page_is_vmlocked(env, page) ||
+ PageDirty(page->cp_vmpage) || PageWriteback(page->cp_vmpage)
+ ) {
+ (*(unsigned long *)cbdata)++;
+ return CLP_GANG_ABORT;
+ }
+
+ return CLP_GANG_OKAY;
}
-static void osc_lock_build_einfo(const struct lu_env *env,
- const struct cl_lock *clock,
- struct osc_lock *lock,
- struct ldlm_enqueue_info *einfo)
+static unsigned long osc_lock_weight(const struct lu_env *env,
+ struct osc_object *oscobj,
+ struct ldlm_extent *extent)
+{
+ struct cl_io *io = &osc_env_info(env)->oti_io;
+ struct cl_object *obj = cl_object_top(&oscobj->oo_cl);
+ unsigned long npages = 0;
+ int result;
+
+ io->ci_obj = obj;
+ io->ci_ignore_layout = 1;
+ result = cl_io_init(env, io, CIT_MISC, io->ci_obj);
+ if (result != 0)
+ return result;
+
+ do {
+ result = osc_page_gang_lookup(env, io, oscobj,
+ cl_index(obj, extent->start),
+ cl_index(obj, extent->end),
+ weigh_cb, (void *)&npages);
+ if (result == CLP_GANG_ABORT)
+ break;
+ if (result == CLP_GANG_RESCHED)
+ cond_resched();
+ } while (result != CLP_GANG_OKAY);
+ cl_io_fini(env, io);
+
+ return npages;
+}
+
+/**
+ * Get the weight of dlm lock for early cancellation.
+ */
+unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock)
{
- enum cl_lock_mode mode;
+ struct cl_env_nest nest;
+ struct lu_env *env;
+ struct osc_object *obj;
+ struct osc_lock *oscl;
+ unsigned long weight;
+ bool found = false;
+
+ might_sleep();
+ /*
+ * osc_ldlm_weigh_ast has a complex context since it might be called
+ * because of lock canceling, or from user's input. We have to make
+ * a new environment for it. Probably it is implementation safe to use
+ * the upper context because cl_lock_put don't modify environment
+ * variables. But just in case ..
+ */
+ env = cl_env_nested_get(&nest);
+ if (IS_ERR(env))
+ /* Mostly because lack of memory, do not eliminate this lock */
+ return 1;
- mode = clock->cll_descr.cld_mode;
- if (mode == CLM_PHANTOM)
+ LASSERT(dlmlock->l_resource->lr_type == LDLM_EXTENT);
+ obj = dlmlock->l_ast_data;
+ if (obj) {
+ weight = 1;
+ goto out;
+ }
+
+ spin_lock(&obj->oo_ol_spin);
+ list_for_each_entry(oscl, &obj->oo_ol_list, ols_nextlock_oscobj) {
+ if (oscl->ols_dlmlock && oscl->ols_dlmlock != dlmlock)
+ continue;
+ found = true;
+ }
+ spin_unlock(&obj->oo_ol_spin);
+ if (found) {
/*
- * For now, enqueue all glimpse locks in read mode. In the
- * future, client might choose to enqueue LCK_PW lock for
- * glimpse on a file opened for write.
+ * If the lock is being used by an IO, definitely not cancel it.
*/
- mode = CLM_READ;
+ weight = 1;
+ goto out;
+ }
+
+ weight = osc_lock_weight(env, obj, &dlmlock->l_policy_data.l_extent);
+
+out:
+ cl_env_nested_put(&nest, env);
+ return weight;
+}
+static void osc_lock_build_einfo(const struct lu_env *env,
+ const struct cl_lock *lock,
+ struct osc_object *osc,
+ struct ldlm_enqueue_info *einfo)
+{
einfo->ei_type = LDLM_EXTENT;
- einfo->ei_mode = osc_cl_lock2ldlm(mode);
+ einfo->ei_mode = osc_cl_lock2ldlm(lock->cll_descr.cld_mode);
einfo->ei_cb_bl = osc_ldlm_blocking_ast;
- einfo->ei_cb_cp = osc_ldlm_completion_ast;
+ einfo->ei_cb_cp = ldlm_completion_ast;
einfo->ei_cb_gl = osc_ldlm_glimpse_ast;
- einfo->ei_cbdata = lock; /* value to be put into ->l_ast_data */
+ einfo->ei_cbdata = osc; /* value to be put into ->l_ast_data */
}
/**
@@ -981,113 +796,100 @@ static void osc_lock_to_lockless(const struct lu_env *env,
LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
}
-static int osc_lock_compatible(const struct osc_lock *qing,
- const struct osc_lock *qed)
+static bool osc_lock_compatible(const struct osc_lock *qing,
+ const struct osc_lock *qed)
{
- enum cl_lock_mode qing_mode;
- enum cl_lock_mode qed_mode;
+ struct cl_lock_descr *qed_descr = &qed->ols_cl.cls_lock->cll_descr;
+ struct cl_lock_descr *qing_descr = &qing->ols_cl.cls_lock->cll_descr;
- qing_mode = qing->ols_cl.cls_lock->cll_descr.cld_mode;
- if (qed->ols_glimpse &&
- (qed->ols_state >= OLS_UPCALL_RECEIVED || qing_mode == CLM_READ))
- return 1;
+ if (qed->ols_glimpse)
+ return true;
+
+ if (qing_descr->cld_mode == CLM_READ && qed_descr->cld_mode == CLM_READ)
+ return true;
+
+ if (qed->ols_state < OLS_GRANTED)
+ return true;
+
+ if (qed_descr->cld_mode >= qing_descr->cld_mode &&
+ qed_descr->cld_start <= qing_descr->cld_start &&
+ qed_descr->cld_end >= qing_descr->cld_end)
+ return true;
- qed_mode = qed->ols_cl.cls_lock->cll_descr.cld_mode;
- return ((qing_mode == CLM_READ) && (qed_mode == CLM_READ));
+ return false;
}
-/**
- * Cancel all conflicting locks and wait for them to be destroyed.
- *
- * This function is used for two purposes:
- *
- * - early cancel all conflicting locks before starting IO, and
- *
- * - guarantee that pages added to the page cache by lockless IO are never
- * covered by locks other than lockless IO lock, and, hence, are not
- * visible to other threads.
- */
-static int osc_lock_enqueue_wait(const struct lu_env *env,
- const struct osc_lock *olck)
+static void osc_lock_wake_waiters(const struct lu_env *env,
+ struct osc_object *osc,
+ struct osc_lock *oscl)
{
- struct cl_lock *lock = olck->ols_cl.cls_lock;
- struct cl_lock_descr *descr = &lock->cll_descr;
- struct cl_object_header *hdr = cl_object_header(descr->cld_obj);
- struct cl_lock *scan;
- struct cl_lock *conflict = NULL;
- int lockless = osc_lock_is_lockless(olck);
- int rc = 0;
+ spin_lock(&osc->oo_ol_spin);
+ list_del_init(&oscl->ols_nextlock_oscobj);
+ spin_unlock(&osc->oo_ol_spin);
- LASSERT(cl_lock_is_mutexed(lock));
+ spin_lock(&oscl->ols_lock);
+ while (!list_empty(&oscl->ols_waiting_list)) {
+ struct osc_lock *scan;
- /* make it enqueue anyway for glimpse lock, because we actually
- * don't need to cancel any conflicting locks.
- */
- if (olck->ols_glimpse)
- return 0;
+ scan = list_entry(oscl->ols_waiting_list.next, struct osc_lock,
+ ols_wait_entry);
+ list_del_init(&scan->ols_wait_entry);
- spin_lock(&hdr->coh_lock_guard);
- list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
- struct cl_lock_descr *cld = &scan->cll_descr;
- const struct osc_lock *scan_ols;
+ cl_sync_io_note(env, scan->ols_owner, 0);
+ }
+ spin_unlock(&oscl->ols_lock);
+}
+
+static void osc_lock_enqueue_wait(const struct lu_env *env,
+ struct osc_object *obj,
+ struct osc_lock *oscl)
+{
+ struct osc_lock *tmp_oscl;
+ struct cl_lock_descr *need = &oscl->ols_cl.cls_lock->cll_descr;
+ struct cl_sync_io *waiter = &osc_env_info(env)->oti_anchor;
- if (scan == lock)
+ spin_lock(&obj->oo_ol_spin);
+ list_add_tail(&oscl->ols_nextlock_oscobj, &obj->oo_ol_list);
+
+restart:
+ list_for_each_entry(tmp_oscl, &obj->oo_ol_list,
+ ols_nextlock_oscobj) {
+ struct cl_lock_descr *descr;
+
+ if (tmp_oscl == oscl)
break;
- if (scan->cll_state < CLS_QUEUING ||
- scan->cll_state == CLS_FREEING ||
- cld->cld_start > descr->cld_end ||
- cld->cld_end < descr->cld_start)
+ descr = &tmp_oscl->ols_cl.cls_lock->cll_descr;
+ if (descr->cld_start > need->cld_end ||
+ descr->cld_end < need->cld_start)
continue;
- /* overlapped and living locks. */
+ /* We're not supposed to give up group lock */
+ if (descr->cld_mode == CLM_GROUP)
+ break;
- /* We're not supposed to give up group lock. */
- if (scan->cll_descr.cld_mode == CLM_GROUP) {
- LASSERT(descr->cld_mode != CLM_GROUP ||
- descr->cld_gid != scan->cll_descr.cld_gid);
+ if (!osc_lock_is_lockless(oscl) &&
+ osc_lock_compatible(oscl, tmp_oscl))
continue;
- }
- scan_ols = osc_lock_at(scan);
+ /* wait for conflicting lock to be canceled */
+ cl_sync_io_init(waiter, 1, cl_sync_io_end);
+ oscl->ols_owner = waiter;
- /* We need to cancel the compatible locks if we're enqueuing
- * a lockless lock, for example:
- * imagine that client has PR lock on [0, 1000], and thread T0
- * is doing lockless IO in [500, 1500] region. Concurrent
- * thread T1 can see lockless data in [500, 1000], which is
- * wrong, because these data are possibly stale.
- */
- if (!lockless && osc_lock_compatible(olck, scan_ols))
- continue;
+ spin_lock(&tmp_oscl->ols_lock);
+ /* add oscl into tmp's ols_waiting list */
+ list_add_tail(&oscl->ols_wait_entry,
+ &tmp_oscl->ols_waiting_list);
+ spin_unlock(&tmp_oscl->ols_lock);
- cl_lock_get_trust(scan);
- conflict = scan;
- break;
- }
- spin_unlock(&hdr->coh_lock_guard);
+ spin_unlock(&obj->oo_ol_spin);
+ (void)cl_sync_io_wait(env, waiter, 0);
- if (conflict) {
- if (lock->cll_descr.cld_mode == CLM_GROUP) {
- /* we want a group lock but a previous lock request
- * conflicts, we do not wait but return 0 so the
- * request is send to the server
- */
- CDEBUG(D_DLMTRACE, "group lock %p is conflicted with %p, no wait, send to server\n",
- lock, conflict);
- cl_lock_put(env, conflict);
- rc = 0;
- } else {
- CDEBUG(D_DLMTRACE, "lock %p is conflicted with %p, will wait\n",
- lock, conflict);
- LASSERT(!lock->cll_conflict);
- lu_ref_add(&conflict->cll_reference, "cancel-wait",
- lock);
- lock->cll_conflict = conflict;
- rc = CLO_WAIT;
- }
+ spin_lock(&obj->oo_ol_spin);
+ oscl->ols_owner = NULL;
+ goto restart;
}
- return rc;
+ spin_unlock(&obj->oo_ol_spin);
}
/**
@@ -1106,188 +908,122 @@ static int osc_lock_enqueue_wait(const struct lu_env *env,
*/
static int osc_lock_enqueue(const struct lu_env *env,
const struct cl_lock_slice *slice,
- struct cl_io *unused, __u32 enqflags)
+ struct cl_io *unused, struct cl_sync_io *anchor)
{
- struct osc_lock *ols = cl2osc_lock(slice);
- struct cl_lock *lock = ols->ols_cl.cls_lock;
+ struct osc_thread_info *info = osc_env_info(env);
+ struct osc_io *oio = osc_env_io(env);
+ struct osc_object *osc = cl2osc(slice->cls_obj);
+ struct osc_lock *oscl = cl2osc_lock(slice);
+ struct cl_lock *lock = slice->cls_lock;
+ struct ldlm_res_id *resname = &info->oti_resname;
+ ldlm_policy_data_t *policy = &info->oti_policy;
+ osc_enqueue_upcall_f upcall = osc_lock_upcall;
+ void *cookie = oscl;
+ bool async = false;
int result;
- LASSERT(cl_lock_is_mutexed(lock));
- LASSERTF(ols->ols_state == OLS_NEW,
- "Impossible state: %d\n", ols->ols_state);
-
- LASSERTF(ergo(ols->ols_glimpse, lock->cll_descr.cld_mode <= CLM_READ),
- "lock = %p, ols = %p\n", lock, ols);
+ LASSERTF(ergo(oscl->ols_glimpse, lock->cll_descr.cld_mode <= CLM_READ),
+ "lock = %p, ols = %p\n", lock, oscl);
- result = osc_lock_enqueue_wait(env, ols);
- if (result == 0) {
- if (!osc_lock_is_lockless(ols)) {
- struct osc_object *obj = cl2osc(slice->cls_obj);
- struct osc_thread_info *info = osc_env_info(env);
- struct ldlm_res_id *resname = &info->oti_resname;
- ldlm_policy_data_t *policy = &info->oti_policy;
- struct ldlm_enqueue_info *einfo = &ols->ols_einfo;
+ if (oscl->ols_state == OLS_GRANTED)
+ return 0;
- /* lock will be passed as upcall cookie,
- * hold ref to prevent to be released.
- */
- cl_lock_hold_add(env, lock, "upcall", lock);
- /* a user for lock also */
- cl_lock_user_add(env, lock);
- ols->ols_state = OLS_ENQUEUED;
+ if (oscl->ols_flags & LDLM_FL_TEST_LOCK)
+ goto enqueue_base;
- /*
- * XXX: this is possible blocking point as
- * ldlm_lock_match(LDLM_FL_LVB_READY) waits for
- * LDLM_CP_CALLBACK.
- */
- ostid_build_res_name(&obj->oo_oinfo->loi_oi, resname);
- osc_lock_build_policy(env, lock, policy);
- result = osc_enqueue_base(osc_export(obj), resname,
- &ols->ols_flags, policy,
- &ols->ols_lvb,
- obj->oo_oinfo->loi_kms_valid,
- osc_lock_upcall,
- ols, einfo, &ols->ols_handle,
- PTLRPCD_SET, 1, ols->ols_agl);
- if (result != 0) {
- cl_lock_user_del(env, lock);
- cl_lock_unhold(env, lock, "upcall", lock);
- if (unlikely(result == -ECANCELED)) {
- ols->ols_state = OLS_NEW;
- result = 0;
- }
- }
- } else {
- ols->ols_state = OLS_GRANTED;
- ols->ols_owner = osc_env_io(env);
- }
+ if (oscl->ols_glimpse) {
+ LASSERT(equi(oscl->ols_agl, !anchor));
+ async = true;
+ goto enqueue_base;
}
- LASSERT(ergo(ols->ols_glimpse, !osc_lock_is_lockless(ols)));
- return result;
-}
-static int osc_lock_wait(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct osc_lock *olck = cl2osc_lock(slice);
- struct cl_lock *lock = olck->ols_cl.cls_lock;
-
- LINVRNT(osc_lock_invariant(olck));
-
- if (olck->ols_glimpse && olck->ols_state >= OLS_UPCALL_RECEIVED) {
- if (olck->ols_flags & LDLM_FL_LVB_READY) {
- return 0;
- } else if (olck->ols_agl) {
- if (lock->cll_flags & CLF_FROM_UPCALL)
- /* It is from enqueue RPC reply upcall for
- * updating state. Do not re-enqueue.
- */
- return -ENAVAIL;
- olck->ols_state = OLS_NEW;
- } else {
- LASSERT(lock->cll_error);
- return lock->cll_error;
- }
+ osc_lock_enqueue_wait(env, osc, oscl);
+
+ /* we can grant lockless lock right after all conflicting locks
+ * are canceled.
+ */
+ if (osc_lock_is_lockless(oscl)) {
+ oscl->ols_state = OLS_GRANTED;
+ oio->oi_lockless = 1;
+ return 0;
}
- if (olck->ols_state == OLS_NEW) {
- int rc;
-
- LASSERT(olck->ols_agl);
- olck->ols_agl = 0;
- olck->ols_flags &= ~LDLM_FL_BLOCK_NOWAIT;
- rc = osc_lock_enqueue(env, slice, NULL, CEF_ASYNC | CEF_MUST);
- if (rc != 0)
- return rc;
- else
- return CLO_REENQUEUED;
+enqueue_base:
+ oscl->ols_state = OLS_ENQUEUED;
+ if (anchor) {
+ atomic_inc(&anchor->csi_sync_nr);
+ oscl->ols_owner = anchor;
}
- LASSERT(equi(olck->ols_state >= OLS_UPCALL_RECEIVED &&
- lock->cll_error == 0, olck->ols_lock));
+ /**
+ * DLM lock's ast data must be osc_object;
+ * if glimpse or AGL lock, async of osc_enqueue_base() must be true,
+ * DLM's enqueue callback set to osc_lock_upcall() with cookie as
+ * osc_lock.
+ */
+ ostid_build_res_name(&osc->oo_oinfo->loi_oi, resname);
+ osc_lock_build_einfo(env, lock, osc, &oscl->ols_einfo);
+ osc_lock_build_policy(env, lock, policy);
+ if (oscl->ols_agl) {
+ oscl->ols_einfo.ei_cbdata = NULL;
+ /* hold a reference for callback */
+ cl_object_get(osc2cl(osc));
+ upcall = osc_lock_upcall_agl;
+ cookie = osc;
+ }
+ result = osc_enqueue_base(osc_export(osc), resname, &oscl->ols_flags,
+ policy, &oscl->ols_lvb,
+ osc->oo_oinfo->loi_kms_valid,
+ upcall, cookie,
+ &oscl->ols_einfo, PTLRPCD_SET, async,
+ oscl->ols_agl);
+ if (result != 0) {
+ oscl->ols_state = OLS_CANCELLED;
+ osc_lock_wake_waiters(env, osc, oscl);
- return lock->cll_error ?: olck->ols_state >= OLS_GRANTED ? 0 : CLO_WAIT;
+ /* hide error for AGL lock. */
+ if (oscl->ols_agl) {
+ cl_object_put(env, osc2cl(osc));
+ result = 0;
+ }
+ if (anchor)
+ cl_sync_io_note(env, anchor, result);
+ } else {
+ if (osc_lock_is_lockless(oscl)) {
+ oio->oi_lockless = 1;
+ } else if (!async) {
+ LASSERT(oscl->ols_state == OLS_GRANTED);
+ LASSERT(oscl->ols_hold);
+ LASSERT(oscl->ols_dlmlock);
+ }
+ }
+ return result;
}
/**
- * An implementation of cl_lock_operations::clo_use() method that pins cached
- * lock.
+ * Breaks a link between osc_lock and dlm_lock.
*/
-static int osc_lock_use(const struct lu_env *env,
- const struct cl_lock_slice *slice)
+static void osc_lock_detach(const struct lu_env *env, struct osc_lock *olck)
{
- struct osc_lock *olck = cl2osc_lock(slice);
- int rc;
-
- LASSERT(!olck->ols_hold);
+ struct ldlm_lock *dlmlock;
- /*
- * Atomically check for LDLM_FL_CBPENDING and addref a lock if this
- * flag is not set. This protects us from a concurrent blocking ast.
- */
- rc = ldlm_lock_addref_try(&olck->ols_handle, olck->ols_einfo.ei_mode);
- if (rc == 0) {
- olck->ols_hold = 1;
- olck->ols_state = OLS_GRANTED;
- } else {
- struct cl_lock *lock;
+ dlmlock = olck->ols_dlmlock;
+ if (!dlmlock)
+ return;
- /*
- * Lock is being cancelled somewhere within
- * ldlm_handle_bl_callback(): LDLM_FL_CBPENDING is already
- * set, but osc_ldlm_blocking_ast() hasn't yet acquired
- * cl_lock mutex.
- */
- lock = slice->cls_lock;
- LASSERT(lock->cll_state == CLS_INTRANSIT);
- LASSERT(lock->cll_users > 0);
- /* set a flag for osc_dlm_blocking_ast0() to signal the
- * lock.
- */
- olck->ols_ast_wait = 1;
- rc = CLO_WAIT;
+ if (olck->ols_hold) {
+ olck->ols_hold = 0;
+ osc_cancel_base(&olck->ols_handle, olck->ols_einfo.ei_mode);
+ olck->ols_handle.cookie = 0ULL;
}
- return rc;
-}
-static int osc_lock_flush(struct osc_lock *ols, int discard)
-{
- struct cl_lock *lock = ols->ols_cl.cls_lock;
- struct cl_env_nest nest;
- struct lu_env *env;
- int result = 0;
-
- env = cl_env_nested_get(&nest);
- if (!IS_ERR(env)) {
- struct osc_object *obj = cl2osc(ols->ols_cl.cls_obj);
- struct cl_lock_descr *descr = &lock->cll_descr;
- int rc = 0;
-
- if (descr->cld_mode >= CLM_WRITE) {
- result = osc_cache_writeback_range(env, obj,
- descr->cld_start,
- descr->cld_end,
- 1, discard);
- LDLM_DEBUG(ols->ols_lock,
- "lock %p: %d pages were %s.\n", lock, result,
- discard ? "discarded" : "written");
- if (result > 0)
- result = 0;
- }
+ olck->ols_dlmlock = NULL;
- rc = cl_lock_discard_pages(env, lock);
- if (result == 0 && rc < 0)
- result = rc;
-
- cl_env_nested_put(&nest, env);
- } else
- result = PTR_ERR(env);
- if (result == 0) {
- ols->ols_flush = 1;
- LINVRNT(!osc_lock_has_pages(ols));
- }
- return result;
+ /* release a reference taken in osc_lock_upcall(). */
+ LASSERT(olck->ols_has_ref);
+ lu_ref_del(&dlmlock->l_reference, "osc_lock", olck);
+ LDLM_LOCK_RELEASE(dlmlock);
+ olck->ols_has_ref = 0;
}
/**
@@ -1307,96 +1043,16 @@ static int osc_lock_flush(struct osc_lock *ols, int discard)
static void osc_lock_cancel(const struct lu_env *env,
const struct cl_lock_slice *slice)
{
- struct cl_lock *lock = slice->cls_lock;
- struct osc_lock *olck = cl2osc_lock(slice);
- struct ldlm_lock *dlmlock = olck->ols_lock;
- int result = 0;
- int discard;
-
- LASSERT(cl_lock_is_mutexed(lock));
- LINVRNT(osc_lock_invariant(olck));
-
- if (dlmlock) {
- int do_cancel;
-
- discard = !!(dlmlock->l_flags & LDLM_FL_DISCARD_DATA);
- if (olck->ols_state >= OLS_GRANTED)
- result = osc_lock_flush(olck, discard);
- osc_lock_unhold(olck);
-
- lock_res_and_lock(dlmlock);
- /* Now that we're the only user of dlm read/write reference,
- * mostly the ->l_readers + ->l_writers should be zero.
- * However, there is a corner case.
- * See bug 18829 for details.
- */
- do_cancel = (dlmlock->l_readers == 0 &&
- dlmlock->l_writers == 0);
- dlmlock->l_flags |= LDLM_FL_CBPENDING;
- unlock_res_and_lock(dlmlock);
- if (do_cancel)
- result = ldlm_cli_cancel(&olck->ols_handle, LCF_ASYNC);
- if (result < 0)
- CL_LOCK_DEBUG(D_ERROR, env, lock,
- "lock %p cancel failure with error(%d)\n",
- lock, result);
- }
- olck->ols_state = OLS_CANCELLED;
- olck->ols_flags &= ~LDLM_FL_LVB_READY;
- osc_lock_detach(env, olck);
-}
-
-static int osc_lock_has_pages(struct osc_lock *olck)
-{
- return 0;
-}
-
-static void osc_lock_delete(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct osc_lock *olck;
+ struct osc_object *obj = cl2osc(slice->cls_obj);
+ struct osc_lock *oscl = cl2osc_lock(slice);
- olck = cl2osc_lock(slice);
- if (olck->ols_glimpse) {
- LASSERT(!olck->ols_hold);
- LASSERT(!olck->ols_lock);
- return;
- }
+ LINVRNT(osc_lock_invariant(oscl));
- LINVRNT(osc_lock_invariant(olck));
- LINVRNT(!osc_lock_has_pages(olck));
+ osc_lock_detach(env, oscl);
+ oscl->ols_state = OLS_CANCELLED;
+ oscl->ols_flags &= ~LDLM_FL_LVB_READY;
- osc_lock_unhold(olck);
- osc_lock_detach(env, olck);
-}
-
-/**
- * Implements cl_lock_operations::clo_state() method for osc layer.
- *
- * Maintains osc_lock::ols_owner field.
- *
- * This assumes that lock always enters CLS_HELD (from some other state) in
- * the same IO context as one that requested the lock. This should not be a
- * problem, because context is by definition shared by all activity pertaining
- * to the same high-level IO.
- */
-static void osc_lock_state(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- enum cl_lock_state state)
-{
- struct osc_lock *lock = cl2osc_lock(slice);
-
- /*
- * XXX multiple io contexts can use the lock at the same time.
- */
- LINVRNT(osc_lock_invariant(lock));
- if (state == CLS_HELD && slice->cls_lock->cll_state != CLS_HELD) {
- struct osc_io *oio = osc_env_io(env);
-
- LASSERT(!lock->ols_owner);
- lock->ols_owner = oio;
- } else if (state != CLS_HELD)
- lock->ols_owner = NULL;
+ osc_lock_wake_waiters(env, obj, oscl);
}
static int osc_lock_print(const struct lu_env *env, void *cookie,
@@ -1404,221 +1060,161 @@ static int osc_lock_print(const struct lu_env *env, void *cookie,
{
struct osc_lock *lock = cl2osc_lock(slice);
- /*
- * XXX print ldlm lock and einfo properly.
- */
(*p)(env, cookie, "%p %#16llx %#llx %d %p ",
- lock->ols_lock, lock->ols_flags, lock->ols_handle.cookie,
+ lock->ols_dlmlock, lock->ols_flags, lock->ols_handle.cookie,
lock->ols_state, lock->ols_owner);
osc_lvb_print(env, cookie, p, &lock->ols_lvb);
return 0;
}
-static int osc_lock_fits_into(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- const struct cl_lock_descr *need,
- const struct cl_io *io)
-{
- struct osc_lock *ols = cl2osc_lock(slice);
-
- if (need->cld_enq_flags & CEF_NEVER)
- return 0;
-
- if (ols->ols_state >= OLS_CANCELLED)
- return 0;
-
- if (need->cld_mode == CLM_PHANTOM) {
- if (ols->ols_agl)
- return !(ols->ols_state > OLS_RELEASED);
-
- /*
- * Note: the QUEUED lock can't be matched here, otherwise
- * it might cause the deadlocks.
- * In read_process,
- * P1: enqueued read lock, create sublock1
- * P2: enqueued write lock, create sublock2(conflicted
- * with sublock1).
- * P1: Grant read lock.
- * P1: enqueued glimpse lock(with holding sublock1_read),
- * matched with sublock2, waiting sublock2 to be granted.
- * But sublock2 can not be granted, because P1
- * will not release sublock1. Bang!
- */
- if (ols->ols_state < OLS_GRANTED ||
- ols->ols_state > OLS_RELEASED)
- return 0;
- } else if (need->cld_enq_flags & CEF_MUST) {
- /*
- * If the lock hasn't ever enqueued, it can't be matched
- * because enqueue process brings in many information
- * which can be used to determine things such as lockless,
- * CEF_MUST, etc.
- */
- if (ols->ols_state < OLS_UPCALL_RECEIVED &&
- ols->ols_locklessable)
- return 0;
- }
- return 1;
-}
-
static const struct cl_lock_operations osc_lock_ops = {
.clo_fini = osc_lock_fini,
.clo_enqueue = osc_lock_enqueue,
- .clo_wait = osc_lock_wait,
- .clo_unuse = osc_lock_unuse,
- .clo_use = osc_lock_use,
- .clo_delete = osc_lock_delete,
- .clo_state = osc_lock_state,
.clo_cancel = osc_lock_cancel,
- .clo_weigh = osc_lock_weigh,
.clo_print = osc_lock_print,
- .clo_fits_into = osc_lock_fits_into,
};
-static int osc_lock_lockless_unuse(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct osc_lock *ols = cl2osc_lock(slice);
- struct cl_lock *lock = slice->cls_lock;
-
- LASSERT(ols->ols_state == OLS_GRANTED);
- LINVRNT(osc_lock_invariant(ols));
-
- cl_lock_cancel(env, lock);
- cl_lock_delete(env, lock);
- return 0;
-}
-
static void osc_lock_lockless_cancel(const struct lu_env *env,
const struct cl_lock_slice *slice)
{
struct osc_lock *ols = cl2osc_lock(slice);
+ struct osc_object *osc = cl2osc(slice->cls_obj);
+ struct cl_lock_descr *descr = &slice->cls_lock->cll_descr;
int result;
- result = osc_lock_flush(ols, 0);
+ LASSERT(!ols->ols_dlmlock);
+ result = osc_lock_flush(osc, descr->cld_start, descr->cld_end,
+ descr->cld_mode, 0);
if (result)
CERROR("Pages for lockless lock %p were not purged(%d)\n",
ols, result);
- ols->ols_state = OLS_CANCELLED;
-}
-
-static int osc_lock_lockless_wait(const struct lu_env *env,
- const struct cl_lock_slice *slice)
-{
- struct osc_lock *olck = cl2osc_lock(slice);
- struct cl_lock *lock = olck->ols_cl.cls_lock;
- LINVRNT(osc_lock_invariant(olck));
- LASSERT(olck->ols_state >= OLS_UPCALL_RECEIVED);
-
- return lock->cll_error;
+ osc_lock_wake_waiters(env, osc, ols);
}
-static void osc_lock_lockless_state(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- enum cl_lock_state state)
-{
- struct osc_lock *lock = cl2osc_lock(slice);
+static const struct cl_lock_operations osc_lock_lockless_ops = {
+ .clo_fini = osc_lock_fini,
+ .clo_enqueue = osc_lock_enqueue,
+ .clo_cancel = osc_lock_lockless_cancel,
+ .clo_print = osc_lock_print
+};
- LINVRNT(osc_lock_invariant(lock));
- if (state == CLS_HELD) {
- struct osc_io *oio = osc_env_io(env);
+static void osc_lock_set_writer(const struct lu_env *env,
+ const struct cl_io *io,
+ struct cl_object *obj, struct osc_lock *oscl)
+{
+ struct cl_lock_descr *descr = &oscl->ols_cl.cls_lock->cll_descr;
+ pgoff_t io_start;
+ pgoff_t io_end;
- LASSERT(ergo(lock->ols_owner, lock->ols_owner == oio));
- lock->ols_owner = oio;
+ if (!cl_object_same(io->ci_obj, obj))
+ return;
- /* set the io to be lockless if this lock is for io's
- * host object
- */
- if (cl_object_same(oio->oi_cl.cis_obj, slice->cls_obj))
- oio->oi_lockless = 1;
+ if (likely(io->ci_type == CIT_WRITE)) {
+ io_start = cl_index(obj, io->u.ci_rw.crw_pos);
+ io_end = cl_index(obj, io->u.ci_rw.crw_pos +
+ io->u.ci_rw.crw_count - 1);
+ if (cl_io_is_append(io)) {
+ io_start = 0;
+ io_end = CL_PAGE_EOF;
+ }
+ } else {
+ LASSERT(cl_io_is_mkwrite(io));
+ io_start = io_end = io->u.ci_fault.ft_index;
}
-}
-static int osc_lock_lockless_fits_into(const struct lu_env *env,
- const struct cl_lock_slice *slice,
- const struct cl_lock_descr *need,
- const struct cl_io *io)
-{
- struct osc_lock *lock = cl2osc_lock(slice);
-
- if (!(need->cld_enq_flags & CEF_NEVER))
- return 0;
+ if (descr->cld_mode >= CLM_WRITE &&
+ descr->cld_start <= io_start && descr->cld_end >= io_end) {
+ struct osc_io *oio = osc_env_io(env);
- /* lockless lock should only be used by its owning io. b22147 */
- return (lock->ols_owner == osc_env_io(env));
+ /* There must be only one lock to match the write region */
+ LASSERT(!oio->oi_write_osclock);
+ oio->oi_write_osclock = oscl;
+ }
}
-static const struct cl_lock_operations osc_lock_lockless_ops = {
- .clo_fini = osc_lock_fini,
- .clo_enqueue = osc_lock_enqueue,
- .clo_wait = osc_lock_lockless_wait,
- .clo_unuse = osc_lock_lockless_unuse,
- .clo_state = osc_lock_lockless_state,
- .clo_fits_into = osc_lock_lockless_fits_into,
- .clo_cancel = osc_lock_lockless_cancel,
- .clo_print = osc_lock_print
-};
-
int osc_lock_init(const struct lu_env *env,
struct cl_object *obj, struct cl_lock *lock,
- const struct cl_io *unused)
+ const struct cl_io *io)
{
- struct osc_lock *clk;
- int result;
-
- clk = kmem_cache_zalloc(osc_lock_kmem, GFP_NOFS);
- if (clk) {
- __u32 enqflags = lock->cll_descr.cld_enq_flags;
+ struct osc_lock *oscl;
+ __u32 enqflags = lock->cll_descr.cld_enq_flags;
+
+ oscl = kmem_cache_zalloc(osc_lock_kmem, GFP_NOFS);
+ if (!oscl)
+ return -ENOMEM;
+
+ oscl->ols_state = OLS_NEW;
+ spin_lock_init(&oscl->ols_lock);
+ INIT_LIST_HEAD(&oscl->ols_waiting_list);
+ INIT_LIST_HEAD(&oscl->ols_wait_entry);
+ INIT_LIST_HEAD(&oscl->ols_nextlock_oscobj);
+
+ oscl->ols_flags = osc_enq2ldlm_flags(enqflags);
+ oscl->ols_agl = !!(enqflags & CEF_AGL);
+ if (oscl->ols_agl)
+ oscl->ols_flags |= LDLM_FL_BLOCK_NOWAIT;
+ if (oscl->ols_flags & LDLM_FL_HAS_INTENT) {
+ oscl->ols_flags |= LDLM_FL_BLOCK_GRANTED;
+ oscl->ols_glimpse = 1;
+ }
- osc_lock_build_einfo(env, lock, clk, &clk->ols_einfo);
- atomic_set(&clk->ols_pageref, 0);
- clk->ols_state = OLS_NEW;
+ cl_lock_slice_add(lock, &oscl->ols_cl, obj, &osc_lock_ops);
- clk->ols_flags = osc_enq2ldlm_flags(enqflags);
- clk->ols_agl = !!(enqflags & CEF_AGL);
- if (clk->ols_agl)
- clk->ols_flags |= LDLM_FL_BLOCK_NOWAIT;
- if (clk->ols_flags & LDLM_FL_HAS_INTENT)
- clk->ols_glimpse = 1;
+ if (!(enqflags & CEF_MUST))
+ /* try to convert this lock to a lockless lock */
+ osc_lock_to_lockless(env, oscl, (enqflags & CEF_NEVER));
+ if (oscl->ols_locklessable && !(enqflags & CEF_DISCARD_DATA))
+ oscl->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
- cl_lock_slice_add(lock, &clk->ols_cl, obj, &osc_lock_ops);
+ if (io->ci_type == CIT_WRITE || cl_io_is_mkwrite(io))
+ osc_lock_set_writer(env, io, obj, oscl);
- if (!(enqflags & CEF_MUST))
- /* try to convert this lock to a lockless lock */
- osc_lock_to_lockless(env, clk, (enqflags & CEF_NEVER));
- if (clk->ols_locklessable && !(enqflags & CEF_DISCARD_DATA))
- clk->ols_flags |= LDLM_FL_DENY_ON_CONTENTION;
- LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags %llx",
- lock, clk, clk->ols_flags);
+ LDLM_DEBUG_NOLOCK("lock %p, osc lock %p, flags %llx\n",
+ lock, oscl, oscl->ols_flags);
- result = 0;
- } else
- result = -ENOMEM;
- return result;
+ return 0;
}
-int osc_dlm_lock_pageref(struct ldlm_lock *dlm)
+/**
+ * Finds an existing lock covering given index and optionally different from a
+ * given \a except lock.
+ */
+struct ldlm_lock *osc_dlmlock_at_pgoff(const struct lu_env *env,
+ struct osc_object *obj, pgoff_t index,
+ int pending, int canceling)
{
- struct osc_lock *olock;
- int rc = 0;
-
- spin_lock(&osc_ast_guard);
- olock = dlm->l_ast_data;
+ struct osc_thread_info *info = osc_env_info(env);
+ struct ldlm_res_id *resname = &info->oti_resname;
+ ldlm_policy_data_t *policy = &info->oti_policy;
+ struct lustre_handle lockh;
+ struct ldlm_lock *lock = NULL;
+ enum ldlm_mode mode;
+ __u64 flags;
+
+ ostid_build_res_name(&obj->oo_oinfo->loi_oi, resname);
+ osc_index2policy(policy, osc2cl(obj), index, index);
+ policy->l_extent.gid = LDLM_GID_ANY;
+
+ flags = LDLM_FL_BLOCK_GRANTED | LDLM_FL_TEST_LOCK;
+ if (pending)
+ flags |= LDLM_FL_CBPENDING;
/*
- * there's a very rare race with osc_page_addref_lock(), but that
- * doesn't matter because in the worst case we don't cancel a lock
- * which we actually can, that's no harm.
+	 * It is fine to match any group lock since there can be only one
+	 * with a unique gid, and it conflicts with all other lock modes anyway.
*/
- if (olock &&
- atomic_add_return(_PAGEREF_MAGIC,
- &olock->ols_pageref) != _PAGEREF_MAGIC) {
- atomic_sub(_PAGEREF_MAGIC, &olock->ols_pageref);
- rc = 1;
+again:
+ mode = ldlm_lock_match(osc_export(obj)->exp_obd->obd_namespace,
+ flags, resname, LDLM_EXTENT, policy,
+ LCK_PR | LCK_PW | LCK_GROUP, &lockh, canceling);
+ if (mode != 0) {
+ lock = ldlm_handle2lock(&lockh);
+ /* RACE: the lock is cancelled so let's try again */
+ if (unlikely(!lock))
+ goto again;
}
- spin_unlock(&osc_ast_guard);
- return rc;
+ return lock;
}
/** @} osc */
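
The new osc_dlmlock_at_pgoff() helper returns the ldlm_lock with the reference
taken by ldlm_handle2lock() still held, so every caller must drop it with
LDLM_LOCK_PUT(). A minimal caller sketch (illustration only; "obj" and "index"
stand for any osc_object and page index, mirroring how osc_page_is_under_lock()
uses the helper later in this patch):

	struct ldlm_lock *dlmlock;
	pgoff_t last;

	dlmlock = osc_dlmlock_at_pgoff(env, obj, index, 0, 0);
	if (dlmlock) {
		/* the matched extent covers pages up to and including 'last' */
		last = cl_index(osc2cl(obj),
				dlmlock->l_policy_data.l_extent.end);
		LDLM_LOCK_PUT(dlmlock);	/* drop the ldlm_handle2lock() ref */
	}
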
diff --git a/drivers/staging/lustre/lustre/osc/osc_object.c b/drivers/staging/lustre/lustre/osc/osc_object.c
index 9d474fcdd9a7..738ab10ab274 100644
--- a/drivers/staging/lustre/lustre/osc/osc_object.c
+++ b/drivers/staging/lustre/lustre/osc/osc_object.c
@@ -36,6 +36,7 @@
* Implementation of cl_object for OSC layer.
*
* Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@intel.com>
*/
#define DEBUG_SUBSYSTEM S_OSC
@@ -94,6 +95,9 @@ static int osc_object_init(const struct lu_env *env, struct lu_object *obj,
atomic_set(&osc->oo_nr_reads, 0);
atomic_set(&osc->oo_nr_writes, 0);
spin_lock_init(&osc->oo_lock);
+ spin_lock_init(&osc->oo_tree_lock);
+ spin_lock_init(&osc->oo_ol_spin);
+ INIT_LIST_HEAD(&osc->oo_ol_list);
cl_object_page_init(lu2cl(obj), sizeof(struct osc_page));
@@ -120,6 +124,7 @@ static void osc_object_free(const struct lu_env *env, struct lu_object *obj)
LASSERT(list_empty(&osc->oo_reading_exts));
LASSERT(atomic_read(&osc->oo_nr_reads) == 0);
LASSERT(atomic_read(&osc->oo_nr_writes) == 0);
+ LASSERT(list_empty(&osc->oo_ol_list));
lu_object_fini(obj);
kmem_cache_free(osc_object_kmem, osc);
@@ -192,6 +197,32 @@ static int osc_object_glimpse(const struct lu_env *env,
return 0;
}
+static int osc_object_ast_clear(struct ldlm_lock *lock, void *data)
+{
+ LASSERT(lock->l_granted_mode == lock->l_req_mode);
+ if (lock->l_ast_data == data)
+ lock->l_ast_data = NULL;
+ return LDLM_ITER_CONTINUE;
+}
+
+static int osc_object_prune(const struct lu_env *env, struct cl_object *obj)
+{
+ struct osc_object *osc = cl2osc(obj);
+ struct ldlm_res_id *resname = &osc_env_info(env)->oti_resname;
+
+ LASSERTF(osc->oo_npages == 0,
+ DFID "still have %lu pages, obj: %p, osc: %p\n",
+ PFID(lu_object_fid(&obj->co_lu)), osc->oo_npages, obj, osc);
+
+	/* DLM locks don't hold a reference on the osc_object, so we have
+	 * to clear l_ast_data before the object is destroyed.
+ */
+ ostid_build_res_name(&osc->oo_oinfo->loi_oi, resname);
+ ldlm_resource_iterate(osc_export(osc)->exp_obd->obd_namespace, resname,
+ osc_object_ast_clear, osc);
+ return 0;
+}
+
void osc_object_set_contended(struct osc_object *obj)
{
obj->oo_contention_time = cfs_time_current();
@@ -236,12 +267,12 @@ static const struct cl_object_operations osc_ops = {
.coo_io_init = osc_io_init,
.coo_attr_get = osc_attr_get,
.coo_attr_set = osc_attr_set,
- .coo_glimpse = osc_object_glimpse
+ .coo_glimpse = osc_object_glimpse,
+ .coo_prune = osc_object_prune
};
static const struct lu_object_operations osc_lu_obj_ops = {
.loo_object_init = osc_object_init,
- .loo_object_delete = NULL,
.loo_object_release = NULL,
.loo_object_free = osc_object_free,
.loo_object_print = osc_object_print,
@@ -261,8 +292,9 @@ struct lu_object *osc_object_alloc(const struct lu_env *env,
lu_object_init(obj, NULL, dev);
osc->oo_cl.co_ops = &osc_ops;
obj->lo_ops = &osc_lu_obj_ops;
- } else
+ } else {
obj = NULL;
+ }
return obj;
}
diff --git a/drivers/staging/lustre/lustre/osc/osc_page.c b/drivers/staging/lustre/lustre/osc/osc_page.c
index ce9ddd515f64..c29c2eabe39c 100644
--- a/drivers/staging/lustre/lustre/osc/osc_page.c
+++ b/drivers/staging/lustre/lustre/osc/osc_page.c
@@ -36,14 +36,15 @@
* Implementation of cl_page for OSC layer.
*
* Author: Nikita Danilov <nikita.danilov@sun.com>
+ * Author: Jinshan Xiong <jinshan.xiong@intel.com>
*/
#define DEBUG_SUBSYSTEM S_OSC
#include "osc_cl_internal.h"
-static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del);
-static void osc_lru_add(struct client_obd *cli, struct osc_page *opg);
+static void osc_lru_del(struct client_obd *cli, struct osc_page *opg);
+static void osc_lru_use(struct client_obd *cli, struct osc_page *opg);
static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
struct osc_page *opg);
@@ -63,18 +64,9 @@ static int osc_page_protected(const struct lu_env *env,
* Page operations.
*
*/
-static void osc_page_fini(const struct lu_env *env,
- struct cl_page_slice *slice)
-{
- struct osc_page *opg = cl2osc_page(slice);
-
- CDEBUG(D_TRACE, "%p\n", opg);
- LASSERT(!opg->ops_lock);
-}
-
static void osc_page_transfer_get(struct osc_page *opg, const char *label)
{
- struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);
+ struct cl_page *page = opg->ops_cl.cpl_page;
LASSERT(!opg->ops_transfer_pinned);
cl_page_get(page);
@@ -85,11 +77,11 @@ static void osc_page_transfer_get(struct osc_page *opg, const char *label)
static void osc_page_transfer_put(const struct lu_env *env,
struct osc_page *opg)
{
- struct cl_page *page = cl_page_top(opg->ops_cl.cpl_page);
+ struct cl_page *page = opg->ops_cl.cpl_page;
if (opg->ops_transfer_pinned) {
- lu_ref_del(&page->cp_reference, "transfer", page);
opg->ops_transfer_pinned = 0;
+ lu_ref_del(&page->cp_reference, "transfer", page);
cl_page_put(env, page);
}
}
@@ -104,10 +96,7 @@ static void osc_page_transfer_add(const struct lu_env *env,
{
struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
- /* ops_lru and ops_inflight share the same field, so take it from LRU
- * first and then use it as inflight.
- */
- osc_lru_del(osc_cli(obj), opg, false);
+ osc_lru_use(osc_cli(obj), opg);
spin_lock(&obj->oo_seatbelt);
list_add(&opg->ops_inflight, &obj->oo_inflight[crt]);
@@ -115,11 +104,9 @@ static void osc_page_transfer_add(const struct lu_env *env,
spin_unlock(&obj->oo_seatbelt);
}
-static int osc_page_cache_add(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io)
+int osc_page_cache_add(const struct lu_env *env,
+ const struct cl_page_slice *slice, struct cl_io *io)
{
- struct osc_io *oio = osc_env_io(env);
struct osc_page *opg = cl2osc_page(slice);
int result;
@@ -132,17 +119,6 @@ static int osc_page_cache_add(const struct lu_env *env,
else
osc_page_transfer_add(env, opg, CRT_WRITE);
- /* for sync write, kernel will wait for this page to be flushed before
- * osc_io_end() is called, so release it earlier.
- * for mkwrite(), it's known there is no further pages.
- */
- if (cl_io_is_sync_write(io) || cl_io_is_mkwrite(io)) {
- if (oio->oi_active) {
- osc_extent_release(env, oio->oi_active);
- oio->oi_active = NULL;
- }
- }
-
return result;
}
@@ -154,102 +130,25 @@ void osc_index2policy(ldlm_policy_data_t *policy, const struct cl_object *obj,
policy->l_extent.end = cl_offset(obj, end + 1) - 1;
}
-static int osc_page_addref_lock(const struct lu_env *env,
- struct osc_page *opg,
- struct cl_lock *lock)
-{
- struct osc_lock *olock;
- int rc;
-
- LASSERT(!opg->ops_lock);
-
- olock = osc_lock_at(lock);
- if (atomic_inc_return(&olock->ols_pageref) <= 0) {
- atomic_dec(&olock->ols_pageref);
- rc = -ENODATA;
- } else {
- cl_lock_get(lock);
- opg->ops_lock = lock;
- rc = 0;
- }
- return rc;
-}
-
-static void osc_page_putref_lock(const struct lu_env *env,
- struct osc_page *opg)
-{
- struct cl_lock *lock = opg->ops_lock;
- struct osc_lock *olock;
-
- LASSERT(lock);
- olock = osc_lock_at(lock);
-
- atomic_dec(&olock->ols_pageref);
- opg->ops_lock = NULL;
-
- cl_lock_put(env, lock);
-}
-
static int osc_page_is_under_lock(const struct lu_env *env,
const struct cl_page_slice *slice,
- struct cl_io *unused)
+ struct cl_io *unused, pgoff_t *max_index)
{
- struct cl_lock *lock;
+ struct osc_page *opg = cl2osc_page(slice);
+ struct ldlm_lock *dlmlock;
int result = -ENODATA;
- lock = cl_lock_at_page(env, slice->cpl_obj, slice->cpl_page,
- NULL, 1, 0);
- if (lock) {
- if (osc_page_addref_lock(env, cl2osc_page(slice), lock) == 0)
- result = -EBUSY;
- cl_lock_put(env, lock);
+ dlmlock = osc_dlmlock_at_pgoff(env, cl2osc(slice->cpl_obj),
+ osc_index(opg), 1, 0);
+ if (dlmlock) {
+ *max_index = cl_index(slice->cpl_obj,
+ dlmlock->l_policy_data.l_extent.end);
+ LDLM_LOCK_PUT(dlmlock);
+ result = 0;
}
return result;
}
-static void osc_page_disown(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *io)
-{
- struct osc_page *opg = cl2osc_page(slice);
-
- if (unlikely(opg->ops_lock))
- osc_page_putref_lock(env, opg);
-}
-
-static void osc_page_completion_read(const struct lu_env *env,
- const struct cl_page_slice *slice,
- int ioret)
-{
- struct osc_page *opg = cl2osc_page(slice);
- struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
-
- if (likely(opg->ops_lock))
- osc_page_putref_lock(env, opg);
- osc_lru_add(osc_cli(obj), opg);
-}
-
-static void osc_page_completion_write(const struct lu_env *env,
- const struct cl_page_slice *slice,
- int ioret)
-{
- struct osc_page *opg = cl2osc_page(slice);
- struct osc_object *obj = cl2osc(slice->cpl_obj);
-
- osc_lru_add(osc_cli(obj), opg);
-}
-
-static int osc_page_fail(const struct lu_env *env,
- const struct cl_page_slice *slice,
- struct cl_io *unused)
-{
- /*
- * Cached read?
- */
- LBUG();
- return 0;
-}
-
static const char *osc_list(struct list_head *head)
{
return list_empty(head) ? "-" : "+";
@@ -272,8 +171,8 @@ static int osc_page_print(const struct lu_env *env,
struct osc_object *obj = cl2osc(slice->cpl_obj);
struct client_obd *cli = &osc_export(obj)->exp_obd->u.cli;
- return (*printer)(env, cookie, LUSTRE_OSC_NAME "-page@%p: 1< %#x %d %u %s %s > 2< %llu %u %u %#x %#x | %p %p %p > 3< %s %p %d %lu %d > 4< %d %d %d %lu %s | %s %s %s %s > 5< %s %s %s %s | %d %s | %d %s %s>\n",
- opg,
+ return (*printer)(env, cookie, LUSTRE_OSC_NAME "-page@%p %lu: 1< %#x %d %u %s %s > 2< %llu %u %u %#x %#x | %p %p %p > 3< %s %p %d %lu %d > 4< %d %d %d %lu %s | %s %s %s %s > 5< %s %s %s %s | %d %s | %d %s %s>\n",
+ opg, osc_index(opg),
/* 1 */
oap->oap_magic, oap->oap_cmd,
oap->oap_interrupted,
@@ -321,7 +220,7 @@ static void osc_page_delete(const struct lu_env *env,
osc_page_transfer_put(env, opg);
rc = osc_teardown_async_page(env, obj, opg);
if (rc) {
- CL_PAGE_DEBUG(D_ERROR, env, cl_page_top(slice->cpl_page),
+ CL_PAGE_DEBUG(D_ERROR, env, slice->cpl_page,
"Trying to teardown failed: %d\n", rc);
LASSERT(0);
}
@@ -334,7 +233,19 @@ static void osc_page_delete(const struct lu_env *env,
}
spin_unlock(&obj->oo_seatbelt);
- osc_lru_del(osc_cli(obj), opg, true);
+ osc_lru_del(osc_cli(obj), opg);
+
+ if (slice->cpl_page->cp_type == CPT_CACHEABLE) {
+ void *value;
+
+ spin_lock(&obj->oo_tree_lock);
+ value = radix_tree_delete(&obj->oo_tree, osc_index(opg));
+ if (value)
+ --obj->oo_npages;
+ spin_unlock(&obj->oo_tree_lock);
+
+ LASSERT(ergo(value, value == opg));
+ }
}
static void osc_page_clip(const struct lu_env *env,
@@ -382,28 +293,16 @@ static int osc_page_flush(const struct lu_env *env,
}
static const struct cl_page_operations osc_page_ops = {
- .cpo_fini = osc_page_fini,
.cpo_print = osc_page_print,
.cpo_delete = osc_page_delete,
.cpo_is_under_lock = osc_page_is_under_lock,
- .cpo_disown = osc_page_disown,
- .io = {
- [CRT_READ] = {
- .cpo_cache_add = osc_page_fail,
- .cpo_completion = osc_page_completion_read
- },
- [CRT_WRITE] = {
- .cpo_cache_add = osc_page_cache_add,
- .cpo_completion = osc_page_completion_write
- }
- },
.cpo_clip = osc_page_clip,
.cpo_cancel = osc_page_cancel,
.cpo_flush = osc_page_flush
};
int osc_page_init(const struct lu_env *env, struct cl_object *obj,
- struct cl_page *page, struct page *vmpage)
+ struct cl_page *page, pgoff_t index)
{
struct osc_object *osc = cl2osc(obj);
struct osc_page *opg = cl_object_page_slice(obj, page);
@@ -412,13 +311,14 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj,
opg->ops_from = 0;
opg->ops_to = PAGE_SIZE;
- result = osc_prep_async_page(osc, opg, vmpage,
- cl_offset(obj, page->cp_index));
+ result = osc_prep_async_page(osc, opg, page->cp_vmpage,
+ cl_offset(obj, index));
if (result == 0) {
struct osc_io *oio = osc_env_io(env);
opg->ops_srvlock = osc_io_srvlock(oio);
- cl_page_slice_add(page, &opg->ops_cl, obj, &osc_page_ops);
+ cl_page_slice_add(page, &opg->ops_cl, obj, index,
+ &osc_page_ops);
}
/*
* Cannot assert osc_page_protected() here as read-ahead
@@ -431,12 +331,47 @@ int osc_page_init(const struct lu_env *env, struct cl_object *obj,
INIT_LIST_HEAD(&opg->ops_lru);
/* reserve an LRU space for this page */
- if (page->cp_type == CPT_CACHEABLE && result == 0)
+ if (page->cp_type == CPT_CACHEABLE && result == 0) {
result = osc_lru_reserve(env, osc, opg);
+ if (result == 0) {
+ spin_lock(&osc->oo_tree_lock);
+ result = radix_tree_insert(&osc->oo_tree, index, opg);
+ if (result == 0)
+ ++osc->oo_npages;
+ spin_unlock(&osc->oo_tree_lock);
+ LASSERT(result == 0);
+ }
+ }
return result;
}
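
Pages of a cacheable object are now also tracked in a per-object radix tree
(oo_tree, protected by oo_tree_lock and counted in oo_npages). A condensed
sketch of the two halves of that bookkeeping as this patch wires them up
(insert in osc_page_init(), delete in osc_page_delete(); not literal patch
code):

	/* insert side, after the LRU slot has been reserved */
	spin_lock(&osc->oo_tree_lock);
	rc = radix_tree_insert(&osc->oo_tree, index, opg);
	if (rc == 0)
		++osc->oo_npages;
	spin_unlock(&osc->oo_tree_lock);

	/* delete side, when the page is torn down */
	spin_lock(&obj->oo_tree_lock);
	if (radix_tree_delete(&obj->oo_tree, osc_index(opg)))
		--obj->oo_npages;
	spin_unlock(&obj->oo_tree_lock);
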
+int osc_over_unstable_soft_limit(struct client_obd *cli)
+{
+ long obd_upages, obd_dpages, osc_upages;
+
+ /* Can't check cli->cl_unstable_count, therefore, no soft limit */
+ if (!cli)
+ return 0;
+
+ obd_upages = atomic_read(&obd_unstable_pages);
+ obd_dpages = atomic_read(&obd_dirty_pages);
+
+ osc_upages = atomic_read(&cli->cl_unstable_count);
+
+ /*
+ * obd_max_dirty_pages is the max number of (dirty + unstable)
+ * pages allowed at any given time. To simulate an unstable page
+ * only limit, we subtract the current number of dirty pages
+	 * from this max. This difference is roughly the number of pages
+ * currently available for unstable pages. Thus, the soft limit
+ * is half of that difference. Check osc_upages to ensure we don't
+ * set SOFT_SYNC for OSCs without any outstanding unstable pages.
+ */
+ return osc_upages &&
+ obd_upages >= (obd_max_dirty_pages - obd_dpages) / 2;
+}
+
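
A worked example of the soft-limit check above (the numbers are invented, for
illustration only): with obd_max_dirty_pages = 1000 and obd_dirty_pages = 400,
the headroom left for unstable pages is 600, so the soft limit is 300. An OSC
that has outstanding unstable pages of its own (osc_upages != 0) is considered
over the limit once the global obd_unstable_pages count reaches 300, and its
subsequent RPCs get OBD_BRW_SOFT_SYNC set in osc_page_submit() below.
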
/**
* Helper function called by osc_io_submit() for every page in an immediate
* transfer (i.e., transferred synchronously).
@@ -460,6 +395,9 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
oap->oap_count = opg->ops_to - opg->ops_from;
oap->oap_brw_flags = brw_flags | OBD_BRW_SYNC;
+ if (osc_over_unstable_soft_limit(oap->oap_cli))
+ oap->oap_brw_flags |= OBD_BRW_SOFT_SYNC;
+
if (!client_is_remote(osc_export(obj)) &&
capable(CFS_CAP_SYS_RESOURCE)) {
oap->oap_brw_flags |= OBD_BRW_NOQUOTA;
@@ -483,13 +421,12 @@ void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
*/
static DECLARE_WAIT_QUEUE_HEAD(osc_lru_waitq);
-static atomic_t osc_lru_waiters = ATOMIC_INIT(0);
/* LRU pages are freed in batch mode. OSC should at least free this
* number of pages to avoid running out of LRU budget, and..
*/
static const int lru_shrink_min = 2 << (20 - PAGE_SHIFT); /* 2M */
/* free this number at most otherwise it will take too long time to finish. */
-static const int lru_shrink_max = 32 << (20 - PAGE_SHIFT); /* 32M */
+static const int lru_shrink_max = 8 << (20 - PAGE_SHIFT); /* 8M */
/* Check if we can free LRU slots from this OSC. If there exists LRU waiters,
* we should free slots aggressively. In this way, slots are freed in a steady
@@ -500,65 +437,142 @@ static const int lru_shrink_max = 32 << (20 - PAGE_SHIFT); /* 32M */
static int osc_cache_too_much(struct client_obd *cli)
{
struct cl_client_cache *cache = cli->cl_cache;
- int pages = atomic_read(&cli->cl_lru_in_list) >> 1;
+ int pages = atomic_read(&cli->cl_lru_in_list);
+ unsigned long budget;
- if (atomic_read(&osc_lru_waiters) > 0 &&
- atomic_read(cli->cl_lru_left) < lru_shrink_max)
- /* drop lru pages aggressively */
- return min(pages, lru_shrink_max);
+ budget = cache->ccc_lru_max / atomic_read(&cache->ccc_users);
/* if it's going to run out LRU slots, we should free some, but not
* too much to maintain fairness among OSCs.
*/
if (atomic_read(cli->cl_lru_left) < cache->ccc_lru_max >> 4) {
- unsigned long tmp;
+ if (pages >= budget)
+ return lru_shrink_max;
+ else if (pages >= budget / 2)
+ return lru_shrink_min;
+ } else if (pages >= budget * 2) {
+ return lru_shrink_min;
+ }
+ return 0;
+}
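
A worked example of the new per-OSC budget (numbers invented): if the client
cache allows ccc_lru_max = 65536 slots shared by 4 OSCs, each OSC's budget is
16384 pages. While more than 1/16 of the global slots are still free, an OSC
is only asked to shrink, and only by lru_shrink_min, once it holds at least
twice its budget; once free slots drop below 1/16 of ccc_lru_max, holding a
full budget triggers an aggressive lru_shrink_max shrink and holding half a
budget a lru_shrink_min one.
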
- tmp = cache->ccc_lru_max / atomic_read(&cache->ccc_users);
- if (pages > tmp)
- return min(pages, lru_shrink_max);
+int lru_queue_work(const struct lu_env *env, void *data)
+{
+ struct client_obd *cli = data;
- return pages > lru_shrink_min ? lru_shrink_min : 0;
- }
+ CDEBUG(D_CACHE, "Run LRU work for client obd %p.\n", cli);
+
+ if (osc_cache_too_much(cli))
+ osc_lru_shrink(env, cli, lru_shrink_max, true);
return 0;
}
-/* Return how many pages are not discarded in @pvec. */
-static int discard_pagevec(const struct lu_env *env, struct cl_io *io,
- struct cl_page **pvec, int max_index)
+void osc_lru_add_batch(struct client_obd *cli, struct list_head *plist)
+{
+ LIST_HEAD(lru);
+ struct osc_async_page *oap;
+ int npages = 0;
+
+ list_for_each_entry(oap, plist, oap_pending_item) {
+ struct osc_page *opg = oap2osc_page(oap);
+
+ if (!opg->ops_in_lru)
+ continue;
+
+ ++npages;
+ LASSERT(list_empty(&opg->ops_lru));
+ list_add(&opg->ops_lru, &lru);
+ }
+
+ if (npages > 0) {
+ spin_lock(&cli->cl_lru_list_lock);
+ list_splice_tail(&lru, &cli->cl_lru_list);
+ atomic_sub(npages, &cli->cl_lru_busy);
+ atomic_add(npages, &cli->cl_lru_in_list);
+ spin_unlock(&cli->cl_lru_list_lock);
+
+ /* XXX: May set force to be true for better performance */
+ if (osc_cache_too_much(cli))
+ (void)ptlrpcd_queue_work(cli->cl_lru_work);
+ }
+}
+
+static void __osc_lru_del(struct client_obd *cli, struct osc_page *opg)
+{
+ LASSERT(atomic_read(&cli->cl_lru_in_list) > 0);
+ list_del_init(&opg->ops_lru);
+ atomic_dec(&cli->cl_lru_in_list);
+}
+
+/**
+ * Page is being destroyed. The page may not be on the LRU list if the
+ * transfer never finished (e.g. an error occurred).
+ */
+static void osc_lru_del(struct client_obd *cli, struct osc_page *opg)
+{
+ if (opg->ops_in_lru) {
+ spin_lock(&cli->cl_lru_list_lock);
+ if (!list_empty(&opg->ops_lru)) {
+ __osc_lru_del(cli, opg);
+ } else {
+ LASSERT(atomic_read(&cli->cl_lru_busy) > 0);
+ atomic_dec(&cli->cl_lru_busy);
+ }
+ spin_unlock(&cli->cl_lru_list_lock);
+
+ atomic_inc(cli->cl_lru_left);
+ /* this is a great place to release more LRU pages if
+			 * this OSC occupies too many LRU pages and the kernel
+			 * is reclaiming one of them.
+ */
+ if (!memory_pressure_get())
+ (void)ptlrpcd_queue_work(cli->cl_lru_work);
+ wake_up(&osc_lru_waitq);
+ } else {
+ LASSERT(list_empty(&opg->ops_lru));
+ }
+}
+
+/**
+ * Remove the page from the LRU list because it is about to be used again
+ * (e.g. redirtied).
+ */
+static void osc_lru_use(struct client_obd *cli, struct osc_page *opg)
+{
+ /* If page is being transferred for the first time,
+ * ops_lru should be empty
+ */
+ if (opg->ops_in_lru && !list_empty(&opg->ops_lru)) {
+ spin_lock(&cli->cl_lru_list_lock);
+ __osc_lru_del(cli, opg);
+ spin_unlock(&cli->cl_lru_list_lock);
+ atomic_inc(&cli->cl_lru_busy);
+ }
+}
+
+static void discard_pagevec(const struct lu_env *env, struct cl_io *io,
+ struct cl_page **pvec, int max_index)
{
- int count;
int i;
- for (count = 0, i = 0; i < max_index; i++) {
+ for (i = 0; i < max_index; i++) {
struct cl_page *page = pvec[i];
- if (cl_page_own_try(env, io, page) == 0) {
- /* free LRU page only if nobody is using it.
- * This check is necessary to avoid freeing the pages
- * having already been removed from LRU and pinned
- * for IO.
- */
- if (!cl_page_in_use(page)) {
- cl_page_unmap(env, io, page);
- cl_page_discard(env, io, page);
- ++count;
- }
- cl_page_disown(env, io, page);
- }
+ LASSERT(cl_page_is_owned(page, io));
+ cl_page_discard(env, io, page);
+ cl_page_disown(env, io, page);
cl_page_put(env, page);
+
pvec[i] = NULL;
}
- return max_index - count;
}
/**
* Drop @target of pages from LRU at most.
*/
-int osc_lru_shrink(struct client_obd *cli, int target)
+int osc_lru_shrink(const struct lu_env *env, struct client_obd *cli,
+ int target, bool force)
{
- struct cl_env_nest nest;
- struct lu_env *env;
struct cl_io *io;
struct cl_object *clobj = NULL;
struct cl_page **pvec;
@@ -573,23 +587,31 @@ int osc_lru_shrink(struct client_obd *cli, int target)
if (atomic_read(&cli->cl_lru_in_list) == 0 || target <= 0)
return 0;
- env = cl_env_nested_get(&nest);
- if (IS_ERR(env))
- return PTR_ERR(env);
+ if (!force) {
+ if (atomic_read(&cli->cl_lru_shrinkers) > 0)
+ return -EBUSY;
- pvec = osc_env_info(env)->oti_pvec;
+ if (atomic_inc_return(&cli->cl_lru_shrinkers) > 1) {
+ atomic_dec(&cli->cl_lru_shrinkers);
+ return -EBUSY;
+ }
+ } else {
+ atomic_inc(&cli->cl_lru_shrinkers);
+ }
+
+ pvec = (struct cl_page **)osc_env_info(env)->oti_pvec;
io = &osc_env_info(env)->oti_io;
- client_obd_list_lock(&cli->cl_lru_list_lock);
- atomic_inc(&cli->cl_lru_shrinkers);
+ spin_lock(&cli->cl_lru_list_lock);
maxscan = min(target << 1, atomic_read(&cli->cl_lru_in_list));
list_for_each_entry_safe(opg, temp, &cli->cl_lru_list, ops_lru) {
struct cl_page *page;
+ bool will_free = false;
if (--maxscan < 0)
break;
- page = cl_page_top(opg->ops_cl.cpl_page);
+ page = opg->ops_cl.cpl_page;
if (cl_page_in_use_noref(page)) {
list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
continue;
@@ -600,10 +622,10 @@ int osc_lru_shrink(struct client_obd *cli, int target)
struct cl_object *tmp = page->cp_obj;
cl_object_get(tmp);
- client_obd_list_unlock(&cli->cl_lru_list_lock);
+ spin_unlock(&cli->cl_lru_list_lock);
if (clobj) {
- count -= discard_pagevec(env, io, pvec, index);
+ discard_pagevec(env, io, pvec, index);
index = 0;
cl_io_fini(env, io);
@@ -616,7 +638,7 @@ int osc_lru_shrink(struct client_obd *cli, int target)
io->ci_ignore_layout = 1;
rc = cl_io_init(env, io, CIT_MISC, clobj);
- client_obd_list_lock(&cli->cl_lru_list_lock);
+ spin_lock(&cli->cl_lru_list_lock);
if (rc != 0)
break;
@@ -625,98 +647,54 @@ int osc_lru_shrink(struct client_obd *cli, int target)
continue;
}
- /* move this page to the end of list as it will be discarded
- * soon. The page will be finally removed from LRU list in
- * osc_page_delete().
- */
- list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
+ if (cl_page_own_try(env, io, page) == 0) {
+ if (!cl_page_in_use_noref(page)) {
+ /* remove it from lru list earlier to avoid
+ * lock contention
+ */
+ __osc_lru_del(cli, opg);
+ opg->ops_in_lru = 0; /* will be discarded */
+
+ cl_page_get(page);
+ will_free = true;
+ } else {
+ cl_page_disown(env, io, page);
+ }
+ }
- /* it's okay to grab a refcount here w/o holding lock because
- * it has to grab cl_lru_list_lock to delete the page.
- */
- cl_page_get(page);
- pvec[index++] = page;
- if (++count >= target)
- break;
+ if (!will_free) {
+ list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
+ continue;
+ }
+		/* Don't discard or free the page while cl_lru_list_lock is held */
+ pvec[index++] = page;
if (unlikely(index == OTI_PVEC_SIZE)) {
- client_obd_list_unlock(&cli->cl_lru_list_lock);
- count -= discard_pagevec(env, io, pvec, index);
+ spin_unlock(&cli->cl_lru_list_lock);
+ discard_pagevec(env, io, pvec, index);
index = 0;
- client_obd_list_lock(&cli->cl_lru_list_lock);
+ spin_lock(&cli->cl_lru_list_lock);
}
+
+ if (++count >= target)
+ break;
}
- client_obd_list_unlock(&cli->cl_lru_list_lock);
+ spin_unlock(&cli->cl_lru_list_lock);
if (clobj) {
- count -= discard_pagevec(env, io, pvec, index);
+ discard_pagevec(env, io, pvec, index);
cl_io_fini(env, io);
cl_object_put(env, clobj);
}
- cl_env_nested_put(&nest, env);
atomic_dec(&cli->cl_lru_shrinkers);
- return count > 0 ? count : rc;
-}
-
-static void osc_lru_add(struct client_obd *cli, struct osc_page *opg)
-{
- bool wakeup = false;
-
- if (!opg->ops_in_lru)
- return;
-
- atomic_dec(&cli->cl_lru_busy);
- client_obd_list_lock(&cli->cl_lru_list_lock);
- if (list_empty(&opg->ops_lru)) {
- list_move_tail(&opg->ops_lru, &cli->cl_lru_list);
- atomic_inc_return(&cli->cl_lru_in_list);
- wakeup = atomic_read(&osc_lru_waiters) > 0;
- }
- client_obd_list_unlock(&cli->cl_lru_list_lock);
-
- if (wakeup) {
- osc_lru_shrink(cli, osc_cache_too_much(cli));
+ if (count > 0) {
+ atomic_add(count, cli->cl_lru_left);
wake_up_all(&osc_lru_waitq);
}
-}
-
-/* delete page from LRUlist. The page can be deleted from LRUlist for two
- * reasons: redirtied or deleted from page cache.
- */
-static void osc_lru_del(struct client_obd *cli, struct osc_page *opg, bool del)
-{
- if (opg->ops_in_lru) {
- client_obd_list_lock(&cli->cl_lru_list_lock);
- if (!list_empty(&opg->ops_lru)) {
- LASSERT(atomic_read(&cli->cl_lru_in_list) > 0);
- list_del_init(&opg->ops_lru);
- atomic_dec(&cli->cl_lru_in_list);
- if (!del)
- atomic_inc(&cli->cl_lru_busy);
- } else if (del) {
- LASSERT(atomic_read(&cli->cl_lru_busy) > 0);
- atomic_dec(&cli->cl_lru_busy);
- }
- client_obd_list_unlock(&cli->cl_lru_list_lock);
- if (del) {
- atomic_inc(cli->cl_lru_left);
- /* this is a great place to release more LRU pages if
- * this osc occupies too many LRU pages and kernel is
- * stealing one of them.
- * cl_lru_shrinkers is to avoid recursive call in case
- * we're already in the context of osc_lru_shrink().
- */
- if (atomic_read(&cli->cl_lru_shrinkers) == 0 &&
- !memory_pressure_get())
- osc_lru_shrink(cli, osc_cache_too_much(cli));
- wake_up(&osc_lru_waitq);
- }
- } else {
- LASSERT(list_empty(&opg->ops_lru));
- }
+ return count > 0 ? count : rc;
}
static inline int max_to_shrink(struct client_obd *cli)
@@ -724,19 +702,28 @@ static inline int max_to_shrink(struct client_obd *cli)
return min(atomic_read(&cli->cl_lru_in_list) >> 1, lru_shrink_max);
}
-static int osc_lru_reclaim(struct client_obd *cli)
+int osc_lru_reclaim(struct client_obd *cli)
{
+ struct cl_env_nest nest;
+ struct lu_env *env;
struct cl_client_cache *cache = cli->cl_cache;
int max_scans;
- int rc;
+ int rc = 0;
LASSERT(cache);
- rc = osc_lru_shrink(cli, lru_shrink_min);
+ env = cl_env_nested_get(&nest);
+ if (IS_ERR(env))
+ return 0;
+
+ rc = osc_lru_shrink(env, cli, osc_cache_too_much(cli), false);
if (rc != 0) {
+ if (rc == -EBUSY)
+ rc = 0;
+
CDEBUG(D_CACHE, "%s: Free %d pages from own LRU: %p.\n",
cli->cl_import->imp_obd->obd_name, rc, cli);
- return rc;
+ goto out;
}
CDEBUG(D_CACHE, "%s: cli %p no free slots, pages: %d, busy: %d.\n",
@@ -764,10 +751,11 @@ static int osc_lru_reclaim(struct client_obd *cli)
atomic_read(&cli->cl_lru_busy));
list_move_tail(&cli->cl_lru_osc, &cache->ccc_lru);
- if (atomic_read(&cli->cl_lru_in_list) > 0) {
+ if (osc_cache_too_much(cli) > 0) {
spin_unlock(&cache->ccc_lru_lock);
- rc = osc_lru_shrink(cli, max_to_shrink(cli));
+ rc = osc_lru_shrink(env, cli, osc_cache_too_much(cli),
+ true);
spin_lock(&cache->ccc_lru_lock);
if (rc != 0)
break;
@@ -775,6 +763,8 @@ static int osc_lru_reclaim(struct client_obd *cli)
}
spin_unlock(&cache->ccc_lru_lock);
+out:
+ cl_env_nested_put(&nest, env);
CDEBUG(D_CACHE, "%s: cli %p freed %d pages.\n",
cli->cl_import->imp_obd->obd_name, cli, rc);
return rc;
@@ -784,16 +774,20 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
struct osc_page *opg)
{
struct l_wait_info lwi = LWI_INTR(LWI_ON_SIGNAL_NOOP, NULL);
+ struct osc_io *oio = osc_env_io(env);
struct client_obd *cli = osc_cli(obj);
int rc = 0;
if (!cli->cl_cache) /* shall not be in LRU */
return 0;
+ if (oio->oi_lru_reserved > 0) {
+ --oio->oi_lru_reserved;
+ goto out;
+ }
+
LASSERT(atomic_read(cli->cl_lru_left) >= 0);
while (!atomic_add_unless(cli->cl_lru_left, -1, 0)) {
- int gen;
-
/* run out of LRU spaces, try to drop some by itself */
rc = osc_lru_reclaim(cli);
if (rc < 0)
@@ -803,23 +797,15 @@ static int osc_lru_reserve(const struct lu_env *env, struct osc_object *obj,
cond_resched();
- /* slowest case, all of caching pages are busy, notifying
- * other OSCs that we're lack of LRU slots.
- */
- atomic_inc(&osc_lru_waiters);
-
- gen = atomic_read(&cli->cl_lru_in_list);
rc = l_wait_event(osc_lru_waitq,
- atomic_read(cli->cl_lru_left) > 0 ||
- (atomic_read(&cli->cl_lru_in_list) > 0 &&
- gen != atomic_read(&cli->cl_lru_in_list)),
+ atomic_read(cli->cl_lru_left) > 0,
&lwi);
- atomic_dec(&osc_lru_waiters);
if (rc < 0)
break;
}
+out:
if (rc >= 0) {
atomic_inc(&cli->cl_lru_busy);
opg->ops_in_lru = 1;
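
Condensed, the reservation path after this patch reads roughly as below (a
sketch only; the oi_lru_reserved batch is assumed to be pre-charged by the
IO-level code elsewhere in the series):

	if (oio->oi_lru_reserved > 0) {
		/* fast path: consume one slot from the per-IO reservation */
		--oio->oi_lru_reserved;
		goto out;
	}

	while (!atomic_add_unless(cli->cl_lru_left, -1, 0)) {
		/* no free slots: shrink our own LRU, then wait for slots */
		rc = osc_lru_reclaim(cli);
		if (rc < 0)
			break;
		cond_resched();
		rc = l_wait_event(osc_lru_waitq,
				  atomic_read(cli->cl_lru_left) > 0, &lwi);
		if (rc < 0)
			break;
	}
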
diff --git a/drivers/staging/lustre/lustre/osc/osc_request.c b/drivers/staging/lustre/lustre/osc/osc_request.c
index 30526ebcad04..47417f88fe3c 100644
--- a/drivers/staging/lustre/lustre/osc/osc_request.c
+++ b/drivers/staging/lustre/lustre/osc/osc_request.c
@@ -92,12 +92,13 @@ struct osc_fsync_args {
struct osc_enqueue_args {
struct obd_export *oa_exp;
+ enum ldlm_type oa_type;
+ enum ldlm_mode oa_mode;
__u64 *oa_flags;
- obd_enqueue_update_f oa_upcall;
+ osc_enqueue_upcall_f oa_upcall;
void *oa_cookie;
struct ost_lvb *oa_lvb;
- struct lustre_handle *oa_lockh;
- struct ldlm_enqueue_info *oa_ei;
+ struct lustre_handle oa_lockh;
unsigned int oa_agl:1;
};
@@ -801,21 +802,24 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
LASSERT(!(oa->o_valid & bits));
oa->o_valid |= bits;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
oa->o_dirty = cli->cl_dirty;
if (unlikely(cli->cl_dirty - cli->cl_dirty_transit >
cli->cl_dirty_max)) {
CERROR("dirty %lu - %lu > dirty_max %lu\n",
cli->cl_dirty, cli->cl_dirty_transit, cli->cl_dirty_max);
oa->o_undirty = 0;
- } else if (unlikely(atomic_read(&obd_dirty_pages) -
+ } else if (unlikely(atomic_read(&obd_unstable_pages) +
+ atomic_read(&obd_dirty_pages) -
atomic_read(&obd_dirty_transit_pages) >
(long)(obd_max_dirty_pages + 1))) {
/* The atomic_read() allowing the atomic_inc() are
* not covered by a lock thus they may safely race and trip
* this CERROR() unless we add in a small fudge factor (+1).
*/
- CERROR("dirty %d - %d > system dirty_max %d\n",
+ CERROR("%s: dirty %d + %d - %d > system dirty_max %d\n",
+ cli->cl_import->imp_obd->obd_name,
+ atomic_read(&obd_unstable_pages),
atomic_read(&obd_dirty_pages),
atomic_read(&obd_dirty_transit_pages),
obd_max_dirty_pages);
@@ -833,10 +837,9 @@ static void osc_announce_cached(struct client_obd *cli, struct obdo *oa,
oa->o_grant = cli->cl_avail_grant + cli->cl_reserved_grant;
oa->o_dropped = cli->cl_lost_grant;
cli->cl_lost_grant = 0;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
CDEBUG(D_CACHE, "dirty: %llu undirty: %u dropped %u grant: %llu\n",
oa->o_dirty, oa->o_undirty, oa->o_dropped, oa->o_grant);
-
}
void osc_update_next_shrink(struct client_obd *cli)
@@ -849,9 +852,9 @@ void osc_update_next_shrink(struct client_obd *cli)
static void __osc_update_grant(struct client_obd *cli, u64 grant)
{
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
cli->cl_avail_grant += grant;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
}
static void osc_update_grant(struct client_obd *cli, struct ost_body *body)
@@ -889,10 +892,10 @@ out:
static void osc_shrink_grant_local(struct client_obd *cli, struct obdo *oa)
{
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
oa->o_grant = cli->cl_avail_grant / 4;
cli->cl_avail_grant -= oa->o_grant;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
if (!(oa->o_valid & OBD_MD_FLFLAGS)) {
oa->o_valid |= OBD_MD_FLFLAGS;
oa->o_flags = 0;
@@ -911,10 +914,10 @@ static int osc_shrink_grant(struct client_obd *cli)
__u64 target_bytes = (cli->cl_max_rpcs_in_flight + 1) *
(cli->cl_max_pages_per_rpc << PAGE_SHIFT);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
if (cli->cl_avail_grant <= target_bytes)
target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return osc_shrink_grant_to_target(cli, target_bytes);
}
@@ -924,7 +927,7 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
int rc = 0;
struct ost_body *body;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
/* Don't shrink if we are already above or below the desired limit
* We don't want to shrink below a single RPC, as that will negatively
* impact block allocation and long-term performance.
@@ -933,10 +936,10 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
target_bytes = cli->cl_max_pages_per_rpc << PAGE_SHIFT;
if (target_bytes >= cli->cl_avail_grant) {
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
return 0;
}
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
body = kzalloc(sizeof(*body), GFP_NOFS);
if (!body)
@@ -944,10 +947,10 @@ int osc_shrink_grant_to_target(struct client_obd *cli, __u64 target_bytes)
osc_announce_cached(cli, &body->oa, 0);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
body->oa.o_grant = cli->cl_avail_grant - target_bytes;
cli->cl_avail_grant = target_bytes;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
if (!(body->oa.o_valid & OBD_MD_FLFLAGS)) {
body->oa.o_valid |= OBD_MD_FLFLAGS;
body->oa.o_flags = 0;
@@ -1035,7 +1038,7 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
* race is tolerable here: if we're evicted, but imp_state already
* left EVICTED state, then cl_dirty must be 0 already.
*/
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
if (cli->cl_import->imp_state == LUSTRE_IMP_EVICTED)
cli->cl_avail_grant = ocd->ocd_grant;
else
@@ -1053,7 +1056,7 @@ static void osc_init_grant(struct client_obd *cli, struct obd_connect_data *ocd)
/* determine the appropriate chunk size used by osc_extent. */
cli->cl_chunkbits = max_t(int, PAGE_SHIFT, ocd->ocd_blocksize);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
CDEBUG(D_CACHE, "%s, setting cl_avail_grant: %ld cl_lost_grant: %ld chunk bits: %d\n",
cli->cl_import->imp_obd->obd_name,
@@ -1082,7 +1085,7 @@ static void handle_short_read(int nob_read, u32 page_count,
if (pga[i]->count > nob_read) {
/* EOF inside this page */
ptr = kmap(pga[i]->pg) +
- (pga[i]->off & ~CFS_PAGE_MASK);
+ (pga[i]->off & ~PAGE_MASK);
memset(ptr + nob_read, 0, pga[i]->count - nob_read);
kunmap(pga[i]->pg);
page_count--;
@@ -1097,7 +1100,7 @@ static void handle_short_read(int nob_read, u32 page_count,
/* zero remaining pages */
while (page_count-- > 0) {
- ptr = kmap(pga[i]->pg) + (pga[i]->off & ~CFS_PAGE_MASK);
+ ptr = kmap(pga[i]->pg) + (pga[i]->off & ~PAGE_MASK);
memset(ptr, 0, pga[i]->count);
kunmap(pga[i]->pg);
i++;
@@ -1144,7 +1147,8 @@ static inline int can_merge_pages(struct brw_page *p1, struct brw_page *p2)
{
if (p1->flag != p2->flag) {
unsigned mask = ~(OBD_BRW_FROM_GRANT | OBD_BRW_NOCACHE |
- OBD_BRW_SYNC | OBD_BRW_ASYNC|OBD_BRW_NOQUOTA);
+ OBD_BRW_SYNC | OBD_BRW_ASYNC |
+ OBD_BRW_NOQUOTA | OBD_BRW_SOFT_SYNC);
/* warn if we try to combine flags that we don't know to be
* safe to combine
@@ -1188,32 +1192,29 @@ static u32 osc_checksum_bulk(int nob, u32 pg_count,
if (i == 0 && opc == OST_READ &&
OBD_FAIL_CHECK(OBD_FAIL_OSC_CHECKSUM_RECEIVE)) {
unsigned char *ptr = kmap(pga[i]->pg);
- int off = pga[i]->off & ~CFS_PAGE_MASK;
+ int off = pga[i]->off & ~PAGE_MASK;
memcpy(ptr + off, "bad1", min(4, nob));
kunmap(pga[i]->pg);
}
cfs_crypto_hash_update_page(hdesc, pga[i]->pg,
- pga[i]->off & ~CFS_PAGE_MASK,
+ pga[i]->off & ~PAGE_MASK,
count);
CDEBUG(D_PAGE,
"page %p map %p index %lu flags %lx count %u priv %0lx: off %d\n",
pga[i]->pg, pga[i]->pg->mapping, pga[i]->pg->index,
(long)pga[i]->pg->flags, page_count(pga[i]->pg),
page_private(pga[i]->pg),
- (int)(pga[i]->off & ~CFS_PAGE_MASK));
+ (int)(pga[i]->off & ~PAGE_MASK));
nob -= pga[i]->count;
pg_count--;
i++;
}
- bufsize = 4;
+ bufsize = sizeof(cksum);
err = cfs_crypto_hash_final(hdesc, (unsigned char *)&cksum, &bufsize);
- if (err)
- cfs_crypto_hash_final(hdesc, NULL, NULL);
-
/* For sending we only compute the wrong checksum instead
* of corrupting the data so it is still correct on a redo
*/
@@ -1312,7 +1313,7 @@ static int osc_brw_prep_request(int cmd, struct client_obd *cli,
pg_prev = pga[0];
for (requested_nob = i = 0; i < page_count; i++, niobuf++) {
struct brw_page *pg = pga[i];
- int poff = pg->off & ~CFS_PAGE_MASK;
+ int poff = pg->off & ~PAGE_MASK;
LASSERT(pg->count > 0);
/* make sure there is no gap in the middle of page array */
@@ -1658,6 +1659,7 @@ static int osc_brw_redo_request(struct ptlrpc_request *request,
aa->aa_resends++;
new_req->rq_interpret_reply = request->rq_interpret_reply;
new_req->rq_async_args = request->rq_async_args;
+ new_req->rq_commit_cb = request->rq_commit_cb;
/* cap resend delay to the current request timeout, this is similar to
* what ptlrpc does (see after_reply())
*/
@@ -1737,7 +1739,6 @@ static int brw_interpret(const struct lu_env *env,
struct osc_brw_async_args *aa = data;
struct osc_extent *ext;
struct osc_extent *tmp;
- struct cl_object *obj = NULL;
struct client_obd *cli = aa->aa_cli;
rc = osc_brw_fini_request(req, rc);
@@ -1766,24 +1767,17 @@ static int brw_interpret(const struct lu_env *env,
rc = -EIO;
}
- list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) {
- if (!obj && rc == 0) {
- obj = osc2cl(ext->oe_obj);
- cl_object_get(obj);
- }
-
- list_del_init(&ext->oe_link);
- osc_extent_finish(env, ext, 1, rc);
- }
- LASSERT(list_empty(&aa->aa_exts));
- LASSERT(list_empty(&aa->aa_oaps));
-
- if (obj) {
+ if (rc == 0) {
struct obdo *oa = aa->aa_oa;
struct cl_attr *attr = &osc_env_info(env)->oti_attr;
unsigned long valid = 0;
+ struct cl_object *obj;
+ struct osc_async_page *last;
+
+ last = brw_page2oap(aa->aa_ppga[aa->aa_page_count - 1]);
+ obj = osc2cl(last->oap_obj);
- LASSERT(rc == 0);
+ cl_object_attr_lock(obj);
if (oa->o_valid & OBD_MD_FLBLOCKS) {
attr->cat_blocks = oa->o_blocks;
valid |= CAT_BLOCKS;
@@ -1800,21 +1794,45 @@ static int brw_interpret(const struct lu_env *env,
attr->cat_ctime = oa->o_ctime;
valid |= CAT_CTIME;
}
- if (valid != 0) {
- cl_object_attr_lock(obj);
- cl_object_attr_set(env, obj, attr, valid);
- cl_object_attr_unlock(obj);
+
+ if (lustre_msg_get_opc(req->rq_reqmsg) == OST_WRITE) {
+ struct lov_oinfo *loi = cl2osc(obj)->oo_oinfo;
+ loff_t last_off = last->oap_count + last->oap_obj_off;
+
+ /* Change file size if this is an out of quota or
+ * direct IO write and it extends the file size
+ */
+ if (loi->loi_lvb.lvb_size < last_off) {
+ attr->cat_size = last_off;
+ valid |= CAT_SIZE;
+ }
+ /* Extend KMS if it's not a lockless write */
+ if (loi->loi_kms < last_off &&
+ oap2osc_page(last)->ops_srvlock == 0) {
+ attr->cat_kms = last_off;
+ valid |= CAT_KMS;
+ }
}
- cl_object_put(env, obj);
+
+ if (valid != 0)
+ cl_object_attr_set(env, obj, attr, valid);
+ cl_object_attr_unlock(obj);
}
kmem_cache_free(obdo_cachep, aa->aa_oa);
+ list_for_each_entry_safe(ext, tmp, &aa->aa_exts, oe_link) {
+ list_del_init(&ext->oe_link);
+ osc_extent_finish(env, ext, 1, rc);
+ }
+ LASSERT(list_empty(&aa->aa_exts));
+ LASSERT(list_empty(&aa->aa_oaps));
+
cl_req_completion(env, aa->aa_clerq, rc < 0 ? rc :
req->rq_bulk->bd_nob_transferred);
osc_release_ppga(aa->aa_ppga, aa->aa_page_count);
ptlrpc_lprocfs_brw(req, req->rq_bulk->bd_nob_transferred);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
/* We need to decrement before osc_ap_completion->osc_wake_cache_waiters
* is called so we know whether to go to sync BRWs or wait for more
* RPCs to complete
@@ -1824,12 +1842,31 @@ static int brw_interpret(const struct lu_env *env,
else
cli->cl_r_in_flight--;
osc_wake_cache_waiters(cli);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
osc_io_unplug(env, cli, NULL);
return rc;
}
+static void brw_commit(struct ptlrpc_request *req)
+{
+ spin_lock(&req->rq_lock);
+ /*
+	 * If osc_inc_unstable_pages (via osc_extent_finish) races with
+	 * this commit callback (rq_commit_cb), we need to ensure that
+	 * osc_dec_unstable_pages is still called. Otherwise unstable
+	 * pages may be leaked.
+ */
+ if (req->rq_unstable) {
+ spin_unlock(&req->rq_lock);
+ osc_dec_unstable_pages(req);
+ spin_lock(&req->rq_lock);
+ } else {
+ req->rq_committed = 1;
+ }
+ spin_unlock(&req->rq_lock);
+}
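
The two interleavings the comment above worries about, written out as a
timeline (a reading aid, not patch code; the exact behaviour of
osc_inc_unstable_pages() when it finds rq_committed set is an assumption,
since that function is outside this hunk):

	/*
	 * accounting first:
	 *   osc_inc_unstable_pages()  ->  rq_unstable = 1
	 *   brw_commit()              ->  osc_dec_unstable_pages()
	 *
	 * commit first:
	 *   brw_commit()              ->  rq_committed = 1
	 *   osc_inc_unstable_pages()  ->  presumably sees rq_committed and
	 *                                 skips, or immediately undoes, the
	 *                                 unstable accounting
	 */
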
+
/**
* Build an RPC by the list of extent @ext_list. The caller must ensure
* that the total pages in this list are NOT over max pages per RPC.
@@ -1920,7 +1957,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
pga[i] = &oap->oap_brw_page;
pga[i]->off = oap->oap_obj_off + oap->oap_page_off;
CDEBUG(0, "put page %p index %lu oap %p flg %x to pga\n",
- pga[i]->pg, page_index(oap->oap_page), oap,
+ pga[i]->pg, oap->oap_page->index, oap,
pga[i]->flag);
i++;
cl_req_page_add(env, clerq, page);
@@ -1949,6 +1986,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
goto out;
}
+ req->rq_commit_cb = brw_commit;
req->rq_interpret_reply = brw_interpret;
if (mem_tight != 0)
@@ -1992,7 +2030,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
if (tmp)
tmp->oap_request = ptlrpc_request_addref(req);
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
starting_offset >>= PAGE_SHIFT;
if (cmd == OBD_BRW_READ) {
cli->cl_r_in_flight++;
@@ -2007,7 +2045,7 @@ int osc_build_rpc(const struct lu_env *env, struct client_obd *cli,
lprocfs_oh_tally_log2(&cli->cl_write_offset_hist,
starting_offset + 1);
}
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
DEBUG_REQ(D_INODE, req, "%d pages, aa %p. now %dr/%dw in flight",
page_count, aa, cli->cl_r_in_flight,
@@ -2055,14 +2093,12 @@ static int osc_set_lock_data_with_check(struct ldlm_lock *lock,
LASSERT(lock->l_glimpse_ast == einfo->ei_cb_gl);
lock_res_and_lock(lock);
- spin_lock(&osc_ast_guard);
if (!lock->l_ast_data)
lock->l_ast_data = data;
if (lock->l_ast_data == data)
set = 1;
- spin_unlock(&osc_ast_guard);
unlock_res_and_lock(lock);
return set;
@@ -2104,36 +2140,38 @@ static int osc_find_cbdata(struct obd_export *exp, struct lov_stripe_md *lsm,
return rc;
}
-static int osc_enqueue_fini(struct ptlrpc_request *req, struct ost_lvb *lvb,
- obd_enqueue_update_f upcall, void *cookie,
- __u64 *flags, int agl, int rc)
+static int osc_enqueue_fini(struct ptlrpc_request *req,
+ osc_enqueue_upcall_f upcall, void *cookie,
+ struct lustre_handle *lockh, enum ldlm_mode mode,
+ __u64 *flags, int agl, int errcode)
{
- int intent = *flags & LDLM_FL_HAS_INTENT;
-
- if (intent) {
- /* The request was created before ldlm_cli_enqueue call. */
- if (rc == ELDLM_LOCK_ABORTED) {
- struct ldlm_reply *rep;
+ bool intent = *flags & LDLM_FL_HAS_INTENT;
+ int rc;
- rep = req_capsule_server_get(&req->rq_pill,
- &RMF_DLM_REP);
+ /* The request was created before ldlm_cli_enqueue call. */
+ if (intent && errcode == ELDLM_LOCK_ABORTED) {
+ struct ldlm_reply *rep;
- rep->lock_policy_res1 =
- ptlrpc_status_ntoh(rep->lock_policy_res1);
- if (rep->lock_policy_res1)
- rc = rep->lock_policy_res1;
- }
- }
+ rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
- if ((intent != 0 && rc == ELDLM_LOCK_ABORTED && agl == 0) ||
- (rc == 0)) {
+ rep->lock_policy_res1 =
+ ptlrpc_status_ntoh(rep->lock_policy_res1);
+ if (rep->lock_policy_res1)
+ errcode = rep->lock_policy_res1;
+ if (!agl)
+ *flags |= LDLM_FL_LVB_READY;
+ } else if (errcode == ELDLM_OK) {
*flags |= LDLM_FL_LVB_READY;
- CDEBUG(D_INODE, "got kms %llu blocks %llu mtime %llu\n",
- lvb->lvb_size, lvb->lvb_blocks, lvb->lvb_mtime);
}
/* Call the update callback. */
- rc = (*upcall)(cookie, rc);
+ rc = (*upcall)(cookie, lockh, errcode);
+ /* release the reference taken in ldlm_cli_enqueue() */
+ if (errcode == ELDLM_LOCK_MATCHED)
+ errcode = ELDLM_OK;
+ if (errcode == ELDLM_OK && lustre_handle_is_used(lockh))
+ ldlm_lock_decref(lockh, mode);
+
return rc;
}
@@ -2142,62 +2180,50 @@ static int osc_enqueue_interpret(const struct lu_env *env,
struct osc_enqueue_args *aa, int rc)
{
struct ldlm_lock *lock;
- struct lustre_handle handle;
- __u32 mode;
- struct ost_lvb *lvb;
- __u32 lvb_len;
- __u64 *flags = aa->oa_flags;
-
- /* Make a local copy of a lock handle and a mode, because aa->oa_*
- * might be freed anytime after lock upcall has been called.
- */
- lustre_handle_copy(&handle, aa->oa_lockh);
- mode = aa->oa_ei->ei_mode;
+ struct lustre_handle *lockh = &aa->oa_lockh;
+ enum ldlm_mode mode = aa->oa_mode;
+ struct ost_lvb *lvb = aa->oa_lvb;
+ __u32 lvb_len = sizeof(*lvb);
+ __u64 flags = 0;
+
/* ldlm_cli_enqueue is holding a reference on the lock, so it must
* be valid.
*/
- lock = ldlm_handle2lock(&handle);
+ lock = ldlm_handle2lock(lockh);
+ LASSERTF(lock, "lockh %llx, req %p, aa %p - client evicted?\n",
+ lockh->cookie, req, aa);
/* Take an additional reference so that a blocking AST that
* ldlm_cli_enqueue_fini() might post for a failed lock, is guaranteed
* to arrive after an upcall has been executed by
* osc_enqueue_fini().
*/
- ldlm_lock_addref(&handle, mode);
+ ldlm_lock_addref(lockh, mode);
+
+ /* Let cl_lock_state_wait fail with -ERESTARTSYS to unuse sublocks. */
+ OBD_FAIL_TIMEOUT(OBD_FAIL_LDLM_ENQUEUE_HANG, 2);
/* Let CP AST to grant the lock first. */
OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_ENQ_RACE, 1);
- if (aa->oa_agl && rc == ELDLM_LOCK_ABORTED) {
- lvb = NULL;
- lvb_len = 0;
- } else {
- lvb = aa->oa_lvb;
- lvb_len = sizeof(*aa->oa_lvb);
+ if (aa->oa_agl) {
+ LASSERT(!aa->oa_lvb);
+ LASSERT(!aa->oa_flags);
+ aa->oa_flags = &flags;
}
/* Complete obtaining the lock procedure. */
- rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_ei->ei_type, 1,
- mode, flags, lvb, lvb_len, &handle, rc);
+ rc = ldlm_cli_enqueue_fini(aa->oa_exp, req, aa->oa_type, 1,
+ aa->oa_mode, aa->oa_flags, lvb, lvb_len,
+ lockh, rc);
/* Complete osc stuff. */
- rc = osc_enqueue_fini(req, aa->oa_lvb, aa->oa_upcall, aa->oa_cookie,
- flags, aa->oa_agl, rc);
+ rc = osc_enqueue_fini(req, aa->oa_upcall, aa->oa_cookie, lockh, mode,
+ aa->oa_flags, aa->oa_agl, rc);
OBD_FAIL_TIMEOUT(OBD_FAIL_OSC_CP_CANCEL_RACE, 10);
- /* Release the lock for async request. */
- if (lustre_handle_is_used(&handle) && rc == ELDLM_OK)
- /*
- * Releases a reference taken by ldlm_cli_enqueue(), if it is
- * not already released by
- * ldlm_cli_enqueue_fini()->failed_lock_cleanup()
- */
- ldlm_lock_decref(&handle, mode);
-
- LASSERTF(lock, "lockh %p, req %p, aa %p - client evicted?\n",
- aa->oa_lockh, req, aa);
- ldlm_lock_decref(&handle, mode);
+ ldlm_lock_decref(lockh, mode);
LDLM_LOCK_PUT(lock);
return rc;
}
@@ -2209,29 +2235,29 @@ struct ptlrpc_request_set *PTLRPCD_SET = (void *)1;
* other synchronous requests, however keeping some locks and trying to obtain
* others may take a considerable amount of time in a case of ost failure; and
* when other sync requests do not get released lock from a client, the client
- * is excluded from the cluster -- such scenarious make the life difficult, so
+ * is evicted from the cluster -- such scenarios make life difficult, so
* release locks just after they are obtained.
*/
int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
__u64 *flags, ldlm_policy_data_t *policy,
struct ost_lvb *lvb, int kms_valid,
- obd_enqueue_update_f upcall, void *cookie,
+ osc_enqueue_upcall_f upcall, void *cookie,
struct ldlm_enqueue_info *einfo,
- struct lustre_handle *lockh,
struct ptlrpc_request_set *rqset, int async, int agl)
{
struct obd_device *obd = exp->exp_obd;
+ struct lustre_handle lockh = { 0 };
struct ptlrpc_request *req = NULL;
int intent = *flags & LDLM_FL_HAS_INTENT;
- __u64 match_lvb = (agl != 0 ? 0 : LDLM_FL_LVB_READY);
+ __u64 match_lvb = agl ? 0 : LDLM_FL_LVB_READY;
enum ldlm_mode mode;
int rc;
/* Filesystem lock extents are extended to page boundaries so that
* dealing with the page cache is a little smoother.
*/
- policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
- policy->l_extent.end |= ~CFS_PAGE_MASK;
+ policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
+ policy->l_extent.end |= ~PAGE_MASK;
/*
* kms is not valid when either object is completely fresh (so that no
@@ -2259,64 +2285,46 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
if (einfo->ei_mode == LCK_PR)
mode |= LCK_PW;
mode = ldlm_lock_match(obd->obd_namespace, *flags | match_lvb, res_id,
- einfo->ei_type, policy, mode, lockh, 0);
+ einfo->ei_type, policy, mode, &lockh, 0);
if (mode) {
- struct ldlm_lock *matched = ldlm_handle2lock(lockh);
+ struct ldlm_lock *matched;
- if ((agl != 0) && !(matched->l_flags & LDLM_FL_LVB_READY)) {
- /* For AGL, if enqueue RPC is sent but the lock is not
- * granted, then skip to process this strpe.
- * Return -ECANCELED to tell the caller.
+ if (*flags & LDLM_FL_TEST_LOCK)
+ return ELDLM_OK;
+
+ matched = ldlm_handle2lock(&lockh);
+ if (agl) {
+			/* AGL enqueues DLM locks speculatively. Therefore if
+			 * a matching DLM lock already exists, just tell the
+			 * caller to cancel the AGL process for this stripe.
*/
- ldlm_lock_decref(lockh, mode);
+ ldlm_lock_decref(&lockh, mode);
LDLM_LOCK_PUT(matched);
return -ECANCELED;
- }
-
- if (osc_set_lock_data_with_check(matched, einfo)) {
+ } else if (osc_set_lock_data_with_check(matched, einfo)) {
*flags |= LDLM_FL_LVB_READY;
- /* addref the lock only if not async requests and PW
- * lock is matched whereas we asked for PR.
- */
- if (!rqset && einfo->ei_mode != mode)
- ldlm_lock_addref(lockh, LCK_PR);
- if (intent) {
- /* I would like to be able to ASSERT here that
- * rss <= kms, but I can't, for reasons which
- * are explained in lov_enqueue()
- */
- }
-
- /* We already have a lock, and it's referenced.
- *
- * At this point, the cl_lock::cll_state is CLS_QUEUING,
- * AGL upcall may change it to CLS_HELD directly.
- */
- (*upcall)(cookie, ELDLM_OK);
+ /* We already have a lock, and it's referenced. */
+ (*upcall)(cookie, &lockh, ELDLM_LOCK_MATCHED);
- if (einfo->ei_mode != mode)
- ldlm_lock_decref(lockh, LCK_PW);
- else if (rqset)
- /* For async requests, decref the lock. */
- ldlm_lock_decref(lockh, einfo->ei_mode);
+ ldlm_lock_decref(&lockh, mode);
LDLM_LOCK_PUT(matched);
return ELDLM_OK;
+ } else {
+ ldlm_lock_decref(&lockh, mode);
+ LDLM_LOCK_PUT(matched);
}
-
- ldlm_lock_decref(lockh, mode);
- LDLM_LOCK_PUT(matched);
}
- no_match:
+no_match:
+ if (*flags & LDLM_FL_TEST_LOCK)
+ return -ENOLCK;
if (intent) {
- LIST_HEAD(cancels);
-
req = ptlrpc_request_alloc(class_exp2cliimp(exp),
&RQF_LDLM_ENQUEUE_LVB);
if (!req)
return -ENOMEM;
- rc = ldlm_prep_enqueue_req(exp, req, &cancels, 0);
+ rc = ldlm_prep_enqueue_req(exp, req, NULL, 0);
if (rc) {
ptlrpc_request_free(req);
return rc;
@@ -2331,21 +2339,31 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
*flags &= ~LDLM_FL_BLOCK_GRANTED;
rc = ldlm_cli_enqueue(exp, &req, einfo, res_id, policy, flags, lvb,
- sizeof(*lvb), LVB_T_OST, lockh, async);
- if (rqset) {
+ sizeof(*lvb), LVB_T_OST, &lockh, async);
+ if (async) {
if (!rc) {
struct osc_enqueue_args *aa;
- CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
+ CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
aa = ptlrpc_req_async_args(req);
- aa->oa_ei = einfo;
aa->oa_exp = exp;
- aa->oa_flags = flags;
+ aa->oa_mode = einfo->ei_mode;
+ aa->oa_type = einfo->ei_type;
+ lustre_handle_copy(&aa->oa_lockh, &lockh);
aa->oa_upcall = upcall;
aa->oa_cookie = cookie;
- aa->oa_lvb = lvb;
- aa->oa_lockh = lockh;
aa->oa_agl = !!agl;
+ if (!agl) {
+ aa->oa_flags = flags;
+ aa->oa_lvb = lvb;
+ } else {
+				/* AGL essentially enqueues a DLM lock in
+				 * advance, so we do not care about the
+				 * result of the AGL enqueue.
+ */
+ aa->oa_lvb = NULL;
+ aa->oa_flags = NULL;
+ }
req->rq_interpret_reply =
(ptlrpc_interpterer_t)osc_enqueue_interpret;
@@ -2359,7 +2377,8 @@ int osc_enqueue_base(struct obd_export *exp, struct ldlm_res_id *res_id,
return rc;
}
- rc = osc_enqueue_fini(req, lvb, upcall, cookie, flags, agl, rc);
+ rc = osc_enqueue_fini(req, upcall, cookie, &lockh, einfo->ei_mode,
+ flags, agl, rc);
if (intent)
ptlrpc_req_finished(req);
@@ -2381,8 +2400,8 @@ int osc_match_base(struct obd_export *exp, struct ldlm_res_id *res_id,
/* Filesystem lock extents are extended to page boundaries so that
* dealing with the page cache is a little smoother
*/
- policy->l_extent.start -= policy->l_extent.start & ~CFS_PAGE_MASK;
- policy->l_extent.end |= ~CFS_PAGE_MASK;
+ policy->l_extent.start -= policy->l_extent.start & ~PAGE_MASK;
+ policy->l_extent.end |= ~PAGE_MASK;
/* Next, search for already existing extent locks that will cover us */
/* If we're trying to read, we also search for an existing PW lock. The
@@ -2493,7 +2512,7 @@ static int osc_statfs_async(struct obd_export *exp,
}
req->rq_interpret_reply = (ptlrpc_interpterer_t)osc_statfs_interpret;
- CLASSERT (sizeof(*aa) <= sizeof(req->rq_async_args));
+ CLASSERT(sizeof(*aa) <= sizeof(req->rq_async_args));
aa = ptlrpc_req_async_args(req);
aa->aa_oi = oinfo;
@@ -2787,7 +2806,7 @@ out:
goto skip_locking;
policy.l_extent.start = fm_key->fiemap.fm_start &
- CFS_PAGE_MASK;
+ PAGE_MASK;
if (OBD_OBJECT_EOF - fm_key->fiemap.fm_length <=
fm_key->fiemap.fm_start + PAGE_SIZE - 1)
@@ -2795,7 +2814,7 @@ out:
else
policy.l_extent.end = (fm_key->fiemap.fm_start +
fm_key->fiemap.fm_length +
- PAGE_SIZE - 1) & CFS_PAGE_MASK;
+ PAGE_SIZE - 1) & PAGE_MASK;
ostid_build_res_name(&fm_key->oa.o_oi, &res_id);
mode = ldlm_lock_match(exp->exp_obd->obd_namespace,
@@ -2913,7 +2932,7 @@ static int osc_set_info_async(const struct lu_env *env, struct obd_export *exp,
int nr = atomic_read(&cli->cl_lru_in_list) >> 1;
int target = *(int *)val;
- nr = osc_lru_shrink(cli, min(nr, target));
+ nr = osc_lru_shrink(env, cli, min(nr, target), true);
*(int *)val -= nr;
return 0;
}
@@ -2992,12 +3011,12 @@ static int osc_reconnect(const struct lu_env *env,
if (data && (data->ocd_connect_flags & OBD_CONNECT_GRANT)) {
long lost_grant;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
data->ocd_grant = (cli->cl_avail_grant + cli->cl_dirty) ?:
2 * cli_brw_size(obd);
lost_grant = cli->cl_lost_grant;
cli->cl_lost_grant = 0;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
CDEBUG(D_RPCTRACE, "ocd_connect_flags: %#llx ocd_version: %d ocd_grant: %d, lost: %ld.\n",
data->ocd_connect_flags,
@@ -3047,10 +3066,10 @@ static int osc_import_event(struct obd_device *obd,
switch (event) {
case IMP_EVENT_DISCON: {
cli = &obd->u.cli;
- client_obd_list_lock(&cli->cl_loi_list_lock);
+ spin_lock(&cli->cl_loi_list_lock);
cli->cl_avail_grant = 0;
cli->cl_lost_grant = 0;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
+ spin_unlock(&cli->cl_loi_list_lock);
break;
}
case IMP_EVENT_INACTIVE: {
@@ -3073,8 +3092,9 @@ static int osc_import_event(struct obd_device *obd,
ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
cl_env_put(env, &refcheck);
- } else
+ } else {
rc = PTR_ERR(env);
+ }
break;
}
case IMP_EVENT_ACTIVE: {
@@ -3116,20 +3136,14 @@ static int osc_import_event(struct obd_device *obd,
* \retval zero the lock can't be canceled
* \retval other ok to cancel
*/
-static int osc_cancel_for_recovery(struct ldlm_lock *lock)
+static int osc_cancel_weight(struct ldlm_lock *lock)
{
- check_res_locked(lock->l_resource);
-
/*
- * Cancel all unused extent lock in granted mode LCK_PR or LCK_CR.
- *
- * XXX as a future improvement, we can also cancel unused write lock
- * if it doesn't have dirty data and active mmaps.
+ * Cancel all unused and granted extent lock.
*/
if (lock->l_resource->lr_type == LDLM_EXTENT &&
- (lock->l_granted_mode == LCK_PR ||
- lock->l_granted_mode == LCK_CR) &&
- (osc_dlm_lock_pageref(lock) == 0))
+ lock->l_granted_mode == lock->l_req_mode &&
+ osc_ldlm_weigh_ast(lock) == 0)
return 1;
return 0;
@@ -3170,6 +3184,14 @@ int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
}
cli->cl_writeback_work = handler;
+ handler = ptlrpcd_alloc_work(cli->cl_import, lru_queue_work, cli);
+ if (IS_ERR(handler)) {
+ rc = PTR_ERR(handler);
+ goto out_ptlrpcd_work;
+ }
+
+ cli->cl_lru_work = handler;
+
rc = osc_quota_setup(obd);
if (rc)
goto out_ptlrpcd_work;
@@ -3198,11 +3220,18 @@ int osc_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
}
INIT_LIST_HEAD(&cli->cl_grant_shrink_list);
- ns_register_cancel(obd->obd_namespace, osc_cancel_for_recovery);
+ ns_register_cancel(obd->obd_namespace, osc_cancel_weight);
return rc;
out_ptlrpcd_work:
- ptlrpcd_destroy_work(handler);
+ if (cli->cl_writeback_work) {
+ ptlrpcd_destroy_work(cli->cl_writeback_work);
+ cli->cl_writeback_work = NULL;
+ }
+ if (cli->cl_lru_work) {
+ ptlrpcd_destroy_work(cli->cl_lru_work);
+ cli->cl_lru_work = NULL;
+ }
out_client_setup:
client_obd_cleanup(obd);
out_ptlrpcd:
@@ -3241,6 +3270,10 @@ static int osc_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
ptlrpcd_destroy_work(cli->cl_writeback_work);
cli->cl_writeback_work = NULL;
}
+ if (cli->cl_lru_work) {
+ ptlrpcd_destroy_work(cli->cl_lru_work);
+ cli->cl_lru_work = NULL;
+ }
obd_cleanup_client_import(obd);
ptlrpc_lprocfs_unregister_obd(obd);
lprocfs_obd_cleanup(obd);
@@ -3330,7 +3363,6 @@ static struct obd_ops osc_obd_ops = {
};
extern struct lu_kmem_descr osc_caches[];
-extern spinlock_t osc_ast_guard;
extern struct lock_class_key osc_ast_guard_class;
static int __init osc_init(void)
@@ -3357,9 +3389,6 @@ static int __init osc_init(void)
if (rc)
goto out_kmem;
- spin_lock_init(&osc_ast_guard);
- lockdep_set_class(&osc_ast_guard, &osc_ast_guard_class);
-
/* This is obviously too much memory, only prevent overflow here */
if (osc_reqpool_mem_max >= 1 << 12 || osc_reqpool_mem_max == 0) {
rc = -EINVAL;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/client.c b/drivers/staging/lustre/lustre/ptlrpc/client.c
index cf3ac8eee9ee..4b7912a2cb52 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/client.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/client.c
@@ -595,9 +595,9 @@ static int __ptlrpc_request_bufs_pack(struct ptlrpc_request *request,
struct obd_import *imp = request->rq_import;
int rc;
- if (unlikely(ctx))
+ if (unlikely(ctx)) {
request->rq_cli_ctx = sptlrpc_cli_ctx_get(ctx);
- else {
+ } else {
rc = sptlrpc_req_get_ctx(request);
if (rc)
goto out_free;
@@ -1082,7 +1082,6 @@ static int ptlrpc_console_allow(struct ptlrpc_request *req)
*/
if ((lustre_handle_is_used(&req->rq_import->imp_remote_handle)) &&
(opc == OST_CONNECT || opc == MDS_CONNECT || opc == MGS_CONNECT)) {
-
/* Suppress timed out reconnect requests */
if (req->rq_timedout)
return 0;
@@ -2087,7 +2086,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set)
CDEBUG(D_RPCTRACE, "set %p going to sleep for %d seconds\n",
set, timeout);
- if (timeout == 0 && !cfs_signal_pending())
+ if (timeout == 0 && !signal_pending(current))
/*
* No requests are in-flight (ether timed out
* or delayed), so we can allow interrupts.
@@ -2114,7 +2113,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set)
* it being ignored forever
*/
if (rc == -ETIMEDOUT && !lwi.lwi_allow_intr &&
- cfs_signal_pending()) {
+ signal_pending(current)) {
sigset_t blocked_sigs =
cfs_block_sigsinv(LUSTRE_FATAL_SIGS);
@@ -2124,7 +2123,7 @@ int ptlrpc_set_wait(struct ptlrpc_request_set *set)
* important signals since ptlrpc set is not easily
* reentrant from userspace again
*/
- if (cfs_signal_pending())
+ if (signal_pending(current))
ptlrpc_interrupted_set(set);
cfs_restore_sigs(blocked_sigs);
}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/events.c b/drivers/staging/lustre/lustre/ptlrpc/events.c
index 47be21ac9f10..fdcde9bbd788 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/events.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/events.c
@@ -69,7 +69,6 @@ void request_out_callback(lnet_event_t *ev)
req->rq_req_unlink = 0;
if (ev->type == LNET_EVENT_UNLINK || ev->status != 0) {
-
/* Failed send: make it seem like the reply timed out, just
* like failing sends in client.c does currently...
*/
diff --git a/drivers/staging/lustre/lustre/ptlrpc/import.c b/drivers/staging/lustre/lustre/ptlrpc/import.c
index cd94fed0ffdf..a4f7544f46b8 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/import.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/import.c
@@ -1001,6 +1001,7 @@ finish:
return 0;
}
} else {
+ static bool warned;
spin_lock(&imp->imp_lock);
list_del(&imp->imp_conn_current->oic_item);
@@ -1021,7 +1022,7 @@ finish:
goto out;
}
- if ((ocd->ocd_connect_flags & OBD_CONNECT_VERSION) &&
+ if (!warned && (ocd->ocd_connect_flags & OBD_CONNECT_VERSION) &&
(ocd->ocd_version > LUSTRE_VERSION_CODE +
LUSTRE_VERSION_OFFSET_WARN ||
ocd->ocd_version < LUSTRE_VERSION_CODE -
@@ -1029,10 +1030,8 @@ finish:
/* Sigh, some compilers do not like #ifdef in the middle
* of macro arguments
*/
- const char *older = "older. Consider upgrading server or downgrading client"
- ;
- const char *newer = "newer than client version. Consider upgrading client"
- ;
+ const char *older = "older than client. Consider upgrading server";
+ const char *newer = "newer than client. Consider recompiling application";
LCONSOLE_WARN("Server %s version (%d.%d.%d.%d) is much %s (%s)\n",
obd2cli_tgt(imp->imp_obd),
@@ -1042,6 +1041,7 @@ finish:
OBD_OCD_VERSION_FIX(ocd->ocd_version),
ocd->ocd_version > LUSTRE_VERSION_CODE ?
newer : older, LUSTRE_VERSION_STRING);
+ warned = true;
}
#if LUSTRE_VERSION_CODE < OBD_OCD_VERSION(3, 2, 50, 0)
@@ -1370,7 +1370,6 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp)
if (rc)
goto out;
}
-
}
if (imp->imp_state == LUSTRE_IMP_REPLAY_WAIT) {
@@ -1453,7 +1452,6 @@ int ptlrpc_disconnect_import(struct obd_import *imp, int noclose)
back_to_sleep, LWI_ON_SIGNAL_NOOP, NULL);
rc = l_wait_event(imp->imp_recovery_waitq,
!ptlrpc_import_in_recovery(imp), &lwi);
-
}
spin_lock(&imp->imp_lock);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/layout.c b/drivers/staging/lustre/lustre/ptlrpc/layout.c
index 5b06901e5729..c0ecd1625dc4 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/layout.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/layout.c
@@ -160,6 +160,16 @@ static const struct req_msg_field *fld_query_server[] = {
&RMF_FLD_MDFLD
};
+static const struct req_msg_field *fld_read_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_FLD_MDFLD
+};
+
+static const struct req_msg_field *fld_read_server[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_GENERIC_DATA
+};
+
static const struct req_msg_field *mds_getattr_name_client[] = {
&RMF_PTLRPC_BODY,
&RMF_MDT_BODY,
@@ -566,7 +576,7 @@ static const struct req_msg_field *ost_get_info_generic_server[] = {
static const struct req_msg_field *ost_get_info_generic_client[] = {
&RMF_PTLRPC_BODY,
- &RMF_SETINFO_KEY
+ &RMF_GETINFO_KEY
};
static const struct req_msg_field *ost_get_last_id_server[] = {
@@ -574,6 +584,12 @@ static const struct req_msg_field *ost_get_last_id_server[] = {
&RMF_OBD_ID
};
+static const struct req_msg_field *ost_get_last_fid_client[] = {
+ &RMF_PTLRPC_BODY,
+ &RMF_GETINFO_KEY,
+ &RMF_FID,
+};
+
static const struct req_msg_field *ost_get_last_fid_server[] = {
&RMF_PTLRPC_BODY,
&RMF_FID,
@@ -643,6 +659,7 @@ static struct req_format *req_formats[] = {
&RQF_MGS_CONFIG_READ,
&RQF_SEQ_QUERY,
&RQF_FLD_QUERY,
+ &RQF_FLD_READ,
&RQF_MDS_CONNECT,
&RQF_MDS_DISCONNECT,
&RQF_MDS_GET_INFO,
@@ -696,7 +713,7 @@ static struct req_format *req_formats[] = {
&RQF_OST_BRW_WRITE,
&RQF_OST_STATFS,
&RQF_OST_SET_GRANT_INFO,
- &RQF_OST_GET_INFO_GENERIC,
+ &RQF_OST_GET_INFO,
&RQF_OST_GET_INFO_LAST_ID,
&RQF_OST_GET_INFO_LAST_FID,
&RQF_OST_SET_INFO_LAST_FID,
@@ -1162,6 +1179,10 @@ struct req_format RQF_FLD_QUERY =
DEFINE_REQ_FMT0("FLD_QUERY", fld_query_client, fld_query_server);
EXPORT_SYMBOL(RQF_FLD_QUERY);
+struct req_format RQF_FLD_READ =
+ DEFINE_REQ_FMT0("FLD_READ", fld_read_client, fld_read_server);
+EXPORT_SYMBOL(RQF_FLD_READ);
+
struct req_format RQF_LOG_CANCEL =
DEFINE_REQ_FMT0("OBD_LOG_CANCEL", log_cancel_client, empty);
EXPORT_SYMBOL(RQF_LOG_CANCEL);
@@ -1519,10 +1540,10 @@ struct req_format RQF_OST_SET_GRANT_INFO =
ost_body_only);
EXPORT_SYMBOL(RQF_OST_SET_GRANT_INFO);
-struct req_format RQF_OST_GET_INFO_GENERIC =
+struct req_format RQF_OST_GET_INFO =
DEFINE_REQ_FMT0("OST_GET_INFO", ost_get_info_generic_client,
ost_get_info_generic_server);
-EXPORT_SYMBOL(RQF_OST_GET_INFO_GENERIC);
+EXPORT_SYMBOL(RQF_OST_GET_INFO);
struct req_format RQF_OST_GET_INFO_LAST_ID =
DEFINE_REQ_FMT0("OST_GET_INFO_LAST_ID", ost_get_info_generic_client,
@@ -1530,7 +1551,7 @@ struct req_format RQF_OST_GET_INFO_LAST_ID =
EXPORT_SYMBOL(RQF_OST_GET_INFO_LAST_ID);
struct req_format RQF_OST_GET_INFO_LAST_FID =
- DEFINE_REQ_FMT0("OST_GET_INFO_LAST_FID", obd_set_info_client,
+ DEFINE_REQ_FMT0("OST_GET_INFO_LAST_FID", ost_get_last_fid_client,
ost_get_last_fid_server);
EXPORT_SYMBOL(RQF_OST_GET_INFO_LAST_FID);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
index c95a91ce26c9..64c0f1e17f36 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/lproc_ptlrpc.c
@@ -131,6 +131,7 @@ static struct ll_rpc_opcode {
{ SEC_CTX_INIT_CONT, "sec_ctx_init_cont" },
{ SEC_CTX_FINI, "sec_ctx_fini" },
{ FLD_QUERY, "fld_query" },
+ { FLD_READ, "fld_read" },
};
static struct ll_eopcode {
@@ -679,11 +680,11 @@ static ssize_t ptlrpc_lprocfs_nrs_seq_write(struct file *file,
/**
* The second token is either NULL, or an optional [reg|hp] string
*/
- if (strcmp(cmd, "reg") == 0)
+ if (strcmp(cmd, "reg") == 0) {
queue = PTLRPC_NRS_QUEUE_REG;
- else if (strcmp(cmd, "hp") == 0)
+ } else if (strcmp(cmd, "hp") == 0) {
queue = PTLRPC_NRS_QUEUE_HP;
- else {
+ } else {
rc = -EINVAL;
goto out;
}
@@ -693,8 +694,9 @@ default_queue:
if (queue == PTLRPC_NRS_QUEUE_HP && !nrs_svc_has_hp(svc)) {
rc = -ENODEV;
goto out;
- } else if (queue == PTLRPC_NRS_QUEUE_BOTH && !nrs_svc_has_hp(svc))
+ } else if (queue == PTLRPC_NRS_QUEUE_BOTH && !nrs_svc_has_hp(svc)) {
queue = PTLRPC_NRS_QUEUE_REG;
+ }
/**
* Serialize NRS core lprocfs operations with policy registration/
@@ -1320,6 +1322,5 @@ int lprocfs_wr_pinger_recov(struct file *file, const char __user *buffer,
up_read(&obd->u.cli.cl_sem);
return count;
-
}
EXPORT_SYMBOL(lprocfs_wr_pinger_recov);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/nrs.c b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
index 710fb806f122..c444f516856f 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/nrs.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/nrs.c
@@ -975,7 +975,11 @@ static void nrs_svcpt_cleanup_locked(struct ptlrpc_service_part *svcpt)
LASSERT(mutex_is_locked(&nrs_core.nrs_mutex));
again:
- nrs = nrs_svcpt2nrs(svcpt, hp);
+ /* scp_nrs_hp could be NULL due to a memory allocation failure. */
+ nrs = hp ? svcpt->scp_nrs_hp : &svcpt->scp_nrs_reg;
+ /* Check nrs_svcpt to see whether nrs is initialized. */
+ if (!nrs || !nrs->nrs_svcpt)
+ return;
nrs->nrs_stopping = 1;
list_for_each_entry_safe(policy, tmp, &nrs->nrs_policy_list, pol_list) {
@@ -1038,7 +1042,6 @@ static int nrs_policy_unregister_locked(struct ptlrpc_nrs_pol_desc *desc)
LASSERT(mutex_is_locked(&ptlrpc_all_services_mutex));
list_for_each_entry(svc, &ptlrpc_all_services, srv_list) {
-
if (!nrs_policy_compatible(svc, desc) ||
unlikely(svc->srv_is_stopping))
continue;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
index 492d63fad6f9..811acf6fc786 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/pack_generic.c
@@ -1160,7 +1160,6 @@ __u32 lustre_msg_get_timeout(struct lustre_msg *msg)
if (!pb) {
CERROR("invalid msg %p: no ptlrpc body!\n", msg);
return 0;
-
}
return pb->pb_timeout;
}
@@ -1179,7 +1178,6 @@ __u32 lustre_msg_get_service_time(struct lustre_msg *msg)
if (!pb) {
CERROR("invalid msg %p: no ptlrpc body!\n", msg);
return 0;
-
}
return pb->pb_service_time;
}
@@ -1572,7 +1570,6 @@ static void lustre_swab_obdo(struct obdo *o)
CLASSERT(offsetof(typeof(*o), o_padding_4) != 0);
CLASSERT(offsetof(typeof(*o), o_padding_5) != 0);
CLASSERT(offsetof(typeof(*o), o_padding_6) != 0);
-
}
void lustre_swab_obd_statfs(struct obd_statfs *os)
diff --git a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
index db003f5da09e..76a355a9db8b 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/ptlrpcd.c
@@ -387,7 +387,8 @@ static int ptlrpcd(void *arg)
{
struct ptlrpcd_ctl *pc = arg;
struct ptlrpc_request_set *set;
- struct lu_env env = { .le_ses = NULL };
+ struct lu_context ses = { 0 };
+ struct lu_env env = { .le_ses = &ses };
int rc = 0;
int exit = 0;
@@ -416,6 +417,13 @@ static int ptlrpcd(void *arg)
*/
rc = lu_context_init(&env.le_ctx,
LCT_CL_THREAD|LCT_REMEMBER|LCT_NOREF);
+ if (rc == 0) {
+ rc = lu_context_init(env.le_ses,
+ LCT_SESSION | LCT_REMEMBER | LCT_NOREF);
+ if (rc != 0)
+ lu_context_fini(&env.le_ctx);
+ }
+
if (rc != 0)
goto failed;
@@ -436,9 +444,10 @@ static int ptlrpcd(void *arg)
ptlrpc_expired_set, set);
lu_context_enter(&env.le_ctx);
- l_wait_event(set->set_waitq,
- ptlrpcd_check(&env, pc), &lwi);
+ lu_context_enter(env.le_ses);
+ l_wait_event(set->set_waitq, ptlrpcd_check(&env, pc), &lwi);
lu_context_exit(&env.le_ctx);
+ lu_context_exit(env.le_ses);
/*
* Abort inflight rpcs for forced stop case.
@@ -461,6 +470,7 @@ static int ptlrpcd(void *arg)
if (!list_empty(&set->set_requests))
ptlrpc_set_wait(set);
lu_context_fini(&env.le_ctx);
+ lu_context_fini(env.le_ses);
complete(&pc->pc_finishing);
@@ -899,8 +909,11 @@ int ptlrpcd_addref(void)
int rc = 0;
mutex_lock(&ptlrpcd_mutex);
- if (++ptlrpcd_users == 1)
+ if (++ptlrpcd_users == 1) {
rc = ptlrpcd_init();
+ if (rc < 0)
+ ptlrpcd_users--;
+ }
mutex_unlock(&ptlrpcd_mutex);
return rc;
}
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
index d3872b8c9a6e..02e6cda4c995 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
@@ -41,7 +41,6 @@
#define DEBUG_SUBSYSTEM S_SEC
#include "../../include/linux/libcfs/libcfs.h"
-#include <linux/crypto.h>
#include "../include/obd.h"
#include "../include/obd_cksum.h"
@@ -511,7 +510,6 @@ int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
{
struct cfs_crypto_hash_desc *hdesc;
int hashsize;
- char hashbuf[64];
unsigned int bufsize;
int i, err;
@@ -529,21 +527,23 @@ int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
for (i = 0; i < desc->bd_iov_count; i++) {
cfs_crypto_hash_update_page(hdesc, desc->bd_iov[i].kiov_page,
- desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK,
+ desc->bd_iov[i].kiov_offset & ~PAGE_MASK,
desc->bd_iov[i].kiov_len);
}
+
if (hashsize > buflen) {
+ unsigned char hashbuf[CFS_CRYPTO_HASH_DIGESTSIZE_MAX];
+
bufsize = sizeof(hashbuf);
- err = cfs_crypto_hash_final(hdesc, (unsigned char *)hashbuf,
- &bufsize);
+ LASSERTF(bufsize >= hashsize, "bufsize = %u < hashsize %u\n",
+ bufsize, hashsize);
+ err = cfs_crypto_hash_final(hdesc, hashbuf, &bufsize);
memcpy(buf, hashbuf, buflen);
} else {
bufsize = buflen;
err = cfs_crypto_hash_final(hdesc, buf, &bufsize);
}
- if (err)
- cfs_crypto_hash_final(hdesc, NULL, NULL);
return err;
}
EXPORT_SYMBOL(sptlrpc_get_bulk_checksum);
diff --git a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
index 6276bf59c3aa..37c9f4c453de 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/sec_plain.c
@@ -162,7 +162,7 @@ static void corrupt_bulk_data(struct ptlrpc_bulk_desc *desc)
continue;
ptr = kmap(desc->bd_iov[i].kiov_page);
- off = desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK;
+ off = desc->bd_iov[i].kiov_offset & ~PAGE_MASK;
ptr[off] ^= 0x1;
kunmap(desc->bd_iov[i].kiov_page);
return;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/service.c b/drivers/staging/lustre/lustre/ptlrpc/service.c
index 1bbd1d39ccf8..17c7b9749f67 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/service.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/service.c
@@ -838,6 +838,11 @@ static void ptlrpc_server_finish_request(struct ptlrpc_service_part *svcpt,
{
ptlrpc_server_hpreq_fini(req);
+ if (req->rq_session.lc_thread) {
+ lu_context_exit(&req->rq_session);
+ lu_context_fini(&req->rq_session);
+ }
+
ptlrpc_server_drop_request(req);
}
@@ -1579,6 +1584,21 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt,
}
req->rq_svc_thread = thread;
+ if (thread) {
+ /* initialize the request session; it is needed for request
+ * processing by the target
+ */
+ rc = lu_context_init(&req->rq_session,
+ LCT_SERVER_SESSION | LCT_NOREF);
+ if (rc) {
+ CERROR("%s: failure to initialize session: rc = %d\n",
+ thread->t_name, rc);
+ goto err_req;
+ }
+ req->rq_session.lc_thread = thread;
+ lu_context_enter(&req->rq_session);
+ req->rq_svc_thread->t_env->le_ses = &req->rq_session;
+ }
ptlrpc_at_add_timed(req);
@@ -1612,7 +1632,6 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
struct timespec64 arrived;
unsigned long timediff_usecs;
unsigned long arrived_usecs;
- int rc;
int fail_opc = 0;
request = ptlrpc_server_request_get(svcpt, false);
@@ -1649,21 +1668,6 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
at_get(&svcpt->scp_at_estimate));
}
- rc = lu_context_init(&request->rq_session, LCT_SESSION | LCT_NOREF);
- if (rc) {
- CERROR("Failure to initialize session: %d\n", rc);
- goto out_req;
- }
- request->rq_session.lc_thread = thread;
- request->rq_session.lc_cookie = 0x5;
- lu_context_enter(&request->rq_session);
-
- CDEBUG(D_NET, "got req %llu\n", request->rq_xid);
-
- request->rq_svc_thread = thread;
- if (thread)
- request->rq_svc_thread->t_env->le_ses = &request->rq_session;
-
if (likely(request->rq_export)) {
if (unlikely(ptlrpc_check_req(request)))
goto put_conn;
@@ -1695,14 +1699,21 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
if (lustre_msg_get_opc(request->rq_reqmsg) != OBD_PING)
CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_PAUSE_REQ, cfs_fail_val);
- rc = svc->srv_ops.so_req_handler(request);
+ CDEBUG(D_NET, "got req %llu\n", request->rq_xid);
+
+ /* re-assign request and session thread to the current one */
+ request->rq_svc_thread = thread;
+ if (thread) {
+ LASSERT(request->rq_session.lc_thread);
+ request->rq_session.lc_thread = thread;
+ request->rq_session.lc_cookie = 0x55;
+ thread->t_env->le_ses = &request->rq_session;
+ }
+ svc->srv_ops.so_req_handler(request);
ptlrpc_rqphase_move(request, RQ_PHASE_COMPLETE);
put_conn:
- lu_context_exit(&request->rq_session);
- lu_context_fini(&request->rq_session);
-
if (unlikely(ktime_get_real_seconds() > request->rq_deadline)) {
DEBUG_REQ(D_WARNING, request,
"Request took longer than estimated (%lld:%llds); "
@@ -1756,7 +1767,6 @@ put_conn:
request->rq_arrival_time.tv_sec);
}
-out_req:
ptlrpc_server_finish_active_request(svcpt, request);
return 1;
diff --git a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
index 3ffd2d91f274..aacc8108391d 100644
--- a/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
+++ b/drivers/staging/lustre/lustre/ptlrpc/wiretest.c
@@ -276,7 +276,9 @@ void lustre_assert_wire_constants(void)
(long long)FLD_QUERY);
LASSERTF(FLD_FIRST_OPC == 900, "found %lld\n",
(long long)FLD_FIRST_OPC);
- LASSERTF(FLD_LAST_OPC == 901, "found %lld\n",
+ LASSERTF(FLD_READ == 901, "found %lld\n",
+ (long long)FLD_READ);
+ LASSERTF(FLD_LAST_OPC == 902, "found %lld\n",
(long long)FLD_LAST_OPC);
LASSERTF(SEQ_QUERY == 700, "found %lld\n",
(long long)SEQ_QUERY);
@@ -1069,6 +1071,8 @@ void lustre_assert_wire_constants(void)
OBD_CONNECT_PINGLESS);
LASSERTF(OBD_CONNECT_FLOCK_DEAD == 0x8000000000000ULL,
"found 0x%.16llxULL\n", OBD_CONNECT_FLOCK_DEAD);
+ LASSERTF(OBD_CONNECT_OPEN_BY_FID == 0x20000000000000ULL,
+ "found 0x%.16llxULL\n", OBD_CONNECT_OPEN_BY_FID);
LASSERTF(OBD_CKSUM_CRC32 == 0x00000001UL, "found 0x%.8xUL\n",
(unsigned)OBD_CKSUM_CRC32);
LASSERTF(OBD_CKSUM_ADLER == 0x00000002UL, "found 0x%.8xUL\n",
@@ -1639,6 +1643,12 @@ void lustre_assert_wire_constants(void)
OBD_BRW_ASYNC);
LASSERTF(OBD_BRW_MEMALLOC == 0x800, "found 0x%.8x\n",
OBD_BRW_MEMALLOC);
+ LASSERTF(OBD_BRW_OVER_USRQUOTA == 0x1000, "found 0x%.8x\n",
+ OBD_BRW_OVER_USRQUOTA);
+ LASSERTF(OBD_BRW_OVER_GRPQUOTA == 0x2000, "found 0x%.8x\n",
+ OBD_BRW_OVER_GRPQUOTA);
+ LASSERTF(OBD_BRW_SOFT_SYNC == 0x4000, "found 0x%.8x\n",
+ OBD_BRW_SOFT_SYNC);
/* Checks for struct ost_body */
LASSERTF((int)sizeof(struct ost_body) == 208, "found %lld\n",
diff --git a/drivers/staging/media/omap1/omap1_camera.c b/drivers/staging/media/omap1/omap1_camera.c
index bd721e35474a..54b8dd2d2bba 100644
--- a/drivers/staging/media/omap1/omap1_camera.c
+++ b/drivers/staging/media/omap1/omap1_camera.c
@@ -1569,27 +1569,21 @@ static int omap1_cam_probe(struct platform_device *pdev)
unsigned int irq;
int err = 0;
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
- if (!res || (int)irq <= 0) {
+ if ((int)irq <= 0) {
err = -ENODEV;
goto exit;
}
- clk = clk_get(&pdev->dev, "armper_ck");
- if (IS_ERR(clk)) {
- err = PTR_ERR(clk);
- goto exit;
- }
+ clk = devm_clk_get(&pdev->dev, "armper_ck");
+ if (IS_ERR(clk))
+ return PTR_ERR(clk);
- pcdev = kzalloc(sizeof(*pcdev) + resource_size(res), GFP_KERNEL);
- if (!pcdev) {
- dev_err(&pdev->dev, "Could not allocate pcdev\n");
- err = -ENOMEM;
- goto exit_put_clk;
- }
+ pcdev = devm_kzalloc(&pdev->dev, sizeof(*pcdev) + resource_size(res),
+ GFP_KERNEL);
+ if (!pcdev)
+ return -ENOMEM;
- pcdev->res = res;
pcdev->clk = clk;
pcdev->pdata = pdev->dev.platform_data;
@@ -1620,19 +1614,11 @@ static int omap1_cam_probe(struct platform_device *pdev)
INIT_LIST_HEAD(&pcdev->capture);
spin_lock_init(&pcdev->lock);
- /*
- * Request the region.
- */
- if (!request_mem_region(res->start, resource_size(res), DRIVER_NAME)) {
- err = -EBUSY;
- goto exit_kfree;
- }
+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+ base = devm_ioremap_resource(&pdev->dev, res);
+ if (IS_ERR(base))
+ return PTR_ERR(base);
- base = ioremap(res->start, resource_size(res));
- if (!base) {
- err = -ENOMEM;
- goto exit_release;
- }
pcdev->irq = irq;
pcdev->base = base;
@@ -1642,8 +1628,7 @@ static int omap1_cam_probe(struct platform_device *pdev)
dma_isr, (void *)pcdev, &pcdev->dma_ch);
if (err < 0) {
dev_err(&pdev->dev, "Can't request DMA for OMAP1 Camera\n");
- err = -EBUSY;
- goto exit_iounmap;
+ return -EBUSY;
}
dev_dbg(&pdev->dev, "got DMA channel %d\n", pcdev->dma_ch);
@@ -1655,7 +1640,8 @@ static int omap1_cam_probe(struct platform_device *pdev)
/* setup DMA autoinitialization */
omap_dma_link_lch(pcdev->dma_ch, pcdev->dma_ch);
- err = request_irq(pcdev->irq, cam_isr, 0, DRIVER_NAME, pcdev);
+ err = devm_request_irq(&pdev->dev, pcdev->irq, cam_isr, 0, DRIVER_NAME,
+ pcdev);
if (err) {
dev_err(&pdev->dev, "Camera interrupt register failed\n");
goto exit_free_dma;
@@ -1669,24 +1655,14 @@ static int omap1_cam_probe(struct platform_device *pdev)
err = soc_camera_host_register(&pcdev->soc_host);
if (err)
- goto exit_free_irq;
+ return err;
dev_info(&pdev->dev, "OMAP1 Camera Interface driver loaded\n");
return 0;
-exit_free_irq:
- free_irq(pcdev->irq, pcdev);
exit_free_dma:
omap_free_dma(pcdev->dma_ch);
-exit_iounmap:
- iounmap(base);
-exit_release:
- release_mem_region(res->start, resource_size(res));
-exit_kfree:
- kfree(pcdev);
-exit_put_clk:
- clk_put(clk);
exit:
return err;
}
@@ -1696,23 +1672,11 @@ static int omap1_cam_remove(struct platform_device *pdev)
struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev);
struct omap1_cam_dev *pcdev = container_of(soc_host,
struct omap1_cam_dev, soc_host);
- struct resource *res;
-
- free_irq(pcdev->irq, pcdev);
omap_free_dma(pcdev->dma_ch);
soc_camera_host_unregister(soc_host);
- iounmap(pcdev->base);
-
- res = pcdev->res;
- release_mem_region(res->start, resource_size(res));
-
- clk_put(pcdev->clk);
-
- kfree(pcdev);
-
dev_info(&pdev->dev, "OMAP1 Camera Interface driver unloaded\n");
return 0;
diff --git a/drivers/staging/media/omap4iss/iss.c b/drivers/staging/media/omap4iss/iss.c
index c5a5138b3d3b..6ceb4eb00493 100644
--- a/drivers/staging/media/omap4iss/iss.c
+++ b/drivers/staging/media/omap4iss/iss.c
@@ -1065,7 +1065,7 @@ static int iss_register_entities(struct iss_device *iss)
}
ret = media_create_pad_link(&sensor->entity, 0, input, pad,
- flags);
+ flags);
if (ret < 0)
goto done;
}
diff --git a/drivers/staging/most/hdm-dim2/dim2_errors.h b/drivers/staging/most/hdm-dim2/dim2_errors.h
index 5a713df1d1d4..66343ba426c1 100644
--- a/drivers/staging/most/hdm-dim2/dim2_errors.h
+++ b/drivers/staging/most/hdm-dim2/dim2_errors.h
@@ -15,10 +15,6 @@
#ifndef _MOST_DIM_ERRORS_H
#define _MOST_DIM_ERRORS_H
-#ifdef __cplusplus
-extern "C" {
-#endif
-
/**
* MOST DIM errors.
*/
@@ -58,8 +54,4 @@ enum dim_errors_t {
DIM_ERR_OVERFLOW,
};
-#ifdef __cplusplus
-}
-#endif
-
#endif /* _MOST_DIM_ERRORS_H */
diff --git a/drivers/staging/most/hdm-dim2/dim2_hal.h b/drivers/staging/most/hdm-dim2/dim2_hal.h
index fc73d4f97734..1c924e869de7 100644
--- a/drivers/staging/most/hdm-dim2/dim2_hal.h
+++ b/drivers/staging/most/hdm-dim2/dim2_hal.h
@@ -18,10 +18,6 @@
#include <linux/types.h>
#include "dim2_reg.h"
-#ifdef __cplusplus
-extern "C" {
-#endif
-
/*
* The values below are specified in the hardware specification.
* So, they should not be changed until the hardware specification changes.
@@ -42,14 +38,12 @@ struct dim_ch_state_t {
u16 done_buffers; /* Number of completed buffers */
};
-typedef int atomic_counter_t;
-
struct int_ch_state {
/* changed only in interrupt context */
- volatile atomic_counter_t request_counter;
+ volatile int request_counter;
/* changed only in task context */
- volatile atomic_counter_t service_counter;
+ volatile int service_counter;
u8 idx1;
u8 idx2;
@@ -110,8 +104,4 @@ void dimcb_io_write(u32 __iomem *ptr32, u32 value);
void dimcb_on_error(u8 error_id, const char *error_message);
-#ifdef __cplusplus
-}
-#endif
-
#endif /* _DIM2_HAL_H */
diff --git a/drivers/staging/most/hdm-dim2/dim2_reg.h b/drivers/staging/most/hdm-dim2/dim2_reg.h
index bcf6a79f6744..e0837b6b9ae1 100644
--- a/drivers/staging/most/hdm-dim2/dim2_reg.h
+++ b/drivers/staging/most/hdm-dim2/dim2_reg.h
@@ -17,10 +17,6 @@
#include <linux/types.h>
-#ifdef __cplusplus
-extern "C" {
-#endif
-
struct dim2_regs {
/* 0x00 */ u32 MLBC0;
/* 0x01 */ u32 rsvd0[1];
@@ -166,8 +162,4 @@ enum {
CAT_CL_MASK = DIM2_MASK(6)
};
-#ifdef __cplusplus
-}
-#endif
-
#endif /* DIM2_OS62420_H */
diff --git a/drivers/staging/netlogic/xlr_net.c b/drivers/staging/netlogic/xlr_net.c
index aa1cdf602cf6..99445d0fcf9c 100644
--- a/drivers/staging/netlogic/xlr_net.c
+++ b/drivers/staging/netlogic/xlr_net.c
@@ -850,7 +850,7 @@ static int xlr_mii_probe(struct xlr_net_priv *priv)
/* Attach MAC to PHY */
phydev = phy_connect(priv->ndev, phydev_name(phydev),
- &xlr_gmac_link_adjust, priv->nd->phy_interface);
+ xlr_gmac_link_adjust, priv->nd->phy_interface);
if (IS_ERR(phydev)) {
pr_err("could not attach PHY\n");
diff --git a/drivers/staging/nvec/nvec.c b/drivers/staging/nvec/nvec.c
index 9fda136b8e05..c1feccf8d94a 100644
--- a/drivers/staging/nvec/nvec.c
+++ b/drivers/staging/nvec/nvec.c
@@ -264,7 +264,7 @@ int nvec_write_async(struct nvec_chip *nvec, const unsigned char *data,
msg = nvec_msg_alloc(nvec, NVEC_MSG_TX);
- if (msg == NULL)
+ if (!msg)
return -ENOMEM;
msg->data[0] = size;
@@ -620,7 +620,7 @@ static irqreturn_t nvec_interrupt(int irq, void *dev)
} else {
nvec->rx = nvec_msg_alloc(nvec, NVEC_MSG_RX);
/* Should not happen in a normal world */
- if (unlikely(nvec->rx == NULL)) {
+ if (unlikely(!nvec->rx)) {
nvec->state = 0;
break;
}
@@ -659,10 +659,11 @@ static irqreturn_t nvec_interrupt(int irq, void *dev)
} else if (nvec->tx && nvec->tx->pos < nvec->tx->size) {
to_send = nvec->tx->data[nvec->tx->pos++];
} else {
- dev_err(nvec->dev, "tx buffer underflow on %p (%u > %u)\n",
+ dev_err(nvec->dev,
+ "tx buffer underflow on %p (%u > %u)\n",
nvec->tx,
- (uint) (nvec->tx ? nvec->tx->pos : 0),
- (uint) (nvec->tx ? nvec->tx->size : 0));
+ (uint)(nvec->tx ? nvec->tx->pos : 0),
+ (uint)(nvec->tx ? nvec->tx->size : 0));
nvec->state = 0;
}
break;
diff --git a/drivers/staging/nvec/nvec_power.c b/drivers/staging/nvec/nvec_power.c
index b4a0545e8806..fcbb0fa03765 100644
--- a/drivers/staging/nvec/nvec_power.c
+++ b/drivers/staging/nvec/nvec_power.c
@@ -90,7 +90,7 @@ static int nvec_power_notifier(struct notifier_block *nb,
{
struct nvec_power *power =
container_of(nb, struct nvec_power, notifier);
- struct bat_response *res = (struct bat_response *)data;
+ struct bat_response *res = data;
if (event_type != NVEC_SYS)
return NOTIFY_DONE;
@@ -126,7 +126,7 @@ static int nvec_power_bat_notifier(struct notifier_block *nb,
{
struct nvec_power *power =
container_of(nb, struct nvec_power, notifier);
- struct bat_response *res = (struct bat_response *)data;
+ struct bat_response *res = data;
int status_changed = 0;
if (event_type != NVEC_BAT)
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index b6993b0b8170..a10fe3af9a9c 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -172,12 +172,13 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
old_group_mask = cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid));
cvmx_write_csr(CVMX_SSO_PPX_GRP_MSK(coreid),
- 1ull << pow_receive_group);
+ 1ull << pow_receive_group);
cvmx_read_csr(CVMX_SSO_PPX_GRP_MSK(coreid)); /* Flush */
} else {
old_group_mask = cvmx_read_csr(CVMX_POW_PP_GRP_MSKX(coreid));
cvmx_write_csr(CVMX_POW_PP_GRP_MSKX(coreid),
- (old_group_mask & ~0xFFFFull) | 1 << pow_receive_group);
+ (old_group_mask & ~0xFFFFull) |
+ 1 << pow_receive_group);
}
if (USE_ASYNC_IOBDMA) {
@@ -374,7 +375,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
* doesn't exist.
*/
printk_ratelimited("Port %d not controlled by Linux, packet dropped\n",
- port);
+ port);
dev_kfree_skb_irq(skb);
}
/*
diff --git a/drivers/staging/octeon/ethernet-rx.h b/drivers/staging/octeon/ethernet-rx.h
index a5973fd015fc..315a63d7094f 100644
--- a/drivers/staging/octeon/ethernet-rx.h
+++ b/drivers/staging/octeon/ethernet-rx.h
@@ -30,7 +30,7 @@ static inline void cvm_oct_rx_refill_pool(int fill_threshold)
number_to_free);
if (num_freed != number_to_free) {
cvmx_fau_atomic_add32(FAU_NUM_PACKET_BUFFERS_TO_FREE,
- number_to_free - num_freed);
+ number_to_free - num_freed);
}
}
}
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index ffe9bd77a7bb..6b4c20872323 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -58,9 +58,9 @@ static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0);
/* Maximum number of SKBs to try to free per xmit packet. */
#define MAX_SKB_TO_FREE (MAX_OUT_QUEUE_DEPTH * 2)
-static inline int32_t cvm_oct_adjust_skb_to_free(int32_t skb_to_free, int fau)
+static inline int cvm_oct_adjust_skb_to_free(int skb_to_free, int fau)
{
- int32_t undo;
+ int undo;
undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free +
MAX_SKB_TO_FREE;
@@ -83,7 +83,7 @@ static void cvm_oct_kick_tx_poll_watchdog(void)
static void cvm_oct_free_tx_skbs(struct net_device *dev)
{
- int32_t skb_to_free;
+ int skb_to_free;
int qos, queues_per_port;
int total_freed = 0;
int total_remaining = 0;
@@ -148,8 +148,8 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
enum {QUEUE_CORE, QUEUE_HW, QUEUE_DROP} queue_type;
struct octeon_ethernet *priv = netdev_priv(dev);
struct sk_buff *to_free_list;
- int32_t skb_to_free;
- int32_t buffers_to_free;
+ int skb_to_free;
+ int buffers_to_free;
u32 total_to_clean;
unsigned long flags;
#if REUSE_SKBUFFS_WITHOUT_FREE
@@ -220,7 +220,8 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
priv->fau + qos * 4, MAX_SKB_TO_FREE);
}
skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
- priv->fau + qos * 4);
+ priv->fau +
+ qos * 4);
spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
goto skip_xmit;
}
@@ -402,7 +403,7 @@ dont_put_skbuff_in_hw:
}
skb_to_free = cvm_oct_adjust_skb_to_free(skb_to_free,
- priv->fau + qos * 4);
+ priv->fau + qos * 4);
/*
* If we're sending faster than the receive can free them then
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index 271e1b8d8506..e9cd5f242921 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -635,7 +635,7 @@ static struct device_node *cvm_oct_of_get_child(
}
static struct device_node *cvm_oct_node_for_port(struct device_node *pip,
- int interface, int port)
+ int interface, int port)
{
struct device_node *ni, *np;
@@ -815,7 +815,7 @@ static int cvm_oct_probe(struct platform_device *pdev)
free_netdev(dev);
} else if (register_netdev(dev) < 0) {
pr_err("Failed to register ethernet device for interface %d, port %d\n",
- interface, priv->port);
+ interface, priv->port);
free_netdev(dev);
} else {
cvm_oct_device[priv->port] = dev;
diff --git a/drivers/staging/rtl8188eu/core/rtw_ap.c b/drivers/staging/rtl8188eu/core/rtw_ap.c
index 012860b34651..a5755358cc5d 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ap.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ap.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_AP_C_
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index e5a6b7a70df7..77485235c615 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_CMD_C_
@@ -263,11 +258,11 @@ u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SCAN, 1);
ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_ATOMIC);
- if (psurveyPara == NULL) {
+ if (!psurveyPara) {
kfree(ph2c);
return _FAIL;
}
@@ -350,7 +345,7 @@ u8 rtw_createbss_cmd(struct adapter *padapter)
RT_TRACE(_module_rtl871x_cmd_c_, _drv_info_, (" createbss for SSid:%s\n", pmlmepriv->assoc_ssid.Ssid));
pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (pcmd == NULL) {
+ if (!pcmd) {
res = _FAIL;
goto exit;
}
@@ -521,7 +516,7 @@ u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueu
/* prepare cmd parameter */
param = kzalloc(sizeof(*param), GFP_KERNEL);
- if (param == NULL) {
+ if (!param) {
res = _FAIL;
goto exit;
}
@@ -530,7 +525,7 @@ u8 rtw_disassoc_cmd(struct adapter *padapter, u32 deauth_timeout_ms, bool enqueu
if (enqueue) {
/* need enqueue, prepare cmd_obj and enqueue */
cmdobj = kzalloc(sizeof(*cmdobj), GFP_KERNEL);
- if (cmdobj == NULL) {
+ if (!cmdobj) {
res = _FAIL;
kfree(param);
goto exit;
@@ -629,20 +624,20 @@ u8 rtw_clearstakey_cmd(struct adapter *padapter, u8 *psta, u8 entry, u8 enqueue)
clear_cam_entry(padapter, entry);
} else {
ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
psetstakey_para = kzalloc(sizeof(struct set_stakey_parm), GFP_ATOMIC);
- if (psetstakey_para == NULL) {
+ if (!psetstakey_para) {
kfree(ph2c);
res = _FAIL;
goto exit;
}
psetstakey_rsp = kzalloc(sizeof(struct set_stakey_rsp), GFP_ATOMIC);
- if (psetstakey_rsp == NULL) {
+ if (!psetstakey_rsp) {
kfree(ph2c);
kfree(psetstakey_para);
res = _FAIL;
@@ -676,13 +671,13 @@ u8 rtw_addbareq_cmd(struct adapter *padapter, u8 tid, u8 *addr)
ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
paddbareq_parm = kzalloc(sizeof(struct addBaReq_parm), GFP_KERNEL);
- if (paddbareq_parm == NULL) {
+ if (!paddbareq_parm) {
kfree(ph2c);
res = _FAIL;
goto exit;
@@ -713,13 +708,13 @@ u8 rtw_dynamic_chk_wk_cmd(struct adapter *padapter)
ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
- if (pdrvextra_cmd_parm == NULL) {
+ if (!pdrvextra_cmd_parm) {
kfree(ph2c);
res = _FAIL;
goto exit;
@@ -757,7 +752,7 @@ u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan, u8 enqueue)
/* prepare cmd parameter */
setChannelPlan_param = kzalloc(sizeof(struct SetChannelPlan_param), GFP_KERNEL);
- if (setChannelPlan_param == NULL) {
+ if (!setChannelPlan_param) {
res = _FAIL;
goto exit;
}
@@ -766,7 +761,7 @@ u8 rtw_set_chplan_cmd(struct adapter *padapter, u8 chplan, u8 enqueue)
if (enqueue) {
/* need enqueue, prepare cmd_obj and enqueue */
pcmdobj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (pcmdobj == NULL) {
+ if (!pcmdobj) {
kfree(setChannelPlan_param);
res = _FAIL;
goto exit;
@@ -925,13 +920,13 @@ u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue)
if (enqueue) {
ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
- if (pdrvextra_cmd_parm == NULL) {
+ if (!pdrvextra_cmd_parm) {
kfree(ph2c);
res = _FAIL;
goto exit;
@@ -968,13 +963,13 @@ u8 rtw_rpt_timer_cfg_cmd(struct adapter *padapter, u16 min_time)
u8 res = _SUCCESS;
ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
- if (pdrvextra_cmd_parm == NULL) {
+ if (!pdrvextra_cmd_parm) {
kfree(ph2c);
res = _FAIL;
goto exit;
@@ -1010,13 +1005,13 @@ u8 rtw_antenna_select_cmd(struct adapter *padapter, u8 antenna, u8 enqueue)
if (enqueue) {
ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL);
- if (pdrvextra_cmd_parm == NULL) {
+ if (!pdrvextra_cmd_parm) {
kfree(ph2c);
res = _FAIL;
goto exit;
@@ -1108,13 +1103,13 @@ u8 rtw_chk_hi_queue_cmd(struct adapter *padapter)
u8 res = _SUCCESS;
ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL);
- if (pdrvextra_cmd_parm == NULL) {
+ if (!pdrvextra_cmd_parm) {
kfree(ph2c);
res = _FAIL;
goto exit;
diff --git a/drivers/staging/rtl8188eu/core/rtw_debug.c b/drivers/staging/rtl8188eu/core/rtw_debug.c
index 93e898d598fe..db5c952ac852 100644
--- a/drivers/staging/rtl8188eu/core/rtw_debug.c
+++ b/drivers/staging/rtl8188eu/core/rtw_debug.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_DEBUG_C_
diff --git a/drivers/staging/rtl8188eu/core/rtw_efuse.c b/drivers/staging/rtl8188eu/core/rtw_efuse.c
index 19f11d04d152..c17870cddb5b 100644
--- a/drivers/staging/rtl8188eu/core/rtw_efuse.c
+++ b/drivers/staging/rtl8188eu/core/rtw_efuse.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_EFUSE_C_
@@ -107,7 +102,7 @@ efuse_phymap_to_logical(u8 *phymap, u16 _offset, u16 _size_byte, u8 *pbuf)
if (!efuseTbl)
return;
- eFuseWord = (u16 **)rtw_malloc2d(EFUSE_MAX_SECTION_88E, EFUSE_MAX_WORD_UNIT, sizeof(u16));
+ eFuseWord = (u16 **)rtw_malloc2d(EFUSE_MAX_SECTION_88E, EFUSE_MAX_WORD_UNIT, sizeof(*eFuseWord));
if (!eFuseWord) {
DBG_88E("%s: alloc eFuseWord fail!\n", __func__);
goto eFuseWord_failed;
diff --git a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
index f4e4baf6054a..0b0d78fe83ed 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ieee80211.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _IEEE80211_C
diff --git a/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
index cf60717a6c19..f85a6abec3a3 100644
--- a/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
+++ b/drivers/staging/rtl8188eu/core/rtw_ioctl_set.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_IOCTL_SET_C_
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme.c b/drivers/staging/rtl8188eu/core/rtw_mlme.c
index a645a620ebe2..1456499b84bf 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_MLME_C_
@@ -1584,13 +1579,13 @@ int rtw_set_auth(struct adapter *adapter, struct security_priv *psecuritypriv)
int res = _SUCCESS;
pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (pcmd == NULL) {
+ if (!pcmd) {
res = _FAIL; /* try again */
goto exit;
}
psetauthparm = kzalloc(sizeof(struct setauth_parm), GFP_KERNEL);
- if (psetauthparm == NULL) {
+ if (!psetauthparm) {
kfree(pcmd);
res = _FAIL;
goto exit;
@@ -1621,11 +1616,11 @@ int rtw_set_key(struct adapter *adapter, struct security_priv *psecuritypriv, in
int res = _SUCCESS;
pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (pcmd == NULL)
+ if (!pcmd)
return _FAIL; /* try again */
psetkeyparm = kzalloc(sizeof(struct setkey_parm), GFP_KERNEL);
- if (psetkeyparm == NULL) {
+ if (!psetkeyparm) {
res = _FAIL;
goto err_free_cmd;
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index 591a9127b573..7f32b39e5869 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_MLME_EXT_C_
@@ -606,8 +601,6 @@ static void issue_probersp(struct adapter *padapter, unsigned char *da)
pattrib->last_txcmdsz = pattrib->pktlen;
dump_mgntframe(padapter, pmgntframe);
-
- return;
}
static int issue_probereq(struct adapter *padapter, struct ndis_802_11_ssid *pssid, u8 *da, bool wait_ack)
@@ -888,8 +881,6 @@ static void issue_auth(struct adapter *padapter, struct sta_info *psta,
rtw_wep_encrypt(padapter, (u8 *)pmgntframe);
DBG_88E("%s\n", __func__);
dump_mgntframe(padapter, pmgntframe);
-
- return;
}
@@ -1212,8 +1203,6 @@ exit:
rtw_buf_update(&pmlmepriv->assoc_req, &pmlmepriv->assoc_req_len, (u8 *)pwlanhdr, pattrib->pktlen);
else
rtw_buf_free(&pmlmepriv->assoc_req, &pmlmepriv->assoc_req_len);
-
- return;
}
/* when wait_ack is true, this function should be called at process context */
@@ -2105,7 +2094,6 @@ static void site_survey(struct adapter *padapter)
issue_action_BSSCoexistPacket(padapter);
issue_action_BSSCoexistPacket(padapter);
}
- return;
}
/* collect bss info from Beacon and Probe request/response frames. */
@@ -4295,12 +4283,12 @@ void report_survey_event(struct adapter *padapter,
pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (pcmd_obj == NULL)
+ if (!pcmd_obj)
return;
cmdsz = sizeof(struct survey_event) + sizeof(struct C2HEvent_Header);
pevtcmd = kzalloc(cmdsz, GFP_ATOMIC);
- if (pevtcmd == NULL) {
+ if (!pevtcmd) {
kfree(pcmd_obj);
return;
}
@@ -4332,8 +4320,6 @@ void report_survey_event(struct adapter *padapter,
rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
pmlmeext->sitesurvey_res.bss_cnt++;
-
- return;
}
void report_surveydone_event(struct adapter *padapter)
@@ -4347,12 +4333,12 @@ void report_surveydone_event(struct adapter *padapter)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (pcmd_obj == NULL)
+ if (!pcmd_obj)
return;
cmdsz = sizeof(struct surveydone_event) + sizeof(struct C2HEvent_Header);
pevtcmd = kzalloc(cmdsz, GFP_KERNEL);
- if (pevtcmd == NULL) {
+ if (!pevtcmd) {
kfree(pcmd_obj);
return;
}
@@ -4377,8 +4363,6 @@ void report_surveydone_event(struct adapter *padapter)
DBG_88E("survey done event(%x)\n", psurveydone_evt->bss_cnt);
rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
-
- return;
}
void report_join_res(struct adapter *padapter, int res)
@@ -4393,12 +4377,12 @@ void report_join_res(struct adapter *padapter, int res)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (pcmd_obj == NULL)
+ if (!pcmd_obj)
return;
cmdsz = sizeof(struct joinbss_event) + sizeof(struct C2HEvent_Header);
pevtcmd = kzalloc(cmdsz, GFP_ATOMIC);
- if (pevtcmd == NULL) {
+ if (!pevtcmd) {
kfree(pcmd_obj);
return;
}
@@ -4429,8 +4413,6 @@ void report_join_res(struct adapter *padapter, int res)
rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
-
- return;
}
void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsigned short reason)
@@ -4446,12 +4428,12 @@ void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsi
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (pcmd_obj == NULL)
+ if (!pcmd_obj)
return;
cmdsz = sizeof(struct stadel_event) + sizeof(struct C2HEvent_Header);
pevtcmd = kzalloc(cmdsz, GFP_KERNEL);
- if (pevtcmd == NULL) {
+ if (!pevtcmd) {
kfree(pcmd_obj);
return;
}
@@ -4486,8 +4468,6 @@ void report_del_sta_event(struct adapter *padapter, unsigned char *MacAddr, unsi
DBG_88E("report_del_sta_event: delete STA, mac_id =%d\n", mac_id);
rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
-
- return;
}
void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int cam_idx)
@@ -4501,12 +4481,12 @@ void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (pcmd_obj == NULL)
+ if (!pcmd_obj)
return;
cmdsz = sizeof(struct stassoc_event) + sizeof(struct C2HEvent_Header);
pevtcmd = kzalloc(cmdsz, GFP_KERNEL);
- if (pevtcmd == NULL) {
+ if (!pevtcmd) {
kfree(pcmd_obj);
return;
}
@@ -4532,8 +4512,6 @@ void report_add_sta_event(struct adapter *padapter, unsigned char *MacAddr, int
DBG_88E("report_add_sta_event: add STA\n");
rtw_enqueue_cmd(pcmdpriv, pcmd_obj);
-
- return;
}
@@ -4917,11 +4895,11 @@ void survey_timer_hdl(unsigned long data)
}
ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
goto exit_survey_timer_hdl;
psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_ATOMIC);
- if (psurveyPara == NULL) {
+ if (!psurveyPara) {
kfree(ph2c);
goto exit_survey_timer_hdl;
}
@@ -4969,7 +4947,6 @@ void link_timer_hdl(unsigned long data)
issue_assocreq(padapter);
set_link_timer(pmlmeext, REASSOC_TO);
}
- return;
}
void addba_timer_hdl(unsigned long data)
@@ -5485,7 +5462,7 @@ u8 set_tx_beacon_cmd(struct adapter *padapter)
ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
diff --git a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
index 5e1ef9fdcf47..59c6d8ab60f6 100644
--- a/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
+++ b/drivers/staging/rtl8188eu/core/rtw_pwrctrl.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_PWRCTRL_C_
diff --git a/drivers/staging/rtl8188eu/core/rtw_recv.c b/drivers/staging/rtl8188eu/core/rtw_recv.c
index 5f53aa1cfd8a..977bb2532c3e 100644
--- a/drivers/staging/rtl8188eu/core/rtw_recv.c
+++ b/drivers/staging/rtl8188eu/core/rtw_recv.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_RECV_C_
diff --git a/drivers/staging/rtl8188eu/core/rtw_rf.c b/drivers/staging/rtl8188eu/core/rtw_rf.c
index 4ad2d8f63acf..3fc1a8fd367c 100644
--- a/drivers/staging/rtl8188eu/core/rtw_rf.c
+++ b/drivers/staging/rtl8188eu/core/rtw_rf.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_RF_C_
diff --git a/drivers/staging/rtl8188eu/core/rtw_security.c b/drivers/staging/rtl8188eu/core/rtw_security.c
index b781ccf45bc0..442a614a3726 100644
--- a/drivers/staging/rtl8188eu/core/rtw_security.c
+++ b/drivers/staging/rtl8188eu/core/rtw_security.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_SECURITY_C_
diff --git a/drivers/staging/rtl8188eu/core/rtw_sreset.c b/drivers/staging/rtl8188eu/core/rtw_sreset.c
index e725a4708775..13a5bf4730ab 100644
--- a/drivers/staging/rtl8188eu/core/rtw_sreset.c
+++ b/drivers/staging/rtl8188eu/core/rtw_sreset.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#include <rtw_sreset.h>
diff --git a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
index 78a9b9bf3b32..a71e25294add 100644
--- a/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
+++ b/drivers/staging/rtl8188eu/core/rtw_sta_mgt.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_STA_MGT_C_
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index 83096696cd5b..4410fe8d7c68 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_WLAN_UTIL_C_
diff --git a/drivers/staging/rtl8188eu/core/rtw_xmit.c b/drivers/staging/rtl8188eu/core/rtw_xmit.c
index f2dd7a60f67c..e0a5567f5942 100644
--- a/drivers/staging/rtl8188eu/core/rtw_xmit.c
+++ b/drivers/staging/rtl8188eu/core/rtw_xmit.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTW_XMIT_C_
diff --git a/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c b/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
index a108e8032327..201c15b07f9e 100644
--- a/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
+++ b/drivers/staging/rtl8188eu/hal/Hal8188ERateAdaptive.c
@@ -557,7 +557,7 @@ int ODM_RAInfo_Init(struct odm_dm_struct *dm_odm, u8 macid)
u8 WirelessMode = 0xFF; /* invalid value */
u8 max_rate_idx = 0x13; /* MCS7 */
- if (dm_odm->pWirelessMode != NULL)
+ if (dm_odm->pWirelessMode)
WirelessMode = *(dm_odm->pWirelessMode);
if (WirelessMode != 0xFF) {
diff --git a/drivers/staging/rtl8188eu/hal/bb_cfg.c b/drivers/staging/rtl8188eu/hal/bb_cfg.c
index c2ad6a3b99da..cce1ea259b76 100644
--- a/drivers/staging/rtl8188eu/hal/bb_cfg.c
+++ b/drivers/staging/rtl8188eu/hal/bb_cfg.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
-* You should have received a copy of the GNU General Public License along with
-* this program; if not, write to the Free Software Foundation, Inc.,
-* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
-*
-*
******************************************************************************/
#include "odm_precomp.h"
diff --git a/drivers/staging/rtl8188eu/hal/fw.c b/drivers/staging/rtl8188eu/hal/fw.c
index 656133c47426..03d091bad13a 100644
--- a/drivers/staging/rtl8188eu/hal/fw.c
+++ b/drivers/staging/rtl8188eu/hal/fw.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/staging/rtl8188eu/hal/hal_com.c b/drivers/staging/rtl8188eu/hal/hal_com.c
index 3871cda2eec2..960cc406d238 100644
--- a/drivers/staging/rtl8188eu/hal/hal_com.c
+++ b/drivers/staging/rtl8188eu/hal/hal_com.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#include <osdep_service.h>
#include <drv_types.h>
diff --git a/drivers/staging/rtl8188eu/hal/hal_intf.c b/drivers/staging/rtl8188eu/hal/hal_intf.c
index 85c17ef942f3..085f0fbd0c43 100644
--- a/drivers/staging/rtl8188eu/hal/hal_intf.c
+++ b/drivers/staging/rtl8188eu/hal/hal_intf.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _HAL_INTF_C_
@@ -186,7 +181,7 @@ s32 rtw_hal_mgnt_xmit(struct adapter *adapt, struct xmit_frame *pmgntframe)
s32 rtw_hal_init_xmit_priv(struct adapter *adapt)
{
- if (adapt->HalFunc.init_xmit_priv != NULL)
+ if (adapt->HalFunc.init_xmit_priv)
return adapt->HalFunc.init_xmit_priv(adapt);
return _FAIL;
}
diff --git a/drivers/staging/rtl8188eu/hal/mac_cfg.c b/drivers/staging/rtl8188eu/hal/mac_cfg.c
index 0bc1b215219a..6ed5e15ce661 100644
--- a/drivers/staging/rtl8188eu/hal/mac_cfg.c
+++ b/drivers/staging/rtl8188eu/hal/mac_cfg.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
-* You should have received a copy of the GNU General Public License along with
-* this program; if not, write to the Free Software Foundation, Inc.,
-* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
-*
-*
******************************************************************************/
#include "odm_precomp.h"
diff --git a/drivers/staging/rtl8188eu/hal/odm.c b/drivers/staging/rtl8188eu/hal/odm.c
index 8d2316b9e6e5..57a127501694 100644
--- a/drivers/staging/rtl8188eu/hal/odm.c
+++ b/drivers/staging/rtl8188eu/hal/odm.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
/* include files */
diff --git a/drivers/staging/rtl8188eu/hal/odm_HWConfig.c b/drivers/staging/rtl8188eu/hal/odm_HWConfig.c
index 28b9f7f591c0..0555e42a3787 100644
--- a/drivers/staging/rtl8188eu/hal/odm_HWConfig.c
+++ b/drivers/staging/rtl8188eu/hal/odm_HWConfig.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
/* include files */
diff --git a/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c b/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c
index c0242a095c19..dd9b902c8ae3 100644
--- a/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c
+++ b/drivers/staging/rtl8188eu/hal/odm_RTL8188E.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#include "odm_precomp.h"
diff --git a/drivers/staging/rtl8188eu/hal/phy.c b/drivers/staging/rtl8188eu/hal/phy.c
index ae42b4492c77..a83bbea9be93 100644
--- a/drivers/staging/rtl8188eu/hal/phy.c
+++ b/drivers/staging/rtl8188eu/hal/phy.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTL8188E_PHYCFG_C_
diff --git a/drivers/staging/rtl8188eu/hal/pwrseq.c b/drivers/staging/rtl8188eu/hal/pwrseq.c
index 20dce42cee1d..d92a34ea8d60 100644
--- a/drivers/staging/rtl8188eu/hal/pwrseq.c
+++ b/drivers/staging/rtl8188eu/hal/pwrseq.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#include "pwrseq.h"
diff --git a/drivers/staging/rtl8188eu/hal/pwrseqcmd.c b/drivers/staging/rtl8188eu/hal/pwrseqcmd.c
index b76b0f5d6220..2867864bbfbe 100644
--- a/drivers/staging/rtl8188eu/hal/pwrseqcmd.c
+++ b/drivers/staging/rtl8188eu/hal/pwrseqcmd.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
******************************************************************************/
#include <pwrseqcmd.h>
diff --git a/drivers/staging/rtl8188eu/hal/rf.c b/drivers/staging/rtl8188eu/hal/rf.c
index 38845d17d593..1596274eefc5 100644
--- a/drivers/staging/rtl8188eu/hal/rf.c
+++ b/drivers/staging/rtl8188eu/hal/rf.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
******************************************************************************/
#include <osdep_service.h>
diff --git a/drivers/staging/rtl8188eu/hal/rf_cfg.c b/drivers/staging/rtl8188eu/hal/rf_cfg.c
index 44945427cc34..453f9e729067 100644
--- a/drivers/staging/rtl8188eu/hal/rf_cfg.c
+++ b/drivers/staging/rtl8188eu/hal/rf_cfg.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
-* You should have received a copy of the GNU General Public License along with
-* this program; if not, write to the Free Software Foundation, Inc.,
-* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
-*
-*
******************************************************************************/
#include "odm_precomp.h"
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
index 580876313e98..2422c0297a50 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_cmd.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTL8188E_CMD_C_
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
index f9919a94a77e..81f2931876f8 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_dm.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
/* */
/* Description: */
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
index 2592bc298f84..0b444fd3e550 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_hal_init.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _HAL_INIT_C_
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c b/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
index 53cf3baf46e0..f110c961df70 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_rxdesc.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTL8188E_REDESC_C_
@@ -45,7 +40,7 @@ static void process_link_qual(struct adapter *padapter,
struct rx_pkt_attrib *pattrib;
struct signal_stat *signal_stat;
- if (prframe == NULL || padapter == NULL)
+ if (!prframe || !padapter)
return;
pattrib = &prframe->attrib;
@@ -64,7 +59,7 @@ static void process_link_qual(struct adapter *padapter,
void rtl8188e_process_phy_info(struct adapter *padapter, void *prframe)
{
- struct recv_frame *precvframe = (struct recv_frame *)prframe;
+ struct recv_frame *precvframe = prframe;
/* Check RSSI */
process_rssi(padapter, precvframe);
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c
index a6ba53b488e3..460a20558bc0 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188e_xmit.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTL8188E_XMIT_C_
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c
index 564cf53bff1b..d9e677ef8f84 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_led.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#include <osdep_service.h>
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
index d6d009aafcf0..255d6f215091 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_recv.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTL8188EU_RECV_C_
#include <osdep_service.h>
diff --git a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
index c96d80487a56..ec21d8c82eba 100644
--- a/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
+++ b/drivers/staging/rtl8188eu/hal/rtl8188eu_xmit.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _RTL8188E_XMIT_C_
#include <osdep_service.h>
diff --git a/drivers/staging/rtl8188eu/hal/usb_halinit.c b/drivers/staging/rtl8188eu/hal/usb_halinit.c
index 07a61b8271f0..87ea3b844951 100644
--- a/drivers/staging/rtl8188eu/hal/usb_halinit.c
+++ b/drivers/staging/rtl8188eu/hal/usb_halinit.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _HCI_HAL_INIT_C_
@@ -62,8 +57,8 @@ static bool HalUsbSetQueuePipeMapping8188EUsb(struct adapter *adapt, u8 NumInPip
_ConfigNormalChipOutEP_8188E(adapt, NumOutPipe);
/* Normal chip with one IN and one OUT doesn't have interrupt IN EP. */
- if (1 == haldata->OutEpNumber) {
- if (1 != NumInPipe)
+ if (haldata->OutEpNumber == 1) {
+ if (NumInPipe != 1)
return result;
}
@@ -179,7 +174,7 @@ static void _InitQueueReservedPage(struct adapter *Adapter)
if (haldata->OutEpQueueSel & TX_SELE_LQ)
numLQ = 0x1C;
- /* NOTE: This step shall be proceed before writting REG_RQPN. */
+ /* NOTE: This step shall be proceed before writing REG_RQPN. */
if (haldata->OutEpQueueSel & TX_SELE_NQ)
numNQ = 0x1C;
value8 = (u8)_NPQ(numNQ);
@@ -457,7 +452,8 @@ static void _InitRetryFunction(struct adapter *Adapter)
* When Who Remark
* 12/10/2010 MHC Separate to smaller function.
*
- *---------------------------------------------------------------------------*/
+ *---------------------------------------------------------------------------
+ */
static void usb_AggSettingTxUpdate(struct adapter *Adapter)
{
struct hal_data_8188e *haldata = GET_HAL_DATA(Adapter);
@@ -489,7 +485,8 @@ static void usb_AggSettingTxUpdate(struct adapter *Adapter)
* When Who Remark
* 12/10/2010 MHC Separate to smaller function.
*
- *---------------------------------------------------------------------------*/
+ *---------------------------------------------------------------------------
+ */
static void
usb_AggSettingRxUpdate(
struct adapter *Adapter
@@ -655,7 +652,8 @@ static void _InitAntenna_Selection(struct adapter *Adapter)
* Revised History:
* When Who Remark
* 08/23/2010 MHC HW suspend mode switch test..
- *---------------------------------------------------------------------------*/
+ *---------------------------------------------------------------------------
+ */
enum rt_rf_power_state RfOnOffDetect(struct adapter *adapt)
{
u8 val8;
@@ -687,11 +685,9 @@ static u32 rtl8188eu_hal_init(struct adapter *Adapter)
#define HAL_INIT_PROFILE_TAG(stage) do {} while (0)
-
HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_BEGIN);
if (Adapter->pwrctrlpriv.bkeepfwalive) {
-
if (haldata->odmpriv.RFCalibrateInfo.bIQKInitialized) {
rtl88eu_phy_iq_calibrate(Adapter, true);
} else {
@@ -715,9 +711,8 @@ static u32 rtl8188eu_hal_init(struct adapter *Adapter)
/* Save target channel */
haldata->CurrentChannel = 6;/* default set to 6 */
- if (pwrctrlpriv->reg_rfoff) {
+ if (pwrctrlpriv->reg_rfoff)
pwrctrlpriv->rf_pwrstate = rf_off;
- }
/* 2010/08/09 MH We need to check if we need to turnon or off RF after detecting */
/* HW GPIO pin. Before PHY_RFConfig8192C. */
@@ -749,10 +744,9 @@ static u32 rtl8188eu_hal_init(struct adapter *Adapter)
DBG_88E("%s: Download Firmware failed!!\n", __func__);
Adapter->bFWReady = false;
return status;
- } else {
- RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("Initializeadapt8192CSdio(): Download Firmware Success!!\n"));
- Adapter->bFWReady = true;
}
+ RT_TRACE(_module_hci_hal_init_c_, _drv_info_, ("Initializeadapt8192CSdio(): Download Firmware Success!!\n"));
+ Adapter->bFWReady = true;
}
rtl8188e_InitializeFirmwareVars(Adapter);
@@ -878,7 +872,7 @@ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_IQK);
/* 2010/08/26 MH Merge from 8192CE. */
if (pwrctrlpriv->rf_pwrstate == rf_on) {
if (haldata->odmpriv.RFCalibrateInfo.bIQKInitialized) {
- rtl88eu_phy_iq_calibrate(Adapter, true);
+ rtl88eu_phy_iq_calibrate(Adapter, true);
} else {
rtl88eu_phy_iq_calibrate(Adapter, false);
haldata->odmpriv.RFCalibrateInfo.bIQKInitialized = true;
@@ -905,7 +899,6 @@ HAL_INIT_PROFILE_TAG(HAL_INIT_STAGES_END);
DBG_88E("%s in %dms\n", __func__,
jiffies_to_msecs(jiffies - init_start_time));
-
return status;
}
@@ -968,6 +961,7 @@ static void CardDisableRTL8188EU(struct adapter *Adapter)
haldata->bMacPwrCtrlOn = false;
Adapter->bFWReady = false;
}
+
static void rtl8192cu_hw_power_down(struct adapter *adapt)
{
/* 2010/-8/09 MH For power down module, we need to enable register block contrl reg at 0x1c. */
@@ -980,7 +974,6 @@ static void rtl8192cu_hw_power_down(struct adapter *adapt)
static u32 rtl8188eu_hal_deinit(struct adapter *Adapter)
{
-
DBG_88E("==> %s\n", __func__);
usb_write32(Adapter, REG_HIMR_88E, IMR_DISABLED_88E);
@@ -999,14 +992,14 @@ static u32 rtl8188eu_hal_deinit(struct adapter *Adapter)
}
}
return _SUCCESS;
- }
+}
static unsigned int rtl8188eu_inirp_init(struct adapter *Adapter)
{
u8 i;
struct recv_buf *precvbuf;
uint status;
- struct recv_priv *precvpriv = &(Adapter->recvpriv);
+ struct recv_priv *precvpriv = &Adapter->recvpriv;
status = _SUCCESS;
@@ -1116,7 +1109,6 @@ readAdapterInfo_8188EU(
Hal_ReadAntennaDiversity88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
Hal_EfuseParseBoardType88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
Hal_ReadThermalMeter_88E(adapt, eeprom->efuse_eeprom_data, eeprom->bautoload_fail_flag);
-
}
static void _ReadPROMContent(
@@ -1212,7 +1204,7 @@ static void hw_var_set_opmode(struct adapter *Adapter, u8 variable, u8 *val)
StopTxBeacon(Adapter);
usb_write8(Adapter, REG_BCN_CTRL, 0x19);/* disable atim wnd */
- } else if ((mode == _HW_STATE_ADHOC_)) {
+ } else if (mode == _HW_STATE_ADHOC_) {
ResumeTxBeacon(Adapter);
usb_write8(Adapter, REG_BCN_CTRL, 0x1a);
} else if (mode == _HW_STATE_AP_) {
@@ -1363,7 +1355,7 @@ static void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
{
u64 tsf;
struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
tsf = pmlmeext->TSFValue - rtw_modular64(pmlmeext->TSFValue, (pmlmeinfo->bcn_interval*1024)) - 1024; /* us */
@@ -1420,7 +1412,7 @@ static void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
usb_write8(Adapter, REG_BCN_CTRL, usb_read8(Adapter, REG_BCN_CTRL) | BIT(4));
} else { /* sitesurvey done */
struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
if ((is_client_associated_to_ap(Adapter)) ||
((pmlmeinfo->state&0x03) == WIFI_FW_ADHOC_STATE)) {
@@ -1490,7 +1482,7 @@ static void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
{
u8 u1bAIFS, aSifsTime;
struct mlme_ext_priv *pmlmeext = &Adapter->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
usb_write8(Adapter, REG_SLOT, val[0]);
@@ -1790,7 +1782,7 @@ static void SetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
}
break;
case HW_VAR_H2C_MEDIA_STATUS_RPT:
- rtl8188e_set_FwMediaStatus_cmd(Adapter , (*(__le16 *)val));
+ rtl8188e_set_FwMediaStatus_cmd(Adapter, (*(__le16 *)val));
break;
case HW_VAR_BCN_VALID:
/* BCN_VALID, BIT16 of REG_TDECTRL = BIT0 of REG_TDECTRL+2, write 1 to clear, Clear by sw */
@@ -1855,7 +1847,6 @@ static void GetHwReg8188EU(struct adapter *Adapter, u8 variable, u8 *val)
default:
break;
}
-
}
/* */
@@ -1904,19 +1895,19 @@ GetHalDefVar8188EUsb(
case HAL_DEF_RA_DECISION_RATE:
{
u8 MacID = *((u8 *)pValue);
- *((u8 *)pValue) = ODM_RA_GetDecisionRate_8188E(&(haldata->odmpriv), MacID);
+ *((u8 *)pValue) = ODM_RA_GetDecisionRate_8188E(&haldata->odmpriv, MacID);
}
break;
case HAL_DEF_RA_SGI:
{
u8 MacID = *((u8 *)pValue);
- *((u8 *)pValue) = ODM_RA_GetShortGI_8188E(&(haldata->odmpriv), MacID);
+ *((u8 *)pValue) = ODM_RA_GetShortGI_8188E(&haldata->odmpriv, MacID);
}
break;
case HAL_DEF_PT_PWR_STATUS:
{
u8 MacID = *((u8 *)pValue);
- *((u8 *)pValue) = ODM_RA_GetHwPwrStatus_8188E(&(haldata->odmpriv), MacID);
+ *((u8 *)pValue) = ODM_RA_GetHwPwrStatus_8188E(&haldata->odmpriv, MacID);
}
break;
case HW_VAR_MAX_RX_AMPDU_FACTOR:
@@ -1939,7 +1930,7 @@ GetHalDefVar8188EUsb(
break;
case HW_DEF_ODM_DBG_FLAG:
{
- struct odm_dm_struct *dm_ocm = &(haldata->odmpriv);
+ struct odm_dm_struct *dm_ocm = &haldata->odmpriv;
pr_info("dm_ocm->DebugComponents = 0x%llx\n", dm_ocm->DebugComponents);
}
break;
@@ -1967,8 +1958,8 @@ static void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_l
struct sta_info *psta;
struct hal_data_8188e *haldata = GET_HAL_DATA(adapt);
struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
- struct wlan_bssid_ex *cur_network = &(pmlmeinfo->network);
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
+ struct wlan_bssid_ex *cur_network = &pmlmeinfo->network;
if (mac_id >= NUM_STA) /* CAM_SIZE */
return;
@@ -1981,8 +1972,8 @@ static void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_l
networkType = judge_network_type(adapt, cur_network->SupportedRates, supportRateNum) & 0xf;
raid = networktype_to_raid(networkType);
mask = update_supported_rate(cur_network->SupportedRates, supportRateNum);
- mask |= (pmlmeinfo->HT_enable) ? update_MSC_rate(&(pmlmeinfo->HT_caps)) : 0;
- if (support_short_GI(adapt, &(pmlmeinfo->HT_caps)))
+ mask |= (pmlmeinfo->HT_enable) ? update_MSC_rate(&pmlmeinfo->HT_caps) : 0;
+ if (support_short_GI(adapt, &pmlmeinfo->HT_caps))
shortGIrate = true;
break;
case 1:/* for broadcast/multicast */
@@ -2023,8 +2014,8 @@ static void UpdateHalRAMask8188EUsb(struct adapter *adapt, u32 mac_id, u8 rssi_l
static void SetBeaconRelatedRegisters8188EUsb(struct adapter *adapt)
{
u32 value32;
- struct mlme_ext_priv *pmlmeext = &(adapt->mlmeextpriv);
- struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
+ struct mlme_ext_priv *pmlmeext = &adapt->mlmeextpriv;
+ struct mlme_ext_info *pmlmeinfo = &pmlmeext->mlmext_info;
u32 bcn_ctrl_reg = REG_BCN_CTRL;
/* reset TSF, enable update TSF, correcting TSF On Beacon */
@@ -2081,9 +2072,8 @@ void rtl8188eu_set_hal_ops(struct adapter *adapt)
{
struct hal_ops *halfunc = &adapt->HalFunc;
-
- adapt->HalData = kzalloc(sizeof(struct hal_data_8188e), GFP_KERNEL);
- if (adapt->HalData == NULL)
+ adapt->HalData = kzalloc(sizeof(*adapt->HalData), GFP_KERNEL);
+ if (!adapt->HalData)
DBG_88E("cant not alloc memory for HAL DATA\n");
halfunc->hal_power_on = rtl8188eu_InitPowerOn;
diff --git a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
index 2670d6b6a79e..8990748a1919 100644
--- a/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
+++ b/drivers/staging/rtl8188eu/include/Hal8188EPhyCfg.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __INC_HAL8188EPHYCFG_H__
#define __INC_HAL8188EPHYCFG_H__
diff --git a/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h b/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h
index 9f2969bf8355..344c73d1081b 100644
--- a/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h
+++ b/drivers/staging/rtl8188eu/include/Hal8188EPhyReg.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __INC_HAL8188EPHYREG_H__
#define __INC_HAL8188EPHYREG_H__
diff --git a/drivers/staging/rtl8188eu/include/HalHWImg8188E_FW.h b/drivers/staging/rtl8188eu/include/HalHWImg8188E_FW.h
index 1bf9bc70a696..dbb55247b0c6 100644
--- a/drivers/staging/rtl8188eu/include/HalHWImg8188E_FW.h
+++ b/drivers/staging/rtl8188eu/include/HalHWImg8188E_FW.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
-* You should have received a copy of the GNU General Public License along with
-* this program; if not, write to the Free Software Foundation, Inc.,
-* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
-*
-*
******************************************************************************/
#ifndef __INC_FW_8188E_HW_IMG_H
diff --git a/drivers/staging/rtl8188eu/include/HalVerDef.h b/drivers/staging/rtl8188eu/include/HalVerDef.h
index 6f2b2a436b04..d244efff3593 100644
--- a/drivers/staging/rtl8188eu/include/HalVerDef.h
+++ b/drivers/staging/rtl8188eu/include/HalVerDef.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __HAL_VERSION_DEF_H__
#define __HAL_VERSION_DEF_H__
diff --git a/drivers/staging/rtl8188eu/include/basic_types.h b/drivers/staging/rtl8188eu/include/basic_types.h
index 3fb691daa5af..2c1676d2ac6e 100644
--- a/drivers/staging/rtl8188eu/include/basic_types.h
+++ b/drivers/staging/rtl8188eu/include/basic_types.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __BASIC_TYPES_H__
#define __BASIC_TYPES_H__
diff --git a/drivers/staging/rtl8188eu/include/drv_types.h b/drivers/staging/rtl8188eu/include/drv_types.h
index dcb032b6c3a7..55506a7da1a4 100644
--- a/drivers/staging/rtl8188eu/include/drv_types.h
+++ b/drivers/staging/rtl8188eu/include/drv_types.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
/*-----------------------------------------------------------------------------
diff --git a/drivers/staging/rtl8188eu/include/fw.h b/drivers/staging/rtl8188eu/include/fw.h
index 7884d8f65763..b016f32a8992 100644
--- a/drivers/staging/rtl8188eu/include/fw.h
+++ b/drivers/staging/rtl8188eu/include/fw.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
diff --git a/drivers/staging/rtl8188eu/include/hal_com.h b/drivers/staging/rtl8188eu/include/hal_com.h
index 47715d949d54..aaf444733507 100644
--- a/drivers/staging/rtl8188eu/include/hal_com.h
+++ b/drivers/staging/rtl8188eu/include/hal_com.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __HAL_COMMON_H__
#define __HAL_COMMON_H__
diff --git a/drivers/staging/rtl8188eu/include/hal_intf.h b/drivers/staging/rtl8188eu/include/hal_intf.h
index 1b1c10292456..eaf939bd4103 100644
--- a/drivers/staging/rtl8188eu/include/hal_intf.h
+++ b/drivers/staging/rtl8188eu/include/hal_intf.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __HAL_INTF_H__
#define __HAL_INTF_H__
diff --git a/drivers/staging/rtl8188eu/include/ieee80211.h b/drivers/staging/rtl8188eu/include/ieee80211.h
index f8f5eb6b7976..d8284c84f09c 100644
--- a/drivers/staging/rtl8188eu/include/ieee80211.h
+++ b/drivers/staging/rtl8188eu/include/ieee80211.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __IEEE80211_H
#define __IEEE80211_H
diff --git a/drivers/staging/rtl8188eu/include/mlme_osdep.h b/drivers/staging/rtl8188eu/include/mlme_osdep.h
index ae1722c67032..5a35b0866db6 100644
--- a/drivers/staging/rtl8188eu/include/mlme_osdep.h
+++ b/drivers/staging/rtl8188eu/include/mlme_osdep.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __MLME_OSDEP_H_
#define __MLME_OSDEP_H_
diff --git a/drivers/staging/rtl8188eu/include/mp_custom_oid.h b/drivers/staging/rtl8188eu/include/mp_custom_oid.h
index 6fa52cf99c4e..1a06ee6ad460 100644
--- a/drivers/staging/rtl8188eu/include/mp_custom_oid.h
+++ b/drivers/staging/rtl8188eu/include/mp_custom_oid.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __CUSTOM_OID_H
#define __CUSTOM_OID_H
diff --git a/drivers/staging/rtl8188eu/include/odm.h b/drivers/staging/rtl8188eu/include/odm.h
index af781c7cd3a5..dbebf17f36d3 100644
--- a/drivers/staging/rtl8188eu/include/odm.h
+++ b/drivers/staging/rtl8188eu/include/odm.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
diff --git a/drivers/staging/rtl8188eu/include/odm_HWConfig.h b/drivers/staging/rtl8188eu/include/odm_HWConfig.h
index ef792bfd535e..da7325d599c6 100644
--- a/drivers/staging/rtl8188eu/include/odm_HWConfig.h
+++ b/drivers/staging/rtl8188eu/include/odm_HWConfig.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
*
******************************************************************************/
diff --git a/drivers/staging/rtl8188eu/include/odm_RTL8188E.h b/drivers/staging/rtl8188eu/include/odm_RTL8188E.h
index 14dce6c4b1bc..72b4db67ac33 100644
--- a/drivers/staging/rtl8188eu/include/odm_RTL8188E.h
+++ b/drivers/staging/rtl8188eu/include/odm_RTL8188E.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __ODM_RTL8188E_H__
#define __ODM_RTL8188E_H__
diff --git a/drivers/staging/rtl8188eu/include/odm_RegDefine11N.h b/drivers/staging/rtl8188eu/include/odm_RegDefine11N.h
index 5a61f902bc1b..c82c09013487 100644
--- a/drivers/staging/rtl8188eu/include/odm_RegDefine11N.h
+++ b/drivers/staging/rtl8188eu/include/odm_RegDefine11N.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __ODM_REGDEFINE11N_H__
diff --git a/drivers/staging/rtl8188eu/include/odm_debug.h b/drivers/staging/rtl8188eu/include/odm_debug.h
index e9390963d6ff..52e51f19f752 100644
--- a/drivers/staging/rtl8188eu/include/odm_debug.h
+++ b/drivers/staging/rtl8188eu/include/odm_debug.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
diff --git a/drivers/staging/rtl8188eu/include/odm_precomp.h b/drivers/staging/rtl8188eu/include/odm_precomp.h
index 0f236da09277..9e5fe1777e6c 100644
--- a/drivers/staging/rtl8188eu/include/odm_precomp.h
+++ b/drivers/staging/rtl8188eu/include/odm_precomp.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __ODM_PRECOMP_H__
diff --git a/drivers/staging/rtl8188eu/include/odm_reg.h b/drivers/staging/rtl8188eu/include/odm_reg.h
index 7f10b695cf9d..3405a44a19ed 100644
--- a/drivers/staging/rtl8188eu/include/odm_reg.h
+++ b/drivers/staging/rtl8188eu/include/odm_reg.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
/* */
/* File Name: odm_reg.h */
diff --git a/drivers/staging/rtl8188eu/include/odm_types.h b/drivers/staging/rtl8188eu/include/odm_types.h
index c1355b959c55..3474a9c72640 100644
--- a/drivers/staging/rtl8188eu/include/odm_types.h
+++ b/drivers/staging/rtl8188eu/include/odm_types.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __ODM_TYPES_H__
#define __ODM_TYPES_H__
diff --git a/drivers/staging/rtl8188eu/include/osdep_intf.h b/drivers/staging/rtl8188eu/include/osdep_intf.h
index 1521744d626c..54fca79827e3 100644
--- a/drivers/staging/rtl8188eu/include/osdep_intf.h
+++ b/drivers/staging/rtl8188eu/include/osdep_intf.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __OSDEP_INTF_H_
diff --git a/drivers/staging/rtl8188eu/include/osdep_service.h b/drivers/staging/rtl8188eu/include/osdep_service.h
index 22de53d6539a..5475956c5ee5 100644
--- a/drivers/staging/rtl8188eu/include/osdep_service.h
+++ b/drivers/staging/rtl8188eu/include/osdep_service.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __OSDEP_SERVICE_H_
#define __OSDEP_SERVICE_H_
diff --git a/drivers/staging/rtl8188eu/include/pwrseq.h b/drivers/staging/rtl8188eu/include/pwrseq.h
index 9dbf8435f147..afd61cf4cb15 100644
--- a/drivers/staging/rtl8188eu/include/pwrseq.h
+++ b/drivers/staging/rtl8188eu/include/pwrseq.h
@@ -12,11 +12,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __HAL8188EPWRSEQ_H__
diff --git a/drivers/staging/rtl8188eu/include/pwrseqcmd.h b/drivers/staging/rtl8188eu/include/pwrseqcmd.h
index 468a3fb28e00..c4a919ea17ea 100644
--- a/drivers/staging/rtl8188eu/include/pwrseqcmd.h
+++ b/drivers/staging/rtl8188eu/include/pwrseqcmd.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __HALPWRSEQCMD_H__
#define __HALPWRSEQCMD_H__
diff --git a/drivers/staging/rtl8188eu/include/recv_osdep.h b/drivers/staging/rtl8188eu/include/recv_osdep.h
index fdeb603b6cc1..cad31587c30a 100644
--- a/drivers/staging/rtl8188eu/include/recv_osdep.h
+++ b/drivers/staging/rtl8188eu/include/recv_osdep.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RECV_OSDEP_H_
#define __RECV_OSDEP_H_
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h b/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h
index f813ce0563f8..4d7d804658c2 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_cmd.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTL8188E_CMD_H__
#define __RTL8188E_CMD_H__
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_dm.h b/drivers/staging/rtl8188eu/include/rtl8188e_dm.h
index 5e0ac31ef464..4190112a50bf 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_dm.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_dm.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTL8188E_DM_H__
#define __RTL8188E_DM_H__
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
index 9f5050e6f6ab..9dd5c293a54b 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_hal.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTL8188E_HAL_H__
#define __RTL8188E_HAL_H__
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_led.h b/drivers/staging/rtl8188eu/include/rtl8188e_led.h
index c0147e73cd8c..fca6d8c81e90 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_led.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_led.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTL8188E_LED_H__
#define __RTL8188E_LED_H__
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
index 5fed30d389a2..54048bc826e5 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_recv.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTL8188E_RECV_H__
#define __RTL8188E_RECV_H__
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_spec.h b/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
index beeee4a6b0bc..fb82f663b1f5 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_spec.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
*******************************************************************************/
#ifndef __RTL8188E_SPEC_H__
#define __RTL8188E_SPEC_H__
diff --git a/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h b/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
index 0b96d42e290b..65a63df2077f 100644
--- a/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
+++ b/drivers/staging/rtl8188eu/include/rtl8188e_xmit.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTL8188E_XMIT_H__
#define __RTL8188E_XMIT_H__
diff --git a/drivers/staging/rtl8188eu/include/rtw_android.h b/drivers/staging/rtl8188eu/include/rtw_android.h
index e85bf1ff01f8..e81ee92b0ae2 100644
--- a/drivers/staging/rtl8188eu/include/rtw_android.h
+++ b/drivers/staging/rtl8188eu/include/rtw_android.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_ANDROID_H__
diff --git a/drivers/staging/rtl8188eu/include/rtw_ap.h b/drivers/staging/rtl8188eu/include/rtw_ap.h
index 6128ccce91ba..b820684bc3fe 100644
--- a/drivers/staging/rtl8188eu/include/rtw_ap.h
+++ b/drivers/staging/rtl8188eu/include/rtw_ap.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_AP_H_
#define __RTW_AP_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_cmd.h b/drivers/staging/rtl8188eu/include/rtw_cmd.h
index 9e9f5f4af8f1..08ca59217cb7 100644
--- a/drivers/staging/rtl8188eu/include/rtw_cmd.h
+++ b/drivers/staging/rtl8188eu/include/rtw_cmd.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_CMD_H_
#define __RTW_CMD_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_debug.h b/drivers/staging/rtl8188eu/include/rtw_debug.h
index 971bf457f32d..7ed4cada7efa 100644
--- a/drivers/staging/rtl8188eu/include/rtw_debug.h
+++ b/drivers/staging/rtl8188eu/include/rtw_debug.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_DEBUG_H__
#define __RTW_DEBUG_H__
diff --git a/drivers/staging/rtl8188eu/include/rtw_eeprom.h b/drivers/staging/rtl8188eu/include/rtw_eeprom.h
index 904fea1fad6c..5dd73841dd9e 100644
--- a/drivers/staging/rtl8188eu/include/rtw_eeprom.h
+++ b/drivers/staging/rtl8188eu/include/rtw_eeprom.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_EEPROM_H__
#define __RTW_EEPROM_H__
diff --git a/drivers/staging/rtl8188eu/include/rtw_efuse.h b/drivers/staging/rtl8188eu/include/rtw_efuse.h
index 5660eed7196b..9bfb10c302b5 100644
--- a/drivers/staging/rtl8188eu/include/rtw_efuse.h
+++ b/drivers/staging/rtl8188eu/include/rtw_efuse.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_EFUSE_H__
#define __RTW_EFUSE_H__
diff --git a/drivers/staging/rtl8188eu/include/rtw_event.h b/drivers/staging/rtl8188eu/include/rtw_event.h
index 52151dc4495a..5c34e567d341 100644
--- a/drivers/staging/rtl8188eu/include/rtw_event.h
+++ b/drivers/staging/rtl8188eu/include/rtw_event.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef _RTW_EVENT_H_
#define _RTW_EVENT_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_ht.h b/drivers/staging/rtl8188eu/include/rtw_ht.h
index beb210b37083..b45483fd069f 100644
--- a/drivers/staging/rtl8188eu/include/rtw_ht.h
+++ b/drivers/staging/rtl8188eu/include/rtw_ht.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef _RTW_HT_H_
#define _RTW_HT_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_ioctl.h b/drivers/staging/rtl8188eu/include/rtw_ioctl.h
index ee2cb54a7552..3a652df4b26c 100644
--- a/drivers/staging/rtl8188eu/include/rtw_ioctl.h
+++ b/drivers/staging/rtl8188eu/include/rtw_ioctl.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef _RTW_IOCTL_H_
#define _RTW_IOCTL_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_ioctl_rtl.h b/drivers/staging/rtl8188eu/include/rtw_ioctl_rtl.h
index 8fa3858cb776..da4949f94f4c 100644
--- a/drivers/staging/rtl8188eu/include/rtw_ioctl_rtl.h
+++ b/drivers/staging/rtl8188eu/include/rtw_ioctl_rtl.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef _RTW_IOCTL_RTL_H_
#define _RTW_IOCTL_RTL_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_ioctl_set.h b/drivers/staging/rtl8188eu/include/rtw_ioctl_set.h
index fa9d655eaab9..b6e14a8b7a11 100644
--- a/drivers/staging/rtl8188eu/include/rtw_ioctl_set.h
+++ b/drivers/staging/rtl8188eu/include/rtw_ioctl_set.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_IOCTL_SET_H_
#define __RTW_IOCTL_SET_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_iol.h b/drivers/staging/rtl8188eu/include/rtw_iol.h
index 68aae7f0b02f..1f324e68d2ae 100644
--- a/drivers/staging/rtl8188eu/include/rtw_iol.h
+++ b/drivers/staging/rtl8188eu/include/rtw_iol.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_IOL_H_
#define __RTW_IOL_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme.h b/drivers/staging/rtl8188eu/include/rtw_mlme.h
index 4c992573e3ca..5d8bce0f58db 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_MLME_H_
#define __RTW_MLME_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
index 44711332b90c..27382ff24a84 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mlme_ext.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_MLME_EXT_H_
#define __RTW_MLME_EXT_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h b/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h
index 30fd17f23bf1..02b300217185 100644
--- a/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h
+++ b/drivers/staging/rtl8188eu/include/rtw_mp_phy_regdef.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
/*****************************************************************************
*
diff --git a/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h b/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
index a493d4c37ef1..9680e2eab62f 100644
--- a/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
+++ b/drivers/staging/rtl8188eu/include/rtw_pwrctrl.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_PWRCTRL_H_
#define __RTW_PWRCTRL_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_qos.h b/drivers/staging/rtl8188eu/include/rtw_qos.h
index bbee1ddc00bb..45a77f6f8427 100644
--- a/drivers/staging/rtl8188eu/include/rtw_qos.h
+++ b/drivers/staging/rtl8188eu/include/rtw_qos.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef _RTW_QOS_H_
#define _RTW_QOS_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_recv.h b/drivers/staging/rtl8188eu/include/rtw_recv.h
index eb1ac3d03123..b0373b6216d6 100644
--- a/drivers/staging/rtl8188eu/include/rtw_recv.h
+++ b/drivers/staging/rtl8188eu/include/rtw_recv.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef _RTW_RECV_H_
#define _RTW_RECV_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_rf.h b/drivers/staging/rtl8188eu/include/rtw_rf.h
index 35f61be12acd..66896af02042 100644
--- a/drivers/staging/rtl8188eu/include/rtw_rf.h
+++ b/drivers/staging/rtl8188eu/include/rtw_rf.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_RF_H_
#define __RTW_RF_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_security.h b/drivers/staging/rtl8188eu/include/rtw_security.h
index a1aebe6c8452..ca1247bce6e3 100644
--- a/drivers/staging/rtl8188eu/include/rtw_security.h
+++ b/drivers/staging/rtl8188eu/include/rtw_security.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __RTW_SECURITY_H_
#define __RTW_SECURITY_H_
diff --git a/drivers/staging/rtl8188eu/include/rtw_sreset.h b/drivers/staging/rtl8188eu/include/rtw_sreset.h
index 3a62ed010875..ce027dfdecc5 100644
--- a/drivers/staging/rtl8188eu/include/rtw_sreset.h
+++ b/drivers/staging/rtl8188eu/include/rtw_sreset.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef _RTW_SRESET_C_
#define _RTW_SRESET_C_
diff --git a/drivers/staging/rtl8188eu/include/rtw_xmit.h b/drivers/staging/rtl8188eu/include/rtw_xmit.h
index b7c20883d355..a0853bab3edb 100644
--- a/drivers/staging/rtl8188eu/include/rtw_xmit.h
+++ b/drivers/staging/rtl8188eu/include/rtw_xmit.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef _RTW_XMIT_H_
#define _RTW_XMIT_H_
diff --git a/drivers/staging/rtl8188eu/include/sta_info.h b/drivers/staging/rtl8188eu/include/sta_info.h
index d4e78326fc8d..42a035123365 100644
--- a/drivers/staging/rtl8188eu/include/sta_info.h
+++ b/drivers/staging/rtl8188eu/include/sta_info.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __STA_INFO_H_
#define __STA_INFO_H_
diff --git a/drivers/staging/rtl8188eu/include/usb_hal.h b/drivers/staging/rtl8188eu/include/usb_hal.h
index 8a65995d5e48..b1bf07a9013e 100644
--- a/drivers/staging/rtl8188eu/include/usb_hal.h
+++ b/drivers/staging/rtl8188eu/include/usb_hal.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __USB_HAL_H__
#define __USB_HAL_H__
diff --git a/drivers/staging/rtl8188eu/include/usb_ops_linux.h b/drivers/staging/rtl8188eu/include/usb_ops_linux.h
index 4fdc536cba79..220733314f8b 100644
--- a/drivers/staging/rtl8188eu/include/usb_ops_linux.h
+++ b/drivers/staging/rtl8188eu/include/usb_ops_linux.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __USB_OPS_LINUX_H__
#define __USB_OPS_LINUX_H__
diff --git a/drivers/staging/rtl8188eu/include/wifi.h b/drivers/staging/rtl8188eu/include/wifi.h
index 6cb5beca1672..e7c512183619 100644
--- a/drivers/staging/rtl8188eu/include/wifi.h
+++ b/drivers/staging/rtl8188eu/include/wifi.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef _WIFI_H_
#define _WIFI_H_
diff --git a/drivers/staging/rtl8188eu/include/wlan_bssdef.h b/drivers/staging/rtl8188eu/include/wlan_bssdef.h
index 85b99da49a2d..560966cd7dfe 100644
--- a/drivers/staging/rtl8188eu/include/wlan_bssdef.h
+++ b/drivers/staging/rtl8188eu/include/wlan_bssdef.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __WLAN_BSSDEF_H__
#define __WLAN_BSSDEF_H__
diff --git a/drivers/staging/rtl8188eu/include/xmit_osdep.h b/drivers/staging/rtl8188eu/include/xmit_osdep.h
index 13965f2489db..f96ca6af934d 100644
--- a/drivers/staging/rtl8188eu/include/xmit_osdep.h
+++ b/drivers/staging/rtl8188eu/include/xmit_osdep.h
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#ifndef __XMIT_OSDEP_H_
#define __XMIT_OSDEP_H_
diff --git a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
index 911980495fb2..5672f014cc46 100644
--- a/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/ioctl_linux.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _IOCTL_LINUX_C_
@@ -2120,13 +2115,13 @@ static u8 set_pairwise_key(struct adapter *padapter, struct sta_info *psta)
u8 res = _SUCCESS;
ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (ph2c == NULL) {
+ if (!ph2c) {
res = _FAIL;
goto exit;
}
psetstakey_para = kzalloc(sizeof(struct set_stakey_parm), GFP_KERNEL);
- if (psetstakey_para == NULL) {
+ if (!psetstakey_para) {
kfree(ph2c);
res = _FAIL;
goto exit;
@@ -2158,12 +2153,12 @@ static int set_group_key(struct adapter *padapter, u8 *key, u8 alg, int keyid)
DBG_88E("%s\n", __func__);
pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL);
- if (pcmd == NULL) {
+ if (!pcmd) {
res = _FAIL;
goto exit;
}
psetkeyparm = kzalloc(sizeof(struct setkey_parm), GFP_KERNEL);
- if (psetkeyparm == NULL) {
+ if (!psetkeyparm) {
kfree(pcmd);
res = _FAIL;
goto exit;
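
Besides the header cleanup, the two ioctl_linux.c hunks above convert "ptr == NULL" tests to the shorter "!ptr" form that checkpatch prefers. The surrounding logic is the usual paired-allocation pattern: allocate a command object, then its parameter block, and free the first if the second allocation fails. A minimal sketch of that pattern, with struct and function names invented for illustration rather than taken from the driver:

#include <linux/errno.h>
#include <linux/slab.h>

/*
 * Hypothetical sketch only: allocate a command object plus its
 * parameter block, undoing the first allocation when the second one
 * fails.  The new lines use the "if (!ptr)" spelling checkpatch wants.
 */
struct demo_cmd { void *parm; };
struct demo_parm { int keyid; };

static int demo_build_cmd(struct demo_cmd **out)
{
        struct demo_cmd *cmd;
        struct demo_parm *parm;

        cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
        if (!cmd)
                return -ENOMEM;

        parm = kzalloc(sizeof(*parm), GFP_KERNEL);
        if (!parm) {
                kfree(cmd);        /* roll back the earlier allocation */
                return -ENOMEM;
        }

        cmd->parm = parm;
        *out = cmd;
        return 0;
}
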
diff --git a/drivers/staging/rtl8188eu/os_dep/mlme_linux.c b/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
index 08bfa76f4975..bc756267c7fc 100644
--- a/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/mlme_linux.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
diff --git a/drivers/staging/rtl8188eu/os_dep/os_intfs.c b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
index 7986e678521a..ae2caff030f1 100644
--- a/drivers/staging/rtl8188eu/os_dep/os_intfs.c
+++ b/drivers/staging/rtl8188eu/os_dep/os_intfs.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _OS_INTFS_C_
diff --git a/drivers/staging/rtl8188eu/os_dep/osdep_service.c b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
index f090bef59594..764250b4ba86 100644
--- a/drivers/staging/rtl8188eu/os_dep/osdep_service.c
+++ b/drivers/staging/rtl8188eu/os_dep/osdep_service.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
diff --git a/drivers/staging/rtl8188eu/os_dep/recv_linux.c b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
index d4734baffc8a..0c44914ea3e6 100644
--- a/drivers/staging/rtl8188eu/os_dep/recv_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/recv_linux.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#include <osdep_service.h>
#include <drv_types.h>
diff --git a/drivers/staging/rtl8188eu/os_dep/rtw_android.c b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
index 5f3337c281ee..41e1b1d15b81 100644
--- a/drivers/staging/rtl8188eu/os_dep/rtw_android.c
+++ b/drivers/staging/rtl8188eu/os_dep/rtw_android.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#include <linux/module.h>
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 794cc114348c..11d51a30170f 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define pr_fmt(fmt) "R8188EU: " fmt
@@ -65,7 +60,7 @@ static struct dvobj_priv *usb_dvobj_init(struct usb_interface *usb_intf)
struct usb_device *pusbd;
pdvobjpriv = kzalloc(sizeof(*pdvobjpriv), GFP_KERNEL);
- if (pdvobjpriv == NULL)
+ if (!pdvobjpriv)
return NULL;
pdvobjpriv->pusbintf = usb_intf;
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
index 0fea338d7313..ce1e1a135f1b 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_ops_linux.c
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
******************************************************************************/
#define _USB_OPS_LINUX_C_
diff --git a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
index 1593e280e060..221e2750652e 100644
--- a/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
+++ b/drivers/staging/rtl8188eu/os_dep/xmit_linux.c
@@ -11,11 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
- *
******************************************************************************/
#define _XMIT_OSDEP_C_
diff --git a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
index f18fc0b6775b..051c2be842d0 100644
--- a/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
+++ b/drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
@@ -746,7 +746,7 @@ static void RxReorderIndicatePacket(struct ieee80211_device *ieee,
// Indicate packets
if(index>REORDER_WIN_SIZE){
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Rx Reorer buffer full!! \n");
+ IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Rx Reorder buffer full!! \n");
kfree(prxbIndicateArray);
return;
}
diff --git a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
index 148d0d45547b..6033502eff3d 100644
--- a/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
+++ b/drivers/staging/rtl8192u/ieee80211/rtl819x_TSProc.c
@@ -75,7 +75,7 @@ static void RxPktPendingTimeout(unsigned long data)
// Indicate packets
if(index > REORDER_WIN_SIZE){
- IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Rx Reorer buffer full!! \n");
+ IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Rx Reorder buffer full!! \n");
spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags);
return;
}
diff --git a/drivers/staging/rtl8192u/r8190_rtl8256.c b/drivers/staging/rtl8192u/r8190_rtl8256.c
index 5c3bb3be2720..d733fb2ade91 100644
--- a/drivers/staging/rtl8192u/r8190_rtl8256.c
+++ b/drivers/staging/rtl8192u/r8190_rtl8256.c
@@ -194,7 +194,7 @@ void phy_RF8256_Config_ParaFile(struct net_device *dev)
break;
}
- /*----Restore RFENV control type----*/;
+ /*----Restore RFENV control type----*/
switch (eRFPath) {
case RF90_PATH_A:
case RF90_PATH_C:
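
The r8190_rtl8256.c change is a one-character fix: the semicolon after the block comment formed an empty statement, which checkpatch reports as a stray null statement. Illustrative contrast (wrapper function is made up):

static void demo(void)
{
        /*----Restore RFENV control type----*/;  /* old: trailing ';' is an empty statement */
        /*----Restore RFENV control type----*/   /* new: comment only */
}
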
diff --git a/drivers/staging/rtl8192u/r8192U_core.c b/drivers/staging/rtl8192u/r8192U_core.c
index 4af0140c6ead..8c1d73719147 100644
--- a/drivers/staging/rtl8192u/r8192U_core.c
+++ b/drivers/staging/rtl8192u/r8192U_core.c
@@ -235,7 +235,6 @@ static void CamResetAllEntry(struct net_device *dev)
*/
ulcommand |= BIT(31) | BIT(30);
write_nic_dword(dev, RWCAM, ulcommand);
-
}
@@ -298,6 +297,7 @@ int read_nic_byte_E(struct net_device *dev, int indx, u8 *data)
return 0;
}
+
/* as 92U has extend page from 4 to 16, so modify functions below. */
void write_nic_byte(struct net_device *dev, int indx, u8 data)
{
@@ -319,14 +319,11 @@ void write_nic_byte(struct net_device *dev, int indx, u8 data)
if (status < 0)
netdev_err(dev, "write_nic_byte TimeOut! status: %d\n", status);
-
-
}
void write_nic_word(struct net_device *dev, int indx, u16 data)
{
-
int status;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
@@ -345,13 +342,11 @@ void write_nic_word(struct net_device *dev, int indx, u16 data)
if (status < 0)
netdev_err(dev, "write_nic_word TimeOut! status: %d\n", status);
-
}
void write_nic_dword(struct net_device *dev, int indx, u32 data)
{
-
int status;
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
@@ -372,7 +367,6 @@ void write_nic_dword(struct net_device *dev, int indx, u32 data)
if (status < 0)
netdev_err(dev, "write_nic_dword TimeOut! status: %d\n",
status);
-
}
@@ -738,7 +732,6 @@ void rtl8192_update_msr(struct net_device *dev)
* master (see the create BSS/IBSS func)
*/
if (priv->ieee80211->state == IEEE80211_LINKED) {
-
if (priv->ieee80211->iw_mode == IW_MODE_INFRA)
msr |= (MSR_LINK_MANAGED << MSR_LINK_SHIFT);
else if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
@@ -773,11 +766,10 @@ static void rtl8192_rx_isr(struct urb *urb);
static u32 get_rxpacket_shiftbytes_819xusb(struct ieee80211_rx_stats *pstats)
{
-
return (sizeof(rx_desc_819x_usb) + pstats->RxDrvInfoSize
+ pstats->RxBufShift);
-
}
+
static int rtl8192_rx_initiate(struct net_device *dev)
{
struct r8192_priv *priv = (struct r8192_priv *)ieee80211_priv(dev);
@@ -874,6 +866,7 @@ void rtl8192_set_rxconf(struct net_device *dev)
write_nic_dword(dev, RCR, rxconf);
}
+
/* wait to be removed */
void rtl8192_rx_enable(struct net_device *dev)
{
@@ -943,9 +936,9 @@ inline u16 ieeerate2rtlrate(int rate)
return 11;
default:
return 3;
-
}
}
+
static u16 rtl_rate[] = {10, 20, 55, 110, 60, 90, 120, 180, 240, 360, 480, 540};
inline u16 rtl8192_rate2rate(short rate)
{
@@ -1050,7 +1043,7 @@ static void rtl8192_hard_data_xmit(struct sk_buff *skb, struct net_device *dev,
spin_lock_irqsave(&priv->tx_lock, flags);
- memcpy((unsigned char *)(skb->cb), &dev, sizeof(dev));
+ *(struct net_device **)(skb->cb) = dev;
tcb_desc->bTxEnableFwCalcDur = 1;
skb_push(skb, priv->ieee80211->tx_headroom);
ret = rtl8192_tx(dev, skb);
@@ -1100,7 +1093,7 @@ static void rtl8192_tx_isr(struct urb *tx_urb)
if (!skb)
return;
- dev = (struct net_device *)(skb->cb);
+ dev = *(struct net_device **)(skb->cb);
tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
queue_index = tcb_desc->queue_index;
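
The two r8192U_core.c hunks above (in rtl8192_hard_data_xmit() and rtl8192_tx_isr()) change how the driver parks its net_device pointer in the skb control buffer. A short sketch of the idea, with invented function names: skb->cb is per-skb scratch space, so the transmit path stores the owning device there and the URB completion handler reads it back. Assigning through a cast stores exactly one pointer, which is what the old memcpy(skb->cb, &dev, sizeof(dev)) did in a roundabout way.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static void demo_stash_dev(struct sk_buff *skb, struct net_device *dev)
{
        *(struct net_device **)skb->cb = dev;        /* store on transmit */
}

static struct net_device *demo_fetch_dev(struct sk_buff *skb)
{
        return *(struct net_device **)skb->cb;       /* read back in completion */
}
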
@@ -1149,7 +1142,6 @@ static void rtl8192_tx_isr(struct urb *tx_urb)
return; /* avoid further processing AMSDU */
}
}
-
}
static void rtl8192_config_rate(struct net_device *dev, u16 *rate_config)
@@ -1272,11 +1264,10 @@ static void rtl8192_update_cap(struct net_device *dev, u16 cap)
priv->slot_time = slot_time;
write_nic_byte(dev, SLOT_TIME, slot_time);
}
-
}
+
static void rtl8192_net_update(struct net_device *dev)
{
-
struct r8192_priv *priv = ieee80211_priv(dev);
struct ieee80211_network *net;
u16 BcnTimeCfg = 0, BcnCW = 6, BcnIFS = 0xf;
@@ -1303,9 +1294,6 @@ static void rtl8192_net_update(struct net_device *dev)
write_nic_word(dev, BCN_TCFG, BcnTimeCfg);
}
-
-
-
}
/* temporary hw beacon is not used any more.
@@ -1315,6 +1303,7 @@ void rtl819xusb_beacon_tx(struct net_device *dev, u16 tx_rate)
{
}
+
inline u8 rtl8192_IsWirelessBMode(u16 rate)
{
if (((rate <= 110) && (rate != 60) && (rate != 90)) || (rate == 220))
@@ -1737,7 +1726,6 @@ static short rtl8192_usb_initendpoints(struct net_device *dev)
#ifndef JACKSON_NEW_RX
for (i = 0; i < (MAX_RX_URB + 1); i++) {
-
priv->rx_urb[i] = usb_alloc_urb(0, GFP_KERNEL);
priv->rx_urb[i]->transfer_buffer =
@@ -1782,8 +1770,8 @@ static short rtl8192_usb_initendpoints(struct net_device *dev)
netdev_dbg(dev, "End of initendpoints\n");
return 0;
-
}
+
#ifdef THOMAS_BEACON
static void rtl8192_usb_deleteendpoints(struct net_device *dev)
{
@@ -1820,7 +1808,6 @@ void rtl8192_usb_deleteendpoints(struct net_device *dev)
}
kfree(priv->rx_urb);
priv->rx_urb = NULL;
-
}
#else
kfree(priv->rx_urb);
@@ -1888,6 +1875,7 @@ static void rtl8192_update_beacon(struct work_struct *work)
net->bssht.bdRT2RTLongSlotTime;
rtl8192_update_cap(dev, net->capability);
}
+
/*
* background support to run QoS activate functionality
*/
@@ -1992,7 +1980,6 @@ static int rtl8192_handle_beacon(struct net_device *dev,
rtl8192_qos_handle_probe_response(priv, 1, network);
schedule_delayed_work(&priv->update_beacon_wq, 0);
return 0;
-
}
/*
@@ -2007,7 +1994,7 @@ static int rtl8192_qos_association_resp(struct r8192_priv *priv,
u32 size = sizeof(struct ieee80211_qos_parameters);
int set_qos_param = 0;
- if ((priv == NULL) || (network == NULL))
+ if (!priv || !network)
return 0;
if (priv->ieee80211->state != IEEE80211_LINKED)
@@ -2182,6 +2169,7 @@ static u8 rtl8192_getSupportedWireleeMode(struct net_device *dev)
}
return ret;
}
+
static void rtl8192_SetWirelessMode(struct net_device *dev, u8 wireless_mode)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -2223,8 +2211,8 @@ static void rtl8192_SetWirelessMode(struct net_device *dev, u8 wireless_mode)
priv->ieee80211->pHTInfo->bEnableHT = 0;
RT_TRACE(COMP_INIT, "Current Wireless Mode is %x\n", wireless_mode);
rtl8192_refresh_supportrate(priv);
-
}
+
/* init priv variables here. only non_zero value should be initialized here. */
static void rtl8192_init_priv_variable(struct net_device *dev)
{
@@ -2432,6 +2420,7 @@ static inline u16 endian_swap(u16 *data)
*data = (tmp >> 8) | (tmp << 8);
return *data;
}
+
static void rtl8192_read_eeprom_info(struct net_device *dev)
{
u16 wEPROM_ID = 0;
@@ -2627,7 +2616,6 @@ static void rtl8192_read_eeprom_info(struct net_device *dev)
default:
priv->CustomerID = RT_CID_DEFAULT;
break;
-
}
switch (priv->CustomerID) {
@@ -2642,7 +2630,6 @@ static void rtl8192_read_eeprom_info(struct net_device *dev)
default:
priv->LedStrategy = SW_LED_MODE0;
break;
-
}
@@ -2676,7 +2663,6 @@ static short rtl8192_get_channel_map(struct net_device *dev)
static short rtl8192_init(struct net_device *dev)
{
-
struct r8192_priv *priv = ieee80211_priv(dev);
memset(&(priv->stats), 0, sizeof(struct Stats));
@@ -2797,8 +2783,6 @@ static void rtl8192_hwconfig(struct net_device *dev)
/* Set Tx Antenna including Feedback control */
/* Set Auto Rate fallback control */
-
-
}
@@ -3027,7 +3011,6 @@ static bool rtl8192_adapter_start(struct net_device *dev)
bMaskByte2);
for (i = 0; i < CCKTxBBGainTableLength; i++) {
-
if (TempCCk == priv->cck_txbbgain_table[i].ccktxbb_valuearray[0]) {
priv->cck_present_attentuation_20Mdefault = (u8)i;
break;
@@ -3037,7 +3020,6 @@ static bool rtl8192_adapter_start(struct net_device *dev)
priv->cck_present_attentuation_difference = 0;
priv->cck_present_attentuation =
priv->cck_present_attentuation_20Mdefault;
-
}
}
write_nic_byte(dev, 0x87, 0x0);
@@ -3222,7 +3204,6 @@ static RESET_TYPE rtl819x_ifcheck_resetornot(struct net_device *dev)
} else {
return RESET_TYPE_NORESET;
}
-
}
static void rtl8192_cancel_deferred_work(struct r8192_priv *priv);
@@ -3250,7 +3231,6 @@ static void CamRestoreAllEntry(struct net_device *dev)
if ((priv->ieee80211->pairwise_key_type == KEY_TYPE_WEP40) ||
(priv->ieee80211->pairwise_key_type == KEY_TYPE_WEP104)) {
-
for (EntryId = 0; EntryId < 4; EntryId++) {
MacAddr = CAM_CONST_ADDR[EntryId];
setKey(dev, EntryId, EntryId,
@@ -3259,7 +3239,6 @@ static void CamRestoreAllEntry(struct net_device *dev)
}
} else if (priv->ieee80211->pairwise_key_type == KEY_TYPE_TKIP) {
-
if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
setKey(dev, 4, 0, priv->ieee80211->pairwise_key_type,
(u8 *)dev->dev_addr, 0, NULL);
@@ -3267,7 +3246,6 @@ static void CamRestoreAllEntry(struct net_device *dev)
setKey(dev, 4, 0, priv->ieee80211->pairwise_key_type,
MacAddr, 0, NULL);
} else if (priv->ieee80211->pairwise_key_type == KEY_TYPE_CCMP) {
-
if (priv->ieee80211->iw_mode == IW_MODE_ADHOC)
setKey(dev, 4, 0, priv->ieee80211->pairwise_key_type,
(u8 *)dev->dev_addr, 0, NULL);
@@ -3301,6 +3279,7 @@ static void CamRestoreAllEntry(struct net_device *dev)
CAM_CONST_ADDR[0], 0, NULL);
}
}
+
/* This function is used to fix Tx/Rx stop bug temporarily.
* This function will do "system reset" to NIC when Tx or Rx is stuck.
* The method checking Tx/Rx stuck of this function is supported by FW,
@@ -3468,7 +3447,6 @@ static void rtl819x_watchdog_wqcallback(struct work_struct *work)
/* for AP roaming */
if (priv->ieee80211->state == IEEE80211_LINKED &&
priv->ieee80211->iw_mode == IW_MODE_INFRA) {
-
rtl819x_update_rxcounts(priv, &TotalRxBcnNum, &TotalRxDataNum);
if ((TotalRxBcnNum + TotalRxDataNum) == 0) {
#ifdef TODO
@@ -3485,7 +3463,6 @@ static void rtl819x_watchdog_wqcallback(struct work_struct *work)
priv->ieee80211->link_change(dev);
queue_work(priv->ieee80211->wq,
&priv->ieee80211->associate_procedure_wq);
-
}
}
priv->ieee80211->LinkDetectInfo.NumRecvBcnInPeriod = 0;
@@ -3510,7 +3487,6 @@ static void rtl819x_watchdog_wqcallback(struct work_struct *work)
priv->bForcedSilentReset = false;
priv->bResetInProgress = false;
RT_TRACE(COMP_TRACE, " <==RtUsbCheckForHangWorkItemCallback()\n");
-
}
static void watch_dog_timer_callback(unsigned long data)
@@ -3521,6 +3497,7 @@ static void watch_dog_timer_callback(unsigned long data)
mod_timer(&priv->watch_dog_timer,
jiffies + msecs_to_jiffies(IEEE80211_WATCH_DOG_TIME));
}
+
static int _rtl8192_up(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
@@ -3560,7 +3537,6 @@ static int rtl8192_open(struct net_device *dev)
ret = rtl8192_up(dev);
up(&priv->wx_sem);
return ret;
-
}
@@ -3587,7 +3563,6 @@ static int rtl8192_close(struct net_device *dev)
up(&priv->wx_sem);
return ret;
-
}
int rtl8192_down(struct net_device *dev)
@@ -3649,7 +3624,6 @@ void rtl8192_commit(struct net_device *dev)
rtl8192_rtx_disable(dev);
reset_status = _rtl8192_up(dev);
-
}
static void rtl8192_restart(struct work_struct *work)
@@ -4111,7 +4085,6 @@ static void rtl8192_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
(((priv->undecorated_smoothed_pwdb) * (Rx_Smooth_Factor - 1)) +
(pprevious_stats->RxPWDBAll)) / (Rx_Smooth_Factor);
}
-
}
/* Check EVM */
@@ -4159,8 +4132,6 @@ static void rtl8192_process_phyinfo(struct r8192_priv *priv, u8 *buffer,
}
}
}
-
-
}
/*-----------------------------------------------------------------------------
@@ -4201,6 +4172,7 @@ static u8 rtl819x_evm_dbtopercentage(char value)
ret_val = 100;
return ret_val;
}
+
/* We want good-looking for signal strength/quality */
static long rtl819x_signal_scale_mapping(long currsig)
{
@@ -4542,7 +4514,6 @@ static void TranslateRxSignalStuff819xUsb(struct sk_buff *skb,
bpacket_match_bssid, bpacket_toself,
bPacketBeacon, bToSelfBA);
rtl8192_record_rxdesc_forlateruse(pstats, &previous_stats);
-
}
/**
@@ -4758,7 +4729,6 @@ static void query_rxdesc_status(struct sk_buff *skb,
RT_TRACE(COMP_RXDESC,
"driver_info->FirstAGGR = %d, driver_info->PartAggr = %d\n",
driver_info->FirstAGGR, driver_info->PartAggr);
-
}
skb_pull(skb, sizeof(rx_desc_819x_usb));
@@ -4822,7 +4792,6 @@ static void rtl8192_rx_nomal(struct sk_buff *skb)
netdev_dbg(dev, "actual_length: %d\n", skb->len);
dev_kfree_skb_any(skb);
}
-
}
static void rtl819xusb_process_received_packet(
@@ -4898,7 +4867,6 @@ static void rtl8192_rx_cmd(struct sk_buff *skb)
};
if ((skb->len >= (20 + sizeof(rx_desc_819x_usb))) && (skb->len < RX_URB_SIZE)) {
-
query_rx_cmdpkt_desc_status(skb, &stats);
/* prfd->queue_id = 1; */
@@ -4937,7 +4905,6 @@ static void rtl8192_irq_rx_tasklet(struct r8192_priv *priv)
info->out_pipe);
dev_kfree_skb(skb);
break;
-
}
}
}
@@ -4971,7 +4938,7 @@ static int rtl8192_usb_probe(struct usb_interface *intf,
RT_TRACE(COMP_INIT, "Oops: i'm coming\n");
dev = alloc_ieee80211(sizeof(struct r8192_priv));
- if (dev == NULL)
+ if (!dev)
return -ENOMEM;
usb_set_intfdata(intf, dev);
@@ -5034,7 +5001,6 @@ fail:
*/
static void rtl8192_cancel_deferred_work(struct r8192_priv *priv)
{
-
cancel_work_sync(&priv->reset_wq);
cancel_delayed_work(&priv->watch_dog_wq);
cancel_delayed_work(&priv->update_beacon_wq);
@@ -5191,13 +5157,12 @@ void setKey(struct net_device *dev, u8 EntryNo, u8 KeyIndex, u16 KeyType,
write_nic_dword(dev, RWCAM, TargetCommand);
} else {
/* Key Material */
- if (KeyContent != NULL) {
+ if (KeyContent) {
write_nic_dword(dev, WCAMI, (u32)(*(KeyContent + i - 2)));
write_nic_dword(dev, RWCAM, TargetCommand);
}
}
}
-
}
/***************************************************************************
diff --git a/drivers/staging/rtl8192u/r8192U_wx.c b/drivers/staging/rtl8192u/r8192U_wx.c
index f828e6441f2d..837704de3ea4 100644
--- a/drivers/staging/rtl8192u/r8192U_wx.c
+++ b/drivers/staging/rtl8192u/r8192U_wx.c
@@ -30,7 +30,6 @@
static const u32 rtl8180_rates[] = {1000000, 2000000, 5500000, 11000000,
6000000, 9000000, 12000000, 18000000, 24000000, 36000000, 48000000, 54000000};
-
#ifndef ENETDOWN
#define ENETDOWN 1
#endif
@@ -44,7 +43,6 @@ static int r8192_wx_get_freq(struct net_device *dev,
return ieee80211_wx_get_freq(priv->ieee80211, a, wrqu, b);
}
-
static int r8192_wx_get_mode(struct net_device *dev, struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
{
@@ -53,8 +51,6 @@ static int r8192_wx_get_mode(struct net_device *dev, struct iw_request_info *a,
return ieee80211_wx_get_mode(priv->ieee80211, a, wrqu, b);
}
-
-
static int r8192_wx_get_rate(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -64,8 +60,6 @@ static int r8192_wx_get_rate(struct net_device *dev,
return ieee80211_wx_get_rate(priv->ieee80211, info, wrqu, extra);
}
-
-
static int r8192_wx_set_rate(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -82,7 +76,6 @@ static int r8192_wx_set_rate(struct net_device *dev,
return ret;
}
-
static int r8192_wx_set_rts(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -148,7 +141,6 @@ static int r8192_wx_force_reset(struct net_device *dev,
}
-
static int r8192_wx_set_rawtx(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -301,7 +293,6 @@ static int rtl8180_wx_get_range(struct net_device *dev,
/* range->min_r_time; */ /* Minimal retry lifetime */
/* range->max_r_time; */ /* Maximal retry lifetime */
-
for (i = 0, val = 0; i < 14; i++) {
/* Include only legal frequencies for some countries */
@@ -326,7 +317,6 @@ static int rtl8180_wx_get_range(struct net_device *dev,
return 0;
}
-
static int r8192_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
{
@@ -396,9 +386,6 @@ static int r8192_wx_set_essid(struct net_device *dev,
return ret;
}
-
-
-
static int r8192_wx_get_essid(struct net_device *dev,
struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
@@ -415,7 +402,6 @@ static int r8192_wx_get_essid(struct net_device *dev,
return ret;
}
-
static int r8192_wx_set_freq(struct net_device *dev, struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
{
@@ -439,7 +425,6 @@ static int r8192_wx_get_name(struct net_device *dev,
return ieee80211_wx_get_name(priv->ieee80211, info, wrqu, extra);
}
-
static int r8192_wx_set_frag(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -493,7 +478,6 @@ static int r8192_wx_set_wap(struct net_device *dev,
}
-
static int r8192_wx_get_wap(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -503,7 +487,6 @@ static int r8192_wx_get_wap(struct net_device *dev,
return ieee80211_wx_get_wap(priv->ieee80211, info, wrqu, extra);
}
-
static int r8192_wx_get_enc(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *key)
@@ -695,7 +678,6 @@ static int r8192_wx_get_retry(struct net_device *dev,
wrqu->retry.value = priv->retry_data;
}
-
return 0;
}
@@ -711,7 +693,6 @@ static int r8192_wx_get_sens(struct net_device *dev,
return 0;
}
-
static int r8192_wx_set_sens(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
@@ -862,7 +843,6 @@ static int dummy(struct net_device *dev, struct iw_request_info *a,
return -1;
}
-
static iw_handler r8192_wx_handlers[] = {
NULL, /* SIOCSIWCOMMIT */
r8192_wx_get_name, /* SIOCGIWNAME */
@@ -949,7 +929,6 @@ static const struct iw_priv_args r8192_private_args[] = {
};
-
static iw_handler r8192_private_handler[] = {
r8192_wx_set_crcmon,
r8192_wx_set_scan_type,
@@ -985,7 +964,6 @@ struct iw_statistics *r8192_get_wireless_stats(struct net_device *dev)
return wstats;
}
-
struct iw_handler_def r8192_wx_handlers_def = {
.standard = r8192_wx_handlers,
.num_standard = ARRAY_SIZE(r8192_wx_handlers),
diff --git a/drivers/staging/rtl8712/basic_types.h b/drivers/staging/rtl8712/basic_types.h
index 7561bed5dd44..f5c0231891b1 100644
--- a/drivers/staging/rtl8712/basic_types.h
+++ b/drivers/staging/rtl8712/basic_types.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* Modifications for inclusion into the Linux staging tree are
* Copyright(c) 2010 Larry Finger. All rights reserved.
*
diff --git a/drivers/staging/rtl8712/drv_types.h b/drivers/staging/rtl8712/drv_types.h
index 29e47e1501c5..ae79047ac6dc 100644
--- a/drivers/staging/rtl8712/drv_types.h
+++ b/drivers/staging/rtl8712/drv_types.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* Modifications for inclusion into the Linux staging tree are
* Copyright(c) 2010 Larry Finger. All rights reserved.
*
diff --git a/drivers/staging/rtl8712/ethernet.h b/drivers/staging/rtl8712/ethernet.h
index fad173f4097e..039da36fad3d 100644
--- a/drivers/staging/rtl8712/ethernet.h
+++ b/drivers/staging/rtl8712/ethernet.h
@@ -11,10 +11,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* Modifications for inclusion into the Linux staging tree are
* Copyright(c) 2010 Larry Finger. All rights reserved.
*
diff --git a/drivers/staging/rtl8712/hal_init.c b/drivers/staging/rtl8712/hal_init.c
index 8008efe5686d..0dd458d1402c 100644
--- a/drivers/staging/rtl8712/hal_init.c
+++ b/drivers/staging/rtl8712/hal_init.c
@@ -13,10 +13,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* Modifications for inclusion into the Linux staging tree are
* Copyright(c) 2010 Larry Finger. All rights reserved.
*
@@ -201,8 +197,8 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
0x0000ffff);
memcpy(ppayload, ptr, dump_imem_sz);
r8712_write_mem(padapter, RTL8712_DMA_VOQ,
- dump_imem_sz + TXDESC_SIZE,
- (u8 *)ptx_desc);
+ dump_imem_sz + TXDESC_SIZE,
+ (u8 *)ptx_desc);
ptr += dump_imem_sz;
imem_sz -= dump_imem_sz;
} while (imem_sz > 0);
@@ -230,7 +226,8 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
0x0000ffff);
memcpy(ppayload, ptr, dump_emem_sz);
r8712_write_mem(padapter, RTL8712_DMA_VOQ,
- dump_emem_sz + TXDESC_SIZE, (u8 *)ptx_desc);
+ dump_emem_sz + TXDESC_SIZE,
+ (u8 *)ptx_desc);
ptr += dump_emem_sz;
emem_sz -= dump_emem_sz;
} while (emem_sz > 0);
@@ -282,7 +279,7 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
ptx_desc->txdw0 |= cpu_to_le32(BIT(28));
memcpy(ppayload, &fwhdr.fwpriv, fwhdr.fw_priv_sz);
r8712_write_mem(padapter, RTL8712_DMA_VOQ,
- fwhdr.fw_priv_sz + TXDESC_SIZE, (u8 *)ptx_desc);
+ fwhdr.fw_priv_sz + TXDESC_SIZE, (u8 *)ptx_desc);
/* polling dmem code done */
i = 100;
@@ -297,7 +294,8 @@ static u8 rtl8712_dl_fw(struct _adapter *padapter)
tmp8 = r8712_read8(padapter, 0x1025000A);
if (tmp8 & BIT(4)) /* When boot from EEPROM,
- & FW need more time to read EEPROM */
+ * & FW need more time to read EEPROM
+ */
i = 60;
else /* boot from EFUSE */
i = 30;
@@ -332,7 +330,8 @@ uint rtl8712_hal_init(struct _adapter *padapter)
r8712_read32(padapter, RCR));
val32 = r8712_read32(padapter, RCR);
r8712_write32(padapter, RCR, (val32 | BIT(26))); /* Enable RX TCP
- Checksum offload */
+ * Checksum offload
+ */
netdev_info(padapter->pnetdev, "2 RCR=0x%x\n",
r8712_read32(padapter, RCR));
val32 = r8712_read32(padapter, RCR);
@@ -346,7 +345,8 @@ uint rtl8712_hal_init(struct _adapter *padapter)
r8712_write8(padapter, 0x102500BD, r8712_read8(padapter, 0x102500BD) |
BIT(7)); /* enable usb rx aggregation */
r8712_write8(padapter, 0x102500D9, 1); /* TH=1 => means that invalidate
- * usb rx aggregation */
+ * usb rx aggregation
+ */
r8712_write8(padapter, 0x1025FE5B, 0x04); /* 1.7ms/4 */
/* Fix the RX FIFO issue(USB error) */
r8712_write8(padapter, 0x1025fe5C, r8712_read8(padapter, 0x1025fe5C)
@@ -367,7 +367,8 @@ uint rtl8712_hal_deinit(struct _adapter *padapter)
r8712_write8(padapter, SYS_FUNC_EN + 1, 0x70);
r8712_write8(padapter, PMC_FSM, 0x06); /* Enable Loader Data Keep */
r8712_write8(padapter, SYS_ISO_CTRL, 0xF9); /* Isolation signals from
- * CORE, PLL */
+ * CORE, PLL
+ */
r8712_write8(padapter, SYS_ISO_CTRL + 1, 0xe8); /* Enable EFUSE 1.2V */
r8712_write8(padapter, AFE_PLL_CTRL, 0x00); /* Disable AFE PLL. */
r8712_write8(padapter, LDOA15_CTRL, 0x54); /* Disable A15V */
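
The hal_init.c changes are pure checkpatch formatting: continuation arguments of r8712_write_mem() re-aligned with the opening parenthesis, and multi-line comments rewrapped so every continuation line carries a leading asterisk and the closing delimiter sits on its own line. A self-contained illustration of both conventions (names are made up):

/*
 * Preferred block-comment form, which these hunks convert to: every
 * continuation line starts with an aligned " * " and the closing
 * delimiter sits alone on the last line.
 */
static void demo_align(int a, int b, int c)
{
}

static void demo_caller(void)
{
        demo_align(1,
                   2,  /* continuation arguments line up under the '(' */
                   3);
}
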
diff --git a/drivers/staging/rtl8712/ieee80211.c b/drivers/staging/rtl8712/ieee80211.c
index d13b4d53c256..8918654b44ed 100644
--- a/drivers/staging/rtl8712/ieee80211.c
+++ b/drivers/staging/rtl8712/ieee80211.c
@@ -13,10 +13,6 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
- * You should have received a copy of the GNU General Public License along with
- * this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
- *
* Modifications for inclusion into the Linux staging tree are
* Copyright(c) 2010 Larry Finger. All rights reserved.
*
diff --git a/drivers/staging/rtl8712/mlme_linux.c b/drivers/staging/rtl8712/mlme_linux.c
index e4e4bdee78be..af7c4a47738a 100644
--- a/drivers/staging/rtl8712/mlme_linux.c
+++ b/drivers/staging/rtl8712/mlme_linux.c
@@ -153,7 +153,7 @@ void r8712_report_sec_ie(struct _adapter *adapter, u8 authmode, u8 *sec_ie)
buff = NULL;
if (authmode == _WPA_IE_ID_) {
buff = kzalloc(IW_CUSTOM_MAX, GFP_ATOMIC);
- if (buff == NULL)
+ if (!buff)
return;
p = buff;
p += sprintf(p, "ASSOCINFO(ReqIEs=");
diff --git a/drivers/staging/rtl8712/os_intfs.c b/drivers/staging/rtl8712/os_intfs.c
index ab19112eae13..57211f7e68a5 100644
--- a/drivers/staging/rtl8712/os_intfs.c
+++ b/drivers/staging/rtl8712/os_intfs.c
@@ -389,7 +389,7 @@ static int netdev_open(struct net_device *pnetdev)
padapter->bup = true;
if (rtl871x_hal_init(padapter) != _SUCCESS)
goto netdev_open_error;
- if (r8712_initmac == NULL)
+ if (!r8712_initmac)
/* Use the mac address stored in the Efuse */
memcpy(pnetdev->dev_addr,
padapter->eeprompriv.mac_addr, ETH_ALEN);
@@ -413,7 +413,7 @@ static int netdev_open(struct net_device *pnetdev)
}
if (start_drv_threads(padapter) != _SUCCESS)
goto netdev_open_error;
- if (padapter->dvobjpriv.inirp_init == NULL)
+ if (!padapter->dvobjpriv.inirp_init)
goto netdev_open_error;
else
padapter->dvobjpriv.inirp_init(padapter);
diff --git a/drivers/staging/rtl8712/osdep_service.h b/drivers/staging/rtl8712/osdep_service.h
index 076d5083c723..ad041c96fdb8 100644
--- a/drivers/staging/rtl8712/osdep_service.h
+++ b/drivers/staging/rtl8712/osdep_service.h
@@ -57,9 +57,6 @@ struct __queue {
spin_lock_init(&((pqueue)->lock)); \
} while (0)
-#define LIST_CONTAINOR(ptr, type, member) \
- ((type *)((char *)(ptr)-(SIZE_T)(&((type *)0)->member)))
-
static inline u32 _down_sema(struct semaphore *sema)
{
if (down_interruptible(sema))
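
LIST_CONTAINOR was a local re-implementation of container_of() from <linux/kernel.h>; the rtl8712_recv.c, rtl8712_xmit.c, rtl871x_ioctl_linux.c and rtl871x_mlme.c hunks further down switch every caller to the generic macro (note the member argument also becomes the full path to the embedded list_head, e.g. u.list instead of u). A minimal illustration with made-up names:

#include <linux/kernel.h>
#include <linux/list.h>

/*
 * Given a list_head embedded in a wrapper struct, container_of()
 * recovers the wrapper from a pointer to the embedded member.
 */
struct demo_frame {
        struct list_head list;
        int seq;
};

static struct demo_frame *demo_first_frame(struct list_head *head)
{
        /* head->next points at the 'list' member of the first entry */
        return container_of(head->next, struct demo_frame, list);
}
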
diff --git a/drivers/staging/rtl8712/rtl8712_cmd.c b/drivers/staging/rtl8712/rtl8712_cmd.c
index 50f400234593..13c018340ff2 100644
--- a/drivers/staging/rtl8712/rtl8712_cmd.c
+++ b/drivers/staging/rtl8712/rtl8712_cmd.c
@@ -135,7 +135,7 @@ static u8 read_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
/* invoke cmd->callback function */
pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
- if (pcmd_callback == NULL)
+ if (!pcmd_callback)
r8712_free_cmd_obj(pcmd);
else
pcmd_callback(padapter, pcmd);
@@ -149,7 +149,7 @@ static u8 write_macreg_hdl(struct _adapter *padapter, u8 *pbuf)
/* invoke cmd->callback function */
pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
- if (pcmd_callback == NULL)
+ if (!pcmd_callback)
r8712_free_cmd_obj(pcmd);
else
pcmd_callback(padapter, pcmd);
@@ -165,7 +165,7 @@ static u8 read_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
if (pcmd->rsp && pcmd->rspsz > 0)
memcpy(pcmd->rsp, (u8 *)&val, pcmd->rspsz);
pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
- if (pcmd_callback == NULL)
+ if (!pcmd_callback)
r8712_free_cmd_obj(pcmd);
else
pcmd_callback(padapter, pcmd);
@@ -178,7 +178,7 @@ static u8 write_bbreg_hdl(struct _adapter *padapter, u8 *pbuf)
struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
- if (pcmd_callback == NULL)
+ if (!pcmd_callback)
r8712_free_cmd_obj(pcmd);
else
pcmd_callback(padapter, pcmd);
@@ -194,7 +194,7 @@ static u8 read_rfreg_hdl(struct _adapter *padapter, u8 *pbuf)
if (pcmd->rsp && pcmd->rspsz > 0)
memcpy(pcmd->rsp, (u8 *)&val, pcmd->rspsz);
pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
- if (pcmd_callback == NULL)
+ if (!pcmd_callback)
r8712_free_cmd_obj(pcmd);
else
pcmd_callback(padapter, pcmd);
@@ -207,7 +207,7 @@ static u8 write_rfreg_hdl(struct _adapter *padapter, u8 *pbuf)
struct cmd_obj *pcmd = (struct cmd_obj *)pbuf;
pcmd_callback = cmd_callback[pcmd->cmdcode].callback;
- if (pcmd_callback == NULL)
+ if (!pcmd_callback)
r8712_free_cmd_obj(pcmd);
else
pcmd_callback(padapter, pcmd);
@@ -227,7 +227,7 @@ static struct cmd_obj *cmd_hdl_filter(struct _adapter *padapter,
{
struct cmd_obj *pcmd_r;
- if (pcmd == NULL)
+ if (!pcmd)
return pcmd;
pcmd_r = NULL;
@@ -416,7 +416,7 @@ _next:
/* free all cmd_obj resources */
do {
pcmd = r8712_dequeue_cmd(&(pcmdpriv->cmd_queue));
- if (pcmd == NULL)
+ if (!pcmd)
break;
r8712_free_cmd_obj(pcmd);
} while (1);
@@ -431,7 +431,7 @@ void r8712_event_handle(struct _adapter *padapter, uint *peventbuf)
void (*event_callback)(struct _adapter *dev, u8 *pbuf);
struct evt_priv *pevt_priv = &(padapter->evtpriv);
- if (peventbuf == NULL)
+ if (!peventbuf)
goto _abort_event_;
evt_sz = (u16)(le32_to_cpu(*peventbuf) & 0xffff);
evt_seq = (u8)((le32_to_cpu(*peventbuf) >> 24) & 0x7f);
diff --git a/drivers/staging/rtl8712/rtl8712_recv.c b/drivers/staging/rtl8712/rtl8712_recv.c
index d187508dd1e0..f25b34c7d115 100644
--- a/drivers/staging/rtl8712/rtl8712_recv.c
+++ b/drivers/staging/rtl8712/rtl8712_recv.c
@@ -204,7 +204,7 @@ static union recv_frame *recvframe_defrag(struct _adapter *adapter,
pfree_recv_queue = &adapter->recvpriv.free_recv_queue;
phead = &defrag_q->queue;
plist = phead->next;
- prframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ prframe = container_of(plist, union recv_frame, u.list);
list_del_init(&prframe->u.list);
pfhdr = &prframe->u.hdr;
curfragnum = 0;
@@ -219,7 +219,7 @@ static union recv_frame *recvframe_defrag(struct _adapter *adapter,
plist = &defrag_q->queue;
plist = plist->next;
while (!end_of_queue_search(phead, plist)) {
- pnextrframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ pnextrframe = container_of(plist, union recv_frame, u.list);
pnfhdr = &pnextrframe->u.hdr;
/*check the fragment sequence (2nd ~n fragment frame) */
if (curfragnum != pnfhdr->attrib.frag_num) {
@@ -492,7 +492,7 @@ static int enqueue_reorder_recvframe(struct recv_reorder_ctrl *preorder_ctrl,
phead = &ppending_recvframe_queue->queue;
plist = phead->next;
while (!end_of_queue_search(phead, plist)) {
- pnextrframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ pnextrframe = container_of(plist, union recv_frame, u.list);
pnextattrib = &pnextrframe->u.hdr.attrib;
if (SN_LESS(pnextattrib->seq_num, pattrib->seq_num))
plist = plist->next;
@@ -525,14 +525,14 @@ int r8712_recv_indicatepkts_in_order(struct _adapter *padapter,
if (list_empty(phead))
return true;
- prframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ prframe = container_of(plist, union recv_frame, u.list);
pattrib = &prframe->u.hdr.attrib;
preorder_ctrl->indicate_seq = pattrib->seq_num;
}
/* Prepare indication list and indication.
* Check if there is any packet need indicate. */
while (!list_empty(phead)) {
- prframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ prframe = container_of(plist, union recv_frame, u.list);
pattrib = &prframe->u.hdr.attrib;
if (!SN_LESS(preorder_ctrl->indicate_seq, pattrib->seq_num)) {
plist = plist->next;
diff --git a/drivers/staging/rtl8712/rtl8712_xmit.c b/drivers/staging/rtl8712/rtl8712_xmit.c
index b21a60e9f8a9..7e0b94503dfc 100644
--- a/drivers/staging/rtl8712/rtl8712_xmit.c
+++ b/drivers/staging/rtl8712/rtl8712_xmit.c
@@ -169,8 +169,8 @@ static struct xmit_frame *dequeue_one_xmitframe(struct xmit_priv *pxmitpriv,
xmitframe_phead = &pframe_queue->queue;
xmitframe_plist = xmitframe_phead->next;
if (!end_of_queue_search(xmitframe_phead, xmitframe_plist)) {
- pxmitframe = LIST_CONTAINOR(xmitframe_plist,
- struct xmit_frame, list);
+ pxmitframe = container_of(xmitframe_plist,
+ struct xmit_frame, list);
list_del_init(&pxmitframe->list);
ptxservq->qcnt--;
phwxmit->txcmdcnt++;
@@ -209,8 +209,8 @@ static struct xmit_frame *dequeue_xframe_ex(struct xmit_priv *pxmitpriv,
sta_phead = &phwxmit->sta_queue->queue;
sta_plist = sta_phead->next;
while (!end_of_queue_search(sta_phead, sta_plist)) {
- ptxservq = LIST_CONTAINOR(sta_plist, struct tx_servq,
- tx_pending);
+ ptxservq = container_of(sta_plist, struct tx_servq,
+ tx_pending);
pframe_queue = &ptxservq->sta_pending;
pxmitframe = dequeue_one_xmitframe(pxmitpriv, phwxmit,
ptxservq, pframe_queue);
diff --git a/drivers/staging/rtl8712/rtl871x_cmd.c b/drivers/staging/rtl8712/rtl871x_cmd.c
index 86136cc73672..aed03cfbb1ba 100644
--- a/drivers/staging/rtl8712/rtl871x_cmd.c
+++ b/drivers/staging/rtl8712/rtl871x_cmd.c
@@ -225,10 +225,10 @@ u8 r8712_sitesurvey_cmd(struct _adapter *padapter,
struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
psurveyPara = kmalloc(sizeof(*psurveyPara), GFP_ATOMIC);
- if (psurveyPara == NULL) {
+ if (!psurveyPara) {
kfree(ph2c);
return _FAIL;
}
@@ -258,10 +258,10 @@ u8 r8712_setdatarate_cmd(struct _adapter *padapter, u8 *rateset)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
pbsetdataratepara = kmalloc(sizeof(*pbsetdataratepara), GFP_ATOMIC);
- if (pbsetdataratepara == NULL) {
+ if (!pbsetdataratepara) {
kfree(ph2c);
return _FAIL;
}
@@ -280,10 +280,10 @@ u8 r8712_set_chplan_cmd(struct _adapter *padapter, int chplan)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
psetchplanpara = kmalloc(sizeof(*psetchplanpara), GFP_ATOMIC);
- if (psetchplanpara == NULL) {
+ if (!psetchplanpara) {
kfree(ph2c);
return _FAIL;
}
@@ -301,10 +301,10 @@ u8 r8712_setbasicrate_cmd(struct _adapter *padapter, u8 *rateset)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
pssetbasicratepara = kmalloc(sizeof(*pssetbasicratepara), GFP_ATOMIC);
- if (pssetbasicratepara == NULL) {
+ if (!pssetbasicratepara) {
kfree(ph2c);
return _FAIL;
}
@@ -322,10 +322,10 @@ u8 r8712_setfwdig_cmd(struct _adapter *padapter, u8 type)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
pwriteptmparm = kmalloc(sizeof(*pwriteptmparm), GFP_ATOMIC);
- if (pwriteptmparm == NULL) {
+ if (!pwriteptmparm) {
kfree(ph2c);
return _FAIL;
}
@@ -342,10 +342,10 @@ u8 r8712_setfwra_cmd(struct _adapter *padapter, u8 type)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
pwriteptmparm = kmalloc(sizeof(*pwriteptmparm), GFP_ATOMIC);
- if (pwriteptmparm == NULL) {
+ if (!pwriteptmparm) {
kfree(ph2c);
return _FAIL;
}
@@ -362,10 +362,10 @@ u8 r8712_setrfreg_cmd(struct _adapter *padapter, u8 offset, u32 val)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
pwriterfparm = kmalloc(sizeof(*pwriterfparm), GFP_ATOMIC);
- if (pwriterfparm == NULL) {
+ if (!pwriterfparm) {
kfree(ph2c);
return _FAIL;
}
@@ -383,10 +383,10 @@ u8 r8712_getrfreg_cmd(struct _adapter *padapter, u8 offset, u8 *pval)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
prdrfparm = kmalloc(sizeof(*prdrfparm), GFP_ATOMIC);
- if (prdrfparm == NULL) {
+ if (!prdrfparm) {
kfree(ph2c);
return _FAIL;
}
@@ -427,7 +427,7 @@ u8 r8712_createbss_cmd(struct _adapter *padapter)
padapter->ledpriv.LedControlHandler(padapter, LED_CTL_START_TO_LINK);
pcmd = kmalloc(sizeof(*pcmd), GFP_ATOMIC);
- if (pcmd == NULL)
+ if (!pcmd)
return _FAIL;
INIT_LIST_HEAD(&pcmd->list);
pcmd->cmdcode = _CreateBss_CMD_;
@@ -457,7 +457,7 @@ u8 r8712_joinbss_cmd(struct _adapter *padapter, struct wlan_network *pnetwork)
padapter->ledpriv.LedControlHandler(padapter, LED_CTL_START_TO_LINK);
pcmd = kmalloc(sizeof(*pcmd), GFP_ATOMIC);
- if (pcmd == NULL)
+ if (!pcmd)
return _FAIL;
/* for hidden ap to set fw_state here */
@@ -587,10 +587,10 @@ u8 r8712_disassoc_cmd(struct _adapter *padapter) /* for sta_mode */
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
pdisconnect_cmd = kmalloc(sizeof(*pdisconnect_cmd), GFP_ATOMIC);
- if (pdisconnect_cmd == NULL)
+ if (!pdisconnect_cmd)
return _FAIL;
pdisconnect = kmalloc(sizeof(*pdisconnect), GFP_ATOMIC);
- if (pdisconnect == NULL) {
+ if (!pdisconnect) {
kfree(pdisconnect_cmd);
return _FAIL;
}
@@ -609,10 +609,10 @@ u8 r8712_setopmode_cmd(struct _adapter *padapter,
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
psetop = kmalloc(sizeof(*psetop), GFP_ATOMIC);
- if (psetop == NULL) {
+ if (!psetop) {
kfree(ph2c);
return _FAIL;
}
@@ -633,15 +633,15 @@ u8 r8712_setstakey_cmd(struct _adapter *padapter, u8 *psta, u8 unicast_key)
struct sta_info *sta = (struct sta_info *)psta;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
psetstakey_para = kmalloc(sizeof(*psetstakey_para), GFP_ATOMIC);
- if (psetstakey_para == NULL) {
+ if (!psetstakey_para) {
kfree(ph2c);
return _FAIL;
}
psetstakey_rsp = kmalloc(sizeof(*psetstakey_rsp), GFP_ATOMIC);
- if (psetstakey_rsp == NULL) {
+ if (!psetstakey_rsp) {
kfree(ph2c);
kfree(psetstakey_para);
return _FAIL;
@@ -673,10 +673,10 @@ u8 r8712_setrfintfs_cmd(struct _adapter *padapter, u8 mode)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
psetrfintfsparm = kmalloc(sizeof(*psetrfintfsparm), GFP_ATOMIC);
- if (psetrfintfsparm == NULL) {
+ if (!psetrfintfsparm) {
kfree(ph2c);
return _FAIL;
}
@@ -695,10 +695,10 @@ u8 r8712_setrttbl_cmd(struct _adapter *padapter,
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
psetrttblparm = kmalloc(sizeof(*psetrttblparm), GFP_ATOMIC);
- if (psetrttblparm == NULL) {
+ if (!psetrttblparm) {
kfree(ph2c);
return _FAIL;
}
@@ -716,10 +716,10 @@ u8 r8712_setMacAddr_cmd(struct _adapter *padapter, u8 *mac_addr)
struct SetMacAddr_param *psetMacAddr_para;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
psetMacAddr_para = kmalloc(sizeof(*psetMacAddr_para), GFP_ATOMIC);
- if (psetMacAddr_para == NULL) {
+ if (!psetMacAddr_para) {
kfree(ph2c);
return _FAIL;
}
@@ -738,15 +738,15 @@ u8 r8712_setassocsta_cmd(struct _adapter *padapter, u8 *mac_addr)
struct set_assocsta_rsp *psetassocsta_rsp = NULL;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
psetassocsta_para = kmalloc(sizeof(*psetassocsta_para), GFP_ATOMIC);
- if (psetassocsta_para == NULL) {
+ if (!psetassocsta_para) {
kfree(ph2c);
return _FAIL;
}
psetassocsta_rsp = kmalloc(sizeof(*psetassocsta_rsp), GFP_ATOMIC);
- if (psetassocsta_rsp == NULL) {
+ if (!psetassocsta_rsp) {
kfree(ph2c);
kfree(psetassocsta_para);
return _FAIL;
@@ -766,10 +766,10 @@ u8 r8712_addbareq_cmd(struct _adapter *padapter, u8 tid)
struct addBaReq_parm *paddbareq_parm;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
paddbareq_parm = kmalloc(sizeof(*paddbareq_parm), GFP_ATOMIC);
- if (paddbareq_parm == NULL) {
+ if (!paddbareq_parm) {
kfree(ph2c);
return _FAIL;
}
@@ -787,10 +787,10 @@ u8 r8712_wdg_wk_cmd(struct _adapter *padapter)
struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
pdrvintcmd_param = kmalloc(sizeof(*pdrvintcmd_param), GFP_ATOMIC);
- if (pdrvintcmd_param == NULL) {
+ if (!pdrvintcmd_param) {
kfree(ph2c);
return _FAIL;
}
@@ -961,10 +961,10 @@ u8 r8712_disconnectCtrlEx_cmd(struct _adapter *adapter, u32 enableDrvCtrl,
struct cmd_priv *pcmdpriv = &adapter->cmdpriv;
ph2c = kmalloc(sizeof(*ph2c), GFP_ATOMIC);
- if (ph2c == NULL)
+ if (!ph2c)
return _FAIL;
param = kzalloc(sizeof(*param), GFP_ATOMIC);
- if (param == NULL) {
+ if (!param) {
kfree(ph2c);
return _FAIL;
}
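
All of the rtl871x_cmd.c constructors above follow the same two-allocation pattern already sketched for ioctl_linux.c, but with GFP_ATOMIC rather than GFP_KERNEL, presumably because some callers cannot sleep. As a reminder of the trade-off (illustrative helper, not driver code):

#include <linux/slab.h>
#include <linux/types.h>

/*
 * GFP_KERNEL may sleep and suits ordinary process context; GFP_ATOMIC
 * never sleeps and is the only safe choice under a spinlock or in
 * interrupt context, at the price of failing more readily under
 * memory pressure.
 */
static void *demo_alloc(size_t size, bool can_sleep)
{
        return kmalloc(size, can_sleep ? GFP_KERNEL : GFP_ATOMIC);
}
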
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
index 1b9e24900477..e205adf24da2 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_linux.c
@@ -399,7 +399,7 @@ static int wpa_set_encryption(struct net_device *dev, struct ieee_param *param,
if (wep_key_len > 0) {
wep_key_len = wep_key_len <= 5 ? 5 : 13;
pwep = kzalloc(sizeof(*pwep), GFP_ATOMIC);
- if (pwep == NULL)
+ if (!pwep)
return -ENOMEM;
pwep->KeyLength = wep_key_len;
pwep->Length = wep_key_len +
@@ -1060,8 +1060,8 @@ static int r8711_wx_set_wap(struct net_device *dev,
while (1) {
if (end_of_queue_search(phead, pmlmepriv->pscanned))
break;
- pnetwork = LIST_CONTAINOR(pmlmepriv->pscanned,
- struct wlan_network, list);
+ pnetwork = container_of(pmlmepriv->pscanned,
+ struct wlan_network, list);
pmlmepriv->pscanned = pmlmepriv->pscanned->next;
dst_bssid = pnetwork->network.MacAddress;
if (!memcmp(dst_bssid, temp->sa_data, ETH_ALEN)) {
@@ -1216,7 +1216,7 @@ static int r8711_wx_get_scan(struct net_device *dev,
ret = -E2BIG;
break;
}
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
ev = translate_scan(padapter, a, pnetwork, ev, stop);
plist = plist->next;
}
@@ -1271,8 +1271,8 @@ static int r8711_wx_set_essid(struct net_device *dev,
while (1) {
if (end_of_queue_search(phead, pmlmepriv->pscanned))
break;
- pnetwork = LIST_CONTAINOR(pmlmepriv->pscanned,
- struct wlan_network, list);
+ pnetwork = container_of(pmlmepriv->pscanned,
+ struct wlan_network, list);
pmlmepriv->pscanned = pmlmepriv->pscanned->next;
dst_ssid = pnetwork->network.Ssid.Ssid;
if ((!memcmp(dst_ssid, src_ssid, ndis_ssid.SsidLength))
@@ -1793,7 +1793,7 @@ static int r871x_wx_set_enc_ext(struct net_device *dev,
param_len = sizeof(struct ieee_param) + pext->key_len;
param = kzalloc(param_len, GFP_ATOMIC);
- if (param == NULL)
+ if (!param)
return -ENOMEM;
param->cmd = IEEE_CMD_SET_ENCRYPTION;
eth_broadcast_addr(param->sta_addr);
@@ -1986,7 +1986,7 @@ static int r871x_get_ap_info(struct net_device *dev,
while (1) {
if (end_of_queue_search(phead, plist))
break;
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
if (!mac_pton(data, bssid)) {
netdev_info(dev, "r8712u: Invalid BSSID '%s'.\n",
(u8 *)data);
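
Note: LIST_CONTAINOR is a driver-private duplicate of the kernel's container_of(); the ioctl hunks above switch the scan-list walks over to the generic macro. A short sketch of how container_of() recovers the enclosing structure from an embedded list node — the type and field names here are illustrative only:

#include <linux/kernel.h>
#include <linux/list.h>

struct demo_network {
	int rssi;
	struct list_head list;	/* embedded list node */
};

static struct demo_network *demo_first_entry(struct list_head *head)
{
	struct list_head *pos = head->next;

	if (pos == head)	/* empty list */
		return NULL;
	/* map the embedded list_head back to its containing struct */
	return container_of(pos, struct demo_network, list);
}

Using the shared macro also gives the type checking that the open-coded LIST_CONTAINOR lacked.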
diff --git a/drivers/staging/rtl8712/rtl871x_ioctl_set.c b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
index f772675ae9cd..56760cda8e89 100644
--- a/drivers/staging/rtl8712/rtl871x_ioctl_set.c
+++ b/drivers/staging/rtl8712/rtl871x_ioctl_set.c
@@ -34,12 +34,6 @@
#include "usb_osintf.h"
#include "usb_ops.h"
-#define IS_MAC_ADDRESS_BROADCAST(addr) \
-( \
- ((addr[0] == 0xff) && (addr[1] == 0xff) && \
- (addr[2] == 0xff) && (addr[3] == 0xff) && \
- (addr[4] == 0xff) && (addr[5] == 0xff)) ? true : false \
-)
static u8 validate_ssid(struct ndis_802_11_ssid *ssid)
{
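
Note: the removed IS_MAC_ADDRESS_BROADCAST macro duplicates a helper the kernel already provides; callers would normally use is_broadcast_ether_addr() from <linux/etherdevice.h>. Whether this particular series switches its callers to that helper is not visible in this hunk, so treat the sketch below as an assumption rather than the patch's actual replacement:

#include <linux/etherdevice.h>

static bool demo_is_bcast(const u8 *addr)
{
	/* true when addr is ff:ff:ff:ff:ff:ff */
	return is_broadcast_ether_addr(addr);
}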
diff --git a/drivers/staging/rtl8712/rtl871x_mlme.c b/drivers/staging/rtl8712/rtl871x_mlme.c
index 62d4ae85af15..772bf9fa9592 100644
--- a/drivers/staging/rtl8712/rtl871x_mlme.c
+++ b/drivers/staging/rtl8712/rtl871x_mlme.c
@@ -155,7 +155,7 @@ static struct wlan_network *_r8712_find_network(struct __queue *scanned_queue,
phead = &scanned_queue->queue;
plist = phead->next;
while (plist != phead) {
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
plist = plist->next;
if (!memcmp(addr, pnetwork->network.MacAddress, ETH_ALEN))
break;
@@ -176,7 +176,7 @@ static void _free_network_queue(struct _adapter *padapter)
phead = &scanned_queue->queue;
plist = phead->next;
while (!end_of_queue_search(phead, plist)) {
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
plist = plist->next;
_free_network(pmlmepriv, pnetwork);
}
@@ -304,7 +304,7 @@ struct wlan_network *r8712_get_oldest_wlan_network(
while (1) {
if (end_of_queue_search(phead, plist) == true)
break;
- pwlan = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pwlan = container_of(plist, struct wlan_network, list);
if (pwlan->fixed != true) {
if (oldest == NULL ||
time_after((unsigned long)oldest->last_scanned,
@@ -390,7 +390,7 @@ static void update_scanned_network(struct _adapter *adapter,
if (end_of_queue_search(phead, plist))
break;
- pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list);
+ pnetwork = container_of(plist, struct wlan_network, list);
if (is_same_network(&pnetwork->network, target))
break;
if ((oldest == ((struct wlan_network *)0)) ||
@@ -1135,8 +1135,8 @@ int r8712_select_and_join_from_scan(struct mlme_priv *pmlmepriv)
}
return _FAIL;
}
- pnetwork = LIST_CONTAINOR(pmlmepriv->pscanned,
- struct wlan_network, list);
+ pnetwork = container_of(pmlmepriv->pscanned,
+ struct wlan_network, list);
if (pnetwork == NULL)
return _FAIL;
pmlmepriv->pscanned = pmlmepriv->pscanned->next;
@@ -1205,7 +1205,7 @@ sint r8712_set_auth(struct _adapter *adapter,
return _FAIL;
psetauthparm = kzalloc(sizeof(*psetauthparm), GFP_ATOMIC);
- if (psetauthparm == NULL) {
+ if (!psetauthparm) {
kfree(pcmd);
return _FAIL;
}
@@ -1234,7 +1234,7 @@ sint r8712_set_key(struct _adapter *adapter,
if (!pcmd)
return _FAIL;
psetkeyparm = kzalloc(sizeof(*psetkeyparm), GFP_ATOMIC);
- if (psetkeyparm == NULL) {
+ if (!psetkeyparm) {
ret = _FAIL;
goto err_free_cmd;
}
diff --git a/drivers/staging/rtl8712/rtl871x_recv.c b/drivers/staging/rtl8712/rtl871x_recv.c
index 616ca3965919..23c143890252 100644
--- a/drivers/staging/rtl8712/rtl871x_recv.c
+++ b/drivers/staging/rtl8712/rtl871x_recv.c
@@ -142,7 +142,7 @@ void r8712_free_recvframe_queue(struct __queue *pframequeue,
phead = &pframequeue->queue;
plist = phead->next;
while (!end_of_queue_search(phead, plist)) {
- precvframe = LIST_CONTAINOR(plist, union recv_frame, u);
+ precvframe = container_of(plist, union recv_frame, u.list);
plist = plist->next;
r8712_free_recvframe(precvframe, pfree_recv_queue);
}
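
Note: the rtl871x_recv.c conversion has to spell out the full member path, u.list. The old LIST_CONTAINOR(plist, union recv_frame, u) only worked because the list head sits at offset zero inside the union, whereas container_of() type-checks the pointer against the exact member named. A tiny illustration with made-up names:

#include <linux/kernel.h>
#include <linux/list.h>

union demo_frame {
	union {
		struct list_head list;
		unsigned int mem[32];
	} u;
};

static union demo_frame *demo_from_node(struct list_head *node)
{
	/*
	 * container_of() checks 'node' against the type of the named
	 * member, so the embedded list_head must be spelled u.list.
	 */
	return container_of(node, union demo_frame, u.list);
}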
diff --git a/drivers/staging/rtl8712/rtl871x_sta_mgt.c b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
index e90c00de7499..e11ce2896893 100644
--- a/drivers/staging/rtl8712/rtl871x_sta_mgt.c
+++ b/drivers/staging/rtl8712/rtl871x_sta_mgt.c
@@ -216,8 +216,8 @@ void r8712_free_all_stainfo(struct _adapter *padapter)
phead = &(pstapriv->sta_hash[index]);
plist = phead->next;
while (!end_of_queue_search(phead, plist)) {
- psta = LIST_CONTAINOR(plist,
- struct sta_info, hash_list);
+ psta = container_of(plist,
+ struct sta_info, hash_list);
plist = plist->next;
if (pbcmc_stainfo != psta)
r8712_free_stainfo(padapter, psta);
@@ -241,7 +241,7 @@ struct sta_info *r8712_get_stainfo(struct sta_priv *pstapriv, u8 *hwaddr)
phead = &(pstapriv->sta_hash[index]);
plist = phead->next;
while (!end_of_queue_search(phead, plist)) {
- psta = LIST_CONTAINOR(plist, struct sta_info, hash_list);
+ psta = container_of(plist, struct sta_info, hash_list);
if ((!memcmp(psta->hwaddr, hwaddr, ETH_ALEN))) {
/* if found the matched address */
break;
diff --git a/drivers/staging/rtl8712/rtl871x_xmit.c b/drivers/staging/rtl8712/rtl871x_xmit.c
index c6d952f5d8f9..99256baafd38 100644
--- a/drivers/staging/rtl8712/rtl871x_xmit.c
+++ b/drivers/staging/rtl8712/rtl871x_xmit.c
@@ -848,7 +848,7 @@ void r8712_free_xmitframe_queue(struct xmit_priv *pxmitpriv,
phead = &pframequeue->queue;
plist = phead->next;
while (!end_of_queue_search(phead, plist)) {
- pxmitframe = LIST_CONTAINOR(plist, struct xmit_frame, list);
+ pxmitframe = container_of(plist, struct xmit_frame, list);
plist = plist->next;
r8712_free_xmitframe(pxmitpriv, pxmitframe);
}
diff --git a/drivers/staging/rtl8712/usb_ops_linux.c b/drivers/staging/rtl8712/usb_ops_linux.c
index 454cdf6c7fa1..6f12345709c2 100644
--- a/drivers/staging/rtl8712/usb_ops_linux.c
+++ b/drivers/staging/rtl8712/usb_ops_linux.c
@@ -504,7 +504,7 @@ int r8712_usbctrl_vendorreq(struct intf_priv *pintfpriv, u8 request, u16 value,
u8 *palloc_buf, *pIo_buf;
palloc_buf = kmalloc((u32)len + 16, GFP_ATOMIC);
- if (palloc_buf == NULL)
+ if (!palloc_buf)
return -ENOMEM;
pIo_buf = palloc_buf + 16 - ((addr_t)(palloc_buf) & 0x0f);
if (requesttype == 0x01) {
diff --git a/drivers/staging/rtl8723au/Kconfig b/drivers/staging/rtl8723au/Kconfig
index 435f3594dabe..277c1ab69317 100644
--- a/drivers/staging/rtl8723au/Kconfig
+++ b/drivers/staging/rtl8723au/Kconfig
@@ -1,5 +1,5 @@
config R8723AU
- tristate "Realtek RTL8723AU Wireless LAN NIC driver"
+ tristate "Realtek RTL8723AU Wireless LAN NIC driver (deprecated)"
depends on USB && WLAN && RFKILL
select WIRELESS_EXT
select WEXT_PRIV
@@ -7,7 +7,10 @@ config R8723AU
default n
---help---
This option adds the Realtek RTL8723AU USB device such as found in
- the Lenovo Yogi 13 tablet. If built as a module, it will be called r8723au.
+ the Lenovo Yoga 13 tablet. If built as a module, it will be called r8723au.
+
+ Note: This driver is deprecated and scheduled to be removed in a
+ future kernel release. Please use rtl8xxxu instead.
if R8723AU
diff --git a/drivers/staging/rtl8723au/core/rtw_ap.c b/drivers/staging/rtl8723au/core/rtw_ap.c
index f68e2770255d..aad686da3cf0 100644
--- a/drivers/staging/rtl8723au/core/rtw_ap.c
+++ b/drivers/staging/rtl8723au/core/rtw_ap.c
@@ -1719,7 +1719,8 @@ void stop_ap_mode23a(struct rtw_adapter *padapter)
}
spin_unlock_bh(&pacl_node_q->lock);
- DBG_8723A("%s, free acl_node_queue, num =%d\n", __func__, pacl_list->num);
+ DBG_8723A("%s, free acl_node_queue, num =%d\n",
+ __func__, pacl_list->num);
rtw_sta_flush23a(padapter);
diff --git a/drivers/staging/rtl8723au/core/rtw_recv.c b/drivers/staging/rtl8723au/core/rtw_recv.c
index 989ed0726817..150dabc2a58d 100644
--- a/drivers/staging/rtl8723au/core/rtw_recv.c
+++ b/drivers/staging/rtl8723au/core/rtw_recv.c
@@ -211,31 +211,6 @@ u32 rtw_free_uc_swdec_pending_queue23a(struct rtw_adapter *adapter)
return cnt;
}
-int rtw_enqueue_recvbuf23a_to_head(struct recv_buf *precvbuf, struct rtw_queue *queue)
-{
- spin_lock_bh(&queue->lock);
-
- list_del_init(&precvbuf->list);
- list_add(&precvbuf->list, get_list_head(queue));
-
- spin_unlock_bh(&queue->lock);
-
- return _SUCCESS;
-}
-
-int rtw_enqueue_recvbuf23a(struct recv_buf *precvbuf, struct rtw_queue *queue)
-{
- unsigned long irqL;
-
- spin_lock_irqsave(&queue->lock, irqL);
-
- list_del_init(&precvbuf->list);
-
- list_add_tail(&precvbuf->list, get_list_head(queue));
- spin_unlock_irqrestore(&queue->lock, irqL);
- return _SUCCESS;
-}
-
struct recv_buf *rtw_dequeue_recvbuf23a (struct rtw_queue *queue)
{
unsigned long irqL;
diff --git a/drivers/staging/rtl8723au/core/rtw_wlan_util.c b/drivers/staging/rtl8723au/core/rtw_wlan_util.c
index cc2b84be9774..694cf17f82cf 100644
--- a/drivers/staging/rtl8723au/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8723au/core/rtw_wlan_util.c
@@ -304,21 +304,11 @@ inline void rtw_set_oper_ch23a(struct rtw_adapter *adapter, u8 ch)
adapter_to_dvobj(adapter)->oper_channel = ch;
}
-inline u8 rtw_get_oper_bw23a(struct rtw_adapter *adapter)
-{
- return adapter_to_dvobj(adapter)->oper_bwmode;
-}
-
inline void rtw_set_oper_bw23a(struct rtw_adapter *adapter, u8 bw)
{
adapter_to_dvobj(adapter)->oper_bwmode = bw;
}
-inline u8 rtw_get_oper_ch23aoffset(struct rtw_adapter *adapter)
-{
- return adapter_to_dvobj(adapter)->oper_ch_offset;
-}
-
inline void rtw_set_oper_ch23aoffset23a(struct rtw_adapter *adapter, u8 offset)
{
adapter_to_dvobj(adapter)->oper_ch_offset = offset;
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c b/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
index e81301fcb01d..1ea0af499ce9 100644
--- a/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_hal_init.c
@@ -1175,8 +1175,6 @@ int InitLLTTable23a(struct rtw_adapter *padapter, u32 boundary)
/* Let last entry point to the start entry of ring buffer */
status = _LLTWrite(padapter, Last_Entry_Of_TxPktBuf, txpktbuf_bndy);
- if (status != _SUCCESS)
- return status;
return status;
}
diff --git a/drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c b/drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c
index ce0d8d894787..24c0ff3d82bc 100644
--- a/drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c
+++ b/drivers/staging/rtl8723au/hal/rtl8723a_rf6052.c
@@ -465,7 +465,7 @@ static int phy_RF6052_Config_ParaFile(struct rtw_adapter *Adapter)
break;
}
- /*----Restore RFENV control type----*/;
+ /*----Restore RFENV control type----*/
switch (eRFPath) {
case RF_PATH_A:
PHY_SetBBReg(Adapter, pPhyReg->rfintfs,
diff --git a/drivers/staging/rtl8723au/include/rtw_mlme_ext.h b/drivers/staging/rtl8723au/include/rtw_mlme_ext.h
index ea2a6c914d38..0e7d3da91471 100644
--- a/drivers/staging/rtl8723au/include/rtw_mlme_ext.h
+++ b/drivers/staging/rtl8723au/include/rtw_mlme_ext.h
@@ -461,9 +461,7 @@ void Update23aTblForSoftAP(u8 *bssrateset, u32 bssratelen);
u8 rtw_get_oper_ch23a(struct rtw_adapter *adapter);
void rtw_set_oper_ch23a(struct rtw_adapter *adapter, u8 ch);
-u8 rtw_get_oper_bw23a(struct rtw_adapter *adapter);
void rtw_set_oper_bw23a(struct rtw_adapter *adapter, u8 bw);
-u8 rtw_get_oper_ch23aoffset(struct rtw_adapter *adapter);
void rtw_set_oper_ch23aoffset23a(struct rtw_adapter *adapter, u8 offset);
void set_channel_bwmode23a(struct rtw_adapter *padapter, unsigned char channel,
diff --git a/drivers/staging/rtl8723au/include/rtw_recv.h b/drivers/staging/rtl8723au/include/rtw_recv.h
index dc784be3ddd9..85a5edb450e3 100644
--- a/drivers/staging/rtl8723au/include/rtw_recv.h
+++ b/drivers/staging/rtl8723au/include/rtw_recv.h
@@ -279,8 +279,6 @@ int rtw_enqueue_recvframe23a(struct recv_frame *precvframe, struct rtw_queue *qu
u32 rtw_free_uc_swdec_pending_queue23a(struct rtw_adapter *adapter);
-int rtw_enqueue_recvbuf23a_to_head(struct recv_buf *precvbuf, struct rtw_queue *queue);
-int rtw_enqueue_recvbuf23a(struct recv_buf *precvbuf, struct rtw_queue *queue);
struct recv_buf *rtw_dequeue_recvbuf23a(struct rtw_queue *queue);
void rtw_reordering_ctrl_timeout_handler23a(unsigned long pcontext);
diff --git a/drivers/staging/rtl8723au/os_dep/usb_intf.c b/drivers/staging/rtl8723au/os_dep/usb_intf.c
index 27b3a5b7d8d4..cf83efffbffd 100644
--- a/drivers/staging/rtl8723au/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8723au/os_dep/usb_intf.c
@@ -532,6 +532,7 @@ static int rtw_drv_init(struct usb_interface *pusb_intf,
{
struct rtw_adapter *if1 = NULL;
struct dvobj_priv *dvobj;
+ struct usb_device *udev;
int status = _FAIL;
RT_TRACE(_module_hci_intfs_c_, _drv_err_, "+rtw_drv_init\n");
@@ -544,6 +545,10 @@ static int rtw_drv_init(struct usb_interface *pusb_intf,
goto exit;
}
+ udev = dvobj->pusbdev;
+ dev_warn(&udev->dev, "WARNING: The rtl8723au driver is deprecated!");
+ dev_warn(&udev->dev, "Please use the rtl8xxxu driver for this device!");
+
if1 = rtw_usb_if1_init(dvobj, pusb_intf, pdid);
if (!if1) {
DBG_8723A("rtw_init_primary_adapter Failed!\n");
diff --git a/drivers/staging/rts5208/ms.c b/drivers/staging/rts5208/ms.c
index a780185a3754..0f0cd4a03cd4 100644
--- a/drivers/staging/rts5208/ms.c
+++ b/drivers/staging/rts5208/ms.c
@@ -49,7 +49,7 @@ static int ms_parse_err_code(struct rtsx_chip *chip)
}
static int ms_transfer_tpc(struct rtsx_chip *chip, u8 trans_mode,
- u8 tpc, u8 cnt, u8 cfg)
+ u8 tpc, u8 cnt, u8 cfg)
{
struct ms_info *ms_card = &chip->ms_card;
int retval;
@@ -2691,7 +2691,7 @@ static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
}
if ((log_blk < ms_start_idx[seg_no]) ||
- (log_blk >= ms_start_idx[seg_no+1])) {
+ (log_blk >= ms_start_idx[seg_no + 1])) {
if (!(chip->card_wp & MS_CARD)) {
retval = ms_erase_block(chip, phy_blk);
if (retval != STATUS_SUCCESS)
@@ -3836,7 +3836,7 @@ static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip,
start_page = (u8)(start_sector & ms_card->page_off);
for (seg_no = 0; seg_no < ARRAY_SIZE(ms_start_idx) - 1; seg_no++) {
- if (log_blk < ms_start_idx[seg_no+1])
+ if (log_blk < ms_start_idx[seg_no + 1])
break;
}
@@ -4264,7 +4264,7 @@ int mg_set_leaf_id(struct scsi_cmnd *srb, struct rtsx_chip *chip)
memset(buf1, 0, 32);
rtsx_stor_get_xfer_buf(buf2, min_t(int, 12, scsi_bufflen(srb)), srb);
for (i = 0; i < 8; i++)
- buf1[8+i] = buf2[4+i];
+ buf1[8 + i] = buf2[4 + i];
retval = ms_write_bytes(chip, PRO_WRITE_SHORT_DATA, 32, WAIT_INT,
buf1, 32);
@@ -4399,10 +4399,10 @@ int mg_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip)
rtsx_stor_get_xfer_buf(buf, bufflen, srb);
for (i = 0; i < 8; i++)
- buf[i] = buf[4+i];
+ buf[i] = buf[4 + i];
for (i = 0; i < 24; i++)
- buf[8+i] = 0;
+ buf[8 + i] = 0;
retval = ms_write_bytes(chip, PRO_WRITE_SHORT_DATA,
32, WAIT_INT, buf, 32);
@@ -4511,10 +4511,10 @@ int mg_rsp(struct scsi_cmnd *srb, struct rtsx_chip *chip)
rtsx_stor_get_xfer_buf(buf, bufflen, srb);
for (i = 0; i < 8; i++)
- buf[i] = buf[4+i];
+ buf[i] = buf[4 + i];
for (i = 0; i < 24; i++)
- buf[8+i] = 0;
+ buf[8 + i] = 0;
retval = ms_write_bytes(chip, PRO_WRITE_SHORT_DATA, 32, WAIT_INT,
buf, 32);
diff --git a/drivers/staging/rts5208/rtsx_card.c b/drivers/staging/rts5208/rtsx_card.c
index 437436f5dbdd..231833a3045e 100644
--- a/drivers/staging/rts5208/rtsx_card.c
+++ b/drivers/staging/rts5208/rtsx_card.c
@@ -628,11 +628,6 @@ void rtsx_init_cards(struct rtsx_chip *chip)
}
}
-static inline u8 double_depth(u8 depth)
-{
- return (depth > 1) ? (depth - 1) : depth;
-}
-
int switch_ssc_clock(struct rtsx_chip *chip, int clk)
{
int retval;
@@ -1184,22 +1179,6 @@ int check_card_wp(struct rtsx_chip *chip, unsigned int lun)
return 0;
}
-int check_card_fail(struct rtsx_chip *chip, unsigned int lun)
-{
- if (chip->card_fail & chip->lun2card[lun])
- return 1;
-
- return 0;
-}
-
-int check_card_ejected(struct rtsx_chip *chip, unsigned int lun)
-{
- if (chip->card_ejected & chip->lun2card[lun])
- return 1;
-
- return 0;
-}
-
u8 get_lun_card(struct rtsx_chip *chip, unsigned int lun)
{
if ((chip->card_ready & chip->lun2card[lun]) == XD_CARD)
diff --git a/drivers/staging/rts5208/rtsx_card.h b/drivers/staging/rts5208/rtsx_card.h
index 8f2cf9a4ec69..56df9a431d6d 100644
--- a/drivers/staging/rts5208/rtsx_card.h
+++ b/drivers/staging/rts5208/rtsx_card.h
@@ -1024,8 +1024,6 @@ int detect_card_cd(struct rtsx_chip *chip, int card);
int check_card_exist(struct rtsx_chip *chip, unsigned int lun);
int check_card_ready(struct rtsx_chip *chip, unsigned int lun);
int check_card_wp(struct rtsx_chip *chip, unsigned int lun);
-int check_card_fail(struct rtsx_chip *chip, unsigned int lun);
-int check_card_ejected(struct rtsx_chip *chip, unsigned int lun);
void eject_card(struct rtsx_chip *chip, unsigned int lun);
u8 get_lun_card(struct rtsx_chip *chip, unsigned int lun);
diff --git a/drivers/staging/rts5208/rtsx_chip.c b/drivers/staging/rts5208/rtsx_chip.c
index c0ce659a5aa6..bcc4b666d79f 100644
--- a/drivers/staging/rts5208/rtsx_chip.c
+++ b/drivers/staging/rts5208/rtsx_chip.c
@@ -43,14 +43,6 @@ static void rtsx_calibration(struct rtsx_chip *chip)
rtsx_write_phy_register(chip, 0x00, 0x0288);
}
-void rtsx_disable_card_int(struct rtsx_chip *chip)
-{
- u32 reg = rtsx_readl(chip, RTSX_BIER);
-
- reg &= ~(XD_INT_EN | SD_INT_EN | MS_INT_EN);
- rtsx_writel(chip, RTSX_BIER, reg);
-}
-
void rtsx_enable_card_int(struct rtsx_chip *chip)
{
u32 reg = rtsx_readl(chip, RTSX_BIER);
@@ -1447,12 +1439,6 @@ delink_stage:
rtsx_delink_stage(chip);
}
-void rtsx_undo_delink(struct rtsx_chip *chip)
-{
- chip->auto_delink_allowed = 0;
- rtsx_write_register(chip, CHANGE_LINK_STATE, 0x0A, 0x00);
-}
-
/**
* rtsx_stop_cmd - stop command transfer and DMA transfer
* @chip: Realtek's card reader chip
@@ -2000,27 +1986,6 @@ int rtsx_set_phy_reg_bit(struct rtsx_chip *chip, u8 reg, u8 bit)
return STATUS_SUCCESS;
}
-int rtsx_check_link_ready(struct rtsx_chip *chip)
-{
- int retval;
- u8 val;
-
- retval = rtsx_read_register(chip, IRQSTAT0, &val);
- if (retval) {
- rtsx_trace(chip);
- return retval;
- }
-
- dev_dbg(rtsx_dev(chip), "IRQSTAT0: 0x%x\n", val);
- if (val & LINK_RDY_INT) {
- dev_dbg(rtsx_dev(chip), "Delinked!\n");
- rtsx_write_register(chip, IRQSTAT0, LINK_RDY_INT, LINK_RDY_INT);
- return STATUS_FAIL;
- }
-
- return STATUS_SUCCESS;
-}
-
static void rtsx_handle_pm_dstate(struct rtsx_chip *chip, u8 dstate)
{
u32 ultmp;
diff --git a/drivers/staging/rts5208/rtsx_chip.h b/drivers/staging/rts5208/rtsx_chip.h
index c295b1eedb44..c08164f3247e 100644
--- a/drivers/staging/rts5208/rtsx_chip.h
+++ b/drivers/staging/rts5208/rtsx_chip.h
@@ -950,7 +950,6 @@ do { \
int rtsx_force_power_on(struct rtsx_chip *chip, u8 ctl);
int rtsx_force_power_down(struct rtsx_chip *chip, u8 ctl);
-void rtsx_disable_card_int(struct rtsx_chip *chip);
void rtsx_enable_card_int(struct rtsx_chip *chip);
void rtsx_enable_bus_int(struct rtsx_chip *chip);
void rtsx_disable_bus_int(struct rtsx_chip *chip);
@@ -958,7 +957,6 @@ int rtsx_reset_chip(struct rtsx_chip *chip);
int rtsx_init_chip(struct rtsx_chip *chip);
void rtsx_release_chip(struct rtsx_chip *chip);
void rtsx_polling_func(struct rtsx_chip *chip);
-void rtsx_undo_delink(struct rtsx_chip *chip);
void rtsx_stop_cmd(struct rtsx_chip *chip, int card);
int rtsx_write_register(struct rtsx_chip *chip, u16 addr, u8 mask, u8 data);
int rtsx_read_register(struct rtsx_chip *chip, u16 addr, u8 *data);
@@ -975,7 +973,6 @@ int rtsx_read_efuse(struct rtsx_chip *chip, u8 addr, u8 *val);
int rtsx_write_efuse(struct rtsx_chip *chip, u8 addr, u8 val);
int rtsx_clr_phy_reg_bit(struct rtsx_chip *chip, u8 reg, u8 bit);
int rtsx_set_phy_reg_bit(struct rtsx_chip *chip, u8 reg, u8 bit);
-int rtsx_check_link_ready(struct rtsx_chip *chip);
void rtsx_enter_ss(struct rtsx_chip *chip);
void rtsx_exit_ss(struct rtsx_chip *chip);
int rtsx_pre_handle_interrupt(struct rtsx_chip *chip);
diff --git a/drivers/staging/rts5208/sd.c b/drivers/staging/rts5208/sd.c
index 87d697623cba..6219e047557e 100644
--- a/drivers/staging/rts5208/sd.c
+++ b/drivers/staging/rts5208/sd.c
@@ -1928,9 +1928,9 @@ static int sd_tuning_rx(struct rtsx_chip *chip)
tuning_cmd = sd_sdr_tuning_rx_cmd;
} else {
- if (CHK_MMC_DDR52(sd_card))
+ if (CHK_MMC_DDR52(sd_card)) {
tuning_cmd = mmc_ddr_tunning_rx_cmd;
- else {
+ } else {
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2054,9 +2054,9 @@ static int sd_tuning_tx(struct rtsx_chip *chip)
tuning_cmd = sd_sdr_tuning_tx_cmd;
} else {
- if (CHK_MMC_DDR52(sd_card))
+ if (CHK_MMC_DDR52(sd_card)) {
tuning_cmd = sd_ddr_tuning_tx_cmd;
- else {
+ } else {
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2678,9 +2678,9 @@ RTY_SD_RST:
}
j++;
- if (j < 3)
+ if (j < 3) {
goto RTY_SD_RST;
- else {
+ } else {
rtsx_trace(chip);
return STATUS_FAIL;
}
@@ -2690,9 +2690,9 @@ RTY_SD_RST:
SD_RSP_TYPE_R3, rsp, 5);
if (retval != STATUS_SUCCESS) {
k++;
- if (k < 3)
+ if (k < 3) {
goto RTY_SD_RST;
- else {
+ } else {
rtsx_trace(chip);
return STATUS_FAIL;
}
diff --git a/drivers/staging/skein/skein_api.c b/drivers/staging/skein/skein_api.c
index 36f849fbba5e..cab26e736111 100644
--- a/drivers/staging/skein/skein_api.c
+++ b/drivers/staging/skein/skein_api.c
@@ -165,7 +165,6 @@ int skein_update(struct skein_ctx *ctx, const u8 *msg,
break;
}
return ret;
-
}
int skein_update_bits(struct skein_ctx *ctx, const u8 *msg,
@@ -210,7 +209,7 @@ int skein_update_bits(struct skein_ctx *ctx, const u8 *msg,
/* internal sanity check: there IS a partial byte in the buffer! */
skein_assert(length != 0);
/* partial byte bit mask */
- mask = (u8) (1u << (7 - (msg_bit_cnt & 7)));
+ mask = (u8)(1u << (7 - (msg_bit_cnt & 7)));
/* apply bit padding on final byte (in the buffer) */
up[length - 1] = (u8)((up[length - 1] & (0 - mask)) | mask);
diff --git a/drivers/staging/skein/skein_base.c b/drivers/staging/skein/skein_base.c
index 25a01ca76953..c24a57396483 100644
--- a/drivers/staging/skein/skein_base.c
+++ b/drivers/staging/skein/skein_base.c
@@ -58,7 +58,7 @@ int skein_256_init(struct skein_256_ctx *ctx, size_t hash_bit_len)
cfg.w[1] = skein_swap64(hash_bit_len);
cfg.w[2] = skein_swap64(SKEIN_CFG_TREE_INFO_SEQUENTIAL);
/* zero pad config block */
- memset(&cfg.w[3], 0, sizeof(cfg) - 3*sizeof(cfg.w[0]));
+ memset(&cfg.w[3], 0, sizeof(cfg) - 3 * sizeof(cfg.w[0]));
/* compute the initial chaining values from config block */
/* zero the chaining variables */
@@ -98,7 +98,7 @@ int skein_256_init_ext(struct skein_256_ctx *ctx, size_t hash_bit_len,
skein_assert(sizeof(cfg.b) >= sizeof(ctx->x));
/* do a mini-Init right here */
/* set output hash bit count = state size */
- ctx->h.hash_bit_len = 8*sizeof(ctx->x);
+ ctx->h.hash_bit_len = 8 * sizeof(ctx->x);
/* set tweaks: T0 = 0; T1 = KEY type */
skein_start_new_type(ctx, KEY);
/* zero the initial chaining variables */
@@ -171,7 +171,7 @@ int skein_256_update(struct skein_256_ctx *ctx, const u8 *msg,
*/
if (msg_byte_cnt > SKEIN_256_BLOCK_BYTES) {
/* number of full blocks to process */
- n = (msg_byte_cnt-1) / SKEIN_256_BLOCK_BYTES;
+ n = (msg_byte_cnt - 1) / SKEIN_256_BLOCK_BYTES;
skein_256_process_block(ctx, msg, n,
SKEIN_256_BLOCK_BYTES);
msg_byte_cnt -= n * SKEIN_256_BLOCK_BYTES;
@@ -205,7 +205,7 @@ int skein_256_final(struct skein_256_ctx *ctx, u8 *hash_val)
/* zero pad b[] if necessary */
if (ctx->h.b_cnt < SKEIN_256_BLOCK_BYTES)
memset(&ctx->b[ctx->h.b_cnt], 0,
- SKEIN_256_BLOCK_BYTES - ctx->h.b_cnt);
+ SKEIN_256_BLOCK_BYTES - ctx->h.b_cnt);
/* process the final block */
skein_256_process_block(ctx, ctx->b, 1, ctx->h.b_cnt);
@@ -219,19 +219,19 @@ int skein_256_final(struct skein_256_ctx *ctx, u8 *hash_val)
memset(ctx->b, 0, sizeof(ctx->b));
/* keep a local copy of counter mode "key" */
memcpy(x, ctx->x, sizeof(x));
- for (i = 0; i*SKEIN_256_BLOCK_BYTES < byte_cnt; i++) {
+ for (i = 0; i * SKEIN_256_BLOCK_BYTES < byte_cnt; i++) {
/* build the counter block */
- ((u64 *)ctx->b)[0] = skein_swap64((u64) i);
+ ((u64 *)ctx->b)[0] = skein_swap64((u64)i);
skein_start_new_type(ctx, OUT_FINAL);
/* run "counter mode" */
skein_256_process_block(ctx, ctx->b, 1, sizeof(u64));
/* number of output bytes left to go */
- n = byte_cnt - i*SKEIN_256_BLOCK_BYTES;
+ n = byte_cnt - i * SKEIN_256_BLOCK_BYTES;
if (n >= SKEIN_256_BLOCK_BYTES)
n = SKEIN_256_BLOCK_BYTES;
/* "output" the ctr mode bytes */
- skein_put64_lsb_first(hash_val+i*SKEIN_256_BLOCK_BYTES, ctx->x,
- n);
+ skein_put64_lsb_first(hash_val + (i * SKEIN_256_BLOCK_BYTES),
+ ctx->x, n);
/* restore the counter mode key for next time */
memcpy(ctx->x, x, sizeof(x));
}
@@ -282,7 +282,7 @@ int skein_512_init(struct skein_512_ctx *ctx, size_t hash_bit_len)
cfg.w[1] = skein_swap64(hash_bit_len);
cfg.w[2] = skein_swap64(SKEIN_CFG_TREE_INFO_SEQUENTIAL);
/* zero pad config block */
- memset(&cfg.w[3], 0, sizeof(cfg) - 3*sizeof(cfg.w[0]));
+ memset(&cfg.w[3], 0, sizeof(cfg) - 3 * sizeof(cfg.w[0]));
/* compute the initial chaining values from config block */
/* zero the chaining variables */
@@ -326,7 +326,7 @@ int skein_512_init_ext(struct skein_512_ctx *ctx, size_t hash_bit_len,
skein_assert(sizeof(cfg.b) >= sizeof(ctx->x));
/* do a mini-Init right here */
/* set output hash bit count = state size */
- ctx->h.hash_bit_len = 8*sizeof(ctx->x);
+ ctx->h.hash_bit_len = 8 * sizeof(ctx->x);
/* set tweaks: T0 = 0; T1 = KEY type */
skein_start_new_type(ctx, KEY);
/* zero the initial chaining variables */
@@ -398,7 +398,7 @@ int skein_512_update(struct skein_512_ctx *ctx, const u8 *msg,
*/
if (msg_byte_cnt > SKEIN_512_BLOCK_BYTES) {
/* number of full blocks to process */
- n = (msg_byte_cnt-1) / SKEIN_512_BLOCK_BYTES;
+ n = (msg_byte_cnt - 1) / SKEIN_512_BLOCK_BYTES;
skein_512_process_block(ctx, msg, n,
SKEIN_512_BLOCK_BYTES);
msg_byte_cnt -= n * SKEIN_512_BLOCK_BYTES;
@@ -432,7 +432,7 @@ int skein_512_final(struct skein_512_ctx *ctx, u8 *hash_val)
/* zero pad b[] if necessary */
if (ctx->h.b_cnt < SKEIN_512_BLOCK_BYTES)
memset(&ctx->b[ctx->h.b_cnt], 0,
- SKEIN_512_BLOCK_BYTES - ctx->h.b_cnt);
+ SKEIN_512_BLOCK_BYTES - ctx->h.b_cnt);
/* process the final block */
skein_512_process_block(ctx, ctx->b, 1, ctx->h.b_cnt);
@@ -446,19 +446,19 @@ int skein_512_final(struct skein_512_ctx *ctx, u8 *hash_val)
memset(ctx->b, 0, sizeof(ctx->b));
/* keep a local copy of counter mode "key" */
memcpy(x, ctx->x, sizeof(x));
- for (i = 0; i*SKEIN_512_BLOCK_BYTES < byte_cnt; i++) {
+ for (i = 0; i * SKEIN_512_BLOCK_BYTES < byte_cnt; i++) {
/* build the counter block */
- ((u64 *)ctx->b)[0] = skein_swap64((u64) i);
+ ((u64 *)ctx->b)[0] = skein_swap64((u64)i);
skein_start_new_type(ctx, OUT_FINAL);
/* run "counter mode" */
skein_512_process_block(ctx, ctx->b, 1, sizeof(u64));
/* number of output bytes left to go */
- n = byte_cnt - i*SKEIN_512_BLOCK_BYTES;
+ n = byte_cnt - i * SKEIN_512_BLOCK_BYTES;
if (n >= SKEIN_512_BLOCK_BYTES)
n = SKEIN_512_BLOCK_BYTES;
/* "output" the ctr mode bytes */
- skein_put64_lsb_first(hash_val+i*SKEIN_512_BLOCK_BYTES, ctx->x,
- n);
+ skein_put64_lsb_first(hash_val + (i * SKEIN_512_BLOCK_BYTES),
+ ctx->x, n);
/* restore the counter mode key for next time */
memcpy(ctx->x, x, sizeof(x));
}
@@ -506,7 +506,7 @@ int skein_1024_init(struct skein_1024_ctx *ctx, size_t hash_bit_len)
cfg.w[1] = skein_swap64(hash_bit_len);
cfg.w[2] = skein_swap64(SKEIN_CFG_TREE_INFO_SEQUENTIAL);
/* zero pad config block */
- memset(&cfg.w[3], 0, sizeof(cfg) - 3*sizeof(cfg.w[0]));
+ memset(&cfg.w[3], 0, sizeof(cfg) - 3 * sizeof(cfg.w[0]));
/* compute the initial chaining values from config block */
/* zero the chaining variables */
@@ -547,7 +547,7 @@ int skein_1024_init_ext(struct skein_1024_ctx *ctx, size_t hash_bit_len,
skein_assert(sizeof(cfg.b) >= sizeof(ctx->x));
/* do a mini-Init right here */
/* set output hash bit count = state size */
- ctx->h.hash_bit_len = 8*sizeof(ctx->x);
+ ctx->h.hash_bit_len = 8 * sizeof(ctx->x);
/* set tweaks: T0 = 0; T1 = KEY type */
skein_start_new_type(ctx, KEY);
/* zero the initial chaining variables */
@@ -620,7 +620,7 @@ int skein_1024_update(struct skein_1024_ctx *ctx, const u8 *msg,
*/
if (msg_byte_cnt > SKEIN_1024_BLOCK_BYTES) {
/* number of full blocks to process */
- n = (msg_byte_cnt-1) / SKEIN_1024_BLOCK_BYTES;
+ n = (msg_byte_cnt - 1) / SKEIN_1024_BLOCK_BYTES;
skein_1024_process_block(ctx, msg, n,
SKEIN_1024_BLOCK_BYTES);
msg_byte_cnt -= n * SKEIN_1024_BLOCK_BYTES;
@@ -654,7 +654,7 @@ int skein_1024_final(struct skein_1024_ctx *ctx, u8 *hash_val)
/* zero pad b[] if necessary */
if (ctx->h.b_cnt < SKEIN_1024_BLOCK_BYTES)
memset(&ctx->b[ctx->h.b_cnt], 0,
- SKEIN_1024_BLOCK_BYTES - ctx->h.b_cnt);
+ SKEIN_1024_BLOCK_BYTES - ctx->h.b_cnt);
/* process the final block */
skein_1024_process_block(ctx, ctx->b, 1, ctx->h.b_cnt);
@@ -668,19 +668,19 @@ int skein_1024_final(struct skein_1024_ctx *ctx, u8 *hash_val)
memset(ctx->b, 0, sizeof(ctx->b));
/* keep a local copy of counter mode "key" */
memcpy(x, ctx->x, sizeof(x));
- for (i = 0; i*SKEIN_1024_BLOCK_BYTES < byte_cnt; i++) {
+ for (i = 0; i * SKEIN_1024_BLOCK_BYTES < byte_cnt; i++) {
/* build the counter block */
- ((u64 *)ctx->b)[0] = skein_swap64((u64) i);
+ ((u64 *)ctx->b)[0] = skein_swap64((u64)i);
skein_start_new_type(ctx, OUT_FINAL);
/* run "counter mode" */
skein_1024_process_block(ctx, ctx->b, 1, sizeof(u64));
/* number of output bytes left to go */
- n = byte_cnt - i*SKEIN_1024_BLOCK_BYTES;
+ n = byte_cnt - i * SKEIN_1024_BLOCK_BYTES;
if (n >= SKEIN_1024_BLOCK_BYTES)
n = SKEIN_1024_BLOCK_BYTES;
/* "output" the ctr mode bytes */
- skein_put64_lsb_first(hash_val+i*SKEIN_1024_BLOCK_BYTES, ctx->x,
- n);
+ skein_put64_lsb_first(hash_val + (i * SKEIN_1024_BLOCK_BYTES),
+ ctx->x, n);
/* restore the counter mode key for next time */
memcpy(ctx->x, x, sizeof(x));
}
@@ -702,7 +702,7 @@ int skein_256_final_pad(struct skein_256_ctx *ctx, u8 *hash_val)
/* zero pad b[] if necessary */
if (ctx->h.b_cnt < SKEIN_256_BLOCK_BYTES)
memset(&ctx->b[ctx->h.b_cnt], 0,
- SKEIN_256_BLOCK_BYTES - ctx->h.b_cnt);
+ SKEIN_256_BLOCK_BYTES - ctx->h.b_cnt);
/* process the final block */
skein_256_process_block(ctx, ctx->b, 1, ctx->h.b_cnt);
@@ -724,7 +724,7 @@ int skein_512_final_pad(struct skein_512_ctx *ctx, u8 *hash_val)
/* zero pad b[] if necessary */
if (ctx->h.b_cnt < SKEIN_512_BLOCK_BYTES)
memset(&ctx->b[ctx->h.b_cnt], 0,
- SKEIN_512_BLOCK_BYTES - ctx->h.b_cnt);
+ SKEIN_512_BLOCK_BYTES - ctx->h.b_cnt);
/* process the final block */
skein_512_process_block(ctx, ctx->b, 1, ctx->h.b_cnt);
@@ -746,7 +746,7 @@ int skein_1024_final_pad(struct skein_1024_ctx *ctx, u8 *hash_val)
/* zero pad b[] if necessary */
if (ctx->h.b_cnt < SKEIN_1024_BLOCK_BYTES)
memset(&ctx->b[ctx->h.b_cnt], 0,
- SKEIN_1024_BLOCK_BYTES - ctx->h.b_cnt);
+ SKEIN_1024_BLOCK_BYTES - ctx->h.b_cnt);
/* process the final block */
skein_1024_process_block(ctx, ctx->b, 1, ctx->h.b_cnt);
@@ -775,19 +775,19 @@ int skein_256_output(struct skein_256_ctx *ctx, u8 *hash_val)
memset(ctx->b, 0, sizeof(ctx->b));
/* keep a local copy of counter mode "key" */
memcpy(x, ctx->x, sizeof(x));
- for (i = 0; i*SKEIN_256_BLOCK_BYTES < byte_cnt; i++) {
+ for (i = 0; i * SKEIN_256_BLOCK_BYTES < byte_cnt; i++) {
/* build the counter block */
- ((u64 *)ctx->b)[0] = skein_swap64((u64) i);
+ ((u64 *)ctx->b)[0] = skein_swap64((u64)i);
skein_start_new_type(ctx, OUT_FINAL);
/* run "counter mode" */
skein_256_process_block(ctx, ctx->b, 1, sizeof(u64));
/* number of output bytes left to go */
- n = byte_cnt - i*SKEIN_256_BLOCK_BYTES;
+ n = byte_cnt - i * SKEIN_256_BLOCK_BYTES;
if (n >= SKEIN_256_BLOCK_BYTES)
n = SKEIN_256_BLOCK_BYTES;
/* "output" the ctr mode bytes */
- skein_put64_lsb_first(hash_val+i*SKEIN_256_BLOCK_BYTES, ctx->x,
- n);
+ skein_put64_lsb_first(hash_val + (i * SKEIN_256_BLOCK_BYTES),
+ ctx->x, n);
/* restore the counter mode key for next time */
memcpy(ctx->x, x, sizeof(x));
}
@@ -812,19 +812,19 @@ int skein_512_output(struct skein_512_ctx *ctx, u8 *hash_val)
memset(ctx->b, 0, sizeof(ctx->b));
/* keep a local copy of counter mode "key" */
memcpy(x, ctx->x, sizeof(x));
- for (i = 0; i*SKEIN_512_BLOCK_BYTES < byte_cnt; i++) {
+ for (i = 0; i * SKEIN_512_BLOCK_BYTES < byte_cnt; i++) {
/* build the counter block */
- ((u64 *)ctx->b)[0] = skein_swap64((u64) i);
+ ((u64 *)ctx->b)[0] = skein_swap64((u64)i);
skein_start_new_type(ctx, OUT_FINAL);
/* run "counter mode" */
skein_512_process_block(ctx, ctx->b, 1, sizeof(u64));
/* number of output bytes left to go */
- n = byte_cnt - i*SKEIN_512_BLOCK_BYTES;
+ n = byte_cnt - i * SKEIN_512_BLOCK_BYTES;
if (n >= SKEIN_512_BLOCK_BYTES)
n = SKEIN_512_BLOCK_BYTES;
/* "output" the ctr mode bytes */
- skein_put64_lsb_first(hash_val+i*SKEIN_512_BLOCK_BYTES, ctx->x,
- n);
+ skein_put64_lsb_first(hash_val + (i * SKEIN_512_BLOCK_BYTES),
+ ctx->x, n);
/* restore the counter mode key for next time */
memcpy(ctx->x, x, sizeof(x));
}
@@ -849,19 +849,19 @@ int skein_1024_output(struct skein_1024_ctx *ctx, u8 *hash_val)
memset(ctx->b, 0, sizeof(ctx->b));
/* keep a local copy of counter mode "key" */
memcpy(x, ctx->x, sizeof(x));
- for (i = 0; i*SKEIN_1024_BLOCK_BYTES < byte_cnt; i++) {
+ for (i = 0; i * SKEIN_1024_BLOCK_BYTES < byte_cnt; i++) {
/* build the counter block */
- ((u64 *)ctx->b)[0] = skein_swap64((u64) i);
+ ((u64 *)ctx->b)[0] = skein_swap64((u64)i);
skein_start_new_type(ctx, OUT_FINAL);
/* run "counter mode" */
skein_1024_process_block(ctx, ctx->b, 1, sizeof(u64));
/* number of output bytes left to go */
- n = byte_cnt - i*SKEIN_1024_BLOCK_BYTES;
+ n = byte_cnt - i * SKEIN_1024_BLOCK_BYTES;
if (n >= SKEIN_1024_BLOCK_BYTES)
n = SKEIN_1024_BLOCK_BYTES;
/* "output" the ctr mode bytes */
- skein_put64_lsb_first(hash_val+i*SKEIN_1024_BLOCK_BYTES, ctx->x,
- n);
+ skein_put64_lsb_first(hash_val + (i * SKEIN_1024_BLOCK_BYTES),
+ ctx->x, n);
/* restore the counter mode key for next time */
memcpy(ctx->x, x, sizeof(x));
}
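
Note: the spacing fixes above all land in Skein's output stage, which produces an arbitrary-length digest by running the block function in counter mode over the final chaining value. A condensed, hedged sketch of that loop shape — the constants and the process-block stub are stand-ins, not the driver's API:

#include <linux/string.h>
#include <linux/types.h>

#define DEMO_BLOCK_BYTES 32

static void demo_process_block(u64 *state, const u64 *ctr_block)
{
	state[0] ^= ctr_block[0];	/* placeholder mixing only */
}

static void demo_ctr_output(u64 *state, u8 *out, size_t byte_cnt)
{
	u64 saved[DEMO_BLOCK_BYTES / 8];	/* counter-mode "key" */
	u64 ctr_block[DEMO_BLOCK_BYTES / 8] = { 0 };
	size_t i, n;

	memcpy(saved, state, sizeof(saved));
	for (i = 0; i * DEMO_BLOCK_BYTES < byte_cnt; i++) {
		ctr_block[0] = i;			/* build the counter block */
		demo_process_block(state, ctr_block);
		n = byte_cnt - i * DEMO_BLOCK_BYTES;	/* bytes still needed */
		if (n > DEMO_BLOCK_BYTES)
			n = DEMO_BLOCK_BYTES;
		memcpy(out + i * DEMO_BLOCK_BYTES, state, n);
		memcpy(state, saved, sizeof(saved));	/* restore for next block */
	}
}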
diff --git a/drivers/staging/skein/skein_base.h b/drivers/staging/skein/skein_base.h
index 3c7f8ad3627d..dc464f334a58 100644
--- a/drivers/staging/skein/skein_base.h
+++ b/drivers/staging/skein/skein_base.h
@@ -32,7 +32,7 @@
/* below two prototype assume we are handed aligned data */
#define skein_put64_lsb_first(dst08, src64, b_cnt) memcpy(dst08, src64, b_cnt)
#define skein_get64_lsb_first(dst64, src08, w_cnt) \
- memcpy(dst64, src08, 8*(w_cnt))
+ memcpy(dst64, src08, 8 * (w_cnt))
#define skein_swap64(w64) (w64)
enum {
@@ -48,17 +48,17 @@ enum {
#define SKEIN_1024_STATE_WORDS 16
#define SKEIN_MAX_STATE_WORDS 16
-#define SKEIN_256_STATE_BYTES (8*SKEIN_256_STATE_WORDS)
-#define SKEIN_512_STATE_BYTES (8*SKEIN_512_STATE_WORDS)
-#define SKEIN_1024_STATE_BYTES (8*SKEIN_1024_STATE_WORDS)
+#define SKEIN_256_STATE_BYTES (8 * SKEIN_256_STATE_WORDS)
+#define SKEIN_512_STATE_BYTES (8 * SKEIN_512_STATE_WORDS)
+#define SKEIN_1024_STATE_BYTES (8 * SKEIN_1024_STATE_WORDS)
-#define SKEIN_256_STATE_BITS (64*SKEIN_256_STATE_WORDS)
-#define SKEIN_512_STATE_BITS (64*SKEIN_512_STATE_WORDS)
-#define SKEIN_1024_STATE_BITS (64*SKEIN_1024_STATE_WORDS)
+#define SKEIN_256_STATE_BITS (64 * SKEIN_256_STATE_WORDS)
+#define SKEIN_512_STATE_BITS (64 * SKEIN_512_STATE_WORDS)
+#define SKEIN_1024_STATE_BITS (64 * SKEIN_1024_STATE_WORDS)
-#define SKEIN_256_BLOCK_BYTES (8*SKEIN_256_STATE_WORDS)
-#define SKEIN_512_BLOCK_BYTES (8*SKEIN_512_STATE_WORDS)
-#define SKEIN_1024_BLOCK_BYTES (8*SKEIN_1024_STATE_WORDS)
+#define SKEIN_256_BLOCK_BYTES (8 * SKEIN_256_STATE_WORDS)
+#define SKEIN_512_BLOCK_BYTES (8 * SKEIN_512_STATE_WORDS)
+#define SKEIN_1024_BLOCK_BYTES (8 * SKEIN_1024_STATE_WORDS)
struct skein_ctx_hdr {
size_t hash_bit_len; /* size of hash result, in bits */
@@ -84,11 +84,6 @@ struct skein_1024_ctx { /* 1024-bit Skein hash context structure */
u8 b[SKEIN_1024_BLOCK_BYTES]; /* partial block buf (8-byte aligned) */
};
-static inline u64 rotl_64(u64 x, u8 N)
-{
- return (x << N) | (x >> (64 - N));
-}
-
/* Skein APIs for (incremental) "straight hashing" */
int skein_256_init(struct skein_256_ctx *ctx, size_t hash_bit_len);
int skein_512_init(struct skein_512_ctx *ctx, size_t hash_bit_len);
@@ -162,13 +157,13 @@ int skein_1024_output(struct skein_1024_ctx *ctx, u8 *hash_val);
#define SKEIN_T1_POS_FINAL SKEIN_T1_BIT(127) /* 127 final blk flag */
/* tweak word tweak[1]: flag bit definition(s) */
-#define SKEIN_T1_FLAG_FIRST (((u64) 1) << SKEIN_T1_POS_FIRST)
-#define SKEIN_T1_FLAG_FINAL (((u64) 1) << SKEIN_T1_POS_FINAL)
-#define SKEIN_T1_FLAG_BIT_PAD (((u64) 1) << SKEIN_T1_POS_BIT_PAD)
+#define SKEIN_T1_FLAG_FIRST (((u64)1) << SKEIN_T1_POS_FIRST)
+#define SKEIN_T1_FLAG_FINAL (((u64)1) << SKEIN_T1_POS_FINAL)
+#define SKEIN_T1_FLAG_BIT_PAD (((u64)1) << SKEIN_T1_POS_BIT_PAD)
/* tweak word tweak[1]: tree level bit field mask */
#define SKEIN_T1_TREE_LVL_MASK (((u64)0x7F) << SKEIN_T1_POS_TREE_LVL)
-#define SKEIN_T1_TREE_LEVEL(n) (((u64) (n)) << SKEIN_T1_POS_TREE_LVL)
+#define SKEIN_T1_TREE_LEVEL(n) (((u64)(n)) << SKEIN_T1_POS_TREE_LVL)
/* tweak word tweak[1]: block type field */
#define SKEIN_BLK_TYPE_KEY (0) /* key, for MAC and KDF */
@@ -181,7 +176,7 @@ int skein_1024_output(struct skein_1024_ctx *ctx, u8 *hash_val);
#define SKEIN_BLK_TYPE_OUT (63) /* output stage */
#define SKEIN_BLK_TYPE_MASK (63) /* bit field mask */
-#define SKEIN_T1_BLK_TYPE(T) (((u64) (SKEIN_BLK_TYPE_##T)) << \
+#define SKEIN_T1_BLK_TYPE(T) (((u64)(SKEIN_BLK_TYPE_##T)) << \
SKEIN_T1_POS_BLK_TYPE)
#define SKEIN_T1_BLK_TYPE_KEY SKEIN_T1_BLK_TYPE(KEY) /* for MAC and KDF */
#define SKEIN_T1_BLK_TYPE_CFG SKEIN_T1_BLK_TYPE(CFG) /* config block */
@@ -204,11 +199,11 @@ int skein_1024_output(struct skein_1024_ctx *ctx, u8 *hash_val);
#define SKEIN_ID_STRING_LE (0x33414853) /* "SHA3" (little-endian)*/
#endif
-#define SKEIN_MK_64(hi32, lo32) ((lo32) + (((u64) (hi32)) << 32))
+#define SKEIN_MK_64(hi32, lo32) ((lo32) + (((u64)(hi32)) << 32))
#define SKEIN_SCHEMA_VER SKEIN_MK_64(SKEIN_VERSION, SKEIN_ID_STRING_LE)
#define SKEIN_KS_PARITY SKEIN_MK_64(0x1BD11BDA, 0xA9FC1A22)
-#define SKEIN_CFG_STR_LEN (4*8)
+#define SKEIN_CFG_STR_LEN (4 * 8)
/* bit field definitions in config block tree_info word */
#define SKEIN_CFG_TREE_LEAF_SIZE_POS (0)
@@ -327,9 +322,9 @@ enum {
#define SKEIN_512_ROUNDS_TOTAL (72)
#define SKEIN_1024_ROUNDS_TOTAL (80)
#else /* allow command-line define in range 8*(5..14) */
-#define SKEIN_256_ROUNDS_TOTAL (8*((((SKEIN_ROUNDS/100) + 5) % 10) + 5))
-#define SKEIN_512_ROUNDS_TOTAL (8*((((SKEIN_ROUNDS/10) + 5) % 10) + 5))
-#define SKEIN_1024_ROUNDS_TOTAL (8*((((SKEIN_ROUNDS) + 5) % 10) + 5))
+#define SKEIN_256_ROUNDS_TOTAL (8 * ((((SKEIN_ROUNDS / 100) + 5) % 10) + 5))
+#define SKEIN_512_ROUNDS_TOTAL (8 * ((((SKEIN_ROUNDS / 10) + 5) % 10) + 5))
+#define SKEIN_1024_ROUNDS_TOTAL (8 * ((((SKEIN_ROUNDS) + 5) % 10) + 5))
#endif
#endif /* ifndef _SKEIN_H_ */
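
Note: skein_base.h builds its 64-bit constants from two 32-bit halves via SKEIN_MK_64, and the spacing cleanup above makes that construction easier to read. A tiny illustration of the same idea in plain C, with a placeholder function name:

#include <linux/types.h>

/* combine two 32-bit halves into one 64-bit word, high half on top */
static u64 demo_mk64(u32 hi32, u32 lo32)
{
	return (u64)lo32 + ((u64)hi32 << 32);
}
/* e.g. demo_mk64(0x1BD11BDA, 0xA9FC1A22) matches SKEIN_KS_PARITY above */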
diff --git a/drivers/staging/skein/skein_block.c b/drivers/staging/skein/skein_block.c
index 45b47327e024..59a0a8a82118 100644
--- a/drivers/staging/skein/skein_block.c
+++ b/drivers/staging/skein/skein_block.c
@@ -15,6 +15,7 @@
************************************************************************/
#include <linux/string.h>
+#include <linux/bitops.h>
#include "skein_base.h"
#include "skein_block.h"
@@ -59,10 +60,10 @@
#define ROUND256(p0, p1, p2, p3, ROT, r_num) \
do { \
X##p0 += X##p1; \
- X##p1 = rotl_64(X##p1, ROT##_0); \
+ X##p1 = rol64(X##p1, ROT##_0); \
X##p1 ^= X##p0; \
X##p2 += X##p3; \
- X##p3 = rotl_64(X##p3, ROT##_1); \
+ X##p3 = rol64(X##p3, ROT##_1); \
X##p3 ^= X##p2; \
} while (0)
@@ -120,10 +121,10 @@
#if !(SKEIN_USE_ASM & 512)
#undef RCNT
-#define RCNT (SKEIN_512_ROUNDS_TOTAL/8)
+#define RCNT (SKEIN_512_ROUNDS_TOTAL / 8)
#ifdef SKEIN_LOOP /* configure how much to unroll the loop */
-#define SKEIN_UNROLL_512 (((SKEIN_LOOP)/10)%10)
+#define SKEIN_UNROLL_512 (((SKEIN_LOOP) / 10) % 10)
#else
#define SKEIN_UNROLL_512 (0)
#endif
@@ -136,15 +137,16 @@
#define ROUND512(p0, p1, p2, p3, p4, p5, p6, p7, ROT, r_num) \
do { \
X##p0 += X##p1; \
- X##p1 = rotl_64(X##p1, ROT##_0); \
+ X##p1 = rol64(X##p1, ROT##_0); \
X##p1 ^= X##p0; \
X##p2 += X##p3; \
- X##p3 = rotl_64(X##p3, ROT##_1); \
+ X##p3 = rol64(X##p3, ROT##_1); \
X##p3 ^= X##p2; \
X##p4 += X##p5; \
- X##p5 = rotl_64(X##p5, ROT##_2); \
+ X##p5 = rol64(X##p5, ROT##_2); \
X##p5 ^= X##p4; \
- X##p6 += X##p7; X##p7 = rotl_64(X##p7, ROT##_3);\
+ X##p6 += X##p7; \
+ X##p7 = rol64(X##p7, ROT##_3); \
X##p7 ^= X##p6; \
} while (0)
@@ -200,7 +202,7 @@
} while (0)
#define R512_UNROLL_R(NN) \
((SKEIN_UNROLL_512 == 0 && \
- SKEIN_512_ROUNDS_TOTAL/8 > (NN)) || \
+ SKEIN_512_ROUNDS_TOTAL / 8 > (NN)) || \
(SKEIN_UNROLL_512 > (NN)))
#if (SKEIN_UNROLL_512 > 14)
@@ -210,7 +212,7 @@
#if !(SKEIN_USE_ASM & 1024)
#undef RCNT
-#define RCNT (SKEIN_1024_ROUNDS_TOTAL/8)
+#define RCNT (SKEIN_1024_ROUNDS_TOTAL / 8)
#ifdef SKEIN_LOOP /* configure how much to unroll the loop */
#define SKEIN_UNROLL_1024 ((SKEIN_LOOP) % 10)
#else
@@ -226,28 +228,28 @@
pF, ROT, r_num) \
do { \
X##p0 += X##p1; \
- X##p1 = rotl_64(X##p1, ROT##_0); \
+ X##p1 = rol64(X##p1, ROT##_0); \
X##p1 ^= X##p0; \
X##p2 += X##p3; \
- X##p3 = rotl_64(X##p3, ROT##_1); \
+ X##p3 = rol64(X##p3, ROT##_1); \
X##p3 ^= X##p2; \
X##p4 += X##p5; \
- X##p5 = rotl_64(X##p5, ROT##_2); \
+ X##p5 = rol64(X##p5, ROT##_2); \
X##p5 ^= X##p4; \
X##p6 += X##p7; \
- X##p7 = rotl_64(X##p7, ROT##_3); \
+ X##p7 = rol64(X##p7, ROT##_3); \
X##p7 ^= X##p6; \
X##p8 += X##p9; \
- X##p9 = rotl_64(X##p9, ROT##_4); \
+ X##p9 = rol64(X##p9, ROT##_4); \
X##p9 ^= X##p8; \
X##pA += X##pB; \
- X##pB = rotl_64(X##pB, ROT##_5); \
+ X##pB = rol64(X##pB, ROT##_5); \
X##pB ^= X##pA; \
X##pC += X##pD; \
- X##pD = rotl_64(X##pD, ROT##_6); \
+ X##pD = rol64(X##pD, ROT##_6); \
X##pD ^= X##pC; \
X##pE += X##pF; \
- X##pF = rotl_64(X##pF, ROT##_7); \
+ X##pF = rol64(X##pF, ROT##_7); \
X##pF ^= X##pE; \
} while (0)
@@ -311,28 +313,28 @@
#define R1024_8_ROUNDS(R) \
do { \
R1024(00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, \
- 13, 14, 15, R1024_0, 8*(R) + 1); \
+ 13, 14, 15, R1024_0, 8 * (R) + 1); \
R1024(00, 09, 02, 13, 06, 11, 04, 15, 10, 07, 12, 03, 14, \
- 05, 08, 01, R1024_1, 8*(R) + 2); \
+ 05, 08, 01, R1024_1, 8 * (R) + 2); \
R1024(00, 07, 02, 05, 04, 03, 06, 01, 12, 15, 14, 13, 08, \
- 11, 10, 09, R1024_2, 8*(R) + 3); \
+ 11, 10, 09, R1024_2, 8 * (R) + 3); \
R1024(00, 15, 02, 11, 06, 13, 04, 09, 14, 01, 08, 05, 10, \
- 03, 12, 07, R1024_3, 8*(R) + 4); \
- I1024(2*(R)); \
+ 03, 12, 07, R1024_3, 8 * (R) + 4); \
+ I1024(2 * (R)); \
R1024(00, 01, 02, 03, 04, 05, 06, 07, 08, 09, 10, 11, 12, \
- 13, 14, 15, R1024_4, 8*(R) + 5); \
+ 13, 14, 15, R1024_4, 8 * (R) + 5); \
R1024(00, 09, 02, 13, 06, 11, 04, 15, 10, 07, 12, 03, 14, \
- 05, 08, 01, R1024_5, 8*(R) + 6); \
+ 05, 08, 01, R1024_5, 8 * (R) + 6); \
R1024(00, 07, 02, 05, 04, 03, 06, 01, 12, 15, 14, 13, 08, \
- 11, 10, 09, R1024_6, 8*(R) + 7); \
+ 11, 10, 09, R1024_6, 8 * (R) + 7); \
R1024(00, 15, 02, 11, 06, 13, 04, 09, 14, 01, 08, 05, 10, \
- 03, 12, 07, R1024_7, 8*(R) + 8); \
- I1024(2*(R)+1); \
+ 03, 12, 07, R1024_7, 8 * (R) + 8); \
+ I1024(2 * (R) + 1); \
} while (0)
#define R1024_UNROLL_R(NN) \
((SKEIN_UNROLL_1024 == 0 && \
- SKEIN_1024_ROUNDS_TOTAL/8 > (NN)) || \
+ SKEIN_1024_ROUNDS_TOTAL / 8 > (NN)) || \
(SKEIN_UNROLL_1024 > (NN)))
#if (SKEIN_UNROLL_1024 > 14)
@@ -351,10 +353,10 @@ void skein_256_process_block(struct skein_256_ctx *ctx, const u8 *blk_ptr,
size_t r;
#if SKEIN_UNROLL_256
/* key schedule: chaining vars + tweak + "rot"*/
- u64 kw[WCNT+4+RCNT*2];
+ u64 kw[WCNT + 4 + (RCNT * 2)];
#else
/* key schedule words : chaining vars + tweak */
- u64 kw[WCNT+4];
+ u64 kw[WCNT + 4];
#endif
u64 X0, X1, X2, X3; /* local copy of context vars, for speed */
u64 w[WCNT]; /* local copy of input block */
@@ -460,9 +462,10 @@ void skein_256_process_block(struct skein_256_ctx *ctx, const u8 *blk_ptr,
#if defined(SKEIN_CODE_SIZE) || defined(SKEIN_PERF)
size_t skein_256_process_block_code_size(void)
{
- return ((u8 *) skein_256_process_block_code_size) -
- ((u8 *) skein_256_process_block);
+ return ((u8 *)skein_256_process_block_code_size) -
+ ((u8 *)skein_256_process_block);
}
+
unsigned int skein_256_unroll_cnt(void)
{
return SKEIN_UNROLL_256;
@@ -480,9 +483,11 @@ void skein_512_process_block(struct skein_512_ctx *ctx, const u8 *blk_ptr,
};
size_t r;
#if SKEIN_UNROLL_512
- u64 kw[WCNT+4+RCNT*2]; /* key sched: chaining vars + tweak + "rot"*/
+ /* key sched: chaining vars + tweak + "rot"*/
+ u64 kw[WCNT + 4 + RCNT * 2];
#else
- u64 kw[WCNT+4]; /* key schedule words : chaining vars + tweak */
+ /* key schedule words : chaining vars + tweak */
+ u64 kw[WCNT + 4];
#endif
u64 X0, X1, X2, X3, X4, X5, X6, X7; /* local copies, for speed */
u64 w[WCNT]; /* local copy of input block */
@@ -543,7 +548,6 @@ void skein_512_process_block(struct skein_512_ctx *ctx, const u8 *blk_ptr,
for (r = 1;
r < (SKEIN_UNROLL_512 ? 2 * RCNT : 2);
r += (SKEIN_UNROLL_512 ? 2 * SKEIN_UNROLL_512 : 1)) {
-
R512_8_ROUNDS(0);
#if R512_UNROLL_R(1)
@@ -609,9 +613,10 @@ void skein_512_process_block(struct skein_512_ctx *ctx, const u8 *blk_ptr,
#if defined(SKEIN_CODE_SIZE) || defined(SKEIN_PERF)
size_t skein_512_process_block_code_size(void)
{
- return ((u8 *) skein_512_process_block_code_size) -
- ((u8 *) skein_512_process_block);
+ return ((u8 *)skein_512_process_block_code_size) -
+ ((u8 *)skein_512_process_block);
}
+
unsigned int skein_512_unroll_cnt(void)
{
return SKEIN_UNROLL_512;
@@ -629,9 +634,11 @@ void skein_1024_process_block(struct skein_1024_ctx *ctx, const u8 *blk_ptr,
};
size_t r;
#if (SKEIN_UNROLL_1024 != 0)
- u64 kw[WCNT+4+RCNT*2]; /* key sched: chaining vars + tweak + "rot" */
+ /* key sched: chaining vars + tweak + "rot" */
+ u64 kw[WCNT + 4 + (RCNT * 2)];
#else
- u64 kw[WCNT+4]; /* key schedule words : chaining vars + tweak */
+ /* key schedule words : chaining vars + tweak */
+ u64 kw[WCNT + 4];
#endif
/* local copy of vars, for speed */
@@ -771,9 +778,10 @@ void skein_1024_process_block(struct skein_1024_ctx *ctx, const u8 *blk_ptr,
#if defined(SKEIN_CODE_SIZE) || defined(SKEIN_PERF)
size_t skein_1024_process_block_code_size(void)
{
- return ((u8 *) skein_1024_process_block_code_size) -
- ((u8 *) skein_1024_process_block);
+ return ((u8 *)skein_1024_process_block_code_size) -
+ ((u8 *)skein_1024_process_block);
}
+
unsigned int skein_1024_unroll_cnt(void)
{
return SKEIN_UNROLL_1024;
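
Note: the skein_block.c hunks above (and the threefish_block.c hunks further down) replace hand-rolled 64-bit rotates with the rol64()/ror64() helpers from <linux/bitops.h>, which is why that include is added at the top of the file. A minimal sketch of the equivalence, with placeholder wrapper names:

#include <linux/bitops.h>
#include <linux/types.h>

static u64 demo_rotl(u64 x, unsigned int n)
{
	/* same result as (x << n) | (x >> (64 - n)) for 0 < n < 64 */
	return rol64(x, n);
}

static u64 demo_rotr(u64 x, unsigned int n)
{
	/* same result as (x >> n) | (x << (64 - n)) for 0 < n < 64 */
	return ror64(x, n);
}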
diff --git a/drivers/staging/skein/skein_generic.c b/drivers/staging/skein/skein_generic.c
index e29b9abaa4e6..11f5e530a75f 100644
--- a/drivers/staging/skein/skein_generic.c
+++ b/drivers/staging/skein/skein_generic.c
@@ -27,7 +27,7 @@ static int skein256_init(struct shash_desc *desc)
}
static int skein256_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+ unsigned int len)
{
return skein_256_update((struct skein_256_ctx *)shash_desc_ctx(desc),
data, len);
@@ -62,7 +62,7 @@ static int skein512_init(struct shash_desc *desc)
}
static int skein512_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+ unsigned int len)
{
return skein_512_update((struct skein_512_ctx *)shash_desc_ctx(desc),
data, len);
@@ -97,7 +97,7 @@ static int skein1024_init(struct shash_desc *desc)
}
static int skein1024_update(struct shash_desc *desc, const u8 *data,
- unsigned int len)
+ unsigned int len)
{
return skein_1024_update((struct skein_1024_ctx *)shash_desc_ctx(desc),
data, len);
diff --git a/drivers/staging/skein/threefish_api.h b/drivers/staging/skein/threefish_api.h
index 8e0a0b77ecce..615e467579ee 100644
--- a/drivers/staging/skein/threefish_api.h
+++ b/drivers/staging/skein/threefish_api.h
@@ -52,7 +52,7 @@ enum threefish_size {
*/
struct threefish_key {
u64 state_size;
- u64 key[SKEIN_MAX_STATE_WORDS+1]; /* max number of key words*/
+ u64 key[SKEIN_MAX_STATE_WORDS + 1]; /* max number of key words*/
u64 tweak[3];
};
diff --git a/drivers/staging/skein/threefish_block.c b/drivers/staging/skein/threefish_block.c
index e19ac4368651..a95563fad071 100644
--- a/drivers/staging/skein/threefish_block.c
+++ b/drivers/staging/skein/threefish_block.c
@@ -512,622 +512,622 @@ void threefish_decrypt_256(struct threefish_key *key_ctx, u64 *input,
b2 -= k0 + t1;
b3 -= k1 + 18;
tmp = b3 ^ b0;
- b3 = (tmp >> 32) | (tmp << (64 - 32));
+ b3 = ror64(tmp, 32);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 32) | (tmp << (64 - 32));
+ b1 = ror64(tmp, 32);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 58) | (tmp << (64 - 58));
+ b1 = ror64(tmp, 58);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 46) | (tmp << (64 - 46));
+ b3 = ror64(tmp, 46);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 12) | (tmp << (64 - 12));
+ b1 = ror64(tmp, 12);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b0 -= b1 + k2;
b1 -= k3 + t2;
tmp = b3 ^ b2;
- b3 = (tmp >> 33) | (tmp << (64 - 33));
+ b3 = ror64(tmp, 33);
b2 -= b3 + k4 + t0;
b3 -= k0 + 17;
tmp = b3 ^ b0;
- b3 = (tmp >> 5) | (tmp << (64 - 5));
+ b3 = ror64(tmp, 5);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 37) | (tmp << (64 - 37));
+ b1 = ror64(tmp, 37);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 40) | (tmp << (64 - 40));
+ b3 = ror64(tmp, 40);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 52) | (tmp << (64 - 52));
+ b3 = ror64(tmp, 52);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 57) | (tmp << (64 - 57));
+ b1 = ror64(tmp, 57);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 14) | (tmp << (64 - 14));
+ b1 = ror64(tmp, 14);
b0 -= b1 + k1;
b1 -= k2 + t1;
tmp = b3 ^ b2;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b2 -= b3 + k3 + t2;
b3 -= k4 + 16;
tmp = b3 ^ b0;
- b3 = (tmp >> 32) | (tmp << (64 - 32));
+ b3 = ror64(tmp, 32);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 32) | (tmp << (64 - 32));
+ b1 = ror64(tmp, 32);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 58) | (tmp << (64 - 58));
+ b1 = ror64(tmp, 58);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 46) | (tmp << (64 - 46));
+ b3 = ror64(tmp, 46);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 12) | (tmp << (64 - 12));
+ b1 = ror64(tmp, 12);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b0 -= b1 + k0;
b1 -= k1 + t0;
tmp = b3 ^ b2;
- b3 = (tmp >> 33) | (tmp << (64 - 33));
+ b3 = ror64(tmp, 33);
b2 -= b3 + k2 + t1;
b3 -= k3 + 15;
tmp = b3 ^ b0;
- b3 = (tmp >> 5) | (tmp << (64 - 5));
+ b3 = ror64(tmp, 5);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 37) | (tmp << (64 - 37));
+ b1 = ror64(tmp, 37);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 40) | (tmp << (64 - 40));
+ b3 = ror64(tmp, 40);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 52) | (tmp << (64 - 52));
+ b3 = ror64(tmp, 52);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 57) | (tmp << (64 - 57));
+ b1 = ror64(tmp, 57);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 14) | (tmp << (64 - 14));
+ b1 = ror64(tmp, 14);
b0 -= b1 + k4;
b1 -= k0 + t2;
tmp = b3 ^ b2;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b2 -= b3 + k1 + t0;
b3 -= k2 + 14;
tmp = b3 ^ b0;
- b3 = (tmp >> 32) | (tmp << (64 - 32));
+ b3 = ror64(tmp, 32);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 32) | (tmp << (64 - 32));
+ b1 = ror64(tmp, 32);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 58) | (tmp << (64 - 58));
+ b1 = ror64(tmp, 58);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 46) | (tmp << (64 - 46));
+ b3 = ror64(tmp, 46);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 12) | (tmp << (64 - 12));
+ b1 = ror64(tmp, 12);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b0 -= b1 + k3;
b1 -= k4 + t1;
tmp = b3 ^ b2;
- b3 = (tmp >> 33) | (tmp << (64 - 33));
+ b3 = ror64(tmp, 33);
b2 -= b3 + k0 + t2;
b3 -= k1 + 13;
tmp = b3 ^ b0;
- b3 = (tmp >> 5) | (tmp << (64 - 5));
+ b3 = ror64(tmp, 5);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 37) | (tmp << (64 - 37));
+ b1 = ror64(tmp, 37);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 40) | (tmp << (64 - 40));
+ b3 = ror64(tmp, 40);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 52) | (tmp << (64 - 52));
+ b3 = ror64(tmp, 52);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 57) | (tmp << (64 - 57));
+ b1 = ror64(tmp, 57);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 14) | (tmp << (64 - 14));
+ b1 = ror64(tmp, 14);
b0 -= b1 + k2;
b1 -= k3 + t0;
tmp = b3 ^ b2;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b2 -= b3 + k4 + t1;
b3 -= k0 + 12;
tmp = b3 ^ b0;
- b3 = (tmp >> 32) | (tmp << (64 - 32));
+ b3 = ror64(tmp, 32);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 32) | (tmp << (64 - 32));
+ b1 = ror64(tmp, 32);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 58) | (tmp << (64 - 58));
+ b1 = ror64(tmp, 58);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 46) | (tmp << (64 - 46));
+ b3 = ror64(tmp, 46);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 12) | (tmp << (64 - 12));
+ b1 = ror64(tmp, 12);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b0 -= b1 + k1;
b1 -= k2 + t2;
tmp = b3 ^ b2;
- b3 = (tmp >> 33) | (tmp << (64 - 33));
+ b3 = ror64(tmp, 33);
b2 -= b3 + k3 + t0;
b3 -= k4 + 11;
tmp = b3 ^ b0;
- b3 = (tmp >> 5) | (tmp << (64 - 5));
+ b3 = ror64(tmp, 5);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 37) | (tmp << (64 - 37));
+ b1 = ror64(tmp, 37);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 40) | (tmp << (64 - 40));
+ b3 = ror64(tmp, 40);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 52) | (tmp << (64 - 52));
+ b3 = ror64(tmp, 52);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 57) | (tmp << (64 - 57));
+ b1 = ror64(tmp, 57);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 14) | (tmp << (64 - 14));
+ b1 = ror64(tmp, 14);
b0 -= b1 + k0;
b1 -= k1 + t1;
tmp = b3 ^ b2;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b2 -= b3 + k2 + t2;
b3 -= k3 + 10;
tmp = b3 ^ b0;
- b3 = (tmp >> 32) | (tmp << (64 - 32));
+ b3 = ror64(tmp, 32);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 32) | (tmp << (64 - 32));
+ b1 = ror64(tmp, 32);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 58) | (tmp << (64 - 58));
+ b1 = ror64(tmp, 58);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 46) | (tmp << (64 - 46));
+ b3 = ror64(tmp, 46);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 12) | (tmp << (64 - 12));
+ b1 = ror64(tmp, 12);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b0 -= b1 + k4;
b1 -= k0 + t0;
tmp = b3 ^ b2;
- b3 = (tmp >> 33) | (tmp << (64 - 33));
+ b3 = ror64(tmp, 33);
b2 -= b3 + k1 + t1;
b3 -= k2 + 9;
tmp = b3 ^ b0;
- b3 = (tmp >> 5) | (tmp << (64 - 5));
+ b3 = ror64(tmp, 5);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 37) | (tmp << (64 - 37));
+ b1 = ror64(tmp, 37);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 40) | (tmp << (64 - 40));
+ b3 = ror64(tmp, 40);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 52) | (tmp << (64 - 52));
+ b3 = ror64(tmp, 52);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 57) | (tmp << (64 - 57));
+ b1 = ror64(tmp, 57);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 14) | (tmp << (64 - 14));
+ b1 = ror64(tmp, 14);
b0 -= b1 + k3;
b1 -= k4 + t2;
tmp = b3 ^ b2;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b2 -= b3 + k0 + t0;
b3 -= k1 + 8;
tmp = b3 ^ b0;
- b3 = (tmp >> 32) | (tmp << (64 - 32));
+ b3 = ror64(tmp, 32);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 32) | (tmp << (64 - 32));
+ b1 = ror64(tmp, 32);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 58) | (tmp << (64 - 58));
+ b1 = ror64(tmp, 58);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 46) | (tmp << (64 - 46));
+ b3 = ror64(tmp, 46);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 12) | (tmp << (64 - 12));
+ b1 = ror64(tmp, 12);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b0 -= b1 + k2;
b1 -= k3 + t1;
tmp = b3 ^ b2;
- b3 = (tmp >> 33) | (tmp << (64 - 33));
+ b3 = ror64(tmp, 33);
b2 -= b3 + k4 + t2;
b3 -= k0 + 7;
tmp = b3 ^ b0;
- b3 = (tmp >> 5) | (tmp << (64 - 5));
+ b3 = ror64(tmp, 5);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 37) | (tmp << (64 - 37));
+ b1 = ror64(tmp, 37);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 40) | (tmp << (64 - 40));
+ b3 = ror64(tmp, 40);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 52) | (tmp << (64 - 52));
+ b3 = ror64(tmp, 52);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 57) | (tmp << (64 - 57));
+ b1 = ror64(tmp, 57);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 14) | (tmp << (64 - 14));
+ b1 = ror64(tmp, 14);
b0 -= b1 + k1;
b1 -= k2 + t0;
tmp = b3 ^ b2;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b2 -= b3 + k3 + t1;
b3 -= k4 + 6;
tmp = b3 ^ b0;
- b3 = (tmp >> 32) | (tmp << (64 - 32));
+ b3 = ror64(tmp, 32);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 32) | (tmp << (64 - 32));
+ b1 = ror64(tmp, 32);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 58) | (tmp << (64 - 58));
+ b1 = ror64(tmp, 58);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 46) | (tmp << (64 - 46));
+ b3 = ror64(tmp, 46);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 12) | (tmp << (64 - 12));
+ b1 = ror64(tmp, 12);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b0 -= b1 + k0;
b1 -= k1 + t2;
tmp = b3 ^ b2;
- b3 = (tmp >> 33) | (tmp << (64 - 33));
+ b3 = ror64(tmp, 33);
b2 -= b3 + k2 + t0;
b3 -= k3 + 5;
tmp = b3 ^ b0;
- b3 = (tmp >> 5) | (tmp << (64 - 5));
+ b3 = ror64(tmp, 5);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 37) | (tmp << (64 - 37));
+ b1 = ror64(tmp, 37);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 40) | (tmp << (64 - 40));
+ b3 = ror64(tmp, 40);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 52) | (tmp << (64 - 52));
+ b3 = ror64(tmp, 52);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 57) | (tmp << (64 - 57));
+ b1 = ror64(tmp, 57);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 14) | (tmp << (64 - 14));
+ b1 = ror64(tmp, 14);
b0 -= b1 + k4;
b1 -= k0 + t1;
tmp = b3 ^ b2;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b2 -= b3 + k1 + t2;
b3 -= k2 + 4;
tmp = b3 ^ b0;
- b3 = (tmp >> 32) | (tmp << (64 - 32));
+ b3 = ror64(tmp, 32);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 32) | (tmp << (64 - 32));
+ b1 = ror64(tmp, 32);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 58) | (tmp << (64 - 58));
+ b1 = ror64(tmp, 58);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 46) | (tmp << (64 - 46));
+ b3 = ror64(tmp, 46);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 12) | (tmp << (64 - 12));
+ b1 = ror64(tmp, 12);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b0 -= b1 + k3;
b1 -= k4 + t0;
tmp = b3 ^ b2;
- b3 = (tmp >> 33) | (tmp << (64 - 33));
+ b3 = ror64(tmp, 33);
b2 -= b3 + k0 + t1;
b3 -= k1 + 3;
tmp = b3 ^ b0;
- b3 = (tmp >> 5) | (tmp << (64 - 5));
+ b3 = ror64(tmp, 5);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 37) | (tmp << (64 - 37));
+ b1 = ror64(tmp, 37);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 40) | (tmp << (64 - 40));
+ b3 = ror64(tmp, 40);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 52) | (tmp << (64 - 52));
+ b3 = ror64(tmp, 52);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 57) | (tmp << (64 - 57));
+ b1 = ror64(tmp, 57);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 14) | (tmp << (64 - 14));
+ b1 = ror64(tmp, 14);
b0 -= b1 + k2;
b1 -= k3 + t2;
tmp = b3 ^ b2;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b2 -= b3 + k4 + t0;
b3 -= k0 + 2;
tmp = b3 ^ b0;
- b3 = (tmp >> 32) | (tmp << (64 - 32));
+ b3 = ror64(tmp, 32);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 32) | (tmp << (64 - 32));
+ b1 = ror64(tmp, 32);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 58) | (tmp << (64 - 58));
+ b1 = ror64(tmp, 58);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 46) | (tmp << (64 - 46));
+ b3 = ror64(tmp, 46);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 12) | (tmp << (64 - 12));
+ b1 = ror64(tmp, 12);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b0 -= b1 + k1;
b1 -= k2 + t1;
tmp = b3 ^ b2;
- b3 = (tmp >> 33) | (tmp << (64 - 33));
+ b3 = ror64(tmp, 33);
b2 -= b3 + k3 + t2;
b3 -= k4 + 1;
tmp = b3 ^ b0;
- b3 = (tmp >> 5) | (tmp << (64 - 5));
+ b3 = ror64(tmp, 5);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 37) | (tmp << (64 - 37));
+ b1 = ror64(tmp, 37);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b0 -= b1;
tmp = b3 ^ b2;
- b3 = (tmp >> 40) | (tmp << (64 - 40));
+ b3 = ror64(tmp, 40);
b2 -= b3;
tmp = b3 ^ b0;
- b3 = (tmp >> 52) | (tmp << (64 - 52));
+ b3 = ror64(tmp, 52);
b0 -= b3;
tmp = b1 ^ b2;
- b1 = (tmp >> 57) | (tmp << (64 - 57));
+ b1 = ror64(tmp, 57);
b2 -= b1;
tmp = b1 ^ b0;
- b1 = (tmp >> 14) | (tmp << (64 - 14));
+ b1 = ror64(tmp, 14);
b0 -= b1 + k0;
b1 -= k1 + t0;
tmp = b3 ^ b2;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b2 -= b3 + k2 + t1;
b3 -= k3;
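
Every hunk in this diff makes the same mechanical substitution: the open-coded 64-bit rotate-right, (tmp >> N) | (tmp << (64 - N)), becomes a call to the kernel's ror64() helper from include/linux/bitops.h, with no behavioural change since every Threefish rotation constant lies in 1..63. The short standalone sketch below is an editorial illustration, not part of the patch; it re-defines a local ror64() of the same form and asserts the equivalence in userspace.

/*
 * Userspace sketch only: mirrors the kernel's ror64() helper and checks
 * that it matches the open-coded rotate being removed throughout this
 * diff. The ror64() below is a local re-definition for illustration.
 */
#include <stdint.h>
#include <stdio.h>
#include <assert.h>

static inline uint64_t ror64(uint64_t word, unsigned int shift)
{
	/* rotate right: same form as the expressions replaced above */
	return (word >> shift) | (word << (64 - shift));
}

int main(void)
{
	uint64_t tmp = 0x0123456789abcdefULL;
	unsigned int n;

	/* every rotation amount used by Threefish is in 1..63 */
	for (n = 1; n < 64; n++)
		assert(ror64(tmp, n) == ((tmp >> n) | (tmp << (64 - n))));

	printf("ror64(tmp, 37) = %#llx\n",
	       (unsigned long long)ror64(tmp, 37));
	return 0;
}
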
@@ -2125,1226 +2125,1226 @@ void threefish_decrypt_512(struct threefish_key *key_ctx, u64 *input,
b7 -= k7 + 18;
tmp = b3 ^ b4;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 56) | (tmp << (64 - 56));
+ b5 = ror64(tmp, 56);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 35) | (tmp << (64 - 35));
+ b7 = ror64(tmp, 35);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 8) | (tmp << (64 - 8));
+ b1 = ror64(tmp, 8);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 43) | (tmp << (64 - 43));
+ b7 = ror64(tmp, 43);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 39) | (tmp << (64 - 39));
+ b5 = ror64(tmp, 39);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 29) | (tmp << (64 - 29));
+ b3 = ror64(tmp, 29);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 17) | (tmp << (64 - 17));
+ b3 = ror64(tmp, 17);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 10) | (tmp << (64 - 10));
+ b5 = ror64(tmp, 10);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 50) | (tmp << (64 - 50));
+ b7 = ror64(tmp, 50);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 24) | (tmp << (64 - 24));
+ b7 = ror64(tmp, 24);
b6 -= b7 + k5 + t0;
b7 -= k6 + 17;
tmp = b5 ^ b4;
- b5 = (tmp >> 34) | (tmp << (64 - 34));
+ b5 = ror64(tmp, 34);
b4 -= b5 + k3;
b5 -= k4 + t2;
tmp = b3 ^ b2;
- b3 = (tmp >> 30) | (tmp << (64 - 30));
+ b3 = ror64(tmp, 30);
b2 -= b3 + k1;
b3 -= k2;
tmp = b1 ^ b0;
- b1 = (tmp >> 39) | (tmp << (64 - 39));
+ b1 = ror64(tmp, 39);
b0 -= b1 + k8;
b1 -= k0;
tmp = b3 ^ b4;
- b3 = (tmp >> 56) | (tmp << (64 - 56));
+ b3 = ror64(tmp, 56);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 54) | (tmp << (64 - 54));
+ b5 = ror64(tmp, 54);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 9) | (tmp << (64 - 9));
+ b7 = ror64(tmp, 9);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 44) | (tmp << (64 - 44));
+ b1 = ror64(tmp, 44);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 39) | (tmp << (64 - 39));
+ b7 = ror64(tmp, 39);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 36) | (tmp << (64 - 36));
+ b5 = ror64(tmp, 36);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 49) | (tmp << (64 - 49));
+ b3 = ror64(tmp, 49);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 17) | (tmp << (64 - 17));
+ b1 = ror64(tmp, 17);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 42) | (tmp << (64 - 42));
+ b3 = ror64(tmp, 42);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 14) | (tmp << (64 - 14));
+ b5 = ror64(tmp, 14);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 27) | (tmp << (64 - 27));
+ b7 = ror64(tmp, 27);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 33) | (tmp << (64 - 33));
+ b1 = ror64(tmp, 33);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 37) | (tmp << (64 - 37));
+ b7 = ror64(tmp, 37);
b6 -= b7 + k4 + t2;
b7 -= k5 + 16;
tmp = b5 ^ b4;
- b5 = (tmp >> 19) | (tmp << (64 - 19));
+ b5 = ror64(tmp, 19);
b4 -= b5 + k2;
b5 -= k3 + t1;
tmp = b3 ^ b2;
- b3 = (tmp >> 36) | (tmp << (64 - 36));
+ b3 = ror64(tmp, 36);
b2 -= b3 + k0;
b3 -= k1;
tmp = b1 ^ b0;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b0 -= b1 + k7;
b1 -= k8;
tmp = b3 ^ b4;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 56) | (tmp << (64 - 56));
+ b5 = ror64(tmp, 56);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 35) | (tmp << (64 - 35));
+ b7 = ror64(tmp, 35);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 8) | (tmp << (64 - 8));
+ b1 = ror64(tmp, 8);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 43) | (tmp << (64 - 43));
+ b7 = ror64(tmp, 43);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 39) | (tmp << (64 - 39));
+ b5 = ror64(tmp, 39);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 29) | (tmp << (64 - 29));
+ b3 = ror64(tmp, 29);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 17) | (tmp << (64 - 17));
+ b3 = ror64(tmp, 17);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 10) | (tmp << (64 - 10));
+ b5 = ror64(tmp, 10);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 50) | (tmp << (64 - 50));
+ b7 = ror64(tmp, 50);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 24) | (tmp << (64 - 24));
+ b7 = ror64(tmp, 24);
b6 -= b7 + k3 + t1;
b7 -= k4 + 15;
tmp = b5 ^ b4;
- b5 = (tmp >> 34) | (tmp << (64 - 34));
+ b5 = ror64(tmp, 34);
b4 -= b5 + k1;
b5 -= k2 + t0;
tmp = b3 ^ b2;
- b3 = (tmp >> 30) | (tmp << (64 - 30));
+ b3 = ror64(tmp, 30);
b2 -= b3 + k8;
b3 -= k0;
tmp = b1 ^ b0;
- b1 = (tmp >> 39) | (tmp << (64 - 39));
+ b1 = ror64(tmp, 39);
b0 -= b1 + k6;
b1 -= k7;
tmp = b3 ^ b4;
- b3 = (tmp >> 56) | (tmp << (64 - 56));
+ b3 = ror64(tmp, 56);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 54) | (tmp << (64 - 54));
+ b5 = ror64(tmp, 54);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 9) | (tmp << (64 - 9));
+ b7 = ror64(tmp, 9);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 44) | (tmp << (64 - 44));
+ b1 = ror64(tmp, 44);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 39) | (tmp << (64 - 39));
+ b7 = ror64(tmp, 39);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 36) | (tmp << (64 - 36));
+ b5 = ror64(tmp, 36);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 49) | (tmp << (64 - 49));
+ b3 = ror64(tmp, 49);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 17) | (tmp << (64 - 17));
+ b1 = ror64(tmp, 17);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 42) | (tmp << (64 - 42));
+ b3 = ror64(tmp, 42);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 14) | (tmp << (64 - 14));
+ b5 = ror64(tmp, 14);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 27) | (tmp << (64 - 27));
+ b7 = ror64(tmp, 27);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 33) | (tmp << (64 - 33));
+ b1 = ror64(tmp, 33);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 37) | (tmp << (64 - 37));
+ b7 = ror64(tmp, 37);
b6 -= b7 + k2 + t0;
b7 -= k3 + 14;
tmp = b5 ^ b4;
- b5 = (tmp >> 19) | (tmp << (64 - 19));
+ b5 = ror64(tmp, 19);
b4 -= b5 + k0;
b5 -= k1 + t2;
tmp = b3 ^ b2;
- b3 = (tmp >> 36) | (tmp << (64 - 36));
+ b3 = ror64(tmp, 36);
b2 -= b3 + k7;
b3 -= k8;
tmp = b1 ^ b0;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b0 -= b1 + k5;
b1 -= k6;
tmp = b3 ^ b4;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 56) | (tmp << (64 - 56));
+ b5 = ror64(tmp, 56);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 35) | (tmp << (64 - 35));
+ b7 = ror64(tmp, 35);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 8) | (tmp << (64 - 8));
+ b1 = ror64(tmp, 8);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 43) | (tmp << (64 - 43));
+ b7 = ror64(tmp, 43);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 39) | (tmp << (64 - 39));
+ b5 = ror64(tmp, 39);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 29) | (tmp << (64 - 29));
+ b3 = ror64(tmp, 29);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 17) | (tmp << (64 - 17));
+ b3 = ror64(tmp, 17);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 10) | (tmp << (64 - 10));
+ b5 = ror64(tmp, 10);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 50) | (tmp << (64 - 50));
+ b7 = ror64(tmp, 50);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 24) | (tmp << (64 - 24));
+ b7 = ror64(tmp, 24);
b6 -= b7 + k1 + t2;
b7 -= k2 + 13;
tmp = b5 ^ b4;
- b5 = (tmp >> 34) | (tmp << (64 - 34));
+ b5 = ror64(tmp, 34);
b4 -= b5 + k8;
b5 -= k0 + t1;
tmp = b3 ^ b2;
- b3 = (tmp >> 30) | (tmp << (64 - 30));
+ b3 = ror64(tmp, 30);
b2 -= b3 + k6;
b3 -= k7;
tmp = b1 ^ b0;
- b1 = (tmp >> 39) | (tmp << (64 - 39));
+ b1 = ror64(tmp, 39);
b0 -= b1 + k4;
b1 -= k5;
tmp = b3 ^ b4;
- b3 = (tmp >> 56) | (tmp << (64 - 56));
+ b3 = ror64(tmp, 56);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 54) | (tmp << (64 - 54));
+ b5 = ror64(tmp, 54);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 9) | (tmp << (64 - 9));
+ b7 = ror64(tmp, 9);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 44) | (tmp << (64 - 44));
+ b1 = ror64(tmp, 44);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 39) | (tmp << (64 - 39));
+ b7 = ror64(tmp, 39);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 36) | (tmp << (64 - 36));
+ b5 = ror64(tmp, 36);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 49) | (tmp << (64 - 49));
+ b3 = ror64(tmp, 49);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 17) | (tmp << (64 - 17));
+ b1 = ror64(tmp, 17);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 42) | (tmp << (64 - 42));
+ b3 = ror64(tmp, 42);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 14) | (tmp << (64 - 14));
+ b5 = ror64(tmp, 14);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 27) | (tmp << (64 - 27));
+ b7 = ror64(tmp, 27);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 33) | (tmp << (64 - 33));
+ b1 = ror64(tmp, 33);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 37) | (tmp << (64 - 37));
+ b7 = ror64(tmp, 37);
b6 -= b7 + k0 + t1;
b7 -= k1 + 12;
tmp = b5 ^ b4;
- b5 = (tmp >> 19) | (tmp << (64 - 19));
+ b5 = ror64(tmp, 19);
b4 -= b5 + k7;
b5 -= k8 + t0;
tmp = b3 ^ b2;
- b3 = (tmp >> 36) | (tmp << (64 - 36));
+ b3 = ror64(tmp, 36);
b2 -= b3 + k5;
b3 -= k6;
tmp = b1 ^ b0;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b0 -= b1 + k3;
b1 -= k4;
tmp = b3 ^ b4;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 56) | (tmp << (64 - 56));
+ b5 = ror64(tmp, 56);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 35) | (tmp << (64 - 35));
+ b7 = ror64(tmp, 35);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 8) | (tmp << (64 - 8));
+ b1 = ror64(tmp, 8);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 43) | (tmp << (64 - 43));
+ b7 = ror64(tmp, 43);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 39) | (tmp << (64 - 39));
+ b5 = ror64(tmp, 39);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 29) | (tmp << (64 - 29));
+ b3 = ror64(tmp, 29);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 17) | (tmp << (64 - 17));
+ b3 = ror64(tmp, 17);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 10) | (tmp << (64 - 10));
+ b5 = ror64(tmp, 10);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 50) | (tmp << (64 - 50));
+ b7 = ror64(tmp, 50);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 24) | (tmp << (64 - 24));
+ b7 = ror64(tmp, 24);
b6 -= b7 + k8 + t0;
b7 -= k0 + 11;
tmp = b5 ^ b4;
- b5 = (tmp >> 34) | (tmp << (64 - 34));
+ b5 = ror64(tmp, 34);
b4 -= b5 + k6;
b5 -= k7 + t2;
tmp = b3 ^ b2;
- b3 = (tmp >> 30) | (tmp << (64 - 30));
+ b3 = ror64(tmp, 30);
b2 -= b3 + k4;
b3 -= k5;
tmp = b1 ^ b0;
- b1 = (tmp >> 39) | (tmp << (64 - 39));
+ b1 = ror64(tmp, 39);
b0 -= b1 + k2;
b1 -= k3;
tmp = b3 ^ b4;
- b3 = (tmp >> 56) | (tmp << (64 - 56));
+ b3 = ror64(tmp, 56);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 54) | (tmp << (64 - 54));
+ b5 = ror64(tmp, 54);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 9) | (tmp << (64 - 9));
+ b7 = ror64(tmp, 9);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 44) | (tmp << (64 - 44));
+ b1 = ror64(tmp, 44);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 39) | (tmp << (64 - 39));
+ b7 = ror64(tmp, 39);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 36) | (tmp << (64 - 36));
+ b5 = ror64(tmp, 36);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 49) | (tmp << (64 - 49));
+ b3 = ror64(tmp, 49);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 17) | (tmp << (64 - 17));
+ b1 = ror64(tmp, 17);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 42) | (tmp << (64 - 42));
+ b3 = ror64(tmp, 42);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 14) | (tmp << (64 - 14));
+ b5 = ror64(tmp, 14);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 27) | (tmp << (64 - 27));
+ b7 = ror64(tmp, 27);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 33) | (tmp << (64 - 33));
+ b1 = ror64(tmp, 33);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 37) | (tmp << (64 - 37));
+ b7 = ror64(tmp, 37);
b6 -= b7 + k7 + t2;
b7 -= k8 + 10;
tmp = b5 ^ b4;
- b5 = (tmp >> 19) | (tmp << (64 - 19));
+ b5 = ror64(tmp, 19);
b4 -= b5 + k5;
b5 -= k6 + t1;
tmp = b3 ^ b2;
- b3 = (tmp >> 36) | (tmp << (64 - 36));
+ b3 = ror64(tmp, 36);
b2 -= b3 + k3;
b3 -= k4;
tmp = b1 ^ b0;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b0 -= b1 + k1;
b1 -= k2;
tmp = b3 ^ b4;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 56) | (tmp << (64 - 56));
+ b5 = ror64(tmp, 56);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 35) | (tmp << (64 - 35));
+ b7 = ror64(tmp, 35);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 8) | (tmp << (64 - 8));
+ b1 = ror64(tmp, 8);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 43) | (tmp << (64 - 43));
+ b7 = ror64(tmp, 43);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 39) | (tmp << (64 - 39));
+ b5 = ror64(tmp, 39);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 29) | (tmp << (64 - 29));
+ b3 = ror64(tmp, 29);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 17) | (tmp << (64 - 17));
+ b3 = ror64(tmp, 17);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 10) | (tmp << (64 - 10));
+ b5 = ror64(tmp, 10);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 50) | (tmp << (64 - 50));
+ b7 = ror64(tmp, 50);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 24) | (tmp << (64 - 24));
+ b7 = ror64(tmp, 24);
b6 -= b7 + k6 + t1;
b7 -= k7 + 9;
tmp = b5 ^ b4;
- b5 = (tmp >> 34) | (tmp << (64 - 34));
+ b5 = ror64(tmp, 34);
b4 -= b5 + k4;
b5 -= k5 + t0;
tmp = b3 ^ b2;
- b3 = (tmp >> 30) | (tmp << (64 - 30));
+ b3 = ror64(tmp, 30);
b2 -= b3 + k2;
b3 -= k3;
tmp = b1 ^ b0;
- b1 = (tmp >> 39) | (tmp << (64 - 39));
+ b1 = ror64(tmp, 39);
b0 -= b1 + k0;
b1 -= k1;
tmp = b3 ^ b4;
- b3 = (tmp >> 56) | (tmp << (64 - 56));
+ b3 = ror64(tmp, 56);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 54) | (tmp << (64 - 54));
+ b5 = ror64(tmp, 54);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 9) | (tmp << (64 - 9));
+ b7 = ror64(tmp, 9);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 44) | (tmp << (64 - 44));
+ b1 = ror64(tmp, 44);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 39) | (tmp << (64 - 39));
+ b7 = ror64(tmp, 39);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 36) | (tmp << (64 - 36));
+ b5 = ror64(tmp, 36);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 49) | (tmp << (64 - 49));
+ b3 = ror64(tmp, 49);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 17) | (tmp << (64 - 17));
+ b1 = ror64(tmp, 17);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 42) | (tmp << (64 - 42));
+ b3 = ror64(tmp, 42);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 14) | (tmp << (64 - 14));
+ b5 = ror64(tmp, 14);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 27) | (tmp << (64 - 27));
+ b7 = ror64(tmp, 27);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 33) | (tmp << (64 - 33));
+ b1 = ror64(tmp, 33);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 37) | (tmp << (64 - 37));
+ b7 = ror64(tmp, 37);
b6 -= b7 + k5 + t0;
b7 -= k6 + 8;
tmp = b5 ^ b4;
- b5 = (tmp >> 19) | (tmp << (64 - 19));
+ b5 = ror64(tmp, 19);
b4 -= b5 + k3;
b5 -= k4 + t2;
tmp = b3 ^ b2;
- b3 = (tmp >> 36) | (tmp << (64 - 36));
+ b3 = ror64(tmp, 36);
b2 -= b3 + k1;
b3 -= k2;
tmp = b1 ^ b0;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b0 -= b1 + k8;
b1 -= k0;
tmp = b3 ^ b4;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 56) | (tmp << (64 - 56));
+ b5 = ror64(tmp, 56);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 35) | (tmp << (64 - 35));
+ b7 = ror64(tmp, 35);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 8) | (tmp << (64 - 8));
+ b1 = ror64(tmp, 8);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 43) | (tmp << (64 - 43));
+ b7 = ror64(tmp, 43);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 39) | (tmp << (64 - 39));
+ b5 = ror64(tmp, 39);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 29) | (tmp << (64 - 29));
+ b3 = ror64(tmp, 29);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 17) | (tmp << (64 - 17));
+ b3 = ror64(tmp, 17);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 10) | (tmp << (64 - 10));
+ b5 = ror64(tmp, 10);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 50) | (tmp << (64 - 50));
+ b7 = ror64(tmp, 50);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 24) | (tmp << (64 - 24));
+ b7 = ror64(tmp, 24);
b6 -= b7 + k4 + t2;
b7 -= k5 + 7;
tmp = b5 ^ b4;
- b5 = (tmp >> 34) | (tmp << (64 - 34));
+ b5 = ror64(tmp, 34);
b4 -= b5 + k2;
b5 -= k3 + t1;
tmp = b3 ^ b2;
- b3 = (tmp >> 30) | (tmp << (64 - 30));
+ b3 = ror64(tmp, 30);
b2 -= b3 + k0;
b3 -= k1;
tmp = b1 ^ b0;
- b1 = (tmp >> 39) | (tmp << (64 - 39));
+ b1 = ror64(tmp, 39);
b0 -= b1 + k7;
b1 -= k8;
tmp = b3 ^ b4;
- b3 = (tmp >> 56) | (tmp << (64 - 56));
+ b3 = ror64(tmp, 56);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 54) | (tmp << (64 - 54));
+ b5 = ror64(tmp, 54);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 9) | (tmp << (64 - 9));
+ b7 = ror64(tmp, 9);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 44) | (tmp << (64 - 44));
+ b1 = ror64(tmp, 44);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 39) | (tmp << (64 - 39));
+ b7 = ror64(tmp, 39);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 36) | (tmp << (64 - 36));
+ b5 = ror64(tmp, 36);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 49) | (tmp << (64 - 49));
+ b3 = ror64(tmp, 49);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 17) | (tmp << (64 - 17));
+ b1 = ror64(tmp, 17);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 42) | (tmp << (64 - 42));
+ b3 = ror64(tmp, 42);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 14) | (tmp << (64 - 14));
+ b5 = ror64(tmp, 14);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 27) | (tmp << (64 - 27));
+ b7 = ror64(tmp, 27);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 33) | (tmp << (64 - 33));
+ b1 = ror64(tmp, 33);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 37) | (tmp << (64 - 37));
+ b7 = ror64(tmp, 37);
b6 -= b7 + k3 + t1;
b7 -= k4 + 6;
tmp = b5 ^ b4;
- b5 = (tmp >> 19) | (tmp << (64 - 19));
+ b5 = ror64(tmp, 19);
b4 -= b5 + k1;
b5 -= k2 + t0;
tmp = b3 ^ b2;
- b3 = (tmp >> 36) | (tmp << (64 - 36));
+ b3 = ror64(tmp, 36);
b2 -= b3 + k8;
b3 -= k0;
tmp = b1 ^ b0;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b0 -= b1 + k6;
b1 -= k7;
tmp = b3 ^ b4;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 56) | (tmp << (64 - 56));
+ b5 = ror64(tmp, 56);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 35) | (tmp << (64 - 35));
+ b7 = ror64(tmp, 35);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 8) | (tmp << (64 - 8));
+ b1 = ror64(tmp, 8);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 43) | (tmp << (64 - 43));
+ b7 = ror64(tmp, 43);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 39) | (tmp << (64 - 39));
+ b5 = ror64(tmp, 39);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 29) | (tmp << (64 - 29));
+ b3 = ror64(tmp, 29);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 17) | (tmp << (64 - 17));
+ b3 = ror64(tmp, 17);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 10) | (tmp << (64 - 10));
+ b5 = ror64(tmp, 10);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 50) | (tmp << (64 - 50));
+ b7 = ror64(tmp, 50);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 24) | (tmp << (64 - 24));
+ b7 = ror64(tmp, 24);
b6 -= b7 + k2 + t0;
b7 -= k3 + 5;
tmp = b5 ^ b4;
- b5 = (tmp >> 34) | (tmp << (64 - 34));
+ b5 = ror64(tmp, 34);
b4 -= b5 + k0;
b5 -= k1 + t2;
tmp = b3 ^ b2;
- b3 = (tmp >> 30) | (tmp << (64 - 30));
+ b3 = ror64(tmp, 30);
b2 -= b3 + k7;
b3 -= k8;
tmp = b1 ^ b0;
- b1 = (tmp >> 39) | (tmp << (64 - 39));
+ b1 = ror64(tmp, 39);
b0 -= b1 + k5;
b1 -= k6;
tmp = b3 ^ b4;
- b3 = (tmp >> 56) | (tmp << (64 - 56));
+ b3 = ror64(tmp, 56);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 54) | (tmp << (64 - 54));
+ b5 = ror64(tmp, 54);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 9) | (tmp << (64 - 9));
+ b7 = ror64(tmp, 9);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 44) | (tmp << (64 - 44));
+ b1 = ror64(tmp, 44);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 39) | (tmp << (64 - 39));
+ b7 = ror64(tmp, 39);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 36) | (tmp << (64 - 36));
+ b5 = ror64(tmp, 36);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 49) | (tmp << (64 - 49));
+ b3 = ror64(tmp, 49);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 17) | (tmp << (64 - 17));
+ b1 = ror64(tmp, 17);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 42) | (tmp << (64 - 42));
+ b3 = ror64(tmp, 42);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 14) | (tmp << (64 - 14));
+ b5 = ror64(tmp, 14);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 27) | (tmp << (64 - 27));
+ b7 = ror64(tmp, 27);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 33) | (tmp << (64 - 33));
+ b1 = ror64(tmp, 33);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 37) | (tmp << (64 - 37));
+ b7 = ror64(tmp, 37);
b6 -= b7 + k1 + t2;
b7 -= k2 + 4;
tmp = b5 ^ b4;
- b5 = (tmp >> 19) | (tmp << (64 - 19));
+ b5 = ror64(tmp, 19);
b4 -= b5 + k8;
b5 -= k0 + t1;
tmp = b3 ^ b2;
- b3 = (tmp >> 36) | (tmp << (64 - 36));
+ b3 = ror64(tmp, 36);
b2 -= b3 + k6;
b3 -= k7;
tmp = b1 ^ b0;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b0 -= b1 + k4;
b1 -= k5;
tmp = b3 ^ b4;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 56) | (tmp << (64 - 56));
+ b5 = ror64(tmp, 56);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 35) | (tmp << (64 - 35));
+ b7 = ror64(tmp, 35);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 8) | (tmp << (64 - 8));
+ b1 = ror64(tmp, 8);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 43) | (tmp << (64 - 43));
+ b7 = ror64(tmp, 43);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 39) | (tmp << (64 - 39));
+ b5 = ror64(tmp, 39);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 29) | (tmp << (64 - 29));
+ b3 = ror64(tmp, 29);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 17) | (tmp << (64 - 17));
+ b3 = ror64(tmp, 17);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 10) | (tmp << (64 - 10));
+ b5 = ror64(tmp, 10);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 50) | (tmp << (64 - 50));
+ b7 = ror64(tmp, 50);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 24) | (tmp << (64 - 24));
+ b7 = ror64(tmp, 24);
b6 -= b7 + k0 + t1;
b7 -= k1 + 3;
tmp = b5 ^ b4;
- b5 = (tmp >> 34) | (tmp << (64 - 34));
+ b5 = ror64(tmp, 34);
b4 -= b5 + k7;
b5 -= k8 + t0;
tmp = b3 ^ b2;
- b3 = (tmp >> 30) | (tmp << (64 - 30));
+ b3 = ror64(tmp, 30);
b2 -= b3 + k5;
b3 -= k6;
tmp = b1 ^ b0;
- b1 = (tmp >> 39) | (tmp << (64 - 39));
+ b1 = ror64(tmp, 39);
b0 -= b1 + k3;
b1 -= k4;
tmp = b3 ^ b4;
- b3 = (tmp >> 56) | (tmp << (64 - 56));
+ b3 = ror64(tmp, 56);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 54) | (tmp << (64 - 54));
+ b5 = ror64(tmp, 54);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 9) | (tmp << (64 - 9));
+ b7 = ror64(tmp, 9);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 44) | (tmp << (64 - 44));
+ b1 = ror64(tmp, 44);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 39) | (tmp << (64 - 39));
+ b7 = ror64(tmp, 39);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 36) | (tmp << (64 - 36));
+ b5 = ror64(tmp, 36);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 49) | (tmp << (64 - 49));
+ b3 = ror64(tmp, 49);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 17) | (tmp << (64 - 17));
+ b1 = ror64(tmp, 17);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 42) | (tmp << (64 - 42));
+ b3 = ror64(tmp, 42);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 14) | (tmp << (64 - 14));
+ b5 = ror64(tmp, 14);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 27) | (tmp << (64 - 27));
+ b7 = ror64(tmp, 27);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 33) | (tmp << (64 - 33));
+ b1 = ror64(tmp, 33);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 37) | (tmp << (64 - 37));
+ b7 = ror64(tmp, 37);
b6 -= b7 + k8 + t0;
b7 -= k0 + 2;
tmp = b5 ^ b4;
- b5 = (tmp >> 19) | (tmp << (64 - 19));
+ b5 = ror64(tmp, 19);
b4 -= b5 + k6;
b5 -= k7 + t2;
tmp = b3 ^ b2;
- b3 = (tmp >> 36) | (tmp << (64 - 36));
+ b3 = ror64(tmp, 36);
b2 -= b3 + k4;
b3 -= k5;
tmp = b1 ^ b0;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b0 -= b1 + k2;
b1 -= k3;
tmp = b3 ^ b4;
- b3 = (tmp >> 22) | (tmp << (64 - 22));
+ b3 = ror64(tmp, 22);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 56) | (tmp << (64 - 56));
+ b5 = ror64(tmp, 56);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 35) | (tmp << (64 - 35));
+ b7 = ror64(tmp, 35);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 8) | (tmp << (64 - 8));
+ b1 = ror64(tmp, 8);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 43) | (tmp << (64 - 43));
+ b7 = ror64(tmp, 43);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 39) | (tmp << (64 - 39));
+ b5 = ror64(tmp, 39);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 29) | (tmp << (64 - 29));
+ b3 = ror64(tmp, 29);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 25) | (tmp << (64 - 25));
+ b1 = ror64(tmp, 25);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 17) | (tmp << (64 - 17));
+ b3 = ror64(tmp, 17);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 10) | (tmp << (64 - 10));
+ b5 = ror64(tmp, 10);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 50) | (tmp << (64 - 50));
+ b7 = ror64(tmp, 50);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 24) | (tmp << (64 - 24));
+ b7 = ror64(tmp, 24);
b6 -= b7 + k7 + t2;
b7 -= k8 + 1;
tmp = b5 ^ b4;
- b5 = (tmp >> 34) | (tmp << (64 - 34));
+ b5 = ror64(tmp, 34);
b4 -= b5 + k5;
b5 -= k6 + t1;
tmp = b3 ^ b2;
- b3 = (tmp >> 30) | (tmp << (64 - 30));
+ b3 = ror64(tmp, 30);
b2 -= b3 + k3;
b3 -= k4;
tmp = b1 ^ b0;
- b1 = (tmp >> 39) | (tmp << (64 - 39));
+ b1 = ror64(tmp, 39);
b0 -= b1 + k1;
b1 -= k2;
tmp = b3 ^ b4;
- b3 = (tmp >> 56) | (tmp << (64 - 56));
+ b3 = ror64(tmp, 56);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 54) | (tmp << (64 - 54));
+ b5 = ror64(tmp, 54);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 9) | (tmp << (64 - 9));
+ b7 = ror64(tmp, 9);
b0 -= b7;
tmp = b1 ^ b6;
- b1 = (tmp >> 44) | (tmp << (64 - 44));
+ b1 = ror64(tmp, 44);
b6 -= b1;
tmp = b7 ^ b2;
- b7 = (tmp >> 39) | (tmp << (64 - 39));
+ b7 = ror64(tmp, 39);
b2 -= b7;
tmp = b5 ^ b0;
- b5 = (tmp >> 36) | (tmp << (64 - 36));
+ b5 = ror64(tmp, 36);
b0 -= b5;
tmp = b3 ^ b6;
- b3 = (tmp >> 49) | (tmp << (64 - 49));
+ b3 = ror64(tmp, 49);
b6 -= b3;
tmp = b1 ^ b4;
- b1 = (tmp >> 17) | (tmp << (64 - 17));
+ b1 = ror64(tmp, 17);
b4 -= b1;
tmp = b3 ^ b0;
- b3 = (tmp >> 42) | (tmp << (64 - 42));
+ b3 = ror64(tmp, 42);
b0 -= b3;
tmp = b5 ^ b6;
- b5 = (tmp >> 14) | (tmp << (64 - 14));
+ b5 = ror64(tmp, 14);
b6 -= b5;
tmp = b7 ^ b4;
- b7 = (tmp >> 27) | (tmp << (64 - 27));
+ b7 = ror64(tmp, 27);
b4 -= b7;
tmp = b1 ^ b2;
- b1 = (tmp >> 33) | (tmp << (64 - 33));
+ b1 = ror64(tmp, 33);
b2 -= b1;
tmp = b7 ^ b6;
- b7 = (tmp >> 37) | (tmp << (64 - 37));
+ b7 = ror64(tmp, 37);
b6 -= b7 + k6 + t1;
b7 -= k7;
tmp = b5 ^ b4;
- b5 = (tmp >> 19) | (tmp << (64 - 19));
+ b5 = ror64(tmp, 19);
b4 -= b5 + k4;
b5 -= k5 + t0;
tmp = b3 ^ b2;
- b3 = (tmp >> 36) | (tmp << (64 - 36));
+ b3 = ror64(tmp, 36);
b2 -= b3 + k2;
b3 -= k3;
tmp = b1 ^ b0;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b0 -= b1 + k0;
b1 -= k1;
@@ -5521,2722 +5521,2722 @@ void threefish_decrypt_1024(struct threefish_key *key_ctx, u64 *input,
b14 -= k0 + t0;
b15 -= k1 + 20;
tmp = b7 ^ b12;
- b7 = (tmp >> 20) | (tmp << (64 - 20));
+ b7 = ror64(tmp, 20);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 37) | (tmp << (64 - 37));
+ b3 = ror64(tmp, 37);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 31) | (tmp << (64 - 31));
+ b5 = ror64(tmp, 31);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 52) | (tmp << (64 - 52));
+ b9 = ror64(tmp, 52);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 35) | (tmp << (64 - 35));
+ b13 = ror64(tmp, 35);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 48) | (tmp << (64 - 48));
+ b11 = ror64(tmp, 48);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 9) | (tmp << (64 - 9));
+ b15 = ror64(tmp, 9);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 25) | (tmp << (64 - 25));
+ b9 = ror64(tmp, 25);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 44) | (tmp << (64 - 44));
+ b11 = ror64(tmp, 44);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 42) | (tmp << (64 - 42));
+ b13 = ror64(tmp, 42);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 19) | (tmp << (64 - 19));
+ b15 = ror64(tmp, 19);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 47) | (tmp << (64 - 47));
+ b3 = ror64(tmp, 47);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 44) | (tmp << (64 - 44));
+ b5 = ror64(tmp, 44);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 42) | (tmp << (64 - 42));
+ b5 = ror64(tmp, 42);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 53) | (tmp << (64 - 53));
+ b3 = ror64(tmp, 53);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 4) | (tmp << (64 - 4));
+ b7 = ror64(tmp, 4);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 51) | (tmp << (64 - 51));
+ b15 = ror64(tmp, 51);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 56) | (tmp << (64 - 56));
+ b11 = ror64(tmp, 56);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 34) | (tmp << (64 - 34));
+ b13 = ror64(tmp, 34);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 16) | (tmp << (64 - 16));
+ b9 = ror64(tmp, 16);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 30) | (tmp << (64 - 30));
+ b15 = ror64(tmp, 30);
b14 -= b15 + k16 + t2;
b15 -= k0 + 19;
tmp = b13 ^ b12;
- b13 = (tmp >> 44) | (tmp << (64 - 44));
+ b13 = ror64(tmp, 44);
b12 -= b13 + k14;
b13 -= k15 + t1;
tmp = b11 ^ b10;
- b11 = (tmp >> 47) | (tmp << (64 - 47));
+ b11 = ror64(tmp, 47);
b10 -= b11 + k12;
b11 -= k13;
tmp = b9 ^ b8;
- b9 = (tmp >> 12) | (tmp << (64 - 12));
+ b9 = ror64(tmp, 12);
b8 -= b9 + k10;
b9 -= k11;
tmp = b7 ^ b6;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b6 -= b7 + k8;
b7 -= k9;
tmp = b5 ^ b4;
- b5 = (tmp >> 37) | (tmp << (64 - 37));
+ b5 = ror64(tmp, 37);
b4 -= b5 + k6;
b5 -= k7;
tmp = b3 ^ b2;
- b3 = (tmp >> 9) | (tmp << (64 - 9));
+ b3 = ror64(tmp, 9);
b2 -= b3 + k4;
b3 -= k5;
tmp = b1 ^ b0;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b0 -= b1 + k2;
b1 -= k3;
tmp = b7 ^ b12;
- b7 = (tmp >> 25) | (tmp << (64 - 25));
+ b7 = ror64(tmp, 25);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 28) | (tmp << (64 - 28));
+ b5 = ror64(tmp, 28);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 47) | (tmp << (64 - 47));
+ b1 = ror64(tmp, 47);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 41) | (tmp << (64 - 41));
+ b9 = ror64(tmp, 41);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 48) | (tmp << (64 - 48));
+ b13 = ror64(tmp, 48);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 20) | (tmp << (64 - 20));
+ b11 = ror64(tmp, 20);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 5) | (tmp << (64 - 5));
+ b15 = ror64(tmp, 5);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 17) | (tmp << (64 - 17));
+ b9 = ror64(tmp, 17);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 59) | (tmp << (64 - 59));
+ b11 = ror64(tmp, 59);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 41) | (tmp << (64 - 41));
+ b13 = ror64(tmp, 41);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 34) | (tmp << (64 - 34));
+ b15 = ror64(tmp, 34);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 51) | (tmp << (64 - 51));
+ b3 = ror64(tmp, 51);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 4) | (tmp << (64 - 4));
+ b5 = ror64(tmp, 4);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 33) | (tmp << (64 - 33));
+ b7 = ror64(tmp, 33);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 52) | (tmp << (64 - 52));
+ b1 = ror64(tmp, 52);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 23) | (tmp << (64 - 23));
+ b5 = ror64(tmp, 23);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 18) | (tmp << (64 - 18));
+ b3 = ror64(tmp, 18);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 49) | (tmp << (64 - 49));
+ b7 = ror64(tmp, 49);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 55) | (tmp << (64 - 55));
+ b15 = ror64(tmp, 55);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 10) | (tmp << (64 - 10));
+ b11 = ror64(tmp, 10);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 19) | (tmp << (64 - 19));
+ b13 = ror64(tmp, 19);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 38) | (tmp << (64 - 38));
+ b9 = ror64(tmp, 38);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 37) | (tmp << (64 - 37));
+ b15 = ror64(tmp, 37);
b14 -= b15 + k15 + t1;
b15 -= k16 + 18;
tmp = b13 ^ b12;
- b13 = (tmp >> 22) | (tmp << (64 - 22));
+ b13 = ror64(tmp, 22);
b12 -= b13 + k13;
b13 -= k14 + t0;
tmp = b11 ^ b10;
- b11 = (tmp >> 17) | (tmp << (64 - 17));
+ b11 = ror64(tmp, 17);
b10 -= b11 + k11;
b11 -= k12;
tmp = b9 ^ b8;
- b9 = (tmp >> 8) | (tmp << (64 - 8));
+ b9 = ror64(tmp, 8);
b8 -= b9 + k9;
b9 -= k10;
tmp = b7 ^ b6;
- b7 = (tmp >> 47) | (tmp << (64 - 47));
+ b7 = ror64(tmp, 47);
b6 -= b7 + k7;
b7 -= k8;
tmp = b5 ^ b4;
- b5 = (tmp >> 8) | (tmp << (64 - 8));
+ b5 = ror64(tmp, 8);
b4 -= b5 + k5;
b5 -= k6;
tmp = b3 ^ b2;
- b3 = (tmp >> 13) | (tmp << (64 - 13));
+ b3 = ror64(tmp, 13);
b2 -= b3 + k3;
b3 -= k4;
tmp = b1 ^ b0;
- b1 = (tmp >> 24) | (tmp << (64 - 24));
+ b1 = ror64(tmp, 24);
b0 -= b1 + k1;
b1 -= k2;
tmp = b7 ^ b12;
- b7 = (tmp >> 20) | (tmp << (64 - 20));
+ b7 = ror64(tmp, 20);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 37) | (tmp << (64 - 37));
+ b3 = ror64(tmp, 37);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 31) | (tmp << (64 - 31));
+ b5 = ror64(tmp, 31);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 52) | (tmp << (64 - 52));
+ b9 = ror64(tmp, 52);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 35) | (tmp << (64 - 35));
+ b13 = ror64(tmp, 35);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 48) | (tmp << (64 - 48));
+ b11 = ror64(tmp, 48);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 9) | (tmp << (64 - 9));
+ b15 = ror64(tmp, 9);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 25) | (tmp << (64 - 25));
+ b9 = ror64(tmp, 25);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 44) | (tmp << (64 - 44));
+ b11 = ror64(tmp, 44);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 42) | (tmp << (64 - 42));
+ b13 = ror64(tmp, 42);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 19) | (tmp << (64 - 19));
+ b15 = ror64(tmp, 19);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 47) | (tmp << (64 - 47));
+ b3 = ror64(tmp, 47);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 44) | (tmp << (64 - 44));
+ b5 = ror64(tmp, 44);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 42) | (tmp << (64 - 42));
+ b5 = ror64(tmp, 42);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 53) | (tmp << (64 - 53));
+ b3 = ror64(tmp, 53);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 4) | (tmp << (64 - 4));
+ b7 = ror64(tmp, 4);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 51) | (tmp << (64 - 51));
+ b15 = ror64(tmp, 51);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 56) | (tmp << (64 - 56));
+ b11 = ror64(tmp, 56);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 34) | (tmp << (64 - 34));
+ b13 = ror64(tmp, 34);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 16) | (tmp << (64 - 16));
+ b9 = ror64(tmp, 16);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 30) | (tmp << (64 - 30));
+ b15 = ror64(tmp, 30);
b14 -= b15 + k14 + t0;
b15 -= k15 + 17;
tmp = b13 ^ b12;
- b13 = (tmp >> 44) | (tmp << (64 - 44));
+ b13 = ror64(tmp, 44);
b12 -= b13 + k12;
b13 -= k13 + t2;
tmp = b11 ^ b10;
- b11 = (tmp >> 47) | (tmp << (64 - 47));
+ b11 = ror64(tmp, 47);
b10 -= b11 + k10;
b11 -= k11;
tmp = b9 ^ b8;
- b9 = (tmp >> 12) | (tmp << (64 - 12));
+ b9 = ror64(tmp, 12);
b8 -= b9 + k8;
b9 -= k9;
tmp = b7 ^ b6;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b6 -= b7 + k6;
b7 -= k7;
tmp = b5 ^ b4;
- b5 = (tmp >> 37) | (tmp << (64 - 37));
+ b5 = ror64(tmp, 37);
b4 -= b5 + k4;
b5 -= k5;
tmp = b3 ^ b2;
- b3 = (tmp >> 9) | (tmp << (64 - 9));
+ b3 = ror64(tmp, 9);
b2 -= b3 + k2;
b3 -= k3;
tmp = b1 ^ b0;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b0 -= b1 + k0;
b1 -= k1;
tmp = b7 ^ b12;
- b7 = (tmp >> 25) | (tmp << (64 - 25));
+ b7 = ror64(tmp, 25);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 28) | (tmp << (64 - 28));
+ b5 = ror64(tmp, 28);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 47) | (tmp << (64 - 47));
+ b1 = ror64(tmp, 47);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 41) | (tmp << (64 - 41));
+ b9 = ror64(tmp, 41);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 48) | (tmp << (64 - 48));
+ b13 = ror64(tmp, 48);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 20) | (tmp << (64 - 20));
+ b11 = ror64(tmp, 20);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 5) | (tmp << (64 - 5));
+ b15 = ror64(tmp, 5);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 17) | (tmp << (64 - 17));
+ b9 = ror64(tmp, 17);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 59) | (tmp << (64 - 59));
+ b11 = ror64(tmp, 59);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 41) | (tmp << (64 - 41));
+ b13 = ror64(tmp, 41);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 34) | (tmp << (64 - 34));
+ b15 = ror64(tmp, 34);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 51) | (tmp << (64 - 51));
+ b3 = ror64(tmp, 51);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 4) | (tmp << (64 - 4));
+ b5 = ror64(tmp, 4);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 33) | (tmp << (64 - 33));
+ b7 = ror64(tmp, 33);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 52) | (tmp << (64 - 52));
+ b1 = ror64(tmp, 52);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 23) | (tmp << (64 - 23));
+ b5 = ror64(tmp, 23);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 18) | (tmp << (64 - 18));
+ b3 = ror64(tmp, 18);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 49) | (tmp << (64 - 49));
+ b7 = ror64(tmp, 49);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 55) | (tmp << (64 - 55));
+ b15 = ror64(tmp, 55);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 10) | (tmp << (64 - 10));
+ b11 = ror64(tmp, 10);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 19) | (tmp << (64 - 19));
+ b13 = ror64(tmp, 19);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 38) | (tmp << (64 - 38));
+ b9 = ror64(tmp, 38);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 37) | (tmp << (64 - 37));
+ b15 = ror64(tmp, 37);
b14 -= b15 + k13 + t2;
b15 -= k14 + 16;
tmp = b13 ^ b12;
- b13 = (tmp >> 22) | (tmp << (64 - 22));
+ b13 = ror64(tmp, 22);
b12 -= b13 + k11;
b13 -= k12 + t1;
tmp = b11 ^ b10;
- b11 = (tmp >> 17) | (tmp << (64 - 17));
+ b11 = ror64(tmp, 17);
b10 -= b11 + k9;
b11 -= k10;
tmp = b9 ^ b8;
- b9 = (tmp >> 8) | (tmp << (64 - 8));
+ b9 = ror64(tmp, 8);
b8 -= b9 + k7;
b9 -= k8;
tmp = b7 ^ b6;
- b7 = (tmp >> 47) | (tmp << (64 - 47));
+ b7 = ror64(tmp, 47);
b6 -= b7 + k5;
b7 -= k6;
tmp = b5 ^ b4;
- b5 = (tmp >> 8) | (tmp << (64 - 8));
+ b5 = ror64(tmp, 8);
b4 -= b5 + k3;
b5 -= k4;
tmp = b3 ^ b2;
- b3 = (tmp >> 13) | (tmp << (64 - 13));
+ b3 = ror64(tmp, 13);
b2 -= b3 + k1;
b3 -= k2;
tmp = b1 ^ b0;
- b1 = (tmp >> 24) | (tmp << (64 - 24));
+ b1 = ror64(tmp, 24);
b0 -= b1 + k16;
b1 -= k0;
tmp = b7 ^ b12;
- b7 = (tmp >> 20) | (tmp << (64 - 20));
+ b7 = ror64(tmp, 20);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 37) | (tmp << (64 - 37));
+ b3 = ror64(tmp, 37);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 31) | (tmp << (64 - 31));
+ b5 = ror64(tmp, 31);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 52) | (tmp << (64 - 52));
+ b9 = ror64(tmp, 52);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 35) | (tmp << (64 - 35));
+ b13 = ror64(tmp, 35);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 48) | (tmp << (64 - 48));
+ b11 = ror64(tmp, 48);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 9) | (tmp << (64 - 9));
+ b15 = ror64(tmp, 9);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 25) | (tmp << (64 - 25));
+ b9 = ror64(tmp, 25);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 44) | (tmp << (64 - 44));
+ b11 = ror64(tmp, 44);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 42) | (tmp << (64 - 42));
+ b13 = ror64(tmp, 42);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 19) | (tmp << (64 - 19));
+ b15 = ror64(tmp, 19);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 47) | (tmp << (64 - 47));
+ b3 = ror64(tmp, 47);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 44) | (tmp << (64 - 44));
+ b5 = ror64(tmp, 44);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 42) | (tmp << (64 - 42));
+ b5 = ror64(tmp, 42);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 53) | (tmp << (64 - 53));
+ b3 = ror64(tmp, 53);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 4) | (tmp << (64 - 4));
+ b7 = ror64(tmp, 4);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 51) | (tmp << (64 - 51));
+ b15 = ror64(tmp, 51);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 56) | (tmp << (64 - 56));
+ b11 = ror64(tmp, 56);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 34) | (tmp << (64 - 34));
+ b13 = ror64(tmp, 34);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 16) | (tmp << (64 - 16));
+ b9 = ror64(tmp, 16);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 30) | (tmp << (64 - 30));
+ b15 = ror64(tmp, 30);
b14 -= b15 + k12 + t1;
b15 -= k13 + 15;
tmp = b13 ^ b12;
- b13 = (tmp >> 44) | (tmp << (64 - 44));
+ b13 = ror64(tmp, 44);
b12 -= b13 + k10;
b13 -= k11 + t0;
tmp = b11 ^ b10;
- b11 = (tmp >> 47) | (tmp << (64 - 47));
+ b11 = ror64(tmp, 47);
b10 -= b11 + k8;
b11 -= k9;
tmp = b9 ^ b8;
- b9 = (tmp >> 12) | (tmp << (64 - 12));
+ b9 = ror64(tmp, 12);
b8 -= b9 + k6;
b9 -= k7;
tmp = b7 ^ b6;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b6 -= b7 + k4;
b7 -= k5;
tmp = b5 ^ b4;
- b5 = (tmp >> 37) | (tmp << (64 - 37));
+ b5 = ror64(tmp, 37);
b4 -= b5 + k2;
b5 -= k3;
tmp = b3 ^ b2;
- b3 = (tmp >> 9) | (tmp << (64 - 9));
+ b3 = ror64(tmp, 9);
b2 -= b3 + k0;
b3 -= k1;
tmp = b1 ^ b0;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b0 -= b1 + k15;
b1 -= k16;
tmp = b7 ^ b12;
- b7 = (tmp >> 25) | (tmp << (64 - 25));
+ b7 = ror64(tmp, 25);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 28) | (tmp << (64 - 28));
+ b5 = ror64(tmp, 28);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 47) | (tmp << (64 - 47));
+ b1 = ror64(tmp, 47);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 41) | (tmp << (64 - 41));
+ b9 = ror64(tmp, 41);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 48) | (tmp << (64 - 48));
+ b13 = ror64(tmp, 48);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 20) | (tmp << (64 - 20));
+ b11 = ror64(tmp, 20);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 5) | (tmp << (64 - 5));
+ b15 = ror64(tmp, 5);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 17) | (tmp << (64 - 17));
+ b9 = ror64(tmp, 17);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 59) | (tmp << (64 - 59));
+ b11 = ror64(tmp, 59);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 41) | (tmp << (64 - 41));
+ b13 = ror64(tmp, 41);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 34) | (tmp << (64 - 34));
+ b15 = ror64(tmp, 34);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 51) | (tmp << (64 - 51));
+ b3 = ror64(tmp, 51);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 4) | (tmp << (64 - 4));
+ b5 = ror64(tmp, 4);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 33) | (tmp << (64 - 33));
+ b7 = ror64(tmp, 33);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 52) | (tmp << (64 - 52));
+ b1 = ror64(tmp, 52);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 23) | (tmp << (64 - 23));
+ b5 = ror64(tmp, 23);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 18) | (tmp << (64 - 18));
+ b3 = ror64(tmp, 18);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 49) | (tmp << (64 - 49));
+ b7 = ror64(tmp, 49);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 55) | (tmp << (64 - 55));
+ b15 = ror64(tmp, 55);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 10) | (tmp << (64 - 10));
+ b11 = ror64(tmp, 10);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 19) | (tmp << (64 - 19));
+ b13 = ror64(tmp, 19);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 38) | (tmp << (64 - 38));
+ b9 = ror64(tmp, 38);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 37) | (tmp << (64 - 37));
+ b15 = ror64(tmp, 37);
b14 -= b15 + k11 + t0;
b15 -= k12 + 14;
tmp = b13 ^ b12;
- b13 = (tmp >> 22) | (tmp << (64 - 22));
+ b13 = ror64(tmp, 22);
b12 -= b13 + k9;
b13 -= k10 + t2;
tmp = b11 ^ b10;
- b11 = (tmp >> 17) | (tmp << (64 - 17));
+ b11 = ror64(tmp, 17);
b10 -= b11 + k7;
b11 -= k8;
tmp = b9 ^ b8;
- b9 = (tmp >> 8) | (tmp << (64 - 8));
+ b9 = ror64(tmp, 8);
b8 -= b9 + k5;
b9 -= k6;
tmp = b7 ^ b6;
- b7 = (tmp >> 47) | (tmp << (64 - 47));
+ b7 = ror64(tmp, 47);
b6 -= b7 + k3;
b7 -= k4;
tmp = b5 ^ b4;
- b5 = (tmp >> 8) | (tmp << (64 - 8));
+ b5 = ror64(tmp, 8);
b4 -= b5 + k1;
b5 -= k2;
tmp = b3 ^ b2;
- b3 = (tmp >> 13) | (tmp << (64 - 13));
+ b3 = ror64(tmp, 13);
b2 -= b3 + k16;
b3 -= k0;
tmp = b1 ^ b0;
- b1 = (tmp >> 24) | (tmp << (64 - 24));
+ b1 = ror64(tmp, 24);
b0 -= b1 + k14;
b1 -= k15;
tmp = b7 ^ b12;
- b7 = (tmp >> 20) | (tmp << (64 - 20));
+ b7 = ror64(tmp, 20);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 37) | (tmp << (64 - 37));
+ b3 = ror64(tmp, 37);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 31) | (tmp << (64 - 31));
+ b5 = ror64(tmp, 31);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 52) | (tmp << (64 - 52));
+ b9 = ror64(tmp, 52);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 35) | (tmp << (64 - 35));
+ b13 = ror64(tmp, 35);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 48) | (tmp << (64 - 48));
+ b11 = ror64(tmp, 48);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 9) | (tmp << (64 - 9));
+ b15 = ror64(tmp, 9);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 25) | (tmp << (64 - 25));
+ b9 = ror64(tmp, 25);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 44) | (tmp << (64 - 44));
+ b11 = ror64(tmp, 44);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 42) | (tmp << (64 - 42));
+ b13 = ror64(tmp, 42);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 19) | (tmp << (64 - 19));
+ b15 = ror64(tmp, 19);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 47) | (tmp << (64 - 47));
+ b3 = ror64(tmp, 47);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 44) | (tmp << (64 - 44));
+ b5 = ror64(tmp, 44);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 42) | (tmp << (64 - 42));
+ b5 = ror64(tmp, 42);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 53) | (tmp << (64 - 53));
+ b3 = ror64(tmp, 53);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 4) | (tmp << (64 - 4));
+ b7 = ror64(tmp, 4);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 51) | (tmp << (64 - 51));
+ b15 = ror64(tmp, 51);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 56) | (tmp << (64 - 56));
+ b11 = ror64(tmp, 56);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 34) | (tmp << (64 - 34));
+ b13 = ror64(tmp, 34);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 16) | (tmp << (64 - 16));
+ b9 = ror64(tmp, 16);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 30) | (tmp << (64 - 30));
+ b15 = ror64(tmp, 30);
b14 -= b15 + k10 + t2;
b15 -= k11 + 13;
tmp = b13 ^ b12;
- b13 = (tmp >> 44) | (tmp << (64 - 44));
+ b13 = ror64(tmp, 44);
b12 -= b13 + k8;
b13 -= k9 + t1;
tmp = b11 ^ b10;
- b11 = (tmp >> 47) | (tmp << (64 - 47));
+ b11 = ror64(tmp, 47);
b10 -= b11 + k6;
b11 -= k7;
tmp = b9 ^ b8;
- b9 = (tmp >> 12) | (tmp << (64 - 12));
+ b9 = ror64(tmp, 12);
b8 -= b9 + k4;
b9 -= k5;
tmp = b7 ^ b6;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b6 -= b7 + k2;
b7 -= k3;
tmp = b5 ^ b4;
- b5 = (tmp >> 37) | (tmp << (64 - 37));
+ b5 = ror64(tmp, 37);
b4 -= b5 + k0;
b5 -= k1;
tmp = b3 ^ b2;
- b3 = (tmp >> 9) | (tmp << (64 - 9));
+ b3 = ror64(tmp, 9);
b2 -= b3 + k15;
b3 -= k16;
tmp = b1 ^ b0;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b0 -= b1 + k13;
b1 -= k14;
tmp = b7 ^ b12;
- b7 = (tmp >> 25) | (tmp << (64 - 25));
+ b7 = ror64(tmp, 25);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 28) | (tmp << (64 - 28));
+ b5 = ror64(tmp, 28);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 47) | (tmp << (64 - 47));
+ b1 = ror64(tmp, 47);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 41) | (tmp << (64 - 41));
+ b9 = ror64(tmp, 41);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 48) | (tmp << (64 - 48));
+ b13 = ror64(tmp, 48);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 20) | (tmp << (64 - 20));
+ b11 = ror64(tmp, 20);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 5) | (tmp << (64 - 5));
+ b15 = ror64(tmp, 5);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 17) | (tmp << (64 - 17));
+ b9 = ror64(tmp, 17);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 59) | (tmp << (64 - 59));
+ b11 = ror64(tmp, 59);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 41) | (tmp << (64 - 41));
+ b13 = ror64(tmp, 41);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 34) | (tmp << (64 - 34));
+ b15 = ror64(tmp, 34);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 51) | (tmp << (64 - 51));
+ b3 = ror64(tmp, 51);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 4) | (tmp << (64 - 4));
+ b5 = ror64(tmp, 4);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 33) | (tmp << (64 - 33));
+ b7 = ror64(tmp, 33);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 52) | (tmp << (64 - 52));
+ b1 = ror64(tmp, 52);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 23) | (tmp << (64 - 23));
+ b5 = ror64(tmp, 23);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 18) | (tmp << (64 - 18));
+ b3 = ror64(tmp, 18);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 49) | (tmp << (64 - 49));
+ b7 = ror64(tmp, 49);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 55) | (tmp << (64 - 55));
+ b15 = ror64(tmp, 55);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 10) | (tmp << (64 - 10));
+ b11 = ror64(tmp, 10);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 19) | (tmp << (64 - 19));
+ b13 = ror64(tmp, 19);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 38) | (tmp << (64 - 38));
+ b9 = ror64(tmp, 38);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 37) | (tmp << (64 - 37));
+ b15 = ror64(tmp, 37);
b14 -= b15 + k9 + t1;
b15 -= k10 + 12;
tmp = b13 ^ b12;
- b13 = (tmp >> 22) | (tmp << (64 - 22));
+ b13 = ror64(tmp, 22);
b12 -= b13 + k7;
b13 -= k8 + t0;
tmp = b11 ^ b10;
- b11 = (tmp >> 17) | (tmp << (64 - 17));
+ b11 = ror64(tmp, 17);
b10 -= b11 + k5;
b11 -= k6;
tmp = b9 ^ b8;
- b9 = (tmp >> 8) | (tmp << (64 - 8));
+ b9 = ror64(tmp, 8);
b8 -= b9 + k3;
b9 -= k4;
tmp = b7 ^ b6;
- b7 = (tmp >> 47) | (tmp << (64 - 47));
+ b7 = ror64(tmp, 47);
b6 -= b7 + k1;
b7 -= k2;
tmp = b5 ^ b4;
- b5 = (tmp >> 8) | (tmp << (64 - 8));
+ b5 = ror64(tmp, 8);
b4 -= b5 + k16;
b5 -= k0;
tmp = b3 ^ b2;
- b3 = (tmp >> 13) | (tmp << (64 - 13));
+ b3 = ror64(tmp, 13);
b2 -= b3 + k14;
b3 -= k15;
tmp = b1 ^ b0;
- b1 = (tmp >> 24) | (tmp << (64 - 24));
+ b1 = ror64(tmp, 24);
b0 -= b1 + k12;
b1 -= k13;
tmp = b7 ^ b12;
- b7 = (tmp >> 20) | (tmp << (64 - 20));
+ b7 = ror64(tmp, 20);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 37) | (tmp << (64 - 37));
+ b3 = ror64(tmp, 37);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 31) | (tmp << (64 - 31));
+ b5 = ror64(tmp, 31);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 52) | (tmp << (64 - 52));
+ b9 = ror64(tmp, 52);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 35) | (tmp << (64 - 35));
+ b13 = ror64(tmp, 35);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 48) | (tmp << (64 - 48));
+ b11 = ror64(tmp, 48);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 9) | (tmp << (64 - 9));
+ b15 = ror64(tmp, 9);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 25) | (tmp << (64 - 25));
+ b9 = ror64(tmp, 25);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 44) | (tmp << (64 - 44));
+ b11 = ror64(tmp, 44);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 42) | (tmp << (64 - 42));
+ b13 = ror64(tmp, 42);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 19) | (tmp << (64 - 19));
+ b15 = ror64(tmp, 19);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 47) | (tmp << (64 - 47));
+ b3 = ror64(tmp, 47);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 44) | (tmp << (64 - 44));
+ b5 = ror64(tmp, 44);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 42) | (tmp << (64 - 42));
+ b5 = ror64(tmp, 42);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 53) | (tmp << (64 - 53));
+ b3 = ror64(tmp, 53);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 4) | (tmp << (64 - 4));
+ b7 = ror64(tmp, 4);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 51) | (tmp << (64 - 51));
+ b15 = ror64(tmp, 51);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 56) | (tmp << (64 - 56));
+ b11 = ror64(tmp, 56);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 34) | (tmp << (64 - 34));
+ b13 = ror64(tmp, 34);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 16) | (tmp << (64 - 16));
+ b9 = ror64(tmp, 16);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 30) | (tmp << (64 - 30));
+ b15 = ror64(tmp, 30);
b14 -= b15 + k8 + t0;
b15 -= k9 + 11;
tmp = b13 ^ b12;
- b13 = (tmp >> 44) | (tmp << (64 - 44));
+ b13 = ror64(tmp, 44);
b12 -= b13 + k6;
b13 -= k7 + t2;
tmp = b11 ^ b10;
- b11 = (tmp >> 47) | (tmp << (64 - 47));
+ b11 = ror64(tmp, 47);
b10 -= b11 + k4;
b11 -= k5;
tmp = b9 ^ b8;
- b9 = (tmp >> 12) | (tmp << (64 - 12));
+ b9 = ror64(tmp, 12);
b8 -= b9 + k2;
b9 -= k3;
tmp = b7 ^ b6;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b6 -= b7 + k0;
b7 -= k1;
tmp = b5 ^ b4;
- b5 = (tmp >> 37) | (tmp << (64 - 37));
+ b5 = ror64(tmp, 37);
b4 -= b5 + k15;
b5 -= k16;
tmp = b3 ^ b2;
- b3 = (tmp >> 9) | (tmp << (64 - 9));
+ b3 = ror64(tmp, 9);
b2 -= b3 + k13;
b3 -= k14;
tmp = b1 ^ b0;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b0 -= b1 + k11;
b1 -= k12;
tmp = b7 ^ b12;
- b7 = (tmp >> 25) | (tmp << (64 - 25));
+ b7 = ror64(tmp, 25);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 28) | (tmp << (64 - 28));
+ b5 = ror64(tmp, 28);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 47) | (tmp << (64 - 47));
+ b1 = ror64(tmp, 47);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 41) | (tmp << (64 - 41));
+ b9 = ror64(tmp, 41);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 48) | (tmp << (64 - 48));
+ b13 = ror64(tmp, 48);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 20) | (tmp << (64 - 20));
+ b11 = ror64(tmp, 20);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 5) | (tmp << (64 - 5));
+ b15 = ror64(tmp, 5);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 17) | (tmp << (64 - 17));
+ b9 = ror64(tmp, 17);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 59) | (tmp << (64 - 59));
+ b11 = ror64(tmp, 59);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 41) | (tmp << (64 - 41));
+ b13 = ror64(tmp, 41);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 34) | (tmp << (64 - 34));
+ b15 = ror64(tmp, 34);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 51) | (tmp << (64 - 51));
+ b3 = ror64(tmp, 51);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 4) | (tmp << (64 - 4));
+ b5 = ror64(tmp, 4);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 33) | (tmp << (64 - 33));
+ b7 = ror64(tmp, 33);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 52) | (tmp << (64 - 52));
+ b1 = ror64(tmp, 52);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 23) | (tmp << (64 - 23));
+ b5 = ror64(tmp, 23);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 18) | (tmp << (64 - 18));
+ b3 = ror64(tmp, 18);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 49) | (tmp << (64 - 49));
+ b7 = ror64(tmp, 49);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 55) | (tmp << (64 - 55));
+ b15 = ror64(tmp, 55);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 10) | (tmp << (64 - 10));
+ b11 = ror64(tmp, 10);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 19) | (tmp << (64 - 19));
+ b13 = ror64(tmp, 19);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 38) | (tmp << (64 - 38));
+ b9 = ror64(tmp, 38);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 37) | (tmp << (64 - 37));
+ b15 = ror64(tmp, 37);
b14 -= b15 + k7 + t2;
b15 -= k8 + 10;
tmp = b13 ^ b12;
- b13 = (tmp >> 22) | (tmp << (64 - 22));
+ b13 = ror64(tmp, 22);
b12 -= b13 + k5;
b13 -= k6 + t1;
tmp = b11 ^ b10;
- b11 = (tmp >> 17) | (tmp << (64 - 17));
+ b11 = ror64(tmp, 17);
b10 -= b11 + k3;
b11 -= k4;
tmp = b9 ^ b8;
- b9 = (tmp >> 8) | (tmp << (64 - 8));
+ b9 = ror64(tmp, 8);
b8 -= b9 + k1;
b9 -= k2;
tmp = b7 ^ b6;
- b7 = (tmp >> 47) | (tmp << (64 - 47));
+ b7 = ror64(tmp, 47);
b6 -= b7 + k16;
b7 -= k0;
tmp = b5 ^ b4;
- b5 = (tmp >> 8) | (tmp << (64 - 8));
+ b5 = ror64(tmp, 8);
b4 -= b5 + k14;
b5 -= k15;
tmp = b3 ^ b2;
- b3 = (tmp >> 13) | (tmp << (64 - 13));
+ b3 = ror64(tmp, 13);
b2 -= b3 + k12;
b3 -= k13;
tmp = b1 ^ b0;
- b1 = (tmp >> 24) | (tmp << (64 - 24));
+ b1 = ror64(tmp, 24);
b0 -= b1 + k10;
b1 -= k11;
tmp = b7 ^ b12;
- b7 = (tmp >> 20) | (tmp << (64 - 20));
+ b7 = ror64(tmp, 20);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 37) | (tmp << (64 - 37));
+ b3 = ror64(tmp, 37);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 31) | (tmp << (64 - 31));
+ b5 = ror64(tmp, 31);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 52) | (tmp << (64 - 52));
+ b9 = ror64(tmp, 52);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 35) | (tmp << (64 - 35));
+ b13 = ror64(tmp, 35);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 48) | (tmp << (64 - 48));
+ b11 = ror64(tmp, 48);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 9) | (tmp << (64 - 9));
+ b15 = ror64(tmp, 9);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 25) | (tmp << (64 - 25));
+ b9 = ror64(tmp, 25);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 44) | (tmp << (64 - 44));
+ b11 = ror64(tmp, 44);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 42) | (tmp << (64 - 42));
+ b13 = ror64(tmp, 42);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 19) | (tmp << (64 - 19));
+ b15 = ror64(tmp, 19);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 47) | (tmp << (64 - 47));
+ b3 = ror64(tmp, 47);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 44) | (tmp << (64 - 44));
+ b5 = ror64(tmp, 44);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 42) | (tmp << (64 - 42));
+ b5 = ror64(tmp, 42);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 53) | (tmp << (64 - 53));
+ b3 = ror64(tmp, 53);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 4) | (tmp << (64 - 4));
+ b7 = ror64(tmp, 4);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 51) | (tmp << (64 - 51));
+ b15 = ror64(tmp, 51);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 56) | (tmp << (64 - 56));
+ b11 = ror64(tmp, 56);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 34) | (tmp << (64 - 34));
+ b13 = ror64(tmp, 34);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 16) | (tmp << (64 - 16));
+ b9 = ror64(tmp, 16);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 30) | (tmp << (64 - 30));
+ b15 = ror64(tmp, 30);
b14 -= b15 + k6 + t1;
b15 -= k7 + 9;
tmp = b13 ^ b12;
- b13 = (tmp >> 44) | (tmp << (64 - 44));
+ b13 = ror64(tmp, 44);
b12 -= b13 + k4;
b13 -= k5 + t0;
tmp = b11 ^ b10;
- b11 = (tmp >> 47) | (tmp << (64 - 47));
+ b11 = ror64(tmp, 47);
b10 -= b11 + k2;
b11 -= k3;
tmp = b9 ^ b8;
- b9 = (tmp >> 12) | (tmp << (64 - 12));
+ b9 = ror64(tmp, 12);
b8 -= b9 + k0;
b9 -= k1;
tmp = b7 ^ b6;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b6 -= b7 + k15;
b7 -= k16;
tmp = b5 ^ b4;
- b5 = (tmp >> 37) | (tmp << (64 - 37));
+ b5 = ror64(tmp, 37);
b4 -= b5 + k13;
b5 -= k14;
tmp = b3 ^ b2;
- b3 = (tmp >> 9) | (tmp << (64 - 9));
+ b3 = ror64(tmp, 9);
b2 -= b3 + k11;
b3 -= k12;
tmp = b1 ^ b0;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b0 -= b1 + k9;
b1 -= k10;
tmp = b7 ^ b12;
- b7 = (tmp >> 25) | (tmp << (64 - 25));
+ b7 = ror64(tmp, 25);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 28) | (tmp << (64 - 28));
+ b5 = ror64(tmp, 28);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 47) | (tmp << (64 - 47));
+ b1 = ror64(tmp, 47);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 41) | (tmp << (64 - 41));
+ b9 = ror64(tmp, 41);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 48) | (tmp << (64 - 48));
+ b13 = ror64(tmp, 48);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 20) | (tmp << (64 - 20));
+ b11 = ror64(tmp, 20);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 5) | (tmp << (64 - 5));
+ b15 = ror64(tmp, 5);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 17) | (tmp << (64 - 17));
+ b9 = ror64(tmp, 17);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 59) | (tmp << (64 - 59));
+ b11 = ror64(tmp, 59);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 41) | (tmp << (64 - 41));
+ b13 = ror64(tmp, 41);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 34) | (tmp << (64 - 34));
+ b15 = ror64(tmp, 34);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 51) | (tmp << (64 - 51));
+ b3 = ror64(tmp, 51);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 4) | (tmp << (64 - 4));
+ b5 = ror64(tmp, 4);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 33) | (tmp << (64 - 33));
+ b7 = ror64(tmp, 33);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 52) | (tmp << (64 - 52));
+ b1 = ror64(tmp, 52);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 23) | (tmp << (64 - 23));
+ b5 = ror64(tmp, 23);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 18) | (tmp << (64 - 18));
+ b3 = ror64(tmp, 18);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 49) | (tmp << (64 - 49));
+ b7 = ror64(tmp, 49);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 55) | (tmp << (64 - 55));
+ b15 = ror64(tmp, 55);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 10) | (tmp << (64 - 10));
+ b11 = ror64(tmp, 10);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 19) | (tmp << (64 - 19));
+ b13 = ror64(tmp, 19);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 38) | (tmp << (64 - 38));
+ b9 = ror64(tmp, 38);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 37) | (tmp << (64 - 37));
+ b15 = ror64(tmp, 37);
b14 -= b15 + k5 + t0;
b15 -= k6 + 8;
tmp = b13 ^ b12;
- b13 = (tmp >> 22) | (tmp << (64 - 22));
+ b13 = ror64(tmp, 22);
b12 -= b13 + k3;
b13 -= k4 + t2;
tmp = b11 ^ b10;
- b11 = (tmp >> 17) | (tmp << (64 - 17));
+ b11 = ror64(tmp, 17);
b10 -= b11 + k1;
b11 -= k2;
tmp = b9 ^ b8;
- b9 = (tmp >> 8) | (tmp << (64 - 8));
+ b9 = ror64(tmp, 8);
b8 -= b9 + k16;
b9 -= k0;
tmp = b7 ^ b6;
- b7 = (tmp >> 47) | (tmp << (64 - 47));
+ b7 = ror64(tmp, 47);
b6 -= b7 + k14;
b7 -= k15;
tmp = b5 ^ b4;
- b5 = (tmp >> 8) | (tmp << (64 - 8));
+ b5 = ror64(tmp, 8);
b4 -= b5 + k12;
b5 -= k13;
tmp = b3 ^ b2;
- b3 = (tmp >> 13) | (tmp << (64 - 13));
+ b3 = ror64(tmp, 13);
b2 -= b3 + k10;
b3 -= k11;
tmp = b1 ^ b0;
- b1 = (tmp >> 24) | (tmp << (64 - 24));
+ b1 = ror64(tmp, 24);
b0 -= b1 + k8;
b1 -= k9;
tmp = b7 ^ b12;
- b7 = (tmp >> 20) | (tmp << (64 - 20));
+ b7 = ror64(tmp, 20);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 37) | (tmp << (64 - 37));
+ b3 = ror64(tmp, 37);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 31) | (tmp << (64 - 31));
+ b5 = ror64(tmp, 31);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 52) | (tmp << (64 - 52));
+ b9 = ror64(tmp, 52);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 35) | (tmp << (64 - 35));
+ b13 = ror64(tmp, 35);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 48) | (tmp << (64 - 48));
+ b11 = ror64(tmp, 48);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 9) | (tmp << (64 - 9));
+ b15 = ror64(tmp, 9);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 25) | (tmp << (64 - 25));
+ b9 = ror64(tmp, 25);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 44) | (tmp << (64 - 44));
+ b11 = ror64(tmp, 44);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 42) | (tmp << (64 - 42));
+ b13 = ror64(tmp, 42);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 19) | (tmp << (64 - 19));
+ b15 = ror64(tmp, 19);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 47) | (tmp << (64 - 47));
+ b3 = ror64(tmp, 47);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 44) | (tmp << (64 - 44));
+ b5 = ror64(tmp, 44);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 42) | (tmp << (64 - 42));
+ b5 = ror64(tmp, 42);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 53) | (tmp << (64 - 53));
+ b3 = ror64(tmp, 53);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 4) | (tmp << (64 - 4));
+ b7 = ror64(tmp, 4);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 51) | (tmp << (64 - 51));
+ b15 = ror64(tmp, 51);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 56) | (tmp << (64 - 56));
+ b11 = ror64(tmp, 56);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 34) | (tmp << (64 - 34));
+ b13 = ror64(tmp, 34);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 16) | (tmp << (64 - 16));
+ b9 = ror64(tmp, 16);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 30) | (tmp << (64 - 30));
+ b15 = ror64(tmp, 30);
b14 -= b15 + k4 + t2;
b15 -= k5 + 7;
tmp = b13 ^ b12;
- b13 = (tmp >> 44) | (tmp << (64 - 44));
+ b13 = ror64(tmp, 44);
b12 -= b13 + k2;
b13 -= k3 + t1;
tmp = b11 ^ b10;
- b11 = (tmp >> 47) | (tmp << (64 - 47));
+ b11 = ror64(tmp, 47);
b10 -= b11 + k0;
b11 -= k1;
tmp = b9 ^ b8;
- b9 = (tmp >> 12) | (tmp << (64 - 12));
+ b9 = ror64(tmp, 12);
b8 -= b9 + k15;
b9 -= k16;
tmp = b7 ^ b6;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b6 -= b7 + k13;
b7 -= k14;
tmp = b5 ^ b4;
- b5 = (tmp >> 37) | (tmp << (64 - 37));
+ b5 = ror64(tmp, 37);
b4 -= b5 + k11;
b5 -= k12;
tmp = b3 ^ b2;
- b3 = (tmp >> 9) | (tmp << (64 - 9));
+ b3 = ror64(tmp, 9);
b2 -= b3 + k9;
b3 -= k10;
tmp = b1 ^ b0;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b0 -= b1 + k7;
b1 -= k8;
tmp = b7 ^ b12;
- b7 = (tmp >> 25) | (tmp << (64 - 25));
+ b7 = ror64(tmp, 25);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 28) | (tmp << (64 - 28));
+ b5 = ror64(tmp, 28);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 47) | (tmp << (64 - 47));
+ b1 = ror64(tmp, 47);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 41) | (tmp << (64 - 41));
+ b9 = ror64(tmp, 41);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 48) | (tmp << (64 - 48));
+ b13 = ror64(tmp, 48);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 20) | (tmp << (64 - 20));
+ b11 = ror64(tmp, 20);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 5) | (tmp << (64 - 5));
+ b15 = ror64(tmp, 5);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 17) | (tmp << (64 - 17));
+ b9 = ror64(tmp, 17);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 59) | (tmp << (64 - 59));
+ b11 = ror64(tmp, 59);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 41) | (tmp << (64 - 41));
+ b13 = ror64(tmp, 41);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 34) | (tmp << (64 - 34));
+ b15 = ror64(tmp, 34);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 51) | (tmp << (64 - 51));
+ b3 = ror64(tmp, 51);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 4) | (tmp << (64 - 4));
+ b5 = ror64(tmp, 4);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 33) | (tmp << (64 - 33));
+ b7 = ror64(tmp, 33);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 52) | (tmp << (64 - 52));
+ b1 = ror64(tmp, 52);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 23) | (tmp << (64 - 23));
+ b5 = ror64(tmp, 23);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 18) | (tmp << (64 - 18));
+ b3 = ror64(tmp, 18);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 49) | (tmp << (64 - 49));
+ b7 = ror64(tmp, 49);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 55) | (tmp << (64 - 55));
+ b15 = ror64(tmp, 55);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 10) | (tmp << (64 - 10));
+ b11 = ror64(tmp, 10);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 19) | (tmp << (64 - 19));
+ b13 = ror64(tmp, 19);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 38) | (tmp << (64 - 38));
+ b9 = ror64(tmp, 38);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 37) | (tmp << (64 - 37));
+ b15 = ror64(tmp, 37);
b14 -= b15 + k3 + t1;
b15 -= k4 + 6;
tmp = b13 ^ b12;
- b13 = (tmp >> 22) | (tmp << (64 - 22));
+ b13 = ror64(tmp, 22);
b12 -= b13 + k1;
b13 -= k2 + t0;
tmp = b11 ^ b10;
- b11 = (tmp >> 17) | (tmp << (64 - 17));
+ b11 = ror64(tmp, 17);
b10 -= b11 + k16;
b11 -= k0;
tmp = b9 ^ b8;
- b9 = (tmp >> 8) | (tmp << (64 - 8));
+ b9 = ror64(tmp, 8);
b8 -= b9 + k14;
b9 -= k15;
tmp = b7 ^ b6;
- b7 = (tmp >> 47) | (tmp << (64 - 47));
+ b7 = ror64(tmp, 47);
b6 -= b7 + k12;
b7 -= k13;
tmp = b5 ^ b4;
- b5 = (tmp >> 8) | (tmp << (64 - 8));
+ b5 = ror64(tmp, 8);
b4 -= b5 + k10;
b5 -= k11;
tmp = b3 ^ b2;
- b3 = (tmp >> 13) | (tmp << (64 - 13));
+ b3 = ror64(tmp, 13);
b2 -= b3 + k8;
b3 -= k9;
tmp = b1 ^ b0;
- b1 = (tmp >> 24) | (tmp << (64 - 24));
+ b1 = ror64(tmp, 24);
b0 -= b1 + k6;
b1 -= k7;
tmp = b7 ^ b12;
- b7 = (tmp >> 20) | (tmp << (64 - 20));
+ b7 = ror64(tmp, 20);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 37) | (tmp << (64 - 37));
+ b3 = ror64(tmp, 37);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 31) | (tmp << (64 - 31));
+ b5 = ror64(tmp, 31);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 52) | (tmp << (64 - 52));
+ b9 = ror64(tmp, 52);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 35) | (tmp << (64 - 35));
+ b13 = ror64(tmp, 35);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 48) | (tmp << (64 - 48));
+ b11 = ror64(tmp, 48);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 9) | (tmp << (64 - 9));
+ b15 = ror64(tmp, 9);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 25) | (tmp << (64 - 25));
+ b9 = ror64(tmp, 25);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 44) | (tmp << (64 - 44));
+ b11 = ror64(tmp, 44);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 42) | (tmp << (64 - 42));
+ b13 = ror64(tmp, 42);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 19) | (tmp << (64 - 19));
+ b15 = ror64(tmp, 19);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 47) | (tmp << (64 - 47));
+ b3 = ror64(tmp, 47);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 44) | (tmp << (64 - 44));
+ b5 = ror64(tmp, 44);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 42) | (tmp << (64 - 42));
+ b5 = ror64(tmp, 42);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 53) | (tmp << (64 - 53));
+ b3 = ror64(tmp, 53);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 4) | (tmp << (64 - 4));
+ b7 = ror64(tmp, 4);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 51) | (tmp << (64 - 51));
+ b15 = ror64(tmp, 51);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 56) | (tmp << (64 - 56));
+ b11 = ror64(tmp, 56);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 34) | (tmp << (64 - 34));
+ b13 = ror64(tmp, 34);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 16) | (tmp << (64 - 16));
+ b9 = ror64(tmp, 16);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 30) | (tmp << (64 - 30));
+ b15 = ror64(tmp, 30);
b14 -= b15 + k2 + t0;
b15 -= k3 + 5;
tmp = b13 ^ b12;
- b13 = (tmp >> 44) | (tmp << (64 - 44));
+ b13 = ror64(tmp, 44);
b12 -= b13 + k0;
b13 -= k1 + t2;
tmp = b11 ^ b10;
- b11 = (tmp >> 47) | (tmp << (64 - 47));
+ b11 = ror64(tmp, 47);
b10 -= b11 + k15;
b11 -= k16;
tmp = b9 ^ b8;
- b9 = (tmp >> 12) | (tmp << (64 - 12));
+ b9 = ror64(tmp, 12);
b8 -= b9 + k13;
b9 -= k14;
tmp = b7 ^ b6;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b6 -= b7 + k11;
b7 -= k12;
tmp = b5 ^ b4;
- b5 = (tmp >> 37) | (tmp << (64 - 37));
+ b5 = ror64(tmp, 37);
b4 -= b5 + k9;
b5 -= k10;
tmp = b3 ^ b2;
- b3 = (tmp >> 9) | (tmp << (64 - 9));
+ b3 = ror64(tmp, 9);
b2 -= b3 + k7;
b3 -= k8;
tmp = b1 ^ b0;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b0 -= b1 + k5;
b1 -= k6;
tmp = b7 ^ b12;
- b7 = (tmp >> 25) | (tmp << (64 - 25));
+ b7 = ror64(tmp, 25);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 28) | (tmp << (64 - 28));
+ b5 = ror64(tmp, 28);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 47) | (tmp << (64 - 47));
+ b1 = ror64(tmp, 47);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 41) | (tmp << (64 - 41));
+ b9 = ror64(tmp, 41);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 48) | (tmp << (64 - 48));
+ b13 = ror64(tmp, 48);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 20) | (tmp << (64 - 20));
+ b11 = ror64(tmp, 20);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 5) | (tmp << (64 - 5));
+ b15 = ror64(tmp, 5);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 17) | (tmp << (64 - 17));
+ b9 = ror64(tmp, 17);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 59) | (tmp << (64 - 59));
+ b11 = ror64(tmp, 59);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 41) | (tmp << (64 - 41));
+ b13 = ror64(tmp, 41);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 34) | (tmp << (64 - 34));
+ b15 = ror64(tmp, 34);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 51) | (tmp << (64 - 51));
+ b3 = ror64(tmp, 51);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 4) | (tmp << (64 - 4));
+ b5 = ror64(tmp, 4);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 33) | (tmp << (64 - 33));
+ b7 = ror64(tmp, 33);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 52) | (tmp << (64 - 52));
+ b1 = ror64(tmp, 52);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 23) | (tmp << (64 - 23));
+ b5 = ror64(tmp, 23);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 18) | (tmp << (64 - 18));
+ b3 = ror64(tmp, 18);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 49) | (tmp << (64 - 49));
+ b7 = ror64(tmp, 49);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 55) | (tmp << (64 - 55));
+ b15 = ror64(tmp, 55);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 10) | (tmp << (64 - 10));
+ b11 = ror64(tmp, 10);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 19) | (tmp << (64 - 19));
+ b13 = ror64(tmp, 19);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 38) | (tmp << (64 - 38));
+ b9 = ror64(tmp, 38);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 37) | (tmp << (64 - 37));
+ b15 = ror64(tmp, 37);
b14 -= b15 + k1 + t2;
b15 -= k2 + 4;
tmp = b13 ^ b12;
- b13 = (tmp >> 22) | (tmp << (64 - 22));
+ b13 = ror64(tmp, 22);
b12 -= b13 + k16;
b13 -= k0 + t1;
tmp = b11 ^ b10;
- b11 = (tmp >> 17) | (tmp << (64 - 17));
+ b11 = ror64(tmp, 17);
b10 -= b11 + k14;
b11 -= k15;
tmp = b9 ^ b8;
- b9 = (tmp >> 8) | (tmp << (64 - 8));
+ b9 = ror64(tmp, 8);
b8 -= b9 + k12;
b9 -= k13;
tmp = b7 ^ b6;
- b7 = (tmp >> 47) | (tmp << (64 - 47));
+ b7 = ror64(tmp, 47);
b6 -= b7 + k10;
b7 -= k11;
tmp = b5 ^ b4;
- b5 = (tmp >> 8) | (tmp << (64 - 8));
+ b5 = ror64(tmp, 8);
b4 -= b5 + k8;
b5 -= k9;
tmp = b3 ^ b2;
- b3 = (tmp >> 13) | (tmp << (64 - 13));
+ b3 = ror64(tmp, 13);
b2 -= b3 + k6;
b3 -= k7;
tmp = b1 ^ b0;
- b1 = (tmp >> 24) | (tmp << (64 - 24));
+ b1 = ror64(tmp, 24);
b0 -= b1 + k4;
b1 -= k5;
tmp = b7 ^ b12;
- b7 = (tmp >> 20) | (tmp << (64 - 20));
+ b7 = ror64(tmp, 20);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 37) | (tmp << (64 - 37));
+ b3 = ror64(tmp, 37);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 31) | (tmp << (64 - 31));
+ b5 = ror64(tmp, 31);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 52) | (tmp << (64 - 52));
+ b9 = ror64(tmp, 52);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 35) | (tmp << (64 - 35));
+ b13 = ror64(tmp, 35);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 48) | (tmp << (64 - 48));
+ b11 = ror64(tmp, 48);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 9) | (tmp << (64 - 9));
+ b15 = ror64(tmp, 9);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 25) | (tmp << (64 - 25));
+ b9 = ror64(tmp, 25);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 44) | (tmp << (64 - 44));
+ b11 = ror64(tmp, 44);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 42) | (tmp << (64 - 42));
+ b13 = ror64(tmp, 42);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 19) | (tmp << (64 - 19));
+ b15 = ror64(tmp, 19);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 47) | (tmp << (64 - 47));
+ b3 = ror64(tmp, 47);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 44) | (tmp << (64 - 44));
+ b5 = ror64(tmp, 44);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 42) | (tmp << (64 - 42));
+ b5 = ror64(tmp, 42);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 53) | (tmp << (64 - 53));
+ b3 = ror64(tmp, 53);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 4) | (tmp << (64 - 4));
+ b7 = ror64(tmp, 4);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 51) | (tmp << (64 - 51));
+ b15 = ror64(tmp, 51);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 56) | (tmp << (64 - 56));
+ b11 = ror64(tmp, 56);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 34) | (tmp << (64 - 34));
+ b13 = ror64(tmp, 34);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 16) | (tmp << (64 - 16));
+ b9 = ror64(tmp, 16);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 30) | (tmp << (64 - 30));
+ b15 = ror64(tmp, 30);
b14 -= b15 + k0 + t1;
b15 -= k1 + 3;
tmp = b13 ^ b12;
- b13 = (tmp >> 44) | (tmp << (64 - 44));
+ b13 = ror64(tmp, 44);
b12 -= b13 + k15;
b13 -= k16 + t0;
tmp = b11 ^ b10;
- b11 = (tmp >> 47) | (tmp << (64 - 47));
+ b11 = ror64(tmp, 47);
b10 -= b11 + k13;
b11 -= k14;
tmp = b9 ^ b8;
- b9 = (tmp >> 12) | (tmp << (64 - 12));
+ b9 = ror64(tmp, 12);
b8 -= b9 + k11;
b9 -= k12;
tmp = b7 ^ b6;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b6 -= b7 + k9;
b7 -= k10;
tmp = b5 ^ b4;
- b5 = (tmp >> 37) | (tmp << (64 - 37));
+ b5 = ror64(tmp, 37);
b4 -= b5 + k7;
b5 -= k8;
tmp = b3 ^ b2;
- b3 = (tmp >> 9) | (tmp << (64 - 9));
+ b3 = ror64(tmp, 9);
b2 -= b3 + k5;
b3 -= k6;
tmp = b1 ^ b0;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b0 -= b1 + k3;
b1 -= k4;
tmp = b7 ^ b12;
- b7 = (tmp >> 25) | (tmp << (64 - 25));
+ b7 = ror64(tmp, 25);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 28) | (tmp << (64 - 28));
+ b5 = ror64(tmp, 28);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 47) | (tmp << (64 - 47));
+ b1 = ror64(tmp, 47);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 41) | (tmp << (64 - 41));
+ b9 = ror64(tmp, 41);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 48) | (tmp << (64 - 48));
+ b13 = ror64(tmp, 48);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 20) | (tmp << (64 - 20));
+ b11 = ror64(tmp, 20);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 5) | (tmp << (64 - 5));
+ b15 = ror64(tmp, 5);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 17) | (tmp << (64 - 17));
+ b9 = ror64(tmp, 17);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 59) | (tmp << (64 - 59));
+ b11 = ror64(tmp, 59);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 41) | (tmp << (64 - 41));
+ b13 = ror64(tmp, 41);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 34) | (tmp << (64 - 34));
+ b15 = ror64(tmp, 34);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 51) | (tmp << (64 - 51));
+ b3 = ror64(tmp, 51);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 4) | (tmp << (64 - 4));
+ b5 = ror64(tmp, 4);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 33) | (tmp << (64 - 33));
+ b7 = ror64(tmp, 33);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 52) | (tmp << (64 - 52));
+ b1 = ror64(tmp, 52);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 23) | (tmp << (64 - 23));
+ b5 = ror64(tmp, 23);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 18) | (tmp << (64 - 18));
+ b3 = ror64(tmp, 18);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 49) | (tmp << (64 - 49));
+ b7 = ror64(tmp, 49);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 55) | (tmp << (64 - 55));
+ b15 = ror64(tmp, 55);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 10) | (tmp << (64 - 10));
+ b11 = ror64(tmp, 10);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 19) | (tmp << (64 - 19));
+ b13 = ror64(tmp, 19);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 38) | (tmp << (64 - 38));
+ b9 = ror64(tmp, 38);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 37) | (tmp << (64 - 37));
+ b15 = ror64(tmp, 37);
b14 -= b15 + k16 + t0;
b15 -= k0 + 2;
tmp = b13 ^ b12;
- b13 = (tmp >> 22) | (tmp << (64 - 22));
+ b13 = ror64(tmp, 22);
b12 -= b13 + k14;
b13 -= k15 + t2;
tmp = b11 ^ b10;
- b11 = (tmp >> 17) | (tmp << (64 - 17));
+ b11 = ror64(tmp, 17);
b10 -= b11 + k12;
b11 -= k13;
tmp = b9 ^ b8;
- b9 = (tmp >> 8) | (tmp << (64 - 8));
+ b9 = ror64(tmp, 8);
b8 -= b9 + k10;
b9 -= k11;
tmp = b7 ^ b6;
- b7 = (tmp >> 47) | (tmp << (64 - 47));
+ b7 = ror64(tmp, 47);
b6 -= b7 + k8;
b7 -= k9;
tmp = b5 ^ b4;
- b5 = (tmp >> 8) | (tmp << (64 - 8));
+ b5 = ror64(tmp, 8);
b4 -= b5 + k6;
b5 -= k7;
tmp = b3 ^ b2;
- b3 = (tmp >> 13) | (tmp << (64 - 13));
+ b3 = ror64(tmp, 13);
b2 -= b3 + k4;
b3 -= k5;
tmp = b1 ^ b0;
- b1 = (tmp >> 24) | (tmp << (64 - 24));
+ b1 = ror64(tmp, 24);
b0 -= b1 + k2;
b1 -= k3;
tmp = b7 ^ b12;
- b7 = (tmp >> 20) | (tmp << (64 - 20));
+ b7 = ror64(tmp, 20);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 37) | (tmp << (64 - 37));
+ b3 = ror64(tmp, 37);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 31) | (tmp << (64 - 31));
+ b5 = ror64(tmp, 31);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 23) | (tmp << (64 - 23));
+ b1 = ror64(tmp, 23);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 52) | (tmp << (64 - 52));
+ b9 = ror64(tmp, 52);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 35) | (tmp << (64 - 35));
+ b13 = ror64(tmp, 35);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 48) | (tmp << (64 - 48));
+ b11 = ror64(tmp, 48);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 9) | (tmp << (64 - 9));
+ b15 = ror64(tmp, 9);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 25) | (tmp << (64 - 25));
+ b9 = ror64(tmp, 25);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 44) | (tmp << (64 - 44));
+ b11 = ror64(tmp, 44);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 42) | (tmp << (64 - 42));
+ b13 = ror64(tmp, 42);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 19) | (tmp << (64 - 19));
+ b15 = ror64(tmp, 19);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 46) | (tmp << (64 - 46));
+ b1 = ror64(tmp, 46);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 47) | (tmp << (64 - 47));
+ b3 = ror64(tmp, 47);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 44) | (tmp << (64 - 44));
+ b5 = ror64(tmp, 44);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 42) | (tmp << (64 - 42));
+ b5 = ror64(tmp, 42);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 53) | (tmp << (64 - 53));
+ b3 = ror64(tmp, 53);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 4) | (tmp << (64 - 4));
+ b7 = ror64(tmp, 4);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 51) | (tmp << (64 - 51));
+ b15 = ror64(tmp, 51);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 56) | (tmp << (64 - 56));
+ b11 = ror64(tmp, 56);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 34) | (tmp << (64 - 34));
+ b13 = ror64(tmp, 34);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 16) | (tmp << (64 - 16));
+ b9 = ror64(tmp, 16);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 30) | (tmp << (64 - 30));
+ b15 = ror64(tmp, 30);
b14 -= b15 + k15 + t2;
b15 -= k16 + 1;
tmp = b13 ^ b12;
- b13 = (tmp >> 44) | (tmp << (64 - 44));
+ b13 = ror64(tmp, 44);
b12 -= b13 + k13;
b13 -= k14 + t1;
tmp = b11 ^ b10;
- b11 = (tmp >> 47) | (tmp << (64 - 47));
+ b11 = ror64(tmp, 47);
b10 -= b11 + k11;
b11 -= k12;
tmp = b9 ^ b8;
- b9 = (tmp >> 12) | (tmp << (64 - 12));
+ b9 = ror64(tmp, 12);
b8 -= b9 + k9;
b9 -= k10;
tmp = b7 ^ b6;
- b7 = (tmp >> 31) | (tmp << (64 - 31));
+ b7 = ror64(tmp, 31);
b6 -= b7 + k7;
b7 -= k8;
tmp = b5 ^ b4;
- b5 = (tmp >> 37) | (tmp << (64 - 37));
+ b5 = ror64(tmp, 37);
b4 -= b5 + k5;
b5 -= k6;
tmp = b3 ^ b2;
- b3 = (tmp >> 9) | (tmp << (64 - 9));
+ b3 = ror64(tmp, 9);
b2 -= b3 + k3;
b3 -= k4;
tmp = b1 ^ b0;
- b1 = (tmp >> 41) | (tmp << (64 - 41));
+ b1 = ror64(tmp, 41);
b0 -= b1 + k1;
b1 -= k2;
tmp = b7 ^ b12;
- b7 = (tmp >> 25) | (tmp << (64 - 25));
+ b7 = ror64(tmp, 25);
b12 -= b7;
tmp = b3 ^ b10;
- b3 = (tmp >> 16) | (tmp << (64 - 16));
+ b3 = ror64(tmp, 16);
b10 -= b3;
tmp = b5 ^ b8;
- b5 = (tmp >> 28) | (tmp << (64 - 28));
+ b5 = ror64(tmp, 28);
b8 -= b5;
tmp = b1 ^ b14;
- b1 = (tmp >> 47) | (tmp << (64 - 47));
+ b1 = ror64(tmp, 47);
b14 -= b1;
tmp = b9 ^ b4;
- b9 = (tmp >> 41) | (tmp << (64 - 41));
+ b9 = ror64(tmp, 41);
b4 -= b9;
tmp = b13 ^ b6;
- b13 = (tmp >> 48) | (tmp << (64 - 48));
+ b13 = ror64(tmp, 48);
b6 -= b13;
tmp = b11 ^ b2;
- b11 = (tmp >> 20) | (tmp << (64 - 20));
+ b11 = ror64(tmp, 20);
b2 -= b11;
tmp = b15 ^ b0;
- b15 = (tmp >> 5) | (tmp << (64 - 5));
+ b15 = ror64(tmp, 5);
b0 -= b15;
tmp = b9 ^ b10;
- b9 = (tmp >> 17) | (tmp << (64 - 17));
+ b9 = ror64(tmp, 17);
b10 -= b9;
tmp = b11 ^ b8;
- b11 = (tmp >> 59) | (tmp << (64 - 59));
+ b11 = ror64(tmp, 59);
b8 -= b11;
tmp = b13 ^ b14;
- b13 = (tmp >> 41) | (tmp << (64 - 41));
+ b13 = ror64(tmp, 41);
b14 -= b13;
tmp = b15 ^ b12;
- b15 = (tmp >> 34) | (tmp << (64 - 34));
+ b15 = ror64(tmp, 34);
b12 -= b15;
tmp = b1 ^ b6;
- b1 = (tmp >> 13) | (tmp << (64 - 13));
+ b1 = ror64(tmp, 13);
b6 -= b1;
tmp = b3 ^ b4;
- b3 = (tmp >> 51) | (tmp << (64 - 51));
+ b3 = ror64(tmp, 51);
b4 -= b3;
tmp = b5 ^ b2;
- b5 = (tmp >> 4) | (tmp << (64 - 4));
+ b5 = ror64(tmp, 4);
b2 -= b5;
tmp = b7 ^ b0;
- b7 = (tmp >> 33) | (tmp << (64 - 33));
+ b7 = ror64(tmp, 33);
b0 -= b7;
tmp = b1 ^ b8;
- b1 = (tmp >> 52) | (tmp << (64 - 52));
+ b1 = ror64(tmp, 52);
b8 -= b1;
tmp = b5 ^ b14;
- b5 = (tmp >> 23) | (tmp << (64 - 23));
+ b5 = ror64(tmp, 23);
b14 -= b5;
tmp = b3 ^ b12;
- b3 = (tmp >> 18) | (tmp << (64 - 18));
+ b3 = ror64(tmp, 18);
b12 -= b3;
tmp = b7 ^ b10;
- b7 = (tmp >> 49) | (tmp << (64 - 49));
+ b7 = ror64(tmp, 49);
b10 -= b7;
tmp = b15 ^ b4;
- b15 = (tmp >> 55) | (tmp << (64 - 55));
+ b15 = ror64(tmp, 55);
b4 -= b15;
tmp = b11 ^ b6;
- b11 = (tmp >> 10) | (tmp << (64 - 10));
+ b11 = ror64(tmp, 10);
b6 -= b11;
tmp = b13 ^ b2;
- b13 = (tmp >> 19) | (tmp << (64 - 19));
+ b13 = ror64(tmp, 19);
b2 -= b13;
tmp = b9 ^ b0;
- b9 = (tmp >> 38) | (tmp << (64 - 38));
+ b9 = ror64(tmp, 38);
b0 -= b9;
tmp = b15 ^ b14;
- b15 = (tmp >> 37) | (tmp << (64 - 37));
+ b15 = ror64(tmp, 37);
b14 -= b15 + k14 + t1;
b15 -= k15;
tmp = b13 ^ b12;
- b13 = (tmp >> 22) | (tmp << (64 - 22));
+ b13 = ror64(tmp, 22);
b12 -= b13 + k12;
b13 -= k13 + t0;
tmp = b11 ^ b10;
- b11 = (tmp >> 17) | (tmp << (64 - 17));
+ b11 = ror64(tmp, 17);
b10 -= b11 + k10;
b11 -= k11;
tmp = b9 ^ b8;
- b9 = (tmp >> 8) | (tmp << (64 - 8));
+ b9 = ror64(tmp, 8);
b8 -= b9 + k8;
b9 -= k9;
tmp = b7 ^ b6;
- b7 = (tmp >> 47) | (tmp << (64 - 47));
+ b7 = ror64(tmp, 47);
b6 -= b7 + k6;
b7 -= k7;
tmp = b5 ^ b4;
- b5 = (tmp >> 8) | (tmp << (64 - 8));
+ b5 = ror64(tmp, 8);
b4 -= b5 + k4;
b5 -= k5;
tmp = b3 ^ b2;
- b3 = (tmp >> 13) | (tmp << (64 - 13));
+ b3 = ror64(tmp, 13);
b2 -= b3 + k2;
b3 -= k3;
tmp = b1 ^ b0;
- b1 = (tmp >> 24) | (tmp << (64 - 24));
+ b1 = ror64(tmp, 24);
b0 -= b1 + k0;
b1 -= k1;
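Every hunk in the block above is the same mechanical substitution: the open-coded 64-bit rotate-right idiom is replaced by the kernel's ror64() helper from <linux/bitops.h>, with no change in behaviour. A minimal sketch of the equivalence (the ror64() below only mirrors the helper's semantics for illustration, it is not the kernel source):

	#include <stdint.h>

	/* Stand-in with the same semantics as the kernel's ror64() helper. */
	static inline uint64_t ror64(uint64_t word, unsigned int shift)
	{
		return (word >> (shift & 63)) | (word << ((64 - shift) & 63));
	}

	/* The open-coded form removed by the patch (valid for 1 <= n <= 63). */
	static inline uint64_t ror64_open_coded(uint64_t tmp, unsigned int n)
	{
		return (tmp >> n) | (tmp << (64 - n));
	}

So, for example, ror64(tmp, 53) replaces (tmp >> 53) | (tmp << (64 - 53)) verbatim in each round.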
diff --git a/drivers/staging/slicoss/slicoss.c b/drivers/staging/slicoss/slicoss.c
index 6d50fc4fd02e..ac126d4f3117 100644
--- a/drivers/staging/slicoss/slicoss.c
+++ b/drivers/staging/slicoss/slicoss.c
@@ -898,6 +898,7 @@ static void slic_upr_start(struct adapter *adapter)
{
struct slic_upr *upr;
__iomem struct slic_regs *slic_regs = adapter->slic_regs;
+
upr = adapter->upr_list;
if (!upr)
return;
@@ -1144,7 +1145,7 @@ static int slic_config_get(struct adapter *adapter, u32 config, u32 config_h)
/*
* Compute a checksum of the EEPROM according to RFC 1071.
*/
-static u16 slic_eeprom_cksum(void *eeprom, unsigned len)
+static u16 slic_eeprom_cksum(void *eeprom, unsigned int len)
{
u16 *wp = eeprom;
u32 checksum = 0;
@@ -1855,6 +1856,11 @@ static void slic_xmit_build_request(struct adapter *adapter,
ihcmd->u.slic_buffers.totlen = skb->len;
phys_addr = pci_map_single(adapter->pcidev, skb->data, skb->len,
PCI_DMA_TODEVICE);
+ if (pci_dma_mapping_error(adapter->pcidev, phys_addr)) {
+ kfree_skb(skb);
+ dev_err(&adapter->pcidev->dev, "DMA mapping error\n");
+ return;
+ }
ihcmd->u.slic_buffers.bufs[0].paddrl = SLIC_GET_ADDR_LOW(phys_addr);
ihcmd->u.slic_buffers.bufs[0].paddrh = SLIC_GET_ADDR_HIGH(phys_addr);
ihcmd->u.slic_buffers.bufs[0].length = skb->len;
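The last slicoss hunk adds the standard guard against a failed streaming DMA mapping before the address is handed to the hardware. A hedged sketch of that pattern as a transmit-path fragment, assuming a struct pci_dev *pdev and a struct sk_buff *skb are in scope (both names are illustrative):

	dma_addr_t phys_addr;

	phys_addr = pci_map_single(pdev, skb->data, skb->len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(pdev, phys_addr)) {
		/* Never post a failed mapping to the device: drop and report. */
		kfree_skb(skb);
		dev_err(&pdev->dev, "DMA mapping error\n");
		return;
	}
	/* phys_addr may now be programmed into the transmit descriptor. */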
diff --git a/drivers/staging/sm750fb/ddk750_chip.c b/drivers/staging/sm750fb/ddk750_chip.c
index 95f7cae3cc23..f80ee776677f 100644
--- a/drivers/staging/sm750fb/ddk750_chip.c
+++ b/drivers/staging/sm750fb/ddk750_chip.c
@@ -306,7 +306,7 @@ unsigned int calcPllValue(unsigned int request_orig, pll_value_t *pll)
unsigned int input, request;
unsigned int tmpClock, ret;
const int max_OD = 3;
- int max_d;
+ int max_d = 6;
if (getChipType() == SM750LE) {
/* SM750LE don't have prgrammable PLL and M/N values to work on.
diff --git a/drivers/staging/speakup/main.c b/drivers/staging/speakup/main.c
index a22fb07512a1..97ca4ecca8a9 100644
--- a/drivers/staging/speakup/main.c
+++ b/drivers/staging/speakup/main.c
@@ -263,7 +263,7 @@ static struct notifier_block vt_notifier_block = {
static unsigned char get_attributes(struct vc_data *vc, u16 *pos)
{
pos = screen_pos(vc, pos - (u16 *)vc->vc_origin, 1);
- return (u_char) (scr_readw(pos) >> 8);
+ return (scr_readw(pos) & ~vc->vc_hi_font_mask) >> 8;
}
static void speakup_date(struct vc_data *vc)
@@ -473,8 +473,10 @@ static u16 get_char(struct vc_data *vc, u16 *pos, u_char *attribs)
w = scr_readw(pos);
c = w & 0xff;
- if (w & vc->vc_hi_font_mask)
+ if (w & vc->vc_hi_font_mask) {
+ w &= ~vc->vc_hi_font_mask;
c |= 0x100;
+ }
ch = inverse_translate(vc, c, 0);
*attribs = (w & 0xff00) >> 8;
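Both speakup hunks handle consoles using 512-glyph fonts: when vc_hi_font_mask is set in a screen word, that bit belongs to the character (as 0x100), not to the attribute byte, so it must be cleared before the attributes are read. A consolidated sketch of the decode the two hunks implement, assuming vc and pos as in the driver (illustrative fragment):

	u16 w = scr_readw(pos);
	u16 c = w & 0xff;
	u8 attr;

	if (w & vc->vc_hi_font_mask) {
		/* Fold the high-font bit into the character and drop it
		 * from the word before extracting the attribute byte.
		 */
		w &= ~vc->vc_hi_font_mask;
		c |= 0x100;
	}
	attr = (w & 0xff00) >> 8;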
diff --git a/drivers/staging/speakup/serialio.h b/drivers/staging/speakup/serialio.h
index 1b399214ecf7..3ad7ff0bc3c3 100644
--- a/drivers/staging/speakup/serialio.h
+++ b/drivers/staging/speakup/serialio.h
@@ -6,6 +6,7 @@
#ifndef __sparc__
#include <linux/serial.h>
#endif
+#include <linux/serial_core.h>
/*
* this is cut&paste from 8250.h. Get rid of the structure, the definitions
@@ -16,7 +17,7 @@ struct old_serial_port {
unsigned int baud_base;
unsigned int port;
unsigned int irq;
- unsigned int flags; /* unused */
+ upf_t flags; /* unused */
};
/* countdown values for serial timeouts in us */
diff --git a/drivers/staging/unisys/Documentation/ABI/sysfs-platform-visorchipset b/drivers/staging/unisys/Documentation/ABI/sysfs-platform-visorchipset
index b0498ff32405..c2359de17eaf 100644
--- a/drivers/staging/unisys/Documentation/ABI/sysfs-platform-visorchipset
+++ b/drivers/staging/unisys/Documentation/ABI/sysfs-platform-visorchipset
@@ -50,20 +50,6 @@ Description: This field is used to tell s-Par which type of recovery tool
commission the guest.
Users: sparmaintainer@unisys.com
-What: guest/chipsetready
-Date: 7/18/2014
-KernelVersion: TBD
-Contact: sparmaintainer@unisys.com
-Description: This entry is used by Unisys application software on the guest
- to acknowledge completion of specific events for integration
- purposes, but these acknowledgements are not required for the
- guest to operate correctly. The interface accepts one of two
- strings: MODULES_LOADED to indicate that the s-Par driver
- modules have been loaded successfully, or CALLHOMEDISK_MOUNTED,
- which indicates that the disk used to support call home services
- has been successfully mounted.
-Users: sparmaintainer@unisys.com
-
What: parahotplug/deviceenabled
Date: 7/18/2014
KernelVersion: TBD
diff --git a/drivers/staging/unisys/Documentation/overview.txt b/drivers/staging/unisys/Documentation/overview.txt
index c2d8dd4a2e41..1146c1cf5c2a 100644
--- a/drivers/staging/unisys/Documentation/overview.txt
+++ b/drivers/staging/unisys/Documentation/overview.txt
@@ -137,12 +137,6 @@ called automatically by the visorbus driver at appropriate times:
* The resume() function is the "book-end" to pause(), and is described above.
-If/when a function driver creates a Linux device (that needs to be accessed
-from usermode), it calls visorbus_registerdevnode(), passing the major and
-minor number of the device. (Of course not all function drivers will need
-to do this.) This simply creates the appropriate "devmajorminor" sysfs entry
-described below, so that a hotplug script can use it to create a device node.
-
2.1.3. sysfs Advertised Information
-----------------------------------
@@ -197,19 +191,6 @@ The following files exist under /sys/devices/visorbus<x>/vbus<x>:dev<y>:
if the appropriate function driver has not
been loaded yet.
- devmajorminor
-
- <devname> if applicable, each file here identifies (via
- ... its file contents) the
- "<major>:<minor>" needed for a device node to
- enable access from usermode. There is exactly
- one file here for each different device node
- that can be accessed (from usermode). Note
- that this info is provided by a particular
- function driver, so these will not exist
- until AFTER the appropriate function driver
- controlling this device class is loaded.
-
channel properties of the device channel (all in
ascii text format)
diff --git a/drivers/staging/unisys/Documentation/proc-entries.txt b/drivers/staging/unisys/Documentation/proc-entries.txt
deleted file mode 100644
index 426f92b1c577..000000000000
--- a/drivers/staging/unisys/Documentation/proc-entries.txt
+++ /dev/null
@@ -1,93 +0,0 @@
- s-Par Proc Entries
-This document describes the proc entries created by the Unisys s-Par modules.
-
-Support Module Entries
-These entries are provided primarily for debugging.
-
-/proc/uislib/info: This entry contains debugging information for the
-uislib module, including bus information and memory usage.
-
-/proc/visorchipset/controlvm: This directory contains debugging
-entries for the controlvm channel used by visorchipset.
-
-/proc/uislib/platform: This entry is used to display the platform
-number this node is in the system. For some guests, this may be
-invalid.
-
-/proc/visorchipset/chipsetready: This entry is written to by scripts
-to signify that any user level activity has been completed before the
-guest can be considered running and is shown as running in the s-Par
-UI.
-
-Device Entries
-These entries provide status of the devices shared by a service partition.
-
-/proc/uislib/vbus: this is a directory containing entries for each
-virtual bus. Each numbered sub-directory contains an info entry, which
-describes the devices that appear on that bus.
-
-/proc/uislib/cycles_before_wait: This entry is used to tune
-performance, by setting the number of cycles we wait before going idle
-when in polling mode. A longer time will reduce message latency but
-spend more processing time polling.
-
-/proc/uislib/smart_wakeup: This entry is used to tune performance, by
-enabling or disabling smart wakeup.
-
-/proc/virthba/info: This entry contains debugging information for the
-virthba module, including interrupt information and memory usage.
-
-/proc/virthba/enable_ints: This entry controls interrupt use by the
-virthba module. Writing a 0 to this entry will disable interrupts.
-
-/proc/virtnic/info: This entry contains debugging information for the
-virtnic module, including interrupt information, send and receive
-counts, and other device information.
-
-/proc/virtnic/ethX: This is a directory containing entries for each
-virtual NIC. Each named subdirectory contains two entries,
-clientstring and zone.
-
-/proc/virtpci/info: This entry contains debugging information for the
-virtpci module, including virtual PCI bus information and device
-locations.
-
-/proc/virtnic/enable_ints: This entry controls interrupt use by the
-virtnic module. Writing a 0 to this entry will disable interrupts.
-
-Visorconinclient, visordiag, visornoop, visorserialclient, and
-visorvideoclient Entries
-
-The entries in proc for these modules all follow the same
-pattern. Each module has its own proc directory with the same name,
-e.g. visordiag presents a /proc/visordiag directory. Inside of the
-module's directory are a device directory, which contains one numbered
-directory for each device provided by that module. Each device has a
-diag entry that presents the device number and visorbus name for that
-device. The module directory also has a driver/diag entry, which
-reports the corresponding s-Par version number of the driver.
-
-Automated Installation Entries
-
-These entries are used to pass information between the s-Par platform
-and the Linux-based installation and recovery tool. These values are
-read/write, however, the guest can only reset them to 0, or report an
-error status through the installer entry. The values are only set via
-s-Par's firmware interface, to help prevent accidentally booting into
-the tool.
-
-/proc/visorchipset/boottotool: This entry instructs s-Par that the
-next reboot will launch the installation and recovery tool. If set to
-0, the next boot will happen according to the UEFI boot manager
-settings.
-
-/proc/visorchipset/toolaction: This entry indicates the installation
-and recovery tool mode requested for the next boot.
-
-/proc/visorchipset/installer: this entry is used by the installation
-and recovery tool to pass status and result information back to the
-s-Par firmware.
-
-/proc/visorchipset/partition: This directory contains the guest
-partition configuration data for each virtual bus, for use during
-installation and at runtime for s-Par service partitions.
diff --git a/drivers/staging/unisys/MAINTAINERS b/drivers/staging/unisys/MAINTAINERS
index cc46e37e64c1..1f0425bf3583 100644
--- a/drivers/staging/unisys/MAINTAINERS
+++ b/drivers/staging/unisys/MAINTAINERS
@@ -2,5 +2,4 @@ Unisys s-Par drivers
M: David Kershner <sparmaintainer@unisys.com>
S: Maintained
F: Documentation/s-Par/overview.txt
-F: Documentation/s-Par/proc-entries.txt
F: drivers/staging/unisys/
diff --git a/drivers/staging/unisys/include/channel.h b/drivers/staging/unisys/include/channel.h
index 5af59a5fce61..db4e6b28755b 100644
--- a/drivers/staging/unisys/include/channel.h
+++ b/drivers/staging/unisys/include/channel.h
@@ -76,9 +76,9 @@ enum channel_clientstate {
};
static inline const u8 *
-ULTRA_CHANNELCLI_STRING(u32 v)
+ULTRA_CHANNELCLI_STRING(u32 state)
{
- switch (v) {
+ switch (state) {
case CHANNELCLI_DETACHED:
return (const u8 *)("DETACHED");
case CHANNELCLI_DISABLED:
@@ -411,7 +411,7 @@ spar_channel_client_acquire_os(void __iomem *ch, u8 *id)
mb(); /* required for channel synch */
}
if (readl(&hdr->cli_state_os) == CHANNELCLI_OWNED) {
- if (readb(&hdr->cli_error_os) != 0) {
+ if (readb(&hdr->cli_error_os)) {
/* we are in an error msg throttling state;
* come out of it
*/
@@ -459,7 +459,7 @@ spar_channel_client_acquire_os(void __iomem *ch, u8 *id)
mb(); /* required for channel synch */
return 0;
}
- if (readb(&hdr->cli_error_os) != 0) {
+ if (readb(&hdr->cli_error_os)) {
/* we are in an error msg throttling state; come out of it */
pr_info("%s Channel OS client acquire now successful\n", id);
writeb(0, &hdr->cli_error_os);
@@ -472,7 +472,7 @@ spar_channel_client_release_os(void __iomem *ch, u8 *id)
{
struct channel_header __iomem *hdr = ch;
- if (readb(&hdr->cli_error_os) != 0) {
+ if (readb(&hdr->cli_error_os)) {
/* we are in an error msg throttling state; come out of it */
pr_info("%s Channel OS client error state cleared\n", id);
writeb(0, &hdr->cli_error_os);
diff --git a/drivers/staging/unisys/include/iochannel.h b/drivers/staging/unisys/include/iochannel.h
index 880d9f04cbcf..5ccf81485d72 100644
--- a/drivers/staging/unisys/include/iochannel.h
+++ b/drivers/staging/unisys/include/iochannel.h
@@ -253,48 +253,6 @@ struct uiscmdrsp_scsi {
/* SCSI device version for no disk inquiry result */
#define SCSI_SPC2_VER 4 /* indicates SCSI SPC2 (SPC3 is 5) */
-/* Windows and Linux want different things for a non-existent lun. So, we'll let
- * caller pass in the peripheral qualifier and type.
- * NOTE:[4] SCSI returns (n-4); so we return length-1-4 or length-5.
- */
-
-#define SET_NO_DISK_INQUIRY_RESULT(buf, len, lun, lun0notpresent, notpresent) \
- do { \
- memset(buf, 0, \
- MINNUM(len, \
- (unsigned int)NO_DISK_INQUIRY_RESULT_LEN)); \
- buf[2] = (u8)SCSI_SPC2_VER; \
- if (lun == 0) { \
- buf[0] = (u8)lun0notpresent; \
- buf[3] = (u8)DEV_HISUPPORT; \
- } else \
- buf[0] = (u8)notpresent; \
- buf[4] = (u8)( \
- MINNUM(len, \
- (unsigned int)NO_DISK_INQUIRY_RESULT_LEN) - 5);\
- if (len >= NO_DISK_INQUIRY_RESULT_LEN) { \
- buf[8] = 'D'; \
- buf[9] = 'E'; \
- buf[10] = 'L'; \
- buf[11] = 'L'; \
- buf[16] = 'P'; \
- buf[17] = 'S'; \
- buf[18] = 'E'; \
- buf[19] = 'U'; \
- buf[20] = 'D'; \
- buf[21] = 'O'; \
- buf[22] = ' '; \
- buf[23] = 'D'; \
- buf[24] = 'E'; \
- buf[25] = 'V'; \
- buf[26] = 'I'; \
- buf[27] = 'C'; \
- buf[28] = 'E'; \
- buf[30] = ' '; \
- buf[31] = '.'; \
- } \
- } while (0)
-
/* Struct & Defines to support sense information. */
/* The following struct is returned in sensebuf field in uiscmdrsp_scsi. It is
diff --git a/drivers/staging/unisys/include/visorbus.h b/drivers/staging/unisys/include/visorbus.h
index 2a64a9ce0208..9baf1ec70d01 100644
--- a/drivers/staging/unisys/include/visorbus.h
+++ b/drivers/staging/unisys/include/visorbus.h
@@ -61,54 +61,55 @@ struct visor_channeltype_descriptor {
const char *name;
};
-/** Information provided by each visor driver when it registers with the
- * visorbus driver.
+/**
+ * struct visor_driver - Information provided by each visor driver when it
+ * registers with the visorbus driver.
+ * @name: Name of the visor driver.
+ * @version: The numbered version of the driver (x.x.xxx).
+ * @vertag: A human readable version string.
+ * @owner: The module owner.
+ * @channel_types: Types of channels handled by this driver, ending with
+ * a zero GUID. Our specialized BUS.match() method knows
+ * about this list, and uses it to determine whether this
+ * driver will in fact handle a new device that it has
+ * detected.
+ * @probe: Called when a new device comes online, by our probe()
+ * function specified by driver.probe() (triggered
+ * ultimately by some call to driver_register(),
+ * bus_add_driver(), or driver_attach()).
+ * @remove: Called when a new device is removed, by our remove()
+ * function specified by driver.remove() (triggered
+ * ultimately by some call to device_release_driver()).
+ * @channel_interrupt: Called periodically, whenever there is a possibility
+ * that "something interesting" may have happened to the
+ * channel.
+ * @pause: Called to initiate a change of the device's state. If
+ * the return valu`e is < 0, there was an error and the
+ * state transition will NOT occur. If the return value
+ * is >= 0, then the state transition was INITIATED
+ * successfully, and complete_func() will be called (or
+ * was just called) with the final status when either the
+ * state transition fails or completes successfully.
+ * @resume: Behaves similarly to pause.
+ * @driver: Private reference to the device driver. For use by bus
+ * driver only.
+ * @version_attr: Private version field. For use by bus driver only.
*/
struct visor_driver {
const char *name;
const char *version;
const char *vertag;
- const char *build_date;
- const char *build_time;
struct module *owner;
-
- /** Types of channels handled by this driver, ending with 0 GUID.
- * Our specialized BUS.match() method knows about this list, and
- * uses it to determine whether this driver will in fact handle a
- * new device that it has detected.
- */
struct visor_channeltype_descriptor *channel_types;
-
- /** Called when a new device comes online, by our probe() function
- * specified by driver.probe() (triggered ultimately by some call
- * to driver_register() / bus_add_driver() / driver_attach()).
- */
int (*probe)(struct visor_device *dev);
-
- /** Called when a new device is removed, by our remove() function
- * specified by driver.remove() (triggered ultimately by some call
- * to device_release_driver()).
- */
void (*remove)(struct visor_device *dev);
-
- /** Called periodically, whenever there is a possibility that
- * "something interesting" may have happened to the channel state.
- */
void (*channel_interrupt)(struct visor_device *dev);
-
- /** Called to initiate a change of the device's state. If the return
- * valu`e is < 0, there was an error and the state transition will NOT
- * occur. If the return value is >= 0, then the state transition was
- * INITIATED successfully, and complete_func() will be called (or was
- * just called) with the final status when either the state transition
- * fails or completes successfully.
- */
int (*pause)(struct visor_device *dev,
visorbus_state_complete_func complete_func);
int (*resume)(struct visor_device *dev,
visorbus_state_complete_func complete_func);
- /** These fields are for private use by the bus driver only. */
+ /* These fields are for private use by the bus driver only. */
struct device_driver driver;
struct driver_attribute version_attr;
};
@@ -116,48 +117,58 @@ struct visor_driver {
#define to_visor_driver(x) ((x) ? \
(container_of(x, struct visor_driver, driver)) : (NULL))
-/** A device type for things "plugged" into the visorbus bus */
+/**
+ * struct visor_device - A device type for things "plugged" into the visorbus
+ * bus
+ * visorchannel: Points to the channel that the device is
+ * associated with.
+ * channel_type_guid: Identifies the channel type to the bus driver.
+ * device: Device struct meant for use by the bus driver
+ * only.
+ * list_all: Used by the bus driver to enumerate devices.
+ * periodic_work: Device work queue. Private use by bus driver
+ * only.
+ * being_removed: Indicates that the device is being removed from
+ * the bus. Private bus driver use only.
+ * visordriver_callback_lock: Used by the bus driver to lock when handling
+ * channel events.
+ * pausing: Indicates that a change towards a paused state
+ * is in progress. Only modified by the bus driver.
+ * resuming: Indicates that a change towards a running state
+ * is in progress. Only modified by the bus driver.
+ * chipset_bus_no: Private field used by the bus driver.
+ * chipset_dev_no: Private field used by the bus driver.
+ * state: Used to indicate the current state of the
+ * device.
+ * inst: Unique GUID for this instance of the device.
+ * name: Name of the device.
+ * pending_msg_hdr: For private use by bus driver to respond to
+ * hypervisor requests.
+ * vbus_hdr_info: A pointer to header info. Private use by bus
+ * driver.
+ * partition_uuid: Indicates client partition id. This should be the
+ * same across all visor_devices in the current
+ * guest. Private use by bus driver only.
+ */
struct visor_device {
- /** visor driver can use the visorchannel member with the functions
- * defined in visorchannel.h to access the channel
- */
struct visorchannel *visorchannel;
uuid_le channel_type_guid;
- u64 channel_bytes;
-
- /** These fields are for private use by the bus driver only.
- * A notable exception is that the visor driver can use
- * visor_get_drvdata() and visor_set_drvdata() to retrieve or stash
- * private visor driver specific data within the device member.
- */
+ /* These fields are for private use by the bus driver only. */
struct device device;
struct list_head list_all;
struct periodic_work *periodic_work;
bool being_removed;
- bool responded_to_device_create;
- struct kobject kobjdevmajorminor; /* visorbus<x>/dev<y>/devmajorminor/*/
- struct {
- int major, minor;
- void *attr; /* private use by devmajorminor_attr.c you can
- * change this constant to whatever you want
- */
- } devnodes[5];
- /* the code will detect and behave appropriately) */
struct semaphore visordriver_callback_lock;
bool pausing;
bool resuming;
u32 chipset_bus_no;
u32 chipset_dev_no;
struct visorchipset_state state;
- uuid_le type;
uuid_le inst;
u8 *name;
- u8 *description;
struct controlvm_message_header *pending_msg_hdr;
void *vbus_hdr_info;
- u32 switch_no;
- u32 internal_port_no;
uuid_le partition_uuid;
};
@@ -174,8 +185,6 @@ int visorbus_write_channel(struct visor_device *dev,
unsigned long nbytes);
int visorbus_clear_channel(struct visor_device *dev,
unsigned long offset, u8 ch, unsigned long nbytes);
-int visorbus_registerdevnode(struct visor_device *dev,
- const char *name, int major, int minor);
void visorbus_enable_channel_interrupts(struct visor_device *dev);
void visorbus_disable_channel_interrupts(struct visor_device *dev);
#endif
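
The callbacks declared above are all a client driver has to supply: the bus core walks channel_types until it reaches the NULL_UUID_LE terminator to decide whether the driver claims a newly detected device, then invokes probe(), remove(), and the optional pause()/resume() hooks. A minimal sketch of how a client driver might fill in struct visor_driver follows; the GUID and names are placeholders, and the registration helper named in the comment is an assumption rather than something taken from this patch.

/*
 * Illustrative visorbus client-driver skeleton.  The GUID and names are
 * made up; registration is assumed to go through the bus's
 * visorbus_register_visor_driver() helper.
 */
static struct visor_channeltype_descriptor example_channel_types[] = {
	{ .guid = UUID_LE(0x12345678, 0x9abc, 0xdef0,
			  0x12, 0x34, 0x56, 0x78, 0x9a, 0xbc, 0xde, 0xf0),
	  .name = "example" },
	{ .guid = NULL_UUID_LE, .name = NULL }, /* match() terminator */
};

static int example_probe(struct visor_device *dev)
{
	/* dev->visorchannel is valid here; set the device up */
	return 0;
}

static void example_remove(struct visor_device *dev)
{
	/* undo whatever example_probe() set up */
}

static struct visor_driver example_driver = {
	.name		= "example_visor_driver",
	.owner		= THIS_MODULE,
	.channel_types	= example_channel_types,
	.probe		= example_probe,
	.remove		= example_remove,
};
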
diff --git a/drivers/staging/unisys/visorbus/visorbus_main.c b/drivers/staging/unisys/visorbus/visorbus_main.c
index 533bb5b3d284..3a147dbbd7b5 100644
--- a/drivers/staging/unisys/visorbus/visorbus_main.c
+++ b/drivers/staging/unisys/visorbus/visorbus_main.c
@@ -33,6 +33,9 @@ static int visorbus_forcenomatch;
static int visorbus_debugref;
#define SERIALLOOPBACKCHANADDR (100 * 1024 * 1024)
+/* Display string that is guaranteed to be no longer than 99 characters */
+#define LINESIZE 99
+
#define CURRENT_FILE_PC VISOR_BUS_PC_visorbus_main_c
#define POLLJIFFIES_TESTWORK 100
#define POLLJIFFIES_NORMALCHANNEL 10
@@ -182,7 +185,6 @@ static int
visorbus_match(struct device *xdev, struct device_driver *xdrv)
{
uuid_le channel_type;
- int rc = 0;
int i;
struct visor_device *dev;
struct visor_driver *drv;
@@ -190,26 +192,23 @@ visorbus_match(struct device *xdev, struct device_driver *xdrv)
dev = to_visor_device(xdev);
drv = to_visor_driver(xdrv);
channel_type = visorchannel_get_uuid(dev->visorchannel);
- if (visorbus_forcematch) {
- rc = 1;
- goto away;
- }
- if (visorbus_forcenomatch)
- goto away;
+ if (visorbus_forcematch)
+ return 1;
+ if (visorbus_forcenomatch)
+ return 0;
if (!drv->channel_types)
- goto away;
+ return 0;
+
for (i = 0;
(uuid_le_cmp(drv->channel_types[i].guid, NULL_UUID_LE) != 0) ||
(drv->channel_types[i].name);
i++)
if (uuid_le_cmp(drv->channel_types[i].guid,
- channel_type) == 0) {
- rc = i + 1;
- goto away;
- }
-away:
- return rc;
+ channel_type) == 0)
+ return i + 1;
+
+ return 0;
}
/** This is called when device_unregister() is called for the bus device
@@ -243,180 +242,6 @@ visorbus_release_device(struct device *xdev)
kfree(dev);
}
-/* Implement publishing of device node attributes under:
- *
- * /sys/bus/visorbus<x>/dev<y>/devmajorminor
- *
- */
-
-#define to_devmajorminor_attr(_attr) \
- container_of(_attr, struct devmajorminor_attribute, attr)
-#define to_visor_device_from_kobjdevmajorminor(obj) \
- container_of(obj, struct visor_device, kobjdevmajorminor)
-
-struct devmajorminor_attribute {
- struct attribute attr;
- int slot;
- ssize_t (*show)(struct visor_device *, int slot, char *buf);
- ssize_t (*store)(struct visor_device *, int slot, const char *buf,
- size_t count);
-};
-
-static ssize_t DEVMAJORMINOR_ATTR(struct visor_device *dev, int slot, char *buf)
-{
- int maxdevnodes = ARRAY_SIZE(dev->devnodes) / sizeof(dev->devnodes[0]);
-
- if (slot < 0 || slot >= maxdevnodes)
- return 0;
- return snprintf(buf, PAGE_SIZE, "%d:%d\n",
- dev->devnodes[slot].major, dev->devnodes[slot].minor);
-}
-
-static ssize_t
-devmajorminor_attr_show(struct kobject *kobj, struct attribute *attr, char *buf)
-{
- struct devmajorminor_attribute *devmajorminor_attr =
- to_devmajorminor_attr(attr);
- struct visor_device *dev = to_visor_device_from_kobjdevmajorminor(kobj);
- ssize_t ret = 0;
-
- if (devmajorminor_attr->show)
- ret = devmajorminor_attr->show(dev,
- devmajorminor_attr->slot, buf);
- return ret;
-}
-
-static ssize_t
-devmajorminor_attr_store(struct kobject *kobj,
- struct attribute *attr, const char *buf, size_t count)
-{
- struct devmajorminor_attribute *devmajorminor_attr =
- to_devmajorminor_attr(attr);
- struct visor_device *dev = to_visor_device_from_kobjdevmajorminor(kobj);
- ssize_t ret = 0;
-
- if (devmajorminor_attr->store)
- ret = devmajorminor_attr->store(dev,
- devmajorminor_attr->slot,
- buf, count);
- return ret;
-}
-
-static int register_devmajorminor_attributes(struct visor_device *dev);
-
-static int
-devmajorminor_create_file(struct visor_device *dev, const char *name,
- int major, int minor)
-{
- int maxdevnodes = ARRAY_SIZE(dev->devnodes) / sizeof(dev->devnodes[0]);
- struct devmajorminor_attribute *myattr = NULL;
- int x = -1, rc = 0, slot = -1;
-
- register_devmajorminor_attributes(dev);
- for (slot = 0; slot < maxdevnodes; slot++)
- if (!dev->devnodes[slot].attr)
- break;
- if (slot == maxdevnodes) {
- rc = -ENOMEM;
- goto away;
- }
- myattr = kzalloc(sizeof(*myattr), GFP_KERNEL);
- if (!myattr) {
- rc = -ENOMEM;
- goto away;
- }
- myattr->show = DEVMAJORMINOR_ATTR;
- myattr->store = NULL;
- myattr->slot = slot;
- myattr->attr.name = name;
- myattr->attr.mode = S_IRUGO;
- dev->devnodes[slot].attr = myattr;
- dev->devnodes[slot].major = major;
- dev->devnodes[slot].minor = minor;
- x = sysfs_create_file(&dev->kobjdevmajorminor, &myattr->attr);
- if (x < 0) {
- rc = x;
- goto away;
- }
- kobject_uevent(&dev->device.kobj, KOBJ_ONLINE);
-away:
- if (rc < 0) {
- kfree(myattr);
- myattr = NULL;
- dev->devnodes[slot].attr = NULL;
- }
- return rc;
-}
-
-static void
-devmajorminor_remove_file(struct visor_device *dev, int slot)
-{
- int maxdevnodes = ARRAY_SIZE(dev->devnodes) / sizeof(dev->devnodes[0]);
- struct devmajorminor_attribute *myattr = NULL;
-
- if (slot < 0 || slot >= maxdevnodes)
- return;
- myattr = (struct devmajorminor_attribute *)(dev->devnodes[slot].attr);
- if (!myattr)
- return;
- sysfs_remove_file(&dev->kobjdevmajorminor, &myattr->attr);
- kobject_uevent(&dev->device.kobj, KOBJ_OFFLINE);
- dev->devnodes[slot].attr = NULL;
- kfree(myattr);
-}
-
-static void
-devmajorminor_remove_all_files(struct visor_device *dev)
-{
- int i = 0;
- int maxdevnodes = ARRAY_SIZE(dev->devnodes) / sizeof(dev->devnodes[0]);
-
- for (i = 0; i < maxdevnodes; i++)
- devmajorminor_remove_file(dev, i);
-}
-
-static const struct sysfs_ops devmajorminor_sysfs_ops = {
- .show = devmajorminor_attr_show,
- .store = devmajorminor_attr_store,
-};
-
-static struct kobj_type devmajorminor_kobj_type = {
- .sysfs_ops = &devmajorminor_sysfs_ops
-};
-
-static int
-register_devmajorminor_attributes(struct visor_device *dev)
-{
- int rc = 0, x = 0;
-
- if (dev->kobjdevmajorminor.parent)
- goto away; /* already registered */
- x = kobject_init_and_add(&dev->kobjdevmajorminor,
- &devmajorminor_kobj_type, &dev->device.kobj,
- "devmajorminor");
- if (x < 0) {
- rc = x;
- goto away;
- }
-
- kobject_uevent(&dev->kobjdevmajorminor, KOBJ_ADD);
-
-away:
- return rc;
-}
-
-static void
-unregister_devmajorminor_attributes(struct visor_device *dev)
-{
- if (!dev->kobjdevmajorminor.parent)
- return; /* already unregistered */
- devmajorminor_remove_all_files(dev);
-
- kobject_del(&dev->kobjdevmajorminor);
- kobject_put(&dev->kobjdevmajorminor);
- dev->kobjdevmajorminor.parent = NULL;
-}
-
/* begin implementation of specific channel attributes to appear under
* /sys/bus/visorbus<x>/dev<y>/channel
*/
@@ -427,7 +252,7 @@ static ssize_t physaddr_show(struct device *dev, struct device_attribute *attr,
if (!vdev->visorchannel)
return 0;
- return snprintf(buf, PAGE_SIZE, "0x%Lx\n",
+ return snprintf(buf, PAGE_SIZE, "0x%llx\n",
visorchannel_get_physaddr(vdev->visorchannel));
}
@@ -449,7 +274,7 @@ static ssize_t clientpartition_show(struct device *dev,
if (!vdev->visorchannel)
return 0;
- return snprintf(buf, PAGE_SIZE, "0x%Lx\n",
+ return snprintf(buf, PAGE_SIZE, "0x%llx\n",
visorchannel_get_clientpartition(vdev->visorchannel));
}
@@ -457,24 +282,24 @@ static ssize_t typeguid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct visor_device *vdev = to_visor_device(dev);
- char s[99];
+ char typeid[LINESIZE];
if (!vdev->visorchannel)
return 0;
return snprintf(buf, PAGE_SIZE, "%s\n",
- visorchannel_id(vdev->visorchannel, s));
+ visorchannel_id(vdev->visorchannel, typeid));
}
static ssize_t zoneguid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
struct visor_device *vdev = to_visor_device(dev);
- char s[99];
+ char zoneid[LINESIZE];
if (!vdev->visorchannel)
return 0;
return snprintf(buf, PAGE_SIZE, "%s\n",
- visorchannel_zoneid(vdev->visorchannel, s));
+ visorchannel_zoneid(vdev->visorchannel, zoneid));
}
static ssize_t typename_show(struct device *dev, struct device_attribute *attr,
@@ -541,7 +366,7 @@ static ssize_t partition_handle_show(struct device *dev,
struct visor_device *vdev = to_visor_device(dev);
u64 handle = visorchannel_get_clientpartition(vdev->visorchannel);
- return snprintf(buf, PAGE_SIZE, "0x%Lx\n", handle);
+ return snprintf(buf, PAGE_SIZE, "0x%llx\n", handle);
}
static ssize_t partition_guid_show(struct device *dev,
@@ -566,7 +391,7 @@ static ssize_t channel_addr_show(struct device *dev,
struct visor_device *vdev = to_visor_device(dev);
u64 addr = visorchannel_get_physaddr(vdev->visorchannel);
- return snprintf(buf, PAGE_SIZE, "0x%Lx\n", addr);
+ return snprintf(buf, PAGE_SIZE, "0x%llx\n", addr);
}
static ssize_t channel_bytes_show(struct device *dev,
@@ -575,7 +400,7 @@ static ssize_t channel_bytes_show(struct device *dev,
struct visor_device *vdev = to_visor_device(dev);
u64 nbytes = visorchannel_get_nbytes(vdev->visorchannel);
- return snprintf(buf, PAGE_SIZE, "0x%Lx\n", nbytes);
+ return snprintf(buf, PAGE_SIZE, "0x%llx\n", nbytes);
}
static ssize_t channel_id_show(struct device *dev,
@@ -598,9 +423,9 @@ static ssize_t client_bus_info_show(struct device *dev,
struct visor_device *vdev = to_visor_device(dev);
struct visorchannel *channel = vdev->visorchannel;
- int i, x, remain = PAGE_SIZE;
+ int i, shift, remain = PAGE_SIZE;
unsigned long off;
- char *p = buf;
+ char *pos = buf;
u8 *partition_name;
struct ultra_vbus_deviceinfo dev_info;
@@ -608,44 +433,45 @@ static ssize_t client_bus_info_show(struct device *dev,
if (channel) {
if (vdev->name)
partition_name = vdev->name;
- x = snprintf(p, remain,
- "Client device / client driver info for %s partition (vbus #%d):\n",
- partition_name, vdev->chipset_dev_no);
- p += x;
- remain -= x;
- x = visorchannel_read(channel,
- offsetof(struct
- spar_vbus_channel_protocol,
- chp_info),
- &dev_info, sizeof(dev_info));
- if (x >= 0) {
- x = vbuschannel_devinfo_to_string(&dev_info, p,
- remain, -1);
- p += x;
- remain -= x;
+ shift = snprintf(pos, remain,
+ "Client device / client driver info for %s eartition (vbus #%d):\n",
+ partition_name, vdev->chipset_dev_no);
+ pos += shift;
+ remain -= shift;
+ shift = visorchannel_read(channel,
+ offsetof(struct
+ spar_vbus_channel_protocol,
+ chp_info),
+ &dev_info, sizeof(dev_info));
+ if (shift >= 0) {
+ shift = vbuschannel_devinfo_to_string(&dev_info, pos,
+ remain, -1);
+ pos += shift;
+ remain -= shift;
}
- x = visorchannel_read(channel,
- offsetof(struct
- spar_vbus_channel_protocol,
- bus_info),
- &dev_info, sizeof(dev_info));
- if (x >= 0) {
- x = vbuschannel_devinfo_to_string(&dev_info, p,
- remain, -1);
- p += x;
- remain -= x;
+ shift = visorchannel_read(channel,
+ offsetof(struct
+ spar_vbus_channel_protocol,
+ bus_info),
+ &dev_info, sizeof(dev_info));
+ if (shift >= 0) {
+ shift = vbuschannel_devinfo_to_string(&dev_info, pos,
+ remain, -1);
+ pos += shift;
+ remain -= shift;
}
off = offsetof(struct spar_vbus_channel_protocol, dev_info);
i = 0;
while (off + sizeof(dev_info) <=
visorchannel_get_nbytes(channel)) {
- x = visorchannel_read(channel,
- off, &dev_info, sizeof(dev_info));
- if (x >= 0) {
- x = vbuschannel_devinfo_to_string
- (&dev_info, p, remain, i);
- p += x;
- remain -= x;
+ shift = visorchannel_read(channel,
+ off, &dev_info,
+ sizeof(dev_info));
+ if (shift >= 0) {
+ shift = vbuschannel_devinfo_to_string
+ (&dev_info, pos, remain, i);
+ pos += shift;
+ remain -= shift;
}
off += sizeof(dev_info);
i++;
@@ -752,36 +578,28 @@ dev_stop_periodic_work(struct visor_device *dev)
static int
visordriver_probe_device(struct device *xdev)
{
- int rc;
+ int res;
struct visor_driver *drv;
struct visor_device *dev;
drv = to_visor_driver(xdev->driver);
dev = to_visor_device(xdev);
+
+ if (!drv->probe)
+ return -ENODEV;
+
down(&dev->visordriver_callback_lock);
dev->being_removed = false;
- /*
- * ensure that the dev->being_removed flag is cleared before
- * we start the probe
- */
- wmb();
- get_device(&dev->device);
- if (!drv->probe) {
- up(&dev->visordriver_callback_lock);
- rc = -ENODEV;
- goto away;
+
+ res = drv->probe(dev);
+ if (res >= 0) {
+ /* success: reference kept via unmatched get_device() */
+ get_device(&dev->device);
+ fix_vbus_dev_info(dev);
}
- rc = drv->probe(dev);
- if (rc < 0)
- goto away;
- fix_vbus_dev_info(dev);
up(&dev->visordriver_callback_lock);
- rc = 0;
-away:
- if (rc != 0)
- put_device(&dev->device);
- return rc;
+ return res;
}
/** This is called when device_unregister() is called for each child device
@@ -798,21 +616,12 @@ visordriver_remove_device(struct device *xdev)
drv = to_visor_driver(xdev->driver);
down(&dev->visordriver_callback_lock);
dev->being_removed = true;
- /*
- * ensure that the dev->being_removed flag is set before we start the
- * actual removal
- */
- wmb();
- if (drv) {
- if (drv->remove)
- drv->remove(dev);
- }
+ if (drv->remove)
+ drv->remove(dev);
up(&dev->visordriver_callback_lock);
dev_stop_periodic_work(dev);
- devmajorminor_remove_all_files(dev);
put_device(&dev->device);
-
return 0;
}
@@ -928,14 +737,6 @@ visorbus_clear_channel(struct visor_device *dev, unsigned long offset, u8 ch,
}
EXPORT_SYMBOL_GPL(visorbus_clear_channel);
-int
-visorbus_registerdevnode(struct visor_device *dev,
- const char *name, int major, int minor)
-{
- return devmajorminor_create_file(dev, name, major, minor);
-}
-EXPORT_SYMBOL_GPL(visorbus_registerdevnode);
-
/** We don't really have a real interrupt, so for now we just call the
* interrupt function periodically...
*/
@@ -970,7 +771,7 @@ EXPORT_SYMBOL_GPL(visorbus_disable_channel_interrupts);
static int
create_visor_device(struct visor_device *dev)
{
- int rc;
+ int err;
u32 chipset_bus_no = dev->chipset_bus_no;
u32 chipset_dev_no = dev->chipset_dev_no;
@@ -992,8 +793,8 @@ create_visor_device(struct visor_device *dev)
if (!dev->periodic_work) {
POSTCODE_LINUX_3(DEVICE_CREATE_FAILURE_PC, chipset_dev_no,
DIAG_SEVERITY_ERR);
- rc = -EINVAL;
- goto away;
+ err = -EINVAL;
+ goto err_put;
}
/* bus_id must be a unique name with respect to this bus TYPE
@@ -1019,36 +820,25 @@ create_visor_device(struct visor_device *dev)
* claim the device. The device will be linked onto
* bus_type.klist_devices regardless (use bus_for_each_dev).
*/
- rc = device_add(&dev->device);
- if (rc < 0) {
+ err = device_add(&dev->device);
+ if (err < 0) {
POSTCODE_LINUX_3(DEVICE_ADD_PC, chipset_bus_no,
DIAG_SEVERITY_ERR);
- goto away;
- }
-
- rc = register_devmajorminor_attributes(dev);
- if (rc < 0) {
- POSTCODE_LINUX_3(DEVICE_REGISTER_FAILURE_PC, chipset_dev_no,
- DIAG_SEVERITY_ERR);
- goto away_unregister;
+ goto err_put;
}
list_add_tail(&dev->list_all, &list_all_device_instances);
- return 0;
-
-away_unregister:
- device_unregister(&dev->device);
+ return 0; /* success: reference kept via unmatched get_device() */
-away:
+err_put:
put_device(&dev->device);
- return rc;
+ return err;
}
static void
remove_visor_device(struct visor_device *dev)
{
list_del(&dev->list_all);
- unregister_devmajorminor_attributes(dev);
put_device(&dev->device);
device_unregister(&dev->device);
}
@@ -1477,24 +1267,24 @@ struct channel_size_info {
int
visorbus_init(void)
{
- int rc = 0;
+ int err;
- POSTCODE_LINUX_3(DRIVER_ENTRY_PC, rc, POSTCODE_SEVERITY_INFO);
+ POSTCODE_LINUX_3(DRIVER_ENTRY_PC, 0, POSTCODE_SEVERITY_INFO);
bus_device_info_init(&clientbus_driverinfo,
"clientbus", "visorbus",
VERSION, NULL);
- rc = create_bus_type();
- if (rc < 0) {
+ err = create_bus_type();
+ if (err < 0) {
POSTCODE_LINUX_2(BUS_CREATE_ENTRY_PC, DIAG_SEVERITY_ERR);
- goto away;
+ goto error;
}
periodic_dev_workqueue = create_singlethread_workqueue("visorbus_dev");
if (!periodic_dev_workqueue) {
POSTCODE_LINUX_2(CREATE_WORKQUEUE_PC, DIAG_SEVERITY_ERR);
- rc = -ENOMEM;
- goto away;
+ err = -ENOMEM;
+ goto error;
}
/* This enables us to receive notifications when devices appear for
@@ -1504,13 +1294,11 @@ visorbus_init(void)
&chipset_responders,
&chipset_driverinfo);
- rc = 0;
+ return 0;
-away:
- if (rc)
- POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
- POSTCODE_SEVERITY_ERR);
- return rc;
+error:
+ POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, err, POSTCODE_SEVERITY_ERR);
+ return err;
}
void
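
client_bus_info_show() above builds its output by advancing a position pointer and shrinking a remaining-byte budget after every formatted write. A standalone sketch of that pos/remain accumulation follows; the struct and field names are invented, and it uses scnprintf() so the running count can never step past the end of the buffer.

/*
 * Sketch of the pos/remain pattern from client_bus_info_show(); the
 * example_info struct is a placeholder for the real channel records.
 */
struct example_info {
	int dev_no;
	const char *name;
};

static ssize_t example_show(char *buf, size_t bufsize,
			    const struct example_info *info, int count)
{
	char *pos = buf;
	size_t remain = bufsize;
	int shift;
	int i;

	shift = scnprintf(pos, remain, "devices (%d total):\n", count);
	pos += shift;
	remain -= shift;

	for (i = 0; i < count; i++) {
		shift = scnprintf(pos, remain, "  dev %d: %s\n",
				  info[i].dev_no, info[i].name);
		pos += shift;
		remain -= shift;
	}

	return pos - buf; /* bytes placed in buf, as a sysfs show() reports */
}
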
diff --git a/drivers/staging/unisys/visorbus/visorchannel.c b/drivers/staging/unisys/visorbus/visorchannel.c
index b68a904ac617..43373582cf1d 100644
--- a/drivers/staging/unisys/visorbus/visorchannel.c
+++ b/drivers/staging/unisys/visorbus/visorchannel.c
@@ -40,7 +40,6 @@ struct visorchannel {
bool requested;
struct channel_header chan_hdr;
uuid_le guid;
- ulong size;
bool needs_lock; /* channel creator knows if more than one */
/* thread will be inserting or removing */
spinlock_t insert_lock; /* protect head writes in chan_hdr */
@@ -134,8 +133,6 @@ visorchannel_create_guts(u64 physaddr, unsigned long channel_bytes,
}
channel->nbytes = channel_bytes;
-
- channel->size = channel_bytes;
channel->guid = guid;
return channel;
@@ -186,7 +183,7 @@ EXPORT_SYMBOL_GPL(visorchannel_get_physaddr);
ulong
visorchannel_get_nbytes(struct visorchannel *channel)
{
- return channel->size;
+ return channel->nbytes;
}
EXPORT_SYMBOL_GPL(visorchannel_get_nbytes);
diff --git a/drivers/staging/unisys/visorbus/visorchipset.c b/drivers/staging/unisys/visorbus/visorchipset.c
index 9cf4f8463c4e..5ba5936e2203 100644
--- a/drivers/staging/unisys/visorbus/visorchipset.c
+++ b/drivers/staging/unisys/visorbus/visorchipset.c
@@ -59,14 +59,13 @@
*/
static int visorchipset_major;
static int visorchipset_visorbusregwait = 1; /* default is on */
-static int visorchipset_holdchipsetready;
static unsigned long controlvm_payload_bytes_buffered;
static u32 dump_vhba_bus;
static int
visorchipset_open(struct inode *inode, struct file *file)
{
- unsigned minor_number = iminor(inode);
+ unsigned int minor_number = iminor(inode);
if (minor_number)
return -ENODEV;
@@ -90,9 +89,6 @@ static unsigned long poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_FAST;
static unsigned long most_recent_message_jiffies;
static int visorbusregistered;
-#define MAX_CHIPSET_EVENTS 2
-static u8 chipset_events[MAX_CHIPSET_EVENTS] = { 0, 0 };
-
struct parser_context {
unsigned long allocbytes;
unsigned long param_bytes;
@@ -107,7 +103,6 @@ static DEFINE_SEMAPHORE(notifier_lock);
static struct cdev file_cdev;
static struct visorchannel **file_controlvm_channel;
-static struct controlvm_message_header g_chipset_msg_hdr;
static struct controlvm_message_packet g_devicechangestate_packet;
static LIST_HEAD(bus_info_list);
@@ -156,8 +151,6 @@ struct putfile_active_buffer {
/* a payload from a controlvm message, containing a file data buffer */
struct parser_context *parser_ctx;
/* points within data area of parser_ctx to next byte of data */
- u8 *pnext;
- /* # bytes left from <pnext> to the end of this data buffer */
size_t bytes_remaining;
};
@@ -171,14 +164,10 @@ struct putfile_request {
/* header from original TransmitFile request */
struct controlvm_message_header controlvm_header;
- u64 file_request_number; /* from original TransmitFile request */
/* link to next struct putfile_request */
struct list_head next_putfile_request;
- /* most-recent sequence number supplied via a controlvm message */
- u64 data_sequence_number;
-
/* head of putfile_buffer_entry list, which describes the data to be
* supplied as putfile data;
* - this list is added to when controlvm messages come in that supply
@@ -274,11 +263,6 @@ static ssize_t remaining_steps_store(struct device *dev,
const char *buf, size_t count);
static DEVICE_ATTR_RW(remaining_steps);
-static ssize_t chipsetready_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count);
-static DEVICE_ATTR_WO(chipsetready);
-
static ssize_t devicedisabled_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count);
@@ -303,16 +287,6 @@ static struct attribute_group visorchipset_install_group = {
.attrs = visorchipset_install_attrs
};
-static struct attribute *visorchipset_guest_attrs[] = {
- &dev_attr_chipsetready.attr,
- NULL
-};
-
-static struct attribute_group visorchipset_guest_group = {
- .name = "guest",
- .attrs = visorchipset_guest_attrs
-};
-
static struct attribute *visorchipset_parahotplug_attrs[] = {
&dev_attr_devicedisabled.attr,
&dev_attr_deviceenabled.attr,
@@ -326,7 +300,6 @@ static struct attribute_group visorchipset_parahotplug_group = {
static const struct attribute_group *visorchipset_dev_groups[] = {
&visorchipset_install_group,
- &visorchipset_guest_group,
&visorchipset_parahotplug_group,
NULL
};
@@ -359,8 +332,7 @@ static struct parser_context *
parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
{
int allocbytes = sizeof(struct parser_context) + bytes;
- struct parser_context *rc = NULL;
- struct parser_context *ctx = NULL;
+ struct parser_context *ctx;
if (retry)
*retry = false;
@@ -374,15 +346,13 @@ parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
> MAX_CONTROLVM_PAYLOAD_BYTES) {
if (retry)
*retry = true;
- rc = NULL;
- goto cleanup;
+ return NULL;
}
ctx = kzalloc(allocbytes, GFP_KERNEL | __GFP_NORETRY);
if (!ctx) {
if (retry)
*retry = true;
- rc = NULL;
- goto cleanup;
+ return NULL;
}
ctx->allocbytes = allocbytes;
@@ -393,35 +363,27 @@ parser_init_byte_stream(u64 addr, u32 bytes, bool local, bool *retry)
if (local) {
void *p;
- if (addr > virt_to_phys(high_memory - 1)) {
- rc = NULL;
- goto cleanup;
- }
+ if (addr > virt_to_phys(high_memory - 1))
+ goto err_finish_ctx;
p = __va((unsigned long)(addr));
memcpy(ctx->data, p, bytes);
} else {
void *mapping = memremap(addr, bytes, MEMREMAP_WB);
- if (!mapping) {
- rc = NULL;
- goto cleanup;
- }
+ if (!mapping)
+ goto err_finish_ctx;
memcpy(ctx->data, mapping, bytes);
memunmap(mapping);
}
ctx->byte_stream = true;
- rc = ctx;
-cleanup:
- if (rc) {
- controlvm_payload_bytes_buffered += ctx->param_bytes;
- } else {
- if (ctx) {
- parser_done(ctx);
- ctx = NULL;
- }
- }
- return rc;
+ controlvm_payload_bytes_buffered += ctx->param_bytes;
+
+ return ctx;
+
+err_finish_ctx:
+ parser_done(ctx);
+ return NULL;
}
static uuid_le
@@ -523,7 +485,7 @@ static ssize_t toolaction_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- u8 tool_action;
+ u8 tool_action = 0;
visorchannel_read(controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
@@ -541,10 +503,11 @@ static ssize_t toolaction_store(struct device *dev,
if (kstrtou8(buf, 10, &tool_action))
return -EINVAL;
- ret = visorchannel_write(controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- tool_action),
- &tool_action, sizeof(u8));
+ ret = visorchannel_write
+ (controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ tool_action),
+ &tool_action, sizeof(u8));
if (ret)
return ret;
@@ -576,10 +539,11 @@ static ssize_t boottotool_store(struct device *dev,
return -EINVAL;
efi_spar_indication.boot_to_tool = val;
- ret = visorchannel_write(controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- efi_spar_ind), &(efi_spar_indication),
- sizeof(struct efi_spar_indication));
+ ret = visorchannel_write
+ (controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ efi_spar_ind), &(efi_spar_indication),
+ sizeof(struct efi_spar_indication));
if (ret)
return ret;
@@ -589,7 +553,7 @@ static ssize_t boottotool_store(struct device *dev,
static ssize_t error_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- u32 error;
+ u32 error = 0;
visorchannel_read(controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
@@ -607,10 +571,11 @@ static ssize_t error_store(struct device *dev, struct device_attribute *attr,
if (kstrtou32(buf, 10, &error))
return -EINVAL;
- ret = visorchannel_write(controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- installation_error),
- &error, sizeof(u32));
+ ret = visorchannel_write
+ (controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ installation_error),
+ &error, sizeof(u32));
if (ret)
return ret;
return count;
@@ -619,12 +584,13 @@ static ssize_t error_store(struct device *dev, struct device_attribute *attr,
static ssize_t textid_show(struct device *dev, struct device_attribute *attr,
char *buf)
{
- u32 text_id;
+ u32 text_id = 0;
- visorchannel_read(controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- installation_text_id),
- &text_id, sizeof(u32));
+ visorchannel_read
+ (controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ installation_text_id),
+ &text_id, sizeof(u32));
return scnprintf(buf, PAGE_SIZE, "%i\n", text_id);
}
@@ -637,10 +603,11 @@ static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
if (kstrtou32(buf, 10, &text_id))
return -EINVAL;
- ret = visorchannel_write(controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- installation_text_id),
- &text_id, sizeof(u32));
+ ret = visorchannel_write
+ (controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ installation_text_id),
+ &text_id, sizeof(u32));
if (ret)
return ret;
return count;
@@ -649,7 +616,7 @@ static ssize_t textid_store(struct device *dev, struct device_attribute *attr,
static ssize_t remaining_steps_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- u16 remaining_steps;
+ u16 remaining_steps = 0;
visorchannel_read(controlvm_channel,
offsetof(struct spar_controlvm_channel_protocol,
@@ -668,10 +635,11 @@ static ssize_t remaining_steps_store(struct device *dev,
if (kstrtou16(buf, 10, &remaining_steps))
return -EINVAL;
- ret = visorchannel_write(controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- installation_remaining_steps),
- &remaining_steps, sizeof(u16));
+ ret = visorchannel_write
+ (controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ installation_remaining_steps),
+ &remaining_steps, sizeof(u16));
if (ret)
return ret;
return count;
@@ -717,26 +685,6 @@ struct visor_device *visorbus_get_device_by_id(u32 bus_no, u32 dev_no,
}
EXPORT_SYMBOL(visorbus_get_device_by_id);
-static u8
-check_chipset_events(void)
-{
- int i;
- u8 send_msg = 1;
- /* Check events to determine if response should be sent */
- for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
- send_msg &= chipset_events[i];
- return send_msg;
-}
-
-static void
-clear_chipset_events(void)
-{
- int i;
- /* Clear chipset_events */
- for (i = 0; i < MAX_CHIPSET_EVENTS; i++)
- chipset_events[i] = 0;
-}
-
void
visorchipset_register_busdev(
struct visorchipset_busdev_notifiers *notifiers,
@@ -772,7 +720,7 @@ chipset_init(struct controlvm_message *inmsg)
POSTCODE_LINUX_2(CHIPSET_INIT_ENTRY_PC, POSTCODE_SEVERITY_INFO);
if (chipset_inited) {
rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
- goto cleanup;
+ goto out_respond;
}
chipset_inited = 1;
POSTCODE_LINUX_2(CHIPSET_INIT_EXIT_PC, POSTCODE_SEVERITY_INFO);
@@ -789,7 +737,7 @@ chipset_init(struct controlvm_message *inmsg)
*/
features |= ULTRA_CHIPSET_FEATURE_REPLY;
-cleanup:
+out_respond:
if (inmsg->hdr.flags.response_expected)
controlvm_respond_chipset_init(&inmsg->hdr, rc, features);
}
@@ -970,28 +918,31 @@ bus_epilog(struct visor_device *bus_info,
u32 cmd, struct controlvm_message_header *msg_hdr,
int response, bool need_response)
{
- bool notified = false;
struct controlvm_message_header *pmsg_hdr = NULL;
+ down(&notifier_lock);
+
if (!bus_info) {
/* relying on a valid passed in response code */
/* be lazy and re-use msg_hdr for this failure, is this ok?? */
pmsg_hdr = msg_hdr;
- goto away;
+ goto out_respond_and_unlock;
}
if (bus_info->pending_msg_hdr) {
/* only non-NULL if dev is still waiting on a response */
response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
pmsg_hdr = bus_info->pending_msg_hdr;
- goto away;
+ goto out_respond_and_unlock;
}
if (need_response) {
pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
if (!pmsg_hdr) {
- response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
- goto away;
+ POSTCODE_LINUX_4(MALLOC_FAILURE_PC, cmd,
+ bus_info->chipset_bus_no,
+ POSTCODE_SEVERITY_ERR);
+ goto out_unlock;
}
memcpy(pmsg_hdr, msg_hdr,
@@ -999,37 +950,27 @@ bus_epilog(struct visor_device *bus_info,
bus_info->pending_msg_hdr = pmsg_hdr;
}
- down(&notifier_lock);
if (response == CONTROLVM_RESP_SUCCESS) {
switch (cmd) {
case CONTROLVM_BUS_CREATE:
if (busdev_notifiers.bus_create) {
(*busdev_notifiers.bus_create) (bus_info);
- notified = true;
+ goto out_unlock;
}
break;
case CONTROLVM_BUS_DESTROY:
if (busdev_notifiers.bus_destroy) {
(*busdev_notifiers.bus_destroy) (bus_info);
- notified = true;
+ goto out_unlock;
}
break;
}
}
-away:
- if (notified)
- /* The callback function just called above is responsible
- * for calling the appropriate visorchipset_busdev_responders
- * function, which will call bus_responder()
- */
- ;
- else
- /*
- * Do not kfree(pmsg_hdr) as this is the failure path.
- * The success path ('notified') will call the responder
- * directly and kfree() there.
- */
- bus_responder(cmd, pmsg_hdr, response);
+
+out_respond_and_unlock:
+ bus_responder(cmd, pmsg_hdr, response);
+
+out_unlock:
up(&notifier_lock);
}
@@ -1040,30 +981,30 @@ device_epilog(struct visor_device *dev_info,
bool need_response, bool for_visorbus)
{
struct visorchipset_busdev_notifiers *notifiers;
- bool notified = false;
struct controlvm_message_header *pmsg_hdr = NULL;
notifiers = &busdev_notifiers;
+ down(&notifier_lock);
if (!dev_info) {
/* relying on a valid passed in response code */
/* be lazy and re-use msg_hdr for this failure, is this ok?? */
pmsg_hdr = msg_hdr;
- goto away;
+ goto out_respond_and_unlock;
}
if (dev_info->pending_msg_hdr) {
/* only non-NULL if dev is still waiting on a response */
response = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
pmsg_hdr = dev_info->pending_msg_hdr;
- goto away;
+ goto out_respond_and_unlock;
}
if (need_response) {
pmsg_hdr = kzalloc(sizeof(*pmsg_hdr), GFP_KERNEL);
if (!pmsg_hdr) {
response = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
- goto away;
+ goto out_respond_and_unlock;
}
memcpy(pmsg_hdr, msg_hdr,
@@ -1071,13 +1012,12 @@ device_epilog(struct visor_device *dev_info,
dev_info->pending_msg_hdr = pmsg_hdr;
}
- down(&notifier_lock);
if (response >= 0) {
switch (cmd) {
case CONTROLVM_DEVICE_CREATE:
if (notifiers->device_create) {
(*notifiers->device_create) (dev_info);
- notified = true;
+ goto out_unlock;
}
break;
case CONTROLVM_DEVICE_CHANGESTATE:
@@ -1087,7 +1027,7 @@ device_epilog(struct visor_device *dev_info,
segment_state_running.operating) {
if (notifiers->device_resume) {
(*notifiers->device_resume) (dev_info);
- notified = true;
+ goto out_unlock;
}
}
/* ServerNotReady / ServerLost / SegmentStateStandby */
@@ -1099,32 +1039,23 @@ device_epilog(struct visor_device *dev_info,
*/
if (notifiers->device_pause) {
(*notifiers->device_pause) (dev_info);
- notified = true;
+ goto out_unlock;
}
}
break;
case CONTROLVM_DEVICE_DESTROY:
if (notifiers->device_destroy) {
(*notifiers->device_destroy) (dev_info);
- notified = true;
+ goto out_unlock;
}
break;
}
}
-away:
- if (notified)
- /* The callback function just called above is responsible
- * for calling the appropriate visorchipset_busdev_responders
- * function, which will call device_responder()
- */
- ;
- else
- /*
- * Do not kfree(pmsg_hdr) as this is the failure path.
- * The success path ('notified') will call the responder
- * directly and kfree() there.
- */
- device_responder(cmd, pmsg_hdr, response);
+
+out_respond_and_unlock:
+ device_responder(cmd, pmsg_hdr, response);
+
+out_unlock:
up(&notifier_lock);
}
@@ -1142,14 +1073,14 @@ bus_create(struct controlvm_message *inmsg)
POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
POSTCODE_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
- goto cleanup;
+ goto out_bus_epilog;
}
bus_info = kzalloc(sizeof(*bus_info), GFP_KERNEL);
if (!bus_info) {
POSTCODE_LINUX_3(BUS_CREATE_FAILURE_PC, bus_no,
POSTCODE_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
- goto cleanup;
+ goto out_bus_epilog;
}
INIT_LIST_HEAD(&bus_info->list_all);
@@ -1169,7 +1100,7 @@ bus_create(struct controlvm_message *inmsg)
rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
kfree(bus_info);
bus_info = NULL;
- goto cleanup;
+ goto out_bus_epilog;
}
bus_info->visorchannel = visorchannel;
if (uuid_le_cmp(cmd->create_bus.bus_inst_uuid, spar_siovm_uuid) == 0) {
@@ -1179,7 +1110,7 @@ bus_create(struct controlvm_message *inmsg)
POSTCODE_LINUX_3(BUS_CREATE_EXIT_PC, bus_no, POSTCODE_SEVERITY_INFO);
-cleanup:
+out_bus_epilog:
bus_epilog(bus_info, CONTROLVM_BUS_CREATE, &inmsg->hdr,
rc, inmsg->hdr.flags.response_expected == 1);
}
@@ -1231,8 +1162,9 @@ bus_configure(struct controlvm_message *inmsg,
POSTCODE_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_MESSAGE_ID_INVALID_FOR_CLIENT;
} else {
- visorchannel_set_clientpartition(bus_info->visorchannel,
- cmd->configure_bus.guest_handle);
+ visorchannel_set_clientpartition
+ (bus_info->visorchannel,
+ cmd->configure_bus.guest_handle);
bus_info->partition_uuid = parser_id_get(parser_ctx);
parser_param_start(parser_ctx, PARSERSTRING_NAME);
bus_info->name = parser_string_get(parser_ctx);
@@ -1260,14 +1192,14 @@ my_device_create(struct controlvm_message *inmsg)
POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
POSTCODE_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
- goto cleanup;
+ goto out_respond;
}
if (bus_info->state.created == 0) {
POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
POSTCODE_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_BUS_INVALID;
- goto cleanup;
+ goto out_respond;
}
dev_info = visorbus_get_device_by_id(bus_no, dev_no, NULL);
@@ -1275,7 +1207,7 @@ my_device_create(struct controlvm_message *inmsg)
POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
POSTCODE_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_ALREADY_DONE;
- goto cleanup;
+ goto out_respond;
}
dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL);
@@ -1283,7 +1215,7 @@ my_device_create(struct controlvm_message *inmsg)
POSTCODE_LINUX_4(DEVICE_CREATE_FAILURE_PC, dev_no, bus_no,
POSTCODE_SEVERITY_ERR);
rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
- goto cleanup;
+ goto out_respond;
}
dev_info->chipset_bus_no = bus_no;
@@ -1308,7 +1240,7 @@ my_device_create(struct controlvm_message *inmsg)
rc = -CONTROLVM_RESP_ERROR_KMALLOC_FAILED;
kfree(dev_info);
dev_info = NULL;
- goto cleanup;
+ goto out_respond;
}
dev_info->visorchannel = visorchannel;
dev_info->channel_type_guid = cmd->create_device.data_type_uuid;
@@ -1318,7 +1250,7 @@ my_device_create(struct controlvm_message *inmsg)
POSTCODE_LINUX_4(DEVICE_CREATE_EXIT_PC, dev_no, bus_no,
POSTCODE_SEVERITY_INFO);
-cleanup:
+out_respond:
device_epilog(dev_info, segment_state_running,
CONTROLVM_DEVICE_CREATE, &inmsg->hdr, rc,
inmsg->hdr.flags.response_expected == 1, 1);
@@ -1382,35 +1314,23 @@ initialize_controlvm_payload_info(u64 phys_addr, u64 offset, u32 bytes,
struct visor_controlvm_payload_info *info)
{
u8 *payload = NULL;
- int rc = CONTROLVM_RESP_SUCCESS;
- if (!info) {
- rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
- goto cleanup;
- }
+ if (!info)
+ return -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
+
memset(info, 0, sizeof(struct visor_controlvm_payload_info));
- if ((offset == 0) || (bytes == 0)) {
- rc = -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
- goto cleanup;
- }
+ if ((offset == 0) || (bytes == 0))
+ return -CONTROLVM_RESP_ERROR_PAYLOAD_INVALID;
+
payload = memremap(phys_addr + offset, bytes, MEMREMAP_WB);
- if (!payload) {
- rc = -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
- goto cleanup;
- }
+ if (!payload)
+ return -CONTROLVM_RESP_ERROR_IOREMAP_FAILED;
info->offset = offset;
info->bytes = bytes;
info->ptr = payload;
-cleanup:
- if (rc < 0) {
- if (payload) {
- memunmap(payload);
- payload = NULL;
- }
- }
- return rc;
+ return CONTROLVM_RESP_SUCCESS;
}
static void
@@ -1490,14 +1410,8 @@ chipset_ready(struct controlvm_message_header *msg_hdr)
if (rc != CONTROLVM_RESP_SUCCESS)
rc = -rc;
- if (msg_hdr->flags.response_expected && !visorchipset_holdchipsetready)
+ if (msg_hdr->flags.response_expected)
controlvm_respond(msg_hdr, rc);
- if (msg_hdr->flags.response_expected && visorchipset_holdchipsetready) {
- /* Send CHIPSET_READY response when all modules have been loaded
- * and disks mounted for the partition
- */
- g_chipset_msg_hdr = *msg_hdr;
- }
}
static void
@@ -1726,9 +1640,10 @@ parahotplug_process_message(struct controlvm_message *inmsg)
* initialization.
*/
parahotplug_request_kickoff(req);
- controlvm_respond_physdev_changestate(&inmsg->hdr,
- CONTROLVM_RESP_SUCCESS,
- inmsg->cmd.device_change_state.state);
+ controlvm_respond_physdev_changestate
+ (&inmsg->hdr,
+ CONTROLVM_RESP_SUCCESS,
+ inmsg->cmd.device_change_state.state);
parahotplug_request_destroy(req);
} else {
/* For disable messages, add the request to the
@@ -1840,8 +1755,9 @@ handle_command(struct controlvm_message inmsg, u64 channel_addr)
break;
default:
if (inmsg.hdr.flags.response_expected)
- controlvm_respond(&inmsg.hdr,
- -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
+ controlvm_respond
+ (&inmsg.hdr,
+ -CONTROLVM_RESP_ERROR_MESSAGE_ID_UNKNOWN);
break;
}
@@ -1885,31 +1801,11 @@ controlvm_periodic_work(struct work_struct *work)
struct controlvm_message inmsg;
bool got_command = false;
bool handle_command_failed = false;
- static u64 poll_count;
/* make sure visorbus server is registered for controlvm callbacks */
if (visorchipset_visorbusregwait && !visorbusregistered)
goto cleanup;
- poll_count++;
- if (poll_count >= 250)
- ; /* keep going */
- else
- goto cleanup;
-
- /* Check events to determine if response to CHIPSET_READY
- * should be sent
- */
- if (visorchipset_holdchipsetready &&
- (g_chipset_msg_hdr.id != CONTROLVM_INVALID)) {
- if (check_chipset_events() == 1) {
- controlvm_respond(&g_chipset_msg_hdr, 0);
- clear_chipset_events();
- memset(&g_chipset_msg_hdr, 0,
- sizeof(struct controlvm_message_header));
- }
- }
-
while (visorchannel_signalremove(controlvm_channel,
CONTROLVM_QUEUE_RESPONSE,
&inmsg))
@@ -1979,8 +1875,11 @@ setup_crash_devices_work_queue(struct work_struct *work)
u16 local_crash_msg_count;
/* make sure visorbus is registered for controlvm callbacks */
- if (visorchipset_visorbusregwait && !visorbusregistered)
- goto cleanup;
+ if (visorchipset_visorbusregwait && !visorbusregistered) {
+ poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
+ schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);
+ return;
+ }
POSTCODE_LINUX_2(CRASH_DEV_ENTRY_PC, POSTCODE_SEVERITY_INFO);
@@ -2057,13 +1956,6 @@ setup_crash_devices_work_queue(struct work_struct *work)
return;
}
POSTCODE_LINUX_2(CRASH_DEV_EXIT_PC, POSTCODE_SEVERITY_INFO);
- return;
-
-cleanup:
-
- poll_jiffies = POLLJIFFIES_CONTROLVMCHANNEL_SLOW;
-
- schedule_delayed_work(&periodic_controlvm_work, poll_jiffies);
}
static void
@@ -2135,25 +2027,6 @@ device_resume_response(struct visor_device *dev_info, int response)
dev_info->pending_msg_hdr = NULL;
}
-static ssize_t chipsetready_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t count)
-{
- char msgtype[64];
-
- if (sscanf(buf, "%63s", msgtype) != 1)
- return -EINVAL;
-
- if (!strcmp(msgtype, "CALLHOMEDISK_MOUNTED")) {
- chipset_events[0] = 1;
- return count;
- } else if (!strcmp(msgtype, "MODULES_LOADED")) {
- chipset_events[1] = 1;
- return count;
- }
- return -EINVAL;
-}
-
/* The parahotplug/devicedisabled interface gets called by our support script
* when an SR-IOV device has been shut down. The ID is passed to the script
* and then passed back when the device has been removed.
@@ -2205,10 +2078,11 @@ visorchipset_mmap(struct file *file, struct vm_area_struct *vma)
if (!*file_controlvm_channel)
return -ENXIO;
- visorchannel_read(*file_controlvm_channel,
- offsetof(struct spar_controlvm_channel_protocol,
- gp_control_channel),
- &addr, sizeof(addr));
+ visorchannel_read
+ (*file_controlvm_channel,
+ offsetof(struct spar_controlvm_channel_protocol,
+ gp_control_channel),
+ &addr, sizeof(addr));
if (!addr)
return -ENXIO;
@@ -2308,16 +2182,25 @@ visorchipset_file_init(dev_t major_dev, struct visorchannel **controlvm_channel)
return 0;
}
+static void
+visorchipset_file_cleanup(dev_t major_dev)
+{
+ if (file_cdev.ops)
+ cdev_del(&file_cdev);
+ file_cdev.ops = NULL;
+ unregister_chrdev_region(major_dev, 1);
+}
+
static int
visorchipset_init(struct acpi_device *acpi_device)
{
- int rc = 0;
+ int err = -ENODEV;
u64 addr;
uuid_le uuid = SPAR_CONTROLVM_CHANNEL_PROTOCOL_UUID;
addr = controlvm_get_channel_address();
if (!addr)
- return -ENODEV;
+ goto error;
memset(&busdev_notifiers, 0, sizeof(busdev_notifiers));
memset(&controlvm_payload_info, 0, sizeof(controlvm_payload_info));
@@ -2325,24 +2208,19 @@ visorchipset_init(struct acpi_device *acpi_device)
controlvm_channel = visorchannel_create_with_lock(addr, 0,
GFP_KERNEL, uuid);
if (!controlvm_channel)
- return -ENODEV;
+ goto error;
+
if (SPAR_CONTROLVM_CHANNEL_OK_CLIENT(
visorchannel_get_header(controlvm_channel))) {
initialize_controlvm_payload();
} else {
- visorchannel_destroy(controlvm_channel);
- controlvm_channel = NULL;
- return -ENODEV;
+ goto error_destroy_channel;
}
major_dev = MKDEV(visorchipset_major, 0);
- rc = visorchipset_file_init(major_dev, &controlvm_channel);
- if (rc < 0) {
- POSTCODE_LINUX_2(CHIPSET_INIT_FAILURE_PC, DIAG_SEVERITY_ERR);
- goto cleanup;
- }
-
- memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
+ err = visorchipset_file_init(major_dev, &controlvm_channel);
+ if (err < 0)
+ goto error_destroy_payload;
/* if booting in a crash kernel */
if (is_kdump_kernel())
@@ -2359,27 +2237,33 @@ visorchipset_init(struct acpi_device *acpi_device)
visorchipset_platform_device.dev.devt = major_dev;
if (platform_device_register(&visorchipset_platform_device) < 0) {
POSTCODE_LINUX_2(DEVICE_REGISTER_FAILURE_PC, DIAG_SEVERITY_ERR);
- rc = -ENODEV;
- goto cleanup;
+ err = -ENODEV;
+ goto error_cancel_work;
}
POSTCODE_LINUX_2(CHIPSET_INIT_SUCCESS_PC, POSTCODE_SEVERITY_INFO);
- rc = visorbus_init();
-cleanup:
- if (rc) {
- POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, rc,
- POSTCODE_SEVERITY_ERR);
- }
- return rc;
-}
+ err = visorbus_init();
+ if (err < 0)
+ goto error_unregister;
-static void
-visorchipset_file_cleanup(dev_t major_dev)
-{
- if (file_cdev.ops)
- cdev_del(&file_cdev);
- file_cdev.ops = NULL;
- unregister_chrdev_region(major_dev, 1);
+ return 0;
+
+error_unregister:
+ platform_device_unregister(&visorchipset_platform_device);
+
+error_cancel_work:
+ cancel_delayed_work_sync(&periodic_controlvm_work);
+ visorchipset_file_cleanup(major_dev);
+
+error_destroy_payload:
+ destroy_controlvm_payload_info(&controlvm_payload_info);
+
+error_destroy_channel:
+ visorchannel_destroy(controlvm_channel);
+
+error:
+ POSTCODE_LINUX_3(CHIPSET_INIT_FAILURE_PC, err, POSTCODE_SEVERITY_ERR);
+ return err;
}
static int
@@ -2392,8 +2276,6 @@ visorchipset_exit(struct acpi_device *acpi_device)
cancel_delayed_work_sync(&periodic_controlvm_work);
destroy_controlvm_payload_info(&controlvm_payload_info);
- memset(&g_chipset_msg_hdr, 0, sizeof(struct controlvm_message_header));
-
visorchannel_destroy(controlvm_channel);
visorchipset_file_cleanup(visorchipset_platform_device.dev.devt);
@@ -2460,12 +2342,8 @@ module_param_named(major, visorchipset_major, int, S_IRUGO);
MODULE_PARM_DESC(visorchipset_major,
"major device number to use for the device node");
module_param_named(visorbusregwait, visorchipset_visorbusregwait, int, S_IRUGO);
-MODULE_PARM_DESC(visorchipset_visorbusreqwait,
+MODULE_PARM_DESC(visorchipset_visorbusregwait,
"1 to have the module wait for the visor bus to register");
-module_param_named(holdchipsetready, visorchipset_holdchipsetready,
- int, S_IRUGO);
-MODULE_PARM_DESC(visorchipset_holdchipsetready,
- "1 to hold response to CHIPSET_READY");
module_init(init_unisys);
module_exit(exit_unisys);
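
visorchipset_init() now unwinds with one label per acquired resource, taken in reverse order of acquisition, so every failure path releases exactly what has already been set up and nothing more. A generic sketch of that ladder follows; acquire_a/b/c and release_a/b are hypothetical stand-ins for the channel, payload, file and workqueue setup steps above.

/*
 * Sketch of the labelled error-unwind ladder; the acquire and release
 * helpers are hypothetical.
 */
static int acquire_a(void);
static int acquire_b(void);
static int acquire_c(void);
static void release_a(void);
static void release_b(void);

static int example_init(void)
{
	int err;

	err = acquire_a();
	if (err < 0)
		goto error;

	err = acquire_b();
	if (err < 0)
		goto error_release_a;

	err = acquire_c();
	if (err < 0)
		goto error_release_b;

	return 0; /* success: everything stays acquired */

error_release_b:
	release_b();
error_release_a:
	release_a();
error:
	return err;
}
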
diff --git a/drivers/staging/unisys/visorhba/visorhba_main.c b/drivers/staging/unisys/visorhba/visorhba_main.c
index e93bb1dbfd97..6a4570d10642 100644
--- a/drivers/staging/unisys/visorhba/visorhba_main.c
+++ b/drivers/staging/unisys/visorhba/visorhba_main.c
@@ -52,6 +52,8 @@ static int visorhba_resume(struct visor_device *dev,
static ssize_t info_debugfs_read(struct file *file, char __user *buf,
size_t len, loff_t *offset);
+static int set_no_disk_inquiry_result(unsigned char *buf,
+ size_t len, bool is_lun0);
static struct dentry *visorhba_debugfs_dir;
static const struct file_operations debugfs_info_fops = {
.read = info_debugfs_read,
@@ -83,12 +85,6 @@ static struct visor_driver visorhba_driver = {
MODULE_DEVICE_TABLE(visorbus, visorhba_channel_types);
MODULE_ALIAS("visorbus:" SPAR_VHBA_CHANNEL_PROTOCOL_UUID_STR);
-struct visor_thread_info {
- struct task_struct *task;
- struct completion has_stopped;
- int id;
-};
-
struct visordisk_info {
u32 valid;
u32 channel, id, lun; /* Disk Path */
@@ -135,7 +131,7 @@ struct visorhba_devdata {
struct visordisk_info head;
unsigned int max_buff_len;
int devnum;
- struct visor_thread_info threadinfo;
+ struct task_struct *thread;
int thread_wait_ms;
};
@@ -152,28 +148,36 @@ static struct visorhba_devices_open visorhbas_open[VISORHBA_OPEN_MAX];
(iter->lun == match->lun))
/**
* visor_thread_start - starts a thread for the device
- * @thrinfo: The thread to start
* @threadfn: Function the thread starts
* @thrcontext: Context to pass to the thread, i.e. devdata
* @name: string describing name of thread
*
* Starts a thread for the device.
*
- * Return 0 on success;
+ * Return the task_struct * denoting the thread on success,
+ * or NULL on failure
*/
-static int visor_thread_start(struct visor_thread_info *thrinfo,
- int (*threadfn)(void *),
- void *thrcontext, char *name)
+static struct task_struct *visor_thread_start
+(int (*threadfn)(void *), void *thrcontext, char *name)
{
- /* used to stop the thread */
- init_completion(&thrinfo->has_stopped);
- thrinfo->task = kthread_run(threadfn, thrcontext, "%s", name);
- if (IS_ERR(thrinfo->task)) {
- thrinfo->id = 0;
- return PTR_ERR(thrinfo->task);
+ struct task_struct *task;
+
+ task = kthread_run(threadfn, thrcontext, "%s", name);
+ if (IS_ERR(task)) {
+ pr_err("visorbus failed to start thread\n");
+ return NULL;
}
- thrinfo->id = thrinfo->task->pid;
- return 0;
+ return task;
+}
+
+/**
+ * visor_thread_stop - stops the thread if it is running
+ */
+static void visor_thread_stop(struct task_struct *task)
+{
+ if (!task)
+ return; /* no thread running */
+ kthread_stop(task);
}
/**
@@ -231,16 +235,17 @@ static void *del_scsipending_ent(struct visorhba_devdata *devdata,
int del)
{
unsigned long flags;
- void *sent = NULL;
+ void *sent;
- if (del < MAX_PENDING_REQUESTS) {
- spin_lock_irqsave(&devdata->privlock, flags);
- sent = devdata->pending[del].sent;
+ if (del >= MAX_PENDING_REQUESTS)
+ return NULL;
- devdata->pending[del].cmdtype = 0;
- devdata->pending[del].sent = NULL;
- spin_unlock_irqrestore(&devdata->privlock, flags);
- }
+ spin_lock_irqsave(&devdata->privlock, flags);
+ sent = devdata->pending[del].sent;
+
+ devdata->pending[del].cmdtype = 0;
+ devdata->pending[del].sent = NULL;
+ spin_unlock_irqrestore(&devdata->privlock, flags);
return sent;
}
@@ -681,7 +686,7 @@ static void visorhba_serverdown_complete(struct visorhba_devdata *devdata)
/* Stop using the IOVM response queue (queue should be drained
* by the end)
*/
- kthread_stop(devdata->threadinfo.task);
+ visor_thread_stop(devdata->thread);
/* Fail commands that weren't completed */
spin_lock_irqsave(&devdata->privlock, flags);
@@ -772,6 +777,24 @@ do_scsi_linuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
}
}
+static int set_no_disk_inquiry_result(unsigned char *buf,
+ size_t len, bool is_lun0)
+{
+ if (!buf || len < NO_DISK_INQUIRY_RESULT_LEN)
+ return -EINVAL;
+ memset(buf, 0, NO_DISK_INQUIRY_RESULT_LEN);
+ buf[2] = SCSI_SPC2_VER;
+ if (is_lun0) {
+ buf[0] = DEV_DISK_CAPABLE_NOT_PRESENT;
+ buf[3] = DEV_HISUPPORT;
+ } else {
+ buf[0] = DEV_NOT_CAPABLE;
+ }
+ buf[4] = NO_DISK_INQUIRY_RESULT_LEN - 5;
+ strncpy(buf + 8, "DELLPSEUDO DEVICE .", NO_DISK_INQUIRY_RESULT_LEN - 8);
+ return 0;
+}
+
/**
* do_scsi_nolinuxstat - scsi command didn't have linuxstat
* @cmdrsp: response from IOVM
@@ -804,10 +827,8 @@ do_scsi_nolinuxstat(struct uiscmdrsp *cmdrsp, struct scsi_cmnd *scsicmd)
* a disk there so we'll present a processor
* there.
*/
- SET_NO_DISK_INQUIRY_RESULT(buf, cmdrsp->scsi.bufflen,
- scsidev->lun,
- DEV_DISK_CAPABLE_NOT_PRESENT,
- DEV_NOT_CAPABLE);
+ set_no_disk_inquiry_result(buf, (size_t)cmdrsp->scsi.bufflen,
+ scsidev->lun == 0);
if (scsi_sg_count(scsicmd) == 0) {
memcpy(scsi_sglist(scsicmd), buf,
@@ -929,14 +950,15 @@ static void process_disk_notify(struct Scsi_Host *shost,
struct diskaddremove *dar;
dar = kzalloc(sizeof(*dar), GFP_ATOMIC);
- if (dar) {
- dar->add = cmdrsp->disknotify.add;
- dar->shost = shost;
- dar->channel = cmdrsp->disknotify.channel;
- dar->id = cmdrsp->disknotify.id;
- dar->lun = cmdrsp->disknotify.lun;
- queue_disk_add_remove(dar);
- }
+ if (!dar)
+ return;
+
+ dar->add = cmdrsp->disknotify.add;
+ dar->shost = shost;
+ dar->channel = cmdrsp->disknotify.channel;
+ dar->id = cmdrsp->disknotify.id;
+ dar->lun = cmdrsp->disknotify.lun;
+ queue_disk_add_remove(dar);
}
/**
@@ -1064,8 +1086,8 @@ static int visorhba_resume(struct visor_device *dev,
if (devdata->serverdown && !devdata->serverchangingstate)
devdata->serverchangingstate = true;
- visor_thread_start(&devdata->threadinfo, process_incoming_rsps,
- devdata, "vhba_incming");
+ devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
+ "vhba_incming");
devdata->serverdown = false;
devdata->serverchangingstate = false;
@@ -1141,8 +1163,8 @@ static int visorhba_probe(struct visor_device *dev)
goto err_scsi_remove_host;
devdata->thread_wait_ms = 2;
- visor_thread_start(&devdata->threadinfo, process_incoming_rsps,
- devdata, "vhba_incoming");
+ devdata->thread = visor_thread_start(process_incoming_rsps, devdata,
+ "vhba_incoming");
scsi_scan_host(scsihost);
@@ -1172,7 +1194,7 @@ static void visorhba_remove(struct visor_device *dev)
return;
scsihost = devdata->scsihost;
- kthread_stop(devdata->threadinfo.task);
+ visor_thread_stop(devdata->thread);
scsi_remove_host(scsihost);
scsi_host_put(scsihost);
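
visorhba now tracks its polling thread as a bare task_struct pointer and wraps kthread_run()/kthread_stop() in visor_thread_start()/visor_thread_stop(). For kthread_stop() to return, the thread function has to poll kthread_should_stop() and exit its loop; a minimal sketch of that pairing, with a made-up thread body, is:

#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

/* Placeholder thread body: poll until asked to stop. */
static int example_thread_fn(void *context)
{
	while (!kthread_should_stop()) {
		/* poll the channel, process responses, ... */
		msleep(2);
	}
	return 0;
}

static struct task_struct *example_thread_start(void *context)
{
	struct task_struct *task;

	task = kthread_run(example_thread_fn, context, "example_thread");
	if (IS_ERR(task))
		return NULL; /* caller treats NULL as "no thread running" */
	return task;
}

static void example_thread_stop(struct task_struct *task)
{
	if (!task)
		return;
	kthread_stop(task); /* blocks until example_thread_fn() returns */
}
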
diff --git a/drivers/staging/unisys/visorinput/visorinput.c b/drivers/staging/unisys/visorinput/visorinput.c
index 13c0316112ac..12a3570780fc 100644
--- a/drivers/staging/unisys/visorinput/visorinput.c
+++ b/drivers/staging/unisys/visorinput/visorinput.c
@@ -123,9 +123,9 @@ static const unsigned char visorkbd_keycode[KEYCODE_TABLE_BYTES] = {
[38] = KEY_L,
[39] = KEY_SEMICOLON,
[40] = KEY_APOSTROPHE,
- [41] = KEY_GRAVE, /* FIXME, '#' */
+ [41] = KEY_GRAVE,
[42] = KEY_LEFTSHIFT,
- [43] = KEY_BACKSLASH, /* FIXME, '~' */
+ [43] = KEY_BACKSLASH,
[44] = KEY_Z,
[45] = KEY_X,
[46] = KEY_C,
@@ -173,7 +173,7 @@ static const unsigned char visorkbd_keycode[KEYCODE_TABLE_BYTES] = {
[88] = KEY_F12,
[90] = KEY_KPLEFTPAREN,
[91] = KEY_KPRIGHTPAREN,
- [92] = KEY_KPASTERISK, /* FIXME */
+ [92] = KEY_KPASTERISK,
[93] = KEY_KPASTERISK,
[94] = KEY_KPPLUS,
[95] = KEY_HELP,
@@ -467,18 +467,14 @@ handle_locking_key(struct input_dev *visorinput_dev,
break;
default:
led = -1;
- break;
+ return;
}
- if (led >= 0) {
- int old_state = (test_bit(led, visorinput_dev->led) != 0);
-
- if (old_state != desired_state) {
- input_report_key(visorinput_dev, keycode, 1);
- input_sync(visorinput_dev);
- input_report_key(visorinput_dev, keycode, 0);
- input_sync(visorinput_dev);
- __change_bit(led, visorinput_dev->led);
- }
+ if (test_bit(led, visorinput_dev->led) != desired_state) {
+ input_report_key(visorinput_dev, keycode, 1);
+ input_sync(visorinput_dev);
+ input_report_key(visorinput_dev, keycode, 0);
+ input_sync(visorinput_dev);
+ __change_bit(led, visorinput_dev->led);
}
}
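
handle_locking_key() now returns straight out of the default case and toggles an LED only when the recorded state differs from the state the host asked for, by emulating a key press and release and then flipping the bit. A compact sketch of that pattern, with the device, LED number and keycode assumed to be set up elsewhere, looks like this:

#include <linux/input.h>

/*
 * Sync one locking-key LED to a requested state by emulating a key
 * press/release; dev, led and keycode are assumed to be valid.
 */
static void example_sync_led(struct input_dev *dev, int led,
			     unsigned int keycode, bool desired_state)
{
	bool current_state = test_bit(led, dev->led);

	if (current_state == desired_state)
		return; /* already where the host wants it */

	input_report_key(dev, keycode, 1);
	input_sync(dev);
	input_report_key(dev, keycode, 0);
	input_sync(dev);
	__change_bit(led, dev->led);
}
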
diff --git a/drivers/staging/unisys/visornic/visornic_main.c b/drivers/staging/unisys/visornic/visornic_main.c
index be0d057346c3..fd7c9a6cb6f3 100644
--- a/drivers/staging/unisys/visornic/visornic_main.c
+++ b/drivers/staging/unisys/visornic/visornic_main.c
@@ -109,51 +109,46 @@ struct chanstat {
};
struct visornic_devdata {
- unsigned short enabled; /* 0 disabled 1 enabled to receive */
- unsigned short enab_dis_acked; /* NET_RCV_ENABLE/DISABLE acked by
- * IOPART
- */
+ /* 0 disabled 1 enabled to receive */
+ unsigned short enabled;
+ /* NET_RCV_ENABLE/DISABLE acked by IOPART */
+ unsigned short enab_dis_acked;
+
struct visor_device *dev;
struct net_device *netdev;
struct net_device_stats net_stats;
atomic_t interrupt_rcvd;
wait_queue_head_t rsp_queue;
struct sk_buff **rcvbuf;
- u64 incarnation_id; /* lets IOPART know about re-birth */
- unsigned short old_flags; /* flags as they were prior to
- * set_multicast_list
- */
- atomic_t usage; /* count of users */
- int num_rcv_bufs; /* indicates how many rcv buffers
- * the vnic will post
- */
+ /* incarnation_id lets IOPART know about re-birth */
+ u64 incarnation_id;
+ /* flags as they were prior to set_multicast_list */
+ unsigned short old_flags;
+ atomic_t usage; /* count of users */
+
+ /* number of rcv buffers the vnic will post */
+ int num_rcv_bufs;
int num_rcv_bufs_could_not_alloc;
atomic_t num_rcvbuf_in_iovm;
unsigned long alloc_failed_in_if_needed_cnt;
unsigned long alloc_failed_in_repost_rtn_cnt;
- unsigned long max_outstanding_net_xmits; /* absolute max number of
- * outstanding xmits - should
- * never hit this
- */
- unsigned long upper_threshold_net_xmits; /* high water mark for
- * calling netif_stop_queue()
- */
- unsigned long lower_threshold_net_xmits; /* high water mark for calling
- * netif_wake_queue()
- */
- struct sk_buff_head xmitbufhead; /* xmitbufhead is the head of the
- * xmit buffer list that have been
- * sent to the IOPART end
- */
+
+ /* absolute max number of outstanding xmits - should never hit this */
+ unsigned long max_outstanding_net_xmits;
+ /* high water mark for calling netif_stop_queue() */
+ unsigned long upper_threshold_net_xmits;
+ /* high water mark for calling netif_wake_queue() */
+ unsigned long lower_threshold_net_xmits;
+ /* xmitbufhead - head of the xmit buffer list sent to the IOPART end */
+ struct sk_buff_head xmitbufhead;
+
visorbus_state_complete_func server_down_complete_func;
struct work_struct timeout_reset;
- struct uiscmdrsp *cmdrsp_rcv; /* cmdrsp_rcv is used for
- * posting/unposting rcv buffers
- */
- struct uiscmdrsp *xmit_cmdrsp; /* used to issue NET_XMIT - there is
- * never more that one xmit in
- * progress at a time
- */
+ /* cmdrsp_rcv is used for posting/unposting rcv buffers */
+ struct uiscmdrsp *cmdrsp_rcv;
+ /* xmit_cmdrsp - issues NET_XMIT - only one active xmit at a time */
+ struct uiscmdrsp *xmit_cmdrsp;
+
bool server_down; /* IOPART is down */
bool server_change_state; /* Processing SERVER_CHANGESTATE msg */
bool going_away; /* device is being torn down */
@@ -173,18 +168,10 @@ struct visornic_devdata {
unsigned long n_rcv1; /* # rcvs of 1 buffers */
unsigned long n_rcv2; /* # rcvs of 2 buffers */
unsigned long n_rcvx; /* # rcvs of >2 buffers */
- unsigned long found_repost_rcvbuf_cnt; /* # times we called
- * repost_rcvbuf_cnt
- */
- unsigned long repost_found_skb_cnt; /* # times found the skb */
- unsigned long n_repost_deficit; /* # times we couldn't find
- * all of the rcv buffers
- */
- unsigned long bad_rcv_buf; /* # times we negleted to
- * free the rcv skb because
- * we didn't know where it
- * came from
- */
+ unsigned long found_repost_rcvbuf_cnt; /* # repost_rcvbuf_cnt */
+ unsigned long repost_found_skb_cnt; /* # times the skb was found */
+ unsigned long n_repost_deficit; /* # of lost rcv buffers */
+ unsigned long bad_rcv_buf; /* # of unknown rcv skb not freed */
unsigned long n_rcv_packets_not_accepted; /* # bogus rcv packets */
int queuefullmsg_logged;
@@ -209,18 +196,17 @@ static void poll_for_irq(unsigned long v);
* Return value indicates number of entries filled in frags
* Negative values indicate an error.
*/
-static unsigned int
+static int
visor_copy_fragsinfo_from_skb(struct sk_buff *skb, unsigned int firstfraglen,
unsigned int frags_max,
struct phys_info frags[])
{
- unsigned int count = 0, ii, size, offset = 0, numfrags;
+ unsigned int count = 0, frag, size, offset = 0, numfrags;
unsigned int total_count;
numfrags = skb_shinfo(skb)->nr_frags;
- /*
- * Compute the number of fragments this skb has, and if its more than
+ /* Compute the number of fragments this skb has, and if it's more than
 * the frag array can hold, linearize the skb
*/
total_count = numfrags + (firstfraglen / PI_PAGE_SIZE);
@@ -257,23 +243,20 @@ visor_copy_fragsinfo_from_skb(struct sk_buff *skb, unsigned int firstfraglen,
if ((count + numfrags) > frags_max)
return -EINVAL;
- for (ii = 0; ii < numfrags; ii++) {
+ for (frag = 0; frag < numfrags; frag++) {
count = add_physinfo_entries(page_to_pfn(
- skb_frag_page(&skb_shinfo(skb)->frags[ii])),
- skb_shinfo(skb)->frags[ii].
+ skb_frag_page(&skb_shinfo(skb)->frags[frag])),
+ skb_shinfo(skb)->frags[frag].
page_offset,
- skb_shinfo(skb)->frags[ii].
+ skb_shinfo(skb)->frags[frag].
size, count, frags_max, frags);
- /*
- * add_physinfo_entries only returns
+ /* add_physinfo_entries only returns
* zero if the frags array is out of room
* That should never happen because we
* fail above, if count+numfrags > frags_max.
- * Given that theres no recovery mechanism from putting
- * half a packet in the I/O channel, panic here as this
- * should never happen
*/
- BUG_ON(!count);
+ if (!count)
+ return -EINVAL;
}
}
if (skb_shinfo(skb)->frag_list) {
@@ -299,8 +282,7 @@ static ssize_t enable_ints_write(struct file *file,
const char __user *buffer,
size_t count, loff_t *ppos)
{
- /*
- * Don't want to break ABI here by having a debugfs
+ /* Don't want to break ABI here by having a debugfs
* file that no longer exists or is writable, so
* let's just make this a vestigial function
*/
@@ -308,8 +290,7 @@ static ssize_t enable_ints_write(struct file *file,
}
/**
- * visornic_serverdown_complete - IOPART went down, need to pause
- * device
+ * visornic_serverdown_complete - IOPART went down, pause device
* @work: Work queue it was scheduled on
*
* The IO partition has gone down and we need to do some cleanup
@@ -344,7 +325,7 @@ visornic_serverdown_complete(struct visornic_devdata *devdata)
}
/**
- * visornic_serverdown - Command has notified us that IOPARt is down
+ * visornic_serverdown - Command has notified us that IOPART is down
* @devdata: device that is being managed by IOPART
*
* Schedule the work needed to handle the server down request. Make
@@ -356,28 +337,38 @@ visornic_serverdown(struct visornic_devdata *devdata,
visorbus_state_complete_func complete_func)
{
unsigned long flags;
+ int err;
spin_lock_irqsave(&devdata->priv_lock, flags);
- if (!devdata->server_down && !devdata->server_change_state) {
- if (devdata->going_away) {
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- dev_dbg(&devdata->dev->device,
- "%s aborting because device removal pending\n",
- __func__);
- return -ENODEV;
- }
- devdata->server_change_state = true;
- devdata->server_down_complete_func = complete_func;
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- visornic_serverdown_complete(devdata);
- } else if (devdata->server_change_state) {
+ if (devdata->server_change_state) {
dev_dbg(&devdata->dev->device, "%s changing state\n",
__func__);
- spin_unlock_irqrestore(&devdata->priv_lock, flags);
- return -EINVAL;
+ err = -EINVAL;
+ goto err_unlock;
+ }
+ if (devdata->server_down) {
+ dev_dbg(&devdata->dev->device, "%s already down\n",
+ __func__);
+ err = -EINVAL;
+ goto err_unlock;
+ }
+ if (devdata->going_away) {
+ dev_dbg(&devdata->dev->device,
+ "%s aborting because device removal pending\n",
+ __func__);
+ err = -ENODEV;
+ goto err_unlock;
}
+ devdata->server_change_state = true;
+ devdata->server_down_complete_func = complete_func;
spin_unlock_irqrestore(&devdata->priv_lock, flags);
+
+ visornic_serverdown_complete(devdata);
return 0;
+
+err_unlock:
+ spin_unlock_irqrestore(&devdata->priv_lock, flags);
+ return err;
}
/**
@@ -395,20 +386,19 @@ alloc_rcv_buf(struct net_device *netdev)
/* NOTE: the first fragment in each rcv buffer is pointed to by
* rcvskb->data. For now all rcv buffers will be RCVPOST_BUF_SIZE
- * in length, so the firstfrag is large enough to hold 1514.
+ * in length, so the first frag is large enough to hold 1514.
*/
skb = alloc_skb(RCVPOST_BUF_SIZE, GFP_ATOMIC);
if (!skb)
return NULL;
skb->dev = netdev;
- skb->len = RCVPOST_BUF_SIZE;
/* current value of mtu doesn't come into play here; large
* packets will just end up using multiple rcv buffers all of
- * same size
+ * same size.
*/
- skb->data_len = 0; /* dev_alloc_skb already zeroes it out
- * for clarification.
- */
+ skb->len = RCVPOST_BUF_SIZE;
+ /* alloc_skb already zeroes it out; set explicitly for clarity. */
+ skb->data_len = 0;
return skb;
}
@@ -436,8 +426,8 @@ post_skb(struct uiscmdrsp *cmdrsp,
cmdrsp->net.type = NET_RCV_POST;
cmdrsp->cmdtype = CMD_NET_TYPE;
if (visorchannel_signalinsert(devdata->dev->visorchannel,
- IOCHAN_TO_IOPART,
- cmdrsp)) {
+ IOCHAN_TO_IOPART,
+ cmdrsp)) {
atomic_inc(&devdata->num_rcvbuf_in_iovm);
devdata->chstat.sent_post++;
} else {
@@ -465,8 +455,8 @@ send_enbdis(struct net_device *netdev, int state,
devdata->cmdrsp_rcv->net.type = NET_RCV_ENBDIS;
devdata->cmdrsp_rcv->cmdtype = CMD_NET_TYPE;
if (visorchannel_signalinsert(devdata->dev->visorchannel,
- IOCHAN_TO_IOPART,
- devdata->cmdrsp_rcv))
+ IOCHAN_TO_IOPART,
+ devdata->cmdrsp_rcv))
devdata->chstat.sent_enbdis++;
}
@@ -872,8 +862,7 @@ visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
if (vnic_hit_high_watermark(devdata,
devdata->max_outstanding_net_xmits)) {
- /* too many NET_XMITs queued over to IOVM - need to wait
- */
+ /* extra NET_XMITs queued over to IOVM - need to wait */
devdata->chstat.reject_count++;
if (!devdata->queuefullmsg_logged &&
((devdata->chstat.reject_count & 0x3ff) == 1))
@@ -950,16 +939,12 @@ visornic_xmit(struct sk_buff *skb, struct net_device *netdev)
devdata->net_stats.tx_bytes += skb->len;
devdata->chstat.sent_xmit++;
- /* check to see if we have hit the high watermark for
- * netif_stop_queue()
- */
+ /* check if we have hit the high watermark for netif_stop_queue() */
if (vnic_hit_high_watermark(devdata,
devdata->upper_threshold_net_xmits)) {
- /* too many NET_XMITs queued over to IOVM - need to wait */
- netif_stop_queue(netdev); /* calling stop queue - call
- * netif_wake_queue() after lower
- * threshold
- */
+ /* extra NET_XMITs queued over to IOVM - need to wait */
+ /* stop queue - call netif_wake_queue() after lower threshold */
+ netif_stop_queue(netdev);
dev_dbg(&netdev->dev,
"%s busy - invoking iovm flow control\n",
__func__);
@@ -1312,16 +1297,13 @@ visornic_rx(struct uiscmdrsp *cmdrsp)
break;
}
}
+ /* accept pkt, dest matches a multicast addr */
if (found_mc)
- break; /* accept packet, dest
- * matches a multicast
- * address
- */
+ break;
}
+ /* accept packet, h_dest must match vnic mac address */
} else if (skb->pkt_type == PACKET_HOST) {
- break; /* accept packet, h_dest must match vnic
- * mac address
- */
+ break;
} else if (skb->pkt_type == PACKET_OTHERHOST) {
/* something is not right */
dev_err(&devdata->netdev->dev,
@@ -1409,14 +1391,10 @@ static ssize_t info_debugfs_read(struct file *file, char __user *buf,
if (!vbuf)
return -ENOMEM;
- /* for each vnic channel
- * dump out channel specific data
- */
+ /* for each vnic channel dump out channel specific data */
rcu_read_lock();
for_each_netdev_rcu(current->nsproxy->net_ns, dev) {
- /*
- * Only consider netdevs that are visornic, and are open
- */
+ /* Only consider netdevs that are visornic, and are open */
if ((dev->netdev_ops != &visornic_dev_ops) ||
(!netif_queue_stopped(dev)))
continue;
@@ -1643,12 +1621,12 @@ service_resp_queue(struct uiscmdrsp *cmdrsp, struct visornic_devdata *devdata,
/* ASSERT netdev == vnicinfo->netdev; */
if ((netdev == devdata->netdev) &&
netif_queue_stopped(netdev)) {
- /* check to see if we have crossed
- * the lower watermark for
- * netif_wake_queue()
+ /* check if we have crossed the lower watermark
+ * for netif_wake_queue()
*/
- if (vnic_hit_low_watermark(devdata,
- devdata->lower_threshold_net_xmits)) {
+ if (vnic_hit_low_watermark
+ (devdata,
+ devdata->lower_threshold_net_xmits)) {
/* enough NET_XMITs completed
* so can restart netif queue
*/
@@ -1712,10 +1690,7 @@ static int visornic_poll(struct napi_struct *napi, int budget)
send_rcv_posts_if_needed(devdata);
service_resp_queue(devdata->cmdrsp, devdata, &rx_count, budget);
- /*
- * If there aren't any more packets to receive
- * stop the poll
- */
+ /* If there aren't any more packets to receive stop the poll */
if (rx_count < budget)
napi_complete(napi);
@@ -1867,8 +1842,7 @@ static int visornic_probe(struct visor_device *dev)
setup_timer(&devdata->irq_poll_timer, poll_for_irq,
(unsigned long)devdata);
- /*
- * Note: This time has to start running before the while
+ /* Note: This timer has to start running before the while
* loop below because the napi routine is responsible for
* setting enab_dis_acked
*/
@@ -1897,8 +1871,7 @@ static int visornic_probe(struct visor_device *dev)
/* Let's start our threads to get responses */
netif_napi_add(netdev, &devdata->napi, visornic_poll, NAPI_WEIGHT);
- /*
- * Note: Interupts have to be enable before the while
+ /* Note: Interrupts have to be enabled before the while
* loop below because the napi routine is responsible for
* setting enab_dis_acked
*/
diff --git a/drivers/staging/vme/devices/vme_pio2_gpio.c b/drivers/staging/vme/devices/vme_pio2_gpio.c
index 6d361201d98c..ba9fe3bc2642 100644
--- a/drivers/staging/vme/devices/vme_pio2_gpio.c
+++ b/drivers/staging/vme/devices/vme_pio2_gpio.c
@@ -92,7 +92,7 @@ static void pio2_gpio_set(struct gpio_chip *chip,
}
/* Directionality configured at board build - send appropriate response */
-static int pio2_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
+static int pio2_gpio_dir_in(struct gpio_chip *chip, unsigned int offset)
{
int data;
struct pio2_card *card = gpiochip_get_data(chip);
@@ -111,7 +111,8 @@ static int pio2_gpio_dir_in(struct gpio_chip *chip, unsigned offset)
}
/* Directionality configured at board build - send appropriate response */
-static int pio2_gpio_dir_out(struct gpio_chip *chip, unsigned offset, int value)
+static int pio2_gpio_dir_out(struct gpio_chip *chip,
+ unsigned int offset, int value)
{
int data;
struct pio2_card *card = gpiochip_get_data(chip);
diff --git a/drivers/staging/vt6655/baseband.c b/drivers/staging/vt6655/baseband.c
index 1e6c0c4a0307..654d072bdc28 100644
--- a/drivers/staging/vt6655/baseband.c
+++ b/drivers/staging/vt6655/baseband.c
@@ -36,8 +36,10 @@
* Revision History:
* 06-10-2003 Bryan YC Fan: Re-write codes to support VT3253 spec.
* 08-07-2003 Bryan YC Fan: Add MAXIM2827/2825 and RFMD2959 support.
- * 08-26-2003 Kyle Hsu : Modify BBuGetFrameTime() and BBvCalculateParameter().
- * cancel the setting of MAC_REG_SOFTPWRCTL on BBbVT3253Init().
+ * 08-26-2003 Kyle Hsu : Modify BBuGetFrameTime() and
+ * BBvCalculateParameter().
+ * cancel the setting of MAC_REG_SOFTPWRCTL on
+ * BBbVT3253Init().
* Add the comments.
* 09-01-2003 Bryan YC Fan: RF & BB tables updated.
* Modified BBvLoopbackOn & BBvLoopbackOff().
@@ -66,7 +68,7 @@
/*--------------------- Static Variables --------------------------*/
#define CB_VT3253_INIT_FOR_RFMD 446
-static unsigned char byVT3253InitTab_RFMD[CB_VT3253_INIT_FOR_RFMD][2] = {
+static const unsigned char byVT3253InitTab_RFMD[CB_VT3253_INIT_FOR_RFMD][2] = {
{0x00, 0x30},
{0x01, 0x00},
{0x02, 0x00},
@@ -516,7 +518,7 @@ static unsigned char byVT3253InitTab_RFMD[CB_VT3253_INIT_FOR_RFMD][2] = {
};
#define CB_VT3253B0_INIT_FOR_RFMD 256
-static unsigned char byVT3253B0_RFMD[CB_VT3253B0_INIT_FOR_RFMD][2] = {
+static const unsigned char byVT3253B0_RFMD[CB_VT3253B0_INIT_FOR_RFMD][2] = {
{0x00, 0x31},
{0x01, 0x00},
{0x02, 0x00},
@@ -777,7 +779,8 @@ static unsigned char byVT3253B0_RFMD[CB_VT3253B0_INIT_FOR_RFMD][2] = {
#define CB_VT3253B0_AGC_FOR_RFMD2959 195
/* For RFMD2959 */
-static unsigned char byVT3253B0_AGC4_RFMD2959[CB_VT3253B0_AGC_FOR_RFMD2959][2] = {
+static
+unsigned char byVT3253B0_AGC4_RFMD2959[CB_VT3253B0_AGC_FOR_RFMD2959][2] = {
{0xF0, 0x00},
{0xF1, 0x3E},
{0xF0, 0x80},
@@ -977,7 +980,8 @@ static unsigned char byVT3253B0_AGC4_RFMD2959[CB_VT3253B0_AGC_FOR_RFMD2959][2] =
#define CB_VT3253B0_INIT_FOR_AIROHA2230 256
/* For AIROHA */
-static unsigned char byVT3253B0_AIROHA2230[CB_VT3253B0_INIT_FOR_AIROHA2230][2] = {
+static
+unsigned char byVT3253B0_AIROHA2230[CB_VT3253B0_INIT_FOR_AIROHA2230][2] = {
{0x00, 0x31},
{0x01, 0x00},
{0x02, 0x00},
@@ -2160,9 +2164,13 @@ bool BBbVT3253Init(struct vnt_private *priv)
/* {{ RobertYu:20050223, request by JerryChung */
- /* Init ANT B select,TX Config CR09 = 0x61->0x45, 0x45->0x41(VC1/VC2 define, make the ANT_A, ANT_B inverted) */
+ /* Init ANT B select, TX Config CR09 = 0x61->0x45,
+ * 0x45->0x41(VC1/VC2 define, make the ANT_A, ANT_B inverted)
+ */
/*bResult &= BBbWriteEmbedded(dwIoBase,0x09,0x41);*/
- /* Init ANT B select,RX Config CR10 = 0x28->0x2A, 0x2A->0x28(VC1/VC2 define, make the ANT_A, ANT_B inverted) */
+ /* Init ANT B select, RX Config CR10 = 0x28->0x2A,
+ * 0x2A->0x28(VC1/VC2 define, make the ANT_A, ANT_B inverted)
+ */
/*bResult &= BBbWriteEmbedded(dwIoBase,0x0a,0x28);*/
/* Select VC1/VC2, CR215 = 0x02->0x06 */
bResult &= BBbWriteEmbedded(priv, 0xd7, 0x06);
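Aside on the byVT3253InitTab_RFMD and byVT3253B0_RFMD hunks above: adding const to these register-initialization tables lets the compiler place them in read-only data and documents that they are never modified at runtime. A trivial sketch of the pattern, with an illustrative table that is not taken from the driver:

#include <linux/kernel.h>

/* Small read-only {register, value} init table (illustrative contents). */
static const unsigned char demo_init_tab[][2] = {
	{0x00, 0x30},
	{0x01, 0x00},
};

static void demo_apply(void (*write_reg)(unsigned char addr, unsigned char val))
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(demo_init_tab); i++)
		write_reg(demo_init_tab[i][0], demo_init_tab[i][1]);
}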
diff --git a/drivers/staging/vt6655/baseband.h b/drivers/staging/vt6655/baseband.h
index 43a4fb1f3570..b4e8c43180ec 100644
--- a/drivers/staging/vt6655/baseband.h
+++ b/drivers/staging/vt6655/baseband.h
@@ -77,8 +77,10 @@ BBuGetFrameTime(
void vnt_get_phy_field(struct vnt_private *, u32 frame_length,
u16 tx_rate, u8 pkt_type, struct vnt_phy_field *);
-bool BBbReadEmbedded(struct vnt_private *, unsigned char byBBAddr, unsigned char *pbyData);
-bool BBbWriteEmbedded(struct vnt_private *, unsigned char byBBAddr, unsigned char byData);
+bool BBbReadEmbedded(struct vnt_private *, unsigned char byBBAddr,
+ unsigned char *pbyData);
+bool BBbWriteEmbedded(struct vnt_private *, unsigned char byBBAddr,
+ unsigned char byData);
void BBvSetShortSlotTime(struct vnt_private *);
void BBvSetVGAGainOffset(struct vnt_private *, unsigned char byData);
diff --git a/drivers/staging/vt6655/card.c b/drivers/staging/vt6655/card.c
index 3d338122b590..afb1e8bde975 100644
--- a/drivers/staging/vt6655/card.c
+++ b/drivers/staging/vt6655/card.c
@@ -336,7 +336,8 @@ bool CARDbSetPhyParameter(struct vnt_private *priv, u8 bb_type)
}
if (priv->byCWMaxMin != byCWMaxMin) {
priv->byCWMaxMin = byCWMaxMin;
- VNSvOutPortB(priv->PortOffset + MAC_REG_CWMAXMIN0, priv->byCWMaxMin);
+ VNSvOutPortB(priv->PortOffset + MAC_REG_CWMAXMIN0,
+ priv->byCWMaxMin);
}
priv->byPacketType = CARDbyGetPktType(priv);
@@ -373,9 +374,12 @@ bool CARDbUpdateTSF(struct vnt_private *priv, unsigned char byRxRate,
qwTSFOffset = CARDqGetTSFOffset(byRxRate, qwBSSTimestamp,
local_tsf);
/* adjust TSF, HW's TSF add TSF Offset reg */
- VNSvOutPortD(priv->PortOffset + MAC_REG_TSFOFST, (u32)qwTSFOffset);
- VNSvOutPortD(priv->PortOffset + MAC_REG_TSFOFST + 4, (u32)(qwTSFOffset >> 32));
- MACvRegBitsOn(priv->PortOffset, MAC_REG_TFTCTL, TFTCTL_TSFSYNCEN);
+ VNSvOutPortD(priv->PortOffset + MAC_REG_TSFOFST,
+ (u32)qwTSFOffset);
+ VNSvOutPortD(priv->PortOffset + MAC_REG_TSFOFST + 4,
+ (u32)(qwTSFOffset >> 32));
+ MACvRegBitsOn(priv->PortOffset, MAC_REG_TFTCTL,
+ TFTCTL_TSFSYNCEN);
}
return true;
}
@@ -407,7 +411,8 @@ bool CARDbSetBeaconPeriod(struct vnt_private *priv,
priv->wBeaconInterval = wBeaconInterval;
/* Set NextTBTT */
VNSvOutPortD(priv->PortOffset + MAC_REG_NEXTTBTT, (u32)qwNextTBTT);
- VNSvOutPortD(priv->PortOffset + MAC_REG_NEXTTBTT + 4, (u32)(qwNextTBTT >> 32));
+ VNSvOutPortD(priv->PortOffset + MAC_REG_NEXTTBTT + 4,
+ (u32)(qwNextTBTT >> 32));
MACvRegBitsOn(priv->PortOffset, MAC_REG_TFTCTL, TFTCTL_TBTTSYNCEN);
return true;
@@ -433,15 +438,19 @@ bool CARDbRadioPowerOff(struct vnt_private *priv)
switch (priv->byRFType) {
case RF_RFMD2959:
- MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_TXPEINV);
- MACvWordRegBitsOn(priv->PortOffset, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE1);
+ MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL,
+ SOFTPWRCTL_TXPEINV);
+ MACvWordRegBitsOn(priv->PortOffset, MAC_REG_SOFTPWRCTL,
+ SOFTPWRCTL_SWPE1);
break;
case RF_AIROHA:
case RF_AL2230S:
case RF_AIROHA7230:
- MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE2);
- MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE3);
+ MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL,
+ SOFTPWRCTL_SWPE2);
+ MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL,
+ SOFTPWRCTL_SWPE3);
break;
}
@@ -451,7 +460,8 @@ bool CARDbRadioPowerOff(struct vnt_private *priv)
priv->bRadioOff = true;
pr_debug("chester power off\n");
- MACvRegBitsOn(priv->PortOffset, MAC_REG_GPIOCTL0, LED_ACTSET); /* LED issue */
+ MACvRegBitsOn(priv->PortOffset, MAC_REG_GPIOCTL0,
+ LED_ACTSET); /* LED issue */
return bResult;
}
@@ -488,21 +498,24 @@ bool CARDbRadioPowerOn(struct vnt_private *priv)
switch (priv->byRFType) {
case RF_RFMD2959:
- MACvWordRegBitsOn(priv->PortOffset, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_TXPEINV);
- MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL, SOFTPWRCTL_SWPE1);
+ MACvWordRegBitsOn(priv->PortOffset, MAC_REG_SOFTPWRCTL,
+ SOFTPWRCTL_TXPEINV);
+ MACvWordRegBitsOff(priv->PortOffset, MAC_REG_SOFTPWRCTL,
+ SOFTPWRCTL_SWPE1);
break;
case RF_AIROHA:
case RF_AL2230S:
case RF_AIROHA7230:
- MACvWordRegBitsOn(priv->PortOffset, MAC_REG_SOFTPWRCTL, (SOFTPWRCTL_SWPE2 |
- SOFTPWRCTL_SWPE3));
+ MACvWordRegBitsOn(priv->PortOffset, MAC_REG_SOFTPWRCTL,
+ (SOFTPWRCTL_SWPE2 | SOFTPWRCTL_SWPE3));
break;
}
priv->bRadioOff = false;
pr_debug("chester power on\n");
- MACvRegBitsOff(priv->PortOffset, MAC_REG_GPIOCTL0, LED_ACTSET); /* LED issue */
+ MACvRegBitsOff(priv->PortOffset, MAC_REG_GPIOCTL0,
+ LED_ACTSET); /* LED issue */
return bResult;
}
@@ -717,55 +730,72 @@ void CARDvSetRSPINF(struct vnt_private *priv, u8 bb_type)
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_6, MAKEWORD(byTxRate, byRsvTime));
+ VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_6,
+ MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_9 */
s_vCalculateOFDMRParameter(RATE_9M,
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_9, MAKEWORD(byTxRate, byRsvTime));
+ VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_9,
+ MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_12 */
s_vCalculateOFDMRParameter(RATE_12M,
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_12, MAKEWORD(byTxRate, byRsvTime));
+ VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_12,
+ MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_18 */
s_vCalculateOFDMRParameter(RATE_18M,
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_18, MAKEWORD(byTxRate, byRsvTime));
+ VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_18,
+ MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_24 */
s_vCalculateOFDMRParameter(RATE_24M,
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_24, MAKEWORD(byTxRate, byRsvTime));
+ VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_24,
+ MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_36 */
- s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv, RATE_36M),
+ s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate(
+ (void *)priv,
+ RATE_36M),
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_36, MAKEWORD(byTxRate, byRsvTime));
+ VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_36,
+ MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_48 */
- s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv, RATE_48M),
+ s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate(
+ (void *)priv,
+ RATE_48M),
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_48, MAKEWORD(byTxRate, byRsvTime));
+ VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_48,
+ MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_54 */
- s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv, RATE_54M),
+ s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate(
+ (void *)priv,
+ RATE_54M),
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_54, MAKEWORD(byTxRate, byRsvTime));
+ VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_54,
+ MAKEWORD(byTxRate, byRsvTime));
/* RSPINF_a_72 */
- s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate((void *)priv, RATE_54M),
+ s_vCalculateOFDMRParameter(CARDwGetOFDMControlRate(
+ (void *)priv,
+ RATE_54M),
bb_type,
&byTxRate,
&byRsvTime);
- VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_72, MAKEWORD(byTxRate, byRsvTime));
+ VNSvOutPortW(priv->PortOffset + MAC_REG_RSPINF_A_72,
+ MAKEWORD(byTxRate, byRsvTime));
/* Set to Page0 */
MACvSelectPage0(priv->PortOffset);
@@ -830,7 +860,8 @@ unsigned char CARDbyGetPktType(struct vnt_private *priv)
*
* Return Value: none
*/
-void CARDvSetLoopbackMode(struct vnt_private *priv, unsigned short wLoopbackMode)
+void CARDvSetLoopbackMode(struct vnt_private *priv,
+ unsigned short wLoopbackMode)
{
switch (wLoopbackMode) {
case CARD_LB_NONE:
@@ -965,7 +996,8 @@ u64 CARDqGetNextTBTT(u64 qwTSF, unsigned short wBeaconInterval)
*
* Return Value: none
*/
-void CARDvSetFirstNextTBTT(struct vnt_private *priv, unsigned short wBeaconInterval)
+void CARDvSetFirstNextTBTT(struct vnt_private *priv,
+ unsigned short wBeaconInterval)
{
void __iomem *dwIoBase = priv->PortOffset;
u64 qwNextTBTT = 0;
@@ -993,7 +1025,8 @@ void CARDvSetFirstNextTBTT(struct vnt_private *priv, unsigned short wBeaconInter
*
* Return Value: none
*/
-void CARDvUpdateNextTBTT(struct vnt_private *priv, u64 qwTSF, unsigned short wBeaconInterval)
+void CARDvUpdateNextTBTT(struct vnt_private *priv, u64 qwTSF,
+ unsigned short wBeaconInterval)
{
void __iomem *dwIoBase = priv->PortOffset;
diff --git a/drivers/staging/vt6655/card.h b/drivers/staging/vt6655/card.h
index 16cca49e680a..0203c7fd91a2 100644
--- a/drivers/staging/vt6655/card.h
+++ b/drivers/staging/vt6655/card.h
@@ -38,7 +38,8 @@
* LOBYTE is MAC LB mode, HIBYTE is MII LB mode
*/
#define CARD_LB_NONE MAKEWORD(MAC_LB_NONE, 0)
-#define CARD_LB_MAC MAKEWORD(MAC_LB_INTERNAL, 0) /* PHY must ISO, avoid MAC loopback packet go out */
+/* PHY must be isolated so MAC loopback packets do not go out */
+#define CARD_LB_MAC MAKEWORD(MAC_LB_INTERNAL, 0)
#define CARD_LB_PHY MAKEWORD(MAC_LB_EXT, 0)
#define DEFAULT_MSDU_LIFETIME 512 /* ms */
@@ -71,8 +72,10 @@ void CARDvUpdateBasicTopRate(struct vnt_private *);
bool CARDbIsOFDMinBasicRate(struct vnt_private *);
void CARDvSetLoopbackMode(struct vnt_private *, unsigned short wLoopbackMode);
bool CARDbSoftwareReset(struct vnt_private *);
-void CARDvSetFirstNextTBTT(struct vnt_private *, unsigned short wBeaconInterval);
-void CARDvUpdateNextTBTT(struct vnt_private *, u64 qwTSF, unsigned short wBeaconInterval);
+void CARDvSetFirstNextTBTT(struct vnt_private *,
+ unsigned short wBeaconInterval);
+void CARDvUpdateNextTBTT(struct vnt_private *, u64 qwTSF,
+ unsigned short wBeaconInterval);
bool CARDbGetCurrentTSF(struct vnt_private *, u64 *pqwCurrTSF);
u64 CARDqGetNextTBTT(u64 qwTSF, unsigned short wBeaconInterval);
u64 CARDqGetTSFOffset(unsigned char byRxRate, u64 qwTSF1, u64 qwTSF2);
diff --git a/drivers/staging/vt6655/desc.h b/drivers/staging/vt6655/desc.h
index 9fbc7172484e..2d7f6ae89164 100644
--- a/drivers/staging/vt6655/desc.h
+++ b/drivers/staging/vt6655/desc.h
@@ -157,7 +157,8 @@
/* TD_INFO flags control bit */
#define TD_FLAGS_NETIF_SKB 0x01 /* check if need release skb */
-#define TD_FLAGS_PRIV_SKB 0x02 /* check if called from private skb (hostap) */
+/* check if called from private skb (hostap) */
+#define TD_FLAGS_PRIV_SKB 0x02
#define TD_FLAGS_PS_RETRY 0x04 /* check if PS STA frame re-transmit */
/*
diff --git a/drivers/staging/vt6655/mac.c b/drivers/staging/vt6655/mac.c
index 45196c6e9e12..8e13f7f41415 100644
--- a/drivers/staging/vt6655/mac.c
+++ b/drivers/staging/vt6655/mac.c
@@ -47,7 +47,8 @@
*
* Revision History:
* 08-22-2003 Kyle Hsu : Porting MAC functions from sim53
- * 09-03-2003 Bryan YC Fan : Add MACvClearBusSusInd()& MACvEnableBusSusEn()
+ * 09-03-2003 Bryan YC Fan : Add MACvClearBusSusInd()&
+ * MACvEnableBusSusEn()
* 09-18-2003 Jerry Chen : Add MACvSetKeyEntry & MACvDisableKeyEntry
*
*/
@@ -138,7 +139,8 @@ bool MACbIsIntDisable(struct vnt_private *priv)
* Return Value: none
*
*/
-void MACvSetShortRetryLimit(struct vnt_private *priv, unsigned char byRetryLimit)
+void MACvSetShortRetryLimit(struct vnt_private *priv,
+ unsigned char byRetryLimit)
{
void __iomem *io_base = priv->PortOffset;
/* set SRT */
@@ -160,7 +162,8 @@ void MACvSetShortRetryLimit(struct vnt_private *priv, unsigned char byRetryLimit
* Return Value: none
*
*/
-void MACvSetLongRetryLimit(struct vnt_private *priv, unsigned char byRetryLimit)
+void MACvSetLongRetryLimit(struct vnt_private *priv,
+ unsigned char byRetryLimit)
{
void __iomem *io_base = priv->PortOffset;
/* set LRT */
@@ -304,7 +307,8 @@ bool MACbSoftwareReset(struct vnt_private *priv)
/*
* Description:
- * save some important register's value, then do reset, then restore register's value
+ * save some important registers' values, then do the reset, then
+ * restore the registers' values
*
* Parameters:
* In:
@@ -738,7 +742,8 @@ void MACvTimer0MicroSDelay(struct vnt_private *priv, unsigned int uDelay)
* Return Value: none
*
*/
-void MACvOneShotTimer1MicroSec(struct vnt_private *priv, unsigned int uDelayTime)
+void MACvOneShotTimer1MicroSec(struct vnt_private *priv,
+ unsigned int uDelayTime)
{
void __iomem *io_base = priv->PortOffset;
diff --git a/drivers/staging/vt6655/srom.c b/drivers/staging/vt6655/srom.c
index 9ec49e653b61..ee992772066f 100644
--- a/drivers/staging/vt6655/srom.c
+++ b/drivers/staging/vt6655/srom.c
@@ -72,7 +72,8 @@
* Return Value: data read
*
*/
-unsigned char SROMbyReadEmbedded(void __iomem *dwIoBase, unsigned char byContntOffset)
+unsigned char SROMbyReadEmbedded(void __iomem *dwIoBase,
+ unsigned char byContntOffset)
{
unsigned short wDelay, wNoACK;
unsigned char byWait;
@@ -124,7 +125,8 @@ void SROMvReadAllContents(void __iomem *dwIoBase, unsigned char *pbyEepromRegs)
/* ii = Rom Address */
for (ii = 0; ii < EEP_MAX_CONTEXT_SIZE; ii++) {
- *pbyEepromRegs = SROMbyReadEmbedded(dwIoBase, (unsigned char)ii);
+ *pbyEepromRegs = SROMbyReadEmbedded(dwIoBase,
+ (unsigned char)ii);
pbyEepromRegs++;
}
}
@@ -141,7 +143,8 @@ void SROMvReadAllContents(void __iomem *dwIoBase, unsigned char *pbyEepromRegs)
* Return Value: none
*
*/
-void SROMvReadEtherAddress(void __iomem *dwIoBase, unsigned char *pbyEtherAddress)
+void SROMvReadEtherAddress(void __iomem *dwIoBase,
+ unsigned char *pbyEtherAddress)
{
unsigned char ii;
diff --git a/drivers/staging/vt6656/baseband.c b/drivers/staging/vt6656/baseband.c
index 9417c935fc30..882fe54ce41d 100644
--- a/drivers/staging/vt6656/baseband.c
+++ b/drivers/staging/vt6656/baseband.c
@@ -138,7 +138,7 @@ static const u16 vnt_frame_time[MAX_RATE] = {
*
*/
unsigned int vnt_get_frame_time(u8 preamble_type, u8 pkt_type,
- unsigned int frame_length, u16 tx_rate)
+ unsigned int frame_length, u16 tx_rate)
{
unsigned int frame_time;
unsigned int preamble;
@@ -195,7 +195,7 @@ unsigned int vnt_get_frame_time(u8 preamble_type, u8 pkt_type,
*
*/
void vnt_get_phy_field(struct vnt_private *priv, u32 frame_length,
- u16 tx_rate, u8 pkt_type, struct vnt_phy_field *phy)
+ u16 tx_rate, u8 pkt_type, struct vnt_phy_field *phy)
{
u32 bit_count;
u32 count = 0;
@@ -355,7 +355,7 @@ void vnt_set_antenna_mode(struct vnt_private *priv, u8 antenna_mode)
}
vnt_control_out(priv, MESSAGE_TYPE_SET_ANTMD,
- (u16)antenna_mode, 0, 0, NULL);
+ (u16)antenna_mode, 0, 0, NULL);
}
/*
@@ -383,7 +383,7 @@ int vnt_vt3184_init(struct vnt_private *priv)
u8 data;
status = vnt_control_in(priv, MESSAGE_TYPE_READ, 0,
- MESSAGE_REQUEST_EEPROM, EEP_MAX_CONTEXT_SIZE,
+ MESSAGE_REQUEST_EEPROM, EEP_MAX_CONTEXT_SIZE,
priv->eeprom);
if (status != STATUS_SUCCESS)
return false;
@@ -393,7 +393,7 @@ int vnt_vt3184_init(struct vnt_private *priv)
dev_dbg(&priv->usb->dev, "RF Type %d\n", priv->rf_type);
if ((priv->rf_type == RF_AL2230) ||
- (priv->rf_type == RF_AL2230S)) {
+ (priv->rf_type == RF_AL2230S)) {
priv->bb_rx_conf = vnt_vt3184_al2230[10];
length = sizeof(vnt_vt3184_al2230);
addr = vnt_vt3184_al2230;
@@ -457,21 +457,21 @@ int vnt_vt3184_init(struct vnt_private *priv)
memcpy(array, addr, length);
vnt_control_out(priv, MESSAGE_TYPE_WRITE, 0,
- MESSAGE_REQUEST_BBREG, length, array);
+ MESSAGE_REQUEST_BBREG, length, array);
memcpy(array, agc, length_agc);
vnt_control_out(priv, MESSAGE_TYPE_WRITE, 0,
- MESSAGE_REQUEST_BBAGC, length_agc, array);
+ MESSAGE_REQUEST_BBAGC, length_agc, array);
if ((priv->rf_type == RF_VT3226) ||
- (priv->rf_type == RF_VT3342A0)) {
+ (priv->rf_type == RF_VT3342A0)) {
vnt_control_out_u8(priv, MESSAGE_REQUEST_MACREG,
- MAC_REG_ITRTMSET, 0x23);
+ MAC_REG_ITRTMSET, 0x23);
vnt_mac_reg_bits_on(priv, MAC_REG_PAPEDELAY, 0x01);
} else if (priv->rf_type == RF_VT3226D0) {
vnt_control_out_u8(priv, MESSAGE_REQUEST_MACREG,
- MAC_REG_ITRTMSET, 0x11);
+ MAC_REG_ITRTMSET, 0x11);
vnt_mac_reg_bits_on(priv, MAC_REG_PAPEDELAY, 0x01);
}
@@ -482,12 +482,12 @@ int vnt_vt3184_init(struct vnt_private *priv)
/* Fix for TX USB resets from vendors driver */
vnt_control_in(priv, MESSAGE_TYPE_READ, USB_REG4,
- MESSAGE_REQUEST_MEM, sizeof(data), &data);
+ MESSAGE_REQUEST_MEM, sizeof(data), &data);
data |= 0x2;
vnt_control_out(priv, MESSAGE_TYPE_WRITE, USB_REG4,
- MESSAGE_REQUEST_MEM, sizeof(data), &data);
+ MESSAGE_REQUEST_MEM, sizeof(data), &data);
return true;
}
@@ -814,7 +814,7 @@ void vnt_update_pre_ed_threshold(struct vnt_private *priv, int scanning)
priv->bb_pre_ed_index = ed_inx;
dev_dbg(&priv->usb->dev, "%s bb_pre_ed_rssi %d\n",
- __func__, priv->bb_pre_ed_rssi);
+ __func__, priv->bb_pre_ed_rssi);
if (!cr_201 && !cr_206)
return;
diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c
index fc5fe4ec6d05..ac4fecb30d0e 100644
--- a/drivers/staging/vt6656/main_usb.c
+++ b/drivers/staging/vt6656/main_usb.c
@@ -238,7 +238,7 @@ static int vnt_init_registers(struct vnt_private *priv)
priv->tx_antenna_mode = ANT_B;
priv->rx_antenna_sel = 1;
- if (priv->tx_rx_ant_inv == true)
+ if (priv->tx_rx_ant_inv)
priv->rx_antenna_mode = ANT_A;
else
priv->rx_antenna_mode = ANT_B;
@@ -248,14 +248,14 @@ static int vnt_init_registers(struct vnt_private *priv)
if (antenna & EEP_ANTENNA_AUX) {
priv->tx_antenna_mode = ANT_A;
- if (priv->tx_rx_ant_inv == true)
+ if (priv->tx_rx_ant_inv)
priv->rx_antenna_mode = ANT_B;
else
priv->rx_antenna_mode = ANT_A;
} else {
priv->tx_antenna_mode = ANT_B;
- if (priv->tx_rx_ant_inv == true)
+ if (priv->tx_rx_ant_inv)
priv->rx_antenna_mode = ANT_A;
else
priv->rx_antenna_mode = ANT_B;
diff --git a/drivers/staging/vt6656/wcmd.c b/drivers/staging/vt6656/wcmd.c
index 4846a898d39b..95faaeb7432a 100644
--- a/drivers/staging/vt6656/wcmd.c
+++ b/drivers/staging/vt6656/wcmd.c
@@ -97,7 +97,7 @@ void vnt_run_command(struct work_struct *work)
if (test_bit(DEVICE_FLAGS_DISCONNECTED, &priv->flags))
return;
- if (priv->cmd_running != true)
+ if (!priv->cmd_running)
return;
switch (priv->command_state) {
@@ -143,13 +143,13 @@ void vnt_run_command(struct work_struct *work)
if (priv->rx_antenna_sel == 0) {
priv->rx_antenna_sel = 1;
- if (priv->tx_rx_ant_inv == true)
+ if (priv->tx_rx_ant_inv)
vnt_set_antenna_mode(priv, ANT_RXA);
else
vnt_set_antenna_mode(priv, ANT_RXB);
} else {
priv->rx_antenna_sel = 0;
- if (priv->tx_rx_ant_inv == true)
+ if (priv->tx_rx_ant_inv)
vnt_set_antenna_mode(priv, ANT_RXB);
else
vnt_set_antenna_mode(priv, ANT_RXA);
@@ -174,7 +174,7 @@ int vnt_schedule_command(struct vnt_private *priv, enum vnt_cmd command)
ADD_ONE_WITH_WRAP_AROUND(priv->cmd_enqueue_idx, CMD_Q_SIZE);
priv->free_cmd_queue--;
- if (priv->cmd_running == false)
+ if (!priv->cmd_running)
vnt_cmd_complete(priv);
return true;
diff --git a/drivers/staging/wilc1000/Kconfig b/drivers/staging/wilc1000/Kconfig
index dce9cee9134a..73f7fefd3bc3 100644
--- a/drivers/staging/wilc1000/Kconfig
+++ b/drivers/staging/wilc1000/Kconfig
@@ -1,6 +1,5 @@
config WILC1000
tristate
- select WIRELESS_EXT
---help---
This module only supports IEEE 802.11n WiFi.
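The wilc1000 host_interface.c hunks that follow convert semaphores that were only ever used as one-shot wait/signal pairs into struct completion, and turn hif_sema_deinit, which actually guards a critical section, into a mutex. A minimal sketch of the conversion pattern, using placeholder names rather than the driver's:

#include <linux/completion.h>
#include <linux/mutex.h>

static struct completion demo_done;	/* was a semaphore initialized to 0 */
static DEFINE_MUTEX(demo_lock);		/* was a semaphore initialized to 1 */

static void demo_init(void)
{
	init_completion(&demo_done);	/* was sema_init(&sema, 0) */
}

static void demo_signal_done(void)
{
	complete(&demo_done);		/* was up(&sema) */
}

static void demo_wait_done(void)
{
	wait_for_completion(&demo_done);	/* was down(&sema) */
}

static void demo_critical_section(void)
{
	mutex_lock(&demo_lock);		/* was down(&sema) */
	/* ... */
	mutex_unlock(&demo_lock);	/* was up(&sema) */
}

Completions express the intent (wait until an event has happened) more directly than counting semaphores, and the mutex, unlike a semaphore, is tracked by lockdep.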
diff --git a/drivers/staging/wilc1000/host_interface.c b/drivers/staging/wilc1000/host_interface.c
index 0a922c7c7cbf..953584248e63 100644
--- a/drivers/staging/wilc1000/host_interface.c
+++ b/drivers/staging/wilc1000/host_interface.c
@@ -2,6 +2,7 @@
#include <linux/time.h>
#include <linux/kthread.h>
#include <linux/delay.h>
+#include <linux/completion.h>
#include "host_interface.h"
#include "coreconfigurator.h"
#include "wilc_wlan.h"
@@ -230,10 +231,10 @@ bool wilc_optaining_ip;
static u8 P2P_LISTEN_STATE;
static struct task_struct *hif_thread_handler;
static struct message_queue hif_msg_q;
-static struct semaphore hif_sema_thread;
-static struct semaphore hif_sema_driver;
-static struct semaphore hif_sema_wait_response;
-static struct semaphore hif_sema_deinit;
+static struct completion hif_thread_comp;
+static struct completion hif_driver_comp;
+static struct completion hif_wait_response;
+static struct mutex hif_deinit_lock;
static struct timer_list periodic_rssi;
u8 wilc_multicast_mac_addr_list[WILC_MULTICAST_TABLE_SIZE][ETH_ALEN];
@@ -262,6 +263,7 @@ static struct wilc_vif *join_req_vif;
static void *host_int_ParseJoinBssParam(struct network_info *ptstrNetworkInfo);
static int host_int_get_ipaddress(struct wilc_vif *vif, u8 *ip_addr, u8 idx);
+static s32 Handle_ScanDone(struct wilc_vif *vif, enum scan_event enuEvent);
/* The u8IfIdx starts from 0 to NUM_CONCURRENT_IFC -1, but 0 index used as
* a special purpose in the wilc device, so we add 1 to the index to start from 1.
@@ -305,10 +307,10 @@ static void handle_set_channel(struct wilc_vif *vif,
netdev_err(vif->ndev, "Failed to set channel\n");
}
-static s32 handle_set_wfi_drv_handler(struct wilc_vif *vif,
- struct drv_handler *hif_drv_handler)
+static void handle_set_wfi_drv_handler(struct wilc_vif *vif,
+ struct drv_handler *hif_drv_handler)
{
- s32 result = 0;
+ int ret = 0;
struct wid wid;
wid.id = (u16)WID_SET_DRV_HANDLER;
@@ -316,24 +318,20 @@ static s32 handle_set_wfi_drv_handler(struct wilc_vif *vif,
wid.val = (s8 *)hif_drv_handler;
wid.size = sizeof(*hif_drv_handler);
- result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
- hif_drv_handler->handler);
+ ret = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+ hif_drv_handler->handler);
if (!hif_drv_handler->handler)
- up(&hif_sema_driver);
+ complete(&hif_driver_comp);
- if (result) {
+ if (ret)
netdev_err(vif->ndev, "Failed to set driver handler\n");
- return -EINVAL;
- }
-
- return result;
}
-static s32 handle_set_operation_mode(struct wilc_vif *vif,
- struct op_mode *hif_op_mode)
+static void handle_set_operation_mode(struct wilc_vif *vif,
+ struct op_mode *hif_op_mode)
{
- s32 result = 0;
+ int ret = 0;
struct wid wid;
wid.id = (u16)WID_SET_OPERATION_MODE;
@@ -341,23 +339,19 @@ static s32 handle_set_operation_mode(struct wilc_vif *vif,
wid.val = (s8 *)&hif_op_mode->mode;
wid.size = sizeof(u32);
- result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
+ ret = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
if ((hif_op_mode->mode) == IDLE_MODE)
- up(&hif_sema_driver);
+ complete(&hif_driver_comp);
- if (result) {
+ if (ret)
netdev_err(vif->ndev, "Failed to set driver handler\n");
- return -EINVAL;
- }
-
- return result;
}
-static s32 handle_set_ip_address(struct wilc_vif *vif, u8 *ip_addr, u8 idx)
+static void handle_set_ip_address(struct wilc_vif *vif, u8 *ip_addr, u8 idx)
{
- s32 result = 0;
+ int ret = 0;
struct wid wid;
char firmware_ip_addr[4] = {0};
@@ -371,22 +365,18 @@ static s32 handle_set_ip_address(struct wilc_vif *vif, u8 *ip_addr, u8 idx)
wid.val = (u8 *)ip_addr;
wid.size = IP_ALEN;
- result = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
+ ret = wilc_send_config_pkt(vif, SET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
host_int_get_ipaddress(vif, firmware_ip_addr, idx);
- if (result) {
+ if (ret)
netdev_err(vif->ndev, "Failed to set IP address\n");
- return -EINVAL;
- }
-
- return result;
}
-static s32 handle_get_ip_address(struct wilc_vif *vif, u8 idx)
+static void handle_get_ip_address(struct wilc_vif *vif, u8 idx)
{
- s32 result = 0;
+ int ret = 0;
struct wid wid;
wid.id = (u16)WID_IP_ADDRESS;
@@ -394,8 +384,8 @@ static s32 handle_get_ip_address(struct wilc_vif *vif, u8 idx)
wid.val = kmalloc(IP_ALEN, GFP_KERNEL);
wid.size = IP_ALEN;
- result = wilc_send_config_pkt(vif, GET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
+ ret = wilc_send_config_pkt(vif, GET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
memcpy(get_ip[idx], wid.val, IP_ALEN);
@@ -404,18 +394,14 @@ static s32 handle_get_ip_address(struct wilc_vif *vif, u8 idx)
if (memcmp(get_ip[idx], set_ip[idx], IP_ALEN) != 0)
wilc_setup_ipaddress(vif, set_ip[idx], idx);
- if (result != 0) {
+ if (ret)
netdev_err(vif->ndev, "Failed to get IP address\n");
- return -EINVAL;
- }
-
- return result;
}
-static s32 handle_get_mac_address(struct wilc_vif *vif,
- struct get_mac_addr *get_mac_addr)
+static void handle_get_mac_address(struct wilc_vif *vif,
+ struct get_mac_addr *get_mac_addr)
{
- s32 result = 0;
+ int ret = 0;
struct wid wid;
wid.id = (u16)WID_MAC_ADDR;
@@ -423,16 +409,12 @@ static s32 handle_get_mac_address(struct wilc_vif *vif,
wid.val = get_mac_addr->mac_addr;
wid.size = ETH_ALEN;
- result = wilc_send_config_pkt(vif, GET_CFG, &wid, 1,
- wilc_get_vif_idx(vif));
+ ret = wilc_send_config_pkt(vif, GET_CFG, &wid, 1,
+ wilc_get_vif_idx(vif));
- if (result) {
+ if (ret)
netdev_err(vif->ndev, "Failed to get mac address\n");
- result = -EFAULT;
- }
- up(&hif_sema_wait_response);
-
- return result;
+ complete(&hif_wait_response);
}
static s32 handle_cfg_param(struct wilc_vif *vif,
@@ -455,7 +437,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "check value 6 over\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -471,7 +453,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Impossible value\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -486,7 +468,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Range(1 ~ 65535) over\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -500,7 +482,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Invalid power mode\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -515,7 +497,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Range(1~256) over\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -530,7 +512,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Range(1~256) over\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -545,7 +527,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Threshold Range fail\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -560,7 +542,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Threshold Range fail\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -574,7 +556,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Preamle Range(0~2) over\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -588,7 +570,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Short slot(2) over\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -602,7 +584,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "TXOP prot disable\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -617,7 +599,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Beacon interval(1~65535)fail\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -632,7 +614,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "DTIM range(1~255) fail\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -646,7 +628,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Site survey disable\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -661,7 +643,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Site scan time(1~65535) over\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -676,7 +658,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Active time(1~65535) over\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -691,7 +673,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "Passive time(1~65535) over\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -713,7 +695,7 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
} else {
netdev_err(vif->ndev, "out of TX rate\n");
result = -EINVAL;
- goto ERRORHANDLER;
+ goto unlock;
}
i++;
}
@@ -724,28 +706,24 @@ static s32 handle_cfg_param(struct wilc_vif *vif,
if (result)
netdev_err(vif->ndev, "Error in setting CFG params\n");
-ERRORHANDLER:
+unlock:
mutex_unlock(&hif_drv->cfg_values_lock);
return result;
}
-static s32 Handle_ScanDone(struct wilc_vif *vif,
- enum scan_event enuEvent);
-
-static s32 Handle_Scan(struct wilc_vif *vif,
- struct scan_attr *pstrHostIFscanAttr)
+static s32 handle_scan(struct wilc_vif *vif, struct scan_attr *scan_info)
{
s32 result = 0;
- struct wid strWIDList[5];
- u32 u32WidsCount = 0;
+ struct wid wid_list[5];
+ u32 index = 0;
u32 i;
- u8 *pu8Buffer;
+ u8 *buffer;
u8 valuesize = 0;
u8 *pu8HdnNtwrksWidVal = NULL;
struct host_if_drv *hif_drv = vif->hif_drv;
- hif_drv->usr_scan_req.scan_result = pstrHostIFscanAttr->result;
- hif_drv->usr_scan_req.arg = pstrHostIFscanAttr->arg;
+ hif_drv->usr_scan_req.scan_result = scan_info->result;
+ hif_drv->usr_scan_req.arg = scan_info->arg;
if ((hif_drv->hif_state >= HOST_IF_SCANNING) &&
(hif_drv->hif_state < HOST_IF_CONNECTED)) {
@@ -762,72 +740,70 @@ static s32 Handle_Scan(struct wilc_vif *vif,
hif_drv->usr_scan_req.rcvd_ch_cnt = 0;
- strWIDList[u32WidsCount].id = (u16)WID_SSID_PROBE_REQ;
- strWIDList[u32WidsCount].type = WID_STR;
+ wid_list[index].id = (u16)WID_SSID_PROBE_REQ;
+ wid_list[index].type = WID_STR;
- for (i = 0; i < pstrHostIFscanAttr->hidden_network.n_ssids; i++)
- valuesize += ((pstrHostIFscanAttr->hidden_network.net_info[i].ssid_len) + 1);
+ for (i = 0; i < scan_info->hidden_network.n_ssids; i++)
+ valuesize += ((scan_info->hidden_network.net_info[i].ssid_len) + 1);
pu8HdnNtwrksWidVal = kmalloc(valuesize + 1, GFP_KERNEL);
- strWIDList[u32WidsCount].val = pu8HdnNtwrksWidVal;
- if (strWIDList[u32WidsCount].val) {
- pu8Buffer = strWIDList[u32WidsCount].val;
+ wid_list[index].val = pu8HdnNtwrksWidVal;
+ if (wid_list[index].val) {
+ buffer = wid_list[index].val;
- *pu8Buffer++ = pstrHostIFscanAttr->hidden_network.n_ssids;
+ *buffer++ = scan_info->hidden_network.n_ssids;
- for (i = 0; i < pstrHostIFscanAttr->hidden_network.n_ssids; i++) {
- *pu8Buffer++ = pstrHostIFscanAttr->hidden_network.net_info[i].ssid_len;
- memcpy(pu8Buffer, pstrHostIFscanAttr->hidden_network.net_info[i].ssid, pstrHostIFscanAttr->hidden_network.net_info[i].ssid_len);
- pu8Buffer += pstrHostIFscanAttr->hidden_network.net_info[i].ssid_len;
+ for (i = 0; i < scan_info->hidden_network.n_ssids; i++) {
+ *buffer++ = scan_info->hidden_network.net_info[i].ssid_len;
+ memcpy(buffer, scan_info->hidden_network.net_info[i].ssid, scan_info->hidden_network.net_info[i].ssid_len);
+ buffer += scan_info->hidden_network.net_info[i].ssid_len;
}
- strWIDList[u32WidsCount].size = (s32)(valuesize + 1);
- u32WidsCount++;
+ wid_list[index].size = (s32)(valuesize + 1);
+ index++;
}
- {
- strWIDList[u32WidsCount].id = WID_INFO_ELEMENT_PROBE;
- strWIDList[u32WidsCount].type = WID_BIN_DATA;
- strWIDList[u32WidsCount].val = pstrHostIFscanAttr->ies;
- strWIDList[u32WidsCount].size = pstrHostIFscanAttr->ies_len;
- u32WidsCount++;
- }
+ wid_list[index].id = WID_INFO_ELEMENT_PROBE;
+ wid_list[index].type = WID_BIN_DATA;
+ wid_list[index].val = scan_info->ies;
+ wid_list[index].size = scan_info->ies_len;
+ index++;
- strWIDList[u32WidsCount].id = WID_SCAN_TYPE;
- strWIDList[u32WidsCount].type = WID_CHAR;
- strWIDList[u32WidsCount].size = sizeof(char);
- strWIDList[u32WidsCount].val = (s8 *)&pstrHostIFscanAttr->type;
- u32WidsCount++;
+ wid_list[index].id = WID_SCAN_TYPE;
+ wid_list[index].type = WID_CHAR;
+ wid_list[index].size = sizeof(char);
+ wid_list[index].val = (s8 *)&scan_info->type;
+ index++;
- strWIDList[u32WidsCount].id = WID_SCAN_CHANNEL_LIST;
- strWIDList[u32WidsCount].type = WID_BIN_DATA;
+ wid_list[index].id = WID_SCAN_CHANNEL_LIST;
+ wid_list[index].type = WID_BIN_DATA;
- if (pstrHostIFscanAttr->ch_freq_list &&
- pstrHostIFscanAttr->ch_list_len > 0) {
+ if (scan_info->ch_freq_list &&
+ scan_info->ch_list_len > 0) {
int i;
- for (i = 0; i < pstrHostIFscanAttr->ch_list_len; i++) {
- if (pstrHostIFscanAttr->ch_freq_list[i] > 0)
- pstrHostIFscanAttr->ch_freq_list[i] = pstrHostIFscanAttr->ch_freq_list[i] - 1;
+ for (i = 0; i < scan_info->ch_list_len; i++) {
+ if (scan_info->ch_freq_list[i] > 0)
+ scan_info->ch_freq_list[i] = scan_info->ch_freq_list[i] - 1;
}
}
- strWIDList[u32WidsCount].val = pstrHostIFscanAttr->ch_freq_list;
- strWIDList[u32WidsCount].size = pstrHostIFscanAttr->ch_list_len;
- u32WidsCount++;
+ wid_list[index].val = scan_info->ch_freq_list;
+ wid_list[index].size = scan_info->ch_list_len;
+ index++;
- strWIDList[u32WidsCount].id = WID_START_SCAN_REQ;
- strWIDList[u32WidsCount].type = WID_CHAR;
- strWIDList[u32WidsCount].size = sizeof(char);
- strWIDList[u32WidsCount].val = (s8 *)&pstrHostIFscanAttr->src;
- u32WidsCount++;
+ wid_list[index].id = WID_START_SCAN_REQ;
+ wid_list[index].type = WID_CHAR;
+ wid_list[index].size = sizeof(char);
+ wid_list[index].val = (s8 *)&scan_info->src;
+ index++;
if (hif_drv->hif_state == HOST_IF_CONNECTED)
scan_while_connected = true;
else if (hif_drv->hif_state == HOST_IF_IDLE)
scan_while_connected = false;
- result = wilc_send_config_pkt(vif, SET_CFG, strWIDList,
- u32WidsCount,
+ result = wilc_send_config_pkt(vif, SET_CFG, wid_list,
+ index,
wilc_get_vif_idx(vif));
if (result)
@@ -839,13 +815,13 @@ ERRORHANDLER:
Handle_ScanDone(vif, SCAN_EVENT_ABORTED);
}
- kfree(pstrHostIFscanAttr->ch_freq_list);
- pstrHostIFscanAttr->ch_freq_list = NULL;
+ kfree(scan_info->ch_freq_list);
+ scan_info->ch_freq_list = NULL;
- kfree(pstrHostIFscanAttr->ies);
- pstrHostIFscanAttr->ies = NULL;
- kfree(pstrHostIFscanAttr->hidden_network.net_info);
- pstrHostIFscanAttr->hidden_network.net_info = NULL;
+ kfree(scan_info->ies);
+ scan_info->ies = NULL;
+ kfree(scan_info->hidden_network.net_info);
+ scan_info->hidden_network.net_info = NULL;
kfree(pu8HdnNtwrksWidVal);
@@ -1610,7 +1586,7 @@ static int Handle_Key(struct wilc_vif *vif,
&wid, 1,
wilc_get_vif_idx(vif));
}
- up(&hif_drv->sem_test_key_block);
+ complete(&hif_drv->comp_test_key_block);
break;
case WPA_RX_GTK:
@@ -1644,10 +1620,10 @@ static int Handle_Key(struct wilc_vif *vif,
wilc_get_vif_idx(vif));
kfree(pu8keybuf);
- up(&hif_drv->sem_test_key_block);
+ complete(&hif_drv->comp_test_key_block);
} else if (pstrHostIFkeyAttr->action & ADDKEY) {
pu8keybuf = kzalloc(RX_MIC_KEY_MSG_LEN, GFP_KERNEL);
- if (pu8keybuf == NULL) {
+ if (!pu8keybuf) {
ret = -ENOMEM;
goto _WPARxGtk_end_case_;
}
@@ -1673,7 +1649,7 @@ static int Handle_Key(struct wilc_vif *vif,
wilc_get_vif_idx(vif));
kfree(pu8keybuf);
- up(&hif_drv->sem_test_key_block);
+ complete(&hif_drv->comp_test_key_block);
}
_WPARxGtk_end_case_:
kfree(pstrHostIFkeyAttr->attr.wpa.key);
@@ -1711,7 +1687,7 @@ _WPARxGtk_end_case_:
strWIDList, 2,
wilc_get_vif_idx(vif));
kfree(pu8keybuf);
- up(&hif_drv->sem_test_key_block);
+ complete(&hif_drv->comp_test_key_block);
} else if (pstrHostIFkeyAttr->action & ADDKEY) {
pu8keybuf = kmalloc(PTK_KEY_MSG_LEN, GFP_KERNEL);
if (!pu8keybuf) {
@@ -1734,7 +1710,7 @@ _WPARxGtk_end_case_:
&wid, 1,
wilc_get_vif_idx(vif));
kfree(pu8keybuf);
- up(&hif_drv->sem_test_key_block);
+ complete(&hif_drv->comp_test_key_block);
}
_WPAPtk_end_case_:
@@ -1856,7 +1832,7 @@ static void Handle_Disconnect(struct wilc_vif *vif)
}
}
- up(&hif_drv->sem_test_disconn_block);
+ complete(&hif_drv->comp_test_disconn_block);
}
void wilc_resolve_disconnect_aberration(struct wilc_vif *vif)
@@ -1885,7 +1861,7 @@ static void Handle_GetRssi(struct wilc_vif *vif)
result = -EFAULT;
}
- up(&vif->hif_drv->sem_get_rssi);
+ complete(&vif->hif_drv->comp_get_rssi);
}
static s32 Handle_GetStatistics(struct wilc_vif *vif,
@@ -1938,7 +1914,7 @@ static s32 Handle_GetStatistics(struct wilc_vif *vif,
wilc_enable_tcp_ack_filter(false);
if (pstrStatistics != &vif->wilc->dummy_statistics)
- up(&hif_sema_wait_response);
+ complete(&hif_wait_response);
return 0;
}
@@ -1979,7 +1955,7 @@ static s32 Handle_Get_InActiveTime(struct wilc_vif *vif,
return -EFAULT;
}
- up(&hif_drv->sem_inactive_time);
+ complete(&hif_drv->comp_inactive_time);
return result;
}
@@ -2172,7 +2148,7 @@ static void Handle_DelAllSta(struct wilc_vif *vif,
ERRORHANDLER:
kfree(wid.val);
- up(&hif_sema_wait_response);
+ complete(&hif_wait_response);
}
static void Handle_DelStation(struct wilc_vif *vif,
@@ -2472,7 +2448,7 @@ static void handle_set_tx_pwr(struct wilc_vif *vif, u8 tx_pwr)
static void handle_get_tx_pwr(struct wilc_vif *vif, u8 *tx_pwr)
{
- s32 ret = 0;
+ int ret = 0;
struct wid wid;
wid.id = (u16)WID_TX_POWER;
@@ -2485,7 +2461,7 @@ static void handle_get_tx_pwr(struct wilc_vif *vif, u8 *tx_pwr)
if (ret)
netdev_err(vif->ndev, "Failed to get TX PWR\n");
- up(&hif_sema_wait_response);
+ complete(&hif_wait_response);
}
static int hostIFthread(void *pvArg)
@@ -2518,7 +2494,7 @@ static int hostIFthread(void *pvArg)
switch (msg.id) {
case HOST_IF_MSG_SCAN:
- Handle_Scan(msg.vif, &msg.body.scan_info);
+ handle_scan(msg.vif, &msg.body.scan_info);
break;
case HOST_IF_MSG_CONNECT:
@@ -2667,7 +2643,7 @@ static int hostIFthread(void *pvArg)
}
}
- up(&hif_sema_thread);
+ complete(&hif_thread_comp);
return 0;
}
@@ -2730,7 +2706,8 @@ int wilc_remove_wep_key(struct wilc_vif *vif, u8 index)
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
netdev_err(vif->ndev, "Request to remove WEP key\n");
- down(&hif_drv->sem_test_key_block);
+ else
+ wait_for_completion(&hif_drv->comp_test_key_block);
return result;
}
@@ -2758,7 +2735,8 @@ int wilc_set_wep_default_keyid(struct wilc_vif *vif, u8 index)
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
netdev_err(vif->ndev, "Default key index\n");
- down(&hif_drv->sem_test_key_block);
+ else
+ wait_for_completion(&hif_drv->comp_test_key_block);
return result;
}
@@ -2791,7 +2769,7 @@ int wilc_add_wep_key_bss_sta(struct wilc_vif *vif, const u8 *key, u8 len,
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
netdev_err(vif->ndev, "STA - WEP Key\n");
- down(&hif_drv->sem_test_key_block);
+ wait_for_completion(&hif_drv->comp_test_key_block);
return result;
}
@@ -2827,7 +2805,8 @@ int wilc_add_wep_key_bss_ap(struct wilc_vif *vif, const u8 *key, u8 len,
if (result)
netdev_err(vif->ndev, "AP - WEP Key\n");
- down(&hif_drv->sem_test_key_block);
+ else
+ wait_for_completion(&hif_drv->comp_test_key_block);
return result;
}
@@ -2882,8 +2861,8 @@ int wilc_add_ptk(struct wilc_vif *vif, const u8 *ptk, u8 ptk_key_len,
if (result)
netdev_err(vif->ndev, "PTK Key\n");
-
- down(&hif_drv->sem_test_key_block);
+ else
+ wait_for_completion(&hif_drv->comp_test_key_block);
return result;
}
@@ -2950,8 +2929,8 @@ int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len,
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
netdev_err(vif->ndev, "RX GTK\n");
-
- down(&hif_drv->sem_test_key_block);
+ else
+ wait_for_completion(&hif_drv->comp_test_key_block);
return result;
}
@@ -2961,14 +2940,8 @@ int wilc_set_pmkid_info(struct wilc_vif *vif,
{
int result = 0;
struct host_if_msg msg;
- struct host_if_drv *hif_drv = vif->hif_drv;
int i;
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
-
memset(&msg, 0, sizeof(struct host_if_msg));
msg.id = HOST_IF_MSG_KEY;
@@ -3007,7 +2980,7 @@ int wilc_get_mac_address(struct wilc_vif *vif, u8 *mac_addr)
return -EFAULT;
}
- down(&hif_sema_wait_response);
+ wait_for_completion(&hif_wait_response);
return result;
}
@@ -3097,8 +3070,8 @@ int wilc_disconnect(struct wilc_vif *vif, u16 reason_code)
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
netdev_err(vif->ndev, "Failed to send message: disconnect\n");
-
- down(&hif_drv->sem_test_disconn_block);
+ else
+ wait_for_completion(&hif_drv->comp_test_disconn_block);
return result;
}
@@ -3110,12 +3083,6 @@ static s32 host_int_get_assoc_res_info(struct wilc_vif *vif,
{
s32 result = 0;
struct wid wid;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "Driver is null\n");
- return -EFAULT;
- }
wid.id = (u16)WID_ASSOC_RES_INFO;
wid.type = WID_STR;
@@ -3138,12 +3105,6 @@ int wilc_set_mac_chnl_num(struct wilc_vif *vif, u8 channel)
{
int result;
struct host_if_msg msg;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
memset(&msg, 0, sizeof(struct host_if_msg));
msg.id = HOST_IF_MSG_SET_CHANNEL;
@@ -3219,8 +3180,8 @@ s32 wilc_get_inactive_time(struct wilc_vif *vif, const u8 *mac,
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result)
netdev_err(vif->ndev, "Failed to send get host ch param\n");
-
- down(&hif_drv->sem_inactive_time);
+ else
+ wait_for_completion(&hif_drv->comp_inactive_time);
*pu32InactiveTime = inactive_time;
@@ -3243,7 +3204,7 @@ int wilc_get_rssi(struct wilc_vif *vif, s8 *rssi_level)
return -EFAULT;
}
- down(&hif_drv->sem_get_rssi);
+ wait_for_completion(&hif_drv->comp_get_rssi);
if (!rssi_level) {
netdev_err(vif->ndev, "RSS pointer value is null\n");
@@ -3272,7 +3233,7 @@ int wilc_get_statistics(struct wilc_vif *vif, struct rf_info *stats)
}
if (stats != &vif->wilc->dummy_statistics)
- down(&hif_sema_wait_response);
+ wait_for_completion(&hif_wait_response);
return result;
}
@@ -3382,7 +3343,7 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
scan_while_connected = false;
- sema_init(&hif_sema_wait_response, 0);
+ init_completion(&hif_wait_response);
hif_drv = kzalloc(sizeof(struct host_if_drv), GFP_KERNEL);
if (!hif_drv) {
@@ -3399,15 +3360,15 @@ int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler)
wilc_optaining_ip = false;
if (clients_count == 0) {
- sema_init(&hif_sema_thread, 0);
- sema_init(&hif_sema_driver, 0);
- sema_init(&hif_sema_deinit, 1);
+ init_completion(&hif_thread_comp);
+ init_completion(&hif_driver_comp);
+ mutex_init(&hif_deinit_lock);
}
- sema_init(&hif_drv->sem_test_key_block, 0);
- sema_init(&hif_drv->sem_test_disconn_block, 0);
- sema_init(&hif_drv->sem_get_rssi, 0);
- sema_init(&hif_drv->sem_inactive_time, 0);
+ init_completion(&hif_drv->comp_test_key_block);
+ init_completion(&hif_drv->comp_test_disconn_block);
+ init_completion(&hif_drv->comp_get_rssi);
+ init_completion(&hif_drv->comp_inactive_time);
if (clients_count == 0) {
result = wilc_mq_create(&hif_msg_q);
@@ -3469,7 +3430,7 @@ int wilc_deinit(struct wilc_vif *vif)
return -EFAULT;
}
- down(&hif_sema_deinit);
+ mutex_lock(&hif_deinit_lock);
terminated_handle = hif_drv;
@@ -3479,7 +3440,7 @@ int wilc_deinit(struct wilc_vif *vif)
del_timer_sync(&hif_drv->remain_on_ch_timer);
wilc_set_wfi_drv_handler(vif, 0, 0);
- down(&hif_sema_driver);
+ wait_for_completion(&hif_driver_comp);
if (hif_drv->usr_scan_req.scan_result) {
hif_drv->usr_scan_req.scan_result(SCAN_EVENT_ABORTED, NULL,
@@ -3494,15 +3455,14 @@ int wilc_deinit(struct wilc_vif *vif)
memset(&msg, 0, sizeof(struct host_if_msg));
if (clients_count == 1) {
- del_timer_sync(&periodic_rssi);
msg.id = HOST_IF_MSG_EXIT;
msg.vif = vif;
result = wilc_mq_send(&hif_msg_q, &msg, sizeof(struct host_if_msg));
if (result != 0)
netdev_err(vif->ndev, "deinit : Error(%d)\n", result);
-
- down(&hif_sema_thread);
+ else
+ wait_for_completion(&hif_thread_comp);
wilc_mq_destroy(&hif_msg_q);
}
@@ -3511,7 +3471,7 @@ int wilc_deinit(struct wilc_vif *vif)
clients_count--;
terminated_handle = NULL;
- up(&hif_sema_deinit);
+ mutex_unlock(&hif_deinit_lock);
return result;
}
@@ -3558,25 +3518,25 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *pu8Buffer,
struct host_if_drv *hif_drv = NULL;
struct wilc_vif *vif;
- down(&hif_sema_deinit);
+ mutex_lock(&hif_deinit_lock);
id = ((pu8Buffer[u32Length - 4]) | (pu8Buffer[u32Length - 3] << 8) | (pu8Buffer[u32Length - 2] << 16) | (pu8Buffer[u32Length - 1] << 24));
vif = wilc_get_vif_from_idx(wilc, id);
if (!vif) {
- up(&hif_sema_deinit);
+ mutex_unlock(&hif_deinit_lock);
return;
}
hif_drv = vif->hif_drv;
if (!hif_drv || hif_drv == terminated_handle) {
- up(&hif_sema_deinit);
+ mutex_unlock(&hif_deinit_lock);
return;
}
if (!hif_drv->usr_conn_req.conn_result) {
netdev_err(vif->ndev, "there is no current Connect Request\n");
- up(&hif_sema_deinit);
+ mutex_unlock(&hif_deinit_lock);
return;
}
@@ -3593,7 +3553,7 @@ void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *pu8Buffer,
if (result)
netdev_err(vif->ndev, "synchronous info (%d)\n", result);
- up(&hif_sema_deinit);
+ mutex_unlock(&hif_deinit_lock);
}
void wilc_scan_complete_received(struct wilc *wilc, u8 *pu8Buffer,
@@ -3634,12 +3594,6 @@ int wilc_remain_on_channel(struct wilc_vif *vif, u32 session_id,
{
int result = 0;
struct host_if_msg msg;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
memset(&msg, 0, sizeof(struct host_if_msg));
@@ -3688,12 +3642,6 @@ int wilc_frame_register(struct wilc_vif *vif, u16 frame_type, bool reg)
{
int result = 0;
struct host_if_msg msg;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
memset(&msg, 0, sizeof(struct host_if_msg));
@@ -3727,12 +3675,6 @@ int wilc_add_beacon(struct wilc_vif *vif, u32 interval, u32 dtim_period,
int result = 0;
struct host_if_msg msg;
struct beacon_attr *beacon_info = &msg.body.beacon_info;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
memset(&msg, 0, sizeof(struct host_if_msg));
@@ -3776,12 +3718,6 @@ int wilc_del_beacon(struct wilc_vif *vif)
{
int result = 0;
struct host_if_msg msg;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
msg.id = HOST_IF_MSG_DEL_BEACON;
msg.vif = vif;
@@ -3798,12 +3734,6 @@ int wilc_add_station(struct wilc_vif *vif, struct add_sta_param *sta_param)
int result = 0;
struct host_if_msg msg;
struct add_sta_param *add_sta_info = &msg.body.add_sta_info;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
memset(&msg, 0, sizeof(struct host_if_msg));
@@ -3830,12 +3760,6 @@ int wilc_del_station(struct wilc_vif *vif, const u8 *mac_addr)
int result = 0;
struct host_if_msg msg;
struct del_sta *del_sta_info = &msg.body.del_sta_info;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
memset(&msg, 0, sizeof(struct host_if_msg));
@@ -3858,16 +3782,10 @@ int wilc_del_allstation(struct wilc_vif *vif, u8 mac_addr[][ETH_ALEN])
int result = 0;
struct host_if_msg msg;
struct del_all_sta *del_all_sta_info = &msg.body.del_all_sta_info;
- struct host_if_drv *hif_drv = vif->hif_drv;
u8 zero_addr[ETH_ALEN] = {0};
int i;
u8 assoc_sta = 0;
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
-
memset(&msg, 0, sizeof(struct host_if_msg));
msg.id = HOST_IF_MSG_DEL_ALL_STA;
@@ -3887,8 +3805,8 @@ int wilc_del_allstation(struct wilc_vif *vif, u8 mac_addr[][ETH_ALEN])
if (result)
netdev_err(vif->ndev, "wilc_mq_send fail\n");
-
- down(&hif_sema_wait_response);
+ else
+ wait_for_completion(&hif_wait_response);
return result;
}
@@ -3899,12 +3817,6 @@ int wilc_edit_station(struct wilc_vif *vif,
int result = 0;
struct host_if_msg msg;
struct add_sta_param *add_sta_info = &msg.body.add_sta_info;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
memset(&msg, 0, sizeof(struct host_if_msg));
@@ -3932,12 +3844,6 @@ int wilc_set_power_mgmt(struct wilc_vif *vif, bool enabled, u32 timeout)
int result = 0;
struct host_if_msg msg;
struct power_mgmt_param *pwr_mgmt_info = &msg.body.pwr_mgmt_info;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
if (wilc_wlan_get_num_conn_ifcs(vif->wilc) == 2 && enabled)
return 0;
@@ -3962,12 +3868,6 @@ int wilc_setup_multicast_filter(struct wilc_vif *vif, bool enabled,
int result = 0;
struct host_if_msg msg;
struct set_multicast *multicast_filter_param = &msg.body.multicast_info;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
memset(&msg, 0, sizeof(struct host_if_msg));
@@ -4141,12 +4041,6 @@ int wilc_setup_ipaddress(struct wilc_vif *vif, u8 *ip_addr, u8 idx)
{
int result = 0;
struct host_if_msg msg;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
memset(&msg, 0, sizeof(struct host_if_msg));
@@ -4167,12 +4061,6 @@ static int host_int_get_ipaddress(struct wilc_vif *vif, u8 *ip_addr, u8 idx)
{
int result = 0;
struct host_if_msg msg;
- struct host_if_drv *hif_drv = vif->hif_drv;
-
- if (!hif_drv) {
- netdev_err(vif->ndev, "driver is null\n");
- return -EFAULT;
- }
memset(&msg, 0, sizeof(struct host_if_msg));
@@ -4221,7 +4109,7 @@ int wilc_get_tx_power(struct wilc_vif *vif, u8 *tx_power)
if (ret)
netdev_err(vif->ndev, "Failed to get TX PWR\n");
- down(&hif_sema_wait_response);
+ wait_for_completion(&hif_wait_response);
*tx_power = msg.body.tx_power.tx_pwr;
return ret;
diff --git a/drivers/staging/wilc1000/host_interface.h b/drivers/staging/wilc1000/host_interface.h
index 01f3222a4231..8d2dd0db0bed 100644
--- a/drivers/staging/wilc1000/host_interface.h
+++ b/drivers/staging/wilc1000/host_interface.h
@@ -275,10 +275,10 @@ struct host_if_drv {
struct cfg_param_attr cfg_values;
struct mutex cfg_values_lock;
- struct semaphore sem_test_key_block;
- struct semaphore sem_test_disconn_block;
- struct semaphore sem_get_rssi;
- struct semaphore sem_inactive_time;
+ struct completion comp_test_key_block;
+ struct completion comp_test_disconn_block;
+ struct completion comp_get_rssi;
+ struct completion comp_inactive_time;
struct timer_list scan_timer;
struct timer_list connect_timer;
diff --git a/drivers/staging/wilc1000/linux_mon.c b/drivers/staging/wilc1000/linux_mon.c
index 7d9e5ded8ff4..242f82f4d24f 100644
--- a/drivers/staging/wilc1000/linux_mon.c
+++ b/drivers/staging/wilc1000/linux_mon.c
@@ -24,7 +24,7 @@ struct wilc_wfi_radiotap_cb_hdr {
static struct net_device *wilc_wfi_mon; /* global monitor netdev */
-static u8 srcAdd[6];
+static u8 srcadd[6];
static u8 bssid[6];
static u8 broadcast[] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
/**
@@ -59,9 +59,10 @@ void WILC_WFI_monitor_rx(u8 *buff, u32 size)
/* Get WILC header */
memcpy(&header, (buff - HOST_HDR_OFFSET), HOST_HDR_OFFSET);
-
- /* The packet offset field conain info about what type of managment frame */
- /* we are dealing with and ack status */
+	/*
+	 * The packet offset field contains info about what type of management
+	 * frame we are dealing with and the ack status
+	 */
pkt_offset = GET_PKT_OFFSET(header);
if (pkt_offset & IS_MANAGMEMENT_CALLBACK) {
@@ -105,7 +106,7 @@ void WILC_WFI_monitor_rx(u8 *buff, u32 size)
hdr->hdr.it_version = 0; /* PKTHDR_RADIOTAP_VERSION; */
hdr->hdr.it_len = cpu_to_le16(sizeof(struct wilc_wfi_radiotap_hdr));
hdr->hdr.it_present = cpu_to_le32
- (1 << IEEE80211_RADIOTAP_RATE); /* | */
+ (1 << IEEE80211_RADIOTAP_RATE); /* | */
hdr->rate = 5; /* txrate->bitrate / 5; */
}
@@ -127,8 +128,10 @@ struct tx_complete_mon_data {
static void mgmt_tx_complete(void *priv, int status)
{
struct tx_complete_mon_data *pv_data = priv;
-
- /* incase of fully hosting mode, the freeing will be done in response to the cfg packet */
+ /*
+ * in case of fully hosting mode, the freeing will be done
+ * in response to the cfg packet
+ */
kfree(pv_data->buff);
kfree(pv_data);
@@ -225,11 +228,11 @@ static netdev_tx_t WILC_WFI_mon_xmit(struct sk_buff *skb,
skb->dev = mon_priv->real_ndev;
/* Identify if Ethernet or MAC header (data or mgmt) */
- memcpy(srcAdd, &skb->data[10], 6);
+ memcpy(srcadd, &skb->data[10], 6);
memcpy(bssid, &skb->data[16], 6);
/* if source address and bssid fields are equal>>Mac header */
/*send it to mgmt frames handler */
- if (!(memcmp(srcAdd, bssid, 6))) {
+ if (!(memcmp(srcadd, bssid, 6))) {
ret = mon_mgmt_tx(mon_priv->real_ndev, skb->data, skb->len);
if (ret)
netdev_err(dev, "fail to mgmt tx\n");
@@ -255,7 +258,8 @@ static const struct net_device_ops wilc_wfi_netdev_ops = {
* @date 12 JUL 2012
* @version 1.0
*/
-struct net_device *WILC_WFI_init_mon_interface(const char *name, struct net_device *real_dev)
+struct net_device *WILC_WFI_init_mon_interface(const char *name,
+ struct net_device *real_dev)
{
u32 ret = 0;
struct WILC_WFI_mon_priv *priv;
diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
index bfa754bb022d..4f93c11e73c0 100644
--- a/drivers/staging/wilc1000/linux_wlan.c
+++ b/drivers/staging/wilc1000/linux_wlan.c
@@ -22,6 +22,7 @@
#include <linux/skbuff.h>
#include <linux/semaphore.h>
+#include <linux/completion.h>
static int dev_state_ev_handler(struct notifier_block *this,
unsigned long event, void *ptr);
@@ -30,8 +31,6 @@ static struct notifier_block g_dev_notifier = {
.notifier_call = dev_state_ev_handler
};
-#define IRQ_WAIT 1
-#define IRQ_NO_WAIT 0
static struct semaphore close_exit_sync;
static int wlan_deinit_locks(struct net_device *dev);
@@ -259,10 +258,12 @@ static struct net_device *get_if_handler(struct wilc *wilc, u8 *mac_header)
for (i = 0; i < wilc->vif_num; i++) {
if (wilc->vif[i]->mode == STATION_MODE)
- if (!memcmp(bssid, wilc->vif[i]->bssid, ETH_ALEN))
+ if (ether_addr_equal_unaligned(bssid,
+ wilc->vif[i]->bssid))
return wilc->vif[i]->ndev;
if (wilc->vif[i]->mode == AP_MODE)
- if (!memcmp(bssid1, wilc->vif[i]->bssid, ETH_ALEN))
+ if (ether_addr_equal_unaligned(bssid1,
+ wilc->vif[i]->bssid))
return wilc->vif[i]->ndev;
}
@@ -303,40 +304,27 @@ int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc)
return ret_val;
}
-#define USE_TX_BACKOFF_DELAY_IF_NO_BUFFERS
-
static int linux_wlan_txq_task(void *vp)
{
int ret, txq_count;
struct wilc_vif *vif;
struct wilc *wl;
struct net_device *dev = vp;
-#if defined USE_TX_BACKOFF_DELAY_IF_NO_BUFFERS
-#define TX_BACKOFF_WEIGHT_INCR_STEP (1)
-#define TX_BACKOFF_WEIGHT_DECR_STEP (1)
-#define TX_BACKOFF_WEIGHT_MAX (7)
-#define TX_BACKOFF_WEIGHT_MIN (0)
-#define TX_BACKOFF_WEIGHT_UNIT_MS (10)
- int backoff_weight = TX_BACKOFF_WEIGHT_MIN;
-#endif
vif = netdev_priv(dev);
wl = vif->wilc;
- up(&wl->txq_thread_started);
+ complete(&wl->txq_thread_started);
while (1) {
down(&wl->txq_event);
if (wl->close) {
- up(&wl->txq_thread_started);
+ complete(&wl->txq_thread_started);
while (!kthread_should_stop())
schedule();
break;
}
-#if !defined USE_TX_BACKOFF_DELAY_IF_NO_BUFFERS
- ret = wilc_wlan_handle_txq(dev, &txq_count);
-#else
do {
ret = wilc_wlan_handle_txq(dev, &txq_count);
if (txq_count < FLOW_CONTROL_LOWER_THRESHOLD) {
@@ -345,20 +333,7 @@ static int linux_wlan_txq_task(void *vp)
if (netif_queue_stopped(wl->vif[1]->ndev))
netif_wake_queue(wl->vif[1]->ndev);
}
-
- if (ret == WILC_TX_ERR_NO_BUF) {
- backoff_weight += TX_BACKOFF_WEIGHT_INCR_STEP;
- if (backoff_weight > TX_BACKOFF_WEIGHT_MAX)
- backoff_weight = TX_BACKOFF_WEIGHT_MAX;
- } else {
- if (backoff_weight > TX_BACKOFF_WEIGHT_MIN) {
- backoff_weight -= TX_BACKOFF_WEIGHT_DECR_STEP;
- if (backoff_weight < TX_BACKOFF_WEIGHT_MIN)
- backoff_weight = TX_BACKOFF_WEIGHT_MIN;
- }
- }
} while (ret == WILC_TX_ERR_NO_BUF && !wl->close);
-#endif
}
return 0;
}
@@ -449,7 +424,6 @@ static int linux_wlan_init_test_config(struct net_device *dev,
struct wilc_vif *vif)
{
unsigned char c_val[64];
- unsigned char mac_add[] = {0x00, 0x80, 0xC2, 0x5E, 0xa2, 0xff};
struct wilc *wilc = vif->wilc;
struct wilc_priv *priv;
struct host_if_drv *hif_drv;
@@ -458,9 +432,6 @@ static int linux_wlan_init_test_config(struct net_device *dev,
priv = wiphy_priv(dev->ieee80211_ptr->wiphy);
hif_drv = (struct host_if_drv *)priv->hif_drv;
netdev_dbg(dev, "Host = %p\n", hif_drv);
- wilc_get_mac_address(vif, mac_add);
-
- netdev_dbg(dev, "MAC address is : %pM\n", mac_add);
wilc_get_chipid(wilc, false);
*(int *)c_val = 1;
@@ -622,11 +593,6 @@ static int linux_wlan_init_test_config(struct net_device *dev,
0))
goto _fail_;
- memcpy(c_val, mac_add, 6);
-
- if (!wilc_wlan_cfg_set(vif, 0, WID_MAC_ADDR, c_val, 6, 0, 0))
- goto _fail_;
-
c_val[0] = DETECT_PROTECT_REPORT;
if (!wilc_wlan_cfg_set(vif, 0, WID_11N_OBSS_NONHT_DETECTION, c_val, 1,
0, 0))
@@ -691,14 +657,6 @@ void wilc1000_wlan_deinit(struct net_device *dev)
wilc_wlan_stop(wl);
wilc_wlan_cleanup(dev);
-#if defined(PLAT_ALLWINNER_A20) || defined(PLAT_ALLWINNER_A23) || defined(PLAT_ALLWINNER_A31)
- if (!wl->dev_irq_num &&
- wl->hif_func->disable_interrupt) {
- mutex_lock(&wl->hif_cs);
- wl->hif_func->disable_interrupt(wl);
- mutex_unlock(&wl->hif_cs);
- }
-#endif
wlan_deinit_locks(dev);
wl->initialized = false;
@@ -727,8 +685,7 @@ static int wlan_init_locks(struct net_device *dev)
sema_init(&wl->cfg_event, 0);
sema_init(&wl->sync_event, 0);
-
- sema_init(&wl->txq_thread_started, 0);
+ init_completion(&wl->txq_thread_started);
return 0;
}
@@ -765,7 +722,7 @@ static int wlan_initialize_threads(struct net_device *dev)
wilc->close = 0;
return -ENOBUFS;
}
- down(&wilc->txq_thread_started);
+ wait_for_completion(&wilc->txq_thread_started);
return 0;
}
@@ -896,25 +853,20 @@ static int mac_init_fn(struct net_device *ndev)
int wilc_mac_open(struct net_device *ndev)
{
struct wilc_vif *vif;
- struct wilc *wilc;
unsigned char mac_add[ETH_ALEN] = {0};
int ret = 0;
int i = 0;
- struct wilc_priv *priv;
struct wilc *wl;
vif = netdev_priv(ndev);
wl = vif->wilc;
if (!wl || !wl->dev) {
- netdev_err(ndev, "wilc1000: SPI device not ready\n");
+ netdev_err(ndev, "device not ready\n");
return -ENODEV;
}
- vif = netdev_priv(ndev);
- wilc = vif->wilc;
- priv = wiphy_priv(vif->ndev->ieee80211_ptr->wiphy);
netdev_dbg(ndev, "MAC OPEN[%p]\n", ndev);
ret = wilc_init_host_int(ndev);
@@ -933,13 +885,13 @@ int wilc_mac_open(struct net_device *ndev)
wilc_set_wfi_drv_handler(vif,
wilc_get_vif_idx(vif),
0);
- } else if (!wilc_wlan_get_num_conn_ifcs(wilc)) {
+ } else if (!wilc_wlan_get_num_conn_ifcs(wl)) {
wilc_set_wfi_drv_handler(vif,
wilc_get_vif_idx(vif),
- wilc->open_ifcs);
+ wl->open_ifcs);
} else {
- if (memcmp(wilc->vif[i ^ 1]->bssid,
- wilc->vif[i ^ 1]->src_addr, 6))
+ if (memcmp(wl->vif[i ^ 1]->bssid,
+ wl->vif[i ^ 1]->src_addr, 6))
wilc_set_wfi_drv_handler(vif,
wilc_get_vif_idx(vif),
0);
@@ -969,12 +921,12 @@ int wilc_mac_open(struct net_device *ndev)
wilc_mgmt_frame_register(vif->ndev->ieee80211_ptr->wiphy,
vif->ndev->ieee80211_ptr,
- vif->g_struct_frame_reg[0].frame_type,
- vif->g_struct_frame_reg[0].reg);
+ vif->frame_reg[0].type,
+ vif->frame_reg[0].reg);
wilc_mgmt_frame_register(vif->ndev->ieee80211_ptr->wiphy,
vif->ndev->ieee80211_ptr,
- vif->g_struct_frame_reg[1].frame_type,
- vif->g_struct_frame_reg[1].reg);
+ vif->frame_reg[1].type,
+ vif->frame_reg[1].reg);
netif_wake_queue(ndev);
wl->open_ifcs++;
vif->mac_opened = 1;
@@ -991,14 +943,10 @@ static struct net_device_stats *mac_stats(struct net_device *dev)
static void wilc_set_multicast_list(struct net_device *dev)
{
struct netdev_hw_addr *ha;
- struct wilc_priv *priv;
- struct host_if_drv *hif_drv;
struct wilc_vif *vif;
int i = 0;
- priv = wiphy_priv(dev->ieee80211_ptr->wiphy);
vif = netdev_priv(dev);
- hif_drv = (struct host_if_drv *)priv->hif_drv;
if (dev->flags & IFF_PROMISC)
return;
@@ -1152,7 +1100,6 @@ static int mac_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
s8 rssi;
u32 size = 0, length = 0;
struct wilc_vif *vif;
- struct wilc_priv *priv;
s32 ret = 0;
struct wilc *wilc;
@@ -1176,7 +1123,6 @@ static int mac_ioctl(struct net_device *ndev, struct ifreq *req, int cmd)
return PTR_ERR(buff);
if (strncasecmp(buff, "RSSI", length) == 0) {
- priv = wiphy_priv(vif->ndev->ieee80211_ptr->wiphy);
ret = wilc_get_rssi(vif, &rssi);
netdev_info(ndev, "RSSI :%d\n", rssi);
@@ -1263,8 +1209,8 @@ void WILC_WFI_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size)
}
vif = netdev_priv(wilc->vif[1]->ndev);
- if ((buff[0] == vif->g_struct_frame_reg[0].frame_type && vif->g_struct_frame_reg[0].reg) ||
- (buff[0] == vif->g_struct_frame_reg[1].frame_type && vif->g_struct_frame_reg[1].reg))
+ if ((buff[0] == vif->frame_reg[0].type && vif->frame_reg[0].reg) ||
+ (buff[0] == vif->frame_reg[1].type && vif->frame_reg[1].reg))
WILC_WFI_p2p_rx(wilc->vif[1]->ndev, buff, size);
}
@@ -1280,8 +1226,10 @@ void wilc_netdev_cleanup(struct wilc *wilc)
vif[i] = netdev_priv(wilc->vif[i]->ndev);
}
- if (wilc && wilc->firmware)
+ if (wilc && wilc->firmware) {
release_firmware(wilc->firmware);
+ wilc->firmware = NULL;
+ }
if (wilc && (wilc->vif[0]->ndev || wilc->vif[1]->ndev)) {
wilc_lock_timeout(wilc, &close_exit_sync, 5 * 1000);
diff --git a/drivers/staging/wilc1000/wilc_spi.c b/drivers/staging/wilc1000/wilc_spi.c
index d41b8b6790af..4268e2f29307 100644
--- a/drivers/staging/wilc1000/wilc_spi.c
+++ b/drivers/staging/wilc1000/wilc_spi.c
@@ -196,9 +196,6 @@ static int wilc_spi_tx(struct wilc *wilc, u8 *b, u32 len)
dev_err(&spi->dev,
"can't write data with the following length: %d\n",
len);
- dev_err(&spi->dev,
- "FAILED due to NULL buffer or ZERO length check the following length: %d\n",
- len);
ret = -EINVAL;
}
diff --git a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
index 544917d8b2df..51aff4ff7d7c 100644
--- a/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
+++ b/drivers/staging/wilc1000/wilc_wfi_cfgoperations.c
@@ -451,7 +451,7 @@ static void CfgScanResult(enum scan_event scan_event,
} else if (scan_event == SCAN_EVENT_DONE) {
refresh_scan(priv, 1, false);
- down(&(priv->hSemScanReq));
+ mutex_lock(&priv->scan_req_lock);
if (priv->pstrScanReq) {
cfg80211_scan_done(priv->pstrScanReq, false);
@@ -459,9 +459,9 @@ static void CfgScanResult(enum scan_event scan_event,
priv->bCfgScanning = false;
priv->pstrScanReq = NULL;
}
- up(&(priv->hSemScanReq));
+ mutex_unlock(&priv->scan_req_lock);
} else if (scan_event == SCAN_EVENT_ABORTED) {
- down(&(priv->hSemScanReq));
+ mutex_lock(&priv->scan_req_lock);
if (priv->pstrScanReq) {
update_scan_time();
@@ -471,7 +471,7 @@ static void CfgScanResult(enum scan_event scan_event,
priv->bCfgScanning = false;
priv->pstrScanReq = NULL;
}
- up(&(priv->hSemScanReq));
+ mutex_unlock(&priv->scan_req_lock);
}
}
}
@@ -558,11 +558,11 @@ static void CfgConnectResult(enum conn_event enuConnDisconnEvent,
if (!pstrWFIDrv->p2p_connect)
wlan_channel = INVALID_CHANNEL;
- if ((pstrWFIDrv->IFC_UP) && (dev == wl->vif[1]->ndev)) {
+ if ((pstrWFIDrv->IFC_UP) && (dev == wl->vif[1]->ndev))
pstrDisconnectNotifInfo->reason = 3;
- } else if ((!pstrWFIDrv->IFC_UP) && (dev == wl->vif[1]->ndev)) {
+ else if ((!pstrWFIDrv->IFC_UP) && (dev == wl->vif[1]->ndev))
pstrDisconnectNotifInfo->reason = 1;
- }
+
cfg80211_disconnected(dev, pstrDisconnectNotifInfo->reason, pstrDisconnectNotifInfo->ie,
pstrDisconnectNotifInfo->ie_len, false,
GFP_KERNEL);
@@ -739,18 +739,15 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
wilc_add_wep_key_bss_sta(vif, sme->key, sme->key_len,
sme->key_idx);
} else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2) {
- if (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_TKIP) {
+ if (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_TKIP)
u8security = ENCRYPT_ENABLED | WPA2 | TKIP;
- } else {
+ else
u8security = ENCRYPT_ENABLED | WPA2 | AES;
- }
} else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1) {
- if (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_TKIP) {
+ if (sme->crypto.cipher_group == WLAN_CIPHER_SUITE_TKIP)
u8security = ENCRYPT_ENABLED | WPA | TKIP;
- } else {
+ else
u8security = ENCRYPT_ENABLED | WPA | AES;
- }
-
} else {
s32Error = -ENOTSUPP;
netdev_err(dev, "Not supported cipher\n");
@@ -762,11 +759,10 @@ static int connect(struct wiphy *wiphy, struct net_device *dev,
if ((sme->crypto.wpa_versions & NL80211_WPA_VERSION_1)
|| (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)) {
for (i = 0; i < sme->crypto.n_ciphers_pairwise; i++) {
- if (sme->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP) {
+ if (sme->crypto.ciphers_pairwise[i] == WLAN_CIPHER_SUITE_TKIP)
u8security = u8security | TKIP;
- } else {
+ else
u8security = u8security | AES;
- }
}
}
@@ -823,11 +819,22 @@ static int disconnect(struct wiphy *wiphy, struct net_device *dev, u16 reason_co
struct wilc_priv *priv;
struct host_if_drv *pstrWFIDrv;
struct wilc_vif *vif;
+ struct wilc *wilc;
u8 NullBssid[ETH_ALEN] = {0};
wilc_connecting = 0;
priv = wiphy_priv(wiphy);
vif = netdev_priv(priv->dev);
+ wilc = vif->wilc;
+
+ if (!wilc)
+ return -EIO;
+
+ if (wilc->close) {
+		/* disconnect already done */
+ cfg80211_disconnected(dev, 0, NULL, 0, true, GFP_KERNEL);
+ return 0;
+ }
pstrWFIDrv = (struct host_if_drv *)priv->hif_drv;
if (!pstrWFIDrv->p2p_connect)
@@ -1115,9 +1122,12 @@ static int del_key(struct wiphy *wiphy, struct net_device *netdev,
}
if (key_index >= 0 && key_index <= 3) {
- memset(priv->WILC_WFI_wep_key[key_index], 0, priv->WILC_WFI_wep_key_len[key_index]);
- priv->WILC_WFI_wep_key_len[key_index] = 0;
- wilc_remove_wep_key(vif, key_index);
+ if (priv->WILC_WFI_wep_key_len[key_index]) {
+ memset(priv->WILC_WFI_wep_key[key_index], 0,
+ priv->WILC_WFI_wep_key_len[key_index]);
+ priv->WILC_WFI_wep_key_len[key_index] = 0;
+ wilc_remove_wep_key(vif, key_index);
+ }
} else {
wilc_remove_key(priv->hif_drv, mac_addr);
}
@@ -1355,9 +1365,8 @@ static void WILC_WFI_CfgParseRxAction(u8 *buf, u32 len)
u8 channel_list_attr_index = 0;
while (index < len) {
- if (buf[index] == GO_INTENT_ATTR_ID) {
+ if (buf[index] == GO_INTENT_ATTR_ID)
buf[index + 3] = (buf[index + 3] & 0x01) | (0x00 << 1);
- }
if (buf[index] == CHANLIST_ATTR_ID)
channel_list_attr_index = index;
@@ -1369,9 +1378,8 @@ static void WILC_WFI_CfgParseRxAction(u8 *buf, u32 len)
if (channel_list_attr_index) {
for (i = channel_list_attr_index + 3; i < ((channel_list_attr_index + 3) + buf[channel_list_attr_index + 1]); i++) {
if (buf[i] == 0x51) {
- for (j = i + 2; j < ((i + 2) + buf[i + 1]); j++) {
+ for (j = i + 2; j < ((i + 2) + buf[i + 1]); j++)
buf[j] = wlan_channel;
- }
break;
}
}
@@ -1409,9 +1417,8 @@ static void WILC_WFI_CfgParseTxAction(u8 *buf, u32 len, bool bOperChan, u8 iftyp
if (channel_list_attr_index) {
for (i = channel_list_attr_index + 3; i < ((channel_list_attr_index + 3) + buf[channel_list_attr_index + 1]); i++) {
if (buf[i] == 0x51) {
- for (j = i + 2; j < ((i + 2) + buf[i + 1]); j++) {
+ for (j = i + 2; j < ((i + 2) + buf[i + 1]); j++)
buf[j] = wlan_channel;
- }
break;
}
}
@@ -1752,15 +1759,15 @@ void wilc_mgmt_frame_register(struct wiphy *wiphy, struct wireless_dev *wdev,
switch (frame_type) {
case PROBE_REQ:
{
- vif->g_struct_frame_reg[0].frame_type = frame_type;
- vif->g_struct_frame_reg[0].reg = reg;
+ vif->frame_reg[0].type = frame_type;
+ vif->frame_reg[0].reg = reg;
}
break;
case ACTION:
{
- vif->g_struct_frame_reg[1].frame_type = frame_type;
- vif->g_struct_frame_reg[1].reg = reg;
+ vif->frame_reg[1].type = frame_type;
+ vif->frame_reg[1].reg = reg;
}
break;
@@ -1797,6 +1804,7 @@ static int dump_station(struct wiphy *wiphy, struct net_device *dev,
wilc_get_rssi(vif, &sinfo->signal);
+ memcpy(mac, priv->au8AssociatedBss, ETH_ALEN);
return 0;
}
@@ -2269,7 +2277,6 @@ struct wireless_dev *wilc_create_wiphy(struct net_device *net, struct device *de
}
priv = wdev_priv(wdev);
- sema_init(&(priv->SemHandleUpdateStats), 1);
priv->wdev = wdev;
wdev->wiphy->max_scan_ssids = MAX_NUM_PROBED_SSID;
#ifdef CONFIG_PM
@@ -2315,7 +2322,7 @@ int wilc_init_host_int(struct net_device *net)
priv->bInP2PlistenState = false;
- sema_init(&(priv->hSemScanReq), 1);
+ mutex_init(&priv->scan_req_lock);
s32Error = wilc_init(net, &priv->hif_drv);
if (s32Error)
netdev_err(net, "Error while initializing hostinterface\n");
diff --git a/drivers/staging/wilc1000/wilc_wfi_netdevice.h b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
index 4123cffe3a6e..3a561df6d370 100644
--- a/drivers/staging/wilc1000/wilc_wfi_netdevice.h
+++ b/drivers/staging/wilc1000/wilc_wfi_netdevice.h
@@ -130,8 +130,7 @@ struct wilc_priv {
struct wilc_wfi_key *wilc_ptk[MAX_NUM_STA];
u8 wilc_groupkey;
/* semaphores */
- struct semaphore SemHandleUpdateStats;
- struct semaphore hSemScanReq;
+ struct mutex scan_req_lock;
/* */
bool gbAutoRateAdjusted;
@@ -139,18 +138,17 @@ struct wilc_priv {
};
-typedef struct {
- u16 frame_type;
+struct frame_reg {
+ u16 type;
bool reg;
-
-} struct_frame_reg;
+};
struct wilc_vif {
u8 idx;
u8 iftype;
int monitor_flag;
int mac_opened;
- struct_frame_reg g_struct_frame_reg[num_reg_frame];
+ struct frame_reg frame_reg[num_reg_frame];
struct net_device_stats netstats;
struct wilc *wilc;
u8 src_addr[ETH_ALEN];
@@ -181,8 +179,7 @@ struct wilc {
struct semaphore cfg_event;
struct semaphore sync_event;
struct semaphore txq_event;
-
- struct semaphore txq_thread_started;
+ struct completion txq_thread_started;
struct task_struct *txq_thread;
diff --git a/drivers/staging/wilc1000/wilc_wlan.c b/drivers/staging/wilc1000/wilc_wlan.c
index fd938fb43dd3..11e16d56ace7 100644
--- a/drivers/staging/wilc1000/wilc_wlan.c
+++ b/drivers/staging/wilc1000/wilc_wlan.c
@@ -150,11 +150,6 @@ static u32 pending_base;
static u32 tcp_session;
static u32 pending_acks;
-static inline int init_tcp_tracking(void)
-{
- return 0;
-}
-
static inline int add_tcp_session(u32 src_prt, u32 dst_prt, u32 seq)
{
if (tcp_session < 2 * MAX_TCP_SESSION) {
@@ -330,8 +325,11 @@ static int wilc_wlan_txq_add_cfg_pkt(struct wilc_vif *vif, u8 *buffer,
tqe->priv = NULL;
tqe->tcp_pending_ack_idx = NOT_TCP_ACK;
- if (wilc_wlan_txq_add_to_head(vif, tqe))
+ if (wilc_wlan_txq_add_to_head(vif, tqe)) {
+ kfree(tqe);
return 0;
+ }
+
return 1;
}
@@ -626,13 +624,12 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
if ((reg & 0x1) == 0) {
break;
- } else {
- counter++;
- if (counter > 200) {
- counter = 0;
- ret = wilc->hif_func->hif_write_reg(wilc, WILC_HOST_TX_CTRL, 0);
- break;
- }
+ }
+ counter++;
+ if (counter > 200) {
+ counter = 0;
+ ret = wilc->hif_func->hif_write_reg(wilc, WILC_HOST_TX_CTRL, 0);
+ break;
}
} while (!wilc->quit);
@@ -658,9 +655,8 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
if ((reg >> 2) & 0x1) {
entries = ((reg >> 3) & 0x3f);
break;
- } else {
- release_bus(wilc, RELEASE_ALLOW_SLEEP);
}
+ release_bus(wilc, RELEASE_ALLOW_SLEEP);
} while (--timeout);
if (timeout <= 0) {
ret = wilc->hif_func->hif_write_reg(wilc, WILC_HOST_VMM_CTL, 0x0);
@@ -679,9 +675,8 @@ int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count)
if (!ret)
break;
break;
- } else {
- break;
}
+ break;
} while (1);
if (!ret)
@@ -900,8 +895,6 @@ static void wilc_wlan_handle_isr_ext(struct wilc *wilc, u32 int_status)
DATA_INT_CLR | ENABLE_RX_VMM);
ret = wilc->hif_func->hif_block_rx_ext(wilc, 0, buffer, size);
- if (!ret)
- goto _end_;
_end_:
if (ret) {
offset += size;
@@ -951,10 +944,8 @@ int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer,
blksz = BIT(12);
dma_buffer = kmalloc(blksz, GFP_KERNEL);
- if (!dma_buffer) {
- ret = -EIO;
- goto _fail_1;
- }
+ if (!dma_buffer)
+ return -EIO;
offset = 0;
do {
@@ -992,8 +983,6 @@ _fail_:
kfree(dma_buffer);
-_fail_1:
-
return (ret < 0) ? ret : 0;
}
@@ -1211,7 +1200,7 @@ static int wilc_wlan_cfg_commit(struct wilc_vif *vif, int type,
return 0;
}
-int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u32 wid, u8 *buffer,
+int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u16 wid, u8 *buffer,
u32 buffer_size, int commit, u32 drv_handler)
{
u32 offset;
@@ -1226,7 +1215,7 @@ int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u32 wid, u8 *buffer,
offset = wilc->cfg_frame_offset;
ret_size = wilc_wlan_cfg_set_wid(wilc->cfg_frame.frame, offset,
- (u16)wid, buffer, buffer_size);
+ wid, buffer, buffer_size);
offset += ret_size;
wilc->cfg_frame_offset = offset;
@@ -1253,7 +1242,7 @@ int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u32 wid, u8 *buffer,
return ret_size;
}
-int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u32 wid, int commit,
+int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u16 wid, int commit,
u32 drv_handler)
{
u32 offset;
@@ -1267,8 +1256,7 @@ int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u32 wid, int commit,
wilc->cfg_frame_offset = 0;
offset = wilc->cfg_frame_offset;
- ret_size = wilc_wlan_cfg_get_wid(wilc->cfg_frame.frame, offset,
- (u16)wid);
+ ret_size = wilc_wlan_cfg_get_wid(wilc->cfg_frame.frame, offset, wid);
offset += ret_size;
wilc->cfg_frame_offset = offset;
@@ -1291,9 +1279,9 @@ int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u32 wid, int commit,
return ret_size;
}
-int wilc_wlan_cfg_get_val(u32 wid, u8 *buffer, u32 buffer_size)
+int wilc_wlan_cfg_get_val(u16 wid, u8 *buffer, u32 buffer_size)
{
- return wilc_wlan_cfg_get_wid_value((u16)wid, buffer, buffer_size);
+ return wilc_wlan_cfg_get_wid_value(wid, buffer, buffer_size);
}
int wilc_send_config_pkt(struct wilc_vif *vif, u8 mode, struct wid *wids,
@@ -1440,7 +1428,6 @@ int wilc_wlan_init(struct net_device *dev)
ret = -EIO;
goto _fail_;
}
- init_tcp_tracking();
return 1;
diff --git a/drivers/staging/wilc1000/wilc_wlan.h b/drivers/staging/wilc1000/wilc_wlan.h
index bcd4bfa5accc..30e5312ee87e 100644
--- a/drivers/staging/wilc1000/wilc_wlan.h
+++ b/drivers/staging/wilc1000/wilc_wlan.h
@@ -284,11 +284,11 @@ int wilc_wlan_txq_add_net_pkt(struct net_device *dev, void *priv, u8 *buffer,
int wilc_wlan_handle_txq(struct net_device *dev, u32 *txq_count);
void wilc_handle_isr(struct wilc *wilc);
void wilc_wlan_cleanup(struct net_device *dev);
-int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u32 wid, u8 *buffer,
+int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u16 wid, u8 *buffer,
u32 buffer_size, int commit, u32 drv_handler);
-int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u32 wid, int commit,
+int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u16 wid, int commit,
u32 drv_handler);
-int wilc_wlan_cfg_get_val(u32 wid, u8 *buffer, u32 buffer_size);
+int wilc_wlan_cfg_get_val(u16 wid, u8 *buffer, u32 buffer_size);
int wilc_wlan_txq_add_mgmt_pkt(struct net_device *dev, void *priv, u8 *buffer,
u32 buffer_size, wilc_tx_complete_func_t func);
void wilc_chip_sleep_manually(struct wilc *wilc);
diff --git a/drivers/staging/wilc1000/wilc_wlan_cfg.c b/drivers/staging/wilc1000/wilc_wlan_cfg.c
index b3425b9cec94..926fc16319b6 100644
--- a/drivers/staging/wilc1000/wilc_wlan_cfg.c
+++ b/drivers/staging/wilc1000/wilc_wlan_cfg.c
@@ -230,7 +230,7 @@ static int wilc_wlan_cfg_set_str(u8 *frame, u32 offset, u16 id, u8 *str, u32 siz
buf[1] = (u8)(id >> 8);
buf[2] = (u8)size;
- if ((str != NULL) && (size != 0))
+ if ((str) && (size != 0))
memcpy(&buf[3], str, size);
return (size + 3);
@@ -251,11 +251,10 @@ static int wilc_wlan_cfg_set_bin(u8 *frame, u32 offset, u16 id, u8 *b, u32 size)
buf[2] = (u8)size;
buf[3] = (u8)(size >> 8);
- if ((b != NULL) && (size != 0)) {
+ if ((b) && (size != 0)) {
memcpy(&buf[4], b, size);
- for (i = 0; i < size; i++) {
+ for (i = 0; i < size; i++)
checksum += buf[i + 4];
- }
}
buf[size + 4] = checksum;
diff --git a/drivers/staging/wilc1000/wilc_wlan_if.h b/drivers/staging/wilc1000/wilc_wlan_if.h
index 83cf84dd63b5..410bfc034319 100644
--- a/drivers/staging/wilc1000/wilc_wlan_if.h
+++ b/drivers/staging/wilc1000/wilc_wlan_if.h
@@ -15,18 +15,6 @@
/********************************************
*
- * Debug Flags
- *
- ********************************************/
-
-#define N_INIT 0x00000001
-#define N_ERR 0x00000002
-#define N_TXQ 0x00000004
-#define N_INTR 0x00000008
-#define N_RXQ 0x00000010
-
-/********************************************
- *
* Host Interface Defines
*
********************************************/
@@ -37,15 +25,6 @@
/********************************************
*
- * Tx/Rx Buffer Size Defines
- *
- ********************************************/
-
-#define CE_TX_BUFFER_SIZE (64 * 1024)
-#define CE_RX_BUFFER_SIZE (384 * 1024)
-
-/********************************************
- *
* Wlan Interface Defines
*
********************************************/
diff --git a/drivers/staging/wlan-ng/cfg80211.c b/drivers/staging/wlan-ng/cfg80211.c
index 2438cf7cc695..a6e6fb9f42e1 100644
--- a/drivers/staging/wlan-ng/cfg80211.c
+++ b/drivers/staging/wlan-ng/cfg80211.c
@@ -771,8 +771,10 @@ static struct wiphy *wlan_create_wiphy(struct device *dev, wlandevice_t *wlandev
wiphy->n_cipher_suites = PRISM2_NUM_CIPHER_SUITES;
wiphy->cipher_suites = prism2_cipher_suites;
- if (wiphy_register(wiphy) < 0)
+ if (wiphy_register(wiphy) < 0) {
+ wiphy_free(wiphy);
return NULL;
+ }
return wiphy;
}
diff --git a/drivers/staging/wlan-ng/hfa384x_usb.c b/drivers/staging/wlan-ng/hfa384x_usb.c
index 21a92df85931..337810750f2b 100644
--- a/drivers/staging/wlan-ng/hfa384x_usb.c
+++ b/drivers/staging/wlan-ng/hfa384x_usb.c
@@ -614,7 +614,7 @@ static hfa384x_usbctlx_t *usbctlx_alloc(void)
ctlx = kzalloc(sizeof(*ctlx),
in_interrupt() ? GFP_ATOMIC : GFP_KERNEL);
- if (ctlx != NULL)
+ if (ctlx)
init_completion(&ctlx->done);
return ctlx;
@@ -797,7 +797,7 @@ static inline struct usbctlx_completor *init_rmem_completor(
----------------------------------------------------------------*/
static void hfa384x_cb_status(hfa384x_t *hw, const hfa384x_usbctlx_t *ctlx)
{
- if (ctlx->usercb != NULL) {
+ if (ctlx->usercb) {
hfa384x_cmdresult_t cmdresult;
if (ctlx->state != CTLX_COMPLETE) {
@@ -2738,7 +2738,7 @@ static void hfa384x_usbctlx_completion_task(unsigned long data)
/* Call the completion function that this
* command was assigned, assuming it has one.
*/
- if (ctlx->cmdcb != NULL) {
+ if (ctlx->cmdcb) {
spin_unlock_irqrestore(&hw->ctlxq.lock, flags);
ctlx->cmdcb(hw, ctlx);
spin_lock_irqsave(&hw->ctlxq.lock, flags);
@@ -3629,7 +3629,7 @@ static void hfa384x_ctlxout_callback(struct urb *urb)
dbprint_urb(urb);
#endif
if ((urb->status == -ESHUTDOWN) ||
- (urb->status == -ENODEV) || (hw == NULL))
+ (urb->status == -ENODEV) || !hw)
return;
retry:
diff --git a/drivers/staging/wlan-ng/p80211conv.c b/drivers/staging/wlan-ng/p80211conv.c
index 0a8f3960d465..6354036ffb42 100644
--- a/drivers/staging/wlan-ng/p80211conv.c
+++ b/drivers/staging/wlan-ng/p80211conv.c
@@ -75,8 +75,8 @@
#include "p80211ioctl.h"
#include "p80211req.h"
-static u8 oui_rfc1042[] = { 0x00, 0x00, 0x00 };
-static u8 oui_8021h[] = { 0x00, 0x00, 0xf8 };
+static const u8 oui_rfc1042[] = { 0x00, 0x00, 0x00 };
+static const u8 oui_8021h[] = { 0x00, 0x00, 0xf8 };
/*----------------------------------------------------------------
* p80211pb_ether_to_80211
@@ -243,7 +243,6 @@ static void orinoco_spy_gather(wlandevice_t *wlandev, char *mac,
for (i = 0; i < wlandev->spy_number; i++) {
if (!memcmp(wlandev->spy_address[i], mac, ETH_ALEN)) {
- memcpy(wlandev->spy_address[i], mac, ETH_ALEN);
wlandev->spy_stat[i].level = rxmeta->signal;
wlandev->spy_stat[i].noise = rxmeta->noise;
wlandev->spy_stat[i].qual =
diff --git a/drivers/staging/wlan-ng/p80211netdev.c b/drivers/staging/wlan-ng/p80211netdev.c
index 1f9dfba5dbb3..90cc8cdcf969 100644
--- a/drivers/staging/wlan-ng/p80211netdev.c
+++ b/drivers/staging/wlan-ng/p80211netdev.c
@@ -156,7 +156,7 @@ static int p80211knetdev_open(netdevice_t *netdev)
return -ENODEV;
/* Tell the MSD to open */
- if (wlandev->open != NULL) {
+ if (wlandev->open) {
result = wlandev->open(wlandev);
if (result == 0) {
netif_start_queue(wlandev->netdev);
@@ -186,7 +186,7 @@ static int p80211knetdev_stop(netdevice_t *netdev)
int result = 0;
wlandevice_t *wlandev = netdev->ml_priv;
- if (wlandev->close != NULL)
+ if (wlandev->close)
result = wlandev->close(wlandev);
netif_stop_queue(wlandev->netdev);
diff --git a/drivers/staging/wlan-ng/p80211netdev.h b/drivers/staging/wlan-ng/p80211netdev.h
index 810ee68aa18e..820a0e20a941 100644
--- a/drivers/staging/wlan-ng/p80211netdev.h
+++ b/drivers/staging/wlan-ng/p80211netdev.h
@@ -158,7 +158,6 @@ extern int wlan_wext_write;
/* WLAN device type */
typedef struct wlandevice {
- struct wlandevice *next; /* link for list of devices */
void *priv; /* private data for MSD */
/* Subsystem State */
diff --git a/drivers/staging/wlan-ng/prism2fw.c b/drivers/staging/wlan-ng/prism2fw.c
index 8564d9eb918f..56bffd93c982 100644
--- a/drivers/staging/wlan-ng/prism2fw.c
+++ b/drivers/staging/wlan-ng/prism2fw.c
@@ -278,7 +278,8 @@ static int prism2_fwapply(const struct ihex_binrec *rfptr,
/* Build the PDA we're going to use. */
if (read_cardpda(&pda, wlandev)) {
netdev_err(wlandev->netdev, "load_cardpda failed, exiting.\n");
- return 1;
+ result = 1;
+ goto out;
}
/* read the card's PRI-SUP */
@@ -315,55 +316,58 @@ static int prism2_fwapply(const struct ihex_binrec *rfptr,
if (result) {
netdev_err(wlandev->netdev,
"Failed to read the data exiting.\n");
- return 1;
+ goto out;
}
result = validate_identity();
-
if (result) {
netdev_err(wlandev->netdev, "Incompatible firmware image.\n");
- return 1;
+ goto out;
}
if (startaddr == 0x00000000) {
netdev_err(wlandev->netdev,
"Can't RAM download a Flash image!\n");
- return 1;
+ result = 1;
+ goto out;
}
/* Make the image chunks */
result = mkimage(fchunk, &nfchunks);
if (result) {
netdev_err(wlandev->netdev, "Failed to make image chunk.\n");
- return 1;
+ goto free_chunks;
}
/* Do any plugging */
result = plugimage(fchunk, nfchunks, s3plug, ns3plug, &pda);
if (result) {
netdev_err(wlandev->netdev, "Failed to plug data.\n");
- return 1;
+ goto free_chunks;
}
/* Insert any CRCs */
- if (crcimage(fchunk, nfchunks, s3crc, ns3crc)) {
+ result = crcimage(fchunk, nfchunks, s3crc, ns3crc);
+ if (result) {
netdev_err(wlandev->netdev, "Failed to insert all CRCs\n");
- return 1;
+ goto free_chunks;
}
/* Write the image */
result = writeimage(wlandev, fchunk, nfchunks);
if (result) {
netdev_err(wlandev->netdev, "Failed to ramwrite image data.\n");
- return 1;
+ goto free_chunks;
}
+ netdev_info(wlandev->netdev, "prism2_usb: firmware loading finished.\n");
+
+free_chunks:
/* clear any allocated memory */
free_chunks(fchunk, &nfchunks);
free_srecs();
- netdev_info(wlandev->netdev, "prism2_usb: firmware loading finished.\n");
-
+out:
return result;
}
diff --git a/drivers/staging/wlan-ng/prism2usb.c b/drivers/staging/wlan-ng/prism2usb.c
index 41358bbc6246..b26d09ff840c 100644
--- a/drivers/staging/wlan-ng/prism2usb.c
+++ b/drivers/staging/wlan-ng/prism2usb.c
@@ -8,7 +8,7 @@
{ USB_DEVICE(vid, pid), \
.driver_info = (unsigned long)name }
-static struct usb_device_id usb_prism_tbl[] = {
+static const struct usb_device_id usb_prism_tbl[] = {
PRISM_DEV(0x04bb, 0x0922, "IOData AirPort WN-B11/USBS"),
PRISM_DEV(0x07aa, 0x0012, "Corega Wireless LAN USB Stick-11"),
PRISM_DEV(0x09aa, 0x3642, "Prism2.x 11Mbps WLAN USB Adapter"),
diff --git a/drivers/staging/xgifb/XGI_main_26.c b/drivers/staging/xgifb/XGI_main_26.c
index 7eadf922b21f..d56ef1425f6b 100644
--- a/drivers/staging/xgifb/XGI_main_26.c
+++ b/drivers/staging/xgifb/XGI_main_26.c
@@ -1130,8 +1130,9 @@ static int XGIfb_get_cmap_len(const struct fb_var_screeninfo *var)
return (var->bits_per_pixel == 8) ? 256 : 16;
}
-static int XGIfb_setcolreg(unsigned regno, unsigned red, unsigned green,
- unsigned blue, unsigned transp, struct fb_info *info)
+static int XGIfb_setcolreg(unsigned int regno, unsigned int red,
+ unsigned int green, unsigned int blue,
+ unsigned int transp, struct fb_info *info)
{
struct xgifb_video_info *xgifb_info = info->par;
diff --git a/drivers/staging/xgifb/vb_init.c b/drivers/staging/xgifb/vb_init.c
index 26b539bc6faf..062ece22ed84 100644
--- a/drivers/staging/xgifb/vb_init.c
+++ b/drivers/staging/xgifb/vb_init.c
@@ -355,7 +355,8 @@ static void XGINew_DDR2_DefaultRegister(
unsigned long P3d4 = Port, P3c4 = Port - 0x10;
/* keep following setting sequence, each setting in
- * the same reg insert idle */
+ * the same reg insert idle
+ */
xgifb_reg_set(P3d4, 0x82, 0x77);
xgifb_reg_set(P3d4, 0x86, 0x00);
xgifb_reg_get(P3d4, 0x86); /* Insert read command for delay */
@@ -551,7 +552,8 @@ static int XGINew_ReadWriteRest(unsigned short StopAddr,
writel(Position, fbaddr + Position);
}
- usleep_range(500, 1500); /* Fix #1759 Memory Size error in Multi-Adapter. */
+ /* Fix #1759 Memory Size error in Multi-Adapter. */
+ usleep_range(500, 1500);
Position = 0;
@@ -699,11 +701,11 @@ static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension,
break;
case XG42:
/*
- XG42 SR14 D[3] Reserve
- D[2] = 1, Dual Channel
- = 0, Single Channel
-
- It's Different from Other XG40 Series.
+ * XG42 SR14 D[3] Reserve
+ * D[2] = 1, Dual Channel
+ * = 0, Single Channel
+ *
+ * It's Different from Other XG40 Series.
*/
if (XGINew_CheckFrequence(pVBInfo) == 1) { /* DDRII, DDR2x */
pVBInfo->ram_bus = 32; /* 32 bits */
diff --git a/drivers/staging/xgifb/vb_setmode.c b/drivers/staging/xgifb/vb_setmode.c
index f97c77d88173..50c8ea4f5ab7 100644
--- a/drivers/staging/xgifb/vb_setmode.c
+++ b/drivers/staging/xgifb/vb_setmode.c
@@ -108,9 +108,9 @@ static void XGI_SetATTRegs(unsigned short ModeIdIndex,
if (pVBInfo->VBInfo & XGI_SetCRT2ToLCDA) {
ARdata = 0;
} else if ((pVBInfo->VBInfo &
- (SetCRT2ToTV | SetCRT2ToLCD)) &&
- (pVBInfo->VBInfo & SetInSlaveMode)) {
- ARdata = 0;
+ (SetCRT2ToTV | SetCRT2ToLCD)) &&
+ (pVBInfo->VBInfo & SetInSlaveMode)) {
+ ARdata = 0;
}
}
@@ -1992,7 +1992,8 @@ static void XGI_GetVBInfo(unsigned short ModeIdIndex,
}
/* LCD+TV can't support in slave mode
- * (Force LCDA+TV->LCDB) */
+ * (Force LCDA+TV->LCDB)
+ */
if ((tempbx & SetInSlaveMode) && (tempbx & XGI_SetCRT2ToLCDA)) {
tempbx ^= (SetCRT2ToLCD | XGI_SetCRT2ToLCDA |
SetCRT2ToDualEdge);
@@ -2983,7 +2984,7 @@ static void XGI_SetLockRegs(unsigned short ModeNo, unsigned short ModeIdIndex,
if ((pVBInfo->VBInfo & SetCRT2ToHiVision) &&
!(pVBInfo->VBType & VB_SIS301LV) && (resinfo == 7))
- temp -= 2;
+ temp -= 2;
}
/* 0x05 Horizontal Display Start */
@@ -3450,8 +3451,9 @@ static void XGI_SetGroup2(unsigned short ModeNo, unsigned short ModeIdIndex,
if (!(pVBInfo->TVInfo &
(TVSetYPbPr525p | TVSetYPbPr750p)))
tempbx >>= 1;
- } else
+ } else {
tempbx >>= 1;
+ }
}
tempbx -= 2;
@@ -3839,9 +3841,9 @@ static void XGI_SetLCDRegs(unsigned short ModeIdIndex,
if (pVBInfo->VGAVDE == 525) {
if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B
| VB_SIS301LV | VB_SIS302LV
- | VB_XGI301C)) {
+ | VB_XGI301C))
temp = 0xC6;
- } else
+ else
temp = 0xC4;
xgifb_reg_set(pVBInfo->Part2Port, 0x2f, temp);
@@ -3851,9 +3853,9 @@ static void XGI_SetLCDRegs(unsigned short ModeIdIndex,
if (pVBInfo->VGAVDE == 420) {
if (pVBInfo->VBType & (VB_SIS301B | VB_SIS302B
| VB_SIS301LV | VB_SIS302LV
- | VB_XGI301C)) {
+ | VB_XGI301C))
temp = 0x4F;
- } else
+ else
temp = 0x4E;
xgifb_reg_set(pVBInfo->Part2Port, 0x2f, temp);
}
diff --git a/drivers/staging/xgifb/vb_table.h b/drivers/staging/xgifb/vb_table.h
index 45f2c992cd44..c801deb142f6 100644
--- a/drivers/staging/xgifb/vb_table.h
+++ b/drivers/staging/xgifb/vb_table.h
@@ -58,8 +58,9 @@ static const unsigned char XGI27_cr41[24][3] = {
{0xC4, 0x40, 0x84}, /* 1 CR8A */
{0xC4, 0x40, 0x84}, /* 2 CR8B */
{0xB3, 0x13, 0xa4}, /* 3 CR40[7],
- CR99[2:0],
- CR45[3:0]*/
+ * CR99[2:0],
+ * CR45[3:0]
+ */
{0xf0, 0xf5, 0xf0}, /* 4 CR59 */
{0x90, 0x90, 0x24}, /* 5 CR68 */
{0x77, 0x67, 0x44}, /* 6 CR69 */
@@ -101,9 +102,11 @@ const struct XGI_ExtStruct XGI330_EModeIDTable[] = {
{0x38, 0x0a1b, 0x0508, 0x08, 0x00, 0x16},
{0x3a, 0x0e3b, 0x0609, 0x09, 0x00, 0x1e},
{0x3c, 0x0e3b, 0x070a, 0x0a, 0x00, 0x22}, /* mode 1600x1200
- add CRT2MODE [2003/10/07] */
+ * add CRT2MODE [2003/10/07]
+ */
{0x3d, 0x0e7d, 0x070a, 0x0a, 0x00, 0x22}, /* mode 1600x1200
- add CRT2MODE */
+ * add CRT2MODE
+ */
{0x40, 0x9a1c, 0x0000, 0x00, 0x04, 0x00},
{0x41, 0x9a1d, 0x0000, 0x00, 0x04, 0x00},
{0x43, 0x0a1c, 0x0306, 0x06, 0x05, 0x06},
@@ -129,7 +132,8 @@ const struct XGI_ExtStruct XGI330_EModeIDTable[] = {
{0x64, 0x0a7f, 0x0508, 0x08, 0x00, 0x16},
{0x65, 0x0eff, 0x0609, 0x09, 0x00, 0x1e},
{0x66, 0x0eff, 0x070a, 0x0a, 0x00, 0x22}, /* mode 1600x1200
- add CRT2MODE */
+ * add CRT2MODE
+ */
{0x68, 0x067b, 0x080b, 0x0b, 0x00, 0x29},
{0x69, 0x06fd, 0x080b, 0x0b, 0x00, 0x29},
{0x6b, 0x07ff, 0x080b, 0x0b, 0x00, 0x29},
@@ -223,38 +227,38 @@ const struct XGI_CRT1TableStruct XGI_CRT1Table[] = {
0x0D, 0x3E, 0xE0, 0x83, 0xDF, 0x0E, 0x90} }, /* 0xb */
{ {0x65, 0x4F, 0x89, 0x57, 0x9F, 0x00, 0x01, 0x00,
0xFB, 0x1F, 0xE6, 0x8A, 0xDF, 0xFC, 0x10} }, /* 0xc */
- { {0x7B, 0x63, 0x9F, 0x6A, 0x93, 0x00, 0x05, 0x00, /* ;
- 0D (800x600,56Hz) */
- 0x6F, 0xF0, 0x58, 0x8A, 0x57, 0x70, 0xA0} }, /* ;
- (VCLK 36.0MHz) */
- { {0x7F, 0x63, 0x83, 0x6C, 0x1C, 0x00, 0x06, 0x00, /* ;
- 0E (800x600,60Hz) */
- 0x72, 0xF0, 0x58, 0x8C, 0x57, 0x73, 0xA0} }, /* ;
- (VCLK 40.0MHz) */
- { {0x7D, 0x63, 0x81, 0x6E, 0x1D, 0x00, 0x06, 0x00, /* ;
- 0F (800x600,72Hz) */
- 0x98, 0xF0, 0x7C, 0x82, 0x57, 0x99, 0x80} }, /* ;
- (VCLK 50.0MHz) */
- { {0x7F, 0x63, 0x83, 0x69, 0x13, 0x00, 0x06, 0x00, /* ;
- 10 (800x600,75Hz) */
- 0x6F, 0xF0, 0x58, 0x8B, 0x57, 0x70, 0xA0} }, /* ;
- (VCLK 49.5MHz) */
- { {0x7E, 0x63, 0x82, 0x6B, 0x13, 0x00, 0x06, 0x00, /* ;
- 11 (800x600,85Hz) */
- 0x75, 0xF0, 0x58, 0x8B, 0x57, 0x76, 0xA0} }, /* ;
- (VCLK 56.25MHz) */
- { {0x81, 0x63, 0x85, 0x6D, 0x18, 0x00, 0x06, 0x60, /* ;
- 12 (800x600,100Hz) */
- 0x7A, 0xF0, 0x58, 0x8B, 0x57, 0x7B, 0xA0} }, /* ;
- (VCLK 75.8MHz) */
- { {0x83, 0x63, 0x87, 0x6E, 0x19, 0x00, 0x06, 0x60, /* ;
- 13 (800x600,120Hz) */
- 0x81, 0xF0, 0x58, 0x8B, 0x57, 0x82, 0xA0} }, /* ;
- (VCLK 79.411MHz) */
- { {0x85, 0x63, 0x89, 0x6F, 0x1A, 0x00, 0x06, 0x60, /* ;
- 14 (800x600,160Hz) */
- 0x91, 0xF0, 0x58, 0x8B, 0x57, 0x92, 0xA0} }, /* ;
- (VCLK 105.822MHz) */
+ /* 0D (800x600,56Hz) */
+ { {0x7B, 0x63, 0x9F, 0x6A, 0x93, 0x00, 0x05, 0x00,
+ /* (VCLK 36.0MHz) */
+ 0x6F, 0xF0, 0x58, 0x8A, 0x57, 0x70, 0xA0} },
+ /* 0E (800x600,60Hz) */
+ { {0x7F, 0x63, 0x83, 0x6C, 0x1C, 0x00, 0x06, 0x00,
+ /* (VCLK 40.0MHz) */
+ 0x72, 0xF0, 0x58, 0x8C, 0x57, 0x73, 0xA0} },
+ /* 0F (800x600,72Hz) */
+ { {0x7D, 0x63, 0x81, 0x6E, 0x1D, 0x00, 0x06, 0x00,
+ /* (VCLK 50.0MHz) */
+ 0x98, 0xF0, 0x7C, 0x82, 0x57, 0x99, 0x80} },
+ /* 10 (800x600,75Hz) */
+ { {0x7F, 0x63, 0x83, 0x69, 0x13, 0x00, 0x06, 0x00,
+ /* (VCLK 49.5MHz) */
+ 0x6F, 0xF0, 0x58, 0x8B, 0x57, 0x70, 0xA0} },
+ /* 11 (800x600,85Hz) */
+ { {0x7E, 0x63, 0x82, 0x6B, 0x13, 0x00, 0x06, 0x00,
+ /* (VCLK 56.25MHz) */
+ 0x75, 0xF0, 0x58, 0x8B, 0x57, 0x76, 0xA0} },
+ /* 12 (800x600,100Hz) */
+ { {0x81, 0x63, 0x85, 0x6D, 0x18, 0x00, 0x06, 0x60,
+ /* (VCLK 75.8MHz) */
+ 0x7A, 0xF0, 0x58, 0x8B, 0x57, 0x7B, 0xA0} },
+ /* 13 (800x600,120Hz) */
+ { {0x83, 0x63, 0x87, 0x6E, 0x19, 0x00, 0x06, 0x60,
+ /* (VCLK 79.411MHz) */
+ 0x81, 0xF0, 0x58, 0x8B, 0x57, 0x82, 0xA0} },
+ /* 14 (800x600,160Hz) */
+ { {0x85, 0x63, 0x89, 0x6F, 0x1A, 0x00, 0x06, 0x60,
+ /* (VCLK 105.822MHz) */
+ 0x91, 0xF0, 0x58, 0x8B, 0x57, 0x92, 0xA0} },
{ {0x99, 0x7F, 0x9D, 0x84, 0x1A, 0x00, 0x02, 0x00,
0x96, 0x1F, 0x7F, 0x83, 0x7F, 0x97, 0x10} }, /* 0x15 */
{ {0xA3, 0x7F, 0x87, 0x86, 0x97, 0x00, 0x02, 0x00,
@@ -388,7 +392,8 @@ static const struct SiS_LCDData XGI_ExtLCD1024x768Data[] = {
static const struct SiS_LCDData XGI_CetLCD1024x768Data[] = {
{1, 1, 1344, 806, 1344, 806}, /* ; 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{1, 1, 1344, 806, 1344, 806}, /* 01 (320x350,640x350) */
{1, 1, 1344, 806, 1344, 806}, /* 02 (360x400,720x400) */
{1, 1, 1344, 806, 1344, 806}, /* 03 (720x350) */
@@ -421,7 +426,8 @@ static const struct SiS_LCDData XGI_ExtLCD1280x1024Data[] = {
static const struct SiS_LCDData XGI_CetLCD1280x1024Data[] = {
{1, 1, 1688, 1066, 1688, 1066}, /* 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{1, 1, 1688, 1066, 1688, 1066}, /* 01 (320x350,640x350) */
{1, 1, 1688, 1066, 1688, 1066}, /* 02 (360x400,720x400) */
{1, 1, 1688, 1066, 1688, 1066}, /* 03 (720x350) */
@@ -434,7 +440,8 @@ static const struct SiS_LCDData XGI_CetLCD1280x1024Data[] = {
static const struct SiS_LCDData xgifb_lcd_1400x1050[] = {
{211, 100, 2100, 408, 1688, 1066}, /* 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{211, 64, 1536, 358, 1688, 1066}, /* 01 (320x350,640x350) */
{211, 100, 2100, 408, 1688, 1066}, /* 02 (360x400,720x400) */
{211, 64, 1536, 358, 1688, 1066}, /* 03 (720x350) */
@@ -442,13 +449,15 @@ static const struct SiS_LCDData xgifb_lcd_1400x1050[] = {
{211, 72, 1008, 609, 1688, 1066}, /* 05 (800x600x60Hz) */
{211, 128, 1400, 776, 1688, 1066}, /* 06 (1024x768x60Hz) */
{1, 1, 1688, 1066, 1688, 1066}, /* 07 (1280x1024x60Hz
- w/o Scaling) */
+ * w/o Scaling)
+ */
{1, 1, 1688, 1066, 1688, 1066} /* 08 (1400x1050x60Hz) */
};
static const struct SiS_LCDData XGI_ExtLCD1600x1200Data[] = {
{4, 1, 1620, 420, 2160, 1250}, /* 00 (320x200,320x400,
- 640x200,640x400)*/
+ * 640x200,640x400)
+ */
{27, 7, 1920, 375, 2160, 1250}, /* 01 (320x350,640x350) */
{4, 1, 1620, 420, 2160, 1250}, /* 02 (360x400,720x400)*/
{27, 7, 1920, 375, 2160, 1250}, /* 03 (720x350) */
@@ -462,7 +471,8 @@ static const struct SiS_LCDData XGI_ExtLCD1600x1200Data[] = {
static const struct SiS_LCDData XGI_StLCD1600x1200Data[] = {
{27, 4, 800, 500, 2160, 1250}, /* 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{27, 4, 800, 500, 2160, 1250}, /* 01 (320x350,640x350) */
{27, 4, 800, 500, 2160, 1250}, /* 02 (360x400,720x400) */
{27, 4, 800, 500, 2160, 1250}, /* 03 (720x350) */
@@ -489,7 +499,8 @@ static const struct SiS_LCDData XGI_NoScalingData[] = {
static const struct SiS_LCDData XGI_ExtLCD1024x768x75Data[] = {
{42, 25, 1536, 419, 1344, 806}, /* ; 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{48, 25, 1536, 369, 1344, 806}, /* ; 01 (320x350,640x350) */
{42, 25, 1536, 419, 1344, 806}, /* ; 02 (360x400,720x400) */
{48, 25, 1536, 369, 1344, 806}, /* ; 03 (720x350) */
@@ -500,7 +511,8 @@ static const struct SiS_LCDData XGI_ExtLCD1024x768x75Data[] = {
static const struct SiS_LCDData XGI_CetLCD1024x768x75Data[] = {
{1, 1, 1312, 800, 1312, 800}, /* ; 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{1, 1, 1312, 800, 1312, 800}, /* ; 01 (320x350,640x350) */
{1, 1, 1312, 800, 1312, 800}, /* ; 02 (360x400,720x400) */
{1, 1, 1312, 800, 1312, 800}, /* ; 03 (720x350) */
@@ -511,7 +523,8 @@ static const struct SiS_LCDData XGI_CetLCD1024x768x75Data[] = {
static const struct SiS_LCDData xgifb_lcd_1280x1024x75[] = {
{211, 60, 1024, 501, 1688, 1066}, /* ; 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{211, 60, 1024, 508, 1688, 1066}, /* ; 01 (320x350,640x350) */
{211, 60, 1024, 501, 1688, 1066}, /* ; 02 (360x400,720x400) */
{211, 60, 1024, 508, 1688, 1066}, /* ; 03 (720x350) */
@@ -525,7 +538,8 @@ static const struct SiS_LCDData xgifb_lcd_1280x1024x75[] = {
static const struct SiS_LCDData XGI_NoScalingDatax75[] = {
{1, 1, 800, 449, 800, 449}, /* ; 00 (320x200, 320x400,
- 640x200, 640x400) */
+ * 640x200, 640x400)
+ */
{1, 1, 800, 449, 800, 449}, /* ; 01 (320x350, 640x350) */
{1, 1, 900, 449, 900, 449}, /* ; 02 (360x400, 720x400) */
{1, 1, 900, 449, 900, 449}, /* ; 03 (720x350) */
@@ -732,7 +746,8 @@ static const struct XGI_LCDDesStruct XGI_StLCDDes1600x1200Data[] = {
static const struct XGI330_LCDDataDesStruct2 XGI_NoScalingDesData[] = {
{9, 657, 448, 405, 96, 2}, /* 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{9, 657, 448, 355, 96, 2}, /* 01 (320x350,640x350) */
{9, 657, 448, 405, 96, 2}, /* 02 (360x400,720x400) */
{9, 657, 448, 355, 96, 2}, /* 03 (720x350) */
@@ -818,7 +833,8 @@ static const struct XGI_LCDDesStruct XGI_CetLCDDes1280x1024x75Data[] = {
/* Scaling LCD 75Hz */
static const struct XGI330_LCDDataDesStruct2 XGI_NoScalingDesDatax75[] = {
{9, 657, 448, 405, 96, 2}, /* ; 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{9, 657, 448, 355, 96, 2}, /* ; 01 (320x350,640x350) */
{9, 738, 448, 405, 108, 2}, /* ; 02 (360x400,720x400) */
{9, 738, 448, 355, 108, 2}, /* ; 03 (720x350) */
@@ -873,7 +889,8 @@ static const struct SiS_TVData XGI_ExtNTSCData[] = {
static const struct SiS_TVData XGI_St1HiTVData[] = {
{1, 1, 892, 563, 690, 800, 0, 0, 0}, /* 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{1, 1, 892, 563, 690, 700, 0, 0, 0}, /* 01 (320x350,640x350) */
{1, 1, 1000, 563, 785, 800, 0, 0, 0}, /* 02 (360x400,720x400) */
{1, 1, 1000, 563, 785, 700, 0, 0, 0}, /* 03 (720x350) */
@@ -883,7 +900,8 @@ static const struct SiS_TVData XGI_St1HiTVData[] = {
static const struct SiS_TVData XGI_St2HiTVData[] = {
{3, 1, 840, 483, 1648, 960, 0x032, 0, 0}, /* 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{1, 1, 892, 563, 690, 700, 0, 0, 0}, /* 01 (320x350,640x350) */
{3, 1, 840, 483, 1648, 960, 0x032, 0, 0}, /* 02 (360x400,720x400) */
{1, 1, 1000, 563, 785, 700, 0, 0, 0}, /* 03 (720x350) */
@@ -893,7 +911,8 @@ static const struct SiS_TVData XGI_St2HiTVData[] = {
static const struct SiS_TVData XGI_ExtHiTVData[] = {
{6, 1, 840, 563, 1632, 960, 0, 0, 0}, /* 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{3, 1, 960, 563, 1632, 960, 0, 0, 0}, /* 01 (320x350,640x350) */
{3, 1, 840, 483, 1632, 960, 0, 0, 0}, /* 02 (360x400,720x400) */
{3, 1, 960, 563, 1632, 960, 0, 0, 0}, /* 03 (720x350) */
@@ -948,7 +967,8 @@ static const struct SiS_TVData XGI_StYPbPr525pData[] = {
static const struct SiS_TVData XGI_ExtYPbPr750pData[] = {
{ 3, 1, 935, 470, 1130, 680, 50, 0, 0}, /* 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{24, 7, 935, 420, 1130, 680, 50, 0, 0}, /* 01 (320x350,640x350) */
{ 3, 1, 935, 470, 1130, 680, 50, 0, 0}, /* 02 (360x400,720x400) */
{24, 7, 935, 420, 1130, 680, 50, 0, 0}, /* 03 (720x350) */
@@ -1269,7 +1289,8 @@ static const struct SiS_LVDSData XGI_LVDSNoScalingDatax75[] = {
{1312, 800, 1312, 800}, /* ; 06 (1024x768x75Hz) */
{1688, 1066, 1688, 1066}, /* ; 07 (1280x1024x75Hz) */
{1688, 1066, 1688, 1066}, /* ; 08 (1400x1050x75Hz)
- ;;[ycchen] 12/19/02 */
+ * ;;[ycchen] 12/19/02
+ */
{2160, 1250, 2160, 1250}, /* ; 09 (1600x1200x75Hz) */
{1688, 806, 1688, 806}, /* ; 0A (1280x768x75Hz) */
};
@@ -1364,7 +1385,8 @@ static const struct SiS_LVDSData XGI_LVDS1600x1200Des_1[] = {
static const struct XGI330_LCDDataDesStruct2 XGI_LVDSNoScalingDesData[] = {
{0, 648, 448, 405, 96, 2}, /* 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{0, 648, 448, 355, 96, 2}, /* 01 (320x350,640x350) */
{0, 648, 448, 405, 96, 2}, /* 02 (360x400,720x400) */
{0, 648, 448, 355, 96, 2}, /* 03 (720x350) */
@@ -1435,7 +1457,8 @@ static const struct SiS_LVDSData XGI_LVDS1280x1024Des_2x75[] = {
/* Scaling LCD 75Hz */
static const struct XGI330_LCDDataDesStruct2 XGI_LVDSNoScalingDesDatax75[] = {
{0, 648, 448, 405, 96, 2}, /* ; 00 (320x200,320x400,
- 640x200,640x400) */
+ * 640x200,640x400)
+ */
{0, 648, 448, 355, 96, 2}, /* ; 01 (320x350,640x350) */
{0, 729, 448, 405, 108, 2}, /* ; 02 (360x400,720x400) */
{0, 729, 448, 355, 108, 2}, /* ; 03 (720x350) */
diff --git a/drivers/staging/xgifb/vb_util.h b/drivers/staging/xgifb/vb_util.h
index f613f54d522f..08db58b396b2 100644
--- a/drivers/staging/xgifb/vb_util.h
+++ b/drivers/staging/xgifb/vb_util.h
@@ -13,7 +13,7 @@ static inline u8 xgifb_reg_get(unsigned long port, u8 index)
}
static inline void xgifb_reg_and_or(unsigned long port, u8 index,
- unsigned data_and, unsigned data_or)
+ unsigned int data_and, unsigned int data_or)
{
u8 temp;
@@ -22,7 +22,8 @@ static inline void xgifb_reg_and_or(unsigned long port, u8 index,
xgifb_reg_set(port, index, temp);
}
-static inline void xgifb_reg_and(unsigned long port, u8 index, unsigned data_and)
+static inline void xgifb_reg_and(unsigned long port, u8 index,
+ unsigned int data_and)
{
u8 temp;
@@ -31,7 +32,8 @@ static inline void xgifb_reg_and(unsigned long port, u8 index, unsigned data_and
xgifb_reg_set(port, index, temp);
}
-static inline void xgifb_reg_or(unsigned long port, u8 index, unsigned data_or)
+static inline void xgifb_reg_or(unsigned long port, u8 index,
+ unsigned int data_or)
{
u8 temp;
diff --git a/include/dt-bindings/iio/adi,ad5592r.h b/include/dt-bindings/iio/adi,ad5592r.h
new file mode 100644
index 000000000000..c48aca1dcade
--- /dev/null
+++ b/include/dt-bindings/iio/adi,ad5592r.h
@@ -0,0 +1,16 @@
+
+#ifndef _DT_BINDINGS_ADI_AD5592R_H
+#define _DT_BINDINGS_ADI_AD5592R_H
+
+#define CH_MODE_UNUSED 0
+#define CH_MODE_ADC 1
+#define CH_MODE_DAC 2
+#define CH_MODE_DAC_AND_ADC 3
+#define CH_MODE_GPIO 8
+
+#define CH_OFFSTATE_PULLDOWN 0
+#define CH_OFFSTATE_OUT_LOW 1
+#define CH_OFFSTATE_OUT_HIGH 2
+#define CH_OFFSTATE_OUT_TRISTATE 3
+
+#endif /* _DT_BINDINGS_ADI_AD5592R_H */
diff --git a/include/linux/iio/buffer.h b/include/linux/iio/buffer.h
index 2ec3ad58e8a0..70a5164f4728 100644
--- a/include/linux/iio/buffer.h
+++ b/include/linux/iio/buffer.h
@@ -83,10 +83,12 @@ struct iio_buffer_access_funcs {
* @access: [DRIVER] buffer access functions associated with the
* implementation.
* @scan_el_dev_attr_list:[INTERN] list of scan element related attributes.
+ * @buffer_group: [INTERN] attributes of the buffer group
* @scan_el_group: [DRIVER] attribute group for those attributes not
* created from the iio_chan_info array.
* @pollq: [INTERN] wait queue to allow for polling on the buffer.
* @stufftoread: [INTERN] flag to indicate new data.
+ * @attrs: [INTERN] standard attributes of the buffer
* @demux_list: [INTERN] list of operations required to demux the scan.
* @demux_bounce: [INTERN] buffer for doing gather from incoming scan.
* @buffer_list: [INTERN] entry in the devices list of current buffers.
diff --git a/include/linux/iio/common/st_sensors.h b/include/linux/iio/common/st_sensors.h
index 6670c3d25c58..d029ffac0d69 100644
--- a/include/linux/iio/common/st_sensors.h
+++ b/include/linux/iio/common/st_sensors.h
@@ -37,6 +37,7 @@
#define ST_SENSORS_DEFAULT_AXIS_ADDR 0x20
#define ST_SENSORS_DEFAULT_AXIS_MASK 0x07
#define ST_SENSORS_DEFAULT_AXIS_N_BIT 3
+#define ST_SENSORS_DEFAULT_STAT_ADDR 0x27
#define ST_SENSORS_MAX_NAME 17
#define ST_SENSORS_MAX_4WAI 7
@@ -121,6 +122,9 @@ struct st_sensor_bdu {
* @mask_int2: mask to enable/disable IRQ on INT2 pin.
* @addr_ihl: address to enable/disable active low on the INT lines.
* @mask_ihl: mask to enable/disable active low on the INT lines.
+ * @addr_od: address to enable/disable Open Drain on the INT lines.
+ * @mask_od: mask to enable/disable Open Drain on the INT lines.
+ * @addr_stat_drdy: address to read status of DRDY (data ready) interrupt
* struct ig1 - represents the Interrupt Generator 1 of sensors.
* @en_addr: address of the enable ig1 register.
* @en_mask: mask to write the on/off value for enable.
@@ -131,6 +135,9 @@ struct st_sensor_data_ready_irq {
u8 mask_int2;
u8 addr_ihl;
u8 mask_ihl;
+ u8 addr_od;
+ u8 mask_od;
+ u8 addr_stat_drdy;
struct {
u8 en_addr;
u8 en_mask;
@@ -212,6 +219,7 @@ struct st_sensor_settings {
* @odr: Output data rate of the sensor [Hz].
* num_data_channels: Number of data channels used in buffer.
* @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2).
+ * @int_pin_open_drain: Set the interrupt/DRDY to open drain.
* @get_irq_data_ready: Function to get the IRQ used for data ready signal.
* @tf: Transfer function structure used by I/O operations.
* @tb: Transfer buffers and mutex used by I/O operations.
@@ -233,6 +241,7 @@ struct st_sensor_data {
unsigned int num_data_channels;
u8 drdy_int_pin;
+ bool int_pin_open_drain;
unsigned int (*get_irq_data_ready) (struct iio_dev *indio_dev);
diff --git a/include/linux/iio/consumer.h b/include/linux/iio/consumer.h
index fad58671c49e..3d672f72e7ec 100644
--- a/include/linux/iio/consumer.h
+++ b/include/linux/iio/consumer.h
@@ -49,6 +49,33 @@ struct iio_channel *iio_channel_get(struct device *dev,
void iio_channel_release(struct iio_channel *chan);
/**
+ * devm_iio_channel_get() - Resource managed version of iio_channel_get().
+ * @dev: Pointer to consumer device. Device name must match
+ * the name of the device as provided in the iio_map
+ * with which the desired provider to consumer mapping
+ * was registered.
+ * @consumer_channel: Unique name to identify the channel on the consumer
+ * side. This typically describes the channel's use within
+ * the consumer, e.g. 'battery_voltage'.
+ *
+ * Returns an ERR_PTR()-encoded negative errno if it is not able to get the
+ * iio channel, otherwise returns a valid pointer to the iio channel.
+ *
+ * The allocated iio channel is automatically released when the device is
+ * unbound.
+ */
+struct iio_channel *devm_iio_channel_get(struct device *dev,
+ const char *consumer_channel);
+/**
+ * devm_iio_channel_release() - Resource managed version of
+ * iio_channel_release().
+ * @dev: Pointer to consumer device for which the resource
+ * is allocated.
+ * @chan: The channel to be released.
+ */
+void devm_iio_channel_release(struct device *dev, struct iio_channel *chan);
+
+/**
* iio_channel_get_all() - get all channels associated with a client
* @dev: Pointer to consumer device.
*
@@ -65,6 +92,32 @@ struct iio_channel *iio_channel_get_all(struct device *dev);
*/
void iio_channel_release_all(struct iio_channel *chan);
+/**
+ * devm_iio_channel_get_all() - Resource managed version of
+ * iio_channel_get_all().
+ * @dev: Pointer to consumer device.
+ *
+ * Returns an ERR_PTR()-encoded negative errno if it is not able to get the
+ * iio channels, otherwise returns an array of iio_channel structures
+ * terminated by one with a null iio_dev pointer.
+ *
+ * This function is used by fairly generic consumers to get all the
+ * channels registered as having this consumer.
+ *
+ * The allocated iio channels are automatically released when the device is
+ * unbound.
+ */
+struct iio_channel *devm_iio_channel_get_all(struct device *dev);
+
+/**
+ * devm_iio_channel_release_all() - Resource managed version of
+ * iio_channel_release_all().
+ * @dev: Pointer to consumer device for which the resource
+ * is allocated.
+ * @chan: Array of channels to be released.
+ */
+void devm_iio_channel_release_all(struct device *dev, struct iio_channel *chan);
+
struct iio_cb_buffer;
/**
* iio_channel_get_all_cb() - register callback for triggered capture
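As a quick illustration of the resource-managed consumer API added above, a minimal probe() sketch might look like the following (the foo_* names and the "battery_voltage" mapping are hypothetical, not part of this patch):

#include <linux/iio/consumer.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int foo_probe(struct platform_device *pdev)
{
	struct iio_channel *chan;
	int val, ret;

	/* Released automatically when the consumer device is unbound. */
	chan = devm_iio_channel_get(&pdev->dev, "battery_voltage");
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = iio_read_channel_raw(chan, &val);
	if (ret < 0)
		return ret;

	dev_info(&pdev->dev, "raw battery voltage: %d\n", val);
	return 0;
}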
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index b2b16772c651..7c29cb0124ae 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -148,6 +148,37 @@ ssize_t iio_enum_write(struct iio_dev *indio_dev,
}
/**
+ * struct iio_mount_matrix - iio mounting matrix
+ * @rotation: 3 dimensional space rotation matrix defining sensor alignment with
+ * main hardware
+ */
+struct iio_mount_matrix {
+ const char *rotation[9];
+};
+
+ssize_t iio_show_mount_matrix(struct iio_dev *indio_dev, uintptr_t priv,
+ const struct iio_chan_spec *chan, char *buf);
+int of_iio_read_mount_matrix(const struct device *dev, const char *propname,
+ struct iio_mount_matrix *matrix);
+
+typedef const struct iio_mount_matrix *
+ (iio_get_mount_matrix_t)(const struct iio_dev *indio_dev,
+ const struct iio_chan_spec *chan);
+
+/**
+ * IIO_MOUNT_MATRIX() - Initialize mount matrix extended channel attribute
+ * @_shared: Whether the attribute is shared between all channels
+ * @_get: Pointer to an iio_get_mount_matrix_t accessor
+ */
+#define IIO_MOUNT_MATRIX(_shared, _get) \
+{ \
+ .name = "mount_matrix", \
+ .shared = (_shared), \
+ .read = iio_show_mount_matrix, \
+ .private = (uintptr_t)(_get), \
+}
+
+/**
* struct iio_event_spec - specification for a channel event
* @type: Type of the event
* @dir: Direction of the event
@@ -527,6 +558,8 @@ void iio_device_unregister(struct iio_dev *indio_dev);
int devm_iio_device_register(struct device *dev, struct iio_dev *indio_dev);
void devm_iio_device_unregister(struct device *dev, struct iio_dev *indio_dev);
int iio_push_event(struct iio_dev *indio_dev, u64 ev_code, s64 timestamp);
+int iio_device_claim_direct_mode(struct iio_dev *indio_dev);
+void iio_device_release_direct_mode(struct iio_dev *indio_dev);
extern struct bus_type iio_bus_type;
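The mount-matrix attribute and the direct-mode helpers added above are typically wired up from a driver's ext_info table and read_raw() callback; a rough sketch under hypothetical foo_* names (not from this series):

#include <linux/iio/iio.h>

struct foo_data {
	struct iio_mount_matrix orientation;	/* e.g. filled from DT in probe */
};

static const struct iio_mount_matrix *
foo_get_mount_matrix(const struct iio_dev *indio_dev,
		     const struct iio_chan_spec *chan)
{
	struct foo_data *data = iio_priv(indio_dev);

	return &data->orientation;
}

/* Hooked into the channel's .ext_info; exposes a mount_matrix attribute. */
static const struct iio_chan_spec_ext_info foo_ext_info[] = {
	IIO_MOUNT_MATRIX(IIO_SHARED_BY_DIR, foo_get_mount_matrix),
	{ }
};

static int foo_read_raw(struct iio_dev *indio_dev,
			struct iio_chan_spec const *chan,
			int *val, int *val2, long mask)
{
	int ret;

	/* Refuse one-shot reads while the buffer is in use. */
	ret = iio_device_claim_direct_mode(indio_dev);
	if (ret)
		return ret;

	*val = 0;	/* the actual hardware access would go here */
	iio_device_release_direct_mode(indio_dev);

	return IIO_VAL_INT;
}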
diff --git a/include/linux/iio/imu/adis.h b/include/linux/iio/imu/adis.h
index fa2d01ef8f55..360da7d18a3d 100644
--- a/include/linux/iio/imu/adis.h
+++ b/include/linux/iio/imu/adis.h
@@ -41,6 +41,7 @@ struct adis_data {
unsigned int diag_stat_reg;
unsigned int self_test_mask;
+ bool self_test_no_autoclear;
unsigned int startup_delay;
const char * const *status_error_msgs;
diff --git a/include/linux/iio/magnetometer/ak8975.h b/include/linux/iio/magnetometer/ak8975.h
new file mode 100644
index 000000000000..c8400959d197
--- /dev/null
+++ b/include/linux/iio/magnetometer/ak8975.h
@@ -0,0 +1,16 @@
+#ifndef __IIO_MAGNETOMETER_AK8975_H__
+#define __IIO_MAGNETOMETER_AK8975_H__
+
+#include <linux/iio/iio.h>
+
+/**
+ * struct ak8975_platform_data - AK8975 magnetometer driver platform data
+ * @eoc_gpio: data ready event gpio
+ * @orientation: mounting matrix relative to main hardware
+ */
+struct ak8975_platform_data {
+ int eoc_gpio;
+ struct iio_mount_matrix orientation;
+};
+
+#endif
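Boards that still pass platform data instead of using DT can fill the new header by hand; a hypothetical board-file snippet (example values only, here a 90 degree rotation around the Z axis):

#include <linux/iio/magnetometer/ak8975.h>

static struct ak8975_platform_data board_ak8975_pdata = {
	.eoc_gpio	= -1,		/* no data-ready GPIO wired up */
	.orientation	= {
		.rotation = {
			"0",  "1", "0",
			"-1", "0", "0",
			"0",  "0", "1",
		},
	},
};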
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index cc7398287fdd..94aa10ffe156 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -53,6 +53,13 @@
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
+#define u64_to_user_ptr(x) ( \
+{ \
+ typecheck(u64, x); \
+ (void __user *)(uintptr_t)x; \
+} \
+)
+
/*
* This looks more complex than it should be. But we need to
* get the type for the ~ right in round_down (it needs to be
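u64_to_user_ptr() is aimed at uapi structures that carry a user pointer in a fixed-width __u64 field (as the sync_file uapi later in this series does); a small hypothetical sketch:

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct foo_query {			/* hypothetical ioctl argument */
	__u64 entries;			/* user pointer carried as a u64 */
	__u32 num_entries;
	__u32 pad;
};

static int foo_copy_entries(const struct foo_query *q, const u32 *kbuf)
{
	u32 __user *uptr = u64_to_user_ptr(q->entries);

	/* typecheck() inside the macro rejects anything narrower than u64. */
	if (copy_to_user(uptr, kbuf, (size_t)q->num_entries * sizeof(*kbuf)))
		return -EFAULT;

	return 0;
}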
diff --git a/include/linux/platform_data/invensense_mpu6050.h b/include/linux/platform_data/invensense_mpu6050.h
index ad3aa7b95f35..554b59801aa8 100644
--- a/include/linux/platform_data/invensense_mpu6050.h
+++ b/include/linux/platform_data/invensense_mpu6050.h
@@ -16,13 +16,16 @@
/**
* struct inv_mpu6050_platform_data - Platform data for the mpu driver
- * @orientation: Orientation matrix of the chip
+ * @orientation: Orientation matrix of the chip (deprecated in favor of
+ * mounting matrix retrieved from device-tree)
*
* Contains platform specific information on how to configure the MPU6050 to
* work on this platform. The orientation matricies are 3x3 rotation matricies
* that are applied to the data to rotate from the mounting orientation to the
* platform orientation. The values must be one of 0, 1, or -1 and each row and
* column should have exactly 1 non-zero value.
+ *
+ * Deprecated in favor of mounting matrix retrieved from device-tree.
*/
struct inv_mpu6050_platform_data {
__s8 orientation[9];
diff --git a/include/linux/platform_data/st_sensors_pdata.h b/include/linux/platform_data/st_sensors_pdata.h
index 753839187ba0..79b0e4cdb814 100644
--- a/include/linux/platform_data/st_sensors_pdata.h
+++ b/include/linux/platform_data/st_sensors_pdata.h
@@ -16,9 +16,11 @@
* @drdy_int_pin: Redirect DRDY on pin 1 (1) or pin 2 (2).
* Available only for accelerometer and pressure sensors.
* Accelerometer DRDY on LSM330 available only on pin 1 (see datasheet).
+ * @open_drain: set the interrupt line to be open drain if possible.
*/
struct st_sensors_platform_data {
u8 drdy_int_pin;
+ bool open_drain;
};
#endif /* ST_SENSORS_PDATA_H */
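A hypothetical board-file use of the extended platform data (illustrative values):

#include <linux/platform_data/st_sensors_pdata.h>

static struct st_sensors_platform_data board_accel_pdata = {
	.drdy_int_pin	= 2,		/* route DRDY to the INT2 pin */
	.open_drain	= true,		/* shared interrupt line */
};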
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 31bd0d97d178..0fc28e45c142 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2248,6 +2248,7 @@ static inline void memalloc_noio_restore(unsigned int flags)
#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
+#define PFA_LMK_WAITING 3 /* Lowmemorykiller is waiting */
#define TASK_PFA_TEST(name, func) \
@@ -2271,6 +2272,9 @@ TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
+TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
+TASK_PFA_SET(LMK_WAITING, lmk_waiting)
+
/*
* task->jobctl flags
*/
diff --git a/include/linux/sync_file.h b/include/linux/sync_file.h
new file mode 100644
index 000000000000..c6ffe8b0725c
--- /dev/null
+++ b/include/linux/sync_file.h
@@ -0,0 +1,57 @@
+/*
+ * include/linux/sync_file.h
+ *
+ * Copyright (C) 2012 Google, Inc.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _LINUX_SYNC_FILE_H
+#define _LINUX_SYNC_FILE_H
+
+#include <linux/types.h>
+#include <linux/kref.h>
+#include <linux/ktime.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/fence.h>
+
+struct sync_file_cb {
+ struct fence_cb cb;
+ struct fence *fence;
+ struct sync_file *sync_file;
+};
+
+/**
+ * struct sync_file - sync file to export to the userspace
+ * @file: file representing this fence
+ * @kref: reference count on fence.
+ * @name: name of sync_file. Useful for debugging
+ * @sync_file_list: membership in global file list
+ * @num_fences: number of sync_pts in the fence
+ * @wq: wait queue for fence signaling
+ * @status: 0: signaled, >0:active, <0: error
+ * @cbs: sync_pts callback information
+ */
+struct sync_file {
+ struct file *file;
+ struct kref kref;
+ char name[32];
+#ifdef CONFIG_DEBUG_FS
+ struct list_head sync_file_list;
+#endif
+ int num_fences;
+
+ wait_queue_head_t wq;
+ atomic_t status;
+
+ struct sync_file_cb cbs[];
+};
+
+struct sync_file *sync_file_create(struct fence *fence);
+
+#endif /* _LINUX_SYNC_FILE_H */
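A driver exporting a fence to userspace would typically wrap it with sync_file_create() and install the backing file on an fd; a sketch under a hypothetical foo_* name, with error paths kept minimal:

#include <linux/fcntl.h>
#include <linux/fence.h>
#include <linux/file.h>
#include <linux/sync_file.h>

static int foo_fence_to_fd(struct fence *fence)
{
	struct sync_file *sync_file;
	int fd;

	fd = get_unused_fd_flags(O_CLOEXEC);
	if (fd < 0)
		return fd;

	sync_file = sync_file_create(fence);
	if (!sync_file) {
		put_unused_fd(fd);
		return -ENOMEM;
	}

	fd_install(fd, sync_file->file);
	return fd;	/* this fd is what userspace gets back */
}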
diff --git a/include/uapi/linux/iio/types.h b/include/uapi/linux/iio/types.h
index c077617f3304..b0916fc72cce 100644
--- a/include/uapi/linux/iio/types.h
+++ b/include/uapi/linux/iio/types.h
@@ -38,6 +38,7 @@ enum iio_chan_type {
IIO_CONCENTRATION,
IIO_RESISTANCE,
IIO_PH,
+ IIO_UVINDEX,
};
enum iio_modifier {
@@ -77,6 +78,7 @@ enum iio_modifier {
IIO_MOD_Q,
IIO_MOD_CO2,
IIO_MOD_VOC,
+ IIO_MOD_LIGHT_UV,
};
enum iio_event_type {
diff --git a/drivers/staging/android/uapi/sync.h b/include/uapi/linux/sync_file.h
index a0cf357e598d..413303d37b56 100644
--- a/drivers/staging/android/uapi/sync.h
+++ b/include/uapi/linux/sync_file.h
@@ -16,57 +16,73 @@
/**
* struct sync_merge_data - data passed to merge ioctl
- * @fd2: file descriptor of second fence
* @name: name of new fence
+ * @fd2: file descriptor of second fence
* @fence: returns the fd of the new fence to userspace
+ * @flags: merge_data flags
+ * @pad: padding for 64-bit alignment, should always be zero
*/
struct sync_merge_data {
- __s32 fd2; /* fd of second fence */
- char name[32]; /* name of new fence */
- __s32 fence; /* fd on newly created fence */
+ char name[32];
+ __s32 fd2;
+ __s32 fence;
+ __u32 flags;
+ __u32 pad;
};
/**
* struct sync_fence_info - detailed fence information
* @obj_name: name of parent sync_timeline
- * @driver_name: name of driver implementing the parent
- * @status: status of the fence 0:active 1:signaled <0:error
+ * @driver_name: name of driver implementing the parent
+ * @status: status of the fence 0:active 1:signaled <0:error
+ * @flags: fence_info flags
* @timestamp_ns: timestamp of status change in nanoseconds
*/
struct sync_fence_info {
char obj_name[32];
char driver_name[32];
__s32 status;
+ __u32 flags;
__u64 timestamp_ns;
};
/**
* struct sync_file_info - data returned from fence info ioctl
- * @len: ioctl caller writes the size of the buffer its passing in.
- * ioctl returns length of sync_file_info returned to
- * userspace including pt_info.
* @name: name of fence
* @status: status of fence. 1: signaled 0:active <0:error
- * @sync_fence_info: array of sync_fence_info for every fence in the sync_file
+ * @flags: sync_file_info flags
+ * @num_fences: number of fences in the sync_file
+ * @pad: padding for 64-bit alignment, should always be zero
+ * @sync_fence_info: pointer to array of structs sync_fence_info with all
+ * fences in the sync_file
*/
struct sync_file_info {
- __u32 len;
char name[32];
__s32 status;
+ __u32 flags;
+ __u32 num_fences;
+ __u32 pad;
- __u8 sync_fence_info[0];
+ __u64 sync_fence_info;
};
#define SYNC_IOC_MAGIC '>'
/**
+ * Opcodes 0, 1 and 2 were burned during an API change to avoid users of the
+ * old API getting weird errors when trying to handle sync_files. The API
+ * change happened during the de-staging of the Sync Framework, when there
+ * were no upstream users available.
+ */
+
+/**
* DOC: SYNC_IOC_MERGE - merge two fences
*
* Takes a struct sync_merge_data. Creates a new fence containing copies of
* the sync_pts in both the calling fd and sync_merge_data.fd2. Returns the
* new fence's fd in sync_merge_data.fence
*/
-#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 1, struct sync_merge_data)
+#define SYNC_IOC_MERGE _IOWR(SYNC_IOC_MAGIC, 3, struct sync_merge_data)
/**
* DOC: SYNC_IOC_FENCE_INFO - get detailed information on a fence
@@ -79,6 +95,6 @@ struct sync_file_info {
* pt_info is a buffer containing sync_pt_infos for every sync_pt in the fence.
* To iterate over the sync_pt_infos, use the sync_pt_info.len field.
*/
-#define SYNC_IOC_FENCE_INFO _IOWR(SYNC_IOC_MAGIC, 2, struct sync_file_info)
+#define SYNC_IOC_FILE_INFO _IOWR(SYNC_IOC_MAGIC, 4, struct sync_file_info)
#endif /* _UAPI_LINUX_SYNC_H */
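From userspace, the renumbered ioctls are used roughly as follows (illustrative sketch; an initial SYNC_IOC_FILE_INFO call with num_fences left at zero is expected to only report the fence count):

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/sync_file.h>

/* Merge two fence fds and print basic information about the result. */
static int merge_and_query(int fd1, int fd2)
{
	struct sync_merge_data merge;
	struct sync_file_info info;

	memset(&merge, 0, sizeof(merge));	/* flags and pad must be zero */
	merge.fd2 = fd2;
	strncpy(merge.name, "merged", sizeof(merge.name) - 1);

	if (ioctl(fd1, SYNC_IOC_MERGE, &merge) < 0)
		return -1;

	memset(&info, 0, sizeof(info));
	if (ioctl(merge.fence, SYNC_IOC_FILE_INFO, &info) < 0)
		return -1;

	printf("%s: status %d, %u fences\n",
	       info.name, info.status, info.num_fences);

	return merge.fence;
}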
diff --git a/tools/iio/generic_buffer.c b/tools/iio/generic_buffer.c
index 01c4f67801e0..2429c78de940 100644
--- a/tools/iio/generic_buffer.c
+++ b/tools/iio/generic_buffer.c
@@ -35,6 +35,15 @@
#include "iio_utils.h"
/**
+ * enum autochan - state for the automatic channel enabling mechanism
+ */
+enum autochan {
+ AUTOCHANNELS_DISABLED,
+ AUTOCHANNELS_ENABLED,
+ AUTOCHANNELS_ACTIVE,
+};
+
+/**
* size_from_channelarray() - calculate the storage size of a scan
* @channels: the channel info array
* @num_channels: number of channels
@@ -191,10 +200,51 @@ void process_scan(char *data,
printf("\n");
}
+static int enable_disable_all_channels(char *dev_dir_name, int enable)
+{
+ const struct dirent *ent;
+ char scanelemdir[256];
+ DIR *dp;
+ int ret;
+
+ snprintf(scanelemdir, sizeof(scanelemdir),
+ FORMAT_SCAN_ELEMENTS_DIR, dev_dir_name);
+ scanelemdir[sizeof(scanelemdir)-1] = '\0';
+
+ dp = opendir(scanelemdir);
+ if (!dp) {
+ fprintf(stderr, "Enabling/disabling channels: can't open %s\n",
+ scanelemdir);
+ return -EIO;
+ }
+
+ ret = -ENOENT;
+ while (ent = readdir(dp), ent) {
+ if (iioutils_check_suffix(ent->d_name, "_en")) {
+ printf("%sabling: %s\n",
+ enable ? "En" : "Dis",
+ ent->d_name);
+ ret = write_sysfs_int(ent->d_name, scanelemdir,
+ enable);
+ if (ret < 0)
+ fprintf(stderr, "Failed to enable/disable %s\n",
+ ent->d_name);
+ }
+ }
+
+ if (closedir(dp) == -1) {
+ perror("Enabling/disabling channels: "
+ "Failed to close directory");
+ return -errno;
+ }
+ return 0;
+}
+
void print_usage(void)
{
fprintf(stderr, "Usage: generic_buffer [options]...\n"
"Capture, convert and output data from IIO device buffer\n"
+ " -a Auto-activate all available channels\n"
" -c <n> Do n conversions\n"
" -e Disable wait for event (new data)\n"
" -g Use trigger-less mode\n"
@@ -225,12 +275,16 @@ int main(int argc, char **argv)
int scan_size;
int noevents = 0;
int notrigger = 0;
+ enum autochan autochannels = AUTOCHANNELS_DISABLED;
char *dummy;
struct iio_channel_info *channels;
- while ((c = getopt(argc, argv, "c:egl:n:t:w:")) != -1) {
+ while ((c = getopt(argc, argv, "ac:egl:n:t:w:")) != -1) {
switch (c) {
+ case 'a':
+ autochannels = AUTOCHANNELS_ENABLED;
+ break;
case 'c':
errno = 0;
num_loops = strtoul(optarg, &dummy, 10);
@@ -304,7 +358,19 @@ int main(int argc, char **argv)
}
}
- /* Verify the trigger exists */
+ /* Look for this "-devN" trigger */
+ trig_num = find_type_by_name(trigger_name, "trigger");
+ if (trig_num < 0) {
+ /* OK try the simpler "-trigger" suffix instead */
+ free(trigger_name);
+ ret = asprintf(&trigger_name,
+ "%s-trigger", device_name);
+ if (ret < 0) {
+ ret = -ENOMEM;
+ goto error_free_dev_dir_name;
+ }
+ }
+
trig_num = find_type_by_name(trigger_name, "trigger");
if (trig_num < 0) {
fprintf(stderr, "Failed to find the trigger %s\n",
@@ -328,12 +394,47 @@ int main(int argc, char **argv)
"diag %s\n", dev_dir_name);
goto error_free_triggername;
}
- if (!num_channels) {
+ if (num_channels && autochannels == AUTOCHANNELS_ENABLED) {
+ fprintf(stderr, "Auto-channels selected but some channels "
+ "are already activated in sysfs\n");
+ fprintf(stderr, "Proceeding without activating any channels\n");
+ }
+
+ if (!num_channels && autochannels == AUTOCHANNELS_ENABLED) {
+ fprintf(stderr,
+ "No channels are enabled, enabling all channels\n");
+
+ ret = enable_disable_all_channels(dev_dir_name, 1);
+ if (ret) {
+ fprintf(stderr, "Failed to enable all channels\n");
+ goto error_free_triggername;
+ }
+
+ /* This flags that we need to disable the channels again */
+ autochannels = AUTOCHANNELS_ACTIVE;
+
+ ret = build_channel_array(dev_dir_name, &channels,
+ &num_channels);
+ if (ret) {
+ fprintf(stderr, "Problem reading scan element "
+ "information\n"
+ "diag %s\n", dev_dir_name);
+ goto error_disable_channels;
+ }
+ if (!num_channels) {
+ fprintf(stderr, "Still no channels after "
+ "auto-enabling, giving up\n");
+ goto error_disable_channels;
+ }
+ }
+
+ if (!num_channels && autochannels == AUTOCHANNELS_DISABLED) {
fprintf(stderr,
"No channels are enabled, we have nothing to scan.\n");
fprintf(stderr, "Enable channels manually in "
FORMAT_SCAN_ELEMENTS_DIR
- "/*_en and try again.\n", dev_dir_name);
+ "/*_en or pass -a to autoenable channels and "
+ "try again.\n", dev_dir_name);
ret = -ENOENT;
goto error_free_triggername;
}
@@ -467,7 +568,12 @@ error_free_channels:
error_free_triggername:
if (datardytrigger)
free(trigger_name);
-
+error_disable_channels:
+ if (autochannels == AUTOCHANNELS_ACTIVE) {
+ ret = enable_disable_all_channels(dev_dir_name, 0);
+ if (ret)
+ fprintf(stderr, "Failed to disable all channels\n");
+ }
error_free_dev_dir_name:
free(dev_dir_name);
diff --git a/tools/iio/iio_event_monitor.c b/tools/iio/iio_event_monitor.c
index d51eb04202e9..d9b7e0f306c6 100644
--- a/tools/iio/iio_event_monitor.c
+++ b/tools/iio/iio_event_monitor.c
@@ -53,6 +53,10 @@ static const char * const iio_chan_type_name_spec[] = {
[IIO_ENERGY] = "energy",
[IIO_DISTANCE] = "distance",
[IIO_VELOCITY] = "velocity",
+ [IIO_CONCENTRATION] = "concentration",
+ [IIO_RESISTANCE] = "resistance",
+ [IIO_PH] = "ph",
+ [IIO_UVINDEX] = "uvindex",
};
static const char * const iio_ev_type_text[] = {
@@ -90,6 +94,7 @@ static const char * const iio_modifier_names[] = {
[IIO_MOD_LIGHT_RED] = "red",
[IIO_MOD_LIGHT_GREEN] = "green",
[IIO_MOD_LIGHT_BLUE] = "blue",
+ [IIO_MOD_LIGHT_UV] = "uv",
[IIO_MOD_QUATERNION] = "quaternion",
[IIO_MOD_TEMP_AMBIENT] = "ambient",
[IIO_MOD_TEMP_OBJECT] = "object",
@@ -102,6 +107,10 @@ static const char * const iio_modifier_names[] = {
[IIO_MOD_WALKING] = "walking",
[IIO_MOD_STILL] = "still",
[IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z] = "sqrt(x^2+y^2+z^2)",
+ [IIO_MOD_I] = "i",
+ [IIO_MOD_Q] = "q",
+ [IIO_MOD_CO2] = "co2",
+ [IIO_MOD_VOC] = "voc",
};
static bool event_is_known(struct iio_event_data *event)
@@ -136,6 +145,10 @@ static bool event_is_known(struct iio_event_data *event)
case IIO_ENERGY:
case IIO_DISTANCE:
case IIO_VELOCITY:
+ case IIO_CONCENTRATION:
+ case IIO_RESISTANCE:
+ case IIO_PH:
+ case IIO_UVINDEX:
break;
default:
return false;
@@ -162,6 +175,7 @@ static bool event_is_known(struct iio_event_data *event)
case IIO_MOD_LIGHT_RED:
case IIO_MOD_LIGHT_GREEN:
case IIO_MOD_LIGHT_BLUE:
+ case IIO_MOD_LIGHT_UV:
case IIO_MOD_QUATERNION:
case IIO_MOD_TEMP_AMBIENT:
case IIO_MOD_TEMP_OBJECT:
@@ -174,6 +188,10 @@ static bool event_is_known(struct iio_event_data *event)
case IIO_MOD_WALKING:
case IIO_MOD_STILL:
case IIO_MOD_ROOT_SUM_SQUARED_X_Y_Z:
+ case IIO_MOD_I:
+ case IIO_MOD_Q:
+ case IIO_MOD_CO2:
+ case IIO_MOD_VOC:
break;
default:
return false;
diff --git a/tools/iio/iio_utils.h b/tools/iio/iio_utils.h
index e3503bfe538b..780f2014f8fa 100644
--- a/tools/iio/iio_utils.h
+++ b/tools/iio/iio_utils.h
@@ -52,6 +52,13 @@ struct iio_channel_info {
unsigned location;
};
+static inline int iioutils_check_suffix(const char *str, const char *suffix)
+{
+ return strlen(str) >= strlen(suffix) &&
+ strncmp(str+strlen(str)-strlen(suffix),
+ suffix, strlen(suffix)) == 0;
+}
+
int iioutils_break_up_name(const char *full_name, char **generic_name);
int iioutils_get_type(unsigned *is_signed, unsigned *bytes, unsigned *bits_used,
unsigned *shift, uint64_t *mask, unsigned *be,