staging:iio: Drop {mark,unmark}_in_use callbacks
These callbacks are currently used by the individual buffer implementations to
ensure that the request_update callback is not issued while the buffer is in use.
But the core already provides sufficient measures to prevent this from happening
in the first place. So it is safe to remove them.
There is one functional change due to this patch. Since the buffer is no longer
marked as in use when the chrdev is opened, it is now possible to enable the
buffer while it is open. This did not work before, because mark_param_change
would fail if the buffer was marked as in use.
Acked-by: Jonathan Cameron <jic23@kernel.org>
Signed-off-by: Lars-Peter Clausen <lars@metafoo.de>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
diff --git a/drivers/staging/iio/Documentation/ring.txt b/drivers/staging/iio/Documentation/ring.txt
index 0f21479..e338077 100644
--- a/drivers/staging/iio/Documentation/ring.txt
+++ b/drivers/staging/iio/Documentation/ring.txt
@@ -23,10 +23,6 @@
as much buffer functionality as possible. Note almost all of these
are optional.
-mark_in_use, unmark_in_use
- Basically indicate that not changes should be made to the buffer state that
- will effect the form of the data being captures (e.g. scan elements or length)
-
store_to
If possible, push data to the buffer.
diff --git a/drivers/staging/iio/buffer.h b/drivers/staging/iio/buffer.h
index ea42b5d..6fb6e64 100644
--- a/drivers/staging/iio/buffer.h
+++ b/drivers/staging/iio/buffer.h
@@ -18,8 +18,6 @@
/**
* struct iio_buffer_access_funcs - access functions for buffers.
- * @mark_in_use: reference counting, typically to prevent module removal
- * @unmark_in_use: reduce reference count when no longer using buffer
* @store_to: actually store stuff to the buffer
* @read_first_n: try to get a specified number of bytes (must exist)
* @request_update: if a parameter change has been marked, update underlying
@@ -38,9 +36,6 @@
* any of them not existing.
**/
struct iio_buffer_access_funcs {
- void (*mark_in_use)(struct iio_buffer *buffer);
- void (*unmark_in_use)(struct iio_buffer *buffer);
-
int (*store_to)(struct iio_buffer *buffer, u8 *data, s64 timestamp);
int (*read_first_n)(struct iio_buffer *buffer,
size_t n,
diff --git a/drivers/staging/iio/iio_core.h b/drivers/staging/iio/iio_core.h
index ff27f13..107cfb1 100644
--- a/drivers/staging/iio/iio_core.h
+++ b/drivers/staging/iio/iio_core.h
@@ -33,9 +33,6 @@
#ifdef CONFIG_IIO_BUFFER
struct poll_table_struct;
-int iio_chrdev_buffer_open(struct iio_dev *indio_dev);
-void iio_chrdev_buffer_release(struct iio_dev *indio_dev);
-
unsigned int iio_buffer_poll(struct file *filp,
struct poll_table_struct *wait);
ssize_t iio_buffer_read_first_n_outer(struct file *filp, char __user *buf,
@@ -47,14 +44,6 @@
#else
-static inline int iio_chrdev_buffer_open(struct iio_dev *indio_dev)
-{
- return 0;
-}
-
-static inline void iio_chrdev_buffer_release(struct iio_dev *indio_dev)
-{}
-
#define iio_buffer_poll_addr NULL
#define iio_buffer_read_first_n_outer_addr NULL
diff --git a/drivers/staging/iio/industrialio-buffer.c b/drivers/staging/iio/industrialio-buffer.c
index d2858ff..d7b1e9e 100644
--- a/drivers/staging/iio/industrialio-buffer.c
+++ b/drivers/staging/iio/industrialio-buffer.c
@@ -64,26 +64,6 @@
return 0;
}
-int iio_chrdev_buffer_open(struct iio_dev *indio_dev)
-{
- struct iio_buffer *rb = indio_dev->buffer;
- if (!rb)
- return 0;
- if (rb->access->mark_in_use)
- rb->access->mark_in_use(rb);
- return 0;
-}
-
-void iio_chrdev_buffer_release(struct iio_dev *indio_dev)
-{
- struct iio_buffer *rb = indio_dev->buffer;
-
- if (!rb)
- return;
- if (rb->access->unmark_in_use)
- rb->access->unmark_in_use(rb);
-}
-
void iio_buffer_init(struct iio_buffer *buffer)
{
INIT_LIST_HEAD(&buffer->demux_list);
@@ -447,16 +427,12 @@
goto error_ret;
}
}
- if (buffer->access->mark_in_use)
- buffer->access->mark_in_use(buffer);
/* Definitely possible for devices to support both of these.*/
if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) {
if (!indio_dev->trig) {
printk(KERN_INFO
"Buffer not started: no trigger\n");
ret = -EINVAL;
- if (buffer->access->unmark_in_use)
- buffer->access->unmark_in_use(buffer);
goto error_ret;
}
indio_dev->currentmode = INDIO_BUFFER_TRIGGERED;
@@ -473,8 +449,6 @@
printk(KERN_INFO
"Buffer not started:"
"postenable failed\n");
- if (buffer->access->unmark_in_use)
- buffer->access->unmark_in_use(buffer);
indio_dev->currentmode = previous_mode;
if (indio_dev->setup_ops->postdisable)
indio_dev->setup_ops->
@@ -488,8 +462,6 @@
if (ret)
goto error_ret;
}
- if (buffer->access->unmark_in_use)
- buffer->access->unmark_in_use(buffer);
indio_dev->currentmode = INDIO_DIRECT_MODE;
if (indio_dev->setup_ops->postdisable) {
ret = indio_dev->setup_ops->postdisable(indio_dev);
diff --git a/drivers/staging/iio/industrialio-core.c b/drivers/staging/iio/industrialio-core.c
index 12d1576..19f897f 100644
--- a/drivers/staging/iio/industrialio-core.c
+++ b/drivers/staging/iio/industrialio-core.c
@@ -1083,18 +1083,13 @@
{
struct iio_dev *indio_dev = container_of(inode->i_cdev,
struct iio_dev, chrdev);
- unsigned int ret;
if (test_and_set_bit(IIO_BUSY_BIT_POS, &indio_dev->flags))
return -EBUSY;
filp->private_data = indio_dev;
- ret = iio_chrdev_buffer_open(indio_dev);
- if (ret < 0)
- clear_bit(IIO_BUSY_BIT_POS, &indio_dev->flags);
-
- return ret;
+ return 0;
}
/**
@@ -1104,7 +1099,6 @@
{
struct iio_dev *indio_dev = container_of(inode->i_cdev,
struct iio_dev, chrdev);
- iio_chrdev_buffer_release(indio_dev);
clear_bit(IIO_BUSY_BIT_POS, &indio_dev->flags);
return 0;
}
diff --git a/drivers/staging/iio/kfifo_buf.c b/drivers/staging/iio/kfifo_buf.c
index bae4caf..e1e9c06 100644
--- a/drivers/staging/iio/kfifo_buf.c
+++ b/drivers/staging/iio/kfifo_buf.c
@@ -11,9 +11,7 @@
struct iio_kfifo {
struct iio_buffer buffer;
struct kfifo kf;
- int use_count;
int update_needed;
- struct mutex use_lock;
};
#define iio_to_kfifo(r) container_of(r, struct iio_kfifo, buffer)
@@ -33,47 +31,20 @@
int ret = 0;
struct iio_kfifo *buf = iio_to_kfifo(r);
- mutex_lock(&buf->use_lock);
if (!buf->update_needed)
goto error_ret;
- if (buf->use_count) {
- ret = -EAGAIN;
- goto error_ret;
- }
kfifo_free(&buf->kf);
ret = __iio_allocate_kfifo(buf, buf->buffer.bytes_per_datum,
buf->buffer.length);
error_ret:
- mutex_unlock(&buf->use_lock);
return ret;
}
-static void iio_mark_kfifo_in_use(struct iio_buffer *r)
-{
- struct iio_kfifo *buf = iio_to_kfifo(r);
- mutex_lock(&buf->use_lock);
- buf->use_count++;
- mutex_unlock(&buf->use_lock);
-}
-
-static void iio_unmark_kfifo_in_use(struct iio_buffer *r)
-{
- struct iio_kfifo *buf = iio_to_kfifo(r);
- mutex_lock(&buf->use_lock);
- buf->use_count--;
- mutex_unlock(&buf->use_lock);
-}
-
static int iio_get_length_kfifo(struct iio_buffer *r)
{
return r->length;
}
-static inline void __iio_init_kfifo(struct iio_kfifo *kf)
-{
- mutex_init(&kf->use_lock);
-}
-
static IIO_BUFFER_ENABLE_ATTR;
static IIO_BUFFER_LENGTH_ATTR;
@@ -98,7 +69,6 @@
kf->update_needed = true;
iio_buffer_init(&kf->buffer);
kf->buffer.attrs = &iio_kfifo_attribute_group;
- __iio_init_kfifo(kf);
return &kf->buffer;
}
@@ -168,8 +138,6 @@
}
const struct iio_buffer_access_funcs kfifo_access_funcs = {
- .mark_in_use = &iio_mark_kfifo_in_use,
- .unmark_in_use = &iio_unmark_kfifo_in_use,
.store_to = &iio_store_to_kfifo,
.read_first_n = &iio_read_first_n_kfifo,
.request_update = &iio_request_update_kfifo,
diff --git a/drivers/staging/iio/ring_sw.c b/drivers/staging/iio/ring_sw.c
index c239fd3..3e24ec4 100644
--- a/drivers/staging/iio/ring_sw.c
+++ b/drivers/staging/iio/ring_sw.c
@@ -24,9 +24,7 @@
* @read_p: read pointer (oldest available)
* @write_p: write pointer
* @half_p: half buffer length behind write_p (event generation)
- * @use_count: reference count to prevent resizing when in use
* @update_needed: flag to indicated change in size requested
- * @use_lock: lock to prevent change in size when in use
*
* Note that the first element of all ring buffers must be a
* struct iio_buffer.
@@ -38,9 +36,7 @@
unsigned char *write_p;
/* used to act as a point at which to signal an event */
unsigned char *half_p;
- int use_count;
int update_needed;
- spinlock_t use_lock;
};
#define iio_to_sw_ring(r) container_of(r, struct iio_sw_ring_buffer, buf)
@@ -58,33 +54,11 @@
return ring->data ? 0 : -ENOMEM;
}
-static inline void __iio_init_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
-{
- spin_lock_init(&ring->use_lock);
-}
-
static inline void __iio_free_sw_ring_buffer(struct iio_sw_ring_buffer *ring)
{
kfree(ring->data);
}
-static void iio_mark_sw_rb_in_use(struct iio_buffer *r)
-{
- struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
- spin_lock(&ring->use_lock);
- ring->use_count++;
- spin_unlock(&ring->use_lock);
-}
-
-static void iio_unmark_sw_rb_in_use(struct iio_buffer *r)
-{
- struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
- spin_lock(&ring->use_lock);
- ring->use_count--;
- spin_unlock(&ring->use_lock);
-}
-
-
/* Ring buffer related functionality */
/* Store to ring is typically called in the bh of a data ready interrupt handler
* in the device driver */
@@ -295,18 +269,12 @@
struct iio_sw_ring_buffer *ring = iio_to_sw_ring(r);
r->stufftoread = false;
- spin_lock(&ring->use_lock);
if (!ring->update_needed)
goto error_ret;
- if (ring->use_count) {
- ret = -EAGAIN;
- goto error_ret;
- }
__iio_free_sw_ring_buffer(ring);
ret = __iio_allocate_sw_ring_buffer(ring, ring->buf.bytes_per_datum,
ring->buf.length);
error_ret:
- spin_unlock(&ring->use_lock);
return ret;
}
@@ -372,7 +340,6 @@
ring->update_needed = true;
buf = &ring->buf;
iio_buffer_init(buf);
- __iio_init_sw_ring_buffer(ring);
buf->attrs = &iio_ring_attribute_group;
return buf;
@@ -386,8 +353,6 @@
EXPORT_SYMBOL(iio_sw_rb_free);
const struct iio_buffer_access_funcs ring_sw_access_funcs = {
- .mark_in_use = &iio_mark_sw_rb_in_use,
- .unmark_in_use = &iio_unmark_sw_rb_in_use,
.store_to = &iio_store_to_sw_rb,
.read_first_n = &iio_read_first_n_sw_rb,
.request_update = &iio_request_update_sw_rb,
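
Illustration of the behavioural change described in the commit message: with the
in-use marking gone, the core only issues request_update from the buffer enable
path, while the buffer is still disabled and with the device mutex held, so opening
the chrdev no longer blocks a later enable. The following is a minimal userspace
sketch, not part of this patch; the device index and sysfs paths (iio:device0,
buffer/enable) are examples and depend on the actual device.

/*
 * Sketch: open the IIO chrdev first, then enable the buffer via sysfs.
 * Before this patch, the open() below marked the buffer as in use and the
 * subsequent enable would fail; with this patch the sequence works.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

static int write_sysfs(const char *path, const char *val)
{
	int fd = open(path, O_WRONLY);

	if (fd < 0)
		return -1;
	if (write(fd, val, strlen(val)) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}

int main(void)
{
	char buf[4096];
	ssize_t n;
	/* Open the character device before the buffer is enabled. */
	int fd = open("/dev/iio:device0", O_RDONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* Enable the buffer while the chrdev is already open. */
	if (write_sysfs("/sys/bus/iio/devices/iio:device0/buffer/enable", "1") < 0) {
		perror("enable");
		return 1;
	}
	/* Read captured sample data from the buffer. */
	n = read(fd, buf, sizeof(buf));
	if (n > 0)
		printf("read %zd bytes of sample data\n", n);
	write_sysfs("/sys/bus/iio/devices/iio:device0/buffer/enable", "0");
	close(fd);
	return 0;
}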