Diffstat (limited to 'fs/ksmbd/ksmbd_work.c')
 fs/ksmbd/ksmbd_work.c | 101
 1 file changed, 99 insertions(+), 2 deletions(-)
diff --git a/fs/ksmbd/ksmbd_work.c b/fs/ksmbd/ksmbd_work.c
index fd58eb4809f6..d7c676c151e2 100644
--- a/fs/ksmbd/ksmbd_work.c
+++ b/fs/ksmbd/ksmbd_work.c
@@ -27,18 +27,38 @@ struct ksmbd_work *ksmbd_alloc_work_struct(void)
 		INIT_LIST_HEAD(&work->async_request_entry);
 		INIT_LIST_HEAD(&work->fp_entry);
 		INIT_LIST_HEAD(&work->interim_entry);
+		INIT_LIST_HEAD(&work->aux_read_list);
+		work->iov_alloc_cnt = 4;
+		work->iov = kcalloc(work->iov_alloc_cnt, sizeof(struct kvec),
+				    GFP_KERNEL);
+		if (!work->iov) {
+			kmem_cache_free(work_cache, work);
+			work = NULL;
+		}
 	}
 	return work;
 }
 
 void ksmbd_free_work_struct(struct ksmbd_work *work)
 {
+	struct aux_read *ar, *tmp;
+
 	WARN_ON(work->saved_cred != NULL);
 	kvfree(work->response_buf);
-	kvfree(work->aux_payload_buf);
+
+	list_for_each_entry_safe(ar, tmp, &work->aux_read_list, entry) {
+		kvfree(ar->buf);
+		list_del(&ar->entry);
+		kfree(ar);
+	}
+
 	kfree(work->tr_buf);
 	kvfree(work->request_buf);
+	kfree(work->iov);
+	if (!list_empty(&work->interim_entry))
+		list_del(&work->interim_entry);
+
 	if (work->async_id)
 		ksmbd_release_id(&work->conn->async_ida, work->async_id);
 	kmem_cache_free(work_cache, work);
@@ -69,7 +89,6 @@ int ksmbd_workqueue_init(void)
 
 void ksmbd_workqueue_destroy(void)
 {
-	flush_workqueue(ksmbd_wq);
 	destroy_workqueue(ksmbd_wq);
 	ksmbd_wq = NULL;
 }
@@ -78,3 +97,81 @@ bool ksmbd_queue_work(struct ksmbd_work *work)
 {
 	return queue_work(ksmbd_wq, &work->work);
 }
+
+static inline void __ksmbd_iov_pin(struct ksmbd_work *work, void *ib,
+				   unsigned int ib_len)
+{
+	work->iov[++work->iov_idx].iov_base = ib;
+	work->iov[work->iov_idx].iov_len = ib_len;
+	work->iov_cnt++;
+}
+
+static int __ksmbd_iov_pin_rsp(struct ksmbd_work *work, void *ib, int len,
+			       void *aux_buf, unsigned int aux_size)
+{
+	struct aux_read *ar = NULL;
+	int need_iov_cnt = 1;
+
+	if (aux_size) {
+		need_iov_cnt++;
+		ar = kmalloc(sizeof(struct aux_read), GFP_KERNEL);
+		if (!ar)
+			return -ENOMEM;
+	}
+
+	if (work->iov_alloc_cnt < work->iov_cnt + need_iov_cnt) {
+		struct kvec *new;
+
+		work->iov_alloc_cnt += 4;
+		new = krealloc(work->iov,
+			       sizeof(struct kvec) * work->iov_alloc_cnt,
+			       GFP_KERNEL | __GFP_ZERO);
+		if (!new) {
+			kfree(ar);
+			work->iov_alloc_cnt -= 4;
+			return -ENOMEM;
+		}
+		work->iov = new;
+	}
+
+	/* Plus rfc_length size on first iov */
+	if (!work->iov_idx) {
+		work->iov[work->iov_idx].iov_base = work->response_buf;
+		*(__be32 *)work->iov[0].iov_base = 0;
+		work->iov[work->iov_idx].iov_len = 4;
+		work->iov_cnt++;
+	}
+
+	__ksmbd_iov_pin(work, ib, len);
+	inc_rfc1001_len(work->iov[0].iov_base, len);
+
+	if (aux_size) {
+		__ksmbd_iov_pin(work, aux_buf, aux_size);
+		inc_rfc1001_len(work->iov[0].iov_base, aux_size);
+
+		ar->buf = aux_buf;
+		list_add(&ar->entry, &work->aux_read_list);
+	}
+
+	return 0;
+}
+
+int ksmbd_iov_pin_rsp(struct ksmbd_work *work, void *ib, int len)
+{
+	return __ksmbd_iov_pin_rsp(work, ib, len, NULL, 0);
+}
+
+int ksmbd_iov_pin_rsp_read(struct ksmbd_work *work, void *ib, int len,
+			   void *aux_buf, unsigned int aux_size)
+{
+	return __ksmbd_iov_pin_rsp(work, ib, len, aux_buf, aux_size);
+}
+
+int allocate_interim_rsp_buf(struct ksmbd_work *work)
+{
+	work->response_buf = kzalloc(MAX_CIFS_SMALL_BUFFER_SIZE, GFP_KERNEL);
+	if (!work->response_buf)
+		return -ENOMEM;
+	work->response_sz = MAX_CIFS_SMALL_BUFFER_SIZE;
+	return 0;
+}