Attachment 'patch-2.6.8-cifs-multithread.diff'
diff -urN orig/fs/cifs/file.c cifs-devel/fs/cifs/file.c
--- orig/fs/cifs/file.c 2006-03-19 23:44:15.052372800 +0530
+++ cifs-devel/fs/cifs/file.c 2006-03-19 23:47:07.244195664 +0530
@@ -1,4 +1,8 @@
-/*
+/* <modified file.c>
+ * modified function cifs_readpages()
+ * added function cifs_readpages_threadfn()
+ * (Nitin Gupta: nitingupta910@gmail.com)
+ *
 * fs/cifs/file.c
 *
 * vfs operations that deal with files
@@ -35,6 +39,38 @@
 #include "cifs_debug.h"
 #include "cifs_fs_sb.h"

+#include <asm/atomic.h>
+#include <asm/spinlock.h>
+#include <linux/kthread.h>
+
+#define FIN_WAIT 1
+#define FIN_ERR 3
+
+struct per_thread_data {
+    int interrupted;
+    wait_queue_head_t wq_h;
+
+    int xid, rsize_in_pages;
+    struct file *file;
+    struct address_space *mapping;
+    struct list_head *page_list;
+    struct pagevec lru_pvec;
+    struct cifsFileInfo * open_file;
+    struct cifs_sb_info *cifs_sb;
+    struct cifsTconInfo *pTcon;
+
+    spinlock_t sl_page_pool;
+    spinlock_t sl_cache_lock;
+    struct semaphore threadsem;
+    volatile struct list_head *page_pool;
+
+    atomic_t pages_left;
+    atomic_t read_state;
+    atomic_t thread_count;
+    atomic_t threads_required;
+};
+
+
 int
 cifs_open(struct inode *inode, struct file *file)
 {
@@ -1093,49 +1129,109 @@
 }


-static int
-cifs_readpages(struct file *file, struct address_space *mapping,
-    struct list_head *page_list, unsigned num_pages)
+int cifs_readpages_threadfn (void *data)
 {
-    int rc = -EACCES;
-    int xid;
-    loff_t offset;
-    struct page * page;
-    struct cifs_sb_info *cifs_sb;
-    struct cifsTconInfo *pTcon;
-    int bytes_read = 0;
-    unsigned int read_size,i;
-    char * smb_read_data = NULL;
-    struct smb_com_read_rsp * pSMBr;
-    struct pagevec lru_pvec;
-    struct cifsFileInfo * open_file;

-    xid = GetXid();
-    if (file->private_data == NULL) {
-        FreeXid(xid);
-        return -EBADF;
+int i, rc;
+unsigned num_pages;
+char *smb_read_data = NULL;
+struct page *page;
+
+struct list_head page_list_head;
+struct list_head *page_list;
+
+
+struct per_thread_data *t = (struct per_thread_data *)data;
+
+while ( atomic_read(&t->pages_left) > 0 )
+{
+
+    INIT_LIST_HEAD(&page_list_head);
+
+    if (atomic_read(&t->read_state) == FIN_ERR || (t->interrupted == 1)) break;
+
+
+spin_lock(&t->sl_page_pool);
+
+    if (atomic_read(&t->threads_required) < atomic_read(&t->thread_count)) {
+        spin_unlock(&t->sl_page_pool);
+        atomic_dec(&t->thread_count);
+        return 0;
+    }
+
+    if (atomic_read(&t->read_state) == FIN_ERR) /* if error */ {
+        spin_unlock(&t->sl_page_pool);
+        break;
 }
-    open_file = (struct cifsFileInfo *)file->private_data;
-    cifs_sb = CIFS_SB(file->f_dentry->d_sb);
-    pTcon = cifs_sb->tcon;
+    /* if(atomic_read(&t->thread_count)<=1) {
+        atomic_dec(&t->thread_count);
+        up(&t->threadsem);
+        return 0;
+    } else {
+        atomic_dec(&t->thread_count);
+        return 0;
+    }
+    }*/
+
+    if (atomic_read(&t->read_state) == FIN_WAIT) { /* endwait state */
+        if (atomic_read(&t->thread_count) <= 1) {
+            spin_unlock(&t->sl_page_pool);
+            atomic_dec(&t->thread_count);
+            up(&t->threadsem);
+            return 0;
+        } else {
+            atomic_dec(&t->thread_count);
+            spin_unlock(&t->sl_page_pool);
+            return 0;
+        }
+    }
+
+    //printk("\npages_left = %d\n", atomic_read(&t->pages_left));
+
+    if (atomic_read(&t->pages_left) >= t->rsize_in_pages) {
+        num_pages = t->rsize_in_pages;
+    } else {
+        num_pages = atomic_read(&t->pages_left);
+    }
+
+    //num_pages = 1;
+    atomic_sub(num_pages, &t->pages_left);
+
+    for (i=0; i<num_pages; i++) {
+        page = list_entry(t->page_pool, struct page, lru);
+        t->page_pool = t->page_pool->prev;
+        list_del(&page->lru);
+        list_add(&page->lru, &page_list_head);
+    }
+
+    //printk("\npages_left now = %d\n", atomic_read(&t->pages_left));
+
+    if ( atomic_read(&t->pages_left) <= 0 )
+        atomic_set(&t->read_state, FIN_WAIT); /* set endwait state */

-    pagevec_init(&lru_pvec, 0);
+spin_unlock(&t->sl_page_pool);

-    for(i = 0;i<num_pages;) {
+    page_list = &page_list_head;
+
+    for(i = 0; i < num_pages;) {
+        struct page *tmp_page;
         unsigned contig_pages;
-        struct page * tmp_page;
         unsigned long expected_index;
-
-        if(list_empty(page_list)) {
-            break;
-        }
+        loff_t offset;
+        unsigned int read_size;
+        int bytes_read;
+        struct smb_com_read_rsp * pSMBr;
+
+        smb_read_data = NULL;
+
+
         page = list_entry(page_list->prev, struct page, lru);
         offset = (loff_t)page->index << PAGE_CACHE_SHIFT;

         /* count adjacent pages that we will read into */
         contig_pages = 0;
-        expected_index = list_entry(page_list->prev,struct page,lru)->index;
-        list_for_each_entry_reverse(tmp_page,page_list,lru) {
+        expected_index = list_entry(page_list->prev,struct page,lru)->index;
+        list_for_each_entry_reverse(tmp_page, page_list,lru) {
             if(tmp_page->index == expected_index) {
                 contig_pages++;
                 expected_index++;
@@ -1143,83 +1239,81 @@
                 break;
             }
         }
-        if(contig_pages + i > num_pages) {
-            contig_pages = num_pages - i;
-        }
-
-        /* for reads over a certain size could initiate async read ahead */
+        //contig_pages = 1;
+

         read_size = contig_pages * PAGE_CACHE_SIZE;
-        /* Read size needs to be in multiples of one page */
-        read_size = min_t(const unsigned int,read_size,cifs_sb->rsize & PAGE_CACHE_MASK);
+
+        //printk("\nread_size = %d\n", read_size);
+
+        if (atomic_read(&t->read_state) == FIN_ERR) break;

         rc = -EAGAIN;
         while(rc == -EAGAIN) {
-            if ((open_file->invalidHandle) && (!open_file->closePend)) {
-                rc = cifs_reopen_file(file->f_dentry->d_inode,
-                    file, TRUE);
-                if(rc != 0)
+            if ((t->open_file->invalidHandle) && (!t->open_file->closePend)) {
+                rc = cifs_reopen_file(t->file->f_dentry->d_inode
+                    , t->file, TRUE);
+                if(rc != 0) {
+                    atomic_set((&t->read_state), FIN_ERR);
                     break;
+                }
             }

-            rc = CIFSSMBRead(xid, pTcon,
-                open_file->netfid,
+            rc = CIFSSMBRead(t->xid, t->pTcon,
+                t->open_file->netfid,
                 read_size, offset,
                 &bytes_read, &smb_read_data);
-            /* BB need to check return code here */
-            if(rc== -EAGAIN) {
+
+            if(rc == -EAGAIN) {
                 if(smb_read_data) {
                     cifs_buf_release(smb_read_data);
                     smb_read_data = NULL;
                 }
             }
         }
+
+        if (atomic_read(&t->read_state) == FIN_ERR) break;
+
         if ((rc < 0) || (smb_read_data == NULL)) {
             cFYI(1,("Read error in readpages: %d",rc));
+
             /* clean up remaing pages off list */
-            while (!list_empty(page_list) && (i < num_pages)) {
-                page = list_entry(page_list->prev, struct page, lru);
-                list_del(&page->lru);
-                page_cache_release(page);
-            }
+            atomic_set(&t->read_state, FIN_ERR);
             break;
         } else if (bytes_read > 0) {
             pSMBr = (struct smb_com_read_rsp *)smb_read_data;
-            cifs_copy_cache_pages(mapping, page_list, bytes_read,
-                smb_read_data + 4 /* RFC1001 hdr */ +
-                le16_to_cpu(pSMBr->DataOffset), &lru_pvec);
+            //printk("\nbefore cache\n");
+            spin_lock(&t->sl_cache_lock);
+
+            if (atomic_read(&t->read_state) == FIN_ERR) {
+                spin_unlock(&t->sl_cache_lock);
+                break;
+            }
+            cifs_copy_cache_pages(t->mapping, page_list,
+                bytes_read, smb_read_data + 4 /* RFC1001 hdr */
+                + le16_to_cpu(pSMBr->DataOffset),
+                &t->lru_pvec);
+
+            spin_unlock(&t->sl_cache_lock);
+            //printk("\nafter cache\n");

             i += bytes_read >> PAGE_CACHE_SHIFT;
 #ifdef CONFIG_CIFS_STATS
-            atomic_inc(&pTcon->num_reads);
-            spin_lock(&pTcon->stat_lock);
-            pTcon->bytes_read += bytes_read;
-            spin_unlock(&pTcon->stat_lock);
+            atomic_inc(&t->pTcon->num_reads);
+            spin_lock(&t->pTcon->stat_lock);
+            t->pTcon->bytes_read += bytes_read;
+            spin_unlock(&t->pTcon->stat_lock);
 #endif
             if((int)(bytes_read & PAGE_CACHE_MASK) != bytes_read) {
-                cFYI(1,("Partial page %d of %d read to cache",i++,num_pages));
+                cFYI(1,("Partial page %d of %d read to cache",
+                    i, num_pages));

                 i++; /* account for partial page */

-                /* server copy of file can have smaller size than client */
-                /* BB do we need to verify this common case ? this case is ok -
-                if we are at server EOF we will hit it on next read */
-
-                /* while(!list_empty(page_list) && (i < num_pages)) {
-                    page = list_entry(page_list->prev,struct page, list);
-                    list_del(&page->list);
-                    page_cache_release(page);
-                }
-                break; */
             }
         } else {
             cFYI(1,("No bytes read (%d) at offset %lld . Cleaning remaining pages from readahead list",bytes_read,offset));
-            /* BB turn off caching and do new lookup on file size at server? */
-            while (!list_empty(page_list) && (i < num_pages)) {
-                page = list_entry(page_list->prev, struct page, lru);
-                list_del(&page->lru);
-                page_cache_release(page); /* BB removeme - replace with zero of page? */
-            }
+            atomic_set(&t->read_state, FIN_ERR);
             break;
         }
         if(smb_read_data) {
@@ -1227,20 +1321,169 @@
             smb_read_data = NULL;
         }
         bytes_read = 0;
-    }

-    pagevec_lru_add(&lru_pvec);
+        if (atomic_read(&t->read_state) == FIN_ERR) break;
+
+    } //end of for(i = 0;i<num_pages;)
+

-/* need to free smb_read_data buf before exit */
+    if (atomic_read(&t->read_state) == FIN_ERR) break;
+
+    if (atomic_read(&t->read_state) == FIN_WAIT) {
+        if (atomic_read(&t->thread_count) <= 1) {
+            if(smb_read_data) {
+                cifs_buf_release(smb_read_data);
+                smb_read_data = NULL;
+            }
+            atomic_dec(&t->thread_count);
+            up(&t->threadsem);
+            return 0;
+        } else {
+            atomic_dec(&t->thread_count);
+            return 0;
+        }
+    }
+} // end of while
+
+if (atomic_read(&t->read_state) == FIN_ERR || (t->interrupted == 1)) { /* if error */
     if(smb_read_data) {
         cifs_buf_release(smb_read_data);
         smb_read_data = NULL;
-    }
+    }
+    atomic_dec(&t->thread_count);
+    //printk("\nin tfn thread_count: %d\n", atomic_read(&t->thread_count));
+    if ( (t->interrupted == 1) && (atomic_read(&t->thread_count) <= 0) )
+        wake_up(&t->wq_h);
+    up(&t->threadsem);
+    return 0;
+}

-    FreeXid(xid);
+atomic_dec(&t->thread_count);
+//up(&t->threadsem);
+return 0;
+
+}
+
+
+
+static int
+cifs_readpages(struct file *file, struct address_space *mapping,
+    struct list_head *page_list, unsigned num_pages)
+{
+    int i, init_threads, xid, rc = -EACCES;
+    struct page *page;
+    struct per_thread_data thread_data;
+
+    /* some hard-coded rules to set the initial number of threads;
+     * the number of threads is later meant to be controlled by an
+     * external function that changes the threads_required variable
+     * to adjust the number of running threads.
+     */
+
+    /* setting rsize to higher values at mount time increases performance */
+    if (num_pages <= 4 )
+        init_threads = 1;
+    else if (num_pages <= 8)
+        init_threads = 4;
+    else init_threads = 8;
+    // init_threads = 8;
+
+
+    /* setting all the data to be passed to threads */
+    xid = GetXid();
+    thread_data.xid = xid;
+    thread_data.sl_page_pool = SPIN_LOCK_UNLOCKED;
+    thread_data.sl_cache_lock = SPIN_LOCK_UNLOCKED;
+    thread_data.file = file;
+    thread_data.mapping = mapping;
+    thread_data.page_pool = page_list->prev;
+
+
+    thread_data.open_file = (struct cifsFileInfo *)file->private_data;
+    thread_data.cifs_sb = CIFS_SB(file->f_dentry->d_sb);
+    thread_data.pTcon = thread_data.cifs_sb->tcon;
+    thread_data.rsize_in_pages = (thread_data.cifs_sb->rsize) >> PAGE_CACHE_SHIFT;
+    atomic_set(&thread_data.pages_left, num_pages);
+
+    thread_data.interrupted = 0;
+    init_waitqueue_head(&thread_data.wq_h);
+
+    /* read_state var --
+     * START (0)    : start the thread
+     * FIN_WAIT (1) : a thread has reached EOF
+     * FIN_ERR (3)  : some error occurred during read
+     */
+    atomic_set(&thread_data.read_state, 0);
+
+    /* keep track of the current number of threads */
+    atomic_set(&thread_data.thread_count, init_threads);
+
+    /* var: threads_required - the number of threads currently required.
+     * This var is meant to be modified by an external function that
+     * determines the number of threads required according to some
+     * criteria (such as variation in RTT over a certain period) and
+     * stores the result in this var.
+     * Threads read this var and stop if required.
+     * Increasing the number of threads (if required) is the job of
+     * the external function.
+     * No such external function is implemented yet.
+     */
+    atomic_set(&thread_data.threads_required, init_threads);
+
+
+    pagevec_init(&thread_data.lru_pvec, 0);
+
+    if (file->private_data == NULL) {
+        FreeXid(thread_data.xid);
+        return -EBADF;
+    }
+
+    sema_init(&thread_data.threadsem, 1);
+
+    down_interruptible(&thread_data.threadsem);
+
+    for (i=0; i<init_threads; i++)
+        kthread_run(&cifs_readpages_threadfn, &thread_data, "cifsThread");
+
+    if(down_interruptible(&thread_data.threadsem)) {
+        thread_data.interrupted = 1;
+        atomic_set(&thread_data.read_state, FIN_ERR);
+        printk("\nCIFS: readpages interrupted by signal\n");
+        sleep_on(&thread_data.wq_h);
+
+        while(!list_empty(page_list)) {
+            page = list_entry(page_list->prev,struct page, lru);
+            list_del(&page->lru);
+            page_cache_release(page);
+        }
+        pagevec_lru_add(&thread_data.lru_pvec);
+        FreeXid(thread_data.xid);
+        return -ERESTARTSYS;
+    }
+
+
+    up(&thread_data.threadsem);
+    rc = 0;
+    if (atomic_read(&thread_data.read_state) == FIN_ERR) {
+        rc = -EACCES;
+        printk("\nCIFS: some error occurred during reading\n");
+        wait_event_interruptible(thread_data.wq_h, (atomic_read(&thread_data.thread_count) <= 0) );
+        while(!list_empty(page_list)) {
+            page = list_entry(page_list->prev,struct page, lru);
+            list_del(&page->lru);
+            page_cache_release(page);
+        }
+    }
+
+    pagevec_lru_add(&thread_data.lru_pvec);
+
+    FreeXid(thread_data.xid);
     return rc;
 }

+
+
+
 static int cifs_readpage_worker(struct file *file, struct page *page, loff_t * poffset)
 {
     char * read_data;
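
Note on the synchronization scheme: the core idea in cifs_readpages_threadfn() is a simple work-splitting pattern -- each worker takes up to rsize_in_pages pages from the shared page_pool under sl_page_pool, issues the read, and the last worker to finish wakes the submitting thread through threadsem (or wq_h on error/interrupt). The sketch below is a hypothetical userspace analogue of that pattern using POSIX threads, written only to illustrate the idea; the names worker, work_left, CHUNK_SIZE and the condition variable are illustrative and are not part of the patch or of the CIFS code.

/*
 * Userspace sketch (assumes POSIX threads) of the work-splitting
 * pattern used by cifs_readpages_threadfn(): each worker grabs up to
 * CHUNK_SIZE units of the remaining work under a lock, "reads" them,
 * and the last worker to finish signals the submitter.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdio.h>

#define NUM_THREADS 4
#define CHUNK_SIZE  8           /* plays the role of rsize_in_pages */

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
static int work_left = 50;      /* plays the role of pages_left */
static int thread_count = NUM_THREADS;

static void *worker(void *arg)
{
    int id = *(int *)arg;

    for (;;) {
        int chunk;

        /* take a chunk of the remaining work, like the
         * sl_page_pool-protected section in the patch */
        pthread_mutex_lock(&lock);
        chunk = work_left > CHUNK_SIZE ? CHUNK_SIZE : work_left;
        work_left -= chunk;
        pthread_mutex_unlock(&lock);

        if (chunk == 0)
            break;

        /* the real code would issue CIFSSMBRead() here */
        printf("thread %d read %d units\n", id, chunk);
    }

    /* last thread out wakes the submitter, as with threadsem/wq_h */
    pthread_mutex_lock(&lock);
    if (--thread_count == 0)
        pthread_cond_signal(&done);
    pthread_mutex_unlock(&lock);
    return NULL;
}

int main(void)
{
    pthread_t tid[NUM_THREADS];
    int ids[NUM_THREADS];
    int i;

    for (i = 0; i < NUM_THREADS; i++) {
        ids[i] = i;
        pthread_create(&tid[i], NULL, worker, &ids[i]);
    }

    /* wait for the last worker, like the second down_interruptible() */
    pthread_mutex_lock(&lock);
    while (thread_count > 0)
        pthread_cond_wait(&done, &lock);
    pthread_mutex_unlock(&lock);

    for (i = 0; i < NUM_THREADS; i++)
        pthread_join(tid[i], NULL);
    return 0;
}

In the patch itself the same roles are played by sl_page_pool (the lock around the shared page list), pages_left (the remaining work), threadsem and wq_h (the completion signals), and read_state set to FIN_ERR to make the remaining workers stop early.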