Lines Matching full:c

35 #define PAGE_DIV(x) ( ((unsigned long)(x) / (unsigned long)(c->wbuf_pagesize)) * (unsigned long)(c->wbuf_pagesize) )
36 #define PAGE_MOD(x) ( (unsigned long)(x) % (unsigned long)(c->wbuf_pagesize) )
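PAGE_DIV() rounds a flash offset down to the start of its write-buffer page and PAGE_MOD() yields the remainder within that page; jffs2_flash_writev() below uses the pair to set c->wbuf_ofs and c->wbuf_len. A minimal standalone sketch of the arithmetic (not from wbuf.c), assuming a hypothetical 2048-byte page size:

/* Sketch: how PAGE_DIV()/PAGE_MOD() split an offset for a 2048-byte page. */
#include <stdio.h>

int main(void)
{
	unsigned long pagesize = 2048;                      /* stands in for c->wbuf_pagesize */
	unsigned long to = 0x1234;                          /* arbitrary flash offset */
	unsigned long base = (to / pagesize) * pagesize;    /* PAGE_DIV(to) == 0x1000 */
	unsigned long rem  = to % pagesize;                 /* PAGE_MOD(to) == 0x234  */

	printf("base=0x%lx rem=0x%lx\n", base, rem);        /* prints base=0x1000 rem=0x234 */
	return 0;
}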
48 static int jffs2_wbuf_pending_for_ino(struct jffs2_sb_info *c, uint32_t ino) in jffs2_wbuf_pending_for_ino() argument
50 struct jffs2_inodirty *this = c->wbuf_inodes; in jffs2_wbuf_pending_for_ino()
69 static void jffs2_clear_wbuf_ino_list(struct jffs2_sb_info *c) in jffs2_clear_wbuf_ino_list() argument
73 this = c->wbuf_inodes; in jffs2_clear_wbuf_ino_list()
82 c->wbuf_inodes = NULL; in jffs2_clear_wbuf_ino_list()
85 static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino) in jffs2_wbuf_dirties_inode() argument
90 jffs2_dirty_trigger(c); in jffs2_wbuf_dirties_inode()
92 if (jffs2_wbuf_pending_for_ino(c, ino)) in jffs2_wbuf_dirties_inode()
98 jffs2_clear_wbuf_ino_list(c); in jffs2_wbuf_dirties_inode()
99 c->wbuf_inodes = &inodirty_nomem; in jffs2_wbuf_dirties_inode()
103 new->next = c->wbuf_inodes; in jffs2_wbuf_dirties_inode()
104 c->wbuf_inodes = new; in jffs2_wbuf_dirties_inode()
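The fragments above describe a small singly linked list of inode numbers that still have data sitting in the write buffer, plus a static inodirty_nomem sentinel installed when allocation fails (line 99) so that every inode is then reported as pending. A standalone sketch of that idea; the struct layout and the helper name are assumptions, only inodirty_nomem and the sentinel fallback come from the listing:

#include <stdint.h>

struct jffs2_inodirty {			/* assumed layout for the sketch */
	struct jffs2_inodirty *next;
	uint32_t ino;
};

static struct jffs2_inodirty inodirty_nomem;	/* "treat everything as dirty" sentinel */

static int wbuf_pending_for_ino_sketch(struct jffs2_inodirty *list, uint32_t ino)
{
	/* A failed allocation installs the sentinel, so every inode must
	 * then be assumed to have data pending in the write buffer. */
	if (list == &inodirty_nomem)
		return 1;

	for (; list; list = list->next)
		if (list->ino == ino)
			return 1;
	return 0;
}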
108 static inline void jffs2_refile_wbuf_blocks(struct jffs2_sb_info *c) in jffs2_refile_wbuf_blocks() argument
113 if (list_empty(&c->erasable_pending_wbuf_list)) in jffs2_refile_wbuf_blocks()
116 list_for_each_safe(this, next, &c->erasable_pending_wbuf_list) { in jffs2_refile_wbuf_blocks()
126 list_add_tail(&jeb->list, &c->erase_pending_list); in jffs2_refile_wbuf_blocks()
127 c->nr_erasing_blocks++; in jffs2_refile_wbuf_blocks()
128 jffs2_garbage_collect_trigger(c); in jffs2_refile_wbuf_blocks()
133 list_add_tail(&jeb->list, &c->erasable_list); in jffs2_refile_wbuf_blocks()
141 static void jffs2_block_refile(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, int allow_empty) in jffs2_block_refile() argument
146 if (c->nextblock == jeb) in jffs2_block_refile()
147 c->nextblock = NULL; in jffs2_block_refile()
153 list_add(&jeb->list, &c->bad_used_list); in jffs2_block_refile()
159 list_add(&jeb->list, &c->erase_pending_list); in jffs2_block_refile()
160 c->nr_erasing_blocks++; in jffs2_block_refile()
161 jffs2_garbage_collect_trigger(c); in jffs2_block_refile()
164 if (!jffs2_prealloc_raw_node_refs(c, jeb, 1)) { in jffs2_block_refile()
167 jffs2_link_node_ref(c, jeb, in jffs2_block_refile()
168 (jeb->offset+c->sector_size-oldfree) | REF_OBSOLETE, in jffs2_block_refile()
171 c->wasted_size += oldfree; in jffs2_block_refile()
173 c->dirty_size -= oldfree; in jffs2_block_refile()
177 jffs2_dbg_dump_block_lists_nolock(c); in jffs2_block_refile()
178 jffs2_dbg_acct_sanity_check_nolock(c,jeb); in jffs2_block_refile()
179 jffs2_dbg_acct_paranoia_check_nolock(c, jeb); in jffs2_block_refile()
182 static struct jffs2_raw_node_ref **jffs2_incore_replace_raw(struct jffs2_sb_info *c, in jffs2_incore_replace_raw() argument
230 static int jffs2_verify_write(struct jffs2_sb_info *c, unsigned char *buf, in jffs2_verify_write() argument
237 ret = mtd_read(c->mtd, ofs, c->wbuf_pagesize, &retlen, c->wbuf_verify); in jffs2_verify_write()
240 __func__, c->wbuf_ofs, ret); in jffs2_verify_write()
242 } else if (retlen != c->wbuf_pagesize) { in jffs2_verify_write()
244 __func__, ofs, retlen, c->wbuf_pagesize); in jffs2_verify_write()
247 if (!memcmp(buf, c->wbuf_verify, c->wbuf_pagesize)) in jffs2_verify_write()
258 eccstr, c->wbuf_ofs); in jffs2_verify_write()
260 c->wbuf, c->wbuf_pagesize, 0); in jffs2_verify_write()
264 c->wbuf_verify, c->wbuf_pagesize, 0); in jffs2_verify_write()
269 #define jffs2_verify_write(c,b,o) (0) argument
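jffs2_verify_write() reads the page that was just written back into c->wbuf_verify and memcmp()s it against the intended contents; when verification is compiled out, the whole call collapses to the (0) macro above. A standalone sketch of the read-back-and-compare pattern, with an assumed callback standing in for mtd_read():

#include <stdint.h>
#include <string.h>

/* read_page stands in for the MTD read; an assumed callback for this
 * sketch only, returning 0 on success. */
typedef int (*read_page_fn)(uint32_t ofs, uint8_t *dst, uint32_t len);

static int verify_write_sketch(read_page_fn read_page, uint32_t ofs,
			       const uint8_t *written, uint8_t *scratch,
			       uint32_t pagesize)
{
	if (read_page(ofs, scratch, pagesize))
		return -1;			/* the read-back itself failed */

	if (!memcmp(written, scratch, pagesize))
		return 0;			/* contents match: write is good */

	/* Mismatch: the caller treats this like a write error and lets the
	 * recovery path move the data to another block. */
	return -1;
}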
275 static void jffs2_wbuf_recover(struct jffs2_sb_info *c) in jffs2_wbuf_recover() argument
285 jeb = &c->blocks[c->wbuf_ofs / c->sector_size]; in jffs2_wbuf_recover()
287 spin_lock(&c->erase_completion_lock); in jffs2_wbuf_recover()
288 if (c->wbuf_ofs % c->mtd->erasesize) in jffs2_wbuf_recover()
289 jffs2_block_refile(c, jeb, REFILE_NOTEMPTY); in jffs2_wbuf_recover()
291 jffs2_block_refile(c, jeb, REFILE_ANYWAY); in jffs2_wbuf_recover()
292 spin_unlock(&c->erase_completion_lock); in jffs2_wbuf_recover()
302 (next && ref_offset(next) <= c->wbuf_ofs)) { in jffs2_wbuf_recover()
305 (ref_offset(raw) + ref_totlen(c, jeb, raw)), in jffs2_wbuf_recover()
306 c->wbuf_ofs); in jffs2_wbuf_recover()
311 (ref_offset(raw) + ref_totlen(c, jeb, raw))); in jffs2_wbuf_recover()
320 c->wbuf_len = 0; in jffs2_wbuf_recover()
336 if (start < c->wbuf_ofs) { in jffs2_wbuf_recover()
348 ret = mtd_read(c->mtd, start, c->wbuf_ofs - start, &retlen, in jffs2_wbuf_recover()
353 (retlen == c->wbuf_ofs - start)) in jffs2_wbuf_recover()
356 if (ret || retlen != c->wbuf_ofs - start) { in jffs2_wbuf_recover()
371 c->wbuf_len = 0; in jffs2_wbuf_recover()
382 memcpy(buf + (c->wbuf_ofs - start), c->wbuf, end - c->wbuf_ofs); in jffs2_wbuf_recover()
389 ret = jffs2_reserve_space_gc(c, end-start, &len, JFFS2_SUMMARY_NOSUM_SIZE); in jffs2_wbuf_recover()
397 jffs2_sum_disable_collecting(c->summary); in jffs2_wbuf_recover()
399 ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, nr_refile); in jffs2_wbuf_recover()
406 ofs = write_ofs(c); in jffs2_wbuf_recover()
408 if (end-start >= c->wbuf_pagesize) { in jffs2_wbuf_recover()
414 unsigned char *rewrite_buf = buf?:c->wbuf; in jffs2_wbuf_recover()
415 uint32_t towrite = (end-start) - ((end-start)%c->wbuf_pagesize); in jffs2_wbuf_recover()
425 mtd_write(c->mtd, ofs, towrite, &retlen, brokenbuf); in jffs2_wbuf_recover()
429 ret = mtd_write(c->mtd, ofs, towrite, &retlen, in jffs2_wbuf_recover()
432 if (ret || retlen != towrite || jffs2_verify_write(c, rewrite_buf, ofs)) { in jffs2_wbuf_recover()
438 jffs2_add_physical_node_ref(c, ofs | REF_OBSOLETE, ref_totlen(c, jeb, first_raw), NULL); in jffs2_wbuf_recover()
444 c->wbuf_len = (end - start) - towrite; in jffs2_wbuf_recover()
445 c->wbuf_ofs = ofs + towrite; in jffs2_wbuf_recover()
446 memmove(c->wbuf, rewrite_buf + towrite, c->wbuf_len); in jffs2_wbuf_recover()
447 /* Don't muck about with c->wbuf_inodes. False positives are harmless. */ in jffs2_wbuf_recover()
451 memcpy(c->wbuf, buf, end-start); in jffs2_wbuf_recover()
453 memmove(c->wbuf, c->wbuf + (start - c->wbuf_ofs), end - start); in jffs2_wbuf_recover()
455 c->wbuf_ofs = ofs; in jffs2_wbuf_recover()
456 c->wbuf_len = end - start; in jffs2_wbuf_recover()
460 new_jeb = &c->blocks[ofs / c->sector_size]; in jffs2_wbuf_recover()
462 spin_lock(&c->erase_completion_lock); in jffs2_wbuf_recover()
464 uint32_t rawlen = ref_totlen(c, jeb, raw); in jffs2_wbuf_recover()
505 f = jffs2_gc_fetch_inode(c, ic->ino, !ic->pino_nlink); in jffs2_wbuf_recover()
517 adjust_ref = jffs2_incore_replace_raw(c, f, raw, in jffs2_wbuf_recover()
518 (void *)(buf?:c->wbuf) + (ref_offset(raw) - start)); in jffs2_wbuf_recover()
527 new_ref = jffs2_link_node_ref(c, new_jeb, ofs | ref_flags(raw), rawlen, ic); in jffs2_wbuf_recover()
534 jffs2_gc_release_inode(c, f); in jffs2_wbuf_recover()
539 c->dirty_size += rawlen; in jffs2_wbuf_recover()
540 c->used_size -= rawlen; in jffs2_wbuf_recover()
553 list_move(&jeb->list, &c->erase_pending_list); in jffs2_wbuf_recover()
554 c->nr_erasing_blocks++; in jffs2_wbuf_recover()
555 jffs2_garbage_collect_trigger(c); in jffs2_wbuf_recover()
558 jffs2_dbg_acct_sanity_check_nolock(c, jeb); in jffs2_wbuf_recover()
559 jffs2_dbg_acct_paranoia_check_nolock(c, jeb); in jffs2_wbuf_recover()
561 jffs2_dbg_acct_sanity_check_nolock(c, new_jeb); in jffs2_wbuf_recover()
562 jffs2_dbg_acct_paranoia_check_nolock(c, new_jeb); in jffs2_wbuf_recover()
564 spin_unlock(&c->erase_completion_lock); in jffs2_wbuf_recover()
567 c->wbuf_ofs, c->wbuf_len); in jffs2_wbuf_recover()
580 static int __jffs2_flush_wbuf(struct jffs2_sb_info *c, int pad) in __jffs2_flush_wbuf() argument
588 if (!jffs2_is_writebuffered(c)) in __jffs2_flush_wbuf()
591 if (!mutex_is_locked(&c->alloc_sem)) { in __jffs2_flush_wbuf()
596 if (!c->wbuf_len) /* already checked c->wbuf above */ in __jffs2_flush_wbuf()
599 wbuf_jeb = &c->blocks[c->wbuf_ofs / c->sector_size]; in __jffs2_flush_wbuf()
600 if (jffs2_prealloc_raw_node_refs(c, wbuf_jeb, c->nextblock->allocated_refs + 1)) in __jffs2_flush_wbuf()
610 c->wbuf_len = PAD(c->wbuf_len); in __jffs2_flush_wbuf()
614 memset(c->wbuf + c->wbuf_len, 0, c->wbuf_pagesize - c->wbuf_len); in __jffs2_flush_wbuf()
616 if ( c->wbuf_len + sizeof(struct jffs2_unknown_node) < c->wbuf_pagesize) { in __jffs2_flush_wbuf()
617 struct jffs2_unknown_node *padnode = (void *)(c->wbuf + c->wbuf_len); in __jffs2_flush_wbuf()
620 padnode->totlen = cpu_to_je32(c->wbuf_pagesize - c->wbuf_len); in __jffs2_flush_wbuf()
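When flushing with padding, the unused tail of the page is zeroed and, if a node header still fits, described as a padding node whose totlen covers the rest of the page. A standalone userspace sketch; the nodetype constant and the struct layout are assumptions here (only the memset, the size check and the totlen assignment appear in the fragments above, and hdr_crc is omitted):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAGIC_BITMASK		0x1985	/* JFFS2 magic */
#define NODETYPE_PADDING	0x2004	/* assumed value, for the sketch only */

struct unknown_node {			/* stand-in for struct jffs2_unknown_node */
	uint16_t magic;
	uint16_t nodetype;
	uint32_t totlen;
	uint32_t hdr_crc;
};

int main(void)
{
	uint8_t page[512];		/* hypothetical wbuf_pagesize */
	uint32_t wbuf_len = 300;	/* bytes of real data already buffered */

	/* Zero the unused tail of the page. */
	memset(page + wbuf_len, 0, sizeof(page) - wbuf_len);

	/* If a node header still fits, mark the tail as padding so the
	 * mount-time scan can skip it cleanly. */
	if (wbuf_len + sizeof(struct unknown_node) < sizeof(page)) {
		struct unknown_node *pad = (void *)(page + wbuf_len);
		pad->magic = MAGIC_BITMASK;
		pad->nodetype = NODETYPE_PADDING;
		pad->totlen = (uint32_t)(sizeof(page) - wbuf_len);
	}

	printf("%u bytes of padding at offset %u\n",
	       (unsigned)(sizeof(page) - wbuf_len), (unsigned)wbuf_len);
	return 0;
}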
630 pr_notice("Faking write error at 0x%08x\n", c->wbuf_ofs); in __jffs2_flush_wbuf()
632 mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, &retlen, in __jffs2_flush_wbuf()
638 ret = mtd_write(c->mtd, c->wbuf_ofs, c->wbuf_pagesize, in __jffs2_flush_wbuf()
639 &retlen, c->wbuf); in __jffs2_flush_wbuf()
644 } else if (retlen != c->wbuf_pagesize) { in __jffs2_flush_wbuf()
646 retlen, c->wbuf_pagesize); in __jffs2_flush_wbuf()
649 } else if ((ret = jffs2_verify_write(c, c->wbuf, c->wbuf_ofs))) { in __jffs2_flush_wbuf()
651 jffs2_wbuf_recover(c); in __jffs2_flush_wbuf()
658 uint32_t waste = c->wbuf_pagesize - c->wbuf_len; in __jffs2_flush_wbuf()
661 (wbuf_jeb == c->nextblock) ? "next" : "", in __jffs2_flush_wbuf()
669 c->wbuf_ofs, c->wbuf_len, waste); in __jffs2_flush_wbuf()
675 spin_lock(&c->erase_completion_lock); in __jffs2_flush_wbuf()
677 jffs2_link_node_ref(c, wbuf_jeb, (c->wbuf_ofs + c->wbuf_len) | REF_OBSOLETE, waste, NULL); in __jffs2_flush_wbuf()
680 c->dirty_size -= waste; in __jffs2_flush_wbuf()
682 c->wasted_size += waste; in __jffs2_flush_wbuf()
684 spin_lock(&c->erase_completion_lock); in __jffs2_flush_wbuf()
687 jffs2_refile_wbuf_blocks(c); in __jffs2_flush_wbuf()
688 jffs2_clear_wbuf_ino_list(c); in __jffs2_flush_wbuf()
689 spin_unlock(&c->erase_completion_lock); in __jffs2_flush_wbuf()
691 memset(c->wbuf,0xff,c->wbuf_pagesize); in __jffs2_flush_wbuf()
693 c->wbuf_ofs += c->wbuf_pagesize; in __jffs2_flush_wbuf()
694 c->wbuf_len = 0; in __jffs2_flush_wbuf()
702 int jffs2_flush_wbuf_gc(struct jffs2_sb_info *c, uint32_t ino) in jffs2_flush_wbuf_gc() argument
710 if (!c->wbuf) in jffs2_flush_wbuf_gc()
713 mutex_lock(&c->alloc_sem); in jffs2_flush_wbuf_gc()
714 if (!jffs2_wbuf_pending_for_ino(c, ino)) { in jffs2_flush_wbuf_gc()
716 mutex_unlock(&c->alloc_sem); in jffs2_flush_wbuf_gc()
720 old_wbuf_ofs = c->wbuf_ofs; in jffs2_flush_wbuf_gc()
721 old_wbuf_len = c->wbuf_len; in jffs2_flush_wbuf_gc()
723 if (c->unchecked_size) { in jffs2_flush_wbuf_gc()
727 down_write(&c->wbuf_sem); in jffs2_flush_wbuf_gc()
728 ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING); in jffs2_flush_wbuf_gc()
732 ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING); in jffs2_flush_wbuf_gc()
733 up_write(&c->wbuf_sem); in jffs2_flush_wbuf_gc()
735 old_wbuf_ofs == c->wbuf_ofs) { in jffs2_flush_wbuf_gc()
737 mutex_unlock(&c->alloc_sem); in jffs2_flush_wbuf_gc()
741 ret = jffs2_garbage_collect_pass(c); in jffs2_flush_wbuf_gc()
744 mutex_lock(&c->alloc_sem); in jffs2_flush_wbuf_gc()
745 down_write(&c->wbuf_sem); in jffs2_flush_wbuf_gc()
746 ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING); in jffs2_flush_wbuf_gc()
750 ret = __jffs2_flush_wbuf(c, PAD_ACCOUNTING); in jffs2_flush_wbuf_gc()
751 up_write(&c->wbuf_sem); in jffs2_flush_wbuf_gc()
754 mutex_lock(&c->alloc_sem); in jffs2_flush_wbuf_gc()
759 mutex_unlock(&c->alloc_sem); in jffs2_flush_wbuf_gc()
764 int jffs2_flush_wbuf_pad(struct jffs2_sb_info *c) in jffs2_flush_wbuf_pad() argument
768 if (!c->wbuf) in jffs2_flush_wbuf_pad()
771 down_write(&c->wbuf_sem); in jffs2_flush_wbuf_pad()
772 ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT); in jffs2_flush_wbuf_pad()
775 ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT); in jffs2_flush_wbuf_pad()
776 up_write(&c->wbuf_sem); in jffs2_flush_wbuf_pad()
781 static size_t jffs2_fill_wbuf(struct jffs2_sb_info *c, const uint8_t *buf, in jffs2_fill_wbuf() argument
784 if (len && !c->wbuf_len && (len >= c->wbuf_pagesize)) in jffs2_fill_wbuf()
787 if (len > (c->wbuf_pagesize - c->wbuf_len)) in jffs2_fill_wbuf()
788 len = c->wbuf_pagesize - c->wbuf_len; in jffs2_fill_wbuf()
789 memcpy(c->wbuf + c->wbuf_len, buf, len); in jffs2_fill_wbuf()
790 c->wbuf_len += (uint32_t) len; in jffs2_fill_wbuf()
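jffs2_fill_wbuf() copies as much of the caller's data as still fits into the write buffer and reports how much it consumed; if the buffer is empty and the data covers at least a whole page it takes nothing, so the caller can write the aligned portion directly (as the mtd_write() of PAGE_DIV(vlen) further down does). A standalone sketch of that behaviour; the two return statements fall on lines this search did not match, so they are assumptions:

#include <stdint.h>
#include <string.h>

static size_t fill_wbuf_sketch(uint8_t *wbuf, uint32_t *wbuf_len,
			       uint32_t wbuf_pagesize,
			       const uint8_t *buf, size_t len)
{
	/* Fast path: buffer empty and the data covers at least one whole
	 * page, so take nothing and let the caller write the aligned part
	 * of it directly to flash. */
	if (len && !*wbuf_len && (len >= wbuf_pagesize))
		return 0;

	/* Otherwise copy in as much as still fits and report how much of
	 * the caller's data was consumed. */
	if (len > (wbuf_pagesize - *wbuf_len))
		len = wbuf_pagesize - *wbuf_len;
	memcpy(wbuf + *wbuf_len, buf, len);
	*wbuf_len += (uint32_t)len;
	return len;
}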
794 int jffs2_flash_writev(struct jffs2_sb_info *c, const struct kvec *invecs, in jffs2_flash_writev() argument
804 if (!jffs2_is_writebuffered(c)) in jffs2_flash_writev()
805 return jffs2_flash_direct_writev(c, invecs, count, to, retlen); in jffs2_flash_writev()
807 down_write(&c->wbuf_sem); in jffs2_flash_writev()
810 if (c->wbuf_ofs == 0xFFFFFFFF) { in jffs2_flash_writev()
811 c->wbuf_ofs = PAGE_DIV(to); in jffs2_flash_writev()
812 c->wbuf_len = PAGE_MOD(to); in jffs2_flash_writev()
813 memset(c->wbuf,0xff,c->wbuf_pagesize); in jffs2_flash_writev()
818 * at PAD(c->wbuf_len+c->wbuf_ofs), and it's permitted to in jffs2_flash_writev()
823 if (SECTOR_ADDR(to) != SECTOR_ADDR(c->wbuf_ofs)) { in jffs2_flash_writev()
825 if (c->wbuf_len) { in jffs2_flash_writev()
827 __func__, (unsigned long)to, c->wbuf_ofs); in jffs2_flash_writev()
828 ret = __jffs2_flush_wbuf(c, PAD_NOACCOUNT); in jffs2_flash_writev()
833 c->wbuf_ofs = PAGE_DIV(to); in jffs2_flash_writev()
834 c->wbuf_len = PAGE_MOD(to); in jffs2_flash_writev()
837 if (to != PAD(c->wbuf_ofs + c->wbuf_len)) { in jffs2_flash_writev()
841 if (c->wbuf_len) in jffs2_flash_writev()
843 c->wbuf_ofs, c->wbuf_ofs + c->wbuf_len); in jffs2_flash_writev()
848 if (c->wbuf_len != PAGE_MOD(to)) { in jffs2_flash_writev()
849 c->wbuf_len = PAGE_MOD(to); in jffs2_flash_writev()
851 if (!c->wbuf_len) { in jffs2_flash_writev()
852 c->wbuf_len = c->wbuf_pagesize; in jffs2_flash_writev()
853 ret = __jffs2_flush_wbuf(c, NOPAD); in jffs2_flash_writev()
863 wbuf_retlen = jffs2_fill_wbuf(c, v, vlen); in jffs2_flash_writev()
865 if (c->wbuf_len == c->wbuf_pagesize) { in jffs2_flash_writev()
866 ret = __jffs2_flush_wbuf(c, NOPAD); in jffs2_flash_writev()
875 if (vlen >= c->wbuf_pagesize) { in jffs2_flash_writev()
876 ret = mtd_write(c->mtd, outvec_to, PAGE_DIV(vlen), in jffs2_flash_writev()
883 c->wbuf_ofs = outvec_to; in jffs2_flash_writev()
888 wbuf_retlen = jffs2_fill_wbuf(c, v, vlen); in jffs2_flash_writev()
889 if (c->wbuf_len == c->wbuf_pagesize) { in jffs2_flash_writev()
890 ret = __jffs2_flush_wbuf(c, NOPAD); in jffs2_flash_writev()
906 int res = jffs2_sum_add_kvec(c, invecs, count, (uint32_t) to); in jffs2_flash_writev()
911 if (c->wbuf_len && ino) in jffs2_flash_writev()
912 jffs2_wbuf_dirties_inode(c, ino); in jffs2_flash_writev()
915 up_write(&c->wbuf_sem); in jffs2_flash_writev()
920 * At this point we have no problem, c->wbuf is empty. However in jffs2_flash_writev()
924 spin_lock(&c->erase_completion_lock); in jffs2_flash_writev()
926 jeb = &c->blocks[outvec_to / c->sector_size]; in jffs2_flash_writev()
927 jffs2_block_refile(c, jeb, REFILE_ANYWAY); in jffs2_flash_writev()
929 spin_unlock(&c->erase_completion_lock); in jffs2_flash_writev()
933 up_write(&c->wbuf_sem); in jffs2_flash_writev()
941 int jffs2_flash_write(struct jffs2_sb_info *c, loff_t ofs, size_t len, in jffs2_flash_write() argument
946 if (!jffs2_is_writebuffered(c)) in jffs2_flash_write()
947 return jffs2_flash_direct_write(c, ofs, len, retlen, buf); in jffs2_flash_write()
951 return jffs2_flash_writev(c, vecs, 1, ofs, retlen, 0); in jffs2_flash_write()
957 int jffs2_flash_read(struct jffs2_sb_info *c, loff_t ofs, size_t len, size_t *retlen, u_char *buf) in jffs2_flash_read() argument
962 if (!jffs2_is_writebuffered(c)) in jffs2_flash_read()
963 return mtd_read(c->mtd, ofs, len, retlen, buf); in jffs2_flash_read()
966 down_read(&c->wbuf_sem); in jffs2_flash_read()
967 ret = mtd_read(c->mtd, ofs, len, retlen, buf); in jffs2_flash_read()
987 if (!c->wbuf_pagesize || !c->wbuf_len) in jffs2_flash_read()
991 if (SECTOR_ADDR(ofs) != SECTOR_ADDR(c->wbuf_ofs)) in jffs2_flash_read()
994 if (ofs >= c->wbuf_ofs) { in jffs2_flash_read()
995 owbf = (ofs - c->wbuf_ofs); /* offset in write buffer */ in jffs2_flash_read()
996 if (owbf > c->wbuf_len) /* is read beyond write buffer ? */ in jffs2_flash_read()
998 lwbf = c->wbuf_len - owbf; /* number of bytes to copy */ in jffs2_flash_read()
1002 orbf = (c->wbuf_ofs - ofs); /* offset in read buffer */ in jffs2_flash_read()
1006 if (lwbf > c->wbuf_len) in jffs2_flash_read()
1007 lwbf = c->wbuf_len; in jffs2_flash_read()
1010 memcpy(buf+orbf,c->wbuf+owbf,lwbf); in jffs2_flash_read()
1013 up_read(&c->wbuf_sem); in jffs2_flash_read()
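The tail of jffs2_flash_read() patches the data read from flash with any fresher bytes still held in the write buffer: depending on whether the read starts before or after c->wbuf_ofs, it computes an offset into the read buffer (orbf), an offset into the write buffer (owbf) and a copy length (lwbf), then memcpy()s over the stale region. A standalone sketch of that overlap arithmetic with assumed parameter names:

#include <stdint.h>
#include <string.h>

/* buf/ofs/len describe the read that just completed from flash;
 * wbuf/wbuf_ofs/wbuf_len describe the pending write buffer. */
static void fixup_read_sketch(uint8_t *buf, uint32_t ofs, uint32_t len,
			      const uint8_t *wbuf, uint32_t wbuf_ofs,
			      uint32_t wbuf_len)
{
	uint32_t owbf, orbf, lwbf;

	if (ofs >= wbuf_ofs) {
		owbf = ofs - wbuf_ofs;		/* offset into the write buffer */
		if (owbf > wbuf_len)		/* read lies beyond buffered data */
			return;
		orbf = 0;
		lwbf = wbuf_len - owbf;		/* bytes available to copy */
		if (lwbf > len)
			lwbf = len;
	} else {
		orbf = wbuf_ofs - ofs;		/* offset into the read buffer */
		if (orbf > len)			/* buffered data lies beyond the read */
			return;
		owbf = 0;
		lwbf = len - orbf;
		if (lwbf > wbuf_len)
			lwbf = wbuf_len;
	}

	if (lwbf)
		memcpy(buf + orbf, wbuf + owbf, lwbf);
}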
1033 int jffs2_check_oob_empty(struct jffs2_sb_info *c, in jffs2_check_oob_empty() argument
1037 int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE); in jffs2_check_oob_empty()
1041 ops.ooblen = NR_OOB_SCAN_PAGES * c->oobavail; in jffs2_check_oob_empty()
1042 ops.oobbuf = c->oobbuf; in jffs2_check_oob_empty()
1046 ret = mtd_read_oob(c->mtd, jeb->offset, &ops); in jffs2_check_oob_empty()
1076 int jffs2_check_nand_cleanmarker(struct jffs2_sb_info *c, in jffs2_check_nand_cleanmarker() argument
1080 int ret, cmlen = min_t(int, c->oobavail, OOB_CM_SIZE); in jffs2_check_nand_cleanmarker()
1084 ops.oobbuf = c->oobbuf; in jffs2_check_nand_cleanmarker()
1088 ret = mtd_read_oob(c->mtd, jeb->offset, &ops); in jffs2_check_nand_cleanmarker()
1097 return !!memcmp(&oob_cleanmarker, c->oobbuf, cmlen); in jffs2_check_nand_cleanmarker()
1100 int jffs2_write_nand_cleanmarker(struct jffs2_sb_info *c, in jffs2_write_nand_cleanmarker() argument
1105 int cmlen = min_t(int, c->oobavail, OOB_CM_SIZE); in jffs2_write_nand_cleanmarker()
1113 ret = mtd_write_oob(c->mtd, jeb->offset, &ops); in jffs2_write_nand_cleanmarker()
1133 int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t bad_offset) in jffs2_write_nand_badblock() argument
1142 ret = mtd_block_markbad(c->mtd, bad_offset); in jffs2_write_nand_badblock()
1162 struct jffs2_sb_info *c = work_to_sb(work); in delayed_wbuf_sync() local
1163 struct super_block *sb = OFNI_BS_2SFFJ(c); in delayed_wbuf_sync()
1167 jffs2_flush_wbuf_gc(c, 0); in delayed_wbuf_sync()
1171 void jffs2_dirty_trigger(struct jffs2_sb_info *c) in jffs2_dirty_trigger() argument
1173 struct super_block *sb = OFNI_BS_2SFFJ(c); in jffs2_dirty_trigger()
1180 if (queue_delayed_work(system_long_wq, &c->wbuf_dwork, delay)) in jffs2_dirty_trigger()
1184 int jffs2_nand_flash_setup(struct jffs2_sb_info *c) in jffs2_nand_flash_setup() argument
1186 if (!c->mtd->oobsize) in jffs2_nand_flash_setup()
1190 c->cleanmarker_size = 0; in jffs2_nand_flash_setup()
1192 if (c->mtd->oobavail == 0) { in jffs2_nand_flash_setup()
1199 c->oobavail = c->mtd->oobavail; in jffs2_nand_flash_setup()
1202 init_rwsem(&c->wbuf_sem); in jffs2_nand_flash_setup()
1203 INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync); in jffs2_nand_flash_setup()
1204 c->wbuf_pagesize = c->mtd->writesize; in jffs2_nand_flash_setup()
1205 c->wbuf_ofs = 0xFFFFFFFF; in jffs2_nand_flash_setup()
1207 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL); in jffs2_nand_flash_setup()
1208 if (!c->wbuf) in jffs2_nand_flash_setup()
1211 c->oobbuf = kmalloc_array(NR_OOB_SCAN_PAGES, c->oobavail, GFP_KERNEL); in jffs2_nand_flash_setup()
1212 if (!c->oobbuf) { in jffs2_nand_flash_setup()
1213 kfree(c->wbuf); in jffs2_nand_flash_setup()
1218 c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL); in jffs2_nand_flash_setup()
1219 if (!c->wbuf_verify) { in jffs2_nand_flash_setup()
1220 kfree(c->oobbuf); in jffs2_nand_flash_setup()
1221 kfree(c->wbuf); in jffs2_nand_flash_setup()
1228 void jffs2_nand_flash_cleanup(struct jffs2_sb_info *c) in jffs2_nand_flash_cleanup() argument
1231 kfree(c->wbuf_verify); in jffs2_nand_flash_cleanup()
1233 kfree(c->wbuf); in jffs2_nand_flash_cleanup()
1234 kfree(c->oobbuf); in jffs2_nand_flash_cleanup()
1237 int jffs2_dataflash_setup(struct jffs2_sb_info *c) { in jffs2_dataflash_setup() argument
1238 c->cleanmarker_size = 0; /* No cleanmarkers needed */ in jffs2_dataflash_setup()
1241 init_rwsem(&c->wbuf_sem); in jffs2_dataflash_setup()
1242 INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync); in jffs2_dataflash_setup()
1243 c->wbuf_pagesize = c->mtd->erasesize; in jffs2_dataflash_setup()
1245 /* Find a suitable c->sector_size in jffs2_dataflash_setup()
1253 c->sector_size = 8 * c->mtd->erasesize; in jffs2_dataflash_setup()
1255 while (c->sector_size < 8192) { in jffs2_dataflash_setup()
1256 c->sector_size *= 2; in jffs2_dataflash_setup()
1260 c->flash_size = c->mtd->size; in jffs2_dataflash_setup()
1262 if ((c->flash_size % c->sector_size) != 0) { in jffs2_dataflash_setup()
1263 c->flash_size = (c->flash_size / c->sector_size) * c->sector_size; in jffs2_dataflash_setup()
1264 pr_warn("flash size adjusted to %dKiB\n", c->flash_size); in jffs2_dataflash_setup()
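jffs2_dataflash_setup() starts from a virtual sector size of eight erase blocks, doubles it until it reaches 8 KiB, and then trims the flash size down to a whole number of such sectors. A small worked example with a hypothetical 512-byte erase size and a 4 MiB + 2 KiB device:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t erasesize   = 512;			/* hypothetical erase size */
	uint32_t flash_size  = 4 * 1024 * 1024 + 2048;	/* 4 MiB + 2 KiB */
	uint32_t sector_size = 8 * erasesize;		/* 8 erase blocks = 4096 */

	while (sector_size < 8192)			/* enforce the 8 KiB floor */
		sector_size *= 2;			/* -> 8192 */

	if (flash_size % sector_size)			/* trim to whole sectors */
		flash_size = (flash_size / sector_size) * sector_size;

	/* prints sector_size=8192 flash_size=4194304 */
	printf("sector_size=%u flash_size=%u\n", sector_size, flash_size);
	return 0;
}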
1267 c->wbuf_ofs = 0xFFFFFFFF; in jffs2_dataflash_setup()
1268 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL); in jffs2_dataflash_setup()
1269 if (!c->wbuf) in jffs2_dataflash_setup()
1273 c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL); in jffs2_dataflash_setup()
1274 if (!c->wbuf_verify) { in jffs2_dataflash_setup()
1275 kfree(c->wbuf); in jffs2_dataflash_setup()
1281 c->wbuf_pagesize, c->sector_size); in jffs2_dataflash_setup()
1286 void jffs2_dataflash_cleanup(struct jffs2_sb_info *c) { in jffs2_dataflash_cleanup() argument
1288 kfree(c->wbuf_verify); in jffs2_dataflash_cleanup()
1290 kfree(c->wbuf); in jffs2_dataflash_cleanup()
1293 int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) { in jffs2_nor_wbuf_flash_setup() argument
1296 c->cleanmarker_size = max(16u, c->mtd->writesize); in jffs2_nor_wbuf_flash_setup()
1299 init_rwsem(&c->wbuf_sem); in jffs2_nor_wbuf_flash_setup()
1300 INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync); in jffs2_nor_wbuf_flash_setup()
1302 c->wbuf_pagesize = c->mtd->writesize; in jffs2_nor_wbuf_flash_setup()
1303 c->wbuf_ofs = 0xFFFFFFFF; in jffs2_nor_wbuf_flash_setup()
1305 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL); in jffs2_nor_wbuf_flash_setup()
1306 if (!c->wbuf) in jffs2_nor_wbuf_flash_setup()
1310 c->wbuf_verify = kmalloc(c->wbuf_pagesize, GFP_KERNEL); in jffs2_nor_wbuf_flash_setup()
1311 if (!c->wbuf_verify) { in jffs2_nor_wbuf_flash_setup()
1312 kfree(c->wbuf); in jffs2_nor_wbuf_flash_setup()
1319 void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c) { in jffs2_nor_wbuf_flash_cleanup() argument
1321 kfree(c->wbuf_verify); in jffs2_nor_wbuf_flash_cleanup()
1323 kfree(c->wbuf); in jffs2_nor_wbuf_flash_cleanup()
1326 int jffs2_ubivol_setup(struct jffs2_sb_info *c) { in jffs2_ubivol_setup() argument
1327 c->cleanmarker_size = 0; in jffs2_ubivol_setup()
1329 if (c->mtd->writesize == 1) in jffs2_ubivol_setup()
1333 init_rwsem(&c->wbuf_sem); in jffs2_ubivol_setup()
1334 INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync); in jffs2_ubivol_setup()
1336 c->wbuf_pagesize = c->mtd->writesize; in jffs2_ubivol_setup()
1337 c->wbuf_ofs = 0xFFFFFFFF; in jffs2_ubivol_setup()
1338 c->wbuf = kmalloc(c->wbuf_pagesize, GFP_KERNEL); in jffs2_ubivol_setup()
1339 if (!c->wbuf) in jffs2_ubivol_setup()
1343 c->wbuf_pagesize, c->sector_size); in jffs2_ubivol_setup()
1348 void jffs2_ubivol_cleanup(struct jffs2_sb_info *c) { in jffs2_ubivol_cleanup() argument
1349 kfree(c->wbuf); in jffs2_ubivol_cleanup()