// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2017-2018 HUAWEI, Inc. * http://www.huawei.com/ * Created by Gao Xiang */ #include #include #include #include #include #include #include "xattr.h" #define CREATE_TRACE_POINTS #include static struct kmem_cache *erofs_inode_cachep __read_mostly; void _erofs_err(struct super_block *sb, const char *function, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; pr_err("(device %s): %s: %pV", sb->s_id, function, &vaf); va_end(args); } void _erofs_info(struct super_block *sb, const char *function, const char *fmt, ...) { struct va_format vaf; va_list args; va_start(args, fmt); vaf.fmt = fmt; vaf.va = &args; pr_info("(device %s): %pV", sb->s_id, &vaf); va_end(args); } static void erofs_inode_init_once(void *ptr) { struct erofs_inode *vi = ptr; inode_init_once(&vi->vfs_inode); } static struct inode *erofs_alloc_inode(struct super_block *sb) { struct erofs_inode *vi = kmem_cache_alloc(erofs_inode_cachep, GFP_KERNEL); if (!vi) return NULL; /* zero out everything except vfs_inode */ memset(vi, 0, offsetof(struct erofs_inode, vfs_inode)); return &vi->vfs_inode; } static void i_callback(struct rcu_head *head) { struct inode *inode = container_of(head, struct inode, i_rcu); struct erofs_inode *vi = EROFS_I(inode); /* be careful of RCU symlink path */ if (inode->i_op == &erofs_fast_symlink_iops) kfree(inode->i_link); kfree(vi->xattr_shared_xattrs); kmem_cache_free(erofs_inode_cachep, vi); } static bool check_layout_compatibility(struct super_block *sb, struct erofs_super_block *dsb) { const unsigned int feature = le32_to_cpu(dsb->feature_incompat); EROFS_SB(sb)->feature_incompat = feature; /* check if current kernel meets all mandatory requirements */ if (feature & (~EROFS_ALL_FEATURE_INCOMPAT)) { erofs_err(sb, "unidentified incompatible feature %x, please upgrade kernel version", feature & ~EROFS_ALL_FEATURE_INCOMPAT); return false; } return true; } static void destroy_inode(struct inode *inode) { call_rcu(&inode->i_rcu, i_callback); } static int erofs_read_superblock(struct super_block *sb) { struct erofs_sb_info *sbi; struct page *page; struct erofs_super_block *dsb; unsigned int blkszbits; void *data; int ret; page = read_mapping_page(sb->s_bdev->bd_inode->i_mapping, 0, NULL); if (IS_ERR(page)) { erofs_err(sb, "cannot read erofs superblock"); return PTR_ERR(page); } sbi = EROFS_SB(sb); data = kmap_atomic(page); dsb = (struct erofs_super_block *)(data + EROFS_SUPER_OFFSET); ret = -EINVAL; if (le32_to_cpu(dsb->magic) != EROFS_SUPER_MAGIC_V1) { erofs_err(sb, "cannot find valid erofs superblock"); goto out; } blkszbits = dsb->blkszbits; /* 9(512 bytes) + LOG_SECTORS_PER_BLOCK == LOG_BLOCK_SIZE */ if (blkszbits != LOG_BLOCK_SIZE) { erofs_err(sb, "blksize %u isn't supported on this platform", 1 << blkszbits); goto out; } if (!check_layout_compatibility(sb, dsb)) goto out; sbi->blocks = le32_to_cpu(dsb->blocks); sbi->meta_blkaddr = le32_to_cpu(dsb->meta_blkaddr); #ifdef CONFIG_EROFS_FS_XATTR sbi->xattr_blkaddr = le32_to_cpu(dsb->xattr_blkaddr); #endif sbi->islotbits = ilog2(sizeof(struct erofs_inode_compact)); sbi->root_nid = le16_to_cpu(dsb->root_nid); sbi->inos = le64_to_cpu(dsb->inos); sbi->build_time = le64_to_cpu(dsb->build_time); sbi->build_time_nsec = le32_to_cpu(dsb->build_time_nsec); memcpy(&sb->s_uuid, dsb->uuid, sizeof(dsb->uuid)); ret = strscpy(sbi->volume_name, dsb->volume_name, sizeof(dsb->volume_name)); if (ret < 0) { /* -E2BIG */ erofs_err(sb, "bad volume name without NIL terminator"); ret = -EFSCORRUPTED; goto out; } ret = 0; out: kunmap_atomic(data); put_page(page); return ret; } #ifdef CONFIG_EROFS_FS_ZIP static int erofs_build_cache_strategy(struct super_block *sb, substring_t *args) { struct erofs_sb_info *sbi = EROFS_SB(sb); const char *cs = match_strdup(args); int err = 0; if (!cs) { erofs_err(sb, "Not enough memory to store cache strategy"); return -ENOMEM; } if (!strcmp(cs, "disabled")) { sbi->cache_strategy = EROFS_ZIP_CACHE_DISABLED; } else if (!strcmp(cs, "readahead")) { sbi->cache_strategy = EROFS_ZIP_CACHE_READAHEAD; } else if (!strcmp(cs, "readaround")) { sbi->cache_strategy = EROFS_ZIP_CACHE_READAROUND; } else { erofs_err(sb, "Unrecognized cache strategy \"%s\"", cs); err = -EINVAL; } kfree(cs); return err; } #else static int erofs_build_cache_strategy(struct super_block *sb, substring_t *args) { erofs_info(sb, "EROFS compression is disabled, so cache strategy is ignored"); return 0; } #endif /* set up default EROFS parameters */ static void erofs_default_options(struct erofs_sb_info *sbi) { #ifdef CONFIG_EROFS_FS_ZIP sbi->cache_strategy = EROFS_ZIP_CACHE_READAROUND; sbi->max_sync_decompress_pages = 3; #endif #ifdef CONFIG_EROFS_FS_XATTR set_opt(sbi, XATTR_USER); #endif #ifdef CONFIG_EROFS_FS_POSIX_ACL set_opt(sbi, POSIX_ACL); #endif } enum { Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl, Opt_cache_strategy, Opt_err }; static match_table_t erofs_tokens = { {Opt_user_xattr, "user_xattr"}, {Opt_nouser_xattr, "nouser_xattr"}, {Opt_acl, "acl"}, {Opt_noacl, "noacl"}, {Opt_cache_strategy, "cache_strategy=%s"}, {Opt_err, NULL} }; static int erofs_parse_options(struct super_block *sb, char *options) { substring_t args[MAX_OPT_ARGS]; char *p; int err; if (!options) return 0; while ((p = strsep(&options, ","))) { int token; if (!*p) continue; args[0].to = args[0].from = NULL; token = match_token(p, erofs_tokens, args); switch (token) { #ifdef CONFIG_EROFS_FS_XATTR case Opt_user_xattr: set_opt(EROFS_SB(sb), XATTR_USER); break; case Opt_nouser_xattr: clear_opt(EROFS_SB(sb), XATTR_USER); break; #else case Opt_user_xattr: erofs_info(sb, "user_xattr options not supported"); break; case Opt_nouser_xattr: erofs_info(sb, "nouser_xattr options not supported"); break; #endif #ifdef CONFIG_EROFS_FS_POSIX_ACL case Opt_acl: set_opt(EROFS_SB(sb), POSIX_ACL); break; case Opt_noacl: clear_opt(EROFS_SB(sb), POSIX_ACL); break; #else case Opt_acl: erofs_info(sb, "acl options not supported"); break; case Opt_noacl: erofs_info(sb, "noacl options not supported"); break; #endif case Opt_cache_strategy: err = erofs_build_cache_strategy(sb, args); if (err) return err; break; default: erofs_err(sb, "Unrecognized mount option \"%s\" or missing value", p); return -EINVAL; } } return 0; } #ifdef CONFIG_EROFS_FS_ZIP static const struct address_space_operations managed_cache_aops; static int erofs_managed_cache_releasepage(struct page *page, gfp_t gfp_mask) { int ret = 1; /* 0 - busy */ struct address_space *const mapping = page->mapping; DBG_BUGON(!PageLocked(page)); DBG_BUGON(mapping->a_ops != &managed_cache_aops); if (PagePrivate(page)) ret = erofs_try_to_free_cached_page(mapping, page); return ret; } static void erofs_managed_cache_invalidatepage(struct page *page, unsigned int offset, unsigned int length) { const unsigned int stop = length + offset; DBG_BUGON(!PageLocked(page)); /* Check for potential overflow in debug mode */ DBG_BUGON(stop > PAGE_SIZE || stop < length); if (offset == 0 && stop == PAGE_SIZE) while (!erofs_managed_cache_releasepage(page, GFP_NOFS)) cond_resched(); } static const struct address_space_operations managed_cache_aops = { .releasepage = erofs_managed_cache_releasepage, .invalidatepage = erofs_managed_cache_invalidatepage, }; static int erofs_init_managed_cache(struct super_block *sb) { struct erofs_sb_info *const sbi = EROFS_SB(sb); struct inode *const inode = new_inode(sb); if (!inode) return -ENOMEM; set_nlink(inode, 1); inode->i_size = OFFSET_MAX; inode->i_mapping->a_ops = &managed_cache_aops; mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS | __GFP_HIGHMEM | __GFP_MOVABLE); sbi->managed_cache = inode; return 0; } #else static int erofs_init_managed_cache(struct super_block *sb) { return 0; } #endif static int erofs_fill_super(struct super_block *sb, void *data, int silent) { struct inode *inode; struct erofs_sb_info *sbi; int err; sb->s_magic = EROFS_SUPER_MAGIC; if (!sb_set_blocksize(sb, EROFS_BLKSIZ)) { erofs_err(sb, "failed to set erofs blksize"); return -EINVAL; } sbi = kzalloc(sizeof(*sbi), GFP_KERNEL); if (!sbi) return -ENOMEM; sb->s_fs_info = sbi; err = erofs_read_superblock(sb); if (err) return err; sb->s_flags |= SB_RDONLY | SB_NOATIME; sb->s_maxbytes = MAX_LFS_FILESIZE; sb->s_time_gran = 1; sb->s_op = &erofs_sops; #ifdef CONFIG_EROFS_FS_XATTR sb->s_xattr = erofs_xattr_handlers; #endif /* set erofs default mount options */ erofs_default_options(sbi); err = erofs_parse_options(sb, data); if (err) return err; if (test_opt(sbi, POSIX_ACL)) sb->s_flags |= SB_POSIXACL; else sb->s_flags &= ~SB_POSIXACL; #ifdef CONFIG_EROFS_FS_ZIP INIT_RADIX_TREE(&sbi->workstn_tree, GFP_ATOMIC); #endif /* get the root inode */ inode = erofs_iget(sb, ROOT_NID(sbi), true); if (IS_ERR(inode)) return PTR_ERR(inode); if (!S_ISDIR(inode->i_mode)) { erofs_err(sb, "rootino(nid %llu) is not a directory(i_mode %o)", ROOT_NID(sbi), inode->i_mode); iput(inode); return -EINVAL; } sb->s_root = d_make_root(inode); if (!sb->s_root) return -ENOMEM; erofs_shrinker_register(sb); /* sb->s_umount is already locked, SB_ACTIVE and SB_BORN are not set */ err = erofs_init_managed_cache(sb); if (err) return err; cleancache_init_fs(sb); erofs_info(sb, "mounted with opts: %s, root inode @ nid %llu.", (char *)data, ROOT_NID(sbi)); return 0; } static struct dentry *erofs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { return mount_bdev(fs_type, flags, dev_name, data, erofs_fill_super); } /* * could be triggered after deactivate_locked_super() * is called, thus including umount and failed to initialize. */ static void erofs_kill_sb(struct super_block *sb) { struct erofs_sb_info *sbi; WARN_ON(sb->s_magic != EROFS_SUPER_MAGIC); kill_block_super(sb); sbi = EROFS_SB(sb); if (!sbi) return; kfree(sbi); sb->s_fs_info = NULL; } /* called when ->s_root is non-NULL */ static void erofs_put_super(struct super_block *sb) { struct erofs_sb_info *const sbi = EROFS_SB(sb); DBG_BUGON(!sbi); erofs_shrinker_unregister(sb); #ifdef CONFIG_EROFS_FS_ZIP iput(sbi->managed_cache); sbi->managed_cache = NULL; #endif } static struct file_system_type erofs_fs_type = { .owner = THIS_MODULE, .name = "erofs", .mount = erofs_mount, .kill_sb = erofs_kill_sb, .fs_flags = FS_REQUIRES_DEV, }; MODULE_ALIAS_FS("erofs"); static int __init erofs_module_init(void) { int err; erofs_check_ondisk_layout_definitions(); erofs_inode_cachep = kmem_cache_create("erofs_inode", sizeof(struct erofs_inode), 0, SLAB_RECLAIM_ACCOUNT, erofs_inode_init_once); if (!erofs_inode_cachep) { err = -ENOMEM; goto icache_err; } err = erofs_init_shrinker(); if (err) goto shrinker_err; err = z_erofs_init_zip_subsystem(); if (err) goto zip_err; err = register_filesystem(&erofs_fs_type); if (err) goto fs_err; return 0; fs_err: z_erofs_exit_zip_subsystem(); zip_err: erofs_exit_shrinker(); shrinker_err: kmem_cache_destroy(erofs_inode_cachep); icache_err: return err; } static void __exit erofs_module_exit(void) { unregister_filesystem(&erofs_fs_type); z_erofs_exit_zip_subsystem(); erofs_exit_shrinker(); /* Ensure all RCU free inodes are safe before cache is destroyed. */ rcu_barrier(); kmem_cache_destroy(erofs_inode_cachep); } /* get filesystem statistics */ static int erofs_statfs(struct dentry *dentry, struct kstatfs *buf) { struct super_block *sb = dentry->d_sb; struct erofs_sb_info *sbi = EROFS_SB(sb); u64 id = huge_encode_dev(sb->s_bdev->bd_dev); buf->f_type = sb->s_magic; buf->f_bsize = EROFS_BLKSIZ; buf->f_blocks = sbi->blocks; buf->f_bfree = buf->f_bavail = 0; buf->f_files = ULLONG_MAX; buf->f_ffree = ULLONG_MAX - sbi->inos; buf->f_namelen = EROFS_NAME_LEN; buf->f_fsid.val[0] = (u32)id; buf->f_fsid.val[1] = (u32)(id >> 32); return 0; } static int erofs_show_options(struct seq_file *seq, struct dentry *root) { struct erofs_sb_info *sbi __maybe_unused = EROFS_SB(root->d_sb); #ifdef CONFIG_EROFS_FS_XATTR if (test_opt(sbi, XATTR_USER)) seq_puts(seq, ",user_xattr"); else seq_puts(seq, ",nouser_xattr"); #endif #ifdef CONFIG_EROFS_FS_POSIX_ACL if (test_opt(sbi, POSIX_ACL)) seq_puts(seq, ",acl"); else seq_puts(seq, ",noacl"); #endif #ifdef CONFIG_EROFS_FS_ZIP if (sbi->cache_strategy == EROFS_ZIP_CACHE_DISABLED) { seq_puts(seq, ",cache_strategy=disabled"); } else if (sbi->cache_strategy == EROFS_ZIP_CACHE_READAHEAD) { seq_puts(seq, ",cache_strategy=readahead"); } else if (sbi->cache_strategy == EROFS_ZIP_CACHE_READAROUND) { seq_puts(seq, ",cache_strategy=readaround"); } else { seq_puts(seq, ",cache_strategy=(unknown)"); DBG_BUGON(1); } #endif return 0; } static int erofs_remount(struct super_block *sb, int *flags, char *data) { struct erofs_sb_info *sbi = EROFS_SB(sb); unsigned int org_mnt_opt = sbi->mount_opt; int err; DBG_BUGON(!sb_rdonly(sb)); err = erofs_parse_options(sb, data); if (err) goto out; if (test_opt(sbi, POSIX_ACL)) sb->s_flags |= SB_POSIXACL; else sb->s_flags &= ~SB_POSIXACL; *flags |= SB_RDONLY; return 0; out: sbi->mount_opt = org_mnt_opt; return err; } const struct super_operations erofs_sops = { .put_super = erofs_put_super, .alloc_inode = erofs_alloc_inode, .destroy_inode = destroy_inode, .statfs = erofs_statfs, .show_options = erofs_show_options, .remount_fs = erofs_remount, }; module_init(erofs_module_init); module_exit(erofs_module_exit); MODULE_DESCRIPTION("Enhanced ROM File System"); MODULE_AUTHOR("Gao Xiang, Chao Yu, Miao Xie, CONSUMER BG, HUAWEI Inc."); MODULE_LICENSE("GPL");