// SPDX-License-Identifier: GPL-2.0-only
/*
 * inode.c
 *
 * PURPOSE
 *  Inode handling routines for the OSTA-UDF(tm) filesystem.
 *
 * COPYRIGHT
 *  (C) 1998 Dave Boynton
 *  (C) 1998-2004 Ben Fennema
 *  (C) 1999-2000 Stelias Computing Inc
 *
 * HISTORY
 *
 *  10/04/98 dgb  Added rudimentary directory functions
 *  10/07/98      Fully working udf_block_map! It works!
 *  11/25/98      bmap altered to better support extents
 *  12/06/98 blf  partition support in udf_iget, udf_block_map
 *                and udf_read_inode
 *  12/12/98      rewrote udf_block_map to handle next extents and descs across
 *                block boundaries (which is not actually allowed)
 *  12/20/98      added support for strategy 4096
 *  03/07/99      rewrote udf_block_map (again)
 *                New funcs, inode_bmap, udf_next_aext
 *  04/19/99      Support for writing device EA's for major/minor #
 */

#include "udfdecl.h"
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/slab.h>
#include <linux/crc-itu-t.h>
#include <linux/mpage.h>
#include <linux/uio.h>
#include <linux/bio.h>

#include "udf_i.h"
#include "udf_sb.h"

#define EXTENT_MERGE_SIZE 5

#define FE_MAPPED_PERMS	(FE_PERM_U_READ | FE_PERM_U_WRITE | FE_PERM_U_EXEC | \
			 FE_PERM_G_READ | FE_PERM_G_WRITE | FE_PERM_G_EXEC | \
			 FE_PERM_O_READ | FE_PERM_O_WRITE | FE_PERM_O_EXEC)

#define FE_DELETE_PERMS	(FE_PERM_U_DELETE | FE_PERM_G_DELETE | \
			 FE_PERM_O_DELETE)

struct udf_map_rq;

static umode_t udf_convert_permissions(struct fileEntry *);
static int udf_update_inode(struct inode *, int);
static int udf_sync_inode(struct inode *inode);
static int udf_alloc_i_data(struct inode *inode, size_t size);
static int inode_getblk(struct inode *inode, struct udf_map_rq *map);
static int udf_insert_aext(struct inode *, struct extent_position,
			   struct kernel_lb_addr, uint32_t);
static void udf_split_extents(struct inode *, int *, int, udf_pblk_t,
			      struct kernel_long_ad *, int *);
static void udf_prealloc_extents(struct inode *, int, int,
				 struct kernel_long_ad *, int *);
static void udf_merge_extents(struct inode *, struct kernel_long_ad *, int *);
static int udf_update_extents(struct inode *, struct kernel_long_ad *, int,
			      int, struct extent_position *);
static int udf_get_block_wb(struct inode *inode, sector_t block,
			    struct buffer_head *bh_result, int create);

static void __udf_clear_extent_cache(struct inode *inode)
{
	struct udf_inode_info *iinfo = UDF_I(inode);

	if (iinfo->cached_extent.lstart != -1) {
		brelse(iinfo->cached_extent.epos.bh);
		iinfo->cached_extent.lstart = -1;
	}
}

/* Invalidate extent cache */
static void udf_clear_extent_cache(struct inode *inode)
{
	struct udf_inode_info *iinfo = UDF_I(inode);

	spin_lock(&iinfo->i_extent_cache_lock);
	__udf_clear_extent_cache(inode);
	spin_unlock(&iinfo->i_extent_cache_lock);
}

/* Return contents of extent cache */
static int udf_read_extent_cache(struct inode *inode, loff_t bcount,
				 loff_t *lbcount, struct extent_position *pos)
{
	struct udf_inode_info *iinfo = UDF_I(inode);
	int ret = 0;

	spin_lock(&iinfo->i_extent_cache_lock);
	if ((iinfo->cached_extent.lstart <= bcount) &&
	    (iinfo->cached_extent.lstart != -1)) {
		/* Cache hit */
		*lbcount = iinfo->cached_extent.lstart;
		memcpy(pos, &iinfo->cached_extent.epos,
		       sizeof(struct extent_position));
		if (pos->bh)
			get_bh(pos->bh);
		ret = 1;
	}
	spin_unlock(&iinfo->i_extent_cache_lock);
	return ret;
}

/* Add extent to extent cache */
static void udf_update_extent_cache(struct inode *inode, loff_t estart,
				    struct extent_position *pos)
{
	struct udf_inode_info *iinfo = UDF_I(inode);

	spin_lock(&iinfo->i_extent_cache_lock);
	/* Invalidate previously cached extent */
	__udf_clear_extent_cache(inode);
	if (pos->bh)
		get_bh(pos->bh);
	memcpy(&iinfo->cached_extent.epos, pos, sizeof(*pos));
	iinfo->cached_extent.lstart = estart;
	switch (iinfo->i_alloc_type) {
	case ICBTAG_FLAG_AD_SHORT:
		iinfo->cached_extent.epos.offset -= sizeof(struct short_ad);
		break;
	case ICBTAG_FLAG_AD_LONG:
		iinfo->cached_extent.epos.offset -= sizeof(struct long_ad);
		break;
	}
	spin_unlock(&iinfo->i_extent_cache_lock);
}

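/*
 * Final inode teardown: for unlinked inodes truncate the data and free the
 * on-disk inode; in all cases drop page cache, buffers and in-core state.
 */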
void udf_evict_inode(struct inode *inode)
{
	struct udf_inode_info *iinfo = UDF_I(inode);
	int want_delete = 0;

	if (!is_bad_inode(inode)) {
		if (!inode->i_nlink) {
			want_delete = 1;
			udf_setsize(inode, 0);
			udf_update_inode(inode, IS_SYNC(inode));
		}
		if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
		    inode->i_size != iinfo->i_lenExtents) {
			udf_warn(inode->i_sb,
				 "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n",
				 inode->i_ino, inode->i_mode,
				 (unsigned long long)inode->i_size,
				 (unsigned long long)iinfo->i_lenExtents);
		}
	}
	truncate_inode_pages_final(&inode->i_data);
	invalidate_inode_buffers(inode);
	clear_inode(inode);
	kfree(iinfo->i_data);
	iinfo->i_data = NULL;
	udf_clear_extent_cache(inode);
	if (want_delete) {
		udf_free_inode(inode);
	}
}

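/* Trim page cache and any allocated extents back to i_size after a failed write */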
static void udf_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;
	struct udf_inode_info *iinfo = UDF_I(inode);
	loff_t isize = inode->i_size;

	if (to > isize) {
		truncate_pagecache(inode, isize);
		if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
			down_write(&iinfo->i_data_sem);
			udf_clear_extent_cache(inode);
			udf_truncate_extents(inode);
			up_write(&iinfo->i_data_sem);
		}
	}
}

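/* Copy the data of an in-ICB file from its single folio back into the ICB */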
static int udf_adinicb_writepage(struct folio *folio,
				 struct writeback_control *wbc, void *data)
{
	struct inode *inode = folio->mapping->host;
	struct udf_inode_info *iinfo = UDF_I(inode);

	BUG_ON(!folio_test_locked(folio));
	BUG_ON(folio->index != 0);
	memcpy_from_file_folio(iinfo->i_data + iinfo->i_lenEAttr, folio, 0,
			       i_size_read(inode));
	folio_unlock(folio);
	mark_inode_dirty(inode);

	return 0;
}

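/*
 * Writeback entry point: block-mapped files go through mpage writeback,
 * in-ICB files are copied back into the ICB instead.
 */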
static int udf_writepages(struct address_space *mapping,
			  struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct udf_inode_info *iinfo = UDF_I(inode);

	if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB)
		return mpage_writepages(mapping, wbc, udf_get_block_wb);
	return write_cache_pages(mapping, wbc, udf_adinicb_writepage, NULL);
}

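/* Fill a folio of an in-ICB file from the data area stored in the inode */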
static void udf_adinicb_read_folio(struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct udf_inode_info *iinfo = UDF_I(inode);
	loff_t isize = i_size_read(inode);

	folio_fill_tail(folio, 0, iinfo->i_data + iinfo->i_lenEAttr, isize);
	folio_mark_uptodate(folio);
}

static int udf_read_folio(struct file *file, struct folio *folio)
{
	struct udf_inode_info *iinfo = UDF_I(file_inode(file));

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
		udf_adinicb_read_folio(folio);
		folio_unlock(folio);
		return 0;
	}
	return mpage_read_folio(folio, udf_get_block);
}

static void udf_readahead(struct readahead_control *rac)
{
	struct udf_inode_info *iinfo = UDF_I(rac->mapping->host);

	/*
	 * No readahead needed for in-ICB files and udf_get_block() would get
	 * confused for such file anyway.
	 */
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
		return;

	mpage_readahead(rac, udf_get_block);
}

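/*
 * Prepare for a write: in-ICB files always use the first folio, everything
 * else goes through block_write_begin().
 */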
static int udf_write_begin(struct file *file, struct address_space *mapping,
			   loff_t pos, unsigned len,
			   struct folio **foliop, void **fsdata)
{
	struct udf_inode_info *iinfo = UDF_I(file_inode(file));
	struct folio *folio;
	int ret;

	if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
		ret = block_write_begin(mapping, pos, len, foliop,
					udf_get_block);
		if (unlikely(ret))
			udf_write_failed(mapping, pos + len);
		return ret;
	}
	if (WARN_ON_ONCE(pos >= PAGE_SIZE))
		return -EIO;
	folio = __filemap_get_folio(mapping, 0, FGP_WRITEBEGIN,
				    mapping_gfp_mask(mapping));
	if (IS_ERR(folio))
		return PTR_ERR(folio);
	*foliop = folio;
	if (!folio_test_uptodate(folio))
		udf_adinicb_read_folio(folio);
	return 0;
}

static int udf_write_end(struct file *file, struct address_space *mapping,
			 loff_t pos, unsigned len, unsigned copied,
			 struct folio *folio, void *fsdata)
{
	struct inode *inode = file_inode(file);
	loff_t last_pos;

	if (UDF_I(inode)->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB)
		return generic_write_end(file, mapping, pos, len, copied, folio,
					 fsdata);
	last_pos = pos + copied;
	if (last_pos > inode->i_size)
		i_size_write(inode, last_pos);
	folio_mark_dirty(folio);
	folio_unlock(folio);
	folio_put(folio);

	return copied;
}

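/*
 * Direct I/O goes through blockdev_direct_IO(); blocks allocated beyond
 * i_size by a failed write are trimmed again by udf_write_failed().
 */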
static ssize_t udf_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	ssize_t ret;

	/* Fallback to buffered IO for in-ICB files */
	if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
		return 0;
	ret = blockdev_direct_IO(iocb, inode, iter, udf_get_block);
	if (unlikely(ret < 0 && iov_iter_rw(iter) == WRITE))
		udf_write_failed(mapping, iocb->ki_pos + count);
	return ret;
}

static sector_t udf_bmap(struct address_space *mapping, sector_t block)
{
	struct udf_inode_info *iinfo = UDF_I(mapping->host);

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
		return -EINVAL;
	return generic_block_bmap(mapping, block, udf_get_block);
}

const struct address_space_operations udf_aops = {
	.dirty_folio	= block_dirty_folio,
	.invalidate_folio = block_invalidate_folio,
	.read_folio	= udf_read_folio,
	.readahead	= udf_readahead,
	.writepages	= udf_writepages,
	.write_begin	= udf_write_begin,
	.write_end	= udf_write_end,
	.direct_IO	= udf_direct_IO,
	.bmap		= udf_bmap,
	.migrate_folio	= buffer_migrate_folio,
};

/*
 * Expand file stored in ICB to a normal one-block-file
 *
 * This function requires i_mutex held
 */
int udf_expand_file_adinicb(struct inode *inode)
{
	struct folio *folio;
	struct udf_inode_info *iinfo = UDF_I(inode);
	int err;

	WARN_ON_ONCE(!inode_is_locked(inode));
	if (!iinfo->i_lenAlloc) {
		down_write(&iinfo->i_data_sem);
		if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
			iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
		else
			iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
		up_write(&iinfo->i_data_sem);
		mark_inode_dirty(inode);
		return 0;
	}

	folio = __filemap_get_folio(inode->i_mapping, 0,
			FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_KERNEL);
	if (IS_ERR(folio))
		return PTR_ERR(folio);

	if (!folio_test_uptodate(folio))
		udf_adinicb_read_folio(folio);
	down_write(&iinfo->i_data_sem);
	memset(iinfo->i_data + iinfo->i_lenEAttr, 0x00,
	       iinfo->i_lenAlloc);
	iinfo->i_lenAlloc = 0;
	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_USE_SHORT_AD))
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_SHORT;
	else
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_LONG;
	folio_mark_dirty(folio);
	folio_unlock(folio);
	up_write(&iinfo->i_data_sem);
	err = filemap_fdatawrite(inode->i_mapping);
	if (err) {
		/* Restore everything back so that we don't lose data... */
		folio_lock(folio);
		down_write(&iinfo->i_data_sem);
		memcpy_from_folio(iinfo->i_data + iinfo->i_lenEAttr,
				  folio, 0, inode->i_size);
		folio_unlock(folio);
		iinfo->i_alloc_type = ICBTAG_FLAG_AD_IN_ICB;
		iinfo->i_lenAlloc = inode->i_size;
		up_write(&iinfo->i_data_sem);
	}
	folio_put(folio);
	mark_inode_dirty(inode);

	return err;
}

#define UDF_MAP_CREATE		0x01	/* Mapping can allocate new blocks */
#define UDF_MAP_NOPREALLOC	0x02	/* Do not preallocate blocks */

#define UDF_BLK_MAPPED	0x01	/* Block was successfully mapped */
#define UDF_BLK_NEW	0x02	/* Block was freshly allocated */

struct udf_map_rq {
	sector_t lblk;
	udf_pblk_t pblk;
	int iflags;		/* UDF_MAP_ flags determining behavior */
	int oflags;		/* UDF_BLK_ flags reporting results */
};

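/*
 * Map a logical block of the inode: lookup only under i_data_sem held for
 * reading, or allocate via inode_getblk() when UDF_MAP_CREATE is set.
 */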
static int udf_map_block(struct inode *inode, struct udf_map_rq *map)
{
	int ret;
	struct udf_inode_info *iinfo = UDF_I(inode);

	if (WARN_ON_ONCE(iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB))
		return -EFSCORRUPTED;

	map->oflags = 0;
	if (!(map->iflags & UDF_MAP_CREATE)) {
		struct kernel_lb_addr eloc;
		uint32_t elen;
		sector_t offset;
		struct extent_position epos = {};
		int8_t etype;

		down_read(&iinfo->i_data_sem);
		ret = inode_bmap(inode, map->lblk, &epos, &eloc, &elen, &offset,
				 &etype);
		if (ret < 0)
			goto out_read;
		if (ret > 0 && etype == (EXT_RECORDED_ALLOCATED >> 30)) {
			map->pblk = udf_get_lb_pblock(inode->i_sb, &eloc,
						      offset);
			map->oflags |= UDF_BLK_MAPPED;
			ret = 0;
		}
out_read:
		up_read(&iinfo->i_data_sem);
		brelse(epos.bh);

		return ret;
	}

	down_write(&iinfo->i_data_sem);
	/*
	 * Block beyond EOF and prealloc extents? Just discard preallocation
	 * as it is not useful and complicates things.
	 */
	if (((loff_t)map->lblk) << inode->i_blkbits >= iinfo->i_lenExtents)
		udf_discard_prealloc(inode);
	udf_clear_extent_cache(inode);
	ret = inode_getblk(inode, map);
	up_write(&iinfo->i_data_sem);
	return ret;
}

static int __udf_get_block(struct inode *inode, sector_t block,
			   struct buffer_head *bh_result, int flags)
{
	int err;
	struct udf_map_rq map = {
		.lblk = block,
		.iflags = flags,
	};

	err = udf_map_block(inode, &map);
	if (err < 0)
		return err;
	if (map.oflags & UDF_BLK_MAPPED) {
		map_bh(bh_result, inode->i_sb, map.pblk);
		if (map.oflags & UDF_BLK_NEW)
			set_buffer_new(bh_result);
	}
	return 0;
}

int udf_get_block(struct inode *inode, sector_t block,
		  struct buffer_head *bh_result, int create)
{
	int flags = create ? UDF_MAP_CREATE : 0;

	/*
	 * We preallocate blocks only for regular files. It also makes sense
	 * for directories but there's a problem when to drop the
	 * preallocation. We might use some delayed work for that but I feel
	 * it's overengineering for a filesystem like UDF.
	 */
	if (!S_ISREG(inode->i_mode))
		flags |= UDF_MAP_NOPREALLOC;
	return __udf_get_block(inode, block, bh_result, flags);
}

/*
 * We shouldn't be allocating blocks on page writeback since we allocate them
 * on page fault. We can spot dirty buffers without allocated blocks though
 * when truncate expands file. These however don't have valid data so we can
 * safely ignore them. So never allocate blocks from page writeback.
 */
static int udf_get_block_wb(struct inode *inode, sector_t block,
			    struct buffer_head *bh_result, int create)
{
	return __udf_get_block(inode, block, bh_result, 0);
}

/* Extend the file with new blocks totaling 'new_block_bytes',
 * return the number of extents added
 */
static int udf_do_extend_file(struct inode *inode,
			      struct extent_position *last_pos,
			      struct kernel_long_ad *last_ext,
			      loff_t new_block_bytes)
{
	uint32_t add;
	int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
	struct super_block *sb = inode->i_sb;
	struct udf_inode_info *iinfo;
	int err;

	/* The previous extent is fake and we should not extend by anything
	 * - there's nothing to do... */
	if (!new_block_bytes && fake)
		return 0;

	iinfo = UDF_I(inode);
	/* Round the last extent up to a multiple of block size */
	if (last_ext->extLength & (sb->s_blocksize - 1)) {
		last_ext->extLength =
			(last_ext->extLength & UDF_EXTENT_FLAG_MASK) |
			(((last_ext->extLength & UDF_EXTENT_LENGTH_MASK) +
			  sb->s_blocksize - 1) & ~(sb->s_blocksize - 1));
		iinfo->i_lenExtents =
			(iinfo->i_lenExtents + sb->s_blocksize - 1) &
			~(sb->s_blocksize - 1);
	}

	add = 0;
	/* Can we merge with the previous extent? */
	if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
	    EXT_NOT_RECORDED_NOT_ALLOCATED) {
		add = (1 << 30) - sb->s_blocksize -
			(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
		if (add > new_block_bytes)
			add = new_block_bytes;
		new_block_bytes -= add;
		last_ext->extLength += add;
	}

	if (fake) {
		err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
				   last_ext->extLength, 1);
		if (err < 0)
			goto out_err;
		count++;
	} else {
		struct kernel_lb_addr tmploc;
		uint32_t tmplen;
		int8_t tmptype;

		udf_write_aext(inode, last_pos, &last_ext->extLocation,
			       last_ext->extLength, 1);

		/*
		 * We've rewritten the last extent. If we are going to add
		 * more extents, we may need to enter possible following
		 * empty indirect extent.
		 */
		if (new_block_bytes) {
			err = udf_next_aext(inode, last_pos, &tmploc, &tmplen,
					    &tmptype, 0);
			if (err < 0)
				goto out_err;
		}
	}
	iinfo->i_lenExtents += add;

	/* Managed to do everything necessary? */
	if (!new_block_bytes)
		goto out;

	/* All further extents will be NOT_RECORDED_NOT_ALLOCATED */
	last_ext->extLocation.logicalBlockNum = 0;
	last_ext->extLocation.partitionReferenceNum = 0;
	add = (1 << 30) - sb->s_blocksize;
	last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED | add;

	/* Create enough extents to cover the whole hole */
	while (new_block_bytes > add) {
		new_block_bytes -= add;
		err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
				   last_ext->extLength, 1);
		if (err)
			goto out_err;
		iinfo->i_lenExtents += add;
		count++;
	}
	if (new_block_bytes) {
		last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
			new_block_bytes;
		err = udf_add_aext(inode, last_pos, &last_ext->extLocation,
				   last_ext->extLength, 1);
		if (err)
			goto out_err;
		iinfo->i_lenExtents += new_block_bytes;
		count++;
	}

out:
	/* last_pos should point to the last written extent... */
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		last_pos->offset -= sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		last_pos->offset -= sizeof(struct long_ad);
	else
		return -EIO;

	return count;
out_err:
	/* Remove extents we've created so far */
	udf_clear_extent_cache(inode);
	udf_truncate_extents(inode);
	return err;
}

/* Extend the final block of the file to final_block_len bytes */
static void udf_do_extend_final_block(struct inode *inode,
				      struct extent_position *last_pos,
				      struct kernel_long_ad *last_ext,
				      uint32_t new_elen)
{
	uint32_t added_bytes;

	/*
	 * Extent already large enough? It may be already rounded up to block
	 * size...
	 */
	if (new_elen <= (last_ext->extLength & UDF_EXTENT_LENGTH_MASK))
		return;
	added_bytes = new_elen - (last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
	last_ext->extLength += added_bytes;
	UDF_I(inode)->i_lenExtents += added_bytes;

	udf_write_aext(inode, last_pos, &last_ext->extLocation,
		       last_ext->extLength, 1);
}

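/*
 * Extend the file to 'newsize' by growing the last extent and appending
 * not-recorded-not-allocated extents for any hole in between.
 */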
static int udf_extend_file(struct inode *inode, loff_t newsize)
{
	struct extent_position epos;
	struct kernel_lb_addr eloc;
	uint32_t elen;
	int8_t etype;
	struct super_block *sb = inode->i_sb;
	sector_t first_block = newsize >> sb->s_blocksize_bits, offset;
	loff_t new_elen;
	int adsize;
	struct udf_inode_info *iinfo = UDF_I(inode);
	struct kernel_long_ad extent;
	int err = 0;
	bool within_last_ext;

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		BUG();

	down_write(&iinfo->i_data_sem);
	/*
	 * When creating hole in file, just don't bother with preserving
	 * preallocation. It likely won't be very useful anyway.
	 */
	udf_discard_prealloc(inode);

	err = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset, &etype);
	if (err < 0)
		goto out;
	within_last_ext = (err == 1);
	/* We don't expect extents past EOF... */
	WARN_ON_ONCE(within_last_ext &&
		     elen > ((loff_t)offset + 1) << inode->i_blkbits);

	if ((!epos.bh && epos.offset == udf_file_entry_alloc_offset(inode)) ||
	    (epos.bh && epos.offset == sizeof(struct allocExtDesc))) {
		/* File has no extents at all or has empty last
		 * indirect extent! Create a fake extent... */
		extent.extLocation.logicalBlockNum = 0;
		extent.extLocation.partitionReferenceNum = 0;
		extent.extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
	} else {
		epos.offset -= adsize;
		err = udf_next_aext(inode, &epos, &extent.extLocation,
				    &extent.extLength, &etype, 0);
		if (err <= 0)
			goto out;
		extent.extLength |= etype << 30;
	}

	new_elen = ((loff_t)offset << inode->i_blkbits) |
		(newsize & (sb->s_blocksize - 1));

	/* File has extent covering the new size (could happen when extending
	 * inside a block)?
	 */
	if (within_last_ext) {
		/* Extending file within the last file block */
		udf_do_extend_final_block(inode, &epos, &extent, new_elen);
	} else {
		err = udf_do_extend_file(inode, &epos, &extent, new_elen);
	}

	if (err < 0)
		goto out;
	err = 0;
out:
	brelse(epos.bh);
	up_write(&iinfo->i_data_sem);
	return err;
}

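/*
 * Find or allocate the physical block backing map->lblk and update the
 * inode's extent list. Called with i_data_sem held for writing.
 */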
static int inode_getblk(struct inode *inode, struct udf_map_rq *map)
{
	struct kernel_long_ad laarr[EXTENT_MERGE_SIZE];
	struct extent_position prev_epos, cur_epos, next_epos;
	int count = 0, startnum = 0, endnum = 0;
	uint32_t elen = 0, tmpelen;
	struct kernel_lb_addr eloc, tmpeloc;
	int c = 1;
	loff_t lbcount = 0, b_off = 0;
	udf_pblk_t newblocknum;
	sector_t offset = 0;
	int8_t etype, tmpetype;
	struct udf_inode_info *iinfo = UDF_I(inode);
	udf_pblk_t goal = 0, pgoal = iinfo->i_location.logicalBlockNum;
	int lastblock = 0;
	bool isBeyondEOF = false;
	int ret = 0;

	prev_epos.offset = udf_file_entry_alloc_offset(inode);
	prev_epos.block = iinfo->i_location;
	prev_epos.bh = NULL;
	cur_epos = next_epos = prev_epos;
	b_off = (loff_t)map->lblk << inode->i_sb->s_blocksize_bits;

	/* find the extent which contains the block we are looking for.
	   alternate between laarr[0] and laarr[1] for locations of the
	   current extent, and the previous extent */
	do {
		if (prev_epos.bh != cur_epos.bh) {
			brelse(prev_epos.bh);
			get_bh(cur_epos.bh);
			prev_epos.bh = cur_epos.bh;
		}
		if (cur_epos.bh != next_epos.bh) {
			brelse(cur_epos.bh);
			get_bh(next_epos.bh);
			cur_epos.bh = next_epos.bh;
		}

		lbcount += elen;

		prev_epos.block = cur_epos.block;
		cur_epos.block = next_epos.block;

		prev_epos.offset = cur_epos.offset;
		cur_epos.offset = next_epos.offset;

		ret = udf_next_aext(inode, &next_epos, &eloc, &elen, &etype, 1);
		if (ret < 0) {
			goto out_free;
		} else if (ret == 0) {
			isBeyondEOF = true;
			break;
		}

		c = !c;

		laarr[c].extLength = (etype << 30) | elen;
		laarr[c].extLocation = eloc;

		if (etype != (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
			pgoal = eloc.logicalBlockNum +
				((elen + inode->i_sb->s_blocksize - 1) >>
				 inode->i_sb->s_blocksize_bits);

		count++;
	} while (lbcount + elen <= b_off);

	b_off -= lbcount;
	offset = b_off >> inode->i_sb->s_blocksize_bits;
	/*
	 * Move prev_epos and cur_epos into indirect extent if we are at
	 * the pointer to it
	 */
	ret = udf_next_aext(inode, &prev_epos, &tmpeloc, &tmpelen, &tmpetype, 0);
	if (ret < 0)
		goto out_free;
	ret = udf_next_aext(inode, &cur_epos, &tmpeloc, &tmpelen, &tmpetype, 0);
	if (ret < 0)
		goto out_free;

	/* if the extent is allocated and recorded, return the block
	   if the extent is not a multiple of the blocksize, round up */

	if (!isBeyondEOF && etype == (EXT_RECORDED_ALLOCATED >> 30)) {
		if (elen & (inode->i_sb->s_blocksize - 1)) {
			elen = EXT_RECORDED_ALLOCATED |
				((elen + inode->i_sb->s_blocksize - 1) &
				 ~(inode->i_sb->s_blocksize - 1));
			iinfo->i_lenExtents =
				ALIGN(iinfo->i_lenExtents,
				      inode->i_sb->s_blocksize);
			udf_write_aext(inode, &cur_epos, &eloc, elen, 1);
		}
		map->oflags = UDF_BLK_MAPPED;
		map->pblk = udf_get_lb_pblock(inode->i_sb, &eloc, offset);
		goto out_free;
	}

	/* Are we beyond EOF and preallocated extent? */
	if (isBeyondEOF) {
		loff_t hole_len;

		if (count) {
			if (c)
				laarr[0] = laarr[1];
			startnum = 1;
		} else {
			/* Create a fake extent when there's not one */
			memset(&laarr[0].extLocation, 0x00,
			       sizeof(struct kernel_lb_addr));
			laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
			/* Will udf_do_extend_file() create real extent from
			   a fake one? */
			startnum = (offset > 0);
		}
		/* Create extents for the hole between EOF and offset */
		hole_len = (loff_t)offset << inode->i_blkbits;
		ret = udf_do_extend_file(inode, &prev_epos, laarr, hole_len);
		if (ret < 0)
			goto out_free;
		c = 0;
		offset = 0;
		count += ret;
		/*
		 * Is there any real extent? - otherwise we overwrite the fake
		 * one...
		 */
		if (count)
			c = !c;
		laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
			inode->i_sb->s_blocksize;
		memset(&laarr[c].extLocation, 0x00,
		       sizeof(struct kernel_lb_addr));
		count++;
		endnum = c + 1;
		lastblock = 1;
	} else {
		endnum = startnum = ((count > 2) ? 2 : count);

		/* if the current extent is in position 0,
		   swap it with the previous */
		if (!c && count != 1) {
			laarr[2] = laarr[0];
			laarr[0] = laarr[1];
			laarr[1] = laarr[2];
			c = 1;
		}

		/* if the current block is located in an extent,
		   read the next extent */
		ret = udf_next_aext(inode, &next_epos, &eloc, &elen, &etype, 0);
		if (ret > 0) {
			laarr[c + 1].extLength = (etype << 30) | elen;
			laarr[c + 1].extLocation = eloc;
			count++;
			startnum++;
			endnum++;
		} else if (ret == 0)
			lastblock = 1;
		else
			goto out_free;
	}

	/* if the current extent is not recorded but allocated, get the
	 * block in the extent corresponding to the requested block */
	if ((laarr[c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30))
		newblocknum = laarr[c].extLocation.logicalBlockNum + offset;
	else { /* otherwise, allocate a new block */
		if (iinfo->i_next_alloc_block == map->lblk)
			goal = iinfo->i_next_alloc_goal;

		if (!goal) {
			if (!(goal = pgoal)) /* XXX: what was intended here? */
				goal = iinfo->i_location.logicalBlockNum + 1;
		}

		newblocknum = udf_new_block(inode->i_sb, inode,
				iinfo->i_location.partitionReferenceNum,
				goal, &ret);
		if (!newblocknum)
			goto out_free;
		if (isBeyondEOF)
			iinfo->i_lenExtents += inode->i_sb->s_blocksize;
	}

	/* if the extent the requested block is located in contains multiple
	 * blocks, split the extent into at most three extents. blocks prior
	 * to requested block, requested block, and blocks after requested
	 * block */
	udf_split_extents(inode, &c, offset, newblocknum, laarr, &endnum);

	if (!(map->iflags & UDF_MAP_NOPREALLOC))
		udf_prealloc_extents(inode, c, lastblock, laarr, &endnum);

	/* merge any continuous blocks in laarr */
	udf_merge_extents(inode, laarr, &endnum);

	/* write back the new extents, inserting new extents if the new number
	 * of extents is greater than the old number, and deleting extents if
	 * the new number of extents is less than the old number */
	ret = udf_update_extents(inode, laarr, startnum, endnum, &prev_epos);
	if (ret < 0)
		goto out_free;

	map->pblk = udf_get_pblock(inode->i_sb, newblocknum,
				iinfo->i_location.partitionReferenceNum, 0);
	if (!map->pblk) {
		ret = -EFSCORRUPTED;
		goto out_free;
	}
	map->oflags = UDF_BLK_NEW | UDF_BLK_MAPPED;
	iinfo->i_next_alloc_block = map->lblk + 1;
	iinfo->i_next_alloc_goal = newblocknum + 1;
	inode_set_ctime_current(inode);

	if (IS_SYNC(inode))
		udf_sync_inode(inode);
	else
		mark_inode_dirty(inode);
	ret = 0;
out_free:
	brelse(prev_epos.bh);
	brelse(cur_epos.bh);
	brelse(next_epos.bh);
	return ret;
}

2007-07-19 08:47:43 +00:00
|
|
|
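/*
 * Reader's note (informal sketch, not part of the on-disk format text): the
 * extent helpers below all treat kernel_long_ad.extLength as a packed field -
 * bits 30-31 carry the extent type (recorded/allocated and so on) and bits
 * 0-29 carry the byte length, roughly:
 *
 *	type = extLength >> 30;
 *	len  = extLength & UDF_EXTENT_LENGTH_MASK;
 *	blen = (len + blocksize - 1) >> blocksize_bits;	 (length in blocks)
 *
 * so, for example, a one-block (2048 byte) unrecorded extent would be encoded
 * as EXT_NOT_RECORDED_NOT_ALLOCATED | 2048.
 */
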
static void udf_split_extents(struct inode *inode, int *c, int offset,
			      udf_pblk_t newblocknum,
			      struct kernel_long_ad *laarr, int *endnum)
{
	unsigned long blocksize = inode->i_sb->s_blocksize;
	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;

	if ((laarr[*c].extLength >> 30) == (EXT_NOT_RECORDED_ALLOCATED >> 30) ||
	    (laarr[*c].extLength >> 30) ==
				(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
		int curr = *c;
		int blen = ((laarr[curr].extLength & UDF_EXTENT_LENGTH_MASK) +
			    blocksize - 1) >> blocksize_bits;
		int8_t etype = (laarr[curr].extLength >> 30);

		if (blen == 1)
			;
		else if (!offset || blen == offset + 1) {
			laarr[curr + 2] = laarr[curr + 1];
			laarr[curr + 1] = laarr[curr];
		} else {
			laarr[curr + 3] = laarr[curr + 1];
			laarr[curr + 2] = laarr[curr + 1] = laarr[curr];
		}

		if (offset) {
			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
				udf_free_blocks(inode->i_sb, inode,
						&laarr[curr].extLocation,
						0, offset);
				laarr[curr].extLength =
					EXT_NOT_RECORDED_NOT_ALLOCATED |
					(offset << blocksize_bits);
				laarr[curr].extLocation.logicalBlockNum = 0;
				laarr[curr].extLocation.
						partitionReferenceNum = 0;
			} else
				laarr[curr].extLength = (etype << 30) |
					(offset << blocksize_bits);
			curr++;
			(*c)++;
			(*endnum)++;
		}

		laarr[curr].extLocation.logicalBlockNum = newblocknum;
		if (etype == (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))
			laarr[curr].extLocation.partitionReferenceNum =
				UDF_I(inode)->i_location.partitionReferenceNum;
		laarr[curr].extLength = EXT_RECORDED_ALLOCATED |
			blocksize;
		curr++;

		if (blen != offset + 1) {
			if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30))
				laarr[curr].extLocation.logicalBlockNum +=
								offset + 1;
			laarr[curr].extLength = (etype << 30) |
				((blen - (offset + 1)) << blocksize_bits);
			curr++;
			(*endnum)++;
		}
	}
}

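/*
 * Illustration (hypothetical numbers): udf_split_extents() above turns a
 * single 4-block unrecorded extent into up to three laarr entries when the
 * second block of it (offset 1) is being materialized as physical block N:
 *
 *	before:	[ NOT_RECORDED, 4 blocks ]
 *	after:	[ NOT_RECORDED, 1 block ] [ RECORDED @ N, 1 block ]
 *		[ NOT_RECORDED, 2 blocks ]
 *
 * *c is advanced to point at the newly recorded middle entry and *endnum
 * grows by the number of extra entries created.
 */
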
static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
				 struct kernel_long_ad *laarr,
				 int *endnum)
{
	int start, length = 0, currlength = 0, i;

	if (*endnum >= (c + 1)) {
		if (!lastblock)
			return;
		else
			start = c;
	} else {
		if ((laarr[c + 1].extLength >> 30) ==
		    (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
			start = c + 1;
			length = currlength =
				(((laarr[c + 1].extLength &
					UDF_EXTENT_LENGTH_MASK) +
				  inode->i_sb->s_blocksize - 1) >>
				  inode->i_sb->s_blocksize_bits);
		} else
			start = c;
	}

	for (i = start + 1; i <= *endnum; i++) {
		if (i == *endnum) {
			if (lastblock)
				length += UDF_DEFAULT_PREALLOC_BLOCKS;
		} else if ((laarr[i].extLength >> 30) ==
				(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) {
			length += (((laarr[i].extLength &
						UDF_EXTENT_LENGTH_MASK) +
				    inode->i_sb->s_blocksize - 1) >>
				    inode->i_sb->s_blocksize_bits);
		} else
			break;
	}

	if (length) {
		int next = laarr[start].extLocation.logicalBlockNum +
			(((laarr[start].extLength & UDF_EXTENT_LENGTH_MASK) +
			  inode->i_sb->s_blocksize - 1) >>
			  inode->i_sb->s_blocksize_bits);
		int numalloc = udf_prealloc_blocks(inode->i_sb, inode,
				laarr[start].extLocation.partitionReferenceNum,
				next, (UDF_DEFAULT_PREALLOC_BLOCKS > length ?
				length : UDF_DEFAULT_PREALLOC_BLOCKS) -
				currlength);
		if (numalloc) {
			if (start == (c + 1))
				laarr[start].extLength +=
					(numalloc <<
					 inode->i_sb->s_blocksize_bits);
			else {
				memmove(&laarr[c + 2], &laarr[c + 1],
					sizeof(struct long_ad) * (*endnum - (c + 1)));
				(*endnum)++;
				laarr[c + 1].extLocation.logicalBlockNum = next;
				laarr[c + 1].extLocation.partitionReferenceNum =
					laarr[c].extLocation.
							partitionReferenceNum;
				laarr[c + 1].extLength =
					EXT_NOT_RECORDED_ALLOCATED |
					(numalloc <<
					 inode->i_sb->s_blocksize_bits);
				start = c + 1;
			}

			for (i = start + 1; numalloc && i < *endnum; i++) {
				int elen = ((laarr[i].extLength &
						UDF_EXTENT_LENGTH_MASK) +
					    inode->i_sb->s_blocksize - 1) >>
					    inode->i_sb->s_blocksize_bits;

				if (elen > numalloc) {
					laarr[i].extLength -=
						(numalloc <<
						 inode->i_sb->s_blocksize_bits);
					numalloc = 0;
				} else {
					numalloc -= elen;
					if (*endnum > (i + 1))
						memmove(&laarr[i],
							&laarr[i + 1],
							sizeof(struct long_ad) *
							(*endnum - (i + 1)));
					i--;
					(*endnum)--;
				}
			}
			UDF_I(inode)->i_lenExtents +=
				numalloc << inode->i_sb->s_blocksize_bits;
		}
	}
}

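/*
 * Note (informal summary): udf_merge_extents() below coalesces neighbouring
 * laarr entries of the same type when they are contiguous, e.g. two RECORDED
 * extents [blk 100, 2048 bytes] + [blk 101, 2048 bytes] become one
 * [blk 100, 4096 bytes], as long as the combined length still fits in the
 * 30-bit length field. A NOT_RECORDED_ALLOCATED extent followed by a
 * NOT_RECORDED_NOT_ALLOCATED one additionally has its backing blocks freed
 * before the two are merged.
 */
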
static void udf_merge_extents(struct inode *inode, struct kernel_long_ad *laarr,
			      int *endnum)
{
	int i;
	unsigned long blocksize = inode->i_sb->s_blocksize;
	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;

	for (i = 0; i < (*endnum - 1); i++) {
		struct kernel_long_ad *li /*l[i]*/ = &laarr[i];
		struct kernel_long_ad *lip1 /*l[i plus 1]*/ = &laarr[i + 1];

		if (((li->extLength >> 30) == (lip1->extLength >> 30)) &&
			(((li->extLength >> 30) ==
				(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30)) ||
			((lip1->extLocation.logicalBlockNum -
			  li->extLocation.logicalBlockNum) ==
			(((li->extLength & UDF_EXTENT_LENGTH_MASK) +
			blocksize - 1) >> blocksize_bits)))) {

			if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
			     (lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
			     blocksize - 1) <= UDF_EXTENT_LENGTH_MASK) {
				li->extLength = lip1->extLength +
					(((li->extLength &
						UDF_EXTENT_LENGTH_MASK) +
					 blocksize - 1) & ~(blocksize - 1));
				if (*endnum > (i + 2))
					memmove(&laarr[i + 1], &laarr[i + 2],
						sizeof(struct long_ad) *
						(*endnum - (i + 2)));
				i--;
				(*endnum)--;
			}
		} else if (((li->extLength >> 30) ==
				(EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
			   ((lip1->extLength >> 30) ==
				(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))) {
			udf_free_blocks(inode->i_sb, inode, &li->extLocation, 0,
					((li->extLength &
					  UDF_EXTENT_LENGTH_MASK) +
					 blocksize - 1) >> blocksize_bits);
			li->extLocation.logicalBlockNum = 0;
			li->extLocation.partitionReferenceNum = 0;

			if (((li->extLength & UDF_EXTENT_LENGTH_MASK) +
			     (lip1->extLength & UDF_EXTENT_LENGTH_MASK) +
			     blocksize - 1) & ~UDF_EXTENT_LENGTH_MASK) {
				lip1->extLength = (lip1->extLength -
						   (li->extLength &
						    UDF_EXTENT_LENGTH_MASK) +
						   UDF_EXTENT_LENGTH_MASK) &
						   ~(blocksize - 1);
				li->extLength = (li->extLength &
						 UDF_EXTENT_FLAG_MASK) +
						(UDF_EXTENT_LENGTH_MASK + 1) -
						blocksize;
			} else {
				li->extLength = lip1->extLength +
					(((li->extLength &
						UDF_EXTENT_LENGTH_MASK) +
					  blocksize - 1) & ~(blocksize - 1));
				if (*endnum > (i + 2))
					memmove(&laarr[i + 1], &laarr[i + 2],
						sizeof(struct long_ad) *
						(*endnum - (i + 2)));
				i--;
				(*endnum)--;
			}
		} else if ((li->extLength >> 30) ==
					(EXT_NOT_RECORDED_ALLOCATED >> 30)) {
			udf_free_blocks(inode->i_sb, inode,
					&li->extLocation, 0,
					((li->extLength &
						UDF_EXTENT_LENGTH_MASK) +
					 blocksize - 1) >> blocksize_bits);
			li->extLocation.logicalBlockNum = 0;
			li->extLocation.partitionReferenceNum = 0;
			li->extLength = (li->extLength &
						UDF_EXTENT_LENGTH_MASK) |
						EXT_NOT_RECORDED_NOT_ALLOCATED;
		}
	}
}

static int udf_update_extents(struct inode *inode, struct kernel_long_ad *laarr,
			      int startnum, int endnum,
			      struct extent_position *epos)
{
	int start = 0, i;
	struct kernel_lb_addr tmploc;
	uint32_t tmplen;
	int8_t tmpetype;
	int err;

	if (startnum > endnum) {
		for (i = 0; i < (startnum - endnum); i++)
			udf_delete_aext(inode, *epos);
	} else if (startnum < endnum) {
		for (i = 0; i < (endnum - startnum); i++) {
			err = udf_insert_aext(inode, *epos,
					      laarr[i].extLocation,
					      laarr[i].extLength);
			/*
			 * If we fail here, we are likely corrupting the extent
			 * list and leaking blocks. At least stop early to
			 * limit the damage.
			 */
			if (err < 0)
				return err;
			err = udf_next_aext(inode, epos, &laarr[i].extLocation,
					    &laarr[i].extLength, &tmpetype, 1);
			if (err < 0)
				return err;
			start++;
		}
	}

	for (i = start; i < endnum; i++) {
		err = udf_next_aext(inode, epos, &tmploc, &tmplen, &tmpetype, 0);
		if (err < 0)
			return err;

		udf_write_aext(inode, epos, &laarr[i].extLocation,
			       laarr[i].extLength, 1);
	}
	return 0;
}

struct buffer_head *udf_bread(struct inode *inode, udf_pblk_t block,
			      int create, int *err)
{
	struct buffer_head *bh = NULL;
	struct udf_map_rq map = {
		.lblk = block,
		.iflags = UDF_MAP_NOPREALLOC | (create ? UDF_MAP_CREATE : 0),
	};

	*err = udf_map_block(inode, &map);
	if (*err || !(map.oflags & UDF_BLK_MAPPED))
		return NULL;

	bh = sb_getblk(inode->i_sb, map.pblk);
	if (!bh) {
		*err = -ENOMEM;
		return NULL;
	}
	if (map.oflags & UDF_BLK_NEW) {
		lock_buffer(bh);
		memset(bh->b_data, 0x00, inode->i_sb->s_blocksize);
		set_buffer_uptodate(bh);
		unlock_buffer(bh);
		mark_buffer_dirty_inode(bh, inode);
		return bh;
	}

	if (bh_read(bh, 0) >= 0)
		return bh;

	brelse(bh);
	*err = -EIO;
	return NULL;
}

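/*
 * Usage sketch for udf_bread() above (illustrative only, error handling
 * trimmed): a caller would typically do
 *
 *	int err;
 *	struct buffer_head *bh = udf_bread(inode, blk, 1, &err);
 *
 *	if (!bh)
 *		return err;	 (err stays 0 if the block simply is not mapped)
 *	... use bh->b_data ...
 *	brelse(bh);
 *
 * A freshly created block comes back zero-filled, uptodate and already
 * marked dirty against the inode.
 */
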
int udf_setsize(struct inode *inode, loff_t newsize)
{
	int err = 0;
	struct udf_inode_info *iinfo;
	unsigned int bsize = i_blocksize(inode);

	if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
	      S_ISLNK(inode->i_mode)))
		return -EINVAL;

	iinfo = UDF_I(inode);
	if (newsize > inode->i_size) {
		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
			if (bsize >=
			    (udf_file_entry_alloc_offset(inode) + newsize)) {
				down_write(&iinfo->i_data_sem);
				iinfo->i_lenAlloc = newsize;
				up_write(&iinfo->i_data_sem);
				goto set_size;
			}
			err = udf_expand_file_adinicb(inode);
			if (err)
				return err;
		}
		err = udf_extend_file(inode, newsize);
		if (err)
			return err;
set_size:
		truncate_setsize(inode, newsize);
	} else {
		if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
			down_write(&iinfo->i_data_sem);
			udf_clear_extent_cache(inode);
			memset(iinfo->i_data + iinfo->i_lenEAttr + newsize,
			       0x00, bsize - newsize -
			       udf_file_entry_alloc_offset(inode));
			iinfo->i_lenAlloc = newsize;
			truncate_setsize(inode, newsize);
			up_write(&iinfo->i_data_sem);
			goto update_time;
		}
		err = block_truncate_page(inode->i_mapping, newsize,
					  udf_get_block);
		if (err)
			return err;
		truncate_setsize(inode, newsize);
		down_write(&iinfo->i_data_sem);
		udf_clear_extent_cache(inode);
		err = udf_truncate_extents(inode);
		up_write(&iinfo->i_data_sem);
		if (err)
			return err;
	}
update_time:
	inode_set_mtime_to_ts(inode, inode_set_ctime_current(inode));
	if (IS_SYNC(inode))
		udf_sync_inode(inode);
	else
		mark_inode_dirty(inode);
	return err;
}

/*
 * Maximum length of linked list formed by ICB hierarchy. The chosen number is
 * arbitrary - just that we hopefully don't limit any real use of rewritten
 * inode on write-once media but avoid looping for too long on corrupted media.
 */
#define UDF_MAX_ICB_NESTING 1024

static int udf_read_inode(struct inode *inode, bool hidden_inode)
{
	struct buffer_head *bh = NULL;
	struct fileEntry *fe;
	struct extendedFileEntry *efe;
	uint16_t ident;
	struct udf_inode_info *iinfo = UDF_I(inode);
	struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
	struct kernel_lb_addr *iloc = &iinfo->i_location;
	unsigned int link_count;
	unsigned int indirections = 0;
	int bs = inode->i_sb->s_blocksize;
	int ret = -EIO;
	uint32_t uid, gid;
	struct timespec64 ts;

reread:
	if (iloc->partitionReferenceNum >= sbi->s_partitions) {
		udf_debug("partition reference: %u > logical volume partitions: %u\n",
			  iloc->partitionReferenceNum, sbi->s_partitions);
		return -EIO;
	}

	if (iloc->logicalBlockNum >=
	    sbi->s_partmaps[iloc->partitionReferenceNum].s_partition_len) {
		udf_debug("block=%u, partition=%u out of range\n",
			  iloc->logicalBlockNum, iloc->partitionReferenceNum);
		return -EIO;
	}

	/*
	 * Set defaults, but the inode is still incomplete!
	 * Note: get_new_inode() sets the following on a new inode:
	 *      i_sb = sb
	 *      i_no = ino
	 *      i_flags = sb->s_flags
	 *      i_state = 0
	 * clean_inode(): zero fills and sets
	 *      i_count = 1
	 *      i_nlink = 1
	 *      i_op = NULL;
	 */
	bh = udf_read_ptagged(inode->i_sb, iloc, 0, &ident);
	if (!bh) {
		udf_err(inode->i_sb, "(ino %lu) failed !bh\n", inode->i_ino);
		return -EIO;
	}

	if (ident != TAG_IDENT_FE && ident != TAG_IDENT_EFE &&
	    ident != TAG_IDENT_USE) {
		udf_err(inode->i_sb, "(ino %lu) failed ident=%u\n",
			inode->i_ino, ident);
		goto out;
	}

	fe = (struct fileEntry *)bh->b_data;
	efe = (struct extendedFileEntry *)bh->b_data;

	if (fe->icbTag.strategyType == cpu_to_le16(4096)) {
		struct buffer_head *ibh;

		ibh = udf_read_ptagged(inode->i_sb, iloc, 1, &ident);
		if (ident == TAG_IDENT_IE && ibh) {
			struct kernel_lb_addr loc;
			struct indirectEntry *ie;

			ie = (struct indirectEntry *)ibh->b_data;
			loc = lelb_to_cpu(ie->indirectICB.extLocation);

			if (ie->indirectICB.extLength) {
				brelse(ibh);
				memcpy(&iinfo->i_location, &loc,
				       sizeof(struct kernel_lb_addr));
				if (++indirections > UDF_MAX_ICB_NESTING) {
					udf_err(inode->i_sb,
						"too many ICBs in ICB hierarchy"
						" (max %d supported)\n",
						UDF_MAX_ICB_NESTING);
					goto out;
				}
				brelse(bh);
				goto reread;
			}
		}
		brelse(ibh);
	} else if (fe->icbTag.strategyType != cpu_to_le16(4)) {
		udf_err(inode->i_sb, "unsupported strategy type: %u\n",
			le16_to_cpu(fe->icbTag.strategyType));
		goto out;
	}
	if (fe->icbTag.strategyType == cpu_to_le16(4))
		iinfo->i_strat4096 = 0;
	else /* if (fe->icbTag.strategyType == cpu_to_le16(4096)) */
		iinfo->i_strat4096 = 1;

	iinfo->i_alloc_type = le16_to_cpu(fe->icbTag.flags) &
							ICBTAG_FLAG_AD_MASK;
	if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_SHORT &&
	    iinfo->i_alloc_type != ICBTAG_FLAG_AD_LONG &&
	    iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
		ret = -EIO;
		goto out;
	}
	iinfo->i_hidden = hidden_inode;
	iinfo->i_unique = 0;
	iinfo->i_lenEAttr = 0;
	iinfo->i_lenExtents = 0;
	iinfo->i_lenAlloc = 0;
	iinfo->i_next_alloc_block = 0;
	iinfo->i_next_alloc_goal = 0;
	if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_EFE)) {
		iinfo->i_efe = 1;
		iinfo->i_use = 0;
		ret = udf_alloc_i_data(inode, bs -
					sizeof(struct extendedFileEntry));
		if (ret)
			goto out;
		memcpy(iinfo->i_data,
		       bh->b_data + sizeof(struct extendedFileEntry),
		       bs - sizeof(struct extendedFileEntry));
	} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_FE)) {
		iinfo->i_efe = 0;
		iinfo->i_use = 0;
		ret = udf_alloc_i_data(inode, bs - sizeof(struct fileEntry));
		if (ret)
			goto out;
		memcpy(iinfo->i_data,
		       bh->b_data + sizeof(struct fileEntry),
		       bs - sizeof(struct fileEntry));
	} else if (fe->descTag.tagIdent == cpu_to_le16(TAG_IDENT_USE)) {
		iinfo->i_efe = 0;
		iinfo->i_use = 1;
		iinfo->i_lenAlloc = le32_to_cpu(
				((struct unallocSpaceEntry *)bh->b_data)->
				 lengthAllocDescs);
		ret = udf_alloc_i_data(inode, bs -
					sizeof(struct unallocSpaceEntry));
		if (ret)
			goto out;
		memcpy(iinfo->i_data,
		       bh->b_data + sizeof(struct unallocSpaceEntry),
		       bs - sizeof(struct unallocSpaceEntry));
		return 0;
	}

	ret = -EIO;
	read_lock(&sbi->s_cred_lock);
	uid = le32_to_cpu(fe->uid);
	if (uid == UDF_INVALID_ID ||
	    UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_SET))
		inode->i_uid = sbi->s_uid;
	else
		i_uid_write(inode, uid);

	gid = le32_to_cpu(fe->gid);
	if (gid == UDF_INVALID_ID ||
	    UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_SET))
		inode->i_gid = sbi->s_gid;
	else
		i_gid_write(inode, gid);

	if (fe->icbTag.fileType != ICBTAG_FILE_TYPE_DIRECTORY &&
			sbi->s_fmode != UDF_INVALID_MODE)
		inode->i_mode = sbi->s_fmode;
	else if (fe->icbTag.fileType == ICBTAG_FILE_TYPE_DIRECTORY &&
			sbi->s_dmode != UDF_INVALID_MODE)
		inode->i_mode = sbi->s_dmode;
	else
		inode->i_mode = udf_convert_permissions(fe);
	inode->i_mode &= ~sbi->s_umask;
	iinfo->i_extraPerms = le32_to_cpu(fe->permissions) & ~FE_MAPPED_PERMS;

	read_unlock(&sbi->s_cred_lock);

	link_count = le16_to_cpu(fe->fileLinkCount);
	if (!link_count) {
		if (!hidden_inode) {
			ret = -ESTALE;
			goto out;
		}
		link_count = 1;
	}
	set_nlink(inode, link_count);

	inode->i_size = le64_to_cpu(fe->informationLength);
	iinfo->i_lenExtents = inode->i_size;

	if (iinfo->i_efe == 0) {
		inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
			(inode->i_sb->s_blocksize_bits - 9);

		udf_disk_stamp_to_time(&ts, fe->accessTime);
		inode_set_atime_to_ts(inode, ts);
		udf_disk_stamp_to_time(&ts, fe->modificationTime);
		inode_set_mtime_to_ts(inode, ts);
		udf_disk_stamp_to_time(&ts, fe->attrTime);
		inode_set_ctime_to_ts(inode, ts);

		iinfo->i_unique = le64_to_cpu(fe->uniqueID);
		iinfo->i_lenEAttr = le32_to_cpu(fe->lengthExtendedAttr);
		iinfo->i_lenAlloc = le32_to_cpu(fe->lengthAllocDescs);
		iinfo->i_checkpoint = le32_to_cpu(fe->checkpoint);
		iinfo->i_streamdir = 0;
		iinfo->i_lenStreams = 0;
	} else {
		inode->i_blocks = le64_to_cpu(efe->logicalBlocksRecorded) <<
		    (inode->i_sb->s_blocksize_bits - 9);

		udf_disk_stamp_to_time(&ts, efe->accessTime);
		inode_set_atime_to_ts(inode, ts);
		udf_disk_stamp_to_time(&ts, efe->modificationTime);
		inode_set_mtime_to_ts(inode, ts);
		udf_disk_stamp_to_time(&ts, efe->attrTime);
		inode_set_ctime_to_ts(inode, ts);
		udf_disk_stamp_to_time(&iinfo->i_crtime, efe->createTime);

		iinfo->i_unique = le64_to_cpu(efe->uniqueID);
		iinfo->i_lenEAttr = le32_to_cpu(efe->lengthExtendedAttr);
		iinfo->i_lenAlloc = le32_to_cpu(efe->lengthAllocDescs);
		iinfo->i_checkpoint = le32_to_cpu(efe->checkpoint);

		/* Named streams */
		iinfo->i_streamdir = (efe->streamDirectoryICB.extLength != 0);
		iinfo->i_locStreamdir =
			lelb_to_cpu(efe->streamDirectoryICB.extLocation);
		iinfo->i_lenStreams = le64_to_cpu(efe->objectSize);
		if (iinfo->i_lenStreams >= inode->i_size)
			iinfo->i_lenStreams -= inode->i_size;
		else
			iinfo->i_lenStreams = 0;
	}
	inode->i_generation = iinfo->i_unique;

	/*
	 * Sanity check length of allocation descriptors and extended attrs to
	 * avoid integer overflows
	 */
	if (iinfo->i_lenEAttr > bs || iinfo->i_lenAlloc > bs)
		goto out;
	/* Now do exact checks */
	if (udf_file_entry_alloc_offset(inode) + iinfo->i_lenAlloc > bs)
		goto out;
	/* Sanity checks for files in ICB so that we don't get confused later */
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) {
		/*
		 * For file in ICB data is stored in allocation descriptor
		 * so sizes should match
		 */
		if (iinfo->i_lenAlloc != inode->i_size)
			goto out;
		/* File in ICB has to fit in there... */
		if (inode->i_size > bs - udf_file_entry_alloc_offset(inode))
			goto out;
	}

	switch (fe->icbTag.fileType) {
	case ICBTAG_FILE_TYPE_DIRECTORY:
		inode->i_op = &udf_dir_inode_operations;
		inode->i_fop = &udf_dir_operations;
		inode->i_mode |= S_IFDIR;
		inc_nlink(inode);
		break;
	case ICBTAG_FILE_TYPE_REALTIME:
	case ICBTAG_FILE_TYPE_REGULAR:
	case ICBTAG_FILE_TYPE_UNDEF:
	case ICBTAG_FILE_TYPE_VAT20:
		inode->i_data.a_ops = &udf_aops;
		inode->i_op = &udf_file_inode_operations;
		inode->i_fop = &udf_file_operations;
		inode->i_mode |= S_IFREG;
		break;
	case ICBTAG_FILE_TYPE_BLOCK:
		inode->i_mode |= S_IFBLK;
		break;
	case ICBTAG_FILE_TYPE_CHAR:
		inode->i_mode |= S_IFCHR;
		break;
	case ICBTAG_FILE_TYPE_FIFO:
		init_special_inode(inode, inode->i_mode | S_IFIFO, 0);
		break;
	case ICBTAG_FILE_TYPE_SOCKET:
		init_special_inode(inode, inode->i_mode | S_IFSOCK, 0);
		break;
	case ICBTAG_FILE_TYPE_SYMLINK:
		inode->i_data.a_ops = &udf_symlink_aops;
		inode->i_op = &udf_symlink_inode_operations;
		inode_nohighmem(inode);
		inode->i_mode = S_IFLNK | 0777;
		break;
	case ICBTAG_FILE_TYPE_MAIN:
		udf_debug("METADATA FILE-----\n");
		break;
	case ICBTAG_FILE_TYPE_MIRROR:
		udf_debug("METADATA MIRROR FILE-----\n");
		break;
	case ICBTAG_FILE_TYPE_BITMAP:
		udf_debug("METADATA BITMAP FILE-----\n");
		break;
	default:
		udf_err(inode->i_sb, "(ino %lu) failed unknown file type=%u\n",
			inode->i_ino, fe->icbTag.fileType);
		goto out;
	}
	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		struct deviceSpec *dsea =
			(struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
		if (dsea) {
			init_special_inode(inode, inode->i_mode,
				MKDEV(le32_to_cpu(dsea->majorDeviceIdent),
				      le32_to_cpu(dsea->minorDeviceIdent)));
			/* Developer ID ??? */
		} else
			goto out;
	}
	ret = 0;
out:
	brelse(bh);
	return ret;
}

static int udf_alloc_i_data(struct inode *inode, size_t size)
{
	struct udf_inode_info *iinfo = UDF_I(inode);
	iinfo->i_data = kmalloc(size, GFP_KERNEL);
	if (!iinfo->i_data)
		return -ENOMEM;
	return 0;
}

static umode_t udf_convert_permissions(struct fileEntry *fe)
{
	umode_t mode;
	uint32_t permissions;
	uint32_t flags;

	permissions = le32_to_cpu(fe->permissions);
	flags = le16_to_cpu(fe->icbTag.flags);

	mode =	((permissions) & 0007) |
		((permissions >> 2) & 0070) |
		((permissions >> 4) & 0700) |
		((flags & ICBTAG_FLAG_SETUID) ? S_ISUID : 0) |
		((flags & ICBTAG_FLAG_SETGID) ? S_ISGID : 0) |
		((flags & ICBTAG_FLAG_STICKY) ? S_ISVTX : 0);

	return mode;
}

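/*
 * Worked example for the conversion above (numbers made up): with
 * fe->permissions granting owner read/write/execute (bits 10-12 = 0x1c00)
 * and group read (bit 7 = 0x0080), the shifts yield
 *
 *	((0x1c00 >> 4) & 0700) | ((0x0080 >> 2) & 0070) = 0700 | 0040 = 0740
 *
 * i.e. rwxr-----, before the superblock umask/fmode/dmode overrides in
 * udf_read_inode() are applied.
 */
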
void udf_update_extra_perms(struct inode *inode, umode_t mode)
{
	struct udf_inode_info *iinfo = UDF_I(inode);

	/*
	 * UDF 2.01 sec. 3.3.3.3 Note 2:
	 * In Unix, delete permission tracks write
	 */
	iinfo->i_extraPerms &= ~FE_DELETE_PERMS;
	if (mode & 0200)
		iinfo->i_extraPerms |= FE_PERM_U_DELETE;
	if (mode & 0020)
		iinfo->i_extraPerms |= FE_PERM_G_DELETE;
	if (mode & 0002)
		iinfo->i_extraPerms |= FE_PERM_O_DELETE;
}

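/*
 * Example (informal): a chmod to 0644 on a regular file would arrive here
 * with mode = 0644, so only FE_PERM_U_DELETE ends up set while the group and
 * other delete bits are cleared, mirroring the "delete tracks write" rule
 * quoted above.
 */
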
int udf_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	return udf_update_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
}

static int udf_sync_inode(struct inode *inode)
{
	return udf_update_inode(inode, 1);
}

static void udf_adjust_time(struct udf_inode_info *iinfo, struct timespec64 time)
{
	if (iinfo->i_crtime.tv_sec > time.tv_sec ||
	    (iinfo->i_crtime.tv_sec == time.tv_sec &&
	     iinfo->i_crtime.tv_nsec > time.tv_nsec))
		iinfo->i_crtime = time;
}

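/*
 * Note: udf_adjust_time() above only ever moves i_crtime backwards - if any
 * of atime/mtime/ctime is older than the recorded creation time, the
 * creation time is clamped down to it, so the createTime stamp written by
 * udf_update_inode() below never postdates the other stamps written with it.
 */
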
static int udf_update_inode(struct inode *inode, int do_sync)
{
	struct buffer_head *bh = NULL;
	struct fileEntry *fe;
	struct extendedFileEntry *efe;
	uint64_t lb_recorded;
	uint32_t udfperms;
	uint16_t icbflags;
	uint16_t crclen;
	int err = 0;
	struct udf_sb_info *sbi = UDF_SB(inode->i_sb);
	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
	struct udf_inode_info *iinfo = UDF_I(inode);

	bh = sb_getblk(inode->i_sb,
			udf_get_lb_pblock(inode->i_sb, &iinfo->i_location, 0));
	if (!bh) {
		udf_debug("getblk failure\n");
		return -EIO;
	}

	lock_buffer(bh);
	memset(bh->b_data, 0, inode->i_sb->s_blocksize);
	fe = (struct fileEntry *)bh->b_data;
	efe = (struct extendedFileEntry *)bh->b_data;

	if (iinfo->i_use) {
		struct unallocSpaceEntry *use =
			(struct unallocSpaceEntry *)bh->b_data;

		use->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
		memcpy(bh->b_data + sizeof(struct unallocSpaceEntry),
		       iinfo->i_data, inode->i_sb->s_blocksize -
					sizeof(struct unallocSpaceEntry));
		use->descTag.tagIdent = cpu_to_le16(TAG_IDENT_USE);
		crclen = sizeof(struct unallocSpaceEntry);

		goto finish;
	}

	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
		fe->uid = cpu_to_le32(UDF_INVALID_ID);
	else
		fe->uid = cpu_to_le32(i_uid_read(inode));

	if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_GID_FORGET))
		fe->gid = cpu_to_le32(UDF_INVALID_ID);
	else
		fe->gid = cpu_to_le32(i_gid_read(inode));

	udfperms = ((inode->i_mode & 0007)) |
		   ((inode->i_mode & 0070) << 2) |
		   ((inode->i_mode & 0700) << 4);

	udfperms |= iinfo->i_extraPerms;
	fe->permissions = cpu_to_le32(udfperms);

	if (S_ISDIR(inode->i_mode) && inode->i_nlink > 0)
		fe->fileLinkCount = cpu_to_le16(inode->i_nlink - 1);
	else {
		if (iinfo->i_hidden)
			fe->fileLinkCount = cpu_to_le16(0);
		else
			fe->fileLinkCount = cpu_to_le16(inode->i_nlink);
	}

	fe->informationLength = cpu_to_le64(inode->i_size);

	if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
		struct regid *eid;
		struct deviceSpec *dsea =
			(struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
		if (!dsea) {
			dsea = (struct deviceSpec *)
				udf_add_extendedattr(inode,
						     sizeof(struct deviceSpec) +
						     sizeof(struct regid), 12, 0x3);
			dsea->attrType = cpu_to_le32(12);
			dsea->attrSubtype = 1;
			dsea->attrLength = cpu_to_le32(
						sizeof(struct deviceSpec) +
						sizeof(struct regid));
			dsea->impUseLength = cpu_to_le32(sizeof(struct regid));
		}
		eid = (struct regid *)dsea->impUse;
		memset(eid, 0, sizeof(*eid));
		strcpy(eid->ident, UDF_ID_DEVELOPER);
		eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
		eid->identSuffix[1] = UDF_OS_ID_LINUX;
		dsea->majorDeviceIdent = cpu_to_le32(imajor(inode));
		dsea->minorDeviceIdent = cpu_to_le32(iminor(inode));
	}

	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
		lb_recorded = 0; /* No extents => no blocks! */
	else
		lb_recorded =
			(inode->i_blocks + (1 << (blocksize_bits - 9)) - 1) >>
			(blocksize_bits - 9);

	if (iinfo->i_efe == 0) {
		memcpy(bh->b_data + sizeof(struct fileEntry),
		       iinfo->i_data,
		       inode->i_sb->s_blocksize - sizeof(struct fileEntry));
		fe->logicalBlocksRecorded = cpu_to_le64(lb_recorded);

		udf_time_to_disk_stamp(&fe->accessTime, inode_get_atime(inode));
		udf_time_to_disk_stamp(&fe->modificationTime, inode_get_mtime(inode));
		udf_time_to_disk_stamp(&fe->attrTime, inode_get_ctime(inode));
		memset(&(fe->impIdent), 0, sizeof(struct regid));
		strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
		fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
		fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
		fe->uniqueID = cpu_to_le64(iinfo->i_unique);
		fe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
		fe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
		fe->checkpoint = cpu_to_le32(iinfo->i_checkpoint);
		fe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_FE);
		crclen = sizeof(struct fileEntry);
	} else {
		memcpy(bh->b_data + sizeof(struct extendedFileEntry),
		       iinfo->i_data,
		       inode->i_sb->s_blocksize -
					sizeof(struct extendedFileEntry));
		efe->objectSize =
			cpu_to_le64(inode->i_size + iinfo->i_lenStreams);
		efe->logicalBlocksRecorded = cpu_to_le64(lb_recorded);

		if (iinfo->i_streamdir) {
			struct long_ad *icb_lad = &efe->streamDirectoryICB;

			icb_lad->extLocation =
				cpu_to_lelb(iinfo->i_locStreamdir);
			icb_lad->extLength =
				cpu_to_le32(inode->i_sb->s_blocksize);
		}

		udf_adjust_time(iinfo, inode_get_atime(inode));
		udf_adjust_time(iinfo, inode_get_mtime(inode));
		udf_adjust_time(iinfo, inode_get_ctime(inode));

		udf_time_to_disk_stamp(&efe->accessTime,
				       inode_get_atime(inode));
		udf_time_to_disk_stamp(&efe->modificationTime,
				       inode_get_mtime(inode));
		udf_time_to_disk_stamp(&efe->createTime, iinfo->i_crtime);
		udf_time_to_disk_stamp(&efe->attrTime, inode_get_ctime(inode));

		memset(&(efe->impIdent), 0, sizeof(efe->impIdent));
		strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
		efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
		efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
		efe->uniqueID = cpu_to_le64(iinfo->i_unique);
		efe->lengthExtendedAttr = cpu_to_le32(iinfo->i_lenEAttr);
		efe->lengthAllocDescs = cpu_to_le32(iinfo->i_lenAlloc);
		efe->checkpoint = cpu_to_le32(iinfo->i_checkpoint);
		efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
		crclen = sizeof(struct extendedFileEntry);
	}

finish:
	if (iinfo->i_strat4096) {
		fe->icbTag.strategyType = cpu_to_le16(4096);
		fe->icbTag.strategyParameter = cpu_to_le16(1);
		fe->icbTag.numEntries = cpu_to_le16(2);
	} else {
		fe->icbTag.strategyType = cpu_to_le16(4);
		fe->icbTag.numEntries = cpu_to_le16(1);
	}

	if (iinfo->i_use)
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_USE;
	else if (S_ISDIR(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
	else if (S_ISREG(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
	else if (S_ISLNK(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SYMLINK;
	else if (S_ISBLK(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_BLOCK;
	else if (S_ISCHR(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_CHAR;
	else if (S_ISFIFO(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_FIFO;
	else if (S_ISSOCK(inode->i_mode))
		fe->icbTag.fileType = ICBTAG_FILE_TYPE_SOCKET;

	icbflags =	iinfo->i_alloc_type |
			((inode->i_mode & S_ISUID) ? ICBTAG_FLAG_SETUID : 0) |
			((inode->i_mode & S_ISGID) ? ICBTAG_FLAG_SETGID : 0) |
			((inode->i_mode & S_ISVTX) ? ICBTAG_FLAG_STICKY : 0) |
			(le16_to_cpu(fe->icbTag.flags) &
				~(ICBTAG_FLAG_AD_MASK | ICBTAG_FLAG_SETUID |
				ICBTAG_FLAG_SETGID | ICBTAG_FLAG_STICKY));

	fe->icbTag.flags = cpu_to_le16(icbflags);
	if (sbi->s_udfrev >= 0x0200)
		fe->descTag.descVersion = cpu_to_le16(3);
	else
		fe->descTag.descVersion = cpu_to_le16(2);
	fe->descTag.tagSerialNum = cpu_to_le16(sbi->s_serial_number);
	fe->descTag.tagLocation = cpu_to_le32(
					iinfo->i_location.logicalBlockNum);
	crclen += iinfo->i_lenEAttr + iinfo->i_lenAlloc - sizeof(struct tag);
	fe->descTag.descCRCLength = cpu_to_le16(crclen);
	fe->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)fe + sizeof(struct tag),
						    crclen));
	fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag);

	set_buffer_uptodate(bh);
	unlock_buffer(bh);

	/* write the data blocks */
	mark_buffer_dirty(bh);
	if (do_sync) {
		sync_dirty_buffer(bh);
		if (buffer_write_io_error(bh)) {
			udf_warn(inode->i_sb, "IO error syncing udf inode [%08lx]\n",
				 inode->i_ino);
			err = -EIO;
		}
	}
	brelse(bh);

	return err;
}

struct inode *__udf_iget(struct super_block *sb, struct kernel_lb_addr *ino,
			 bool hidden_inode)
{
	unsigned long block = udf_get_lb_pblock(sb, ino, 0);
	struct inode *inode = iget_locked(sb, block);
	int err;

	if (!inode)
		return ERR_PTR(-ENOMEM);

	if (!(inode->i_state & I_NEW)) {
		if (UDF_I(inode)->i_hidden != hidden_inode) {
			iput(inode);
			return ERR_PTR(-EFSCORRUPTED);
		}
		return inode;
	}

	memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr));
	err = udf_read_inode(inode, hidden_inode);
	if (err < 0) {
		iget_failed(inode);
		return ERR_PTR(err);
	}
	unlock_new_inode(inode);

	return inode;
}

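/*
 * Sketch of the intended calling convention (the thin wrappers live outside
 * this file, in udfdecl.h if memory serves): ordinary lookups go through
 * udf_iget(sb, ino) == __udf_iget(sb, ino, false), while internal metadata
 * inodes such as the VAT use udf_iget_special(sb, ino) == __udf_iget(sb,
 * ino, true). Mixing the two for the same on-disk inode is what the
 * -EFSCORRUPTED check above guards against.
 */
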
int udf_setup_indirect_aext(struct inode *inode, udf_pblk_t block,
			    struct extent_position *epos)
{
	struct super_block *sb = inode->i_sb;
	struct buffer_head *bh;
	struct allocExtDesc *aed;
	struct extent_position nepos;
	struct kernel_lb_addr neloc;
	int ver, adsize;
	int err = 0;

	if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		return -EIO;

	neloc.logicalBlockNum = block;
	neloc.partitionReferenceNum = epos->block.partitionReferenceNum;

	bh = sb_getblk(sb, udf_get_lb_pblock(sb, &neloc, 0));
	if (!bh)
		return -EIO;
	lock_buffer(bh);
	memset(bh->b_data, 0x00, sb->s_blocksize);
	set_buffer_uptodate(bh);
	unlock_buffer(bh);
	mark_buffer_dirty_inode(bh, inode);

	aed = (struct allocExtDesc *)(bh->b_data);
	if (!UDF_QUERY_FLAG(sb, UDF_FLAG_STRICT)) {
		aed->previousAllocExtLocation =
				cpu_to_le32(epos->block.logicalBlockNum);
	}
	aed->lengthAllocDescs = cpu_to_le32(0);
	if (UDF_SB(sb)->s_udfrev >= 0x0200)
		ver = 3;
	else
		ver = 2;
	udf_new_tag(bh->b_data, TAG_IDENT_AED, ver, 1, block,
		    sizeof(struct tag));

	nepos.block = neloc;
	nepos.offset = sizeof(struct allocExtDesc);
	nepos.bh = bh;

	/*
	 * Do we have to copy current last extent to make space for indirect
	 * one?
	 */
	if (epos->offset + adsize > sb->s_blocksize) {
		struct kernel_lb_addr cp_loc;
		uint32_t cp_len;
		int8_t cp_type;

		epos->offset -= adsize;
		err = udf_current_aext(inode, epos, &cp_loc, &cp_len, &cp_type, 0);
		if (err <= 0)
			goto err_out;
		cp_len |= ((uint32_t)cp_type) << 30;

		__udf_add_aext(inode, &nepos, &cp_loc, cp_len, 1);
		udf_write_aext(inode, epos, &nepos.block,
			       sb->s_blocksize | EXT_NEXT_EXTENT_ALLOCDESCS, 0);
	} else {
		__udf_add_aext(inode, epos, &nepos.block,
			       sb->s_blocksize | EXT_NEXT_EXTENT_ALLOCDESCS, 0);
	}

	brelse(epos->bh);
	*epos = nepos;

	return 0;
err_out:
	brelse(bh);
	return err;
}

/*
|
|
|
|
* Append extent at the given position - should be the first free one in inode
|
|
|
|
* / indirect extent. This function assumes there is enough space in the inode
|
|
|
|
* or indirect extent. Use udf_add_aext() if you didn't check for this before.
|
|
|
|
*/
|
|
|
|
int __udf_add_aext(struct inode *inode, struct extent_position *epos,
|
|
|
|
struct kernel_lb_addr *eloc, uint32_t elen, int inc)
|
|
|
|
{
|
|
|
|
struct udf_inode_info *iinfo = UDF_I(inode);
|
|
|
|
struct allocExtDesc *aed;
|
|
|
|
int adsize;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2008-02-08 12:20:44 +00:00
|
|
|
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
|
2008-10-15 10:28:03 +00:00
|
|
|
adsize = sizeof(struct short_ad);
|
2008-02-08 12:20:44 +00:00
|
|
|
else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
|
2008-10-15 10:28:03 +00:00
|
|
|
adsize = sizeof(struct long_ad);
|
2005-04-16 22:20:36 +00:00
|
|
|
else
|
2010-10-21 22:30:26 +00:00
|
|
|
return -EIO;
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2015-12-23 13:21:13 +00:00
|
|
|
if (!epos->bh) {
|
|
|
|
WARN_ON(iinfo->i_lenAlloc !=
|
|
|
|
epos->offset - udf_file_entry_alloc_offset(inode));
|
|
|
|
} else {
|
|
|
|
aed = (struct allocExtDesc *)epos->bh->b_data;
|
|
|
|
WARN_ON(le32_to_cpu(aed->lengthAllocDescs) !=
|
|
|
|
epos->offset - sizeof(struct allocExtDesc));
|
|
|
|
WARN_ON(epos->offset + adsize > inode->i_sb->s_blocksize);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2010-10-21 22:30:26 +00:00
|
|
|
udf_write_aext(inode, epos, eloc, elen, inc);
|
2005-04-16 22:20:36 +00:00
|
|
|
|
2007-07-19 08:47:43 +00:00
|
|
|
if (!epos->bh) {
|
2008-02-08 12:20:44 +00:00
|
|
|
iinfo->i_lenAlloc += adsize;
|
2005-04-16 22:20:36 +00:00
|
|
|
mark_inode_dirty(inode);
|
2007-07-19 08:47:43 +00:00
|
|
|
} else {
|
2007-05-08 07:35:14 +00:00
|
|
|
aed = (struct allocExtDesc *)epos->bh->b_data;
|
2008-01-30 21:03:57 +00:00
|
|
|
le32_add_cpu(&aed->lengthAllocDescs, adsize);
|
2008-02-08 12:20:36 +00:00
|
|
|
if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
|
|
|
|
UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
|
|
|
|
udf_update_tag(epos->bh->b_data,
|
|
|
|
epos->offset + (inc ? 0 : adsize));
|
2005-04-16 22:20:36 +00:00
|
|
|
else
|
2008-02-08 12:20:36 +00:00
|
|
|
udf_update_tag(epos->bh->b_data,
|
|
|
|
sizeof(struct allocExtDesc));
|
2007-05-08 07:35:14 +00:00
|
|
|
mark_buffer_dirty_inode(epos->bh, inode);
|
2005-04-16 22:20:36 +00:00
|
|
|
}
|
|
|
|
|
2010-10-21 22:30:26 +00:00
|
|
|
return 0;
|
2005-04-16 22:20:36 +00:00
|
|
|
}

/*
 * Append extent at given position - should be the first free one in inode
 * / indirect extent. Takes care of allocating and linking indirect blocks.
 */
int udf_add_aext(struct inode *inode, struct extent_position *epos,
		 struct kernel_lb_addr *eloc, uint32_t elen, int inc)
{
	int adsize;
	struct super_block *sb = inode->i_sb;

	if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		return -EIO;

	if (epos->offset + (2 * adsize) > sb->s_blocksize) {
		int err;
		udf_pblk_t new_block;

		new_block = udf_new_block(sb, NULL,
					  epos->block.partitionReferenceNum,
					  epos->block.logicalBlockNum, &err);
		if (!new_block)
			return -ENOSPC;

		err = udf_setup_indirect_aext(inode, new_block, epos);
		if (err)
			return err;
	}

	return __udf_add_aext(inode, epos, eloc, elen, inc);
}
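
/*
 * Illustrative sketch only (not a caller that exists in this file;
 * 'example_append_extent', 'first_block' and 'nr_blocks' are hypothetical and
 * error handling is trimmed): appending one recorded extent with the helpers
 * above boils down to walking to the first free descriptor slot and letting
 * udf_add_aext() handle any overflow into an indirect block:
 *
 *	static int example_append_extent(struct inode *inode,
 *					 udf_pblk_t first_block,
 *					 uint32_t nr_blocks)
 *	{
 *		struct extent_position epos = {};
 *		struct kernel_lb_addr eloc;
 *		uint32_t elen;
 *		int8_t etype;
 *		int err;
 *
 *		epos.block = UDF_I(inode)->i_location;
 *		while (udf_next_aext(inode, &epos, &eloc, &elen, &etype, 1) > 0)
 *			;
 *		eloc.logicalBlockNum = first_block;
 *		eloc.partitionReferenceNum =
 *			UDF_I(inode)->i_location.partitionReferenceNum;
 *		elen = EXT_RECORDED_ALLOCATED |
 *			(nr_blocks << inode->i_sb->s_blocksize_bits);
 *		err = udf_add_aext(inode, &epos, &eloc, elen, 1);
 *		brelse(epos.bh);
 *		return err;
 *	}
 */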

void udf_write_aext(struct inode *inode, struct extent_position *epos,
		    struct kernel_lb_addr *eloc, uint32_t elen, int inc)
{
	int adsize;
	uint8_t *ptr;
	struct short_ad *sad;
	struct long_ad *lad;
	struct udf_inode_info *iinfo = UDF_I(inode);

	if (!epos->bh)
		ptr = iinfo->i_data + epos->offset -
			udf_file_entry_alloc_offset(inode) +
			iinfo->i_lenEAttr;
	else
		ptr = epos->bh->b_data + epos->offset;

	switch (iinfo->i_alloc_type) {
	case ICBTAG_FLAG_AD_SHORT:
		sad = (struct short_ad *)ptr;
		sad->extLength = cpu_to_le32(elen);
		sad->extPosition = cpu_to_le32(eloc->logicalBlockNum);
		adsize = sizeof(struct short_ad);
		break;
	case ICBTAG_FLAG_AD_LONG:
		lad = (struct long_ad *)ptr;
		lad->extLength = cpu_to_le32(elen);
		lad->extLocation = cpu_to_lelb(*eloc);
		memset(lad->impUse, 0x00, sizeof(lad->impUse));
		adsize = sizeof(struct long_ad);
		break;
	default:
		return;
	}

	if (epos->bh) {
		if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
		    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201) {
			struct allocExtDesc *aed =
				(struct allocExtDesc *)epos->bh->b_data;
			udf_update_tag(epos->bh->b_data,
				       le32_to_cpu(aed->lengthAllocDescs) +
				       sizeof(struct allocExtDesc));
		}
		mark_buffer_dirty_inode(epos->bh, inode);
	} else {
		mark_inode_dirty(inode);
	}

	if (inc)
		epos->offset += adsize;
}
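
/*
 * Note that udf_write_aext() only (re)writes the descriptor at 'epos' and, if
 * the descriptor lives in an external AED block, refreshes that block's tag
 * checksum (for UDF 2.01+ or non-strict mounts); growing lengthAllocDescs /
 * i_lenAlloc is __udf_add_aext()'s job. The 'elen' passed in is stored
 * verbatim, so callers must already have the two-bit extent type OR-ed into
 * its top bits.
 */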

/*
 * Only 1 indirect extent in a row really makes sense but allow up to 16 in
 * case someone does some weird stuff.
 */
#define UDF_MAX_INDIR_EXTS 16

/*
 * Returns 1 on success, -errno on error, 0 on hit EOF.
 */
int udf_next_aext(struct inode *inode, struct extent_position *epos,
		  struct kernel_lb_addr *eloc, uint32_t *elen, int8_t *etype,
		  int inc)
{
	unsigned int indirections = 0;
	int ret = 0;
	udf_pblk_t block;

	while (1) {
		ret = udf_current_aext(inode, epos, eloc, elen,
				       etype, inc);
		if (ret <= 0)
			return ret;
		if (*etype != (EXT_NEXT_EXTENT_ALLOCDESCS >> 30))
			return ret;

		if (++indirections > UDF_MAX_INDIR_EXTS) {
			udf_err(inode->i_sb,
				"too many indirect extents in inode %lu\n",
				inode->i_ino);
			return -EFSCORRUPTED;
		}

		epos->block = *eloc;
		epos->offset = sizeof(struct allocExtDesc);
		brelse(epos->bh);
		block = udf_get_lb_pblock(inode->i_sb, &epos->block, 0);
		epos->bh = sb_bread(inode->i_sb, block);
		if (!epos->bh) {
			udf_debug("reading block %u failed!\n", block);
			return -EIO;
		}
	}
}
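
/*
 * Illustrative sketch only (hypothetical caller, error handling trimmed):
 * udf_next_aext() is the usual way to enumerate an inode's extents, and it
 * transparently follows the EXT_NEXT_EXTENT_ALLOCDESCS chains created by
 * udf_setup_indirect_aext():
 *
 *	struct extent_position epos = {};
 *	struct kernel_lb_addr eloc;
 *	uint32_t elen;
 *	int8_t etype;
 *	int ret;
 *
 *	epos.block = UDF_I(inode)->i_location;
 *	while ((ret = udf_next_aext(inode, &epos, &eloc, &elen, &etype, 1)) > 0)
 *		udf_debug("extent: block %u, %u bytes, type %d\n",
 *			  eloc.logicalBlockNum, elen, etype);
 *	brelse(epos.bh);
 *
 * On exit 'ret' is 0 at the end of the extent list or -errno on error, and
 * 'epos' points at the first free descriptor slot.
 */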

/*
 * Returns 1 on success, -errno on error, 0 on hit EOF.
 */
int udf_current_aext(struct inode *inode, struct extent_position *epos,
		     struct kernel_lb_addr *eloc, uint32_t *elen, int8_t *etype,
		     int inc)
{
	int alen;
	uint8_t *ptr;
	struct short_ad *sad;
	struct long_ad *lad;
	struct udf_inode_info *iinfo = UDF_I(inode);

	if (!epos->bh) {
		if (!epos->offset)
			epos->offset = udf_file_entry_alloc_offset(inode);
		ptr = iinfo->i_data + epos->offset -
			udf_file_entry_alloc_offset(inode) +
			iinfo->i_lenEAttr;
		alen = udf_file_entry_alloc_offset(inode) +
			iinfo->i_lenAlloc;
	} else {
		struct allocExtDesc *header =
			(struct allocExtDesc *)epos->bh->b_data;

		if (!epos->offset)
			epos->offset = sizeof(struct allocExtDesc);
		ptr = epos->bh->b_data + epos->offset;
		if (check_add_overflow(sizeof(struct allocExtDesc),
				le32_to_cpu(header->lengthAllocDescs), &alen))
			return -1;
	}

	switch (iinfo->i_alloc_type) {
	case ICBTAG_FLAG_AD_SHORT:
		sad = udf_get_fileshortad(ptr, alen, &epos->offset, inc);
		if (!sad)
			return 0;
		*etype = le32_to_cpu(sad->extLength) >> 30;
		eloc->logicalBlockNum = le32_to_cpu(sad->extPosition);
		eloc->partitionReferenceNum =
				iinfo->i_location.partitionReferenceNum;
		*elen = le32_to_cpu(sad->extLength) & UDF_EXTENT_LENGTH_MASK;
		break;
	case ICBTAG_FLAG_AD_LONG:
		lad = udf_get_filelongad(ptr, alen, &epos->offset, inc);
		if (!lad)
			return 0;
		*etype = le32_to_cpu(lad->extLength) >> 30;
		*eloc = lelb_to_cpu(lad->extLocation);
		*elen = le32_to_cpu(lad->extLength) & UDF_EXTENT_LENGTH_MASK;
		break;
	default:
		udf_debug("alloc_type = %u unsupported\n", iinfo->i_alloc_type);
		return -EINVAL;
	}

	return 1;
}
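
/*
 * The on-disk extLength word decoded above packs the extent type into its top
 * two bits, which is why the helpers in this file shift by 30 and mask with
 * UDF_EXTENT_LENGTH_MASK:
 *
 *	0 - extent recorded and allocated (EXT_RECORDED_ALLOCATED)
 *	1 - extent allocated but not recorded (EXT_NOT_RECORDED_ALLOCATED)
 *	2 - extent neither allocated nor recorded
 *	3 - pointer to the next allocation extent descriptor
 *
 * For example, extLength == 0x40000800 describes a 2048 byte extent that is
 * allocated but not yet recorded: type = 0x40000800 >> 30 = 1 and length =
 * 0x40000800 & UDF_EXTENT_LENGTH_MASK = 0x800.
 */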

static int udf_insert_aext(struct inode *inode, struct extent_position epos,
			   struct kernel_lb_addr neloc, uint32_t nelen)
{
	struct kernel_lb_addr oeloc;
	uint32_t oelen;
	int8_t etype;
	int ret;

	if (epos.bh)
		get_bh(epos.bh);

	while (1) {
		ret = udf_next_aext(inode, &epos, &oeloc, &oelen, &etype, 0);
		if (ret <= 0)
			break;
		udf_write_aext(inode, &epos, &neloc, nelen, 1);
		neloc = oeloc;
		nelen = (etype << 30) | oelen;
	}
	if (ret == 0)
		ret = udf_add_aext(inode, &epos, &neloc, nelen, 1);
	brelse(epos.bh);

	return ret;
}
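
/*
 * udf_insert_aext() above inserts the extent 'neloc'/'nelen' at 'epos' by
 * repeatedly writing the pending extent over the current slot and carrying
 * the displaced one forward (with its type re-encoded into the top two bits
 * of the length); once the end of the list is reached, the last displaced
 * extent is appended with udf_add_aext().
 */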

int8_t udf_delete_aext(struct inode *inode, struct extent_position epos)
{
	struct extent_position oepos;
	int adsize;
	int8_t etype;
	struct allocExtDesc *aed;
	struct udf_inode_info *iinfo;
	struct kernel_lb_addr eloc;
	uint32_t elen;
	int ret;

	if (epos.bh) {
		get_bh(epos.bh);
		get_bh(epos.bh);
	}

	iinfo = UDF_I(inode);
	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
		adsize = sizeof(struct short_ad);
	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
		adsize = sizeof(struct long_ad);
	else
		adsize = 0;

	oepos = epos;
	if (udf_next_aext(inode, &epos, &eloc, &elen, &etype, 1) <= 0)
		return -1;

	while (1) {
		ret = udf_next_aext(inode, &epos, &eloc, &elen, &etype, 1);
		if (ret < 0) {
			brelse(epos.bh);
			brelse(oepos.bh);
			return -1;
		}
		if (ret == 0)
			break;
		udf_write_aext(inode, &oepos, &eloc, (etype << 30) | elen, 1);
		if (oepos.bh != epos.bh) {
			oepos.block = epos.block;
			brelse(oepos.bh);
			get_bh(epos.bh);
			oepos.bh = epos.bh;
			oepos.offset = epos.offset - adsize;
		}
	}
	memset(&eloc, 0x00, sizeof(struct kernel_lb_addr));
	elen = 0;

	if (epos.bh != oepos.bh) {
		udf_free_blocks(inode->i_sb, inode, &epos.block, 0, 1);
		udf_write_aext(inode, &oepos, &eloc, elen, 1);
		udf_write_aext(inode, &oepos, &eloc, elen, 1);
		if (!oepos.bh) {
			iinfo->i_lenAlloc -= (adsize * 2);
			mark_inode_dirty(inode);
		} else {
			aed = (struct allocExtDesc *)oepos.bh->b_data;
			le32_add_cpu(&aed->lengthAllocDescs, -(2 * adsize));
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
			    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
				udf_update_tag(oepos.bh->b_data,
						oepos.offset - (2 * adsize));
			else
				udf_update_tag(oepos.bh->b_data,
						sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(oepos.bh, inode);
		}
	} else {
		udf_write_aext(inode, &oepos, &eloc, elen, 1);
		if (!oepos.bh) {
			iinfo->i_lenAlloc -= adsize;
			mark_inode_dirty(inode);
		} else {
			aed = (struct allocExtDesc *)oepos.bh->b_data;
			le32_add_cpu(&aed->lengthAllocDescs, -adsize);
			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
			    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
				udf_update_tag(oepos.bh->b_data,
						epos.offset - adsize);
			else
				udf_update_tag(oepos.bh->b_data,
						sizeof(struct allocExtDesc));
			mark_buffer_dirty_inode(oepos.bh, inode);
		}
	}

	brelse(epos.bh);
	brelse(oepos.bh);

	return (elen >> 30);
}
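
/*
 * udf_delete_aext() above removes the descriptor at the position handed in by
 * copying every following extent one slot back and then zeroing the freed
 * trailing slot(s). The two get_bh() calls at the top take one extra buffer
 * reference each, for the local 'epos' and 'oepos' copies that are both
 * released on exit; when the shifted list ends in a different allocation
 * block than it started in, that trailing block is freed and two descriptors
 * are cleared, matching the 2 * adsize adjustments.
 */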

/*
 * Returns 1 on success, -errno on error, 0 on hit EOF.
 */
int inode_bmap(struct inode *inode, sector_t block, struct extent_position *pos,
	       struct kernel_lb_addr *eloc, uint32_t *elen, sector_t *offset,
	       int8_t *etype)
{
	unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
	loff_t lbcount = 0, bcount = (loff_t) block << blocksize_bits;
	struct udf_inode_info *iinfo;
	int err = 0;

	iinfo = UDF_I(inode);
	if (!udf_read_extent_cache(inode, bcount, &lbcount, pos)) {
		pos->offset = 0;
		pos->block = iinfo->i_location;
		pos->bh = NULL;
	}
	*elen = 0;
	do {
		err = udf_next_aext(inode, pos, eloc, elen, etype, 1);
		if (err <= 0) {
			if (err == 0) {
				*offset = (bcount - lbcount) >> blocksize_bits;
				iinfo->i_lenExtents = lbcount;
			}
			return err;
		}
		lbcount += *elen;
	} while (lbcount <= bcount);
	/* update extent cache */
	udf_update_extent_cache(inode, lbcount - *elen, pos);
	*offset = (bcount + *elen - lbcount) >> blocksize_bits;

	return 1;
}
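
/*
 * Illustrative sketch only (hypothetical caller; 'block' is a file-relative
 * block number supplied by that caller): mapping a file block to a block on
 * the medium combines inode_bmap() with udf_get_lb_pblock(), and only a
 * recorded and allocated extent maps to real data:
 *
 *	struct extent_position epos = {};
 *	struct kernel_lb_addr eloc;
 *	uint32_t elen;
 *	sector_t offset;
 *	int8_t etype;
 *	udf_pblk_t phys = 0;
 *
 *	if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset,
 *		       &etype) == 1 &&
 *	    etype == (EXT_RECORDED_ALLOCATED >> 30))
 *		phys = udf_get_lb_pblock(inode->i_sb, &eloc, offset);
 *	brelse(epos.bh);
 */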