Andrew Morton 2024-12-18 19:51:48 -08:00
commit 45f41efd96
114 changed files with 1650 additions and 1374 deletions

View File

@ -100,29 +100,29 @@ Get delays, since system boot, for pid 10::
# ./getdelays -d -p 10
(output similar to next case)
Get sum of delays, since system boot, for all pids with tgid 5::
Get sum and peak of delays, since system boot, for all pids with tgid 242::
# ./getdelays -d -t 5
bash-4.4# ./getdelays -d -t 242
print delayacct stats ON
TGID 5
TGID 242
CPU count real total virtual total delay total delay average
8 7000000 6872122 3382277 0.423ms
IO count delay total delay average
0 0 0.000ms
SWAP count delay total delay average
0 0 0.000ms
RECLAIM count delay total delay average
0 0 0.000ms
THRASHING count delay total delay average
0 0 0.000ms
COMPACT count delay total delay average
0 0 0.000ms
WPCOPY count delay total delay average
0 0 0.000ms
IRQ count delay total delay average
0 0 0.000ms
CPU count real total virtual total delay total delay average delay max
239 296000000 307724885 1127792 0.005ms 0.238382ms
IO count delay total delay average delay max
0 0 0.000ms 0.000000ms
SWAP count delay total delay average delay max
0 0 0.000ms 0.000000ms
RECLAIM count delay total delay average delay max
0 0 0.000ms 0.000000ms
THRASHING count delay total delay average delay max
0 0 0.000ms 0.000000ms
COMPACT count delay total delay average delay max
0 0 0.000ms 0.000000ms
WPCOPY count delay total delay average delay max
230 19100476 0.083ms 0.383822ms
IRQ count delay total delay average delay max
0 0 0.000ms 0.000000ms
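Note: the "delay average" column above is derived from the taskstats counters, which report delay totals in nanoseconds. A minimal user-space sketch (not part of this commit) of that calculation, using the cpu_count and cpu_delay_total fields from include/uapi/linux/taskstats.h:

#include <stdio.h>
#include <linux/taskstats.h>

/* average = delay total (ns) / count, printed in milliseconds */
static double average_ms(unsigned long long total_ns, unsigned long long count)
{
	return count ? (double)total_ns / count / 1e6 : 0.0;
}

static void print_cpu_delay(const struct taskstats *t)
{
	printf("CPU count %llu delay total %llu delay average %.3fms\n",
	       (unsigned long long)t->cpu_count,
	       (unsigned long long)t->cpu_delay_total,
	       average_ms(t->cpu_delay_total, t->cpu_count));
}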
Get IO accounting for pid 1, it works only with -p::

View File

@ -4,6 +4,8 @@
Min Heap API
============
:Author: Kuan-Wei Chiu <visitorckw@gmail.com>
Introduction
============

View File

@ -42,8 +42,8 @@ call xa_tag_pointer() to create an entry with a tag, xa_untag_pointer()
to turn a tagged entry back into an untagged pointer and xa_pointer_tag()
to retrieve the tag of an entry. Tagged pointers use the same bits that
are used to distinguish value entries from normal pointers, so you must
decide whether they want to store value entries or tagged pointers in
any particular XArray.
decide whether you want to store value entries or tagged pointers in any
particular XArray.
The XArray does not support storing IS_ERR() pointers as some
conflict with value entries or internal entries.
@ -52,8 +52,9 @@ An unusual feature of the XArray is the ability to create entries which
occupy a range of indices. Once stored to, looking up any index in
the range will return the same entry as looking up any other index in
the range. Storing to any index will store to all of them. Multi-index
entries can be explicitly split into smaller entries, or storing ``NULL``
into any entry will cause the XArray to forget about the range.
entries can be explicitly split into smaller entries. Unsetting (using
xa_erase() or xa_store() with ``NULL``) any entry will cause the XArray
to forget about the range.
Normal API
==========
@ -63,13 +64,14 @@ for statically allocated XArrays or xa_init() for dynamically
allocated ones. A freshly-initialised XArray contains a ``NULL``
pointer at every index.
You can then set entries using xa_store() and get entries
using xa_load(). xa_store will overwrite any entry with the
new entry and return the previous entry stored at that index. You can
use xa_erase() instead of calling xa_store() with a
``NULL`` entry. There is no difference between an entry that has never
been stored to, one that has been erased and one that has most recently
had ``NULL`` stored to it.
You can then set entries using xa_store() and get entries using
xa_load(). xa_store() will overwrite any entry with the new entry and
return the previous entry stored at that index. You can unset entries
using xa_erase() or by setting the entry to ``NULL`` using xa_store().
There is no difference between an entry that has never been stored to
and one that has been erased with xa_erase(); an entry that has most
recently had ``NULL`` stored to it is also equivalent except if the
XArray was initialized with ``XA_FLAGS_ALLOC``.
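As a quick illustration of the calls described above (a minimal sketch, not taken from this patch; names are made up), storing, loading and unsetting an entry in a plain XArray looks like this:

#include <linux/xarray.h>

static DEFINE_XARRAY(demo_xa);
static int demo_value = 42;

static void xarray_demo(void)
{
	void *old;

	/* Store a pointer at index 5; the previous entry (NULL here) comes back. */
	old = xa_store(&demo_xa, 5, &demo_value, GFP_KERNEL);
	WARN_ON(old != NULL);

	/* Read it back. */
	WARN_ON(xa_load(&demo_xa, 5) != &demo_value);

	/* Unset it; for a non-XA_FLAGS_ALLOC array this equals storing NULL. */
	xa_erase(&demo_xa, 5);
}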
You can conditionally replace an entry at an index by using
xa_cmpxchg(). Like cmpxchg(), it will only succeed if

View File

@ -10,7 +10,6 @@
#include <linux/preempt.h>
#include <asm/fpu.h>
#include <asm/thread_info.h>
#include <asm/fpu.h>
#if defined(CONFIG_ALPHA_EV6) || defined(CONFIG_ALPHA_EV67)
#define STT(reg,val) asm volatile ("ftoit $f"#reg",%0" : "=r"(val));

View File

@ -31,10 +31,10 @@
/*
* Constants
*/
#define SHARPSL_CHARGE_ON_TIME_INTERVAL (msecs_to_jiffies(1*60*1000)) /* 1 min */
#define SHARPSL_CHARGE_FINISH_TIME (msecs_to_jiffies(10*60*1000)) /* 10 min */
#define SHARPSL_BATCHK_TIME (msecs_to_jiffies(15*1000)) /* 15 sec */
#define SHARPSL_BATCHK_TIME_SUSPEND (60*10) /* 10 min */
#define SHARPSL_CHARGE_ON_TIME_INTERVAL (secs_to_jiffies(60))
#define SHARPSL_CHARGE_FINISH_TIME (secs_to_jiffies(10*60))
#define SHARPSL_BATCHK_TIME (secs_to_jiffies(15))
#define SHARPSL_BATCHK_TIME_SUSPEND (60*10) /* 10 min */
#define SHARPSL_WAIT_CO_TIME 15 /* 15 sec */
#define SHARPSL_WAIT_DISCHARGE_ON 100 /* 100 msec */
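The conversion pattern in this hunk recurs throughout the commit: a timeout written as msecs_to_jiffies(N * 1000) or msecs_to_jiffies(N * MSEC_PER_SEC) becomes secs_to_jiffies(N), the helper from <linux/jiffies.h>, dropping the open-coded unit conversion. A minimal sketch of the before/after shape (hypothetical helper names):

#include <linux/jiffies.h>

/* Before: seconds hand-converted to milliseconds, then to jiffies. */
static inline unsigned long demo_timeout_old(unsigned int secs)
{
	return jiffies + msecs_to_jiffies(secs * 1000);
}

/* After: the same expiry expressed directly in seconds. */
static inline unsigned long demo_timeout_new(unsigned int secs)
{
	return jiffies + secs_to_jiffies(secs);
}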

View File

@ -629,7 +629,6 @@ CONFIG_TEST_PRINTF=m
CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
CONFIG_TEST_MAPLE_TREE=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_IDA=m

View File

@ -586,7 +586,6 @@ CONFIG_TEST_PRINTF=m
CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
CONFIG_TEST_MAPLE_TREE=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_IDA=m

View File

@ -606,7 +606,6 @@ CONFIG_TEST_PRINTF=m
CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
CONFIG_TEST_MAPLE_TREE=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_IDA=m

View File

@ -578,7 +578,6 @@ CONFIG_TEST_PRINTF=m
CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
CONFIG_TEST_MAPLE_TREE=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_IDA=m

View File

@ -588,7 +588,6 @@ CONFIG_TEST_PRINTF=m
CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
CONFIG_TEST_MAPLE_TREE=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_IDA=m

View File

@ -605,7 +605,6 @@ CONFIG_TEST_PRINTF=m
CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
CONFIG_TEST_MAPLE_TREE=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_IDA=m

View File

@ -692,7 +692,6 @@ CONFIG_TEST_PRINTF=m
CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
CONFIG_TEST_MAPLE_TREE=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_IDA=m

View File

@ -578,7 +578,6 @@ CONFIG_TEST_PRINTF=m
CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
CONFIG_TEST_MAPLE_TREE=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_IDA=m

View File

@ -579,7 +579,6 @@ CONFIG_TEST_PRINTF=m
CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
CONFIG_TEST_MAPLE_TREE=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_IDA=m

View File

@ -595,7 +595,6 @@ CONFIG_TEST_PRINTF=m
CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
CONFIG_TEST_MAPLE_TREE=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_IDA=m

View File

@ -575,7 +575,6 @@ CONFIG_TEST_PRINTF=m
CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
CONFIG_TEST_MAPLE_TREE=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_IDA=m

View File

@ -576,7 +576,6 @@ CONFIG_TEST_PRINTF=m
CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
CONFIG_TEST_MAPLE_TREE=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_IDA=m

View File

@ -451,7 +451,6 @@ CONFIG_TEST_PRINTF=m
CONFIG_TEST_SCANF=m
CONFIG_TEST_BITMAP=m
CONFIG_TEST_UUID=m
CONFIG_TEST_XARRAY=m
CONFIG_TEST_MAPLE_TREE=m
CONFIG_TEST_RHASHTABLE=m
CONFIG_TEST_IDA=m

View File

@ -4957,7 +4957,7 @@ int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit,
* states are synchronized from L0 to L1. L1 needs to inform L0 about
* MER=1 only when there are pending external interrupts.
* In the above if check, MER bit is set if there are pending
* external interrupts. Hence, explicity mask off MER bit
* external interrupts. Hence, explicitly mask off MER bit
* here as otherwise it may generate spurious interrupts in L2 KVM
* causing an endless loop, which results in L2 guest getting hung.
*/

View File

@ -544,7 +544,7 @@ static int drc_pmem_query_health(struct papr_scm_priv *p)
/* Jiffies offset for which the health data is assumed to be same */
cache_timeout = p->lasthealth_jiffies +
msecs_to_jiffies(MIN_HEALTH_QUERY_INTERVAL * 1000);
secs_to_jiffies(MIN_HEALTH_QUERY_INTERVAL);
/* Fetch new health info is its older than MIN_HEALTH_QUERY_INTERVAL */
if (time_after(jiffies, cache_timeout))

View File

@ -166,7 +166,7 @@ static struct timer_list lgr_timer;
*/
static void lgr_timer_set(void)
{
mod_timer(&lgr_timer, jiffies + msecs_to_jiffies(LGR_TIMER_INTERVAL_SECS * MSEC_PER_SEC));
mod_timer(&lgr_timer, jiffies + secs_to_jiffies(LGR_TIMER_INTERVAL_SECS));
}
/*

View File

@ -662,12 +662,12 @@ static void stp_check_leap(void)
if (ret < 0)
pr_err("failed to set leap second flags\n");
/* arm Timer to clear leap second flags */
mod_timer(&stp_timer, jiffies + msecs_to_jiffies(14400 * MSEC_PER_SEC));
mod_timer(&stp_timer, jiffies + secs_to_jiffies(14400));
} else {
/* The day the leap second is scheduled for hasn't been reached. Retry
* in one hour.
*/
mod_timer(&stp_timer, jiffies + msecs_to_jiffies(3600 * MSEC_PER_SEC));
mod_timer(&stp_timer, jiffies + secs_to_jiffies(3600));
}
}

View File

@ -371,7 +371,7 @@ static void set_topology_timer(void)
if (atomic_add_unless(&topology_poll, -1, 0))
mod_timer(&topology_timer, jiffies + msecs_to_jiffies(100));
else
mod_timer(&topology_timer, jiffies + msecs_to_jiffies(60 * MSEC_PER_SEC));
mod_timer(&topology_timer, jiffies + secs_to_jiffies(60));
}
void topology_expect_change(void)

View File

@ -204,7 +204,7 @@ static void cmm_set_timer(void)
del_timer(&cmm_timer);
return;
}
mod_timer(&cmm_timer, jiffies + msecs_to_jiffies(cmm_timeout_seconds * MSEC_PER_SEC));
mod_timer(&cmm_timer, jiffies + secs_to_jiffies(cmm_timeout_seconds));
}
static void cmm_timer_fn(struct timer_list *unused)

View File

@ -817,7 +817,7 @@ static void device_hard_reset_pending(struct work_struct *work)
}
queue_delayed_work(hdev->reset_wq, &device_reset_work->reset_work,
msecs_to_jiffies(HL_PENDING_RESET_PER_SEC * 1000));
secs_to_jiffies(HL_PENDING_RESET_PER_SEC));
}
}

View File

@ -362,8 +362,7 @@ static void fixup_device_params_per_asic(struct hl_device *hdev, int timeout)
* a different default timeout for Gaudi
*/
if (timeout == HL_DEFAULT_TIMEOUT_LOCKED)
hdev->timeout_jiffies = msecs_to_jiffies(GAUDI_DEFAULT_TIMEOUT_LOCKED *
MSEC_PER_SEC);
hdev->timeout_jiffies = secs_to_jiffies(GAUDI_DEFAULT_TIMEOUT_LOCKED);
hdev->reset_upon_device_release = 0;
break;

View File

@ -544,7 +544,7 @@ static void print_stats(struct xen_blkif_ring *ring)
ring->st_rd_req, ring->st_wr_req,
ring->st_f_req, ring->st_ds_req,
ring->persistent_gnt_c, max_pgrants);
ring->st_print = jiffies + msecs_to_jiffies(10 * 1000);
ring->st_print = jiffies + secs_to_jiffies(10);
ring->st_rd_req = 0;
ring->st_wr_req = 0;
ring->st_oo_req = 0;

View File

@ -514,7 +514,7 @@ static int wait_for_lmem_ready(struct xe_device *xe)
drm_dbg(&xe->drm, "Waiting for lmem initialization\n");
start = jiffies;
timeout = start + msecs_to_jiffies(60 * 1000); /* 60 sec! */
timeout = start + secs_to_jiffies(60); /* 60 sec! */
do {
if (signal_pending(current))

View File

@ -92,7 +92,7 @@ struct iowait_work {
*
* The lock field is used by waiters to record
* the seqlock_t that guards the list head.
* Waiters explicity know that, but the destroy
* Waiters explicitly know that, but the destroy
* code that unwaits QPs does not.
*/
struct iowait {

View File

@ -72,7 +72,7 @@ struct usnic_ib_create_qp_resp {
u64 bar_bus_addr;
u32 bar_len;
/*
* WQ, RQ, CQ are explicity specified bc exposing a generic resources inteface
* WQ, RQ, CQ are explicitly specified bc exposing a generic resources inteface
* expands the scope of ABI to many files.
*/
u32 wq_cnt;

View File

@ -178,7 +178,7 @@ static int ath11k_debugfs_fw_stats_request(struct ath11k *ar,
* received 'update stats' event, we keep a 3 seconds timeout in case,
* fw_stats_done is not marked yet
*/
timeout = jiffies + msecs_to_jiffies(3 * 1000);
timeout = jiffies + secs_to_jiffies(3);
ath11k_debugfs_fw_stats_reset(ar);

View File

@ -1810,7 +1810,7 @@ void brcmf_fws_rxreorder(struct brcmf_if *ifp, struct sk_buff *pkt)
rfi->cur_idx = cur_idx;
}
} else {
/* explicity window move updating the expected index */
/* explicitly window move updating the expected index */
exp_idx = reorder_data[BRCMF_RXREORDER_EXPIDX_OFFSET];
brcmf_dbg(DATA, "flow-%d (0x%x): change expected: %d -> %d\n",

View File

@ -1044,7 +1044,7 @@ static void arcmsr_init_get_devmap_timer(struct AdapterControlBlock *pacb)
static void arcmsr_init_set_datetime_timer(struct AdapterControlBlock *pacb)
{
timer_setup(&pacb->refresh_timer, arcmsr_set_iop_datetime, 0);
pacb->refresh_timer.expires = jiffies + msecs_to_jiffies(60 * 1000);
pacb->refresh_timer.expires = jiffies + secs_to_jiffies(60);
add_timer(&pacb->refresh_timer);
}

View File

@ -966,7 +966,7 @@ static int cxlflash_disk_detach(struct scsi_device *sdev, void *detach)
*
* This routine is the release handler for the fops registered with
* the CXL services on an initial attach for a context. It is called
* when a close (explicity by the user or as part of a process tear
* when a close (explicitly by the user or as part of a process tear
* down) is performed on the adapter file descriptor returned to the
* user. The user should be aware that explicitly performing a close
* considered catastrophic and subsequent usage of the superpipe API

View File

@ -598,7 +598,7 @@ lpfc_config_port_post(struct lpfc_hba *phba)
jiffies + msecs_to_jiffies(1000 * timeout));
/* Set up heart beat (HB) timer */
mod_timer(&phba->hb_tmofunc,
jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
jiffies + secs_to_jiffies(LPFC_HB_MBOX_INTERVAL));
clear_bit(HBA_HBEAT_INP, &phba->hba_flag);
clear_bit(HBA_HBEAT_TMO, &phba->hba_flag);
phba->last_completion_time = jiffies;
@ -1267,7 +1267,7 @@ lpfc_hb_mbox_cmpl(struct lpfc_hba * phba, LPFC_MBOXQ_t * pmboxq)
!test_bit(FC_UNLOADING, &phba->pport->load_flag))
mod_timer(&phba->hb_tmofunc,
jiffies +
msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
secs_to_jiffies(LPFC_HB_MBOX_INTERVAL));
return;
}
@ -1555,7 +1555,7 @@ lpfc_hb_timeout_handler(struct lpfc_hba *phba)
/* If IOs are completing, no need to issue a MBX_HEARTBEAT */
spin_lock_irq(&phba->pport->work_port_lock);
if (time_after(phba->last_completion_time +
msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL),
secs_to_jiffies(LPFC_HB_MBOX_INTERVAL),
jiffies)) {
spin_unlock_irq(&phba->pport->work_port_lock);
if (test_bit(HBA_HBEAT_INP, &phba->hba_flag))
@ -3354,7 +3354,7 @@ lpfc_block_mgmt_io(struct lpfc_hba *phba, int mbx_action)
spin_unlock_irqrestore(&phba->hbalock, iflag);
if (mbx_action == LPFC_MBX_NO_WAIT)
return;
timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
timeout = secs_to_jiffies(LPFC_MBOX_TMO) + jiffies;
spin_lock_irqsave(&phba->hbalock, iflag);
if (phba->sli.mbox_active) {
actcmd = phba->sli.mbox_active->u.mb.mbxCommand;
@ -4924,14 +4924,14 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
stat = 1;
goto finished;
}
if (time >= msecs_to_jiffies(30 * 1000)) {
if (time >= secs_to_jiffies(30)) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0461 Scanning longer than 30 "
"seconds. Continuing initialization\n");
stat = 1;
goto finished;
}
if (time >= msecs_to_jiffies(15 * 1000) &&
if (time >= secs_to_jiffies(15) &&
phba->link_state <= LPFC_LINK_DOWN) {
lpfc_printf_log(phba, KERN_INFO, LOG_INIT,
"0465 Link down longer than 15 "
@ -4945,7 +4945,7 @@ int lpfc_scan_finished(struct Scsi_Host *shost, unsigned long time)
if (vport->num_disc_nodes || vport->fc_prli_sent)
goto finished;
if (!atomic_read(&vport->fc_map_cnt) &&
time < msecs_to_jiffies(2 * 1000))
time < secs_to_jiffies(2))
goto finished;
if ((phba->sli.sli_flag & LPFC_SLI_MBOX_ACTIVE) != 0)
goto finished;
@ -5179,8 +5179,8 @@ lpfc_vmid_poll(struct timer_list *t)
lpfc_worker_wake_up(phba);
/* restart the timer for the next iteration */
mod_timer(&phba->inactive_vmid_poll, jiffies + msecs_to_jiffies(1000 *
LPFC_VMID_TIMER));
mod_timer(&phba->inactive_vmid_poll,
jiffies + secs_to_jiffies(LPFC_VMID_TIMER));
}
/**

View File

@ -906,7 +906,7 @@ lpfc_rcv_logo(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
(ndlp->nlp_state >= NLP_STE_ADISC_ISSUE ||
ndlp->nlp_state <= NLP_STE_PRLI_ISSUE)) {
mod_timer(&ndlp->nlp_delayfunc,
jiffies + msecs_to_jiffies(1000 * 1));
jiffies + secs_to_jiffies(1));
set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
lpfc_printf_vlog(vport, KERN_INFO,
@ -1332,7 +1332,7 @@ lpfc_rcv_els_plogi_issue(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
}
/* Put ndlp in npr state set plogi timer for 1 sec */
mod_timer(&ndlp->nlp_delayfunc, jiffies + msecs_to_jiffies(1000 * 1));
mod_timer(&ndlp->nlp_delayfunc, jiffies + secs_to_jiffies(1));
set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
ndlp->nlp_prev_state = NLP_STE_PLOGI_ISSUE;
@ -1936,7 +1936,7 @@ lpfc_cmpl_reglogin_reglogin_issue(struct lpfc_vport *vport,
/* Put ndlp in npr state set plogi timer for 1 sec */
mod_timer(&ndlp->nlp_delayfunc,
jiffies + msecs_to_jiffies(1000 * 1));
jiffies + secs_to_jiffies(1));
set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;
@ -2743,7 +2743,7 @@ lpfc_rcv_prlo_npr_node(struct lpfc_vport *vport, struct lpfc_nodelist *ndlp,
if (!test_bit(NLP_DELAY_TMO, &ndlp->nlp_flag)) {
mod_timer(&ndlp->nlp_delayfunc,
jiffies + msecs_to_jiffies(1000 * 1));
jiffies + secs_to_jiffies(1));
set_bit(NLP_DELAY_TMO, &ndlp->nlp_flag);
clear_bit(NLP_NPR_ADISC, &ndlp->nlp_flag);
ndlp->nlp_last_elscmd = ELS_CMD_PLOGI;

View File

@ -2237,7 +2237,7 @@ lpfc_nvme_lport_unreg_wait(struct lpfc_vport *vport,
* wait. Print a message if a 10 second wait expires and renew the
* wait. This is unexpected.
*/
wait_tmo = msecs_to_jiffies(LPFC_NVME_WAIT_TMO * 1000);
wait_tmo = secs_to_jiffies(LPFC_NVME_WAIT_TMO);
while (true) {
ret = wait_for_completion_timeout(lport_unreg_cmp, wait_tmo);
if (unlikely(!ret)) {

View File

@ -9012,7 +9012,7 @@ lpfc_sli4_hba_setup(struct lpfc_hba *phba)
/* Start heart beat timer */
mod_timer(&phba->hb_tmofunc,
jiffies + msecs_to_jiffies(1000 * LPFC_HB_MBOX_INTERVAL));
jiffies + secs_to_jiffies(LPFC_HB_MBOX_INTERVAL));
clear_bit(HBA_HBEAT_INP, &phba->hba_flag);
clear_bit(HBA_HBEAT_TMO, &phba->hba_flag);
phba->last_completion_time = jiffies;
@ -13323,7 +13323,7 @@ lpfc_sli_mbox_sys_shutdown(struct lpfc_hba *phba, int mbx_action)
lpfc_sli_mbox_sys_flush(phba);
return;
}
timeout = msecs_to_jiffies(LPFC_MBOX_TMO * 1000) + jiffies;
timeout = secs_to_jiffies(LPFC_MBOX_TMO) + jiffies;
/* Disable softirqs, including timers from obtaining phba->hbalock */
local_bh_disable();

View File

@ -278,7 +278,7 @@ int lpfc_vmid_get_appid(struct lpfc_vport *vport, char *uuid,
if (!(vport->phba->pport->vmid_flag & LPFC_VMID_TIMER_ENBLD)) {
mod_timer(&vport->phba->inactive_vmid_poll,
jiffies +
msecs_to_jiffies(1000 * LPFC_VMID_TIMER));
secs_to_jiffies(LPFC_VMID_TIMER));
vport->phba->pport->vmid_flag |= LPFC_VMID_TIMER_ENBLD;
}
}

View File

@ -736,7 +736,7 @@ static int pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
return -EIO;
}
time_remaining = wait_for_completion_timeout(&completion,
msecs_to_jiffies(60*1000)); // 1 min
secs_to_jiffies(60)); // 1 min
if (!time_remaining) {
kfree(payload.func_specific);
pm8001_dbg(pm8001_ha, FAIL, "get_nvmd_req timeout\n");

View File

@ -59,7 +59,7 @@ static int bcm2835_audio_send_msg_locked(struct bcm2835_audio_instance *instance
if (wait) {
if (!wait_for_completion_timeout(&instance->msg_avail_comp,
msecs_to_jiffies(10 * 1000))) {
secs_to_jiffies(10))) {
dev_err(instance->dev,
"vchi message timeout, msg=%d\n", m->type);
return -ETIMEDOUT;

View File

@ -166,7 +166,7 @@ static struct inode *lookup_quotarealm_inode(struct ceph_mds_client *mdsc,
if (IS_ERR(in)) {
doutc(cl, "Can't lookup inode %llx (err: %ld)\n", realm->ino,
PTR_ERR(in));
qri->timeout = jiffies + msecs_to_jiffies(60 * 1000); /* XXX */
qri->timeout = jiffies + secs_to_jiffies(60); /* XXX */
} else {
qri->timeout = 0;
qri->inode = in;

View File

@ -566,7 +566,7 @@ static void ocfs2_adjust_rightmost_records(handle_t *handle,
struct ocfs2_path *path,
struct ocfs2_extent_rec *insert_rec);
/*
* Reset the actual path elements so that we can re-use the structure
* Reset the actual path elements so that we can reuse the structure
* to build another path. Generally, this involves freeing the buffer
* heads.
*/
@ -1182,7 +1182,7 @@ static int ocfs2_add_branch(handle_t *handle,
/*
* If there is a gap before the root end and the real end
* of the righmost leaf block, we need to remove the gap
* of the rightmost leaf block, we need to remove the gap
* between new_cpos and root_end first so that the tree
* is consistent after we add a new branch(it will start
* from new_cpos).
@ -1238,7 +1238,7 @@ static int ocfs2_add_branch(handle_t *handle,
/* Note: new_eb_bhs[new_blocks - 1] is the guy which will be
* linked with the rest of the tree.
* conversly, new_eb_bhs[0] is the new bottommost leaf.
* conversely, new_eb_bhs[0] is the new bottommost leaf.
*
* when we leave the loop, new_last_eb_blk will point to the
* newest leaf, and next_blkno will point to the topmost extent
@ -3712,7 +3712,7 @@ static int ocfs2_try_to_merge_extent(handle_t *handle,
* update split_index here.
*
* When the split_index is zero, we need to merge it to the
* prevoius extent block. It is more efficient and easier
* previous extent block. It is more efficient and easier
* if we do merge_right first and merge_left later.
*/
ret = ocfs2_merge_rec_right(path, handle, et, split_rec,
@ -4517,7 +4517,7 @@ static void ocfs2_figure_contig_type(struct ocfs2_extent_tree *et,
}
/*
* This should only be called against the righmost leaf extent list.
* This should only be called against the rightmost leaf extent list.
*
* ocfs2_figure_appending_type() will figure out whether we'll have to
* insert at the tail of the rightmost leaf.
@ -6808,27 +6808,27 @@ static int ocfs2_zero_func(handle_t *handle, struct buffer_head *bh)
return 0;
}
void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle,
unsigned int from, unsigned int to,
struct page *page, int zero, u64 *phys)
void ocfs2_map_and_dirty_folio(struct inode *inode, handle_t *handle,
size_t from, size_t to, struct folio *folio, int zero,
u64 *phys)
{
int ret, partial = 0;
loff_t start_byte = ((loff_t)page->index << PAGE_SHIFT) + from;
loff_t start_byte = folio_pos(folio) + from;
loff_t length = to - from;
ret = ocfs2_map_page_blocks(page, phys, inode, from, to, 0);
ret = ocfs2_map_folio_blocks(folio, phys, inode, from, to, 0);
if (ret)
mlog_errno(ret);
if (zero)
zero_user_segment(page, from, to);
folio_zero_segment(folio, from, to);
/*
* Need to set the buffers we zero'd into uptodate
* here if they aren't - ocfs2_map_page_blocks()
* might've skipped some
*/
ret = walk_page_buffers(handle, page_buffers(page),
ret = walk_page_buffers(handle, folio_buffers(folio),
from, to, &partial,
ocfs2_zero_func);
if (ret < 0)
@ -6841,92 +6841,88 @@ void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle,
}
if (!partial)
SetPageUptodate(page);
folio_mark_uptodate(folio);
flush_dcache_page(page);
flush_dcache_folio(folio);
}
static void ocfs2_zero_cluster_pages(struct inode *inode, loff_t start,
loff_t end, struct page **pages,
int numpages, u64 phys, handle_t *handle)
static void ocfs2_zero_cluster_folios(struct inode *inode, loff_t start,
loff_t end, struct folio **folios, int numfolios,
u64 phys, handle_t *handle)
{
int i;
struct page *page;
unsigned int from, to = PAGE_SIZE;
struct super_block *sb = inode->i_sb;
BUG_ON(!ocfs2_sparse_alloc(OCFS2_SB(sb)));
if (numpages == 0)
if (numfolios == 0)
goto out;
to = PAGE_SIZE;
for(i = 0; i < numpages; i++) {
page = pages[i];
for (i = 0; i < numfolios; i++) {
struct folio *folio = folios[i];
size_t to = folio_size(folio);
size_t from = offset_in_folio(folio, start);
from = start & (PAGE_SIZE - 1);
if ((end >> PAGE_SHIFT) == page->index)
to = end & (PAGE_SIZE - 1);
if (to > end - folio_pos(folio))
to = end - folio_pos(folio);
BUG_ON(from > PAGE_SIZE);
BUG_ON(to > PAGE_SIZE);
ocfs2_map_and_dirty_folio(inode, handle, from, to, folio, 1,
&phys);
ocfs2_map_and_dirty_page(inode, handle, from, to, page, 1,
&phys);
start = (page->index + 1) << PAGE_SHIFT;
start = folio_next_index(folio) << PAGE_SHIFT;
}
out:
if (pages)
ocfs2_unlock_and_free_pages(pages, numpages);
if (folios)
ocfs2_unlock_and_free_folios(folios, numfolios);
}
int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end,
struct page **pages, int *num)
static int ocfs2_grab_folios(struct inode *inode, loff_t start, loff_t end,
struct folio **folios, int *num)
{
int numpages, ret = 0;
int numfolios, ret = 0;
struct address_space *mapping = inode->i_mapping;
unsigned long index;
loff_t last_page_bytes;
BUG_ON(start > end);
numpages = 0;
numfolios = 0;
last_page_bytes = PAGE_ALIGN(end);
index = start >> PAGE_SHIFT;
do {
pages[numpages] = find_or_create_page(mapping, index, GFP_NOFS);
if (!pages[numpages]) {
ret = -ENOMEM;
folios[numfolios] = __filemap_get_folio(mapping, index,
FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS);
if (IS_ERR(folios[numfolios])) {
ret = PTR_ERR(folios[numfolios]);
mlog_errno(ret);
goto out;
}
numpages++;
index++;
index = folio_next_index(folios[numfolios]);
numfolios++;
} while (index < (last_page_bytes >> PAGE_SHIFT));
out:
if (ret != 0) {
if (pages)
ocfs2_unlock_and_free_pages(pages, numpages);
numpages = 0;
if (folios)
ocfs2_unlock_and_free_folios(folios, numfolios);
numfolios = 0;
}
*num = numpages;
*num = numfolios;
return ret;
}
static int ocfs2_grab_eof_pages(struct inode *inode, loff_t start, loff_t end,
struct page **pages, int *num)
static int ocfs2_grab_eof_folios(struct inode *inode, loff_t start, loff_t end,
struct folio **folios, int *num)
{
struct super_block *sb = inode->i_sb;
BUG_ON(start >> OCFS2_SB(sb)->s_clustersize_bits !=
(end - 1) >> OCFS2_SB(sb)->s_clustersize_bits);
return ocfs2_grab_pages(inode, start, end, pages, num);
return ocfs2_grab_folios(inode, start, end, folios, num);
}
/*
@ -6940,8 +6936,8 @@ static int ocfs2_grab_eof_pages(struct inode *inode, loff_t start, loff_t end,
int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle,
u64 range_start, u64 range_end)
{
int ret = 0, numpages;
struct page **pages = NULL;
int ret = 0, numfolios;
struct folio **folios = NULL;
u64 phys;
unsigned int ext_flags;
struct super_block *sb = inode->i_sb;
@ -6954,17 +6950,17 @@ int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle,
return 0;
/*
* Avoid zeroing pages fully beyond current i_size. It is pointless as
* underlying blocks of those pages should be already zeroed out and
* Avoid zeroing folios fully beyond current i_size. It is pointless as
* underlying blocks of those folios should be already zeroed out and
* page writeback will skip them anyway.
*/
range_end = min_t(u64, range_end, i_size_read(inode));
if (range_start >= range_end)
return 0;
pages = kcalloc(ocfs2_pages_per_cluster(sb),
sizeof(struct page *), GFP_NOFS);
if (pages == NULL) {
folios = kcalloc(ocfs2_pages_per_cluster(sb),
sizeof(struct folio *), GFP_NOFS);
if (folios == NULL) {
ret = -ENOMEM;
mlog_errno(ret);
goto out;
@ -6985,18 +6981,18 @@ int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle,
if (phys == 0 || ext_flags & OCFS2_EXT_UNWRITTEN)
goto out;
ret = ocfs2_grab_eof_pages(inode, range_start, range_end, pages,
&numpages);
ret = ocfs2_grab_eof_folios(inode, range_start, range_end, folios,
&numfolios);
if (ret) {
mlog_errno(ret);
goto out;
}
ocfs2_zero_cluster_pages(inode, range_start, range_end, pages,
numpages, phys, handle);
ocfs2_zero_cluster_folios(inode, range_start, range_end, folios,
numfolios, phys, handle);
/*
* Initiate writeout of the pages we zero'd here. We don't
* Initiate writeout of the folios we zero'd here. We don't
* wait on them - the truncate_inode_pages() call later will
* do that for us.
*/
@ -7006,7 +7002,7 @@ int ocfs2_zero_range_for_truncate(struct inode *inode, handle_t *handle,
mlog_errno(ret);
out:
kfree(pages);
kfree(folios);
return ret;
}
@ -7059,7 +7055,7 @@ void ocfs2_set_inode_data_inline(struct inode *inode, struct ocfs2_dinode *di)
int ocfs2_convert_inline_data_to_extents(struct inode *inode,
struct buffer_head *di_bh)
{
int ret, has_data, num_pages = 0;
int ret, has_data, num_folios = 0;
int need_free = 0;
u32 bit_off, num;
handle_t *handle;
@ -7068,7 +7064,7 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
struct ocfs2_alloc_context *data_ac = NULL;
struct page *page = NULL;
struct folio *folio = NULL;
struct ocfs2_extent_tree et;
int did_quota = 0;
@ -7119,12 +7115,12 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
/*
* Save two copies, one for insert, and one that can
* be changed by ocfs2_map_and_dirty_page() below.
* be changed by ocfs2_map_and_dirty_folio() below.
*/
block = phys = ocfs2_clusters_to_blocks(inode->i_sb, bit_off);
ret = ocfs2_grab_eof_pages(inode, 0, page_end, &page,
&num_pages);
ret = ocfs2_grab_eof_folios(inode, 0, page_end, &folio,
&num_folios);
if (ret) {
mlog_errno(ret);
need_free = 1;
@ -7135,15 +7131,15 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
* This should populate the 1st page for us and mark
* it up to date.
*/
ret = ocfs2_read_inline_data(inode, page, di_bh);
ret = ocfs2_read_inline_data(inode, folio, di_bh);
if (ret) {
mlog_errno(ret);
need_free = 1;
goto out_unlock;
}
ocfs2_map_and_dirty_page(inode, handle, 0, page_end, page, 0,
&phys);
ocfs2_map_and_dirty_folio(inode, handle, 0, page_end, folio, 0,
&phys);
}
spin_lock(&oi->ip_lock);
@ -7174,8 +7170,8 @@ int ocfs2_convert_inline_data_to_extents(struct inode *inode,
}
out_unlock:
if (page)
ocfs2_unlock_and_free_pages(&page, num_pages);
if (folio)
ocfs2_unlock_and_free_folios(&folio, num_folios);
out_commit:
if (ret < 0 && did_quota)
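The hunks above repeatedly swap find_or_create_page() for __filemap_get_folio() with FGP_LOCK | FGP_ACCESSED | FGP_CREAT, which returns a locked folio and reports failure with an ERR_PTR() rather than NULL. A minimal sketch of the lookup/cleanup pairing used here (hypothetical caller, same flags as the patch):

#include <linux/pagemap.h>
#include <linux/swap.h>

/* Grab one locked folio at 'index', mirroring ocfs2_grab_folios() above. */
static struct folio *demo_grab_folio(struct address_space *mapping,
				     pgoff_t index)
{
	/* Failure is reported as ERR_PTR(), not NULL. */
	return __filemap_get_folio(mapping, index,
				   FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
				   GFP_NOFS);
}

/* Release it the way ocfs2_unlock_and_free_folios() above does. */
static void demo_release_folio(struct folio *folio)
{
	folio_unlock(folio);
	folio_mark_accessed(folio);
	folio_put(folio);
}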

View File

@ -254,11 +254,9 @@ static inline int ocfs2_is_empty_extent(struct ocfs2_extent_rec *rec)
return !rec->e_leaf_clusters;
}
int ocfs2_grab_pages(struct inode *inode, loff_t start, loff_t end,
struct page **pages, int *num);
void ocfs2_map_and_dirty_page(struct inode *inode, handle_t *handle,
unsigned int from, unsigned int to,
struct page *page, int zero, u64 *phys);
void ocfs2_map_and_dirty_folio(struct inode *inode, handle_t *handle,
size_t from, size_t to, struct folio *folio, int zero,
u64 *phys);
/*
* Structures which describe a path through a btree, and functions to
* manipulate them.

View File

@ -215,10 +215,9 @@ int ocfs2_get_block(struct inode *inode, sector_t iblock,
return err;
}
int ocfs2_read_inline_data(struct inode *inode, struct page *page,
int ocfs2_read_inline_data(struct inode *inode, struct folio *folio,
struct buffer_head *di_bh)
{
void *kaddr;
loff_t size;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
@ -230,7 +229,7 @@ int ocfs2_read_inline_data(struct inode *inode, struct page *page,
size = i_size_read(inode);
if (size > PAGE_SIZE ||
if (size > folio_size(folio) ||
size > ocfs2_max_inline_data_with_xattr(inode->i_sb, di)) {
ocfs2_error(inode->i_sb,
"Inode %llu has with inline data has bad size: %Lu\n",
@ -239,25 +238,18 @@ int ocfs2_read_inline_data(struct inode *inode, struct page *page,
return -EROFS;
}
kaddr = kmap_atomic(page);
if (size)
memcpy(kaddr, di->id2.i_data.id_data, size);
/* Clear the remaining part of the page */
memset(kaddr + size, 0, PAGE_SIZE - size);
flush_dcache_page(page);
kunmap_atomic(kaddr);
SetPageUptodate(page);
folio_fill_tail(folio, 0, di->id2.i_data.id_data, size);
folio_mark_uptodate(folio);
return 0;
}
static int ocfs2_readpage_inline(struct inode *inode, struct page *page)
static int ocfs2_readpage_inline(struct inode *inode, struct folio *folio)
{
int ret;
struct buffer_head *di_bh = NULL;
BUG_ON(!PageLocked(page));
BUG_ON(!folio_test_locked(folio));
BUG_ON(!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL));
ret = ocfs2_read_inode_block(inode, &di_bh);
@ -266,9 +258,9 @@ static int ocfs2_readpage_inline(struct inode *inode, struct page *page)
goto out;
}
ret = ocfs2_read_inline_data(inode, page, di_bh);
ret = ocfs2_read_inline_data(inode, folio, di_bh);
out:
unlock_page(page);
folio_unlock(folio);
brelse(di_bh);
return ret;
@ -283,7 +275,7 @@ static int ocfs2_read_folio(struct file *file, struct folio *folio)
trace_ocfs2_readpage((unsigned long long)oi->ip_blkno, folio->index);
ret = ocfs2_inode_lock_with_page(inode, NULL, 0, &folio->page);
ret = ocfs2_inode_lock_with_folio(inode, NULL, 0, folio);
if (ret != 0) {
if (ret == AOP_TRUNCATED_PAGE)
unlock = 0;
@ -305,7 +297,7 @@ static int ocfs2_read_folio(struct file *file, struct folio *folio)
}
/*
* i_size might have just been updated as we grabed the meta lock. We
* i_size might have just been updated as we grabbed the meta lock. We
* might now be discovering a truncate that hit on another node.
* block_read_full_folio->get_block freaks out if it is asked to read
* beyond the end of a file, so we check here. Callers
@ -322,7 +314,7 @@ static int ocfs2_read_folio(struct file *file, struct folio *folio)
}
if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL)
ret = ocfs2_readpage_inline(inode, &folio->page);
ret = ocfs2_readpage_inline(inode, folio);
else
ret = block_read_full_folio(folio, ocfs2_get_block);
unlock = 0;
@ -534,7 +526,7 @@ static void ocfs2_figure_cluster_boundaries(struct ocfs2_super *osb,
*
* from == to == 0 is code for "zero the entire cluster region"
*/
static void ocfs2_clear_page_regions(struct page *page,
static void ocfs2_clear_folio_regions(struct folio *folio,
struct ocfs2_super *osb, u32 cpos,
unsigned from, unsigned to)
{
@ -543,7 +535,7 @@ static void ocfs2_clear_page_regions(struct page *page,
ocfs2_figure_cluster_boundaries(osb, cpos, &cluster_start, &cluster_end);
kaddr = kmap_atomic(page);
kaddr = kmap_local_folio(folio, 0);
if (from || to) {
if (from > cluster_start)
@ -554,13 +546,13 @@ static void ocfs2_clear_page_regions(struct page *page,
memset(kaddr + cluster_start, 0, cluster_end - cluster_start);
}
kunmap_atomic(kaddr);
kunmap_local(kaddr);
}
/*
* Nonsparse file systems fully allocate before we get to the write
* code. This prevents ocfs2_write() from tagging the write as an
* allocating one, which means ocfs2_map_page_blocks() might try to
* allocating one, which means ocfs2_map_folio_blocks() might try to
* read-in the blocks at the tail of our file. Avoid reading them by
* testing i_size against each block offset.
*/
@ -585,11 +577,10 @@ static int ocfs2_should_read_blk(struct inode *inode, struct folio *folio,
*
* This will also skip zeroing, which is handled externally.
*/
int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
int ocfs2_map_folio_blocks(struct folio *folio, u64 *p_blkno,
struct inode *inode, unsigned int from,
unsigned int to, int new)
{
struct folio *folio = page_folio(page);
int ret = 0;
struct buffer_head *head, *bh, *wait[2], **wait_bh = wait;
unsigned int block_end, block_start;
@ -729,24 +720,24 @@ struct ocfs2_write_ctxt {
unsigned int w_large_pages;
/*
* Pages involved in this write.
* Folios involved in this write.
*
* w_target_page is the page being written to by the user.
* w_target_folio is the folio being written to by the user.
*
* w_pages is an array of pages which always contains
* w_target_page, and in the case of an allocating write with
* w_folios is an array of folios which always contains
* w_target_folio, and in the case of an allocating write with
* page_size < cluster size, it will contain zero'd and mapped
* pages adjacent to w_target_page which need to be written
* pages adjacent to w_target_folio which need to be written
* out in so that future reads from that region will get
* zero's.
*/
unsigned int w_num_pages;
struct page *w_pages[OCFS2_MAX_CTXT_PAGES];
struct page *w_target_page;
unsigned int w_num_folios;
struct folio *w_folios[OCFS2_MAX_CTXT_PAGES];
struct folio *w_target_folio;
/*
* w_target_locked is used for page_mkwrite path indicating no unlocking
* against w_target_page in ocfs2_write_end_nolock.
* against w_target_folio in ocfs2_write_end_nolock.
*/
unsigned int w_target_locked:1;
@ -771,40 +762,40 @@ struct ocfs2_write_ctxt {
unsigned int w_unwritten_count;
};
void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages)
void ocfs2_unlock_and_free_folios(struct folio **folios, int num_folios)
{
int i;
for(i = 0; i < num_pages; i++) {
if (pages[i]) {
unlock_page(pages[i]);
mark_page_accessed(pages[i]);
put_page(pages[i]);
}
for(i = 0; i < num_folios; i++) {
if (!folios[i])
continue;
folio_unlock(folios[i]);
folio_mark_accessed(folios[i]);
folio_put(folios[i]);
}
}
static void ocfs2_unlock_pages(struct ocfs2_write_ctxt *wc)
static void ocfs2_unlock_folios(struct ocfs2_write_ctxt *wc)
{
int i;
/*
* w_target_locked is only set to true in the page_mkwrite() case.
* The intent is to allow us to lock the target page from write_begin()
* to write_end(). The caller must hold a ref on w_target_page.
* to write_end(). The caller must hold a ref on w_target_folio.
*/
if (wc->w_target_locked) {
BUG_ON(!wc->w_target_page);
for (i = 0; i < wc->w_num_pages; i++) {
if (wc->w_target_page == wc->w_pages[i]) {
wc->w_pages[i] = NULL;
BUG_ON(!wc->w_target_folio);
for (i = 0; i < wc->w_num_folios; i++) {
if (wc->w_target_folio == wc->w_folios[i]) {
wc->w_folios[i] = NULL;
break;
}
}
mark_page_accessed(wc->w_target_page);
put_page(wc->w_target_page);
folio_mark_accessed(wc->w_target_folio);
folio_put(wc->w_target_folio);
}
ocfs2_unlock_and_free_pages(wc->w_pages, wc->w_num_pages);
ocfs2_unlock_and_free_folios(wc->w_folios, wc->w_num_folios);
}
static void ocfs2_free_unwritten_list(struct inode *inode,
@ -826,7 +817,7 @@ static void ocfs2_free_write_ctxt(struct inode *inode,
struct ocfs2_write_ctxt *wc)
{
ocfs2_free_unwritten_list(inode, &wc->w_unwritten_list);
ocfs2_unlock_pages(wc);
ocfs2_unlock_folios(wc);
brelse(wc->w_di_bh);
kfree(wc);
}
@ -869,29 +860,30 @@ static int ocfs2_alloc_write_ctxt(struct ocfs2_write_ctxt **wcp,
* and dirty so they'll be written out (in order to prevent uninitialised
* block data from leaking). And clear the new bit.
*/
static void ocfs2_zero_new_buffers(struct page *page, unsigned from, unsigned to)
static void ocfs2_zero_new_buffers(struct folio *folio, size_t from, size_t to)
{
unsigned int block_start, block_end;
struct buffer_head *head, *bh;
BUG_ON(!PageLocked(page));
if (!page_has_buffers(page))
BUG_ON(!folio_test_locked(folio));
head = folio_buffers(folio);
if (!head)
return;
bh = head = page_buffers(page);
bh = head;
block_start = 0;
do {
block_end = block_start + bh->b_size;
if (buffer_new(bh)) {
if (block_end > from && block_start < to) {
if (!PageUptodate(page)) {
if (!folio_test_uptodate(folio)) {
unsigned start, end;
start = max(from, block_start);
end = min(to, block_end);
zero_user_segment(page, start, end);
folio_zero_segment(folio, start, end);
set_buffer_uptodate(bh);
}
@ -916,29 +908,26 @@ static void ocfs2_write_failure(struct inode *inode,
int i;
unsigned from = user_pos & (PAGE_SIZE - 1),
to = user_pos + user_len;
struct page *tmppage;
if (wc->w_target_page)
ocfs2_zero_new_buffers(wc->w_target_page, from, to);
if (wc->w_target_folio)
ocfs2_zero_new_buffers(wc->w_target_folio, from, to);
for(i = 0; i < wc->w_num_pages; i++) {
tmppage = wc->w_pages[i];
for (i = 0; i < wc->w_num_folios; i++) {
struct folio *folio = wc->w_folios[i];
if (tmppage && page_has_buffers(tmppage)) {
if (folio && folio_buffers(folio)) {
if (ocfs2_should_order_data(inode))
ocfs2_jbd2_inode_add_write(wc->w_handle, inode,
user_pos, user_len);
block_commit_write(tmppage, from, to);
block_commit_write(&folio->page, from, to);
}
}
}
static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
struct ocfs2_write_ctxt *wc,
struct page *page, u32 cpos,
loff_t user_pos, unsigned user_len,
int new)
static int ocfs2_prepare_folio_for_write(struct inode *inode, u64 *p_blkno,
struct ocfs2_write_ctxt *wc, struct folio *folio, u32 cpos,
loff_t user_pos, unsigned user_len, int new)
{
int ret;
unsigned int map_from = 0, map_to = 0;
@ -951,20 +940,19 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
/* treat the write as new if the a hole/lseek spanned across
* the page boundary.
*/
new = new | ((i_size_read(inode) <= page_offset(page)) &&
(page_offset(page) <= user_pos));
new = new | ((i_size_read(inode) <= folio_pos(folio)) &&
(folio_pos(folio) <= user_pos));
if (page == wc->w_target_page) {
if (folio == wc->w_target_folio) {
map_from = user_pos & (PAGE_SIZE - 1);
map_to = map_from + user_len;
if (new)
ret = ocfs2_map_page_blocks(page, p_blkno, inode,
cluster_start, cluster_end,
new);
ret = ocfs2_map_folio_blocks(folio, p_blkno, inode,
cluster_start, cluster_end, new);
else
ret = ocfs2_map_page_blocks(page, p_blkno, inode,
map_from, map_to, new);
ret = ocfs2_map_folio_blocks(folio, p_blkno, inode,
map_from, map_to, new);
if (ret) {
mlog_errno(ret);
goto out;
@ -978,7 +966,7 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
}
} else {
/*
* If we haven't allocated the new page yet, we
* If we haven't allocated the new folio yet, we
* shouldn't be writing it out without copying user
* data. This is likely a math error from the caller.
*/
@ -987,8 +975,8 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
map_from = cluster_start;
map_to = cluster_end;
ret = ocfs2_map_page_blocks(page, p_blkno, inode,
cluster_start, cluster_end, new);
ret = ocfs2_map_folio_blocks(folio, p_blkno, inode,
cluster_start, cluster_end, new);
if (ret) {
mlog_errno(ret);
goto out;
@ -996,20 +984,20 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
}
/*
* Parts of newly allocated pages need to be zero'd.
* Parts of newly allocated folios need to be zero'd.
*
* Above, we have also rewritten 'to' and 'from' - as far as
* the rest of the function is concerned, the entire cluster
* range inside of a page needs to be written.
* range inside of a folio needs to be written.
*
* We can skip this if the page is up to date - it's already
* We can skip this if the folio is uptodate - it's already
* been zero'd from being read in as a hole.
*/
if (new && !PageUptodate(page))
ocfs2_clear_page_regions(page, OCFS2_SB(inode->i_sb),
if (new && !folio_test_uptodate(folio))
ocfs2_clear_folio_regions(folio, OCFS2_SB(inode->i_sb),
cpos, user_data_from, user_data_to);
flush_dcache_page(page);
flush_dcache_folio(folio);
out:
return ret;
@ -1018,11 +1006,9 @@ static int ocfs2_prepare_page_for_write(struct inode *inode, u64 *p_blkno,
/*
* This function will only grab one clusters worth of pages.
*/
static int ocfs2_grab_pages_for_write(struct address_space *mapping,
struct ocfs2_write_ctxt *wc,
u32 cpos, loff_t user_pos,
unsigned user_len, int new,
struct page *mmap_page)
static int ocfs2_grab_folios_for_write(struct address_space *mapping,
struct ocfs2_write_ctxt *wc, u32 cpos, loff_t user_pos,
unsigned user_len, int new, struct folio *mmap_folio)
{
int ret = 0, i;
unsigned long start, target_index, end_index, index;
@ -1039,7 +1025,7 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
* last page of the write.
*/
if (new) {
wc->w_num_pages = ocfs2_pages_per_cluster(inode->i_sb);
wc->w_num_folios = ocfs2_pages_per_cluster(inode->i_sb);
start = ocfs2_align_clusters_to_page_index(inode->i_sb, cpos);
/*
* We need the index *past* the last page we could possibly
@ -1049,15 +1035,15 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
last_byte = max(user_pos + user_len, i_size_read(inode));
BUG_ON(last_byte < 1);
end_index = ((last_byte - 1) >> PAGE_SHIFT) + 1;
if ((start + wc->w_num_pages) > end_index)
wc->w_num_pages = end_index - start;
if ((start + wc->w_num_folios) > end_index)
wc->w_num_folios = end_index - start;
} else {
wc->w_num_pages = 1;
wc->w_num_folios = 1;
start = target_index;
}
end_index = (user_pos + user_len - 1) >> PAGE_SHIFT;
for(i = 0; i < wc->w_num_pages; i++) {
for(i = 0; i < wc->w_num_folios; i++) {
index = start + i;
if (index >= target_index && index <= end_index &&
@ -1067,37 +1053,38 @@ static int ocfs2_grab_pages_for_write(struct address_space *mapping,
* and wants us to directly use the page
* passed in.
*/
lock_page(mmap_page);
folio_lock(mmap_folio);
/* Exit and let the caller retry */
if (mmap_page->mapping != mapping) {
WARN_ON(mmap_page->mapping);
unlock_page(mmap_page);
if (mmap_folio->mapping != mapping) {
WARN_ON(mmap_folio->mapping);
folio_unlock(mmap_folio);
ret = -EAGAIN;
goto out;
}
get_page(mmap_page);
wc->w_pages[i] = mmap_page;
folio_get(mmap_folio);
wc->w_folios[i] = mmap_folio;
wc->w_target_locked = true;
} else if (index >= target_index && index <= end_index &&
wc->w_type == OCFS2_WRITE_DIRECT) {
/* Direct write has no mapping page. */
wc->w_pages[i] = NULL;
wc->w_folios[i] = NULL;
continue;
} else {
wc->w_pages[i] = find_or_create_page(mapping, index,
GFP_NOFS);
if (!wc->w_pages[i]) {
ret = -ENOMEM;
wc->w_folios[i] = __filemap_get_folio(mapping, index,
FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
GFP_NOFS);
if (IS_ERR(wc->w_folios[i])) {
ret = PTR_ERR(wc->w_folios[i]);
mlog_errno(ret);
goto out;
}
}
wait_for_stable_page(wc->w_pages[i]);
folio_wait_stable(wc->w_folios[i]);
if (index == target_index)
wc->w_target_page = wc->w_pages[i];
wc->w_target_folio = wc->w_folios[i];
}
out:
if (ret)
@ -1181,19 +1168,18 @@ static int ocfs2_write_cluster(struct address_space *mapping,
if (!should_zero)
p_blkno += (user_pos >> inode->i_sb->s_blocksize_bits) & (u64)(bpc - 1);
for(i = 0; i < wc->w_num_pages; i++) {
for (i = 0; i < wc->w_num_folios; i++) {
int tmpret;
/* This is the direct io target page. */
if (wc->w_pages[i] == NULL) {
if (wc->w_folios[i] == NULL) {
p_blkno += (1 << (PAGE_SHIFT - inode->i_sb->s_blocksize_bits));
continue;
}
tmpret = ocfs2_prepare_page_for_write(inode, &p_blkno, wc,
wc->w_pages[i], cpos,
user_pos, user_len,
should_zero);
tmpret = ocfs2_prepare_folio_for_write(inode, &p_blkno, wc,
wc->w_folios[i], cpos, user_pos, user_len,
should_zero);
if (tmpret) {
mlog_errno(tmpret);
if (ret == 0)
@ -1472,7 +1458,7 @@ static int ocfs2_write_begin_inline(struct address_space *mapping,
{
int ret;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct page *page;
struct folio *folio;
handle_t *handle;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
@ -1483,19 +1469,21 @@ static int ocfs2_write_begin_inline(struct address_space *mapping,
goto out;
}
page = find_or_create_page(mapping, 0, GFP_NOFS);
if (!page) {
folio = __filemap_get_folio(mapping, 0,
FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS);
if (IS_ERR(folio)) {
ocfs2_commit_trans(osb, handle);
ret = -ENOMEM;
ret = PTR_ERR(folio);
mlog_errno(ret);
goto out;
}
/*
* If we don't set w_num_pages then this page won't get unlocked
* If we don't set w_num_folios then this folio won't get unlocked
* and freed on cleanup of the write context.
*/
wc->w_pages[0] = wc->w_target_page = page;
wc->w_num_pages = 1;
wc->w_target_folio = folio;
wc->w_folios[0] = folio;
wc->w_num_folios = 1;
ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), wc->w_di_bh,
OCFS2_JOURNAL_ACCESS_WRITE);
@ -1509,8 +1497,8 @@ static int ocfs2_write_begin_inline(struct address_space *mapping,
if (!(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
ocfs2_set_inode_data_inline(inode, di);
if (!PageUptodate(page)) {
ret = ocfs2_read_inline_data(inode, page, wc->w_di_bh);
if (!folio_test_uptodate(folio)) {
ret = ocfs2_read_inline_data(inode, folio, wc->w_di_bh);
if (ret) {
ocfs2_commit_trans(osb, handle);
@ -1533,9 +1521,8 @@ int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size)
}
static int ocfs2_try_to_write_inline_data(struct address_space *mapping,
struct inode *inode, loff_t pos,
unsigned len, struct page *mmap_page,
struct ocfs2_write_ctxt *wc)
struct inode *inode, loff_t pos, size_t len,
struct folio *mmap_folio, struct ocfs2_write_ctxt *wc)
{
int ret, written = 0;
loff_t end = pos + len;
@ -1550,7 +1537,7 @@ static int ocfs2_try_to_write_inline_data(struct address_space *mapping,
* Handle inodes which already have inline data 1st.
*/
if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
if (mmap_page == NULL &&
if (mmap_folio == NULL &&
ocfs2_size_fits_inline_data(wc->w_di_bh, end))
goto do_inline_write;
@ -1574,7 +1561,7 @@ static int ocfs2_try_to_write_inline_data(struct address_space *mapping,
* Check whether the write can fit.
*/
di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
if (mmap_page ||
if (mmap_folio ||
end > ocfs2_max_inline_data_with_xattr(inode->i_sb, di))
return 0;
@ -1641,9 +1628,9 @@ static int ocfs2_zero_tail(struct inode *inode, struct buffer_head *di_bh,
}
int ocfs2_write_begin_nolock(struct address_space *mapping,
loff_t pos, unsigned len, ocfs2_write_type_t type,
struct folio **foliop, void **fsdata,
struct buffer_head *di_bh, struct page *mmap_page)
loff_t pos, unsigned len, ocfs2_write_type_t type,
struct folio **foliop, void **fsdata,
struct buffer_head *di_bh, struct folio *mmap_folio)
{
int ret, cluster_of_pages, credits = OCFS2_INODE_UPDATE_CREDITS;
unsigned int clusters_to_alloc, extents_to_split, clusters_need = 0;
@ -1666,7 +1653,7 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
if (ocfs2_supports_inline_data(osb)) {
ret = ocfs2_try_to_write_inline_data(mapping, inode, pos, len,
mmap_page, wc);
mmap_folio, wc);
if (ret == 1) {
ret = 0;
goto success;
@ -1718,7 +1705,7 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
(unsigned long long)OCFS2_I(inode)->ip_blkno,
(long long)i_size_read(inode),
le32_to_cpu(di->i_clusters),
pos, len, type, mmap_page,
pos, len, type, mmap_folio,
clusters_to_alloc, extents_to_split);
/*
@ -1789,21 +1776,21 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
}
/*
* Fill our page array first. That way we've grabbed enough so
* Fill our folio array first. That way we've grabbed enough so
* that we can zero and flush if we error after adding the
* extent.
*/
ret = ocfs2_grab_pages_for_write(mapping, wc, wc->w_cpos, pos, len,
cluster_of_pages, mmap_page);
ret = ocfs2_grab_folios_for_write(mapping, wc, wc->w_cpos, pos, len,
cluster_of_pages, mmap_folio);
if (ret) {
/*
* ocfs2_grab_pages_for_write() returns -EAGAIN if it could not lock
* the target page. In this case, we exit with no error and no target
* page. This will trigger the caller, page_mkwrite(), to re-try
* the operation.
* ocfs2_grab_folios_for_write() returns -EAGAIN if it
* could not lock the target folio. In this case, we exit
* with no error and no target folio. This will trigger
* the caller, page_mkwrite(), to re-try the operation.
*/
if (type == OCFS2_WRITE_MMAP && ret == -EAGAIN) {
BUG_ON(wc->w_target_page);
BUG_ON(wc->w_target_folio);
ret = 0;
goto out_quota;
}
@ -1826,7 +1813,7 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
success:
if (foliop)
*foliop = page_folio(wc->w_target_page);
*foliop = wc->w_target_folio;
*fsdata = wc;
return 0;
out_quota:
@ -1845,7 +1832,7 @@ int ocfs2_write_begin_nolock(struct address_space *mapping,
* to VM code.
*/
if (wc->w_target_locked)
unlock_page(mmap_page);
folio_unlock(mmap_folio);
ocfs2_free_write_ctxt(inode, wc);
@ -1924,18 +1911,15 @@ static void ocfs2_write_end_inline(struct inode *inode, loff_t pos,
struct ocfs2_dinode *di,
struct ocfs2_write_ctxt *wc)
{
void *kaddr;
if (unlikely(*copied < len)) {
if (!PageUptodate(wc->w_target_page)) {
if (!folio_test_uptodate(wc->w_target_folio)) {
*copied = 0;
return;
}
}
kaddr = kmap_atomic(wc->w_target_page);
memcpy(di->id2.i_data.id_data + pos, kaddr + pos, *copied);
kunmap_atomic(kaddr);
memcpy_from_folio(di->id2.i_data.id_data + pos, wc->w_target_folio,
pos, *copied);
trace_ocfs2_write_end_inline(
(unsigned long long)OCFS2_I(inode)->ip_blkno,
@ -1944,17 +1928,16 @@ static void ocfs2_write_end_inline(struct inode *inode, loff_t pos,
le16_to_cpu(di->i_dyn_features));
}
int ocfs2_write_end_nolock(struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied, void *fsdata)
int ocfs2_write_end_nolock(struct address_space *mapping, loff_t pos,
unsigned len, unsigned copied, void *fsdata)
{
int i, ret;
unsigned from, to, start = pos & (PAGE_SIZE - 1);
size_t from, to, start = pos & (PAGE_SIZE - 1);
struct inode *inode = mapping->host;
struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
struct ocfs2_write_ctxt *wc = fsdata;
struct ocfs2_dinode *di = (struct ocfs2_dinode *)wc->w_di_bh->b_data;
handle_t *handle = wc->w_handle;
struct page *tmppage;
BUG_ON(!list_empty(&wc->w_unwritten_list));
@ -1973,44 +1956,44 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
goto out_write_size;
}
if (unlikely(copied < len) && wc->w_target_page) {
if (unlikely(copied < len) && wc->w_target_folio) {
loff_t new_isize;
if (!PageUptodate(wc->w_target_page))
if (!folio_test_uptodate(wc->w_target_folio))
copied = 0;
new_isize = max_t(loff_t, i_size_read(inode), pos + copied);
if (new_isize > page_offset(wc->w_target_page))
ocfs2_zero_new_buffers(wc->w_target_page, start+copied,
if (new_isize > folio_pos(wc->w_target_folio))
ocfs2_zero_new_buffers(wc->w_target_folio, start+copied,
start+len);
else {
/*
* When page is fully beyond new isize (data copy
* failed), do not bother zeroing the page. Invalidate
* When folio is fully beyond new isize (data copy
* failed), do not bother zeroing the folio. Invalidate
* it instead so that writeback does not get confused
* put page & buffer dirty bits into inconsistent
* state.
*/
block_invalidate_folio(page_folio(wc->w_target_page),
0, PAGE_SIZE);
block_invalidate_folio(wc->w_target_folio, 0,
folio_size(wc->w_target_folio));
}
}
if (wc->w_target_page)
flush_dcache_page(wc->w_target_page);
if (wc->w_target_folio)
flush_dcache_folio(wc->w_target_folio);
for(i = 0; i < wc->w_num_pages; i++) {
tmppage = wc->w_pages[i];
for (i = 0; i < wc->w_num_folios; i++) {
struct folio *folio = wc->w_folios[i];
/* This is the direct io target page. */
if (tmppage == NULL)
/* This is the direct io target folio */
if (folio == NULL)
continue;
if (tmppage == wc->w_target_page) {
if (folio == wc->w_target_folio) {
from = wc->w_target_from;
to = wc->w_target_to;
BUG_ON(from > PAGE_SIZE ||
to > PAGE_SIZE ||
BUG_ON(from > folio_size(folio) ||
to > folio_size(folio) ||
to < from);
} else {
/*
@ -2019,19 +2002,17 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
* to flush their entire range.
*/
from = 0;
to = PAGE_SIZE;
to = folio_size(folio);
}
if (page_has_buffers(tmppage)) {
if (folio_buffers(folio)) {
if (handle && ocfs2_should_order_data(inode)) {
loff_t start_byte =
((loff_t)tmppage->index << PAGE_SHIFT) +
from;
loff_t start_byte = folio_pos(folio) + from;
loff_t length = to - from;
ocfs2_jbd2_inode_add_write(handle, inode,
start_byte, length);
}
block_commit_write(tmppage, from, to);
block_commit_write(&folio->page, from, to);
}
}
@ -2060,7 +2041,7 @@ int ocfs2_write_end_nolock(struct address_space *mapping,
* this lock and will ask for the page lock when flushing the data.
* put it here to preserve the unlock order.
*/
ocfs2_unlock_pages(wc);
ocfs2_unlock_folios(wc);
if (handle)
ocfs2_commit_trans(osb, handle);
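ocfs2_write_end_inline() above also drops its open-coded kmap_atomic()/memcpy()/kunmap_atomic() sequence in favour of memcpy_from_folio(), which maps the folio internally. A minimal sketch of that helper pattern (hypothetical wrapper, not from this patch):

#include <linux/highmem.h>

/* Copy 'len' bytes out of a folio, starting at 'offset', into 'dst'. */
static void demo_copy_from_folio(char *dst, struct folio *folio,
				 size_t offset, size_t len)
{
	/* Replaces: kaddr = kmap_atomic(page); memcpy(dst, kaddr + offset, len);
	 * kunmap_atomic(kaddr); */
	memcpy_from_folio(dst, folio, offset, len);
}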

View File

@ -8,16 +8,11 @@
#include <linux/fs.h>
handle_t *ocfs2_start_walk_page_trans(struct inode *inode,
struct page *page,
unsigned from,
unsigned to);
int ocfs2_map_page_blocks(struct page *page, u64 *p_blkno,
int ocfs2_map_folio_blocks(struct folio *folio, u64 *p_blkno,
struct inode *inode, unsigned int from,
unsigned int to, int new);
void ocfs2_unlock_and_free_pages(struct page **pages, int num_pages);
void ocfs2_unlock_and_free_folios(struct folio **folios, int num_folios);
int walk_page_buffers( handle_t *handle,
struct buffer_head *head,
@ -37,11 +32,11 @@ typedef enum {
} ocfs2_write_type_t;
int ocfs2_write_begin_nolock(struct address_space *mapping,
loff_t pos, unsigned len, ocfs2_write_type_t type,
struct folio **foliop, void **fsdata,
struct buffer_head *di_bh, struct page *mmap_page);
loff_t pos, unsigned len, ocfs2_write_type_t type,
struct folio **foliop, void **fsdata,
struct buffer_head *di_bh, struct folio *mmap_folio);
int ocfs2_read_inline_data(struct inode *inode, struct page *page,
int ocfs2_read_inline_data(struct inode *inode, struct folio *folio,
struct buffer_head *di_bh);
int ocfs2_size_fits_inline_data(struct buffer_head *di_bh, u64 new_size);

View File

@ -3,6 +3,7 @@
* Copyright (C) 2004, 2005 Oracle. All rights reserved.
*/
#include "linux/kstrtox.h"
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/jiffies.h>
@@ -1020,7 +1021,7 @@ static int o2hb_check_slot(struct o2hb_region *reg,
if (list_empty(&slot->ds_live_item))
goto out;
/* live nodes only go dead after enough consequtive missed
/* live nodes only go dead after enough consecutive missed
* samples.. reset the missed counter whenever we see
* activity */
if (slot->ds_equal_samples >= o2hb_dead_threshold || gen_changed) {
@@ -1535,10 +1536,11 @@ static int o2hb_read_block_input(struct o2hb_region *reg,
{
unsigned long bytes;
char *p = (char *)page;
int ret;
bytes = simple_strtoul(p, &p, 0);
if (!p || (*p && (*p != '\n')))
return -EINVAL;
ret = kstrtoul(p, 0, &bytes);
if (ret)
return ret;
/* Heartbeat and fs min / max block sizes are the same. */
if (bytes > 4096 || bytes < 512)
@@ -1622,13 +1624,14 @@ static ssize_t o2hb_region_blocks_store(struct config_item *item,
struct o2hb_region *reg = to_o2hb_region(item);
unsigned long tmp;
char *p = (char *)page;
int ret;
if (reg->hr_bdev_file)
return -EINVAL;
tmp = simple_strtoul(p, &p, 0);
if (!p || (*p && (*p != '\n')))
return -EINVAL;
ret = kstrtoul(p, 0, &tmp);
if (ret)
return ret;
if (tmp > O2NM_MAX_NODES || tmp == 0)
return -ERANGE;
@@ -1776,8 +1779,8 @@ static ssize_t o2hb_region_dev_store(struct config_item *item,
if (o2nm_this_node() == O2NM_MAX_NODES)
return -EINVAL;
fd = simple_strtol(p, &p, 0);
if (!p || (*p && (*p != '\n')))
ret = kstrtol(p, 0, &fd);
if (ret < 0)
return -EINVAL;
if (fd < 0 || fd >= INT_MAX)
@@ -2136,10 +2139,11 @@ static ssize_t o2hb_heartbeat_group_dead_threshold_store(struct config_item *ite
{
unsigned long tmp;
char *p = (char *)page;
int ret;
tmp = simple_strtoul(p, &p, 10);
if (!p || (*p && (*p != '\n')))
return -EINVAL;
ret = kstrtoul(p, 10, &tmp);
if (ret)
return ret;
/* this will validate ranges for us. */
o2hb_dead_threshold_set((unsigned int) tmp);
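
The hunks above all make the same conversion: an open-coded simple_strtoul() call followed by a manual end-pointer check becomes a single kstrtoul()/kstrtol() call, which accepts an optional trailing newline, rejects any other trailing characters, and reports overflow as a negative errno. A minimal sketch of the resulting pattern follows; the demo_ name and the 512..4096 range check are illustrative only, not o2hb code.

#include <linux/kstrtox.h>
#include <linux/types.h>
#include <linux/errno.h>

/* Hypothetical configfs-style store helper; not actual o2hb code. */
static ssize_t demo_store_block_bytes(const char *page, size_t count)
{
	unsigned long bytes;
	int ret;

	/* base 0 accepts decimal, octal ("0...") and hex ("0x...") input */
	ret = kstrtoul(page, 0, &bytes);
	if (ret)
		return ret;		/* -EINVAL or -ERANGE from kstrtoul() */

	if (bytes < 512 || bytes > 4096)
		return -ERANGE;		/* caller-specific sanity check */

	return count;			/* a store handler returns bytes consumed */
}
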


@@ -29,7 +29,7 @@
* just calling printk() so that this can eventually make its way through
* relayfs along with the debugging messages. Everything else gets KERN_DEBUG.
* The inline tests and macro dance give GCC the opportunity to quite cleverly
* only emit the appropriage printk() when the caller passes in a constant
* only emit the appropriate printk() when the caller passes in a constant
* mask, as is almost always the case.
*
* All this bitmask nonsense is managed from the files under


@@ -23,7 +23,7 @@
* race between when we see a node start heartbeating and when we connect
* to it.
*
* So nodes that are in this transtion put a hold on the quorum decision
* So nodes that are in this transition put a hold on the quorum decision
* with a counter. As they fall out of this transition they drop the count
* and if they're the last, they fire off the decision.
*/
@@ -189,7 +189,7 @@ static void o2quo_clear_hold(struct o2quo_state *qs, u8 node)
}
/* as a node comes up we delay the quorum decision until we know the fate of
* the connection. the hold will be droped in conn_up or hb_down. it might be
* the connection. the hold will be dropped in conn_up or hb_down. it might be
* perpetuated by con_err until hb_down. if we already have a conn, we might
* be dropping a hold that conn_up got. */
void o2quo_hb_up(u8 node)
@@ -256,7 +256,7 @@ void o2quo_hb_still_up(u8 node)
}
/* This is analogous to hb_up. as a node's connection comes up we delay the
* quorum decision until we see it heartbeating. the hold will be droped in
* quorum decision until we see it heartbeating. the hold will be dropped in
* hb_up or hb_down. it might be perpetuated by con_err until hb_down. if
* it's already heartbeating we might be dropping a hold that conn_up got.
* */


@@ -5,13 +5,13 @@
*
* ----
*
* Callers for this were originally written against a very simple synchronus
* Callers for this were originally written against a very simple synchronous
* API. This implementation reflects those simple callers. Some day I'm sure
* we'll need to move to a more robust posting/callback mechanism.
*
* Transmit calls pass in kernel virtual addresses and block copying this into
* the socket's tx buffers via a usual blocking sendmsg. They'll block waiting
* for a failed socket to timeout. TX callers can also pass in a poniter to an
* for a failed socket to timeout. TX callers can also pass in a pointer to an
* 'int' which gets filled with an errno off the wire in response to the
* message they send.
*
@@ -101,7 +101,7 @@ static struct socket *o2net_listen_sock;
* o2net_wq. teardown detaches the callbacks before destroying the workqueue.
* quorum work is queued as sock containers are shutdown.. stop_listening
* tears down all the node's sock containers, preventing future shutdowns
* and queued quroum work, before canceling delayed quorum work and
* and queued quorum work, before canceling delayed quorum work and
* destroying the work queue.
*/
static struct workqueue_struct *o2net_wq;
@@ -1419,7 +1419,7 @@ static int o2net_advance_rx(struct o2net_sock_container *sc)
return ret;
}
/* this work func is triggerd by data ready. it reads until it can read no
/* this work func is triggered by data ready. it reads until it can read no
* more. it interprets 0, eof, as fatal. if data_ready hits while we're doing
* our work the work struct will be marked and we'll be called again. */
static void o2net_rx_until_empty(struct work_struct *work)


@@ -118,7 +118,7 @@ struct dlm_lockstatus {
#define LKM_VALBLK 0x00000100 /* lock value block request */
#define LKM_NOQUEUE 0x00000200 /* non blocking request */
#define LKM_CONVERT 0x00000400 /* conversion request */
#define LKM_NODLCKWT 0x00000800 /* this lock wont deadlock (U) */
#define LKM_NODLCKWT 0x00000800 /* this lock won't deadlock (U) */
#define LKM_UNLOCK 0x00001000 /* deallocate this lock */
#define LKM_CANCEL 0x00002000 /* cancel conversion request */
#define LKM_DEQALL 0x00004000 /* remove all locks held by proc (U) */


@@ -207,7 +207,7 @@ void dlm_complete_recovery_thread(struct dlm_ctxt *dlm)
* 1) all recovery threads cluster wide will work on recovering
* ONE node at a time
* 2) negotiate who will take over all the locks for the dead node.
* thats right... ALL the locks.
* that's right... ALL the locks.
* 3) once a new master is chosen, everyone scans all locks
* and moves aside those mastered by the dead guy
* 4) each of these locks should be locked until recovery is done
@@ -1469,7 +1469,7 @@ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data,
* The first one is handled at the end of this function. The
* other two are handled in the worker thread after locks have
* been attached. Yes, we don't wait for purge time to match
* kref_init. The lockres will still have atleast one ref
* kref_init. The lockres will still have at least one ref
* added because it is in the hash __dlm_insert_lockres() */
extra_refs++;
@@ -1735,7 +1735,7 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
spin_unlock(&res->spinlock);
}
} else {
/* put.. incase we are not the master */
/* put.. in case we are not the master */
spin_unlock(&res->spinlock);
dlm_lockres_put(res);
}


@@ -20,6 +20,7 @@
#include <linux/module.h>
#include <linux/fs.h>
#include <linux/fs_context.h>
#include <linux/pagemap.h>
#include <linux/types.h>
#include <linux/slab.h>
@@ -506,9 +507,7 @@ static int dlmfs_unlink(struct inode *dir,
return status;
}
static int dlmfs_fill_super(struct super_block * sb,
void * data,
int silent)
static int dlmfs_fill_super(struct super_block *sb, struct fs_context *fc)
{
sb->s_maxbytes = MAX_LFS_FILESIZE;
sb->s_blocksize = PAGE_SIZE;
@@ -556,17 +555,27 @@ static const struct inode_operations dlmfs_file_inode_operations = {
.setattr = dlmfs_file_setattr,
};
static struct dentry *dlmfs_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
static int dlmfs_get_tree(struct fs_context *fc)
{
return mount_nodev(fs_type, flags, data, dlmfs_fill_super);
return get_tree_nodev(fc, dlmfs_fill_super);
}
static const struct fs_context_operations dlmfs_context_ops = {
.get_tree = dlmfs_get_tree,
};
static int dlmfs_init_fs_context(struct fs_context *fc)
{
fc->ops = &dlmfs_context_ops;
return 0;
}
static struct file_system_type dlmfs_fs_type = {
.owner = THIS_MODULE,
.name = "ocfs2_dlmfs",
.mount = dlmfs_mount,
.kill_sb = kill_litter_super,
.init_fs_context = dlmfs_init_fs_context,
};
MODULE_ALIAS_FS("ocfs2_dlmfs");


@@ -794,7 +794,7 @@ void ocfs2_lock_res_free(struct ocfs2_lock_res *res)
/*
* Keep a list of processes who have interest in a lockres.
* Note: this is now only uesed for check recursive cluster locking.
* Note: this is now only used for check recursive cluster locking.
*/
static inline void ocfs2_add_holder(struct ocfs2_lock_res *lockres,
struct ocfs2_lock_holder *oh)
@@ -2529,30 +2529,28 @@ int ocfs2_inode_lock_full_nested(struct inode *inode,
/*
* This is working around a lock inversion between tasks acquiring DLM
* locks while holding a page lock and the downconvert thread which
* blocks dlm lock acquiry while acquiring page locks.
* locks while holding a folio lock and the downconvert thread which
* blocks dlm lock acquiry while acquiring folio locks.
*
* ** These _with_page variantes are only intended to be called from aop
* methods that hold page locks and return a very specific *positive* error
* ** These _with_folio variants are only intended to be called from aop
* methods that hold folio locks and return a very specific *positive* error
* code that aop methods pass up to the VFS -- test for errors with != 0. **
*
* The DLM is called such that it returns -EAGAIN if it would have
* blocked waiting for the downconvert thread. In that case we unlock
* our page so the downconvert thread can make progress. Once we've
* our folio so the downconvert thread can make progress. Once we've
* done this we have to return AOP_TRUNCATED_PAGE so the aop method
* that called us can bubble that back up into the VFS who will then
* immediately retry the aop call.
*/
int ocfs2_inode_lock_with_page(struct inode *inode,
struct buffer_head **ret_bh,
int ex,
struct page *page)
int ocfs2_inode_lock_with_folio(struct inode *inode,
struct buffer_head **ret_bh, int ex, struct folio *folio)
{
int ret;
ret = ocfs2_inode_lock_full(inode, ret_bh, ex, OCFS2_LOCK_NONBLOCK);
if (ret == -EAGAIN) {
unlock_page(page);
folio_unlock(folio);
/*
* If we can't get inode lock immediately, we should not return
* directly here, since this will lead to a softlockup problem.
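
Given that contract, a hypothetical aop caller would look roughly like the sketch below. This is not ocfs2's actual read path; it only assumes what the comment above states: the helper returns the positive AOP_TRUNCATED_PAGE after unlocking the folio itself, while any other non-zero result is an ordinary negative errno with the folio still locked.

static int demo_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct buffer_head *di_bh = NULL;
	int ret;

	ret = ocfs2_inode_lock_with_folio(inode, &di_bh, 0, folio);
	if (ret != 0) {
		if (ret != AOP_TRUNCATED_PAGE)
			/* real error: we still hold the folio lock */
			folio_unlock(folio);
		/* AOP_TRUNCATED_PAGE (positive) goes straight back to the
		 * VFS, which retries the aop call. */
		return ret;
	}

	/* ... read the folio with the cluster lock held; the real read
	 * path unlocks the folio once the data is up to date ... */

	ocfs2_inode_unlock(inode, 0);
	brelse(di_bh);
	return 0;
}
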
@@ -2630,7 +2628,7 @@ void ocfs2_inode_unlock(struct inode *inode,
}
/*
* This _tracker variantes are introduced to deal with the recursive cluster
* This _tracker variants are introduced to deal with the recursive cluster
* locking issue. The idea is to keep track of a lock holder on the stack of
* the current process. If there's a lock holder on the stack, we know the
* task context is already protected by cluster locking. Currently, they're
@@ -2735,7 +2733,7 @@ void ocfs2_inode_unlock_tracker(struct inode *inode,
struct ocfs2_lock_res *lockres;
lockres = &OCFS2_I(inode)->ip_inode_lockres;
/* had_lock means that the currect process already takes the cluster
/* had_lock means that the current process already takes the cluster
* lock previously.
* If had_lock is 1, we have nothing to do here.
* If had_lock is 0, we will release the lock.
@@ -3802,9 +3800,9 @@ static int ocfs2_unblock_lock(struct ocfs2_super *osb,
* set when the ast is received for an upconvert just before the
* OCFS2_LOCK_BUSY flag is cleared. Now if the fs received a bast
* on the heels of the ast, we want to delay the downconvert just
* enough to allow the up requestor to do its task. Because this
* enough to allow the up requester to do its task. Because this
* lock is in the blocked queue, the lock will be downconverted
* as soon as the requestor is done with the lock.
* as soon as the requester is done with the lock.
*/
if (lockres->l_flags & OCFS2_LOCK_UPCONVERT_FINISHING)
goto leave_requeue;


@@ -137,10 +137,8 @@ int ocfs2_inode_lock_full_nested(struct inode *inode,
int ex,
int arg_flags,
int subclass);
int ocfs2_inode_lock_with_page(struct inode *inode,
struct buffer_head **ret_bh,
int ex,
struct page *page);
int ocfs2_inode_lock_with_folio(struct inode *inode,
struct buffer_head **ret_bh, int ex, struct folio *folio);
/* Variants without special locking class or flags */
#define ocfs2_inode_lock_full(i, r, e, f)\
ocfs2_inode_lock_full_nested(i, r, e, f, OI_LS_NORMAL)


@@ -782,11 +782,11 @@ static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
goto out_commit_trans;
}
/* Get the offsets within the page that we want to zero */
zero_from = abs_from & (PAGE_SIZE - 1);
zero_to = abs_to & (PAGE_SIZE - 1);
/* Get the offsets within the folio that we want to zero */
zero_from = offset_in_folio(folio, abs_from);
zero_to = offset_in_folio(folio, abs_to);
if (!zero_to)
zero_to = PAGE_SIZE;
zero_to = folio_size(folio);
trace_ocfs2_write_zero_page(
(unsigned long long)OCFS2_I(inode)->ip_blkno,


@@ -1122,7 +1122,7 @@ static void ocfs2_clear_inode(struct inode *inode)
dquot_drop(inode);
/* To preven remote deletes we hold open lock before, now it
/* To prevent remote deletes we hold open lock before, now it
* is time to unlock PR and EX open locks. */
ocfs2_open_unlock(inode);
@@ -1437,7 +1437,7 @@ static int ocfs2_filecheck_validate_inode_block(struct super_block *sb,
* Call ocfs2_validate_meta_ecc() first since it has ecc repair
* function, but we should not return error immediately when ecc
* validation fails, because the reason is quite likely the invalid
* inode number inputed.
* inode number inputted.
*/
rc = ocfs2_validate_meta_ecc(sb, bh->b_data, &di->i_check);
if (rc) {


@@ -796,7 +796,7 @@ static int ocfs2_get_request_ptr(struct ocfs2_info *info, int idx,
/*
* OCFS2_IOC_INFO handles an array of requests passed from userspace.
*
* ocfs2_info_handle() recevies a large info aggregation, grab and
* ocfs2_info_handle() receives a large info aggregation, grab and
* validate the request count from header, then break it into small
* pieces, later specific handlers can handle them one by one.
*


@@ -1956,7 +1956,7 @@ int ocfs2_mark_dead_nodes(struct ocfs2_super *osb)
/*
* Scan timer should get fired every ORPHAN_SCAN_SCHEDULE_TIMEOUT. Add some
* randomness to the timeout to minimize multple nodes firing the timer at the
* randomness to the timeout to minimize multiple nodes firing the timer at the
* same time.
*/
static inline unsigned long ocfs2_orphan_scan_timeout(void)


@@ -44,13 +44,13 @@ static vm_fault_t ocfs2_fault(struct vm_fault *vmf)
}
static vm_fault_t __ocfs2_page_mkwrite(struct file *file,
struct buffer_head *di_bh, struct page *page)
struct buffer_head *di_bh, struct folio *folio)
{
int err;
vm_fault_t ret = VM_FAULT_NOPAGE;
struct inode *inode = file_inode(file);
struct address_space *mapping = inode->i_mapping;
loff_t pos = page_offset(page);
loff_t pos = folio_pos(folio);
unsigned int len = PAGE_SIZE;
pgoff_t last_index;
struct folio *locked_folio = NULL;
@@ -72,9 +72,9 @@ static vm_fault_t __ocfs2_page_mkwrite(struct file *file,
*
* Let VM retry with these cases.
*/
if ((page->mapping != inode->i_mapping) ||
(!PageUptodate(page)) ||
(page_offset(page) >= size))
if ((folio->mapping != inode->i_mapping) ||
!folio_test_uptodate(folio) ||
(pos >= size))
goto out;
/*
@@ -87,11 +87,11 @@ static vm_fault_t __ocfs2_page_mkwrite(struct file *file,
* worry about ocfs2_write_begin() skipping some buffer reads
* because the "write" would invalidate their data.
*/
if (page->index == last_index)
if (folio->index == last_index)
len = ((size - 1) & ~PAGE_MASK) + 1;
err = ocfs2_write_begin_nolock(mapping, pos, len, OCFS2_WRITE_MMAP,
&locked_folio, &fsdata, di_bh, page);
&locked_folio, &fsdata, di_bh, folio);
if (err) {
if (err != -ENOSPC)
mlog_errno(err);
@@ -112,7 +112,7 @@ static vm_fault_t __ocfs2_page_mkwrite(struct file *file,
static vm_fault_t ocfs2_page_mkwrite(struct vm_fault *vmf)
{
struct page *page = vmf->page;
struct folio *folio = page_folio(vmf->page);
struct inode *inode = file_inode(vmf->vma->vm_file);
struct buffer_head *di_bh = NULL;
sigset_t oldset;
@@ -141,7 +141,7 @@ static vm_fault_t ocfs2_page_mkwrite(struct vm_fault *vmf)
*/
down_write(&OCFS2_I(inode)->ip_alloc_sem);
ret = __ocfs2_page_mkwrite(vmf->vma->vm_file, di_bh, page);
ret = __ocfs2_page_mkwrite(vmf->vma->vm_file, di_bh, folio);
up_write(&OCFS2_I(inode)->ip_alloc_sem);


@@ -492,7 +492,7 @@ static int ocfs2_validate_and_adjust_move_goal(struct inode *inode,
bg = (struct ocfs2_group_desc *)gd_bh->b_data;
/*
* moving goal is not allowd to start with a group desc blok(#0 blk)
* moving goal is not allowed to start with a group desc blok(#0 blk)
* let's compromise to the latter cluster.
*/
if (range->me_goal == le64_to_cpu(bg->bg_blkno))
@@ -658,7 +658,7 @@ static int ocfs2_move_extent(struct ocfs2_move_extents_context *context,
/*
* probe the victim cluster group to find a proper
* region to fit wanted movement, it even will perfrom
* region to fit wanted movement, it even will perform
* a best-effort attempt by compromising to a threshold
* around the goal.
*/
@@ -920,7 +920,7 @@ static int ocfs2_move_extents(struct ocfs2_move_extents_context *context)
}
/*
* rememer ip_xattr_sem also needs to be held if necessary
* remember ip_xattr_sem also needs to be held if necessary
*/
down_write(&OCFS2_I(inode)->ip_alloc_sem);
@@ -1022,7 +1022,7 @@ int ocfs2_ioctl_move_extents(struct file *filp, void __user *argp)
context->range = &range;
/*
* ok, the default theshold for the defragmentation
* ok, the default threshold for the defragmentation
* is 1M, since our maximum clustersize was 1M also.
* any thought?
*/


@@ -132,7 +132,7 @@
* well as the name of the cluster being joined.
* mount.ocfs2 must pass in a matching stack name.
*
* If not set, the classic stack will be used. This is compatbile with
* If not set, the classic stack will be used. This is compatible with
* all older versions.
*/
#define OCFS2_FEATURE_INCOMPAT_USERSPACE_STACK 0x0080
@@ -143,7 +143,7 @@
/* Support for extended attributes */
#define OCFS2_FEATURE_INCOMPAT_XATTR 0x0200
/* Support for indexed directores */
/* Support for indexed directories */
#define OCFS2_FEATURE_INCOMPAT_INDEXED_DIRS 0x0400
/* Metadata checksum and error correction */
@@ -156,7 +156,7 @@
#define OCFS2_FEATURE_INCOMPAT_DISCONTIG_BG 0x2000
/*
* Incompat bit to indicate useable clusterinfo with stackflags for all
* Incompat bit to indicate usable clusterinfo with stackflags for all
* cluster stacks (userspace adnd o2cb). If this bit is set,
* INCOMPAT_USERSPACE_STACK becomes superfluous and thus should not be set.
*/
@@ -1083,7 +1083,7 @@ struct ocfs2_xattr_block {
struct ocfs2_xattr_header xb_header; /* xattr header if this
block contains xattr */
struct ocfs2_xattr_tree_root xb_root;/* xattr tree root if this
block cotains xattr
block contains xattr
tree. */
} xb_attrs;
};


@@ -215,7 +215,7 @@ struct ocfs2_move_extents {
movement less likely
to fail, may make fs
even more fragmented */
#define OCFS2_MOVE_EXT_FL_COMPLETE (0x00000004) /* Move or defragmenation
#define OCFS2_MOVE_EXT_FL_COMPLETE (0x00000004) /* Move or defragmentation
completely gets done.
*/


@@ -93,7 +93,7 @@ static char *ocfs2_lock_type_strings[] = {
[OCFS2_LOCK_TYPE_DATA] = "Data",
[OCFS2_LOCK_TYPE_SUPER] = "Super",
[OCFS2_LOCK_TYPE_RENAME] = "Rename",
/* Need to differntiate from [R]ename.. serializing writes is the
/* Need to differentiate from [R]ename.. serializing writes is the
* important job it does, anyway. */
[OCFS2_LOCK_TYPE_RW] = "Write/Read",
[OCFS2_LOCK_TYPE_DENTRY] = "Dentry",


@@ -1658,34 +1658,34 @@ TRACE_EVENT(ocfs2_remount,
);
TRACE_EVENT(ocfs2_fill_super,
TP_PROTO(void *sb, void *data, int silent),
TP_ARGS(sb, data, silent),
TP_PROTO(void *sb, void *fc, int silent),
TP_ARGS(sb, fc, silent),
TP_STRUCT__entry(
__field(void *, sb)
__field(void *, data)
__field(void *, fc)
__field(int, silent)
),
TP_fast_assign(
__entry->sb = sb;
__entry->data = data;
__entry->fc = fc;
__entry->silent = silent;
),
TP_printk("%p %p %d", __entry->sb,
__entry->data, __entry->silent)
__entry->fc, __entry->silent)
);
TRACE_EVENT(ocfs2_parse_options,
TP_PROTO(int is_remount, char *options),
TP_ARGS(is_remount, options),
TP_PROTO(int is_remount, const char *option),
TP_ARGS(is_remount, option),
TP_STRUCT__entry(
__field(int, is_remount)
__string(options, options)
__string(option, option)
),
TP_fast_assign(
__entry->is_remount = is_remount;
__assign_str(options);
__assign_str(option);
),
TP_printk("%d %s", __entry->is_remount, __get_str(options))
TP_printk("%d %s", __entry->is_remount, __get_str(option))
);
DEFINE_OCFS2_POINTER_EVENT(ocfs2_put_super);


@@ -2420,7 +2420,7 @@ static int ocfs2_calc_refcount_meta_credits(struct super_block *sb,
*
* If we will insert a new one, this is easy and only happens
* during adding refcounted flag to the extent, so we don't
* have a chance of spliting. We just need one record.
* have a chance of splitting. We just need one record.
*
* If the refcount rec already exists, that would be a little
* complicated. we may have to:
@@ -2610,11 +2610,11 @@ static inline unsigned int ocfs2_cow_align_length(struct super_block *sb,
/*
* Calculate out the start and number of virtual clusters we need to CoW.
*
* cpos is vitual start cluster position we want to do CoW in a
* cpos is virtual start cluster position we want to do CoW in a
* file and write_len is the cluster length.
* max_cpos is the place where we want to stop CoW intentionally.
*
* Normal we will start CoW from the beginning of extent record cotaining cpos.
* Normal we will start CoW from the beginning of extent record containing cpos.
* We try to break up extents on boundaries of MAX_CONTIG_BYTES so that we
* get good I/O from the resulting extent tree.
*/
@@ -2902,7 +2902,6 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
int ret = 0, partial;
struct super_block *sb = inode->i_sb;
u64 new_block = ocfs2_clusters_to_blocks(sb, new_cluster);
struct page *page;
pgoff_t page_index;
unsigned int from, to;
loff_t offset, end, map_end;
@@ -2921,6 +2920,7 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
end = i_size_read(inode);
while (offset < end) {
struct folio *folio;
page_index = offset >> PAGE_SHIFT;
map_end = ((loff_t)page_index + 1) << PAGE_SHIFT;
if (map_end > end)
@@ -2933,9 +2933,10 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
to = map_end & (PAGE_SIZE - 1);
retry:
page = find_or_create_page(mapping, page_index, GFP_NOFS);
if (!page) {
ret = -ENOMEM;
folio = __filemap_get_folio(mapping, page_index,
FGP_LOCK | FGP_ACCESSED | FGP_CREAT, GFP_NOFS);
if (IS_ERR(folio)) {
ret = PTR_ERR(folio);
mlog_errno(ret);
break;
}
@@ -2945,9 +2946,9 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
* page, so write it back.
*/
if (PAGE_SIZE <= OCFS2_SB(sb)->s_clustersize) {
if (PageDirty(page)) {
unlock_page(page);
put_page(page);
if (folio_test_dirty(folio)) {
folio_unlock(folio);
folio_put(folio);
ret = filemap_write_and_wait_range(mapping,
offset, map_end - 1);
@@ -2955,9 +2956,7 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
}
}
if (!PageUptodate(page)) {
struct folio *folio = page_folio(page);
if (!folio_test_uptodate(folio)) {
ret = block_read_full_folio(folio, ocfs2_get_block);
if (ret) {
mlog_errno(ret);
@@ -2966,8 +2965,8 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
folio_lock(folio);
}
if (page_has_buffers(page)) {
ret = walk_page_buffers(handle, page_buffers(page),
if (folio_buffers(folio)) {
ret = walk_page_buffers(handle, folio_buffers(folio),
from, to, &partial,
ocfs2_clear_cow_buffer);
if (ret) {
@@ -2976,14 +2975,12 @@ int ocfs2_duplicate_clusters_by_page(handle_t *handle,
}
}
ocfs2_map_and_dirty_page(inode,
handle, from, to,
page, 0, &new_block);
mark_page_accessed(page);
ocfs2_map_and_dirty_folio(inode, handle, from, to,
folio, 0, &new_block);
folio_mark_accessed(folio);
unlock:
unlock_page(page);
put_page(page);
page = NULL;
folio_unlock(folio);
folio_put(folio);
offset = map_end;
if (ret)
break;


@@ -31,7 +31,7 @@ struct ocfs2_alloc_reservation {
#define OCFS2_RESV_FLAG_INUSE 0x01 /* Set when r_node is part of a btree */
#define OCFS2_RESV_FLAG_TMP 0x02 /* Temporary reservation, will be
* destroyed immedately after use */
* destroyed immediately after use */
#define OCFS2_RESV_FLAG_DIR 0x04 /* Reservation is for an unindexed
* directory btree */
@@ -125,7 +125,7 @@ int ocfs2_resmap_resv_bits(struct ocfs2_reservation_map *resmap,
/**
* ocfs2_resmap_claimed_bits() - Tell the reservation code that bits were used.
* @resmap: reservations bitmap
* @resv: optional reservation to recalulate based on new bitmap
* @resv: optional reservation to recalculate based on new bitmap
* @cstart: start of allocation in clusters
* @clen: end of allocation in clusters.
*


@@ -227,7 +227,7 @@ static int o2cb_dlm_lock_status(struct ocfs2_dlm_lksb *lksb)
}
/*
* o2dlm aways has a "valid" LVB. If the dlm loses track of the LVB
* o2dlm always has a "valid" LVB. If the dlm loses track of the LVB
* contents, it will zero out the LVB. Thus the caller can always trust
* the contents.
*/


@@ -210,7 +210,7 @@ struct ocfs2_stack_operations {
struct file_lock *fl);
/*
* This is an optoinal debugging hook. If provided, the
* This is an optional debugging hook. If provided, the
* stack can dump debugging information about this lock.
*/
void (*dump_lksb)(struct ocfs2_dlm_lksb *lksb);


@@ -19,10 +19,10 @@
#include <linux/blkdev.h>
#include <linux/socket.h>
#include <linux/inet.h>
#include <linux/parser.h>
#include <linux/fs_parser.h>
#include <linux/fs_context.h>
#include <linux/crc32.h>
#include <linux/debugfs.h>
#include <linux/mount.h>
#include <linux/seq_file.h>
#include <linux/quotaops.h>
#include <linux/signal.h>
@@ -80,17 +80,15 @@ struct mount_options
unsigned int resv_level;
int dir_resv_level;
char cluster_stack[OCFS2_STACK_LABEL_LEN + 1];
bool user_stack;
};
static int ocfs2_parse_options(struct super_block *sb, char *options,
struct mount_options *mopt,
int is_remount);
static int ocfs2_parse_param(struct fs_context *fc, struct fs_parameter *param);
static int ocfs2_check_set_options(struct super_block *sb,
struct mount_options *options);
static int ocfs2_show_options(struct seq_file *s, struct dentry *root);
static void ocfs2_put_super(struct super_block *sb);
static int ocfs2_mount_volume(struct super_block *sb);
static int ocfs2_remount(struct super_block *sb, int *flags, char *data);
static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err);
static int ocfs2_initialize_mem_caches(void);
static void ocfs2_free_mem_caches(void);
@@ -135,7 +133,6 @@ static const struct super_operations ocfs2_sops = {
.evict_inode = ocfs2_evict_inode,
.sync_fs = ocfs2_sync_fs,
.put_super = ocfs2_put_super,
.remount_fs = ocfs2_remount,
.show_options = ocfs2_show_options,
.quota_read = ocfs2_quota_read,
.quota_write = ocfs2_quota_write,
@@ -144,15 +141,10 @@ static const struct super_operations ocfs2_sops = {
enum {
Opt_barrier,
Opt_err_panic,
Opt_err_ro,
Opt_errors,
Opt_intr,
Opt_nointr,
Opt_hb_none,
Opt_hb_local,
Opt_hb_global,
Opt_data_ordered,
Opt_data_writeback,
Opt_heartbeat,
Opt_data,
Opt_atime_quantum,
Opt_slot,
Opt_commit,
@@ -160,52 +152,64 @@ enum {
Opt_localflocks,
Opt_stack,
Opt_user_xattr,
Opt_nouser_xattr,
Opt_inode64,
Opt_acl,
Opt_noacl,
Opt_usrquota,
Opt_grpquota,
Opt_coherency_buffered,
Opt_coherency_full,
Opt_coherency,
Opt_resv_level,
Opt_dir_resv_level,
Opt_journal_async_commit,
Opt_err_cont,
Opt_err,
};
static const match_table_t tokens = {
{Opt_barrier, "barrier=%u"},
{Opt_err_panic, "errors=panic"},
{Opt_err_ro, "errors=remount-ro"},
{Opt_intr, "intr"},
{Opt_nointr, "nointr"},
{Opt_hb_none, OCFS2_HB_NONE},
{Opt_hb_local, OCFS2_HB_LOCAL},
{Opt_hb_global, OCFS2_HB_GLOBAL},
{Opt_data_ordered, "data=ordered"},
{Opt_data_writeback, "data=writeback"},
{Opt_atime_quantum, "atime_quantum=%u"},
{Opt_slot, "preferred_slot=%u"},
{Opt_commit, "commit=%u"},
{Opt_localalloc, "localalloc=%d"},
{Opt_localflocks, "localflocks"},
{Opt_stack, "cluster_stack=%s"},
{Opt_user_xattr, "user_xattr"},
{Opt_nouser_xattr, "nouser_xattr"},
{Opt_inode64, "inode64"},
{Opt_acl, "acl"},
{Opt_noacl, "noacl"},
{Opt_usrquota, "usrquota"},
{Opt_grpquota, "grpquota"},
{Opt_coherency_buffered, "coherency=buffered"},
{Opt_coherency_full, "coherency=full"},
{Opt_resv_level, "resv_level=%u"},
{Opt_dir_resv_level, "dir_resv_level=%u"},
{Opt_journal_async_commit, "journal_async_commit"},
{Opt_err_cont, "errors=continue"},
{Opt_err, NULL}
static const struct constant_table ocfs2_param_errors[] = {
{"panic", OCFS2_MOUNT_ERRORS_PANIC},
{"remount-ro", OCFS2_MOUNT_ERRORS_ROFS},
{"continue", OCFS2_MOUNT_ERRORS_CONT},
{}
};
static const struct constant_table ocfs2_param_heartbeat[] = {
{"local", OCFS2_MOUNT_HB_LOCAL},
{"none", OCFS2_MOUNT_HB_NONE},
{"global", OCFS2_MOUNT_HB_GLOBAL},
{}
};
static const struct constant_table ocfs2_param_data[] = {
{"writeback", OCFS2_MOUNT_DATA_WRITEBACK},
{"ordered", 0},
{}
};
static const struct constant_table ocfs2_param_coherency[] = {
{"buffered", OCFS2_MOUNT_COHERENCY_BUFFERED},
{"full", 0},
{}
};
static const struct fs_parameter_spec ocfs2_param_spec[] = {
fsparam_u32 ("barrier", Opt_barrier),
fsparam_enum ("errors", Opt_errors, ocfs2_param_errors),
fsparam_flag_no ("intr", Opt_intr),
fsparam_enum ("heartbeat", Opt_heartbeat, ocfs2_param_heartbeat),
fsparam_enum ("data", Opt_data, ocfs2_param_data),
fsparam_u32 ("atime_quantum", Opt_atime_quantum),
fsparam_u32 ("preferred_slot", Opt_slot),
fsparam_u32 ("commit", Opt_commit),
fsparam_s32 ("localalloc", Opt_localalloc),
fsparam_flag ("localflocks", Opt_localflocks),
fsparam_string ("cluster_stack", Opt_stack),
fsparam_flag_no ("user_xattr", Opt_user_xattr),
fsparam_flag ("inode64", Opt_inode64),
fsparam_flag_no ("acl", Opt_acl),
fsparam_flag ("usrquota", Opt_usrquota),
fsparam_flag ("grpquota", Opt_grpquota),
fsparam_enum ("coherency", Opt_coherency, ocfs2_param_coherency),
fsparam_u32 ("resv_level", Opt_resv_level),
fsparam_u32 ("dir_resv_level", Opt_dir_resv_level),
fsparam_flag ("journal_async_commit", Opt_journal_async_commit),
{}
};
#ifdef CONFIG_DEBUG_FS
@@ -600,32 +604,32 @@ static unsigned long long ocfs2_max_file_offset(unsigned int bbits,
return (((unsigned long long)bytes) << bitshift) - trim;
}
static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
static int ocfs2_reconfigure(struct fs_context *fc)
{
int incompat_features;
int ret = 0;
struct mount_options parsed_options;
struct mount_options *parsed_options = fc->fs_private;
struct super_block *sb = fc->root->d_sb;
struct ocfs2_super *osb = OCFS2_SB(sb);
u32 tmp;
sync_filesystem(sb);
if (!ocfs2_parse_options(sb, data, &parsed_options, 1) ||
!ocfs2_check_set_options(sb, &parsed_options)) {
if (!ocfs2_check_set_options(sb, parsed_options)) {
ret = -EINVAL;
goto out;
}
tmp = OCFS2_MOUNT_HB_LOCAL | OCFS2_MOUNT_HB_GLOBAL |
OCFS2_MOUNT_HB_NONE;
if ((osb->s_mount_opt & tmp) != (parsed_options.mount_opt & tmp)) {
if ((osb->s_mount_opt & tmp) != (parsed_options->mount_opt & tmp)) {
ret = -EINVAL;
mlog(ML_ERROR, "Cannot change heartbeat mode on remount\n");
goto out;
}
if ((osb->s_mount_opt & OCFS2_MOUNT_DATA_WRITEBACK) !=
(parsed_options.mount_opt & OCFS2_MOUNT_DATA_WRITEBACK)) {
(parsed_options->mount_opt & OCFS2_MOUNT_DATA_WRITEBACK)) {
ret = -EINVAL;
mlog(ML_ERROR, "Cannot change data mode on remount\n");
goto out;
@@ -634,16 +638,16 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
/* Probably don't want this on remount; it might
* mess with other nodes */
if (!(osb->s_mount_opt & OCFS2_MOUNT_INODE64) &&
(parsed_options.mount_opt & OCFS2_MOUNT_INODE64)) {
(parsed_options->mount_opt & OCFS2_MOUNT_INODE64)) {
ret = -EINVAL;
mlog(ML_ERROR, "Cannot enable inode64 on remount\n");
goto out;
}
/* We're going to/from readonly mode. */
if ((bool)(*flags & SB_RDONLY) != sb_rdonly(sb)) {
if ((bool)(fc->sb_flags & SB_RDONLY) != sb_rdonly(sb)) {
/* Disable quota accounting before remounting RO */
if (*flags & SB_RDONLY) {
if (fc->sb_flags & SB_RDONLY) {
ret = ocfs2_susp_quotas(osb, 0);
if (ret < 0)
goto out;
@@ -657,7 +661,7 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
goto unlock_osb;
}
if (*flags & SB_RDONLY) {
if (fc->sb_flags & SB_RDONLY) {
sb->s_flags |= SB_RDONLY;
osb->osb_flags |= OCFS2_OSB_SOFT_RO;
} else {
@@ -678,11 +682,11 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
sb->s_flags &= ~SB_RDONLY;
osb->osb_flags &= ~OCFS2_OSB_SOFT_RO;
}
trace_ocfs2_remount(sb->s_flags, osb->osb_flags, *flags);
trace_ocfs2_remount(sb->s_flags, osb->osb_flags, fc->sb_flags);
unlock_osb:
spin_unlock(&osb->osb_lock);
/* Enable quota accounting after remounting RW */
if (!ret && !(*flags & SB_RDONLY)) {
if (!ret && !(fc->sb_flags & SB_RDONLY)) {
if (sb_any_quota_suspended(sb))
ret = ocfs2_susp_quotas(osb, 1);
else
@@ -701,11 +705,11 @@ static int ocfs2_remount(struct super_block *sb, int *flags, char *data)
if (!ret) {
/* Only save off the new mount options in case of a successful
* remount. */
osb->s_mount_opt = parsed_options.mount_opt;
osb->s_atime_quantum = parsed_options.atime_quantum;
osb->preferred_slot = parsed_options.slot;
if (parsed_options.commit_interval)
osb->osb_commit_interval = parsed_options.commit_interval;
osb->s_mount_opt = parsed_options->mount_opt;
osb->s_atime_quantum = parsed_options->atime_quantum;
osb->preferred_slot = parsed_options->slot;
if (parsed_options->commit_interval)
osb->osb_commit_interval = parsed_options->commit_interval;
if (!ocfs2_is_hard_readonly(osb))
ocfs2_set_journal_params(osb);
@@ -966,23 +970,18 @@ static void ocfs2_disable_quotas(struct ocfs2_super *osb)
}
}
static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
static int ocfs2_fill_super(struct super_block *sb, struct fs_context *fc)
{
struct dentry *root;
int status, sector_size;
struct mount_options parsed_options;
struct mount_options *parsed_options = fc->fs_private;
struct inode *inode = NULL;
struct ocfs2_super *osb = NULL;
struct buffer_head *bh = NULL;
char nodestr[12];
struct ocfs2_blockcheck_stats stats;
trace_ocfs2_fill_super(sb, data, silent);
if (!ocfs2_parse_options(sb, data, &parsed_options, 0)) {
status = -EINVAL;
goto out;
}
trace_ocfs2_fill_super(sb, fc, fc->sb_flags & SB_SILENT);
/* probe for superblock */
status = ocfs2_sb_probe(sb, &bh, &sector_size, &stats);
@@ -999,24 +998,24 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
osb = OCFS2_SB(sb);
if (!ocfs2_check_set_options(sb, &parsed_options)) {
if (!ocfs2_check_set_options(sb, parsed_options)) {
status = -EINVAL;
goto out_super;
}
osb->s_mount_opt = parsed_options.mount_opt;
osb->s_atime_quantum = parsed_options.atime_quantum;
osb->preferred_slot = parsed_options.slot;
osb->osb_commit_interval = parsed_options.commit_interval;
osb->s_mount_opt = parsed_options->mount_opt;
osb->s_atime_quantum = parsed_options->atime_quantum;
osb->preferred_slot = parsed_options->slot;
osb->osb_commit_interval = parsed_options->commit_interval;
ocfs2_la_set_sizes(osb, parsed_options.localalloc_opt);
osb->osb_resv_level = parsed_options.resv_level;
osb->osb_dir_resv_level = parsed_options.resv_level;
if (parsed_options.dir_resv_level == -1)
osb->osb_dir_resv_level = parsed_options.resv_level;
ocfs2_la_set_sizes(osb, parsed_options->localalloc_opt);
osb->osb_resv_level = parsed_options->resv_level;
osb->osb_dir_resv_level = parsed_options->resv_level;
if (parsed_options->dir_resv_level == -1)
osb->osb_dir_resv_level = parsed_options->resv_level;
else
osb->osb_dir_resv_level = parsed_options.dir_resv_level;
osb->osb_dir_resv_level = parsed_options->dir_resv_level;
status = ocfs2_verify_userspace_stack(osb, &parsed_options);
status = ocfs2_verify_userspace_stack(osb, parsed_options);
if (status)
goto out_super;
@@ -1180,27 +1179,72 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
return status;
}
static struct dentry *ocfs2_mount(struct file_system_type *fs_type,
int flags,
const char *dev_name,
void *data)
static int ocfs2_get_tree(struct fs_context *fc)
{
return mount_bdev(fs_type, flags, dev_name, data, ocfs2_fill_super);
return get_tree_bdev(fc, ocfs2_fill_super);
}
static void ocfs2_free_fc(struct fs_context *fc)
{
kfree(fc->fs_private);
}
static const struct fs_context_operations ocfs2_context_ops = {
.parse_param = ocfs2_parse_param,
.get_tree = ocfs2_get_tree,
.reconfigure = ocfs2_reconfigure,
.free = ocfs2_free_fc,
};
static int ocfs2_init_fs_context(struct fs_context *fc)
{
struct mount_options *mopt;
mopt = kzalloc(sizeof(struct mount_options), GFP_KERNEL);
if (!mopt)
return -EINVAL;
mopt->commit_interval = 0;
mopt->mount_opt = OCFS2_MOUNT_NOINTR;
mopt->atime_quantum = OCFS2_DEFAULT_ATIME_QUANTUM;
mopt->slot = OCFS2_INVALID_SLOT;
mopt->localalloc_opt = -1;
mopt->cluster_stack[0] = '\0';
mopt->resv_level = OCFS2_DEFAULT_RESV_LEVEL;
mopt->dir_resv_level = -1;
fc->fs_private = mopt;
fc->ops = &ocfs2_context_ops;
return 0;
}
static struct file_system_type ocfs2_fs_type = {
.owner = THIS_MODULE,
.name = "ocfs2",
.mount = ocfs2_mount,
.kill_sb = kill_block_super,
.fs_flags = FS_REQUIRES_DEV|FS_RENAME_DOES_D_MOVE,
.next = NULL
.next = NULL,
.init_fs_context = ocfs2_init_fs_context,
.parameters = ocfs2_param_spec,
};
MODULE_ALIAS_FS("ocfs2");
static int ocfs2_check_set_options(struct super_block *sb,
struct mount_options *options)
{
if (options->user_stack == 0) {
u32 tmp;
/* Ensure only one heartbeat mode */
tmp = options->mount_opt & (OCFS2_MOUNT_HB_LOCAL |
OCFS2_MOUNT_HB_GLOBAL |
OCFS2_MOUNT_HB_NONE);
if (hweight32(tmp) != 1) {
mlog(ML_ERROR, "Invalid heartbeat mount options\n");
return 0;
}
}
if (options->mount_opt & OCFS2_MOUNT_USRQUOTA &&
!OCFS2_HAS_RO_COMPAT_FEATURE(sb,
OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
@@ -1232,241 +1276,142 @@ static int ocfs2_check_set_options(struct super_block *sb,
return 1;
}
static int ocfs2_parse_options(struct super_block *sb,
char *options,
struct mount_options *mopt,
int is_remount)
static int ocfs2_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
int status, user_stack = 0;
char *p;
u32 tmp;
int token, option;
substring_t args[MAX_OPT_ARGS];
struct fs_parse_result result;
int opt;
struct mount_options *mopt = fc->fs_private;
bool is_remount = (fc->purpose & FS_CONTEXT_FOR_RECONFIGURE);
trace_ocfs2_parse_options(is_remount, options ? options : "(none)");
trace_ocfs2_parse_options(is_remount, param->key);
mopt->commit_interval = 0;
mopt->mount_opt = OCFS2_MOUNT_NOINTR;
mopt->atime_quantum = OCFS2_DEFAULT_ATIME_QUANTUM;
mopt->slot = OCFS2_INVALID_SLOT;
mopt->localalloc_opt = -1;
mopt->cluster_stack[0] = '\0';
mopt->resv_level = OCFS2_DEFAULT_RESV_LEVEL;
mopt->dir_resv_level = -1;
opt = fs_parse(fc, ocfs2_param_spec, param, &result);
if (opt < 0)
return opt;
if (!options) {
status = 1;
goto bail;
}
while ((p = strsep(&options, ",")) != NULL) {
if (!*p)
continue;
token = match_token(p, tokens, args);
switch (token) {
case Opt_hb_local:
mopt->mount_opt |= OCFS2_MOUNT_HB_LOCAL;
break;
case Opt_hb_none:
mopt->mount_opt |= OCFS2_MOUNT_HB_NONE;
break;
case Opt_hb_global:
mopt->mount_opt |= OCFS2_MOUNT_HB_GLOBAL;
break;
case Opt_barrier:
if (match_int(&args[0], &option)) {
status = 0;
goto bail;
}
if (option)
mopt->mount_opt |= OCFS2_MOUNT_BARRIER;
else
mopt->mount_opt &= ~OCFS2_MOUNT_BARRIER;
break;
case Opt_intr:
mopt->mount_opt &= ~OCFS2_MOUNT_NOINTR;
break;
case Opt_nointr:
switch (opt) {
case Opt_heartbeat:
mopt->mount_opt |= result.uint_32;
break;
case Opt_barrier:
if (result.uint_32)
mopt->mount_opt |= OCFS2_MOUNT_BARRIER;
else
mopt->mount_opt &= ~OCFS2_MOUNT_BARRIER;
break;
case Opt_intr:
if (result.negated)
mopt->mount_opt |= OCFS2_MOUNT_NOINTR;
break;
case Opt_err_panic:
mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_CONT;
mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_ROFS;
mopt->mount_opt |= OCFS2_MOUNT_ERRORS_PANIC;
break;
case Opt_err_ro:
mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_CONT;
mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_PANIC;
mopt->mount_opt |= OCFS2_MOUNT_ERRORS_ROFS;
break;
case Opt_err_cont:
mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_ROFS;
mopt->mount_opt &= ~OCFS2_MOUNT_ERRORS_PANIC;
mopt->mount_opt |= OCFS2_MOUNT_ERRORS_CONT;
break;
case Opt_data_ordered:
mopt->mount_opt &= ~OCFS2_MOUNT_DATA_WRITEBACK;
break;
case Opt_data_writeback:
mopt->mount_opt |= OCFS2_MOUNT_DATA_WRITEBACK;
break;
case Opt_user_xattr:
mopt->mount_opt &= ~OCFS2_MOUNT_NOUSERXATTR;
break;
case Opt_nouser_xattr:
else
mopt->mount_opt &= ~OCFS2_MOUNT_NOINTR;
break;
case Opt_errors:
mopt->mount_opt &= ~(OCFS2_MOUNT_ERRORS_CONT |
OCFS2_MOUNT_ERRORS_ROFS |
OCFS2_MOUNT_ERRORS_PANIC);
mopt->mount_opt |= result.uint_32;
break;
case Opt_data:
mopt->mount_opt &= ~OCFS2_MOUNT_DATA_WRITEBACK;
mopt->mount_opt |= result.uint_32;
break;
case Opt_user_xattr:
if (result.negated)
mopt->mount_opt |= OCFS2_MOUNT_NOUSERXATTR;
break;
case Opt_atime_quantum:
if (match_int(&args[0], &option)) {
status = 0;
goto bail;
}
if (option >= 0)
mopt->atime_quantum = option;
break;
case Opt_slot:
if (match_int(&args[0], &option)) {
status = 0;
goto bail;
}
if (option)
mopt->slot = (u16)option;
break;
case Opt_commit:
if (match_int(&args[0], &option)) {
status = 0;
goto bail;
}
if (option < 0)
return 0;
if (option == 0)
option = JBD2_DEFAULT_MAX_COMMIT_AGE;
mopt->commit_interval = HZ * option;
break;
case Opt_localalloc:
if (match_int(&args[0], &option)) {
status = 0;
goto bail;
}
if (option >= 0)
mopt->localalloc_opt = option;
break;
case Opt_localflocks:
/*
* Changing this during remount could race
* flock() requests, or "unbalance" existing
* ones (e.g., a lock is taken in one mode but
* dropped in the other). If users care enough
* to flip locking modes during remount, we
* could add a "local" flag to individual
* flock structures for proper tracking of
* state.
*/
if (!is_remount)
mopt->mount_opt |= OCFS2_MOUNT_LOCALFLOCKS;
break;
case Opt_stack:
/* Check both that the option we were passed
* is of the right length and that it is a proper
* string of the right length.
*/
if (((args[0].to - args[0].from) !=
OCFS2_STACK_LABEL_LEN) ||
(strnlen(args[0].from,
OCFS2_STACK_LABEL_LEN) !=
OCFS2_STACK_LABEL_LEN)) {
mlog(ML_ERROR,
"Invalid cluster_stack option\n");
status = 0;
goto bail;
}
memcpy(mopt->cluster_stack, args[0].from,
OCFS2_STACK_LABEL_LEN);
mopt->cluster_stack[OCFS2_STACK_LABEL_LEN] = '\0';
/*
* Open code the memcmp here as we don't have
* an osb to pass to
* ocfs2_userspace_stack().
*/
if (memcmp(mopt->cluster_stack,
OCFS2_CLASSIC_CLUSTER_STACK,
OCFS2_STACK_LABEL_LEN))
user_stack = 1;
break;
case Opt_inode64:
mopt->mount_opt |= OCFS2_MOUNT_INODE64;
break;
case Opt_usrquota:
mopt->mount_opt |= OCFS2_MOUNT_USRQUOTA;
break;
case Opt_grpquota:
mopt->mount_opt |= OCFS2_MOUNT_GRPQUOTA;
break;
case Opt_coherency_buffered:
mopt->mount_opt |= OCFS2_MOUNT_COHERENCY_BUFFERED;
break;
case Opt_coherency_full:
mopt->mount_opt &= ~OCFS2_MOUNT_COHERENCY_BUFFERED;
break;
case Opt_acl:
mopt->mount_opt |= OCFS2_MOUNT_POSIX_ACL;
mopt->mount_opt &= ~OCFS2_MOUNT_NO_POSIX_ACL;
break;
case Opt_noacl:
else
mopt->mount_opt &= ~OCFS2_MOUNT_NOUSERXATTR;
break;
case Opt_atime_quantum:
mopt->atime_quantum = result.uint_32;
break;
case Opt_slot:
if (result.uint_32)
mopt->slot = (u16)result.uint_32;
break;
case Opt_commit:
if (result.uint_32 == 0)
mopt->commit_interval = HZ * JBD2_DEFAULT_MAX_COMMIT_AGE;
else
mopt->commit_interval = HZ * result.uint_32;
break;
case Opt_localalloc:
if (result.int_32 >= 0)
mopt->localalloc_opt = result.int_32;
break;
case Opt_localflocks:
/*
* Changing this during remount could race flock() requests, or
* "unbalance" existing ones (e.g., a lock is taken in one mode
* but dropped in the other). If users care enough to flip
* locking modes during remount, we could add a "local" flag to
* individual flock structures for proper tracking of state.
*/
if (!is_remount)
mopt->mount_opt |= OCFS2_MOUNT_LOCALFLOCKS;
break;
case Opt_stack:
/* Check both that the option we were passed is of the right
* length and that it is a proper string of the right length.
*/
if (strlen(param->string) != OCFS2_STACK_LABEL_LEN) {
mlog(ML_ERROR, "Invalid cluster_stack option\n");
return -EINVAL;
}
memcpy(mopt->cluster_stack, param->string, OCFS2_STACK_LABEL_LEN);
mopt->cluster_stack[OCFS2_STACK_LABEL_LEN] = '\0';
/*
* Open code the memcmp here as we don't have an osb to pass
* to ocfs2_userspace_stack().
*/
if (memcmp(mopt->cluster_stack,
OCFS2_CLASSIC_CLUSTER_STACK,
OCFS2_STACK_LABEL_LEN))
mopt->user_stack = 1;
break;
case Opt_inode64:
mopt->mount_opt |= OCFS2_MOUNT_INODE64;
break;
case Opt_usrquota:
mopt->mount_opt |= OCFS2_MOUNT_USRQUOTA;
break;
case Opt_grpquota:
mopt->mount_opt |= OCFS2_MOUNT_GRPQUOTA;
break;
case Opt_coherency:
mopt->mount_opt &= ~OCFS2_MOUNT_COHERENCY_BUFFERED;
mopt->mount_opt |= result.uint_32;
break;
case Opt_acl:
if (result.negated) {
mopt->mount_opt |= OCFS2_MOUNT_NO_POSIX_ACL;
mopt->mount_opt &= ~OCFS2_MOUNT_POSIX_ACL;
break;
case Opt_resv_level:
if (is_remount)
break;
if (match_int(&args[0], &option)) {
status = 0;
goto bail;
}
if (option >= OCFS2_MIN_RESV_LEVEL &&
option < OCFS2_MAX_RESV_LEVEL)
mopt->resv_level = option;
break;
case Opt_dir_resv_level:
if (is_remount)
break;
if (match_int(&args[0], &option)) {
status = 0;
goto bail;
}
if (option >= OCFS2_MIN_RESV_LEVEL &&
option < OCFS2_MAX_RESV_LEVEL)
mopt->dir_resv_level = option;
break;
case Opt_journal_async_commit:
mopt->mount_opt |= OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT;
break;
default:
mlog(ML_ERROR,
"Unrecognized mount option \"%s\" "
"or missing value\n", p);
status = 0;
goto bail;
} else {
mopt->mount_opt |= OCFS2_MOUNT_POSIX_ACL;
mopt->mount_opt &= ~OCFS2_MOUNT_NO_POSIX_ACL;
}
break;
case Opt_resv_level:
if (is_remount)
break;
if (result.uint_32 >= OCFS2_MIN_RESV_LEVEL &&
result.uint_32 < OCFS2_MAX_RESV_LEVEL)
mopt->resv_level = result.uint_32;
break;
case Opt_dir_resv_level:
if (is_remount)
break;
if (result.uint_32 >= OCFS2_MIN_RESV_LEVEL &&
result.uint_32 < OCFS2_MAX_RESV_LEVEL)
mopt->dir_resv_level = result.uint_32;
break;
case Opt_journal_async_commit:
mopt->mount_opt |= OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT;
break;
default:
return -EINVAL;
}
if (user_stack == 0) {
/* Ensure only one heartbeat mode */
tmp = mopt->mount_opt & (OCFS2_MOUNT_HB_LOCAL |
OCFS2_MOUNT_HB_GLOBAL |
OCFS2_MOUNT_HB_NONE);
if (hweight32(tmp) != 1) {
mlog(ML_ERROR, "Invalid heartbeat mount options\n");
status = 0;
goto bail;
}
}
status = 1;
bail:
return status;
return 0;
}
static int ocfs2_show_options(struct seq_file *s, struct dentry *root)
@@ -1858,7 +1803,7 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
osb = OCFS2_SB(sb);
BUG_ON(!osb);
/* Remove file check sysfs related directores/files,
/* Remove file check sysfs related directories/files,
* and wait for the pending file check operations */
ocfs2_filecheck_remove_sysfs(osb);
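
For reference, the flow the new parameter table above relies on can be shown in isolation: fs_parse() matches the key against the fs_parameter_spec, validates the value, and for fsparam_enum entries maps the string through its constant_table, so the parse_param handler only has to OR result.uint_32 into the option mask. A hedged, self-contained sketch follows; all demo_-prefixed names and the bit values are illustrative, not ocfs2's.

#include <linux/fs_context.h>
#include <linux/fs_parser.h>

enum { demo_opt_errors };

static const struct constant_table demo_errors[] = {
	{ "panic",      0x1 },
	{ "remount-ro", 0x2 },
	{ "continue",   0x4 },
	{}
};

static const struct fs_parameter_spec demo_param_spec[] = {
	fsparam_enum("errors", demo_opt_errors, demo_errors),
	{}
};

static int demo_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	unsigned long *mount_opt = fc->fs_private;	/* allocated in init_fs_context */
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, demo_param_spec, param, &result);
	if (opt < 0)
		return opt;			/* unknown key or invalid value */

	switch (opt) {
	case demo_opt_errors:
		/* result.uint_32 already holds the constant_table value */
		*mount_opt &= ~(0x1 | 0x2 | 0x4);
		*mount_opt |= result.uint_32;
		break;
	}
	return 0;
}
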


@ -54,31 +54,27 @@
static int ocfs2_fast_symlink_read_folio(struct file *f, struct folio *folio)
{
struct page *page = &folio->page;
struct inode *inode = page->mapping->host;
struct inode *inode = folio->mapping->host;
struct buffer_head *bh = NULL;
int status = ocfs2_read_inode_block(inode, &bh);
struct ocfs2_dinode *fe;
const char *link;
void *kaddr;
size_t len;
if (status < 0) {
mlog_errno(status);
return status;
goto out;
}
fe = (struct ocfs2_dinode *) bh->b_data;
link = (char *) fe->id2.i_symlink;
/* will be less than a page size */
len = strnlen(link, ocfs2_fast_symlink_chars(inode->i_sb));
kaddr = kmap_atomic(page);
memcpy(kaddr, link, len + 1);
kunmap_atomic(kaddr);
SetPageUptodate(page);
unlock_page(page);
memcpy_to_folio(folio, 0, link, len + 1);
out:
folio_end_read(folio, status == 0);
brelse(bh);
return 0;
return status;
}
const struct address_space_operations ocfs2_fast_symlink_aops = {


@@ -648,7 +648,7 @@ int ocfs2_calc_xattr_init(struct inode *dir,
* 256(name) + 80(value) + 16(entry) = 352 bytes,
* The max space of acl xattr taken inline is
* 80(value) + 16(entry) * 2(if directory) = 192 bytes,
* when blocksize = 512, may reserve one more cluser for
* when blocksize = 512, may reserve one more cluster for
* xattr bucket, otherwise reserve one metadata block
* for them is ok.
* If this is a new directory with inline data,
@@ -4371,7 +4371,7 @@ static int cmp_xe_offset(const void *a, const void *b)
/*
* defrag a xattr bucket if we find that the bucket has some
* holes beteen name/value pairs.
* holes between name/value pairs.
* We will move all the name/value pairs to the end of the bucket
* so that we can spare some space for insertion.
*/
@@ -5011,7 +5011,7 @@ static int ocfs2_divide_xattr_cluster(struct inode *inode,
* 2. If cluster_size == bucket_size:
* a) If the previous extent rec has more than one cluster and the insert
* place isn't in the last cluster, copy the entire last cluster to the
* new one. This time, we don't need to upate the first_bh and header_bh
* new one. This time, we don't need to update the first_bh and header_bh
* since they will not be moved into the new cluster.
* b) Otherwise, move the bottom half of the xattrs in the last cluster into
* the new one. And we set the extend flag to zero if the insert place is
@@ -6189,7 +6189,7 @@ struct ocfs2_xattr_reflink {
/*
* Given a xattr header and xe offset,
* return the proper xv and the corresponding bh.
* xattr in inode, block and xattr tree have different implementaions.
* xattr in inode, block and xattr tree have different implementations.
*/
typedef int (get_xattr_value_root)(struct super_block *sb,
struct buffer_head *bh,
@@ -6269,7 +6269,7 @@ static int ocfs2_get_xattr_value_root(struct super_block *sb,
}
/*
* Lock the meta_ac and caculate how much credits we need for reflink xattrs.
* Lock the meta_ac and calculate how much credits we need for reflink xattrs.
* It is only used for inline xattr and xattr block.
*/
static int ocfs2_reflink_lock_xattr_allocators(struct ocfs2_super *osb,


@@ -405,7 +405,7 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
goto check_directory_table;
msblk->fragment_cache = squashfs_cache_init("fragment",
SQUASHFS_CACHED_FRAGMENTS, msblk->block_size);
min(SQUASHFS_CACHED_FRAGMENTS, fragments), msblk->block_size);
if (msblk->fragment_cache == NULL) {
err = -ENOMEM;
goto failed_mount;


@@ -5,7 +5,7 @@
* Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved.
*
* This file is a stub providing documentation for what functions
* asm-ARCH/syscall.h files need to define. Most arch definitions
* arch/ARCH/include/asm/syscall.h files need to define. Most arch definitions
* will be simple inlines.
*
* All of these functions expect to be called with no locks,


@@ -23,7 +23,7 @@ struct device;
*
* Function implementations generic to all architectures are in
* lib/bitmap.c. Functions implementations that are architecture
* specific are in various include/asm-<arch>/bitops.h headers
* specific are in various arch/<arch>/include/asm/bitops.h headers
* and other arch/<arch> specific files.
*
* See lib/bitmap.c for more details.


@@ -29,25 +29,32 @@ struct task_delay_info {
* XXX_delay contains the accumulated delay time in nanoseconds.
*/
u64 blkio_start;
u64 blkio_delay_max;
u64 blkio_delay; /* wait for sync block io completion */
u64 swapin_start;
u64 swapin_delay_max;
u64 swapin_delay; /* wait for swapin */
u32 blkio_count; /* total count of the number of sync block */
/* io operations performed */
u32 swapin_count; /* total count of swapin */
u64 freepages_start;
u64 freepages_delay_max;
u64 freepages_delay; /* wait for memory reclaim */
u64 thrashing_start;
u64 thrashing_delay_max;
u64 thrashing_delay; /* wait for thrashing page */
u64 compact_start;
u64 compact_delay_max;
u64 compact_delay; /* wait for memory compact */
u64 wpcopy_start;
u64 wpcopy_delay_max;
u64 wpcopy_delay; /* wait for write-protect copy */
u64 irq_delay_max;
u64 irq_delay; /* wait for IRQ/SOFTIRQ */
u32 freepages_count; /* total count of memory reclaim */
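
Each accumulated *_delay counter above gains a matching *_delay_max field recording the largest single delay observed. As a hedged illustration only (this is not the kernel's delayacct code, and demo_account_delay() is a hypothetical helper), the bookkeeping when one waiting episode ends could look like this:

#include <linux/types.h>

/* Hypothetical helper; field roles mirror the struct above. */
static void demo_account_delay(u64 *total, u64 *max, u32 *count,
			       u64 start_ns, u64 end_ns)
{
	u64 delay = end_ns - start_ns;

	*total += delay;		/* accumulated delay, nanoseconds */
	if (delay > *max)
		*max = delay;		/* peak single delay observed */
	(*count)++;			/* completed waiting episodes */
}
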


@@ -6,6 +6,17 @@
#include <linux/string.h>
#include <linux/types.h>
/*
* The Min Heap API provides utilities for managing min-heaps, a binary tree
* structure where each node's value is less than or equal to its children's
* values, ensuring the smallest element is at the root.
*
* Users should avoid directly calling functions prefixed with __min_heap_*().
* Instead, use the provided macro wrappers.
*
* For further details and examples, refer to Documentation/core-api/min_heap.rst.
*/
/**
* Data structure to hold a min-heap.
* @nr: Number of elements currently in the heap.
@@ -218,7 +229,7 @@ void __min_heap_init_inline(min_heap_char *heap, void *data, int size)
}
#define min_heap_init_inline(_heap, _data, _size) \
__min_heap_init_inline((min_heap_char *)_heap, _data, _size)
__min_heap_init_inline(container_of(&(_heap)->nr, min_heap_char, nr), _data, _size)
/* Get the minimum element from the heap. */
static __always_inline
@@ -228,7 +239,8 @@ void *__min_heap_peek_inline(struct min_heap_char *heap)
}
#define min_heap_peek_inline(_heap) \
(__minheap_cast(_heap) __min_heap_peek_inline((min_heap_char *)_heap))
(__minheap_cast(_heap) \
__min_heap_peek_inline(container_of(&(_heap)->nr, min_heap_char, nr)))
/* Check if the heap is full. */
static __always_inline
@@ -238,7 +250,7 @@ bool __min_heap_full_inline(min_heap_char *heap)
}
#define min_heap_full_inline(_heap) \
__min_heap_full_inline((min_heap_char *)_heap)
__min_heap_full_inline(container_of(&(_heap)->nr, min_heap_char, nr))
/* Sift the element at pos down the heap. */
static __always_inline
@@ -277,8 +289,8 @@ void __min_heap_sift_down_inline(min_heap_char *heap, int pos, size_t elem_size,
}
#define min_heap_sift_down_inline(_heap, _pos, _func, _args) \
__min_heap_sift_down_inline((min_heap_char *)_heap, _pos, __minheap_obj_size(_heap), \
_func, _args)
__min_heap_sift_down_inline(container_of(&(_heap)->nr, min_heap_char, nr), _pos, \
__minheap_obj_size(_heap), _func, _args)
/* Sift up ith element from the heap, O(log2(nr)). */
static __always_inline
@@ -304,8 +316,8 @@ void __min_heap_sift_up_inline(min_heap_char *heap, size_t elem_size, size_t idx
}
#define min_heap_sift_up_inline(_heap, _idx, _func, _args) \
__min_heap_sift_up_inline((min_heap_char *)_heap, __minheap_obj_size(_heap), _idx, \
_func, _args)
__min_heap_sift_up_inline(container_of(&(_heap)->nr, min_heap_char, nr), \
__minheap_obj_size(_heap), _idx, _func, _args)
/* Floyd's approach to heapification that is O(nr). */
static __always_inline
@@ -319,7 +331,8 @@ void __min_heapify_all_inline(min_heap_char *heap, size_t elem_size,
}
#define min_heapify_all_inline(_heap, _func, _args) \
__min_heapify_all_inline((min_heap_char *)_heap, __minheap_obj_size(_heap), _func, _args)
__min_heapify_all_inline(container_of(&(_heap)->nr, min_heap_char, nr), \
__minheap_obj_size(_heap), _func, _args)
/* Remove minimum element from the heap, O(log2(nr)). */
static __always_inline
@@ -340,7 +353,8 @@ bool __min_heap_pop_inline(min_heap_char *heap, size_t elem_size,
}
#define min_heap_pop_inline(_heap, _func, _args) \
__min_heap_pop_inline((min_heap_char *)_heap, __minheap_obj_size(_heap), _func, _args)
__min_heap_pop_inline(container_of(&(_heap)->nr, min_heap_char, nr), \
__minheap_obj_size(_heap), _func, _args)
/*
* Remove the minimum element and then push the given element. The
@@ -356,8 +370,8 @@ void __min_heap_pop_push_inline(min_heap_char *heap, const void *element, size_t
}
#define min_heap_pop_push_inline(_heap, _element, _func, _args) \
__min_heap_pop_push_inline((min_heap_char *)_heap, _element, __minheap_obj_size(_heap), \
_func, _args)
__min_heap_pop_push_inline(container_of(&(_heap)->nr, min_heap_char, nr), _element, \
__minheap_obj_size(_heap), _func, _args)
/* Push an element on to the heap, O(log2(nr)). */
static __always_inline
@@ -382,8 +396,8 @@ bool __min_heap_push_inline(min_heap_char *heap, const void *element, size_t ele
}
#define min_heap_push_inline(_heap, _element, _func, _args) \
__min_heap_push_inline((min_heap_char *)_heap, _element, __minheap_obj_size(_heap), \
_func, _args)
__min_heap_push_inline(container_of(&(_heap)->nr, min_heap_char, nr), _element, \
__minheap_obj_size(_heap), _func, _args)
/* Remove ith element from the heap, O(log2(nr)). */
static __always_inline
@@ -411,8 +425,8 @@ bool __min_heap_del_inline(min_heap_char *heap, size_t elem_size, size_t idx,
}
#define min_heap_del_inline(_heap, _idx, _func, _args) \
__min_heap_del_inline((min_heap_char *)_heap, __minheap_obj_size(_heap), _idx, \
_func, _args)
__min_heap_del_inline(container_of(&(_heap)->nr, min_heap_char, nr), \
__minheap_obj_size(_heap), _idx, _func, _args)
void __min_heap_init(min_heap_char *heap, void *data, int size);
void *__min_heap_peek(struct min_heap_char *heap);
@ -433,25 +447,31 @@ bool __min_heap_del(min_heap_char *heap, size_t elem_size, size_t idx,
const struct min_heap_callbacks *func, void *args);
#define min_heap_init(_heap, _data, _size) \
__min_heap_init((min_heap_char *)_heap, _data, _size)
__min_heap_init(container_of(&(_heap)->nr, min_heap_char, nr), _data, _size)
#define min_heap_peek(_heap) \
(__minheap_cast(_heap) __min_heap_peek((min_heap_char *)_heap))
(__minheap_cast(_heap) __min_heap_peek(container_of(&(_heap)->nr, min_heap_char, nr)))
#define min_heap_full(_heap) \
__min_heap_full((min_heap_char *)_heap)
__min_heap_full(container_of(&(_heap)->nr, min_heap_char, nr))
#define min_heap_sift_down(_heap, _pos, _func, _args) \
__min_heap_sift_down((min_heap_char *)_heap, _pos, __minheap_obj_size(_heap), _func, _args)
__min_heap_sift_down(container_of(&(_heap)->nr, min_heap_char, nr), _pos, \
__minheap_obj_size(_heap), _func, _args)
#define min_heap_sift_up(_heap, _idx, _func, _args) \
__min_heap_sift_up((min_heap_char *)_heap, __minheap_obj_size(_heap), _idx, _func, _args)
__min_heap_sift_up(container_of(&(_heap)->nr, min_heap_char, nr), \
__minheap_obj_size(_heap), _idx, _func, _args)
#define min_heapify_all(_heap, _func, _args) \
__min_heapify_all((min_heap_char *)_heap, __minheap_obj_size(_heap), _func, _args)
__min_heapify_all(container_of(&(_heap)->nr, min_heap_char, nr), \
__minheap_obj_size(_heap), _func, _args)
#define min_heap_pop(_heap, _func, _args) \
__min_heap_pop((min_heap_char *)_heap, __minheap_obj_size(_heap), _func, _args)
__min_heap_pop(container_of(&(_heap)->nr, min_heap_char, nr), \
__minheap_obj_size(_heap), _func, _args)
#define min_heap_pop_push(_heap, _element, _func, _args) \
__min_heap_pop_push((min_heap_char *)_heap, _element, __minheap_obj_size(_heap), \
_func, _args)
__min_heap_pop_push(container_of(&(_heap)->nr, min_heap_char, nr), _element, \
__minheap_obj_size(_heap), _func, _args)
#define min_heap_push(_heap, _element, _func, _args) \
__min_heap_push((min_heap_char *)_heap, _element, __minheap_obj_size(_heap), _func, _args)
__min_heap_push(container_of(&(_heap)->nr, min_heap_char, nr), _element, \
__minheap_obj_size(_heap), _func, _args)
#define min_heap_del(_heap, _idx, _func, _args) \
__min_heap_del((min_heap_char *)_heap, __minheap_obj_size(_heap), _idx, _func, _args)
__min_heap_del(container_of(&(_heap)->nr, min_heap_char, nr), \
__minheap_obj_size(_heap), _idx, _func, _args)
#endif /* _LINUX_MIN_HEAP_H */
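Not part of this patch: a minimal sketch of the inline API above, assuming DEFINE_MIN_HEAP() and the callback layout used by lib/test_min_heap.c. The container_of() form means the argument must really be a min-heap structure; an unrelated pointer that the old (min_heap_char *) cast would have accepted now fails to compile.

#include <linux/min_heap.h>

DEFINE_MIN_HEAP(int, min_heap_int);

static bool int_less(const void *lhs, const void *rhs, void *args)
{
	return *(const int *)lhs < *(const int *)rhs;
}

static void int_swap(void *lhs, void *rhs, void *args)
{
	int tmp = *(int *)lhs;

	*(int *)lhs = *(int *)rhs;
	*(int *)rhs = tmp;
}

static int smallest_of(int *values, int n)
{
	struct min_heap_int heap;
	const struct min_heap_callbacks funcs = {
		.less = int_less,
		.swp = int_swap,
	};

	/* Heapify the caller's buffer in place, then look at the minimum. */
	min_heap_init_inline(&heap, values, n);
	heap.nr = n;
	min_heapify_all_inline(&heap, &funcs, NULL);
	return *min_heap_peek_inline(&heap);	/* assumes n > 0 */
}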

View File

@ -8,13 +8,10 @@
#include <linux/types.h>
/*
* min()/max()/clamp() macros must accomplish three things:
* min()/max()/clamp() macros must accomplish several things:
*
* - Avoid multiple evaluations of the arguments (so side-effects like
* "x++" happen only once) when non-constant.
* - Retain result as a constant expressions when called with only
* constant expressions (to avoid tripping VLA warnings in stack
* allocation usage).
* - Perform signed v unsigned type-checking (to generate compile
* errors instead of nasty runtime surprises).
* - Unsigned char/short are always promoted to signed int and can be
@ -31,58 +28,54 @@
* bit #0 set if ok for unsigned comparisons
* bit #1 set if ok for signed comparisons
*
* In particular, statically non-negative signed integer
* expressions are ok for both.
* In particular, statically non-negative signed integer expressions
* are ok for both.
*
* NOTE! Unsigned types smaller than 'int' are implicitly
* converted to 'int' in expressions, and are accepted for
* signed conversions for now. This is debatable.
* NOTE! Unsigned types smaller than 'int' are implicitly converted to 'int'
* in expressions, and are accepted for signed conversions for now.
* This is debatable.
*
* Note that 'x' is the original expression, and 'ux' is
* the unique variable that contains the value.
* Note that 'x' is the original expression, and 'ux' is the unique variable
* that contains the value.
*
* We use 'ux' for pure type checking, and 'x' for when
* we need to look at the value (but without evaluating
* it for side effects! Careful to only ever evaluate it
* with sizeof() or __builtin_constant_p() etc).
* We use 'ux' for pure type checking, and 'x' for when we need to look at the
* value (but without evaluating it for side effects!
* Careful to only ever evaluate it with sizeof() or __builtin_constant_p() etc).
*
* Pointers end up being checked by the normal C type
* rules at the actual comparison, and these expressions
* only need to be careful to not cause warnings for
* pointer use.
* Pointers end up being checked by the normal C type rules at the actual
* comparison, and these expressions only need to be careful to not cause
* warnings for pointer use.
*/
#define __signed_type_use(x,ux) (2+__is_nonneg(x,ux))
#define __unsigned_type_use(x,ux) (1+2*(sizeof(ux)<4))
#define __sign_use(x,ux) (is_signed_type(typeof(ux))? \
__signed_type_use(x,ux):__unsigned_type_use(x,ux))
#define __sign_use(ux) (is_signed_type(typeof(ux)) ? \
(2 + __is_nonneg(ux)) : (1 + 2 * (sizeof(ux) < 4)))
/*
* To avoid warnings about casting pointers to integers
* of different sizes, we need that special sign type.
* Check whether a signed value is always non-negative.
*
* On 64-bit we can just always use 'long', since any
* integer or pointer type can just be cast to that.
* A cast is needed to avoid any warnings from values that aren't signed
* integer types (in which case the result doesn't matter).
*
* This does not work for 128-bit signed integers since
* the cast would truncate them, but we do not use s128
* types in the kernel (we do use 'u128', but they will
* be handled by the !is_signed_type() case).
* On 64-bit any integer or pointer type can safely be cast to 'long long'.
* But on 32-bit we need to avoid warnings about casting pointers to integers
* of different sizes without truncating 64-bit values so 'long' or 'long long'
* must be used depending on the size of the value.
*
* NOTE! The cast is there only to avoid any warnings
* from when values that aren't signed integer types.
* This does not work for 128-bit signed integers since the cast would truncate
* them, but we do not use s128 types in the kernel (we do use 'u128',
* but they are handled by the !is_signed_type() case).
*/
#ifdef CONFIG_64BIT
#define __signed_type(ux) long
#if __SIZEOF_POINTER__ == __SIZEOF_LONG_LONG__
#define __is_nonneg(ux) statically_true((long long)(ux) >= 0)
#else
#define __signed_type(ux) typeof(__builtin_choose_expr(sizeof(ux)>4,1LL,1L))
#define __is_nonneg(ux) statically_true( \
(typeof(__builtin_choose_expr(sizeof(ux) > 4, 1LL, 1L)))(ux) >= 0)
#endif
#define __is_nonneg(x,ux) statically_true((__signed_type(ux))(x)>=0)
#define __types_ok(x,y,ux,uy) \
(__sign_use(x,ux) & __sign_use(y,uy))
#define __types_ok(ux, uy) \
(__sign_use(ux) & __sign_use(uy))
#define __types_ok3(x,y,z,ux,uy,uz) \
(__sign_use(x,ux) & __sign_use(y,uy) & __sign_use(z,uz))
#define __types_ok3(ux, uy, uz) \
(__sign_use(ux) & __sign_use(uy) & __sign_use(uz))
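A worked illustration of the encoding above (not part of minmax.h; the variables are invented):

static void minmax_signedness_examples(void)
{
	unsigned long ul = 5;
	unsigned short us = 5;
	int i = -1;

	(void)min(16, ul);	/* ok: 16 is statically non-negative, 3 & 1 != 0 */
	(void)min(us, i);	/* ok: 16-bit unsigned is safe as signed int, 3 & 2 != 0 */
	/* min(i, ul) would trip the BUILD_BUG: 2 & 1 == 0 (signed-only vs unsigned-only) */
}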
#define __cmp_op_min <
#define __cmp_op_max >
@ -97,30 +90,13 @@
#define __careful_cmp_once(op, x, y, ux, uy) ({ \
__auto_type ux = (x); __auto_type uy = (y); \
BUILD_BUG_ON_MSG(!__types_ok(x,y,ux,uy), \
BUILD_BUG_ON_MSG(!__types_ok(ux, uy), \
#op"("#x", "#y") signedness error"); \
__cmp(op, ux, uy); })
#define __careful_cmp(op, x, y) \
__careful_cmp_once(op, x, y, __UNIQUE_ID(x_), __UNIQUE_ID(y_))
#define __clamp(val, lo, hi) \
((val) >= (hi) ? (hi) : ((val) <= (lo) ? (lo) : (val)))
#define __clamp_once(val, lo, hi, uval, ulo, uhi) ({ \
__auto_type uval = (val); \
__auto_type ulo = (lo); \
__auto_type uhi = (hi); \
static_assert(__builtin_choose_expr(__is_constexpr((lo) > (hi)), \
(lo) <= (hi), true), \
"clamp() low limit " #lo " greater than high limit " #hi); \
BUILD_BUG_ON_MSG(!__types_ok3(val,lo,hi,uval,ulo,uhi), \
"clamp("#val", "#lo", "#hi") signedness error"); \
__clamp(uval, ulo, uhi); })
#define __careful_clamp(val, lo, hi) \
__clamp_once(val, lo, hi, __UNIQUE_ID(v_), __UNIQUE_ID(l_), __UNIQUE_ID(h_))
/**
* min - return minimum of two values of the same or compatible types
* @x: first value
@ -154,7 +130,7 @@
#define __careful_op3(op, x, y, z, ux, uy, uz) ({ \
__auto_type ux = (x); __auto_type uy = (y);__auto_type uz = (z);\
BUILD_BUG_ON_MSG(!__types_ok3(x,y,z,ux,uy,uz), \
BUILD_BUG_ON_MSG(!__types_ok3(ux, uy, uz), \
#op"3("#x", "#y", "#z") signedness error"); \
__cmp(op, ux, __cmp(op, uy, uz)); })
@ -176,34 +152,6 @@
#define max3(x, y, z) \
__careful_op3(max, x, y, z, __UNIQUE_ID(x_), __UNIQUE_ID(y_), __UNIQUE_ID(z_))
/**
* min_not_zero - return the minimum that is _not_ zero, unless both are zero
* @x: value1
* @y: value2
*/
#define min_not_zero(x, y) ({ \
typeof(x) __x = (x); \
typeof(y) __y = (y); \
__x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
/**
* clamp - return a value clamped to a given range with strict typechecking
* @val: current value
* @lo: lowest allowable value
* @hi: highest allowable value
*
* This macro does strict typechecking of @lo/@hi to make sure they are of the
* same type as @val. See the unnecessary pointer comparisons.
*/
#define clamp(val, lo, hi) __careful_clamp(val, lo, hi)
/*
* ..and if you can't take the strict
* types, you can specify one yourself.
*
* Or not use min/max/clamp at all, of course.
*/
/**
* min_t - return minimum of two values, using the specified type
* @type: data type to use
@ -220,6 +168,68 @@
*/
#define max_t(type, x, y) __cmp_once(max, type, x, y)
/**
* min_not_zero - return the minimum that is _not_ zero, unless both are zero
* @x: value1
* @y: value2
*/
#define min_not_zero(x, y) ({ \
typeof(x) __x = (x); \
typeof(y) __y = (y); \
__x == 0 ? __y : ((__y == 0) ? __x : min(__x, __y)); })
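For instance (illustrative only, invented timeout parameters):

static unsigned int effective_timeout(unsigned int dev_timeout,
				      unsigned int global_timeout)
{
	/* Prefer the per-device value; fall back to the global default
	 * when the device leaves it at 0. Only 0/0 yields 0. */
	return min_not_zero(dev_timeout, global_timeout);
}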
#define __clamp(val, lo, hi) \
((val) >= (hi) ? (hi) : ((val) <= (lo) ? (lo) : (val)))
#define __clamp_once(type, val, lo, hi, uval, ulo, uhi) ({ \
type uval = (val); \
type ulo = (lo); \
type uhi = (hi); \
BUILD_BUG_ON_MSG(statically_true(ulo > uhi), \
"clamp() low limit " #lo " greater than high limit " #hi); \
BUILD_BUG_ON_MSG(!__types_ok3(uval, ulo, uhi), \
"clamp("#val", "#lo", "#hi") signedness error"); \
__clamp(uval, ulo, uhi); })
#define __careful_clamp(type, val, lo, hi) \
__clamp_once(type, val, lo, hi, __UNIQUE_ID(v_), __UNIQUE_ID(l_), __UNIQUE_ID(h_))
/**
* clamp - return a value clamped to a given range with typechecking
* @val: current value
* @lo: lowest allowable value
* @hi: highest allowable value
*
* This macro checks @val/@lo/@hi to make sure they have compatible
* signedness.
*/
#define clamp(val, lo, hi) __careful_clamp(__auto_type, val, lo, hi)
/**
* clamp_t - return a value clamped to a given range using a given type
* @type: the type of variable to use
* @val: current value
* @lo: minimum allowable value
* @hi: maximum allowable value
*
* This macro does no typechecking and uses temporary variables of type
* @type to make all the comparisons.
*/
#define clamp_t(type, val, lo, hi) __careful_clamp(type, val, lo, hi)
/**
* clamp_val - return a value clamped to a given range using val's type
* @val: current value
* @lo: minimum allowable value
* @hi: maximum allowable value
*
* This macro does no typechecking and uses temporary variables of whatever
* type the input argument @val is. This is useful when @val is an unsigned
* type and @lo and @hi are literals that will otherwise be assigned a signed
* integer type.
*/
#define clamp_val(val, lo, hi) __careful_clamp(typeof(val), val, lo, hi)
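A short sketch contrasting the three variants (illustrative only; the function and its parameters are invented):

static u32 clamp_examples(unsigned int percent, int raw, u8 level)
{
	u32 out;

	/* clamp(): signedness-checked, each argument keeps its own type. */
	out = clamp(percent, 0U, 100U);

	/* clamp_t(): all three values are converted to u16 before comparing. */
	out += clamp_t(u16, raw, 0, 1000);

	/* clamp_val(): clamp_t() with type = typeof(level), so the int
	 * literals 10 and 200 are compared as u8 rather than as int. */
	out += clamp_val(level, 10, 200);

	return out;
}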
/*
* Do not check the array parameter using __must_be_array().
* In the following legit use-case where the "array" passed is a simple pointer,
@ -263,31 +273,6 @@
*/
#define max_array(array, len) __minmax_array(max, array, len)
/**
* clamp_t - return a value clamped to a given range using a given type
* @type: the type of variable to use
* @val: current value
* @lo: minimum allowable value
* @hi: maximum allowable value
*
* This macro does no typechecking and uses temporary variables of type
* @type to make all the comparisons.
*/
#define clamp_t(type, val, lo, hi) __careful_clamp((type)(val), (type)(lo), (type)(hi))
/**
* clamp_val - return a value clamped to a given range using val's type
* @val: current value
* @lo: minimum allowable value
* @hi: maximum allowable value
*
* This macro does no typechecking and uses temporary variables of whatever
* type the input argument @val is. This is useful when @val is an unsigned
* type and @lo and @hi are literals that will otherwise be assigned a signed
* integer type.
*/
#define clamp_val(val, lo, hi) clamp_t(typeof(val), val, lo, hi)
static inline bool in_range64(u64 val, u64 start, u64 len)
{
return (val - start) < len;
@ -326,9 +311,9 @@ static inline bool in_range32(u32 val, u32 start, u32 len)
* Use these carefully: no type checking, and uses the arguments
* multiple times. Use for obvious constants only.
*/
#define MIN(a,b) __cmp(min,a,b)
#define MAX(a,b) __cmp(max,a,b)
#define MIN_T(type,a,b) __cmp(min,(type)(a),(type)(b))
#define MAX_T(type,a,b) __cmp(max,(type)(a),(type)(b))
#define MIN(a, b) __cmp(min, a, b)
#define MAX(a, b) __cmp(max, a, b)
#define MIN_T(type, a, b) __cmp(min, (type)(a), (type)(b))
#define MAX_T(type, a, b) __cmp(max, (type)(a), (type)(b))
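As the comment says, these belong with obvious constants only; a sketch of the hazard (illustrative, invented name):

#define BOUNCE_BUF_SIZE	MIN(PAGE_SIZE, 4096)	/* fine: obvious constants */

/*
 * MIN(i++, limit) would expand (and possibly evaluate) i++ twice;
 * for anything with side effects, or anything non-constant, use min().
 */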
#endif /* _LINUX_MINMAX_H */

View File

@ -398,6 +398,9 @@ struct sched_info {
/* Time spent waiting on a runqueue: */
unsigned long long run_delay;
/* Max time spent waiting on a runqueue: */
unsigned long long max_run_delay;
/* Timestamps: */
/* When did we last run on a CPU? */

View File

@ -43,7 +43,7 @@ typedef unsigned long uintptr_t;
typedef long intptr_t;
#ifdef CONFIG_HAVE_UID16
/* This is defined by include/asm-{arch}/posix_types.h */
/* This is defined by arch/{arch}/include/asm/posix_types.h */
typedef __kernel_old_uid_t old_uid_t;
typedef __kernel_old_gid_t old_gid_t;
#endif /* CONFIG_UID16 */

View File

@ -72,6 +72,7 @@ struct taskstats {
*/
__u64 cpu_count __attribute__((aligned(8)));
__u64 cpu_delay_total;
__u64 cpu_delay_max;
/* Following four fields atomically updated using task->delays->lock */
@ -80,10 +81,12 @@ struct taskstats {
*/
__u64 blkio_count;
__u64 blkio_delay_total;
__u64 blkio_delay_max;
/* Delay waiting for page fault I/O (swap in only) */
__u64 swapin_count;
__u64 swapin_delay_total;
__u64 swapin_delay_max;
/* cpu "wall-clock" running time
* On some architectures, value will adjust for cpu time stolen
@ -166,10 +169,12 @@ struct taskstats {
/* Delay waiting for memory reclaim */
__u64 freepages_count;
__u64 freepages_delay_total;
__u64 freepages_delay_max;
/* Delay waiting for thrashing page */
__u64 thrashing_count;
__u64 thrashing_delay_total;
__u64 thrashing_delay_max;
/* v10: 64-bit btime to avoid overflow */
__u64 ac_btime64; /* 64-bit begin time */
@ -177,6 +182,7 @@ struct taskstats {
/* v11: Delay waiting for memory compact */
__u64 compact_count;
__u64 compact_delay_total;
__u64 compact_delay_max;
/* v12 begin */
__u32 ac_tgid; /* thread group ID */
@ -198,10 +204,13 @@ struct taskstats {
/* v13: Delay waiting for write-protect copy */
__u64 wpcopy_count;
__u64 wpcopy_delay_total;
__u64 wpcopy_delay_max;
/* v14: Delay waiting for IRQ/SOFTIRQ */
__u64 irq_count;
__u64 irq_delay_total;
__u64 irq_delay_max;
/* v15: add Delay max */
};
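Not part of this patch: a sketch of how a getdelays-style tool might report the new *_delay_max fields; all delays in struct taskstats are in nanoseconds.

#include <stdio.h>
#include <linux/taskstats.h>

static void print_cpu_delays(const struct taskstats *t)
{
	double avg_ms = t->cpu_count ?
		(double)t->cpu_delay_total / t->cpu_count / 1e6 : 0.0;

	printf("CPU  count %llu  delay avg %.3fms  delay max %.6fms\n",
	       (unsigned long long)t->cpu_count, avg_ms,
	       (double)t->cpu_delay_max / 1e6);
}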

View File

@ -89,7 +89,7 @@ static void __init handle_initrd(char *root_device_name)
extern char *envp_init[];
int error;
pr_warn("using deprecated initrd support, will be removed in 2021.\n");
pr_warn("using deprecated initrd support, will be removed soon.\n");
real_root_dev = new_encode_dev(ROOT_DEV);
create_dev("/dev/root.old", Root_RAM0);

View File

@ -93,9 +93,9 @@ void __delayacct_tsk_init(struct task_struct *tsk)
/*
* Finish delay accounting for a statistic using its timestamps (@start),
* accumalator (@total) and @count
* accumulator (@total) and @count
*/
static void delayacct_end(raw_spinlock_t *lock, u64 *start, u64 *total, u32 *count)
static void delayacct_end(raw_spinlock_t *lock, u64 *start, u64 *total, u32 *count, u64 *max)
{
s64 ns = local_clock() - *start;
unsigned long flags;
@ -104,6 +104,8 @@ static void delayacct_end(raw_spinlock_t *lock, u64 *start, u64 *total, u32 *cou
raw_spin_lock_irqsave(lock, flags);
*total += ns;
(*count)++;
if (ns > *max)
*max = ns;
raw_spin_unlock_irqrestore(lock, flags);
}
}
@ -122,7 +124,8 @@ void __delayacct_blkio_end(struct task_struct *p)
delayacct_end(&p->delays->lock,
&p->delays->blkio_start,
&p->delays->blkio_delay,
&p->delays->blkio_count);
&p->delays->blkio_count,
&p->delays->blkio_delay_max);
}
int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
@ -153,10 +156,11 @@ int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
d->cpu_count += t1;
d->cpu_delay_max = tsk->sched_info.max_run_delay;
tmp = (s64)d->cpu_delay_total + t2;
d->cpu_delay_total = (tmp < (s64)d->cpu_delay_total) ? 0 : tmp;
tmp = (s64)d->cpu_run_virtual_total + t3;
d->cpu_run_virtual_total =
(tmp < (s64)d->cpu_run_virtual_total) ? 0 : tmp;
@ -164,20 +168,26 @@ int delayacct_add_tsk(struct taskstats *d, struct task_struct *tsk)
return 0;
/* zero XXX_total, non-zero XXX_count implies XXX stat overflowed */
raw_spin_lock_irqsave(&tsk->delays->lock, flags);
d->blkio_delay_max = tsk->delays->blkio_delay_max;
tmp = d->blkio_delay_total + tsk->delays->blkio_delay;
d->blkio_delay_total = (tmp < d->blkio_delay_total) ? 0 : tmp;
d->swapin_delay_max = tsk->delays->swapin_delay_max;
tmp = d->swapin_delay_total + tsk->delays->swapin_delay;
d->swapin_delay_total = (tmp < d->swapin_delay_total) ? 0 : tmp;
d->freepages_delay_max = tsk->delays->freepages_delay_max;
tmp = d->freepages_delay_total + tsk->delays->freepages_delay;
d->freepages_delay_total = (tmp < d->freepages_delay_total) ? 0 : tmp;
d->thrashing_delay_max = tsk->delays->thrashing_delay_max;
tmp = d->thrashing_delay_total + tsk->delays->thrashing_delay;
d->thrashing_delay_total = (tmp < d->thrashing_delay_total) ? 0 : tmp;
d->compact_delay_max = tsk->delays->compact_delay_max;
tmp = d->compact_delay_total + tsk->delays->compact_delay;
d->compact_delay_total = (tmp < d->compact_delay_total) ? 0 : tmp;
d->wpcopy_delay_max = tsk->delays->wpcopy_delay_max;
tmp = d->wpcopy_delay_total + tsk->delays->wpcopy_delay;
d->wpcopy_delay_total = (tmp < d->wpcopy_delay_total) ? 0 : tmp;
d->irq_delay_max = tsk->delays->irq_delay_max;
tmp = d->irq_delay_total + tsk->delays->irq_delay;
d->irq_delay_total = (tmp < d->irq_delay_total) ? 0 : tmp;
d->blkio_count += tsk->delays->blkio_count;
@ -213,7 +223,8 @@ void __delayacct_freepages_end(void)
delayacct_end(&current->delays->lock,
&current->delays->freepages_start,
&current->delays->freepages_delay,
&current->delays->freepages_count);
&current->delays->freepages_count,
&current->delays->freepages_delay_max);
}
void __delayacct_thrashing_start(bool *in_thrashing)
@ -235,7 +246,8 @@ void __delayacct_thrashing_end(bool *in_thrashing)
delayacct_end(&current->delays->lock,
&current->delays->thrashing_start,
&current->delays->thrashing_delay,
&current->delays->thrashing_count);
&current->delays->thrashing_count,
&current->delays->thrashing_delay_max);
}
void __delayacct_swapin_start(void)
@ -248,7 +260,8 @@ void __delayacct_swapin_end(void)
delayacct_end(&current->delays->lock,
&current->delays->swapin_start,
&current->delays->swapin_delay,
&current->delays->swapin_count);
&current->delays->swapin_count,
&current->delays->swapin_delay_max);
}
void __delayacct_compact_start(void)
@ -261,7 +274,8 @@ void __delayacct_compact_end(void)
delayacct_end(&current->delays->lock,
&current->delays->compact_start,
&current->delays->compact_delay,
&current->delays->compact_count);
&current->delays->compact_count,
&current->delays->compact_delay_max);
}
void __delayacct_wpcopy_start(void)
@ -274,7 +288,8 @@ void __delayacct_wpcopy_end(void)
delayacct_end(&current->delays->lock,
&current->delays->wpcopy_start,
&current->delays->wpcopy_delay,
&current->delays->wpcopy_count);
&current->delays->wpcopy_count,
&current->delays->wpcopy_delay_max);
}
void __delayacct_irq(struct task_struct *task, u32 delta)
@ -284,6 +299,8 @@ void __delayacct_irq(struct task_struct *task, u32 delta)
raw_spin_lock_irqsave(&task->delays->lock, flags);
task->delays->irq_delay += delta;
task->delays->irq_count++;
if (delta > task->delays->irq_delay_max)
task->delays->irq_delay_max = delta;
raw_spin_unlock_irqrestore(&task->delays->lock, flags);
}

View File

@ -1511,12 +1511,13 @@ struct file *get_task_exe_file(struct task_struct *task)
struct file *exe_file = NULL;
struct mm_struct *mm;
if (task->flags & PF_KTHREAD)
return NULL;
task_lock(task);
mm = task->mm;
if (mm) {
if (!(task->flags & PF_KTHREAD))
exe_file = get_mm_exe_file(mm);
}
if (mm)
exe_file = get_mm_exe_file(mm);
task_unlock(task);
return exe_file;
}

View File

@ -1683,8 +1683,7 @@ void __devm_release_region(struct device *dev, struct resource *parent,
{
struct region_devres match_data = { parent, start, n };
__release_region(parent, start, n);
WARN_ON(devres_destroy(dev, devm_region_release, devm_region_match,
WARN_ON(devres_release(dev, devm_region_release, devm_region_match,
&match_data));
}
EXPORT_SYMBOL(__devm_release_region);

View File

@ -244,7 +244,8 @@ static inline void sched_info_dequeue(struct rq *rq, struct task_struct *t)
delta = rq_clock(rq) - t->sched_info.last_queued;
t->sched_info.last_queued = 0;
t->sched_info.run_delay += delta;
if (delta > t->sched_info.max_run_delay)
t->sched_info.max_run_delay = delta;
rq_sched_info_dequeue(rq, delta);
}
@ -266,6 +267,8 @@ static void sched_info_arrive(struct rq *rq, struct task_struct *t)
t->sched_info.run_delay += delta;
t->sched_info.last_arrival = now;
t->sched_info.pcount++;
if (delta > t->sched_info.max_run_delay)
t->sched_info.max_run_delay = delta;
rq_sched_info_arrive(rq, delta);
}

View File

@ -164,8 +164,8 @@ struct ucounts *get_ucounts(struct ucounts *ucounts)
struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
{
struct hlist_head *hashent = ucounts_hashentry(ns, uid);
struct ucounts *ucounts, *new;
bool wrapped;
struct ucounts *ucounts, *new = NULL;
spin_lock_irq(&ucounts_lock);
ucounts = find_ucounts(ns, uid, hashent);
@ -182,17 +182,17 @@ struct ucounts *alloc_ucounts(struct user_namespace *ns, kuid_t uid)
spin_lock_irq(&ucounts_lock);
ucounts = find_ucounts(ns, uid, hashent);
if (ucounts) {
kfree(new);
} else {
if (!ucounts) {
hlist_add_head(&new->node, hashent);
get_user_ns(new->ns);
spin_unlock_irq(&ucounts_lock);
return new;
}
}
wrapped = !get_ucounts_or_wrap(ucounts);
spin_unlock_irq(&ucounts_lock);
kfree(new);
if (wrapped) {
put_ucounts(ucounts);
return NULL;

View File

@ -190,7 +190,7 @@ void watchdog_hardlockup_check(unsigned int cpu, struct pt_regs *regs)
* with printk_cpu_sync_get_irqsave() that we can still at least
* get the message about the lockup out.
*/
pr_emerg("Watchdog detected hard LOCKUP on cpu %d\n", cpu);
pr_emerg("CPU%u: Watchdog detected hard LOCKUP on cpu %u\n", this_cpu, cpu);
printk_cpu_sync_get_irqsave(flags);
print_modules();

View File

@ -2269,7 +2269,6 @@ config TEST_LIST_SORT
config TEST_MIN_HEAP
tristate "Min heap test"
depends on DEBUG_KERNEL || m
select MIN_HEAP
help
Enable this to turn on min heap function tests. This test is
executed only once during system boot (so affects only boot time),
@ -2457,8 +2456,22 @@ config TEST_BITMAP
config TEST_UUID
tristate "Test functions located in the uuid module at runtime"
config TEST_XARRAY
tristate "Test the XArray code at runtime"
config XARRAY_KUNIT
tristate "KUnit test XArray code at runtime" if !KUNIT_ALL_TESTS
depends on KUNIT
default KUNIT_ALL_TESTS
help
Enable this option to test the XArray code at boot.
KUnit tests run during boot and output the results to the debug log
in TAP format (http://testanything.org/). Only useful for kernel devs
running the KUnit test harness, and not intended for inclusion into a
production build.
For more information on KUnit and unit tests in general please refer
to the KUnit documentation in Documentation/dev-tools/kunit/.
If unsure, say N.
config TEST_MAPLE_TREE
tristate "Test the Maple Tree code at runtime or module load"
@ -3161,6 +3174,21 @@ config INT_POW_TEST
If unsure, say N
config INT_SQRT_KUNIT_TEST
tristate "Integer square root test" if !KUNIT_ALL_TESTS
depends on KUNIT
default KUNIT_ALL_TESTS
help
This option enables the KUnit test suite for the int_sqrt() function,
which computes the integer square root. The suite covers perfect
squares, non-perfect squares and edge cases (0, values adjacent to
perfect squares, and large inputs) to ensure correctness.
If unsure, say N
endif # RUNTIME_TESTING_MENU
config ARCH_USE_MEMTEST

View File

@ -94,7 +94,6 @@ GCOV_PROFILE_test_bitmap.o := n
endif
obj-$(CONFIG_TEST_UUID) += test_uuid.o
obj-$(CONFIG_TEST_XARRAY) += test_xarray.o
obj-$(CONFIG_TEST_MAPLE_TREE) += test_maple_tree.o
obj-$(CONFIG_TEST_PARMAN) += test_parman.o
obj-$(CONFIG_TEST_KMOD) += test_kmod.o
@ -375,6 +374,7 @@ CFLAGS_bitfield_kunit.o := $(DISABLE_STRUCTLEAK_PLUGIN)
obj-$(CONFIG_BITFIELD_KUNIT) += bitfield_kunit.o
obj-$(CONFIG_CHECKSUM_KUNIT) += checksum_kunit.o
obj-$(CONFIG_UTIL_MACROS_KUNIT) += util_macros_kunit.o
obj-$(CONFIG_XARRAY_KUNIT) += test_xarray.o
obj-$(CONFIG_LIST_KUNIT_TEST) += list-test.o
obj-$(CONFIG_HASHTABLE_KUNIT_TEST) += hashtable_test.o
obj-$(CONFIG_LINEAR_RANGES_TEST) += test_linear_ranges.o

View File

@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/random.h>
#include <linux/prandom.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/stat.h>
@ -12,6 +12,24 @@
#include <linux/stacktrace.h>
#include <linux/fault-inject.h>
/*
* The should_fail() functions use prandom instead of the normal Linux RNG
* since they don't need cryptographically secure random numbers.
*/
static DEFINE_PER_CPU(struct rnd_state, fault_rnd_state);
static u32 fault_prandom_u32_below_100(void)
{
struct rnd_state *state;
u32 res;
state = &get_cpu_var(fault_rnd_state);
res = prandom_u32_state(state);
put_cpu_var(fault_rnd_state);
return res % 100;
}
/*
* setup_fault_attr() is a helper function for various __setup handlers, so it
* returns 0 on error, because that is what __setup handlers do.
@ -31,6 +49,8 @@ int setup_fault_attr(struct fault_attr *attr, char *str)
return 0;
}
prandom_init_once(&fault_rnd_state);
attr->probability = probability;
attr->interval = interval;
atomic_set(&attr->times, times);
@ -146,7 +166,7 @@ bool should_fail_ex(struct fault_attr *attr, ssize_t size, int flags)
return false;
}
if (attr->probability <= get_random_u32_below(100))
if (attr->probability <= fault_prandom_u32_below_100())
return false;
fail:
@ -219,6 +239,8 @@ struct dentry *fault_create_debugfs_attr(const char *name,
if (IS_ERR(dir))
return dir;
prandom_init_once(&fault_rnd_state);
debugfs_create_ul("probability", mode, dir, &attr->probability);
debugfs_create_ul("interval", mode, dir, &attr->interval);
debugfs_create_atomic_t("times", mode, dir, &attr->times);
@ -431,6 +453,8 @@ static const struct config_item_type fault_config_type = {
void fault_config_init(struct fault_config *config, const char *name)
{
prandom_init_once(&fault_rnd_state);
config_group_init_type_name(&config->group, name, &fault_config_type);
}
EXPORT_SYMBOL_GPL(fault_config_init);

View File

@ -63,9 +63,6 @@ static void *__init iov_kunit_create_buffer(struct kunit *test,
KUNIT_ASSERT_EQ(test, got, npages);
}
for (int i = 0; i < npages; i++)
pages[i]->index = i;
buffer = vmap(pages, npages, VM_MAP | VM_MAP_PUT_PAGES, PAGE_KERNEL);
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, buffer);

View File

@ -9,3 +9,4 @@ obj-$(CONFIG_INT_POW_TEST) += tests/int_pow_kunit.o
obj-$(CONFIG_TEST_DIV64) += test_div64.o
obj-$(CONFIG_TEST_MULDIV64) += test_mul_u64_u64_div_u64.o
obj-$(CONFIG_RATIONAL_KUNIT_TEST) += rational-test.o
obj-$(CONFIG_INT_SQRT_KUNIT_TEST) += tests/int_sqrt_kunit.o

View File

@ -1,3 +1,4 @@
# SPDX-License-Identifier: GPL-2.0-only
obj-$(CONFIG_INT_POW_TEST) += int_pow_kunit.o
obj-$(CONFIG_INT_SQRT_KUNIT_TEST) += int_sqrt_kunit.o

View File

@ -0,0 +1,66 @@
// SPDX-License-Identifier: GPL-2.0-only
#include <kunit/test.h>
#include <linux/limits.h>
#include <linux/math.h>
#include <linux/module.h>
#include <linux/string.h>
struct test_case_params {
unsigned long x;
unsigned long expected_result;
const char *name;
};
static const struct test_case_params params[] = {
{ 0, 0, "edge case: square root of 0" },
{ 1, 1, "perfect square: square root of 1" },
{ 2, 1, "non-perfect square: square root of 2" },
{ 3, 1, "non-perfect square: square root of 3" },
{ 4, 2, "perfect square: square root of 4" },
{ 5, 2, "non-perfect square: square root of 5" },
{ 6, 2, "non-perfect square: square root of 6" },
{ 7, 2, "non-perfect square: square root of 7" },
{ 8, 2, "non-perfect square: square root of 8" },
{ 9, 3, "perfect square: square root of 9" },
{ 15, 3, "non-perfect square: square root of 15 (N-1 from 16)" },
{ 16, 4, "perfect square: square root of 16" },
{ 17, 4, "non-perfect square: square root of 17 (N+1 from 16)" },
{ 80, 8, "non-perfect square: square root of 80 (N-1 from 81)" },
{ 81, 9, "perfect square: square root of 81" },
{ 82, 9, "non-perfect square: square root of 82 (N+1 from 81)" },
{ 255, 15, "non-perfect square: square root of 255 (N-1 from 256)" },
{ 256, 16, "perfect square: square root of 256" },
{ 257, 16, "non-perfect square: square root of 257 (N+1 from 256)" },
{ 2147483648, 46340, "large input: square root of 2147483648" },
{ 4294967295, 65535, "edge case: ULONG_MAX for 32-bit" },
};
static void get_desc(const struct test_case_params *tc, char *desc)
{
strscpy(desc, tc->name, KUNIT_PARAM_DESC_SIZE);
}
KUNIT_ARRAY_PARAM(int_sqrt, params, get_desc);
static void int_sqrt_test(struct kunit *test)
{
const struct test_case_params *tc = (const struct test_case_params *)test->param_value;
KUNIT_EXPECT_EQ(test, tc->expected_result, int_sqrt(tc->x));
}
static struct kunit_case math_int_sqrt_test_cases[] = {
KUNIT_CASE_PARAM(int_sqrt_test, int_sqrt_gen_params),
{}
};
static struct kunit_suite int_sqrt_test_suite = {
.name = "math-int_sqrt",
.test_cases = math_int_sqrt_test_cases,
};
kunit_test_suites(&int_sqrt_test_suite);
MODULE_DESCRIPTION("math.int_sqrt KUnit test suite");
MODULE_LICENSE("GPL");

View File

@ -665,7 +665,7 @@ EXPORT_SYMBOL_GPL(rhashtable_insert_slow);
* structure outside the hash table.
*
* This function may be called from any process context, including
* non-preemptable context, but cannot be called from softirq or
* non-preemptible context, but cannot be called from softirq or
* hardirq context.
*
* You must call rhashtable_walk_exit after this function returns.

View File

@ -32,7 +32,7 @@ static __init int pop_verify_heap(bool min_heap,
int last;
last = values[0];
min_heap_pop(heap, funcs, NULL);
min_heap_pop_inline(heap, funcs, NULL);
while (heap->nr > 0) {
if (min_heap) {
if (last > values[0]) {
@ -48,7 +48,7 @@ static __init int pop_verify_heap(bool min_heap,
}
}
last = values[0];
min_heap_pop(heap, funcs, NULL);
min_heap_pop_inline(heap, funcs, NULL);
}
return err;
}
@ -69,7 +69,7 @@ static __init int test_heapify_all(bool min_heap)
int i, err;
/* Test with known set of values. */
min_heapify_all(&heap, &funcs, NULL);
min_heapify_all_inline(&heap, &funcs, NULL);
err = pop_verify_heap(min_heap, &heap, &funcs);
@ -78,7 +78,7 @@ static __init int test_heapify_all(bool min_heap)
for (i = 0; i < heap.nr; i++)
values[i] = get_random_u32();
min_heapify_all(&heap, &funcs, NULL);
min_heapify_all_inline(&heap, &funcs, NULL);
err += pop_verify_heap(min_heap, &heap, &funcs);
return err;
@ -102,14 +102,14 @@ static __init int test_heap_push(bool min_heap)
/* Test with known set of values copied from data. */
for (i = 0; i < ARRAY_SIZE(data); i++)
min_heap_push(&heap, &data[i], &funcs, NULL);
min_heap_push_inline(&heap, &data[i], &funcs, NULL);
err = pop_verify_heap(min_heap, &heap, &funcs);
/* Test with randomly generated values. */
while (heap.nr < heap.size) {
temp = get_random_u32();
min_heap_push(&heap, &temp, &funcs, NULL);
min_heap_push_inline(&heap, &temp, &funcs, NULL);
}
err += pop_verify_heap(min_heap, &heap, &funcs);
@ -135,22 +135,22 @@ static __init int test_heap_pop_push(bool min_heap)
/* Fill values with data to pop and replace. */
temp = min_heap ? 0x80000000 : 0x7FFFFFFF;
for (i = 0; i < ARRAY_SIZE(data); i++)
min_heap_push(&heap, &temp, &funcs, NULL);
min_heap_push_inline(&heap, &temp, &funcs, NULL);
/* Test with known set of values copied from data. */
for (i = 0; i < ARRAY_SIZE(data); i++)
min_heap_pop_push(&heap, &data[i], &funcs, NULL);
min_heap_pop_push_inline(&heap, &data[i], &funcs, NULL);
err = pop_verify_heap(min_heap, &heap, &funcs);
heap.nr = 0;
for (i = 0; i < ARRAY_SIZE(data); i++)
min_heap_push(&heap, &temp, &funcs, NULL);
min_heap_push_inline(&heap, &temp, &funcs, NULL);
/* Test with randomly generated values. */
for (i = 0; i < ARRAY_SIZE(data); i++) {
temp = get_random_u32();
min_heap_pop_push(&heap, &temp, &funcs, NULL);
min_heap_pop_push_inline(&heap, &temp, &funcs, NULL);
}
err += pop_verify_heap(min_heap, &heap, &funcs);
@ -163,7 +163,7 @@ static __init int test_heap_del(bool min_heap)
-3, -1, -2, -4, 0x8000000, 0x7FFFFFF };
struct min_heap_test heap;
min_heap_init(&heap, values, ARRAY_SIZE(values));
min_heap_init_inline(&heap, values, ARRAY_SIZE(values));
heap.nr = ARRAY_SIZE(values);
struct min_heap_callbacks funcs = {
.less = min_heap ? less_than : greater_than,
@ -172,9 +172,9 @@ static __init int test_heap_del(bool min_heap)
int i, err;
/* Test with known set of values. */
min_heapify_all(&heap, &funcs, NULL);
min_heapify_all_inline(&heap, &funcs, NULL);
for (i = 0; i < ARRAY_SIZE(values) / 2; i++)
min_heap_del(&heap, get_random_u32() % heap.nr, &funcs, NULL);
min_heap_del_inline(&heap, get_random_u32() % heap.nr, &funcs, NULL);
err = pop_verify_heap(min_heap, &heap, &funcs);
@ -182,10 +182,10 @@ static __init int test_heap_del(bool min_heap)
heap.nr = ARRAY_SIZE(values);
for (i = 0; i < heap.nr; i++)
values[i] = get_random_u32();
min_heapify_all(&heap, &funcs, NULL);
min_heapify_all_inline(&heap, &funcs, NULL);
for (i = 0; i < ARRAY_SIZE(values) / 2; i++)
min_heap_del(&heap, get_random_u32() % heap.nr, &funcs, NULL);
min_heap_del_inline(&heap, get_random_u32() % heap.nr, &funcs, NULL);
err += pop_verify_heap(min_heap, &heap, &funcs);
return err;

File diff suppressed because it is too large

View File

@ -125,19 +125,20 @@ static inline void node_mark_all(struct xa_node *node, xa_mark_t mark)
*/
static void xas_squash_marks(const struct xa_state *xas)
{
unsigned int mark = 0;
xa_mark_t mark = 0;
unsigned int limit = xas->xa_offset + xas->xa_sibs + 1;
if (!xas->xa_sibs)
return;
for (;;) {
unsigned long *marks = node_marks(xas->xa_node, mark);
do {
unsigned long *marks = xas->xa_node->marks[mark];
if (find_next_bit(marks, limit, xas->xa_offset + 1) == limit)
continue;
__set_bit(xas->xa_offset, marks);
bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
} while (mark++ != (__force unsigned)XA_MARK_MAX);
if (find_next_bit(marks, limit, xas->xa_offset + 1) != limit) {
__set_bit(xas->xa_offset, marks);
bitmap_clear(marks, xas->xa_offset + 1, xas->xa_sibs);
}
if (mark == XA_MARK_MAX)
break;
mark_inc(mark);
}
}
/* extracts the offset within this node from the index */
@ -435,6 +436,11 @@ static unsigned long max_index(void *entry)
return (XA_CHUNK_SIZE << xa_to_node(entry)->shift) - 1;
}
static inline void *xa_zero_to_null(void *entry)
{
return xa_is_zero(entry) ? NULL : entry;
}
static void xas_shrink(struct xa_state *xas)
{
struct xarray *xa = xas->xa;
@ -451,8 +457,8 @@ static void xas_shrink(struct xa_state *xas)
break;
if (!xa_is_node(entry) && node->shift)
break;
if (xa_is_zero(entry) && xa_zero_busy(xa))
entry = NULL;
if (xa_zero_busy(xa))
entry = xa_zero_to_null(entry);
xas->xa_node = XAS_BOUNDS;
RCU_INIT_POINTER(xa->xa_head, entry);
@ -1022,7 +1028,7 @@ void xas_split_alloc(struct xa_state *xas, void *entry, unsigned int order,
unsigned int mask = xas->xa_sibs;
/* XXX: no support for splitting really large entries yet */
if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT < order))
if (WARN_ON(xas->xa_shift + 2 * XA_CHUNK_SHIFT <= order))
goto nomem;
if (xas->xa_shift + XA_CHUNK_SHIFT > order)
return;
@ -1147,6 +1153,7 @@ void xas_pause(struct xa_state *xas)
if (!xa_is_sibling(xa_entry(xas->xa, node, offset)))
break;
}
xas->xa_index &= ~0UL << node->shift;
xas->xa_index += (offset - xas->xa_offset) << node->shift;
if (xas->xa_index == 0)
xas->xa_node = XAS_BOUNDS;
@ -1382,6 +1389,8 @@ void *xas_find_marked(struct xa_state *xas, unsigned long max, xa_mark_t mark)
entry = xa_entry(xas->xa, xas->xa_node, xas->xa_offset);
if (!entry && !(xa_track_free(xas->xa) && mark == XA_FREE_MARK))
continue;
if (xa_is_sibling(entry))
continue;
if (!xa_is_node(entry))
return entry;
xas->xa_node = xa_to_node(entry);
@ -1474,9 +1483,7 @@ void *xa_load(struct xarray *xa, unsigned long index)
rcu_read_lock();
do {
entry = xas_load(&xas);
if (xa_is_zero(entry))
entry = NULL;
entry = xa_zero_to_null(xas_load(&xas));
} while (xas_retry(&xas, entry));
rcu_read_unlock();
@ -1486,8 +1493,6 @@ EXPORT_SYMBOL(xa_load);
static void *xas_result(struct xa_state *xas, void *curr)
{
if (xa_is_zero(curr))
return NULL;
if (xas_error(xas))
curr = xas->xa_node;
return curr;
@ -1508,7 +1513,7 @@ static void *xas_result(struct xa_state *xas, void *curr)
void *__xa_erase(struct xarray *xa, unsigned long index)
{
XA_STATE(xas, xa, index);
return xas_result(&xas, xas_store(&xas, NULL));
return xas_result(&xas, xa_zero_to_null(xas_store(&xas, NULL)));
}
EXPORT_SYMBOL(__xa_erase);
@ -1567,7 +1572,7 @@ void *__xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
xas_clear_mark(&xas, XA_FREE_MARK);
} while (__xas_nomem(&xas, gfp));
return xas_result(&xas, curr);
return xas_result(&xas, xa_zero_to_null(curr));
}
EXPORT_SYMBOL(__xa_store);
@ -1600,6 +1605,9 @@ void *xa_store(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
}
EXPORT_SYMBOL(xa_store);
static inline void *__xa_cmpxchg_raw(struct xarray *xa, unsigned long index,
void *old, void *entry, gfp_t gfp);
/**
* __xa_cmpxchg() - Store this entry in the XArray.
* @xa: XArray.
@ -1618,6 +1626,13 @@ EXPORT_SYMBOL(xa_store);
*/
void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
void *old, void *entry, gfp_t gfp)
{
return xa_zero_to_null(__xa_cmpxchg_raw(xa, index, old, entry, gfp));
}
EXPORT_SYMBOL(__xa_cmpxchg);
static inline void *__xa_cmpxchg_raw(struct xarray *xa, unsigned long index,
void *old, void *entry, gfp_t gfp)
{
XA_STATE(xas, xa, index);
void *curr;
@ -1636,7 +1651,6 @@ void *__xa_cmpxchg(struct xarray *xa, unsigned long index,
return xas_result(&xas, curr);
}
EXPORT_SYMBOL(__xa_cmpxchg);
/**
* __xa_insert() - Store this entry in the XArray if no entry is present.
@ -1656,26 +1670,16 @@ EXPORT_SYMBOL(__xa_cmpxchg);
*/
int __xa_insert(struct xarray *xa, unsigned long index, void *entry, gfp_t gfp)
{
XA_STATE(xas, xa, index);
void *curr;
int errno;
if (WARN_ON_ONCE(xa_is_advanced(entry)))
return -EINVAL;
if (!entry)
entry = XA_ZERO_ENTRY;
do {
curr = xas_load(&xas);
if (!curr) {
xas_store(&xas, entry);
if (xa_track_free(xa))
xas_clear_mark(&xas, XA_FREE_MARK);
} else {
xas_set_err(&xas, -EBUSY);
}
} while (__xas_nomem(&xas, gfp));
return xas_error(&xas);
curr = __xa_cmpxchg_raw(xa, index, NULL, entry, gfp);
errno = xa_err(curr);
if (errno)
return errno;
return (curr != NULL) ? -EBUSY : 0;
}
EXPORT_SYMBOL(__xa_insert);
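Not part of this commit: the caller-visible contract of xa_insert() is unchanged by the refactor above; it is now just expressed as a cmpxchg from NULL. A minimal sketch:

static int stash_item(struct xarray *xa, unsigned long index, void *item)
{
	/* Store @item at @index only if the slot is empty. */
	int err = xa_insert(xa, index, item, GFP_KERNEL);

	if (err == -EBUSY)
		pr_debug("index %lu is already occupied\n", index);
	return err;	/* 0 on success, -EBUSY if occupied, or an allocation error */
}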

View File

@ -1855,7 +1855,7 @@ static int kmemleak_scan_thread(void *arg)
* Wait before the first scan to allow the system to fully initialize.
*/
if (first_run) {
signed long timeout = msecs_to_jiffies(SECS_FIRST_SCAN * 1000);
signed long timeout = secs_to_jiffies(SECS_FIRST_SCAN);
first_run = 0;
while (timeout && !kthread_should_stop())
timeout = schedule_timeout_interruptible(timeout);
@ -2241,7 +2241,7 @@ void __init kmemleak_init(void)
return;
jiffies_min_age = msecs_to_jiffies(MSECS_MIN_AGE);
jiffies_scan_wait = msecs_to_jiffies(SECS_SCAN_WAIT * 1000);
jiffies_scan_wait = secs_to_jiffies(SECS_SCAN_WAIT);
object_cache = KMEM_CACHE(kmemleak_object, SLAB_NOLEAKTRACE);
scan_area_cache = KMEM_CACHE(kmemleak_scan_area, SLAB_NOLEAKTRACE);

Some files were not shown because too many files have changed in this diff