rxrpc: Use umin() and umax() rather than min_t()/max_t() where possible
Use umin() and umax() rather than min_t()/max_t() where the type specified
is an unsigned type.

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Marc Dionne <marc.dionne@auristor.com>
cc: linux-afs@lists.infradead.org
Link: https://patch.msgid.link/20241204074710.990092-4-dhowells@redhat.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 0e56ebde24
commit 29e03ec757
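For context: min_t(type, a, b) casts both operands to the named type before comparing, so a mismatched type argument can silently truncate a wider value, whereas umin()/umax() promote both operands to the widest unsigned type involved (the kernel macros in include/linux/minmax.h do this by adding 0u + 0ul + 0ull to each operand). Below is a minimal userspace sketch of that difference; MIN_T and UMIN are simplified stand-ins for the kernel macros, which carry type-checking machinery omitted here.

/*
 * Minimal userspace sketch of the difference between min_t() and umin().
 * MIN_T and UMIN are simplified stand-ins for the kernel macros in
 * include/linux/minmax.h, not the real implementations.
 */
#include <stdint.h>
#include <stdio.h>

/* min_t(type, a, b): both operands are cast to the named type before the
 * comparison, silently truncating anything wider than that type. */
#define MIN_T(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

/* umin(a, b): adding 0u + 0ul + 0ull promotes both operands to the widest
 * unsigned type involved, so the comparison never drops bits. */
#define UMIN(a, b)                                              \
        ((a) + 0u + 0ul + 0ull < (b) + 0u + 0ul + 0ull ?       \
         (a) + 0u + 0ul + 0ull : (b) + 0u + 0ul + 0ull)

int main(void)
{
        uint64_t remain = 0x100000001ULL;       /* just over 4 GiB */
        uint32_t cap = 65535;

        /* The cast in MIN_T truncates remain to 1 before comparing. */
        printf("MIN_T(u32, ...): %u\n", MIN_T(uint32_t, remain, cap));

        /* UMIN compares at 64 bits and returns the true minimum, 65535. */
        printf("UMIN(...):       %llu\n",
               (unsigned long long)UMIN(remain, cap));
        return 0;
}

Swapping min_t()/max_t() for umin()/umax() is therefore a no-op where the specified type already covers both operands, and removes a latent truncation hazard where it does not.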
@@ -233,8 +233,7 @@ static void rxrpc_close_tx_phase(struct rxrpc_call *call)
 static bool rxrpc_tx_window_has_space(struct rxrpc_call *call)
 {
-        unsigned int winsize = min_t(unsigned int, call->tx_winsize,
-                                     call->cong_cwnd + call->cong_extra);
+        unsigned int winsize = umin(call->tx_winsize, call->cong_cwnd + call->cong_extra);
         rxrpc_seq_t window = call->acks_hard_ack, wtop = window + winsize;
         rxrpc_seq_t tx_top = call->tx_top;
         int space;
 
@@ -467,7 +466,7 @@ bool rxrpc_input_call_event(struct rxrpc_call *call, struct sk_buff *skb)
         } else {
                 unsigned long nowj = jiffies, delayj, nextj;
 
-                delayj = max(nsecs_to_jiffies(delay), 1);
+                delayj = umax(nsecs_to_jiffies(delay), 1);
                 nextj = nowj + delayj;
                 if (time_before(nextj, call->timer.expires) ||
                     !timer_pending(&call->timer)) {
@@ -220,9 +220,9 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
                 __set_bit(RXRPC_CALL_EXCLUSIVE, &call->flags);
 
         if (p->timeouts.normal)
-                call->next_rx_timo = min(p->timeouts.normal, 1);
+                call->next_rx_timo = umin(p->timeouts.normal, 1);
         if (p->timeouts.idle)
-                call->next_req_timo = min(p->timeouts.idle, 1);
+                call->next_req_timo = umin(p->timeouts.idle, 1);
         if (p->timeouts.hard)
                 call->hard_timo = p->timeouts.hard;
 
@@ -231,7 +231,7 @@ static bool rxrpc_may_reuse_conn(struct rxrpc_connection *conn)
         distance = id - id_cursor;
         if (distance < 0)
                 distance = -distance;
-        limit = max_t(unsigned long, atomic_read(&rxnet->nr_conns) * 4, 1024);
+        limit = umax(atomic_read(&rxnet->nr_conns) * 4, 1024);
         if (distance > limit)
                 goto mark_dont_reuse;
 
@@ -44,8 +44,7 @@ static void rxrpc_congestion_management(struct rxrpc_call *call,
 
         if (test_and_clear_bit(RXRPC_CALL_RETRANS_TIMEOUT, &call->flags)) {
                 summary->retrans_timeo = true;
-                call->cong_ssthresh = max_t(unsigned int,
-                                            summary->flight_size / 2, 2);
+                call->cong_ssthresh = umax(summary->flight_size / 2, 2);
                 cwnd = 1;
                 if (cwnd >= call->cong_ssthresh &&
                     call->cong_mode == RXRPC_CALL_SLOW_START) {
@@ -113,8 +112,7 @@ static void rxrpc_congestion_management(struct rxrpc_call *call,
 
                 change = rxrpc_cong_begin_retransmission;
                 call->cong_mode = RXRPC_CALL_FAST_RETRANSMIT;
-                call->cong_ssthresh = max_t(unsigned int,
-                                            summary->flight_size / 2, 2);
+                call->cong_ssthresh = umax(summary->flight_size / 2, 2);
                 cwnd = call->cong_ssthresh + 3;
                 call->cong_extra = 0;
                 call->cong_dup_acks = 0;
@@ -206,9 +204,8 @@ void rxrpc_congestion_degrade(struct rxrpc_call *call)
         rxrpc_inc_stat(call->rxnet, stat_tx_data_cwnd_reset);
         call->tx_last_sent = now;
         call->cong_mode = RXRPC_CALL_SLOW_START;
-        call->cong_ssthresh = max_t(unsigned int, call->cong_ssthresh,
-                                    call->cong_cwnd * 3 / 4);
-        call->cong_cwnd = max_t(unsigned int, call->cong_cwnd / 2, RXRPC_MIN_CWND);
+        call->cong_ssthresh = umax(call->cong_ssthresh, call->cong_cwnd * 3 / 4);
+        call->cong_cwnd = umax(call->cong_cwnd / 2, RXRPC_MIN_CWND);
 }
 
 /*
@@ -709,7 +706,7 @@ static void rxrpc_input_ack_trailer(struct rxrpc_call *call, struct sk_buff *skb
                 call->tx_winsize = rwind;
         }
 
-        mtu = min(ntohl(trailer->maxMTU), ntohl(trailer->ifMTU));
+        mtu = umin(ntohl(trailer->maxMTU), ntohl(trailer->ifMTU));
 
         peer = call->peer;
         if (mtu < peer->maxdata) {
@@ -19,7 +19,7 @@ static int none_init_connection_security(struct rxrpc_connection *conn,
  */
 static struct rxrpc_txbuf *none_alloc_txbuf(struct rxrpc_call *call, size_t remain, gfp_t gfp)
 {
-        return rxrpc_alloc_data_txbuf(call, min_t(size_t, remain, RXRPC_JUMBO_DATALEN), 1, gfp);
+        return rxrpc_alloc_data_txbuf(call, umin(remain, RXRPC_JUMBO_DATALEN), 1, gfp);
 }
 
 static int none_secure_packet(struct rxrpc_call *call, struct rxrpc_txbuf *txb)
@@ -558,7 +558,7 @@ int rxrpc_io_thread(void *data)
                 }
 
                 timeout = nsecs_to_jiffies(delay_ns);
-                timeout = max(timeout, 1UL);
+                timeout = umax(timeout, 1);
                 schedule_timeout(timeout);
                 __set_current_state(TASK_RUNNING);
                 continue;
@@ -118,7 +118,7 @@ static void rxrpc_fill_out_ack(struct rxrpc_call *call,
         txb->kvec[1].iov_len = ack->nAcks;
 
         wrap = RXRPC_SACK_SIZE - sack;
-        to = min_t(unsigned int, ack->nAcks, RXRPC_SACK_SIZE);
+        to = umin(ack->nAcks, RXRPC_SACK_SIZE);
 
         if (sack + ack->nAcks <= RXRPC_SACK_SIZE) {
                 memcpy(sackp, call->ackr_sack_table + sack, ack->nAcks);
@@ -27,7 +27,7 @@ static u32 __rxrpc_set_rto(const struct rxrpc_peer *peer)
 
 static u32 rxrpc_bound_rto(u32 rto)
 {
-        return min(rto, RXRPC_RTO_MAX);
+        return umin(rto, RXRPC_RTO_MAX);
 }
 
 /*
@@ -91,11 +91,11 @@ static void rxrpc_rtt_estimator(struct rxrpc_peer *peer, long sample_rtt_us)
                 /* no previous measure. */
                 srtt = m << 3;          /* take the measured time to be rtt */
                 peer->mdev_us = m << 1; /* make sure rto = 3*rtt */
-                peer->rttvar_us = max(peer->mdev_us, rxrpc_rto_min_us(peer));
+                peer->rttvar_us = umax(peer->mdev_us, rxrpc_rto_min_us(peer));
                 peer->mdev_max_us = peer->rttvar_us;
         }
 
-        peer->srtt_us = max(1U, srtt);
+        peer->srtt_us = umax(srtt, 1);
 }
 
 /*
@@ -150,11 +150,11 @@ static struct rxrpc_txbuf *rxkad_alloc_txbuf(struct rxrpc_call *call, size_t rem
         struct rxrpc_txbuf *txb;
         size_t shdr, space;
 
-        remain = min(remain, 65535 - sizeof(struct rxrpc_wire_header));
+        remain = umin(remain, 65535 - sizeof(struct rxrpc_wire_header));
 
         switch (call->conn->security_level) {
         default:
-                space = min_t(size_t, remain, RXRPC_JUMBO_DATALEN);
+                space = umin(remain, RXRPC_JUMBO_DATALEN);
                 return rxrpc_alloc_data_txbuf(call, space, 1, gfp);
         case RXRPC_SECURITY_AUTH:
                 shdr = sizeof(struct rxkad_level1_hdr);
@@ -164,7 +164,7 @@ static struct rxrpc_txbuf *rxkad_alloc_txbuf(struct rxrpc_call *call, size_t rem
                 break;
         }
 
-        space = min_t(size_t, round_down(RXRPC_JUMBO_DATALEN, RXKAD_ALIGN), remain + shdr);
+        space = umin(round_down(RXRPC_JUMBO_DATALEN, RXKAD_ALIGN), remain + shdr);
         space = round_up(space, RXKAD_ALIGN);
 
         txb = rxrpc_alloc_data_txbuf(call, space, RXKAD_ALIGN, gfp);
@@ -503,7 +503,7 @@ static int rxperf_process_call(struct rxperf_call *call)
                         reply_len + sizeof(rxperf_magic_cookie));
 
         while (reply_len > 0) {
-                len = min_t(size_t, reply_len, PAGE_SIZE);
+                len = umin(reply_len, PAGE_SIZE);
                 bvec_set_page(&bv, ZERO_PAGE(0), len, 0);
                 iov_iter_bvec(&msg.msg_iter, WRITE, &bv, 1, len);
                 msg.msg_flags = MSG_MORE;
@@ -360,7 +360,7 @@ reload:
 
         /* append next segment of data to the current buffer */
         if (msg_data_left(msg) > 0) {
-                size_t copy = min_t(size_t, txb->space, msg_data_left(msg));
+                size_t copy = umin(txb->space, msg_data_left(msg));
 
                 _debug("add %zu", copy);
                 if (!copy_from_iter_full(txb->kvec[0].iov_base + txb->offset,