mirror of
https://git.kernel.org/pub/scm/linux/kernel/git/stable/linux.git
synced 2025-01-18 03:06:43 +00:00
s390/zcrypt: fix card and queue total counter wrap
The internal statistic counters for the total number of requests processed per card and per queue used integers, so they wrap after a rather huge number of crypto requests has been processed. This patch introduces uint64 counters, which should last much longer but may still wrap. The sysfs attributes request_count for card and queue also used only %ld and now display the counter value with %llu. This is not a security-relevant fix: the int overflow that happened is not in any way exploitable as a security breach. Signed-off-by: Harald Freudenberger <freude@linux.ibm.com> Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
This commit is contained in:
parent
aab73d278d
commit
fcd98d4002
@ -162,7 +162,7 @@ struct ap_card {
|
||||
unsigned int functions; /* AP device function bitfield. */
|
||||
int queue_depth; /* AP queue depth.*/
|
||||
int id; /* AP card number. */
|
||||
atomic_t total_request_count; /* # requests ever for this AP device.*/
|
||||
atomic64_t total_request_count; /* # requests ever for this AP device.*/
|
||||
};
|
||||
|
||||
#define to_ap_card(x) container_of((x), struct ap_card, ap_dev.device)
|
||||
@ -179,7 +179,7 @@ struct ap_queue {
|
||||
enum ap_state state; /* State of the AP device. */
|
||||
int pendingq_count; /* # requests on pendingq list. */
|
||||
int requestq_count; /* # requests on requestq list. */
|
||||
int total_request_count; /* # requests ever for this AP device.*/
|
||||
u64 total_request_count; /* # requests ever for this AP device.*/
|
||||
int request_timeout; /* Request timeout in jiffies. */
|
||||
struct timer_list timeout; /* Timer for request timeouts. */
|
||||
struct list_head pendingq; /* List of message sent to AP queue. */
|
||||
|
@ -63,13 +63,13 @@ static ssize_t request_count_show(struct device *dev,
|
||||
char *buf)
|
||||
{
|
||||
struct ap_card *ac = to_ap_card(dev);
|
||||
unsigned int req_cnt;
|
||||
u64 req_cnt;
|
||||
|
||||
req_cnt = 0;
|
||||
spin_lock_bh(&ap_list_lock);
|
||||
req_cnt = atomic_read(&ac->total_request_count);
|
||||
req_cnt = atomic64_read(&ac->total_request_count);
|
||||
spin_unlock_bh(&ap_list_lock);
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
|
||||
return snprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
|
||||
}
|
||||
|
||||
static ssize_t request_count_store(struct device *dev,
|
||||
@ -83,7 +83,7 @@ static ssize_t request_count_store(struct device *dev,
|
||||
for_each_ap_queue(aq, ac)
|
||||
aq->total_request_count = 0;
|
||||
spin_unlock_bh(&ap_list_lock);
|
||||
atomic_set(&ac->total_request_count, 0);
|
||||
atomic64_set(&ac->total_request_count, 0);
|
||||
|
||||
return count;
|
||||
}
|
||||
|
@ -479,12 +479,12 @@ static ssize_t request_count_show(struct device *dev,
|
||||
char *buf)
|
||||
{
|
||||
struct ap_queue *aq = to_ap_queue(dev);
|
||||
unsigned int req_cnt;
|
||||
u64 req_cnt;
|
||||
|
||||
spin_lock_bh(&aq->lock);
|
||||
req_cnt = aq->total_request_count;
|
||||
spin_unlock_bh(&aq->lock);
|
||||
return snprintf(buf, PAGE_SIZE, "%d\n", req_cnt);
|
||||
return snprintf(buf, PAGE_SIZE, "%llu\n", req_cnt);
|
||||
}
|
||||
|
||||
static ssize_t request_count_store(struct device *dev,
|
||||
@ -676,7 +676,7 @@ void ap_queue_message(struct ap_queue *aq, struct ap_message *ap_msg)
|
||||
list_add_tail(&ap_msg->list, &aq->requestq);
|
||||
aq->requestq_count++;
|
||||
aq->total_request_count++;
|
||||
atomic_inc(&aq->card->total_request_count);
|
||||
atomic64_inc(&aq->card->total_request_count);
|
||||
/* Send/receive as many request from the queue as possible. */
|
||||
ap_wait(ap_sm_event_loop(aq, AP_EVENT_POLL));
|
||||
spin_unlock_bh(&aq->lock);
|
||||
|
@ -606,8 +606,8 @@ static inline bool zcrypt_card_compare(struct zcrypt_card *zc,
|
||||
weight += atomic_read(&zc->load);
|
||||
pref_weight += atomic_read(&pref_zc->load);
|
||||
if (weight == pref_weight)
|
||||
return atomic_read(&zc->card->total_request_count) >
|
||||
atomic_read(&pref_zc->card->total_request_count);
|
||||
return atomic64_read(&zc->card->total_request_count) >
|
||||
atomic64_read(&pref_zc->card->total_request_count);
|
||||
return weight > pref_weight;
|
||||
}
|
||||
|
||||
@ -1226,11 +1226,12 @@ static void zcrypt_qdepth_mask(char qdepth[], size_t max_adapters)
|
||||
spin_unlock(&zcrypt_list_lock);
|
||||
}
|
||||
|
||||
static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters)
|
||||
static void zcrypt_perdev_reqcnt(u32 reqcnt[], size_t max_adapters)
|
||||
{
|
||||
struct zcrypt_card *zc;
|
||||
struct zcrypt_queue *zq;
|
||||
int card;
|
||||
u64 cnt;
|
||||
|
||||
memset(reqcnt, 0, sizeof(int) * max_adapters);
|
||||
spin_lock(&zcrypt_list_lock);
|
||||
@ -1242,8 +1243,9 @@ static void zcrypt_perdev_reqcnt(int reqcnt[], size_t max_adapters)
|
||||
|| card >= max_adapters)
|
||||
continue;
|
||||
spin_lock(&zq->queue->lock);
|
||||
reqcnt[card] = zq->queue->total_request_count;
|
||||
cnt = zq->queue->total_request_count;
|
||||
spin_unlock(&zq->queue->lock);
|
||||
reqcnt[card] = (cnt < UINT_MAX) ? (u32) cnt : UINT_MAX;
|
||||
}
|
||||
}
|
||||
local_bh_enable();
|
||||
@ -1421,9 +1423,9 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
|
||||
return 0;
|
||||
}
|
||||
case ZCRYPT_PERDEV_REQCNT: {
|
||||
int *reqcnt;
|
||||
u32 *reqcnt;
|
||||
|
||||
reqcnt = kcalloc(AP_DEVICES, sizeof(int), GFP_KERNEL);
|
||||
reqcnt = kcalloc(AP_DEVICES, sizeof(u32), GFP_KERNEL);
|
||||
if (!reqcnt)
|
||||
return -ENOMEM;
|
||||
zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES);
|
||||
@ -1480,7 +1482,7 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd,
|
||||
}
|
||||
case Z90STAT_PERDEV_REQCNT: {
|
||||
/* the old ioctl supports only 64 adapters */
|
||||
int reqcnt[MAX_ZDEV_CARDIDS];
|
||||
u32 reqcnt[MAX_ZDEV_CARDIDS];
|
||||
|
||||
zcrypt_perdev_reqcnt(reqcnt, MAX_ZDEV_CARDIDS);
|
||||
if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt)))
|
||||
|
Loading…
x
Reference in New Issue
Block a user