A fix for the mount "device" string parser from Patrick and two cred
reference counting fixups from Max, marked for stable.  Also included
a number of cleanups and a tweak to MAINTAINERS to avoid unnecessarily
CCing netdev list.
 -----BEGIN PGP SIGNATURE-----
 
 iQFHBAABCAAxFiEEydHwtzie9C7TfviiSn/eOAIR84sFAmdJ/sMTHGlkcnlvbW92
 QGdtYWlsLmNvbQAKCRBKf944AhHzi8EOB/9Jhq1nOe0dN7aAWN1owZH85TXmOLuX
 eSS79AJp63lmJgx+mF0CbLLN6Vwjvm1vqz10Uhe5VCmqtxKy1/F4QxEOwk+zEMwT
 iGkM+6nUtMMqnxqItJpFC19YQONwgidsNcbi7v8nDEHqH8FXEC4R0pi0990bUQSj
 E8zVzq44TNFQrhWjDJHPnXsxbH9SijRuwu1O4KEZ2HK0QQ8LfpPptozJawH0p2Hs
 Wc6V6ppt7o9F9MHW137I4BOG9xm18aAQa9Ztd5GHhip63SuLpdQNwoM9JhC8J/bt
 bcci5CoCa34P57g9/1kh9ov/NXf9XgjSCFlDzk9zZH0IxaX5CYgTqYL5
 =xf5D
 -----END PGP SIGNATURE-----

Merge tag 'ceph-for-6.13-rc1' of https://github.com/ceph/ceph-client

Pull ceph updates from Ilya Dryomov:
 "A fix for the mount "device" string parser from Patrick and two cred
  reference counting fixups from Max, marked for stable.

  Also included a number of cleanups and a tweak to MAINTAINERS to avoid
  unnecessarily CCing netdev list"

* tag 'ceph-for-6.13-rc1' of https://github.com/ceph/ceph-client:
  ceph: fix cred leak in ceph_mds_check_access()
  ceph: pass cred pointer to ceph_mds_auth_match()
  ceph: improve caps debugging output
  ceph: correct ceph_mds_cap_peer field name
  ceph: correct ceph_mds_cap_item field name
  ceph: miscellaneous spelling fixes
  ceph: Use strscpy() instead of strcpy() in __get_snap_name()
  ceph: Use str_true_false() helper in status_show()
  ceph: requalify some char pointers as const
  ceph: extract entity name from device id
  MAINTAINERS: exclude net/ceph from networking
  ceph: Remove fs/ceph deadcode
  libceph: Remove unused ceph_crypto_key_encode
  libceph: Remove unused ceph_osdc_watch_check
  libceph: Remove unused pagevec functions
  libceph: Remove unused ceph_pagelist functions
Linus Torvalds, 2024-11-30 10:22:38 -08:00
commit 9d0ad04553
22 changed files with 65 additions and 230 deletions


@@ -16353,6 +16353,7 @@ X: include/net/wext.h
X: net/9p/
X: net/bluetooth/
X: net/can/
X: net/ceph/
X: net/mac80211/
X: net/rfkill/
X: net/wireless/


@@ -2195,7 +2195,7 @@ int ceph_pool_perm_check(struct inode *inode, int need)
if (ci->i_vino.snap != CEPH_NOSNAP) {
/*
* Pool permission check needs to write to the first object.
* But for snapshot, head of the first object may have alread
* But for snapshot, head of the first object may have already
* been deleted. Skip check to avoid creating orphan object.
*/
return 0;


@@ -978,20 +978,6 @@ int __ceph_caps_revoking_other(struct ceph_inode_info *ci,
return 0;
}
int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
{
struct inode *inode = &ci->netfs.inode;
struct ceph_client *cl = ceph_inode_to_client(inode);
int ret;
spin_lock(&ci->i_ceph_lock);
ret = __ceph_caps_revoking_other(ci, NULL, mask);
spin_unlock(&ci->i_ceph_lock);
doutc(cl, "%p %llx.%llx %s = %d\n", inode, ceph_vinop(inode),
ceph_cap_string(mask), ret);
return ret;
}
int __ceph_caps_used(struct ceph_inode_info *ci)
{
int used = 0;
@@ -2813,7 +2799,7 @@ void ceph_take_cap_refs(struct ceph_inode_info *ci, int got,
* requested from the MDS.
*
* Returns 0 if caps were not able to be acquired (yet), 1 if succeed,
* or a negative error code. There are 3 speical error codes:
* or a negative error code. There are 3 special error codes:
* -EAGAIN: need to sleep but non-blocking is specified
* -EFBIG: ask caller to call check_max_size() and try again.
* -EUCLEAN: ask caller to call ceph_renew_caps() and try again.
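For context, these three special codes are meant to be consumed by the caller's retry logic rather than propagated as-is. A minimal sketch of such a loop, where try_get_caps(), check_max_size() and renew_caps() are illustrative stand-ins rather than the real fs/ceph helpers:

	/* Sketch only: dispatch on the three special codes documented above. */
	for (;;) {
		ret = try_get_caps(inode, need, want);  /* hypothetical helper */
		if (ret == -EAGAIN)
			break;                  /* non-blocking mode: sleep and retry later */
		if (ret == -EFBIG) {
			check_max_size(inode);  /* ask the MDS for a larger max size */
			continue;
		}
		if (ret == -EUCLEAN) {
			renew_caps(inode);      /* renew session caps, then try again */
			continue;
		}
		break;  /* 0 = not acquired yet, 1 = got the caps, other <0 = hard error */
	}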
@@ -4085,23 +4071,22 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
struct ceph_cap *cap, *tcap, *new_cap = NULL;
struct ceph_inode_info *ci = ceph_inode(inode);
u64 t_cap_id;
unsigned mseq = le32_to_cpu(ex->migrate_seq);
unsigned t_seq, t_mseq;
u32 t_issue_seq, t_mseq;
int target, issued;
int mds = session->s_mds;
if (ph) {
t_cap_id = le64_to_cpu(ph->cap_id);
t_seq = le32_to_cpu(ph->seq);
t_issue_seq = le32_to_cpu(ph->issue_seq);
t_mseq = le32_to_cpu(ph->mseq);
target = le32_to_cpu(ph->mds);
} else {
t_cap_id = t_seq = t_mseq = 0;
t_cap_id = t_issue_seq = t_mseq = 0;
target = -1;
}
doutc(cl, "%p %llx.%llx ci %p mds%d mseq %d target %d\n",
inode, ceph_vinop(inode), ci, mds, mseq, target);
doutc(cl, " cap %llx.%llx export to peer %d piseq %u pmseq %u\n",
ceph_vinop(inode), target, t_issue_seq, t_mseq);
retry:
down_read(&mdsc->snap_rwsem);
spin_lock(&ci->i_ceph_lock);
@@ -4134,12 +4119,12 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
if (tcap) {
/* already have caps from the target */
if (tcap->cap_id == t_cap_id &&
ceph_seq_cmp(tcap->seq, t_seq) < 0) {
ceph_seq_cmp(tcap->seq, t_issue_seq) < 0) {
doutc(cl, " updating import cap %p mds%d\n", tcap,
target);
tcap->cap_id = t_cap_id;
tcap->seq = t_seq - 1;
tcap->issue_seq = t_seq - 1;
tcap->seq = t_issue_seq - 1;
tcap->issue_seq = t_issue_seq - 1;
tcap->issued |= issued;
tcap->implemented |= issued;
if (cap == ci->i_auth_cap) {
@@ -4154,7 +4139,7 @@ static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
int flag = (cap == ci->i_auth_cap) ? CEPH_CAP_FLAG_AUTH : 0;
tcap = new_cap;
ceph_add_cap(inode, tsession, t_cap_id, issued, 0,
t_seq - 1, t_mseq, (u64)-1, flag, &new_cap);
t_issue_seq - 1, t_mseq, (u64)-1, flag, &new_cap);
if (!list_empty(&ci->i_cap_flush_list) &&
ci->i_auth_cap == tcap) {
@@ -4228,18 +4213,22 @@ static void handle_cap_import(struct ceph_mds_client *mdsc,
u64 realmino = le64_to_cpu(im->realm);
u64 cap_id = le64_to_cpu(im->cap_id);
u64 p_cap_id;
u32 piseq = 0;
u32 pmseq = 0;
int peer;
if (ph) {
p_cap_id = le64_to_cpu(ph->cap_id);
peer = le32_to_cpu(ph->mds);
piseq = le32_to_cpu(ph->issue_seq);
pmseq = le32_to_cpu(ph->mseq);
} else {
p_cap_id = 0;
peer = -1;
}
doutc(cl, "%p %llx.%llx ci %p mds%d mseq %d peer %d\n",
inode, ceph_vinop(inode), ci, mds, mseq, peer);
doutc(cl, " cap %llx.%llx import from peer %d piseq %u pmseq %u\n",
ceph_vinop(inode), peer, piseq, pmseq);
retry:
cap = __get_cap_for_mds(ci, mds);
if (!cap) {
@@ -4268,15 +4257,13 @@ static void handle_cap_import(struct ceph_mds_client *mdsc,
doutc(cl, " remove export cap %p mds%d flags %d\n",
ocap, peer, ph->flags);
if ((ph->flags & CEPH_CAP_FLAG_AUTH) &&
(ocap->seq != le32_to_cpu(ph->seq) ||
ocap->mseq != le32_to_cpu(ph->mseq))) {
(ocap->seq != piseq ||
ocap->mseq != pmseq)) {
pr_err_ratelimited_client(cl, "mismatched seq/mseq: "
"%p %llx.%llx mds%d seq %d mseq %d"
" importer mds%d has peer seq %d mseq %d\n",
inode, ceph_vinop(inode), peer,
ocap->seq, ocap->mseq, mds,
le32_to_cpu(ph->seq),
le32_to_cpu(ph->mseq));
ocap->seq, ocap->mseq, mds, piseq, pmseq);
}
ceph_remove_cap(mdsc, ocap, (ph->flags & CEPH_CAP_FLAG_RELEASE));
}
@@ -4350,7 +4337,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
struct ceph_snap_realm *realm = NULL;
int op;
int msg_version = le16_to_cpu(msg->hdr.version);
u32 seq, mseq;
u32 seq, mseq, issue_seq;
struct ceph_vino vino;
void *snaptrace;
size_t snaptrace_len;
@@ -4360,8 +4347,6 @@ void ceph_handle_caps(struct ceph_mds_session *session,
bool close_sessions = false;
bool do_cap_release = false;
doutc(cl, "from mds%d\n", session->s_mds);
if (!ceph_inc_mds_stopping_blocker(mdsc, session))
return;
@@ -4375,6 +4360,7 @@ void ceph_handle_caps(struct ceph_mds_session *session,
vino.snap = CEPH_NOSNAP;
seq = le32_to_cpu(h->seq);
mseq = le32_to_cpu(h->migrate_seq);
issue_seq = le32_to_cpu(h->issue_seq);
snaptrace = h + 1;
snaptrace_len = le32_to_cpu(h->snap_trace_len);
@@ -4462,12 +4448,11 @@ void ceph_handle_caps(struct ceph_mds_session *session,
/* lookup ino */
inode = ceph_find_inode(mdsc->fsc->sb, vino);
doutc(cl, " op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op),
vino.ino, vino.snap, inode);
doutc(cl, " caps mds%d op %s ino %llx.%llx inode %p seq %u iseq %u mseq %u\n",
session->s_mds, ceph_cap_op_name(op), vino.ino, vino.snap, inode,
seq, issue_seq, mseq);
mutex_lock(&session->s_mutex);
doutc(cl, " mds%d seq %lld cap seq %u\n", session->s_mds,
session->s_seq, (unsigned)seq);
if (!inode) {
doutc(cl, " i don't have ino %llx\n", vino.ino);


@@ -27,7 +27,7 @@ struct ceph_fname {
};
/*
* Header for the crypted file when truncating the size, this
* Header for the encrypted file when truncating the size, this
* will be sent to MDS, and the MDS will update the encrypted
* last block and then truncate the size.
*/


@@ -357,7 +357,7 @@ static int status_show(struct seq_file *s, void *p)
seq_printf(s, "instance: %s.%lld %s/%u\n", ENTITY_NAME(inst->name),
ceph_pr_addr(client_addr), le32_to_cpu(client_addr->nonce));
seq_printf(s, "blocklisted: %s\n", fsc->blocklisted ? "true" : "false");
seq_printf(s, "blocklisted: %s\n", str_true_false(fsc->blocklisted));
return 0;
}
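The str_true_false() helper used above lives in <linux/string_choices.h>; as far as I know it is simply the replaced ternary, centralized so that "true"/"false" string literals are not duplicated at every call site:

	/* Paraphrased from <linux/string_choices.h>: */
	static inline const char *str_true_false(bool v)
	{
		return v ? "true" : "false";
	}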


@@ -207,7 +207,7 @@ static int __dcache_readdir(struct file *file, struct dir_context *ctx,
dentry = __dcache_find_get_entry(parent, idx + step,
&cache_ctl);
if (!dentry) {
/* use linar search */
/* use linear search */
idx = 0;
break;
}
@@ -659,7 +659,7 @@ static bool need_reset_readdir(struct ceph_dir_file_info *dfi, loff_t new_pos)
return true;
if (is_hash_order(new_pos)) {
/* no need to reset last_name for a forward seek when
* dentries are sotred in hash order */
* dentries are sorted in hash order */
} else if (dfi->frag != fpos_frag(new_pos)) {
return true;
}


@@ -393,9 +393,9 @@ static struct dentry *ceph_get_parent(struct dentry *child)
}
dir = snapdir;
}
/* If directory has already been deleted, futher get_parent
/* If directory has already been deleted, further get_parent
* will fail. Do not mark snapdir dentry as disconnected,
* this prevent exportfs from doing futher get_parent. */
* this prevents exportfs from doing further get_parent. */
if (unlinked)
dn = d_obtain_root(dir);
else
@@ -452,7 +452,13 @@ static int __get_snap_name(struct dentry *parent, char *name,
goto out;
if (ceph_snap(inode) == CEPH_SNAPDIR) {
if (ceph_snap(dir) == CEPH_NOSNAP) {
strcpy(name, fsc->mount_options->snapdir_name);
/*
* .get_name() from struct export_operations
* assumes that its 'name' parameter is pointing
* to a NAME_MAX+1 sized buffer
*/
strscpy(name, fsc->mount_options->snapdir_name,
NAME_MAX + 1);
err = 0;
}
goto out;
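Unlike strcpy(), strscpy() takes the destination size, always NUL-terminates, and reports truncation with -E2BIG, which is what makes the NAME_MAX + 1 assumption in the comment above safe to rely on. A small illustration (buffer and source names made up):

	char buf[NAME_MAX + 1];
	ssize_t n = strscpy(buf, snap_name, sizeof(buf));
	if (n == -E2BIG)
		pr_warn("snapshot name truncated\n");  /* buf is still NUL-terminated */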


@@ -160,7 +160,7 @@ struct inode *ceph_get_inode(struct super_block *sb, struct ceph_vino vino,
}
/*
* get/constuct snapdir inode for a given directory
* get/construct snapdir inode for a given directory
*/
struct inode *ceph_get_snapdir(struct inode *parent)
{


@@ -827,7 +827,7 @@ static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
* And the worst case is that for the none async openc request it will
* successfully open the file if the CDentry hasn't been unlinked yet,
* but later the previous delayed async unlink request will remove the
* CDenty. That means the just created file is possiblly deleted later
* CDentry. That means the just created file is possibly deleted later
* by accident.
*
* We need to wait for the inflight async unlink requests to finish
@@ -1747,14 +1747,6 @@ static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
}
}
void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session)
{
mutex_lock(&mdsc->mutex);
__open_export_target_sessions(mdsc, session);
mutex_unlock(&mdsc->mutex);
}
/*
* session caps
*/
@@ -2362,7 +2354,7 @@ static void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
item->ino = cpu_to_le64(cap->cap_ino);
item->cap_id = cpu_to_le64(cap->cap_id);
item->migrate_seq = cpu_to_le32(cap->mseq);
item->seq = cpu_to_le32(cap->issue_seq);
item->issue_seq = cpu_to_le32(cap->issue_seq);
msg->front.iov_len += sizeof(*item);
ceph_put_cap(mdsc, cap);
@@ -3269,7 +3261,7 @@ static int __prepare_send_request(struct ceph_mds_session *session,
&session->s_features);
/*
* Avoid inifinite retrying after overflow. The client will
* Avoid infinite retrying after overflow. The client will
* increase the retry count and if the MDS is old version,
* so we limit to retry at most 256 times.
*/
@@ -3522,7 +3514,7 @@ static void __do_request(struct ceph_mds_client *mdsc,
/*
* For async create we will choose the auth MDS of frag in parent
* directory to send the request and ususally this works fine, but
* directory to send the request and usually this works fine, but
* if the migrated the dirtory to another MDS before it could handle
* it the request will be forwarded.
*
@@ -4033,7 +4025,7 @@ static void handle_forward(struct ceph_mds_client *mdsc,
__unregister_request(mdsc, req);
} else if (fwd_seq <= req->r_num_fwd || (uint32_t)fwd_seq >= U32_MAX) {
/*
* Avoid inifinite retrying after overflow.
* Avoid infinite retrying after overflow.
*
* The MDS will increase the fwd count and in client side
* if the num_fwd is less than the one saved in request
@@ -5609,9 +5601,9 @@ void send_flush_mdlog(struct ceph_mds_session *s)
static int ceph_mds_auth_match(struct ceph_mds_client *mdsc,
struct ceph_mds_cap_auth *auth,
const struct cred *cred,
char *tpath)
{
const struct cred *cred = get_current_cred();
u32 caller_uid = from_kuid(&init_user_ns, cred->fsuid);
u32 caller_gid = from_kgid(&init_user_ns, cred->fsgid);
struct ceph_client *cl = mdsc->fsc->client;
@@ -5734,11 +5726,12 @@ int ceph_mds_check_access(struct ceph_mds_client *mdsc, char *tpath, int mask)
for (i = 0; i < mdsc->s_cap_auths_num; i++) {
struct ceph_mds_cap_auth *s = &mdsc->s_cap_auths[i];
err = ceph_mds_auth_match(mdsc, s, tpath);
err = ceph_mds_auth_match(mdsc, s, cred, tpath);
if (err < 0) {
put_cred(cred);
return err;
} else if (err > 0) {
/* always follow the last auth caps' permision */
/* always follow the last auth caps' permission */
root_squash_perms = true;
rw_perms_s = NULL;
if ((mask & MAY_WRITE) && s->writeable &&
@@ -5751,6 +5744,8 @@ int ceph_mds_check_access(struct ceph_mds_client *mdsc, char *tpath, int mask)
}
}
put_cred(cred);
doutc(cl, "root_squash_perms %d, rw_perms_s %p\n", root_squash_perms,
rw_perms_s);
if (root_squash_perms && rw_perms_s == NULL) {
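Taken together, the hunks above implement the stable-marked cred fixes: ceph_mds_check_access() now takes one reference via get_current_cred(), lends the pointer to ceph_mds_auth_match() (which previously took its own reference and could leak it), and drops it with put_cred() on every exit path. A sketch of the general pattern with simplified names:

	const struct cred *cred = get_current_cred();  /* +1 reference */
	int i, err = 0;

	for (i = 0; i < num_auths; i++) {          /* num_auths: illustrative */
		err = auth_match(auths[i], cred);  /* borrows, takes no reference */
		if (err < 0) {
			put_cred(cred);            /* error path no longer leaks */
			return err;
		}
	}
	put_cred(cred);                            /* normal path */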


@@ -634,8 +634,6 @@ extern void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc,
extern struct ceph_mds_session *
ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target);
extern void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session);
extern int ceph_trim_caps(struct ceph_mds_client *mdsc,
struct ceph_mds_session *session,


@@ -285,8 +285,10 @@ static int ceph_parse_new_source(const char *dev_name, const char *dev_name_end,
size_t len;
struct ceph_fsid fsid;
struct ceph_parse_opts_ctx *pctx = fc->fs_private;
struct ceph_options *opts = pctx->copts;
struct ceph_mount_options *fsopt = pctx->opts;
char *fsid_start, *fs_name_start;
const char *name_start = dev_name;
const char *fsid_start, *fs_name_start;
if (*dev_name_end != '=') {
dout("separator '=' missing in source");
@@ -296,8 +298,14 @@ static int ceph_parse_new_source(const char *dev_name, const char *dev_name_end,
fsid_start = strchr(dev_name, '@');
if (!fsid_start)
return invalfc(fc, "missing cluster fsid");
++fsid_start; /* start of cluster fsid */
len = fsid_start - name_start;
kfree(opts->name);
opts->name = kstrndup(name_start, len, GFP_KERNEL);
if (!opts->name)
return -ENOMEM;
dout("using %s entity name", opts->name);
++fsid_start; /* start of cluster fsid */
fs_name_start = strchr(fsid_start, '.');
if (!fs_name_start)
return invalfc(fc, "missing file system name");
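As I read this hunk, the text before the '@' in a new-style mount source is now duplicated into opts->name, so the client authenticates as that entity rather than a default. A worked illustration with made-up names (the fsid is a placeholder):

	/* dev_name: "bob@f117a1e2-0ac6-4b60-9f04-5c5a4b9c3a11.cephfs=/dir" */
	fsid_start = strchr(dev_name, '@');  /* points at the '@' */
	len = fsid_start - name_start;       /* 3, the length of "bob" */
	opts->name = kstrndup(name_start, len, GFP_KERNEL);  /* "bob", i.e. client.bob */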


@@ -60,7 +60,7 @@
/* max size of osd read request, limited by libceph */
#define CEPH_MAX_READ_SIZE CEPH_MSG_MAX_DATA_LEN
/* osd has a configurable limitaion of max write size.
/* osd has a configurable limitation of max write size.
* CEPH_MSG_MAX_DATA_LEN should be small enough. */
#define CEPH_MAX_WRITE_SIZE CEPH_MSG_MAX_DATA_LEN
#define CEPH_RASIZE_DEFAULT (8192*1024) /* max readahead */
@@ -796,7 +796,6 @@ extern int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
extern int __ceph_caps_revoking_other(struct ceph_inode_info *ci,
struct ceph_cap *ocap, int mask);
extern int ceph_caps_revoking(struct ceph_inode_info *ci, int mask);
extern int __ceph_caps_used(struct ceph_inode_info *ci);
static inline bool __ceph_is_file_opened(struct ceph_inode_info *ci)


@@ -899,7 +899,7 @@ static int __get_required_blob_size(struct ceph_inode_info *ci, int name_size,
}
/*
* If there are dirty xattrs, reencode xattrs into the prealloc_blob
* If there are dirty xattrs, re-encode xattrs into the prealloc_blob
* and swap into place. It returns the old i_xattrs.blob (or NULL) so
* that it can be freed by the caller as the i_ceph_lock is likely to be
* held.


@@ -808,7 +808,7 @@ struct ceph_mds_caps {
struct ceph_mds_cap_peer {
__le64 cap_id;
__le32 seq;
__le32 issue_seq;
__le32 mseq;
__le32 mds;
__u8 flags;
@@ -822,7 +822,7 @@ struct ceph_mds_cap_release {
struct ceph_mds_cap_item {
__le64 ino;
__le64 cap_id;
__le32 migrate_seq, seq;
__le32 migrate_seq, issue_seq;
} __attribute__ ((packed));
#define CEPH_MDS_LEASE_REVOKE 1 /* mds -> client */


@@ -317,12 +317,6 @@ extern void ceph_release_page_vector(struct page **pages, int num_pages);
extern void ceph_put_page_vector(struct page **pages, int num_pages,
bool dirty);
extern struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags);
extern int ceph_copy_user_to_page_vector(struct page **pages,
const void __user *data,
loff_t off, size_t len);
extern void ceph_copy_to_page_vector(struct page **pages,
const void *data,
loff_t off, size_t len);
extern void ceph_copy_from_page_vector(struct page **pages,
void *data,
loff_t off, size_t len);


@@ -626,8 +626,6 @@ int ceph_osdc_notify(struct ceph_osd_client *osdc,
u32 timeout,
struct page ***preply_pages,
size_t *preply_len);
int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
struct ceph_osd_linger_request *lreq);
int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
struct ceph_object_id *oid,
struct ceph_object_locator *oloc,


@@ -17,12 +17,6 @@ struct ceph_pagelist {
refcount_t refcnt;
};
struct ceph_pagelist_cursor {
struct ceph_pagelist *pl; /* pagelist, for error checking */
struct list_head *page_lru; /* page in list */
size_t room; /* room remaining to reset to */
};
struct ceph_pagelist *ceph_pagelist_alloc(gfp_t gfp_flags);
extern void ceph_pagelist_release(struct ceph_pagelist *pl);
@@ -33,12 +27,6 @@ extern int ceph_pagelist_reserve(struct ceph_pagelist *pl, size_t space);
extern int ceph_pagelist_free_reserve(struct ceph_pagelist *pl);
extern void ceph_pagelist_set_cursor(struct ceph_pagelist *pl,
struct ceph_pagelist_cursor *c);
extern int ceph_pagelist_truncate(struct ceph_pagelist *pl,
struct ceph_pagelist_cursor *c);
static inline int ceph_pagelist_encode_64(struct ceph_pagelist *pl, u64 v)
{
__le64 ev = cpu_to_le64(v);


@@ -74,18 +74,6 @@ int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
return set_secret(dst, src->key);
}
int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end)
{
if (*p + sizeof(u16) + sizeof(key->created) +
sizeof(u16) + key->len > end)
return -ERANGE;
ceph_encode_16(p, key->type);
ceph_encode_copy(p, &key->created, sizeof(key->created));
ceph_encode_16(p, key->len);
ceph_encode_copy(p, key->key, key->len);
return 0;
}
int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end)
{
int ret;


@@ -21,7 +21,6 @@ struct ceph_crypto_key {
int ceph_crypto_key_clone(struct ceph_crypto_key *dst,
const struct ceph_crypto_key *src);
int ceph_crypto_key_encode(struct ceph_crypto_key *key, void **p, void *end);
int ceph_crypto_key_decode(struct ceph_crypto_key *key, void **p, void *end);
int ceph_crypto_key_unarmor(struct ceph_crypto_key *key, const char *in);
void ceph_crypto_key_destroy(struct ceph_crypto_key *key);


@@ -4999,40 +4999,6 @@ int ceph_osdc_notify(struct ceph_osd_client *osdc,
}
EXPORT_SYMBOL(ceph_osdc_notify);
/*
* Return the number of milliseconds since the watch was last
* confirmed, or an error. If there is an error, the watch is no
* longer valid, and should be destroyed with ceph_osdc_unwatch().
*/
int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
struct ceph_osd_linger_request *lreq)
{
unsigned long stamp, age;
int ret;
down_read(&osdc->lock);
mutex_lock(&lreq->lock);
stamp = lreq->watch_valid_thru;
if (!list_empty(&lreq->pending_lworks)) {
struct linger_work *lwork =
list_first_entry(&lreq->pending_lworks,
struct linger_work,
pending_item);
if (time_before(lwork->queued_stamp, stamp))
stamp = lwork->queued_stamp;
}
age = jiffies - stamp;
dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__,
lreq, lreq->linger_id, age, lreq->last_error);
/* we are truncating to msecs, so return a safe upper bound */
ret = lreq->last_error ?: 1 + jiffies_to_msecs(age);
mutex_unlock(&lreq->lock);
up_read(&osdc->lock);
return ret;
}
static int decode_watcher(void **p, void *end, struct ceph_watch_item *item)
{
u8 struct_v;


@@ -131,41 +131,3 @@ int ceph_pagelist_free_reserve(struct ceph_pagelist *pl)
return 0;
}
EXPORT_SYMBOL(ceph_pagelist_free_reserve);
/* Create a truncation point. */
void ceph_pagelist_set_cursor(struct ceph_pagelist *pl,
struct ceph_pagelist_cursor *c)
{
c->pl = pl;
c->page_lru = pl->head.prev;
c->room = pl->room;
}
EXPORT_SYMBOL(ceph_pagelist_set_cursor);
/* Truncate a pagelist to the given point. Move extra pages to reserve.
* This won't sleep.
* Returns: 0 on success,
* -EINVAL if the pagelist doesn't match the trunc point pagelist
*/
int ceph_pagelist_truncate(struct ceph_pagelist *pl,
struct ceph_pagelist_cursor *c)
{
struct page *page;
if (pl != c->pl)
return -EINVAL;
ceph_pagelist_unmap_tail(pl);
while (pl->head.prev != c->page_lru) {
page = list_entry(pl->head.prev, struct page, lru);
/* move from pagelist to reserve */
list_move_tail(&page->lru, &pl->free_list);
++pl->num_pages_free;
}
pl->room = c->room;
if (!list_empty(&pl->head)) {
page = list_entry(pl->head.prev, struct page, lru);
pl->mapped_tail = kmap(page);
}
return 0;
}
EXPORT_SYMBOL(ceph_pagelist_truncate);


@@ -55,58 +55,6 @@ struct page **ceph_alloc_page_vector(int num_pages, gfp_t flags)
}
EXPORT_SYMBOL(ceph_alloc_page_vector);
/*
* copy user data into a page vector
*/
int ceph_copy_user_to_page_vector(struct page **pages,
const void __user *data,
loff_t off, size_t len)
{
int i = 0;
int po = off & ~PAGE_MASK;
int left = len;
int l, bad;
while (left > 0) {
l = min_t(int, PAGE_SIZE-po, left);
bad = copy_from_user(page_address(pages[i]) + po, data, l);
if (bad == l)
return -EFAULT;
data += l - bad;
left -= l - bad;
po += l - bad;
if (po == PAGE_SIZE) {
po = 0;
i++;
}
}
return len;
}
EXPORT_SYMBOL(ceph_copy_user_to_page_vector);
void ceph_copy_to_page_vector(struct page **pages,
const void *data,
loff_t off, size_t len)
{
int i = 0;
size_t po = off & ~PAGE_MASK;
size_t left = len;
while (left > 0) {
size_t l = min_t(size_t, PAGE_SIZE-po, left);
memcpy(page_address(pages[i]) + po, data, l);
data += l;
left -= l;
po += l;
if (po == PAGE_SIZE) {
po = 0;
i++;
}
}
}
EXPORT_SYMBOL(ceph_copy_to_page_vector);
void ceph_copy_from_page_vector(struct page **pages,
void *data,
loff_t off, size_t len)