linux-stable/fs/dlm/dlm_internal.h

/* SPDX-License-Identifier: GPL-2.0-only */
/******************************************************************************
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
** Copyright (C) 2004-2011 Red Hat, Inc. All rights reserved.
**
**
*******************************************************************************
******************************************************************************/
#ifndef __DLM_INTERNAL_DOT_H__
#define __DLM_INTERNAL_DOT_H__
/*
 * This is the main header file to be included in each DLM source file.
 */
#include <uapi/linux/dlm_device.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/ctype.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/random.h>
#include <linux/delay.h>
#include <linux/socket.h>
#include <linux/kthread.h>
#include <linux/kobject.h>
#include <linux/kref.h>
#include <linux/kernel.h>
#include <linux/jhash.h>
#include <linux/miscdevice.h>
#include <linux/rhashtable.h>
#include <linux/mutex.h>
#include <linux/xarray.h>
#include <linux/ratelimit.h>
#include <linux/uaccess.h>
#include <linux/dlm.h>
#include "config.h"
struct dlm_ls;
struct dlm_lkb;
struct dlm_rsb;
struct dlm_member;
struct dlm_rsbtable;
struct dlm_recover;
struct dlm_header;
struct dlm_message;
struct dlm_rcom;
struct dlm_mhandle;
struct dlm_msg;
#define log_print(fmt, args...) \
	printk(KERN_ERR "dlm: "fmt"\n" , ##args)
#define log_print_ratelimited(fmt, args...) \
	printk_ratelimited(KERN_ERR "dlm: "fmt"\n", ##args)
#define log_error(ls, fmt, args...) \
	printk(KERN_ERR "dlm: %s: " fmt "\n", (ls)->ls_name , ##args)

#define log_rinfo(ls, fmt, args...) \
do { \
	if (dlm_config.ci_log_info) \
		printk(KERN_INFO "dlm: %s: " fmt "\n", \
		       (ls)->ls_name, ##args); \
	else if (dlm_config.ci_log_debug) \
		printk(KERN_DEBUG "dlm: %s: " fmt "\n", \
		       (ls)->ls_name , ##args); \
} while (0)

#define log_debug(ls, fmt, args...) \
do { \
	if (dlm_config.ci_log_debug) \
		printk(KERN_DEBUG "dlm: %s: " fmt "\n", \
		       (ls)->ls_name , ##args); \
} while (0)

#define log_limit(ls, fmt, args...) \
do { \
	if (dlm_config.ci_log_debug) \
		printk_ratelimited(KERN_DEBUG "dlm: %s: " fmt "\n", \
				   (ls)->ls_name , ##args); \
} while (0)
#define DLM_ASSERT(x, do) \
{ \
	if (!(x)) \
	{ \
		printk(KERN_ERR "\nDLM: Assertion failed on line %d of file %s\n" \
		       "DLM: assertion: \"%s\"\n" \
		       "DLM: time = %lu\n", \
		       __LINE__, __FILE__, #x, jiffies); \
		{do} \
		printk("\n"); \
		panic("DLM: Record message above and reboot.\n"); \
	} \
}
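
/* Typical use (illustrative): the second argument is an arbitrary statement
 * block executed before the panic, commonly a dump of the object that
 * failed the check, e.g.
 *
 *	DLM_ASSERT(lkb->lkb_nodeid == 0, dlm_print_lkb(lkb););
 */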
/*
 * Lockspace member (per node in a ls)
 */

struct dlm_member {
	struct list_head	list;
	int			nodeid;
	int			weight;
	int			slot;
	int			slot_prev;
	int			comm_seq;
	uint32_t		generation;
};
/*
 * Save and manage recovery state for a lockspace.
 */

struct dlm_recover {
	struct list_head	list;
	struct dlm_config_node	*nodes;
	int			nodes_count;
	uint64_t		seq;
};
/*
 * Pass input args to second stage locking function.
 */

struct dlm_args {
	uint32_t		flags;
	void			(*astfn) (void *astparam);
	void			*astparam;
	void			(*bastfn) (void *astparam, int mode);
	int			mode;
	struct dlm_lksb		*lksb;
};
/*
 * Lock block
 *
 * A lock can be one of three types:
 *
 * local copy      lock is mastered locally
 *                 (lkb_nodeid is zero and DLM_IFL_MSTCPY is not set)
 * process copy    lock is mastered on a remote node
 *                 (lkb_nodeid is non-zero and DLM_IFL_MSTCPY is not set)
 * master copy     master node's copy of a lock owned by a remote node
 *                 (lkb_nodeid is non-zero and DLM_IFL_MSTCPY is set)
 *
 * lkb_exflags: a copy of the most recent flags arg provided to dlm_lock or
 * dlm_unlock.  The dlm does not modify these or use any private flags in
 * this field; it only contains DLM_LKF_ flags from dlm.h.  These flags
 * are sent as-is to the remote master when the lock is remote.
 *
 * lkb_iflags/lkb_dflags: internal dlm flags (DLM_IFL_ and DLM_DFL_
 * prefixes) from dlm_internal.h.  Flags shared between the master and
 * process nodes are kept in lkb_dflags (the low bits); one of these
 * flags set on the master copy will be propagated to the process copy
 * and vice versa.  Flags private to the master or process node
 * (e.g. DLM_IFL_MSTCPY) are kept in lkb_iflags (bit 16 and up).
 *
 * lkb_sbflags: status block flags.  These flags are copied directly into
 * the caller's lksb.sb_flags prior to the dlm_lock/dlm_unlock completion
 * ast.  All are defined in dlm.h with the DLM_SBF_ prefix.
 *
 * lkb_status: the lock status indicates which rsb queue the lock is
 * on: grant, convert, or wait.  DLM_LKSTS_ WAITING/GRANTED/CONVERT
 *
 * lkb_wait_type: the dlm message type (DLM_MSG_ prefix) for which a
 * reply is needed.  Only set when the lkb is on the lockspace waiters
 * list awaiting a reply from a remote node.
 *
 * lkb_nodeid: when the lkb is a local copy, nodeid is 0; when the lkb
 * is a master copy, nodeid specifies the remote lock holder; when the
 * lkb is a process copy, the nodeid specifies the lock master.
 */
/* lkb_status */
#define DLM_LKSTS_WAITING 1
#define DLM_LKSTS_GRANTED 2
#define DLM_LKSTS_CONVERT 3
/* lkb_iflags */
#define DLM_IFL_MSTCPY_BIT 16
#define __DLM_IFL_MIN_BIT DLM_IFL_MSTCPY_BIT
#define DLM_IFL_RESEND_BIT 17
#define DLM_IFL_DEAD_BIT 18
#define DLM_IFL_OVERLAP_UNLOCK_BIT 19
#define DLM_IFL_OVERLAP_CANCEL_BIT 20
#define DLM_IFL_ENDOFLIFE_BIT 21
#define DLM_IFL_DEADLOCK_CANCEL_BIT 24
#define __DLM_IFL_MAX_BIT DLM_IFL_DEADLOCK_CANCEL_BIT
/* lkb_dflags */
#define DLM_DFL_USER_BIT 0
#define __DLM_DFL_MIN_BIT DLM_DFL_USER_BIT
#define DLM_DFL_ORPHAN_BIT 1
#define __DLM_DFL_MAX_BIT DLM_DFL_ORPHAN_BIT
#define DLM_CB_CAST 0x00000001
#define DLM_CB_BAST 0x00000002
/* much of this is just saving user space pointers associated with the
 * lock that we pass back to the user lib with an ast
 */

struct dlm_user_args {
	struct dlm_user_proc	*proc; /* each process that opens the lockspace
					* device has private data
					* (dlm_user_proc) on the struct file,
					* the process's locks point back to it
					*/
	struct dlm_lksb		lksb;
	struct dlm_lksb __user	*user_lksb;
	void __user		*castparam;
	void __user		*castaddr;
	void __user		*bastparam;
	void __user		*bastaddr;
	uint64_t		xid;
};
struct dlm_callback {
	uint32_t		flags;		/* DLM_CBF_ */
	int			sb_status;	/* copy to lksb status */
	uint8_t			sb_flags;	/* copy to lksb flags */
	int8_t			mode;		/* rq mode of bast, gr mode of cast */
	bool			copy_lvb;
	struct dlm_lksb		*lkb_lksb;
	unsigned char		lvbptr[DLM_USER_LVB_LEN];

	union {
		void			*astparam;	/* caller's ast arg */
		struct dlm_user_args	ua;
	};
	struct work_struct	work;
	void			(*bastfn)(void *astparam, int mode);
	void			(*astfn)(void *astparam);
	char			res_name[DLM_RESNAME_MAXLEN];
	size_t			res_length;
	uint32_t		ls_id;
	uint32_t		lkb_id;

	struct list_head	list;
};
struct dlm_lkb {
	struct dlm_rsb		*lkb_resource;	/* the rsb */
	struct kref		lkb_ref;
	int			lkb_nodeid;	/* copied from rsb */
	int			lkb_ownpid;	/* pid of lock owner */
	uint32_t		lkb_id;		/* our lock ID */
	uint32_t		lkb_remid;	/* lock ID on remote partner */
	uint32_t		lkb_exflags;	/* external flags from caller */
	unsigned long		lkb_sbflags;	/* lksb flags */
	unsigned long		lkb_dflags;	/* distributed flags */
	unsigned long		lkb_iflags;	/* internal flags */
	uint32_t		lkb_lvbseq;	/* lvb sequence number */

	int8_t			lkb_status;	/* granted, waiting, convert */
	int8_t			lkb_rqmode;	/* requested lock mode */
	int8_t			lkb_grmode;	/* granted lock mode */
	int8_t			lkb_highbast;	/* highest mode bast sent for */

	int8_t			lkb_wait_type;	/* type of reply waiting for */
	int8_t			lkb_wait_count;
	int			lkb_wait_nodeid; /* for debugging */

	struct list_head	lkb_statequeue;	/* rsb g/c/w list */
	struct list_head	lkb_rsb_lookup;	/* waiting for rsb lookup */
	struct list_head	lkb_wait_reply;	/* waiting for remote reply */
	struct list_head	lkb_ownqueue;	/* list of locks for a process */
	ktime_t			lkb_timestamp;

	int8_t			lkb_last_cast_cb_mode;
	int8_t			lkb_last_bast_cb_mode;
	int8_t			lkb_last_cb_mode;
	uint8_t			lkb_last_cb_flags;
	ktime_t			lkb_last_cast_time;	/* for debugging */
	ktime_t			lkb_last_bast_time;	/* for debugging */
	uint64_t		lkb_recover_seq; /* from ls_recover_seq */

	char			*lkb_lvbptr;
	struct dlm_lksb		*lkb_lksb;	/* caller's status block */
	void			(*lkb_astfn) (void *astparam);
	void			(*lkb_bastfn) (void *astparam, int mode);
	union {
		void			*lkb_astparam;	/* caller's ast arg */
		struct dlm_user_args	*lkb_ua;
	};
	struct rcu_head		rcu;
};
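
/* A minimal sketch, not part of the dlm API (the helper name is ours),
 * showing how the three lock types described above are distinguished
 * from lkb fields: master copies carry DLM_IFL_MSTCPY_BIT, and
 * lkb_nodeid separates local copies from process copies. */
static inline const char *dlm_lkb_copy_type(const struct dlm_lkb *lkb)
{
	if (test_bit(DLM_IFL_MSTCPY_BIT, &lkb->lkb_iflags))
		return "master copy";	/* master's copy of a remote lock */
	if (lkb->lkb_nodeid)
		return "process copy";	/* mastered on a remote node */
	return "local copy";		/* mastered on this node */
}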
/*
 * res_master_nodeid is "normal": 0 is unset/invalid, non-zero is the real
 * nodeid, even when nodeid is our_nodeid.
 *
 * res_nodeid is "odd": -1 is unset/invalid, zero means our_nodeid,
 * greater than zero when another nodeid.
 *
 * (TODO: remove res_nodeid and only use res_master_nodeid)
 */
struct dlm_rsb {
	struct dlm_ls		*res_ls;	/* the lockspace */
	struct kref		res_ref;
	spinlock_t		res_lock;
	unsigned long		res_flags;
	int			res_length;	/* length of rsb name */
	int			res_nodeid;
	int			res_master_nodeid;
	int			res_dir_nodeid;
	unsigned long		res_id;		/* for ls_recover_xa */
	uint32_t		res_lvbseq;
	uint32_t		res_hash;
	unsigned long		res_toss_time;
	uint32_t		res_first_lkid;
	struct list_head	res_lookup;	/* lkbs waiting on first */
	struct rhash_head	res_node;	/* rsbtbl */
	struct list_head	res_grantqueue;
	struct list_head	res_convertqueue;
	struct list_head	res_waitqueue;

	struct list_head	res_slow_list;	  /* ls_slow_* */
	struct list_head	res_scan_list;
	struct list_head	res_root_list;	  /* used for recovery */
	struct list_head	res_masters_list; /* used for recovery */
	struct list_head	res_recover_list; /* used for recovery */
	int			res_recover_locks_count;
	struct rcu_head		rcu;

	char			*res_lvbptr;
	char			res_name[DLM_RESNAME_MAXLEN+1];
};
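
/* An illustrative sketch (the helper name is ours, not the dlm's) of the
 * res_nodeid/res_master_nodeid convention documented above: mapping the
 * "normal" master nodeid to the "odd" res_nodeid encoding. */
static inline int dlm_res_nodeid_from_master(int res_master_nodeid,
					     int our_nodeid)
{
	if (!res_master_nodeid)
		return -1;			/* unset/invalid */
	if (res_master_nodeid == our_nodeid)
		return 0;			/* we are the master */
	return res_master_nodeid;		/* another node is master */
}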
/* dlm_master_lookup() flags */
#define DLM_LU_RECOVER_DIR 1
#define DLM_LU_RECOVER_MASTER 2
/* dlm_master_lookup() results */
#define DLM_LU_MATCH 1
#define DLM_LU_ADD 2
/* find_rsb() flags */
#define R_REQUEST 0x00000001
#define R_RECEIVE_REQUEST 0x00000002
#define R_RECEIVE_RECOVER 0x00000004
/* rsb_flags */
enum rsb_flags {
	RSB_MASTER_UNCERTAIN,
	RSB_VALNOTVALID,
	RSB_VALNOTVALID_PREV,
	RSB_NEW_MASTER,
	RSB_NEW_MASTER2,
	RSB_RECOVER_CONVERT,
	RSB_RECOVER_GRANT,
	RSB_RECOVER_LVB_INVAL,
	RSB_INACTIVE,
	RSB_HASHED, /* set while rsb is on ls_rsbtbl */
};
static inline void rsb_set_flag(struct dlm_rsb *r, enum rsb_flags flag)
{
	__set_bit(flag, &r->res_flags);
}

static inline void rsb_clear_flag(struct dlm_rsb *r, enum rsb_flags flag)
{
	__clear_bit(flag, &r->res_flags);
}

static inline int rsb_flag(struct dlm_rsb *r, enum rsb_flags flag)
{
	return test_bit(flag, &r->res_flags);
}
/* dlm_header is first element of all structs sent between nodes */
#define DLM_HEADER_MAJOR 0x00030000
#define DLM_HEADER_MINOR 0x00000002
#define DLM_VERSION_3_1 0x00030001
#define DLM_VERSION_3_2 0x00030002
#define DLM_HEADER_SLOTS 0x00000001
#define DLM_MSG 1
#define DLM_RCOM 2
#define DLM_OPTS 3
#define DLM_ACK 4
#define DLM_FIN 5
struct dlm_header {
	__le32			h_version;
	union {
		/* for DLM_MSG and DLM_RCOM */
		__le32		h_lockspace;
		/* for DLM_ACK and DLM_OPTS */
		__le32		h_seq;
	} u;
	__le32			h_nodeid;	/* nodeid of sender */
	__le16			h_length;
	uint8_t			h_cmd;		/* DLM_MSG, DLM_RCOM */
	uint8_t			h_pad;
};
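
/* A hedged sanity-check sketch (the helper name is ours): h_length is
 * expected to describe the whole packet, header included, so a receiver
 * can bounds-check a buffer before interpreting the rest of it. */
static inline bool dlm_header_len_ok(const struct dlm_header *h, size_t buflen)
{
	size_t len = le16_to_cpu(h->h_length);

	return len >= sizeof(struct dlm_header) && len <= buflen;
}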
#define DLM_MSG_REQUEST 1
#define DLM_MSG_CONVERT 2
#define DLM_MSG_UNLOCK 3
#define DLM_MSG_CANCEL 4
#define DLM_MSG_REQUEST_REPLY 5
#define DLM_MSG_CONVERT_REPLY 6
#define DLM_MSG_UNLOCK_REPLY 7
#define DLM_MSG_CANCEL_REPLY 8
#define DLM_MSG_GRANT 9
#define DLM_MSG_BAST 10
#define DLM_MSG_LOOKUP 11
#define DLM_MSG_REMOVE 12
#define DLM_MSG_LOOKUP_REPLY 13
#define DLM_MSG_PURGE 14
struct dlm_message {
	struct dlm_header	m_header;
	__le32			m_type;		/* DLM_MSG_ */
	__le32			m_nodeid;
	__le32			m_pid;
	__le32			m_lkid;		/* lkid on sender */
	__le32			m_remid;	/* lkid on receiver */
	__le32			m_parent_lkid;
	__le32			m_parent_remid;
	__le32			m_exflags;
	__le32			m_sbflags;
	__le32			m_flags;
	__le32			m_lvbseq;
	__le32			m_hash;
	__le32			m_status;
	__le32			m_grmode;
	__le32			m_rqmode;
	__le32			m_bastmode;
	__le32			m_asts;
	__le32			m_result;	/* 0 or -EXXX */
	char			m_extra[];	/* name or lvb */
};
#define DLM_RS_NODES 0x00000001
#define DLM_RS_NODES_ALL 0x00000002
#define DLM_RS_DIR 0x00000004
#define DLM_RS_DIR_ALL 0x00000008
#define DLM_RS_LOCKS 0x00000010
#define DLM_RS_LOCKS_ALL 0x00000020
#define DLM_RS_DONE 0x00000040
#define DLM_RS_DONE_ALL 0x00000080
#define DLM_RCOM_STATUS 1
#define DLM_RCOM_NAMES 2
#define DLM_RCOM_LOOKUP 3
#define DLM_RCOM_LOCK 4
#define DLM_RCOM_STATUS_REPLY 5
#define DLM_RCOM_NAMES_REPLY 6
#define DLM_RCOM_LOOKUP_REPLY 7
#define DLM_RCOM_LOCK_REPLY 8
struct dlm_rcom {
	struct dlm_header	rc_header;
	__le32			rc_type;	/* DLM_RCOM_ */
	__le32			rc_result;	/* multi-purpose */
	__le64			rc_id;		/* match reply with request */
	__le64			rc_seq;		/* sender's ls_recover_seq */
	__le64			rc_seq_reply;	/* remote ls_recover_seq */
	char			rc_buf[];
};
struct dlm_opt_header {
	__le16		t_type;
	__le16		t_length;
	__le32		t_pad;
	/* need to be 8 byte aligned */
	char		t_value[];
};
/* encapsulation header */
struct dlm_opts {
	struct dlm_header	o_header;
	uint8_t			o_nextcmd;
	uint8_t			o_pad;
	__le16			o_optlen;
	__le32			o_pad2;
	char			o_opts[];
};
union dlm_packet {
	struct dlm_header	header;		/* common to other two */
	struct dlm_message	message;
	struct dlm_rcom		rcom;
	struct dlm_opts		opts;
};
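
/* A hedged sketch (the function name is ours) of how this union is used:
 * since dlm_header is the first element of every wire struct, a receiver
 * can look at h_cmd and then interpret the rest of the buffer. */
static inline int dlm_packet_subtype(const union dlm_packet *p)
{
	switch (p->header.h_cmd) {
	case DLM_MSG:
		return le32_to_cpu(p->message.m_type);	/* DLM_MSG_* */
	case DLM_RCOM:
		return le32_to_cpu(p->rcom.rc_type);	/* DLM_RCOM_* */
	case DLM_OPTS:
		return p->opts.o_nextcmd;	/* encapsulated command */
	default:
		return -1;
	}
}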
#define DLM_RSF_NEED_SLOTS 0x00000001
/* RCOM_STATUS data */
struct rcom_status {
	__le32			rs_flags;
	__le32			rs_unused1;
	__le64			rs_unused2;
};
/* RCOM_STATUS_REPLY data */
struct rcom_config {
	__le32			rf_lvblen;
	__le32			rf_lsflags;

	/* DLM_HEADER_SLOTS adds: */
	__le32			rf_flags;
	__le16			rf_our_slot;
	__le16			rf_num_slots;
	__le32			rf_generation;
	__le32			rf_unused1;
	__le64			rf_unused2;
};
struct rcom_slot {
	__le32			ro_nodeid;
	__le16			ro_slot;
	__le16			ro_unused1;
	__le64			ro_unused2;
};
struct rcom_lock {
	__le32			rl_ownpid;
	__le32			rl_lkid;
	__le32			rl_remid;
	__le32			rl_parent_lkid;
	__le32			rl_parent_remid;
	__le32			rl_exflags;
	__le32			rl_flags;
	__le32			rl_lvbseq;
	__le32			rl_result;
	int8_t			rl_rqmode;
	int8_t			rl_grmode;
	int8_t			rl_status;
	int8_t			rl_asts;
	__le16			rl_wait_type;
	__le16			rl_namelen;
	char			rl_name[DLM_RESNAME_MAXLEN];
	char			rl_lvb[];
};
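
/* An illustrative sketch of how much rc_buf space a struct rcom_lock
 * occupies on the wire: the fixed part plus the lockspace lvb length
 * when the lock carries an lvb (lvblen and has_lvb are assumptions
 * of this sketch, not dlm names). */
static inline size_t dlm_rcom_lock_len(size_t lvblen, bool has_lvb)
{
	return sizeof(struct rcom_lock) + (has_lvb ? lvblen : 0);
}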
struct dlm_ls {
	struct list_head	ls_list;	/* list of lockspaces */
	uint32_t		ls_global_id;	/* global unique lockspace ID */
	uint32_t		ls_generation;
	uint32_t		ls_exflags;
	int			ls_lvblen;
	atomic_t		ls_count;	/* refcount of processes in
						   the dlm using this ls */
	wait_queue_head_t	ls_count_wait;
	int			ls_create_count; /* create/release refcount */
	unsigned long		ls_flags;	/* LSFL_ */
	struct kobject		ls_kobj;

	struct xarray		ls_lkbxa;
	rwlock_t		ls_lkbxa_lock;

	/* an rsb is on ls_rsbtbl for primary locking functions,
	   and on a slow list for recovery/dump iteration */
	struct rhashtable	ls_rsbtbl;
	rwlock_t		ls_rsbtbl_lock;	/* for ls_rsbtbl and ls_slow */
	struct list_head	ls_slow_inactive; /* to iterate rsbtbl */
	struct list_head	ls_slow_active;	  /* to iterate rsbtbl */

	struct timer_list	ls_scan_timer;	/* based on first scan_list rsb toss_time */
	struct list_head	ls_scan_list;	/* rsbs ordered by res_toss_time */
	spinlock_t		ls_scan_lock;

	spinlock_t		ls_waiters_lock;
	struct list_head	ls_waiters;	/* lkbs needing a reply */

	spinlock_t		ls_orphans_lock;
	struct list_head	ls_orphans;

	struct list_head	ls_nodes;	/* current nodes in ls */
	struct list_head	ls_nodes_gone;	/* dead node list, recovery */
	int			ls_num_nodes;	/* number of nodes in ls */
	int			ls_low_nodeid;
	int			ls_total_weight;
	int			*ls_node_array;

	int			ls_slot;
	int			ls_num_slots;
	int			ls_slots_size;
	struct dlm_slot		*ls_slots;

	struct dlm_rsb		ls_local_rsb;	/* for returning errors */
	struct dlm_lkb		ls_local_lkb;	/* for returning errors */
	struct dlm_message	ls_local_ms;	/* for faking a reply */

	struct dentry		*ls_debug_rsb_dentry;	/* debugfs */
	struct dentry		*ls_debug_waiters_dentry; /* debugfs */
	struct dentry		*ls_debug_locks_dentry;	/* debugfs */
	struct dentry		*ls_debug_all_dentry;	/* debugfs */
	struct dentry		*ls_debug_toss_dentry;	/* debugfs */
	struct dentry		*ls_debug_queued_asts_dentry; /* debugfs */

	wait_queue_head_t	ls_uevent_wait;	/* user part of join/leave */
	int			ls_uevent_result;
	struct completion	ls_recovery_done;
	int			ls_recovery_result;

	struct miscdevice	ls_device;

	struct workqueue_struct	*ls_callback_wq;

	/* recovery related */

	spinlock_t		ls_cb_lock;
	struct list_head	ls_cb_delay;	/* save for queue_work later */
	struct task_struct	*ls_recoverd_task;
	struct mutex		ls_recoverd_active;
	spinlock_t		ls_recover_lock;
	unsigned long		ls_recover_begin; /* jiffies timestamp */
	uint32_t		ls_recover_status; /* DLM_RS_ */
	uint64_t		ls_recover_seq;
	struct dlm_recover	*ls_recover_args;
	struct rw_semaphore	ls_in_recovery;	/* block local requests */
	rwlock_t		ls_recv_active;	/* block dlm_recv */
	struct list_head	ls_requestqueue; /* queue remote requests */
	rwlock_t		ls_requestqueue_lock;
	struct dlm_rcom		*ls_recover_buf;
	int			ls_recover_nodeid; /* for debugging */
	unsigned int		ls_recover_locks_in; /* for log info */
	uint64_t		ls_rcom_seq;
	spinlock_t		ls_rcom_spin;
	struct list_head	ls_recover_list;
	spinlock_t		ls_recover_list_lock;
	int			ls_recover_list_count;
	struct xarray		ls_recover_xa;
	spinlock_t		ls_recover_xa_lock;
	wait_queue_head_t	ls_wait_general;
	wait_queue_head_t	ls_recover_lock_wait;
	spinlock_t		ls_clear_proc_locks;

	struct list_head	ls_masters_list;	/* root resources */
	rwlock_t		ls_masters_lock;	/* protect root_list */
	struct list_head	ls_dir_dump_list;	/* root resources */
	rwlock_t		ls_dir_dump_lock;	/* protect root_list */

	const struct dlm_lockspace_ops *ls_ops;
	void			*ls_ops_arg;

	struct work_struct	ls_free_work;

	int			ls_namelen;
	char			ls_name[DLM_LOCKSPACE_LEN + 1];
};
/*
 * LSFL_RECOVER_STOP - dlm_ls_stop() sets this to tell dlm recovery routines
 * that they should abort what they're doing so new recovery can be started.
 *
 * LSFL_RECOVER_DOWN - dlm_ls_stop() sets this to tell dlm_recoverd that it
 * should do down_write() on the in_recovery rw_semaphore. (doing down_write
 * within dlm_ls_stop causes complaints about the lock acquired/released
 * in different contexts.)
 *
 * LSFL_RECOVER_LOCK - dlm_recoverd holds the in_recovery rw_semaphore.
 * It sets this after it is done with down_write() on the in_recovery
 * rw_semaphore and clears it after it has released the rw_semaphore.
 *
 * LSFL_RECOVER_WORK - dlm_ls_start() sets this to tell dlm_recoverd that it
 * should begin recovery of the lockspace.
 *
 * LSFL_RUNNING - set when normal locking activity is enabled.
 * dlm_ls_stop() clears this to tell dlm locking routines that they should
 * quit what they are doing so recovery can run.  dlm_recoverd sets
 * this after recovery is finished.
 */
#define LSFL_RECOVER_STOP 0
#define LSFL_RECOVER_DOWN 1
#define LSFL_RECOVER_LOCK 2
#define LSFL_RECOVER_WORK 3
#define LSFL_RUNNING 4
#define LSFL_RCOM_READY 5
#define LSFL_RCOM_WAIT 6
#define LSFL_UEVENT_WAIT 7
#define LSFL_CB_DELAY 9
#define LSFL_NODIR 10
#define LSFL_RECV_MSG_BLOCKED 11
#define LSFL_FS 12
#define LSFL_SOFTIRQ 13
#define DLM_PROC_FLAGS_CLOSING 1
#define DLM_PROC_FLAGS_COMPAT 2
/* locks list is kept so we can remove all a process's locks when it
   exits (or orphan those that are persistent) */

struct dlm_user_proc {
	dlm_lockspace_t		*lockspace;
	unsigned long		flags;		/* DLM_PROC_FLAGS */
	struct list_head	asts;
	spinlock_t		asts_spin;
	struct list_head	locks;
	spinlock_t		locks_spin;
	struct list_head	unlocking;
	wait_queue_head_t	wait;
};
static inline int dlm_locking_stopped(struct dlm_ls *ls)
{
	return !test_bit(LSFL_RUNNING, &ls->ls_flags);
}

static inline int dlm_recovery_stopped(struct dlm_ls *ls)
{
	return test_bit(LSFL_RECOVER_STOP, &ls->ls_flags);
}

static inline int dlm_no_directory(struct dlm_ls *ls)
{
	return test_bit(LSFL_NODIR, &ls->ls_flags);
}
/* takes a snapshot from dlm atomic flags */
static inline uint32_t dlm_flags_val(const unsigned long *addr,
				     uint32_t min, uint32_t max)
{
	uint32_t bit = min, val = 0;

	for_each_set_bit_from(bit, addr, max + 1) {
		val |= BIT(bit);
	}

	return val;
}
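
/* For example (illustrative): with min = 16 and max = 24, an addr with
 * bits 16 and 18 set yields BIT(16) | BIT(18) = 0x10000 | 0x40000 = 0x50000. */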
static inline uint32_t dlm_iflags_val(const struct dlm_lkb *lkb)
{
	return dlm_flags_val(&lkb->lkb_iflags, __DLM_IFL_MIN_BIT,
			     __DLM_IFL_MAX_BIT);
}

static inline uint32_t dlm_dflags_val(const struct dlm_lkb *lkb)
{
	return dlm_flags_val(&lkb->lkb_dflags, __DLM_DFL_MIN_BIT,
			     __DLM_DFL_MAX_BIT);
}
/* coming from UAPI header
 *
 * TODO:
 * Move this to UAPI header and let other values point to them and use BIT()
 */
#define DLM_SBF_DEMOTED_BIT 0
#define __DLM_SBF_MIN_BIT DLM_SBF_DEMOTED_BIT
#define DLM_SBF_VALNOTVALID_BIT 1
#define DLM_SBF_ALTMODE_BIT 2
#define __DLM_SBF_MAX_BIT DLM_SBF_ALTMODE_BIT
static inline uint32_t dlm_sbflags_val(const struct dlm_lkb *lkb)
{
	/* be sure the next person updates this */
	BUILD_BUG_ON(BIT(__DLM_SBF_MAX_BIT) != DLM_SBF_ALTMODE);

	return dlm_flags_val(&lkb->lkb_sbflags, __DLM_SBF_MIN_BIT,
			     __DLM_SBF_MAX_BIT);
}
static inline void dlm_set_flags_val(unsigned long *addr, uint32_t val,
				     uint32_t min, uint32_t max)
{
	uint32_t bit;

	for (bit = min; bit < (max + 1); bit++) {
		if (val & BIT(bit))
			set_bit(bit, addr);
		else
			clear_bit(bit, addr);
	}
}
static inline void dlm_set_dflags_val(struct dlm_lkb *lkb, uint32_t val)
{
	dlm_set_flags_val(&lkb->lkb_dflags, val, __DLM_DFL_MIN_BIT,
			  __DLM_DFL_MAX_BIT);
}

static inline void dlm_set_sbflags_val(struct dlm_lkb *lkb, uint32_t val)
{
	dlm_set_flags_val(&lkb->lkb_sbflags, val, __DLM_SBF_MIN_BIT,
			  __DLM_SBF_MAX_BIT);
}
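
/* A minimal sketch (illustrative only; not called by the dlm) showing the
 * round-trip pairing of the helpers above: a u32 snapshot taken from one
 * lkb can be applied to another, e.g. when flags cross the wire. */
static inline void dlm_copy_sbflags_example(struct dlm_lkb *dst,
					    const struct dlm_lkb *src)
{
	uint32_t val = dlm_sbflags_val(src);	/* pack bits into a u32 */

	dlm_set_sbflags_val(dst, val);		/* restore the same bits */
}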
extern struct workqueue_struct *dlm_wq;
int dlm_plock_init(void);
void dlm_plock_exit(void);
#ifdef CONFIG_DLM_DEBUG
void dlm_register_debugfs(void);
void dlm_unregister_debugfs(void);
void dlm_create_debug_file(struct dlm_ls *ls);
void dlm_delete_debug_file(struct dlm_ls *ls);
void *dlm_create_debug_comms_file(int nodeid, void *data);
void dlm_delete_debug_comms_file(void *ctx);
#else
static inline void dlm_register_debugfs(void) { }
static inline void dlm_unregister_debugfs(void) { }
static inline void dlm_create_debug_file(struct dlm_ls *ls) { }
static inline void dlm_delete_debug_file(struct dlm_ls *ls) { }
static inline void *dlm_create_debug_comms_file(int nodeid, void *data) { return NULL; }
static inline void dlm_delete_debug_comms_file(void *ctx) { }
#endif
#endif /* __DLM_INTERNAL_DOT_H__ */