Diffstat (limited to 'lib/locking/locking.c')
-rw-r--r--  lib/locking/locking.c | 726
1 file changed, 260 insertions(+), 466 deletions(-)
diff --git a/lib/locking/locking.c b/lib/locking/locking.c
index 7aa519b..a8153a6 100644
--- a/lib/locking/locking.c
+++ b/lib/locking/locking.c
@@ -10,179 +10,51 @@
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program; if not, write to the Free Software Foundation,
- * Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+ * Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include "lib.h"
-#include "locking.h"
+#include "lib/misc/lib.h"
+#include "lib/locking/locking.h"
+#include "lib/locking/lvmlockd.h"
#include "locking_types.h"
-#include "lvm-string.h"
-#include "activate.h"
-#include "toolcontext.h"
-#include "memlock.h"
-#include "defaults.h"
-#include "lvmcache.h"
+#include "lib/misc/lvm-string.h"
+#include "lib/activate/activate.h"
+#include "lib/commands/toolcontext.h"
+#include "lib/mm/memlock.h"
+#include "lib/config/defaults.h"
+#include "lib/cache/lvmcache.h"
+#include "lib/misc/lvm-signal.h"
#include <assert.h>
-#include <signal.h>
#include <sys/stat.h>
#include <limits.h>
#include <unistd.h>
static struct locking_type _locking;
-static sigset_t _oldset;
static int _vg_lock_count = 0; /* Number of locks held */
static int _vg_write_lock_held = 0; /* VG write lock held? */
-static int _signals_blocked = 0;
static int _blocking_supported = 0;
-
-static volatile sig_atomic_t _sigint_caught = 0;
-static volatile sig_atomic_t _handler_installed;
-static struct sigaction _oldhandler;
-static int _oldmasked;
-
-typedef enum {
- LV_NOOP,
- LV_SUSPEND,
- LV_RESUME
-} lv_operation_t;
-
-static void _catch_sigint(int unused __attribute__((unused)))
-{
- _sigint_caught = 1;
-}
-
-int sigint_caught(void) {
- return _sigint_caught;
-}
-
-void sigint_clear(void)
-{
- _sigint_caught = 0;
-}
-
-/*
- * Temporarily allow keyboard interrupts to be intercepted and noted;
- * saves interrupt handler state for sigint_restore(). Users should
- * use the sigint_caught() predicate to check whether interrupt was
- * requested and act appropriately. Interrupt flags are never
- * cleared automatically by this code, but the tools clear the flag
- * before running each command in lvm_run_command(). All other places
- * where the flag needs to be cleared need to call sigint_clear().
- */
-
-void sigint_allow(void)
-{
- struct sigaction handler;
- sigset_t sigs;
-
- /*
- * Do not overwrite the backed-up handler data -
- * just increase nesting count.
- */
- if (_handler_installed) {
- _handler_installed++;
- return;
- }
-
- /* Grab old sigaction for SIGINT: shall not fail. */
- sigaction(SIGINT, NULL, &handler);
- handler.sa_flags &= ~SA_RESTART; /* Clear restart flag */
- handler.sa_handler = _catch_sigint;
-
- _handler_installed = 1;
-
- /* Override the signal handler: shall not fail. */
- sigaction(SIGINT, &handler, &_oldhandler);
-
- /* Unmask SIGINT. Remember to mask it again on restore. */
- sigprocmask(0, NULL, &sigs);
- if ((_oldmasked = sigismember(&sigs, SIGINT))) {
- sigdelset(&sigs, SIGINT);
- sigprocmask(SIG_SETMASK, &sigs, NULL);
- }
-}
-
-void sigint_restore(void)
-{
- if (!_handler_installed)
- return;
-
- if (_handler_installed > 1) {
- _handler_installed--;
- return;
- }
-
- /* Nesting count went down to 0. */
- _handler_installed = 0;
-
- if (_oldmasked) {
- sigset_t sigs;
- sigprocmask(0, NULL, &sigs);
- sigaddset(&sigs, SIGINT);
- sigprocmask(SIG_SETMASK, &sigs, NULL);
- }
-
- sigaction(SIGINT, &_oldhandler, NULL);
-}
-
-static void _block_signals(uint32_t flags __attribute__((unused)))
-{
- sigset_t set;
-
- if (_signals_blocked)
- return;
-
- if (sigfillset(&set)) {
- log_sys_error("sigfillset", "_block_signals");
- return;
- }
-
- if (sigprocmask(SIG_SETMASK, &set, &_oldset)) {
- log_sys_error("sigprocmask", "_block_signals");
- return;
- }
-
- _signals_blocked = 1;
-}
+static int _file_locking_readonly = 0;
+static int _file_locking_sysinit = 0;
+static int _file_locking_ignorefail = 0;
+static int _file_locking_failed = 0;
static void _unblock_signals(void)
{
/* Don't unblock signals while any locks are held */
- if (!_signals_blocked || _vg_lock_count)
- return;
-
- if (sigprocmask(SIG_SETMASK, &_oldset, NULL)) {
- log_sys_error("sigprocmask", "_block_signals");
- return;
- }
-
- _signals_blocked = 0;
-}
-
-static void _lock_memory(struct cmd_context *cmd, lv_operation_t lv_op)
-{
- if (!(_locking.flags & LCK_PRE_MEMLOCK))
- return;
-
- if (lv_op == LV_SUSPEND)
- critical_section_inc(cmd, "locking for suspend");
-}
-
-static void _unlock_memory(struct cmd_context *cmd, lv_operation_t lv_op)
-{
- if (!(_locking.flags & LCK_PRE_MEMLOCK))
- return;
-
- if (lv_op == LV_RESUME)
- critical_section_dec(cmd, "unlocking on resume");
+ if (!_vg_lock_count)
+ unblock_signals();
}
void reset_locking(void)
{
int was_locked = _vg_lock_count;
+ /* file locking disabled */
+ if (!_locking.flags)
+ return;
+
_vg_lock_count = 0;
_vg_write_lock_held = 0;
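
This hunk drops the in-file SIGINT plumbing (sigint_allow(), sigint_caught(), sigint_restore() and the signal block/unblock helpers) in favour of the shared versions now pulled in via lib/misc/lvm-signal.h. For reference, a minimal sketch of the usage pattern the removed comment describes; the _copy_extents() wrapper and its loop body are hypothetical, only the sigint_*() calls come from the code above.

/* Illustrative sketch only, not part of this commit: open an interrupt
 * window around a long-running loop and poll the flag, as the removed
 * comment describes.  The flag is not cleared automatically. */
static int _copy_extents(unsigned nr_extents)
{
	unsigned i;
	int ret = 1;

	sigint_allow();			/* start noting keyboard interrupts */

	for (i = 0; i < nr_extents; i++) {
		if (sigint_caught()) {	/* user pressed Ctrl-C */
			log_error("Interrupted.");
			ret = 0;
			break;
		}
		/* ... copy one extent (hypothetical work) ... */
	}

	sigint_restore();		/* restore previous handler and mask */
	return ret;
}
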
@@ -198,9 +70,7 @@ void reset_locking(void)
static void _update_vg_lock_count(const char *resource, uint32_t flags)
{
/* Ignore locks not associated with updating VG metadata */
- if ((flags & LCK_SCOPE_MASK) != LCK_VG ||
- (flags & LCK_CACHE) ||
- !strcmp(resource, VG_GLOBAL))
+ if (!strcmp(resource, VG_GLOBAL))
return;
if ((flags & LCK_TYPE_MASK) == LCK_UNLOCK)
@@ -216,355 +86,219 @@ static void _update_vg_lock_count(const char *resource, uint32_t flags)
}
/*
- * Select a locking type
- * type: locking type; if < 0, then read config tree value
+ * A mess of options have been introduced over time to override
+ * or tweak the behavior of file locking, and indirectly other
+ * behaviors. These options are allowed in different but
+ * overlapping sets of commands (see command-lines.in)
+ *
+ * --nolocking
+ *
+ * Command won't try to set up or use file locks at all.
+ *
+ * --readonly
+ *
+ * Command will grant any read lock request, without trying
+ * to acquire an actual file lock. Command will refuse any
+ * write lock request. (Activation, which uses a write lock,
+ * is not allowed.)
+ *
+ * --ignorelockingfailure
+ *
+ * Command tries to set up file locks and will use them
+ * (both read and write) if successful. If command fails
+ * to set up file locks it falls back to readonly behavior
+ * above, while allowing activation.
+ *
+ * --sysinit
+ *
+ * The same as ignorelockingfailure.
+ *
+ * --sysinit --readonly
+ *
+ * The combination of these two flags acts like --readonly,
+ * refusing write lock requests, but makes an exception to
+ * allow activation.
+ *
+ * global/metadata_read_only
+ *
+ * The command acquires actual read locks and refuses
+ * write lock requests.
*/
-int init_locking(int type, struct cmd_context *cmd, int suppress_messages)
+
+int init_locking(struct cmd_context *cmd,
+ int file_locking_sysinit, int file_locking_readonly, int file_locking_ignorefail)
{
- if (getenv("LVM_SUPPRESS_LOCKING_FAILURE_MESSAGES"))
+ int suppress_messages = 0;
+
+ if (file_locking_sysinit || getenv("LVM_SUPPRESS_LOCKING_FAILURE_MESSAGES"))
suppress_messages = 1;
- if (type < 0)
- type = find_config_tree_int(cmd, "global/locking_type", 1);
+ _blocking_supported = find_config_tree_bool(cmd, global_wait_for_locks_CFG, NULL);
+ _file_locking_readonly = file_locking_readonly;
+ _file_locking_sysinit = file_locking_sysinit;
+ _file_locking_ignorefail = file_locking_ignorefail;
- _blocking_supported = find_config_tree_int(cmd,
- "global/wait_for_locks", DEFAULT_WAIT_FOR_LOCKS);
+ log_debug("File locking settings: readonly:%d sysinit:%d ignorelockingfailure:%d global/metadata_read_only:%d global/wait_for_locks:%d.",
+ _file_locking_readonly, _file_locking_sysinit, _file_locking_ignorefail,
+ cmd->metadata_read_only, _blocking_supported);
- switch (type) {
- case 0:
- init_no_locking(&_locking, cmd, suppress_messages);
- log_warn("WARNING: Locking disabled. Be careful! "
- "This could corrupt your metadata.");
- return 1;
+ if (!init_file_locking(&_locking, cmd, suppress_messages)) {
+ log_error_suppress(suppress_messages, "File locking initialisation failed.");
- case 1:
- log_very_verbose("%sFile-based locking selected.",
- _blocking_supported ? "" : "Non-blocking ");
+ _file_locking_failed = 1;
- if (!init_file_locking(&_locking, cmd, suppress_messages)) {
- log_error_suppress(suppress_messages,
- "File-based locking initialisation failed.");
- break;
- }
- return 1;
-
-#ifdef HAVE_LIBDL
- case 2:
- if (!is_static()) {
- log_very_verbose("External locking selected.");
- if (init_external_locking(&_locking, cmd, suppress_messages))
- return 1;
- }
- if (!find_config_tree_int(cmd, "locking/fallback_to_clustered_locking",
- find_config_tree_int(cmd, "global/fallback_to_clustered_locking",
- DEFAULT_FALLBACK_TO_CLUSTERED_LOCKING))) {
- log_error_suppress(suppress_messages, "External locking initialisation failed.");
- break;
- }
-#endif
-
-#ifdef CLUSTER_LOCKING_INTERNAL
- log_very_verbose("Falling back to internal clustered locking.");
- /* Fall through */
-
- case 3:
- log_very_verbose("Cluster locking selected.");
- if (!init_cluster_locking(&_locking, cmd, suppress_messages)) {
- log_error_suppress(suppress_messages,
- "Internal cluster locking initialisation failed.");
- break;
- }
- return 1;
-#endif
-
- case 4:
- log_verbose("Read-only locking selected. "
- "Only read operations permitted.");
- if (!init_readonly_locking(&_locking, cmd, suppress_messages))
- break;
- return 1;
-
- default:
- log_error("Unknown locking type requested.");
- return 0;
- }
-
- if ((type == 2 || type == 3) &&
- find_config_tree_int(cmd, "locking/fallback_to_local_locking",
- find_config_tree_int(cmd, "global/fallback_to_local_locking",
- DEFAULT_FALLBACK_TO_LOCAL_LOCKING))) {
- log_warn_suppress(suppress_messages, "WARNING: Falling back to local file-based locking.");
- log_warn_suppress(suppress_messages,
- "Volume Groups with the clustered attribute will "
- "be inaccessible.");
- if (init_file_locking(&_locking, cmd, suppress_messages))
+ if (file_locking_sysinit || file_locking_ignorefail)
return 1;
- else
- log_error_suppress(suppress_messages,
- "File-based locking initialisation failed.");
- }
- if (!ignorelockingfailure())
return 0;
-
- log_verbose("Locking disabled - only read operations permitted.");
- init_readonly_locking(&_locking, cmd, suppress_messages);
+ }
return 1;
}
-void fin_locking(void)
+void fin_locking(struct cmd_context *cmd)
{
- _locking.fin_locking();
-}
-
-/*
- * Does the LVM1 driver know of this VG name?
- */
-int check_lvm1_vg_inactive(struct cmd_context *cmd, const char *vgname)
-{
- struct stat info;
- char path[PATH_MAX];
-
- /* We'll allow operations on orphans */
- if (!is_real_vg(vgname))
- return 1;
-
- /* LVM1 is only present in 2.4 kernels. */
- if (strncmp(cmd->kernel_vsn, "2.4.", 4))
- return 1;
-
- if (dm_snprintf(path, sizeof(path), "%s/lvm/VGs/%s", cmd->proc_dir,
- vgname) < 0) {
- log_error("LVM1 proc VG pathname too long for %s", vgname);
- return 0;
- }
+ /* file locking disabled */
+ if (!_locking.flags)
+ return;
- if (stat(path, &info) == 0) {
- log_error("%s exists: Is the original LVM driver using "
- "this volume group?", path);
- return 0;
- } else if (errno != ENOENT && errno != ENOTDIR) {
- log_sys_error("stat", path);
- return 0;
- }
+ /*
+ * These may be automatically released when the
+ * command ends, without an explicit unlock call,
+ * in which case these flags would not be cleared.
+ */
+ cmd->lockf_global_ex = 0;
+ cmd->lockd_global_ex = 0;
- return 1;
+ _locking.fin_locking();
}
/*
* VG locking is by VG name.
* FIXME This should become VG uuid.
*/
-static int _lock_vol(struct cmd_context *cmd, const char *resource,
- uint32_t flags, lv_operation_t lv_op)
+static int _lock_vol(struct cmd_context *cmd, const char *resource, uint32_t flags)
{
- uint32_t lck_type = flags & LCK_TYPE_MASK;
- uint32_t lck_scope = flags & LCK_SCOPE_MASK;
int ret = 0;
- _block_signals(flags);
- _lock_memory(cmd, lv_op);
-
- assert(resource);
-
- if (!*resource) {
- log_error(INTERNAL_ERROR "Use of P_orphans is deprecated.");
- return 0;
- }
-
- if ((is_orphan_vg(resource) || is_global_vg(resource)) && (flags & LCK_CACHE)) {
- log_error(INTERNAL_ERROR "P_%s referenced", resource);
- return 0;
- }
-
- if (cmd->metadata_read_only && lck_type == LCK_WRITE &&
- strcmp(resource, VG_GLOBAL)) {
- log_error("Operation prohibited while global/metadata_read_only is set.");
- return 0;
- }
-
- if ((ret = _locking.lock_resource(cmd, resource, flags))) {
- if (lck_scope == LCK_VG && !(flags & LCK_CACHE)) {
- if (lck_type != LCK_UNLOCK)
- lvmcache_lock_vgname(resource, lck_type == LCK_READ);
- dev_reset_error_count(cmd);
- }
+ block_signals(flags);
+ if ((ret = _locking.lock_resource(cmd, resource, flags, NULL)))
+ /* ensure signals are blocked while VG_GLOBAL lock is held */
_update_vg_lock_count(resource, flags);
- } else
+ else
stack;
- /* If unlocking, always remove lock from lvmcache even if operation failed. */
- if (lck_scope == LCK_VG && !(flags & LCK_CACHE) && lck_type == LCK_UNLOCK) {
- lvmcache_unlock_vgname(resource);
- if (!ret)
- _update_vg_lock_count(resource, flags);
- }
-
- _unlock_memory(cmd, lv_op);
_unblock_signals();
return ret;
}
-int lock_vol(struct cmd_context *cmd, const char *vol, uint32_t flags)
+int lock_vol(struct cmd_context *cmd, const char *vol, uint32_t flags, const struct logical_volume *lv)
{
char resource[258] __attribute__((aligned(8)));
- lv_operation_t lv_op;
- int lck_type = flags & LCK_TYPE_MASK;
-
- switch (flags & (LCK_SCOPE_MASK | LCK_TYPE_MASK)) {
- case LCK_LV_SUSPEND:
- lv_op = LV_SUSPEND;
- break;
- case LCK_LV_RESUME:
- lv_op = LV_RESUME;
- break;
- default: lv_op = LV_NOOP;
- }
-
+ uint32_t lck_type = flags & LCK_TYPE_MASK;
+ int is_global = !strcmp(vol, VG_GLOBAL);
- if (flags == LCK_NONE) {
- log_debug(INTERNAL_ERROR "%s: LCK_NONE lock requested", vol);
+ if (is_orphan_vg(vol))
return 1;
- }
- switch (flags & LCK_SCOPE_MASK) {
- case LCK_VG:
- if (!_blocking_supported)
- flags |= LCK_NONBLOCK;
-
- /* Global VG_ORPHANS lock covers all orphan formats. */
- if (is_orphan_vg(vol))
- vol = VG_ORPHANS;
- /* VG locks alphabetical, ORPHAN lock last */
- if ((lck_type != LCK_UNLOCK) &&
- !(flags & LCK_CACHE) &&
- !lvmcache_verify_lock_order(vol))
- return_0;
-
- /* Lock VG to change on-disk metadata. */
- /* If LVM1 driver knows about the VG, it can't be accessed. */
- if (!check_lvm1_vg_inactive(cmd, vol))
- return_0;
- break;
- case LCK_LV:
- /* All LV locks are non-blocking. */
+ if (!_blocking_supported)
flags |= LCK_NONBLOCK;
- break;
- default:
- log_error("Unrecognised lock scope: %d",
- flags & LCK_SCOPE_MASK);
+
+ if (!dm_strncpy(resource, vol, sizeof(resource))) {
+ log_error(INTERNAL_ERROR "Resource name %s is too long.", vol);
return 0;
}
- strncpy(resource, vol, sizeof(resource) - 1);
- resource[sizeof(resource) - 1] = '\0';
-
- if (!_lock_vol(cmd, resource, flags, lv_op))
- return_0;
-
/*
- * If a real lock was acquired (i.e. not LCK_CACHE),
- * perform an immediate unlock unless LCK_HOLD was requested.
+ * File locking is disabled by --nolocking.
*/
- if ((lck_type == LCK_NULL) || (lck_type == LCK_UNLOCK) ||
- (flags & (LCK_CACHE | LCK_HOLD)))
- return 1;
+ if (!_locking.flags)
+ goto out_hold;
- if (!_lock_vol(cmd, resource, (flags & ~LCK_TYPE_MASK) | LCK_UNLOCK, lv_op))
- return_0;
+ /*
+ * When file locking could not be initialized, --ignorelockingfailure
+ * and --sysinit behave like --readonly, but allow activation.
+ */
+ if (_file_locking_failed && (_file_locking_sysinit || _file_locking_ignorefail)) {
+ if (lck_type != LCK_WRITE)
+ goto out_hold;
- return 1;
-}
+ if (cmd->is_activating && !is_global)
+ goto out_hold;
-/* Unlock list of LVs */
-int resume_lvs(struct cmd_context *cmd, struct dm_list *lvs)
-{
- struct lv_list *lvl;
- int r = 1;
+ goto out_fail;
+ }
- dm_list_iterate_items(lvl, lvs)
- if (!resume_lv(cmd, lvl->lv)) {
- r = 0;
- stack;
- }
+ /*
+ * When --readonly is set, grant read lock requests without trying to
+ * acquire an actual lock, and refuse write lock requests.
+ */
+ if (_file_locking_readonly && !_file_locking_sysinit) {
+ if (lck_type != LCK_WRITE)
+ goto out_hold;
- return r;
-}
+ log_error("Operation prohibited while --readonly is set.");
+ goto out_fail;
+ }
-/* Unlock and revert list of LVs */
-int revert_lvs(struct cmd_context *cmd, struct dm_list *lvs)
-{
- struct lv_list *lvl;
- int r = 1;
+ /*
+ * When --readonly and --sysinit are set, grant read lock requests without
+ * trying to acquire an actual lock, and refuse write lock requests except
+ * in the case of activation which is permitted.
+ */
+ if (_file_locking_readonly && _file_locking_sysinit) {
+ if (lck_type != LCK_WRITE)
+ goto out_hold;
- dm_list_iterate_items(lvl, lvs)
- if (!revert_lv(cmd, lvl->lv)) {
- r = 0;
- stack;
+ if (cmd->is_activating) {
+ log_warn("Allowing activation with --readonly --sysinit.");
+ goto out_hold;
}
- return r;
-}
-/*
- * Lock a list of LVs.
- * On failure to lock any LV, calls vg_revert() if vg_to_revert is set and
- * then unlocks any LVs on the list already successfully locked.
- */
-int suspend_lvs(struct cmd_context *cmd, struct dm_list *lvs,
- struct volume_group *vg_to_revert)
-{
- struct lv_list *lvl;
-
- dm_list_iterate_items(lvl, lvs) {
- if (!suspend_lv(cmd, lvl->lv)) {
- log_error("Failed to suspend %s", lvl->lv->name);
- if (vg_to_revert)
- vg_revert(vg_to_revert);
- /*
- * FIXME Should be
- * dm_list_uniterate(lvh, lvs, &lvl->list) {
- * lvl = dm_list_item(lvh, struct lv_list);
- * but revert would need fixing to use identical tree deps first.
- */
- dm_list_iterate_items(lvl, lvs)
- if (!revert_lv(cmd, lvl->lv))
- stack;
+ log_error("Operation prohibited while --readonly is set.");
+ goto out_fail;
+ }
- return 0;
+ /*
+ * When global/metadata_read_only is set, acquire actual read locks and
+ * refuse write lock requests.
+ */
+ if (cmd->metadata_read_only) {
+ if (lck_type == LCK_WRITE) {
+ log_error("Operation prohibited while global/metadata_read_only is set.");
+ goto out_fail;
}
- }
- return 1;
-}
+ /* continue and acquire a read file lock */
+ }
-/*
- * First try to activate exclusively locally.
- * Then if the VG is clustered and the LV is not yet active (e.g. due to
- * an activation filter) try activating on remote nodes.
- */
-int activate_lv_excl(struct cmd_context *cmd, struct logical_volume *lv)
-{
- /* Non-clustered VGs are only activated locally. */
- if (!vg_is_clustered(lv->vg))
- return activate_lv_excl_local(cmd, lv);
+ if (!_lock_vol(cmd, resource, flags))
+ goto out_fail;
- if (lv_is_active_exclusive_locally(lv))
+out_hold:
+ if (is_global)
return 1;
- if (!activate_lv_excl_local(cmd, lv))
- return_0;
-
- if (lv_is_active_exclusive(lv))
- return 1;
+ /*
+ * FIXME: other parts of the code want to check if a VG is
+ * locked by looking in lvmcache. They shouldn't need to
+ * do that, and we should be able to remove this.
+ */
+ if (lck_type != LCK_UNLOCK)
+ lvmcache_lock_vgname(resource, lck_type == LCK_READ);
+ else if (lck_type == LCK_UNLOCK)
+ lvmcache_unlock_vgname(resource);
- /* FIXME Deal with error return codes. */
- if (activate_lv_excl_remote(cmd, lv))
- stack;
+ return 1;
- return lv_is_active_exclusive(lv);
+out_fail:
+ if (is_global)
+ return 0;
+ if (lck_type == LCK_UNLOCK)
+ _update_vg_lock_count(resource, flags);
+ return 0;
}
/* Lock a list of LVs */
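
The long comment at the top of this hunk spells out how --nolocking, --readonly, --sysinit, --ignorelockingfailure and global/metadata_read_only interact with lock requests. As a reading aid, a compressed, standalone sketch of that decision table; the names (lock_opts, decide_file_lock) and the simplified flags are hypothetical, and the behaviour is paraphrased from the comment rather than copied from the new lock_vol() code.

/* Illustrative decision table only - paraphrases the comment above.
 * All identifiers here are hypothetical. */
enum lock_decision { GRANT_NO_FLOCK, TAKE_FLOCK, REFUSE };

struct lock_opts {
	int nolocking;		/* --nolocking */
	int readonly;		/* --readonly */
	int sysinit;		/* --sysinit */
	int ignorefail;		/* --ignorelockingfailure */
	int locking_failed;	/* file lock initialisation failed */
	int metadata_ro;	/* global/metadata_read_only */
};

static enum lock_decision decide_file_lock(const struct lock_opts *o,
					   int is_write, int is_activating)
{
	if (o->nolocking)
		return GRANT_NO_FLOCK;		/* no file locks at all */

	if (o->locking_failed && (o->sysinit || o->ignorefail))
		/* behave like --readonly, but allow activation */
		return (!is_write || is_activating) ? GRANT_NO_FLOCK : REFUSE;

	if (o->readonly && !o->sysinit)
		return is_write ? REFUSE : GRANT_NO_FLOCK;

	if (o->readonly && o->sysinit)		/* --readonly --sysinit */
		return (!is_write || is_activating) ? GRANT_NO_FLOCK : REFUSE;

	if (o->metadata_ro && is_write)
		return REFUSE;			/* real read locks only */

	return TAKE_FLOCK;			/* acquire a real file lock */
}
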
@@ -574,16 +308,12 @@ int activate_lvs(struct cmd_context *cmd, struct dm_list *lvs, unsigned exclusiv
struct lv_list *lvl;
dm_list_iterate_items(lvl, lvs) {
- if (!exclusive && !lv_is_active_exclusive(lvl->lv)) {
- if (!activate_lv(cmd, lvl->lv)) {
- log_error("Failed to activate %s", lvl->lv->name);
- return 0;
- }
- } else if (!activate_lv_excl(cmd, lvl->lv)) {
- log_error("Failed to activate %s", lvl->lv->name);
+ if (!activate_lv(cmd, lvl->lv)) {
+ log_error("Failed to activate %s", display_lvname(lvl->lv));
+
dm_list_uniterate(lvh, lvs, &lvl->list) {
lvl = dm_list_item(lvh, struct lv_list);
- if (!activate_lv(cmd, lvl->lv))
+ if (!deactivate_lv(cmd, lvl->lv))
stack;
}
return 0;
@@ -598,45 +328,109 @@ int vg_write_lock_held(void)
return _vg_write_lock_held;
}
-int locking_is_clustered(void)
+int sync_local_dev_names(struct cmd_context* cmd)
{
- return (_locking.flags & LCK_CLUSTERED) ? 1 : 0;
+ dm_device_list_destroy(&cmd->cache_dm_devs);
+ memlock_unlock(cmd);
+ fs_unlock();
+ return 1;
}
-int remote_lock_held(const char *vol, int *exclusive)
+/*
+ * The lockf_global_ex flag is used to prevent changing
+ * an explicitly acquired ex global lock to sh in process_each.
+ */
+
+static int _lockf_global(struct cmd_context *cmd, const char *mode, int convert, int nonblock)
{
- int mode = LCK_NULL;
+ uint32_t flags = 0;
+ int ret;
+
+ if (convert)
+ flags |= LCK_CONVERT;
+
+ if (nonblock)
+ flags |= LCK_NONBLOCK;
+
+ if (!strcmp(mode, "ex")) {
+ flags |= LCK_WRITE;
- if (!locking_is_clustered())
+ if (cmd->lockf_global_ex) {
+ log_warn("global flock already held ex");
+ return 1;
+ }
+
+ ret = lock_vol(cmd, VG_GLOBAL, flags, NULL);
+ if (ret)
+ cmd->lockf_global_ex = 1;
+
+ } else if (!strcmp(mode, "sh")) {
+ if (cmd->lockf_global_ex)
+ return 1;
+
+ flags |= LCK_READ;
+
+ ret = lock_vol(cmd, VG_GLOBAL, flags, NULL);
+
+ } else if (!strcmp(mode, "un")) {
+ ret = lock_vol(cmd, VG_GLOBAL, LCK_UNLOCK, NULL);
+ cmd->lockf_global_ex = 0;
+ } else {
+ log_error(INTERNAL_ERROR "Unknown locking mode %s.", mode);
return 0;
+ }
- if (!_locking.query_resource)
- return -1;
+ return ret;
+}
- /*
- * If an error occured, expect that volume is active
- */
- if (!_locking.query_resource(vol, &mode)) {
- stack;
+int lockf_global(struct cmd_context *cmd, const char *mode)
+{
+ return _lockf_global(cmd, mode, 0, 0);
+}
+
+int lockf_global_convert(struct cmd_context *cmd, const char *mode)
+{
+ /* some uncommon cases like pvchange -a can call this multiple times */
+ if (cmd->lockf_global_ex && !strcmp(mode, "ex"))
return 1;
- }
- if (exclusive)
- *exclusive = (mode == LCK_EXCL);
+ return _lockf_global(cmd, mode, 1, 0);
+}
- return mode == LCK_NULL ? 0 : 1;
+int lockf_global_nonblock(struct cmd_context *cmd, const char *mode)
+{
+ return _lockf_global(cmd, mode, 0, 1);
}
-int sync_local_dev_names(struct cmd_context* cmd)
+int lock_global(struct cmd_context *cmd, const char *mode)
{
- memlock_unlock(cmd);
+ if (!lockf_global(cmd, mode))
+ return 0;
- return lock_vol(cmd, VG_SYNC_NAMES, LCK_VG_SYNC_LOCAL);
+ if (!lockd_global(cmd, mode)) {
+ lockf_global(cmd, "un");
+ return 0;
+ }
+
+ return 1;
}
-int sync_dev_names(struct cmd_context* cmd)
+/*
+ * The global lock is already held, convert it to another mode.
+ *
+ * Currently only used for sh->ex.
+ *
+ * (The lockf_global_ex flag would need overriding
+ * to handle ex->sh.)
+ */
+
+int lock_global_convert(struct cmd_context *cmd, const char *mode)
{
- memlock_unlock(cmd);
+ if (!lockf_global_convert(cmd, mode))
+ return 0;
- return lock_vol(cmd, VG_SYNC_NAMES, LCK_VG_SYNC);
+ if (!lockd_global(cmd, mode))
+ return 0;
+
+ return 1;
}