rocksdb/util/atomic.h
Peter Dillinger b3fdb9b3cc Use safer atomic APIs for some memtable code (#13844)
Summary:
Two instances of change that are not just cosmetic:

* InlineSkipList<>::Node::CASNext() was implicitly using memory_order_seq_cst to access `next_` while it's intended to be accessed with acquire/release. This is probably not a correctness issue for compare_exchange_strong but potentially a previously missed optimization.
* Similar for `max_height_` in Insert which is otherwise accessed with relaxed memory order.
* One non-relaxed access to `is_range_del_table_empty_` in a function only used in assertions. Access to this atomic is otherwise relaxed (and should be - comment added)

Didn't do all of memtable.h because some of them are more complicated changes and I should probably add FetchMin and FetchMax functions to simplify and take advantage of C++26 functions (`fetch_min`/`fetch_max`) where available (intended follow-up).

Pull Request resolved: https://github.com/facebook/rocksdb/pull/13844

Test Plan: existing tests

Reviewed By: xingbowang

Differential Revision: D79742552

Pulled By: pdillinger

fbshipit-source-id: d97ce72ba9af6c105694b7d40622db9e994720cd
2025-08-14 21:54:52 -07:00

117 lines
4.4 KiB
C++

// Copyright (c) Meta Platforms, Inc. and affiliates.
// This source code is licensed under both the GPLv2 (found in the
// COPYING file in the root directory) and Apache 2.0 License
// (found in the LICENSE.Apache file in the root directory).
#pragma once
#include <atomic>
#include "rocksdb/rocksdb_namespace.h"
namespace ROCKSDB_NAMESPACE {
// Background:
// std::atomic is somewhat easy to misuse:
// * Implicit conversion to T using std::memory_order_seq_cst, along with
// memory order parameter defaults, make it easy to accidentally mix sequential
// consistency ordering with acquire/release memory ordering. See
// "The single total order might not be consistent with happens-before" at
// https://en.cppreference.com/w/cpp/atomic/memory_order
// * It's easy to use nonsensical (UB) combinations like store with
// std::memory_order_acquire.
// * It is unlikely that anything in RocksDB will need std::memory_order_seq_cst
// because sequential consistency for the user, potentially writing from
// multiple threads, is provided by explicit versioning with sequence numbers.
// If threads A & B update separate atomics, it's typically OK if threads C & D
// see those updates in different orders.
//
// For such reasons, we provide wrappers below to make safe usage easier.
// Wrapper around std::atomic to avoid certain bugs (see Background above).
//
// This relaxed-only wrapper is intended for atomics that do not need
// ordering constraints with other data reads/writes aside from those
// necessary for computing data values or given by other happens-before
// relationships. For example, a cross-thread counter that never returns
// the same result can be a RelaxedAtomic.
template <typename T>
class RelaxedAtomic {
 public:
  explicit RelaxedAtomic(T initial = {}) : v_(initial) {}
  void StoreRelaxed(T desired) { v_.store(desired, std::memory_order_relaxed); }
  T LoadRelaxed() const { return v_.load(std::memory_order_relaxed); }
  // Weak CAS may fail spuriously; suitable inside retry loops. On failure,
  // `expected` is updated to the observed value.
  bool CasWeakRelaxed(T& expected, T desired) {
    return v_.compare_exchange_weak(expected, desired,
                                    std::memory_order_relaxed);
  }
  bool CasStrongRelaxed(T& expected, T desired) {
    return v_.compare_exchange_strong(expected, desired,
                                      std::memory_order_relaxed);
  }
  // Each of the operations below returns the value immediately *before*
  // the atomic modification, matching std::atomic conventions.
  T ExchangeRelaxed(T desired) {
    return v_.exchange(desired, std::memory_order_relaxed);
  }
  T FetchAddRelaxed(T operand) {
    return v_.fetch_add(operand, std::memory_order_relaxed);
  }
  T FetchSubRelaxed(T operand) {
    return v_.fetch_sub(operand, std::memory_order_relaxed);
  }
  T FetchAndRelaxed(T operand) {
    return v_.fetch_and(operand, std::memory_order_relaxed);
  }
  T FetchOrRelaxed(T operand) {
    return v_.fetch_or(operand, std::memory_order_relaxed);
  }
  T FetchXorRelaxed(T operand) {
    return v_.fetch_xor(operand, std::memory_order_relaxed);
  }
  // Relaxed equivalent of C++26 std::atomic::fetch_min: atomically replaces
  // the stored value with min(current, operand) and returns the value
  // immediately before the operation. Implemented as a CAS loop until
  // fetch_min is available. No store is attempted (and thus no cache-line
  // write contention) when the current value is already <= operand.
  T FetchMinRelaxed(T operand) {
    T prev = v_.load(std::memory_order_relaxed);
    // On CAS failure, `prev` is refreshed to the observed value, so the
    // loop condition re-checks whether a store is still needed.
    while (operand < prev &&
           !v_.compare_exchange_weak(prev, operand,
                                     std::memory_order_relaxed)) {
    }
    return prev;
  }
  // Relaxed equivalent of C++26 std::atomic::fetch_max. See FetchMinRelaxed.
  T FetchMaxRelaxed(T operand) {
    T prev = v_.load(std::memory_order_relaxed);
    while (prev < operand &&
           !v_.compare_exchange_weak(prev, operand,
                                     std::memory_order_relaxed)) {
    }
    return prev;
  }

 protected:
  std::atomic<T> v_;
};
// Wrapper around std::atomic to avoid certain bugs (see Background above).
//
// Except for some unusual cases requiring sequential consistency, this is
// a general-purpose atomic. Relaxed operations can be mixed in as appropriate
// (inherited from RelaxedAtomic).
template <typename T>
class AcqRelAtomic : public RelaxedAtomic<T> {
 public:
  explicit AcqRelAtomic(T initial = {}) : RelaxedAtomic<T>(initial) {}
  void Store(T desired) {
    RelaxedAtomic<T>::v_.store(desired, std::memory_order_release);
  }
  T Load() const {
    return RelaxedAtomic<T>::v_.load(std::memory_order_acquire);
  }
  // Weak CAS may fail spuriously; suitable inside retry loops. On failure,
  // `expected` is updated to the observed value (failure load is acquire,
  // derived from the single acq_rel order argument).
  bool CasWeak(T& expected, T desired) {
    return RelaxedAtomic<T>::v_.compare_exchange_weak(
        expected, desired, std::memory_order_acq_rel);
  }
  bool CasStrong(T& expected, T desired) {
    return RelaxedAtomic<T>::v_.compare_exchange_strong(
        expected, desired, std::memory_order_acq_rel);
  }
  // Each of the operations below returns the value immediately *before*
  // the atomic modification, matching std::atomic conventions.
  T Exchange(T desired) {
    return RelaxedAtomic<T>::v_.exchange(desired, std::memory_order_acq_rel);
  }
  T FetchAdd(T operand) {
    return RelaxedAtomic<T>::v_.fetch_add(operand, std::memory_order_acq_rel);
  }
  T FetchSub(T operand) {
    return RelaxedAtomic<T>::v_.fetch_sub(operand, std::memory_order_acq_rel);
  }
  T FetchAnd(T operand) {
    return RelaxedAtomic<T>::v_.fetch_and(operand, std::memory_order_acq_rel);
  }
  T FetchOr(T operand) {
    return RelaxedAtomic<T>::v_.fetch_or(operand, std::memory_order_acq_rel);
  }
  T FetchXor(T operand) {
    return RelaxedAtomic<T>::v_.fetch_xor(operand, std::memory_order_acq_rel);
  }
  // Acq/rel equivalent of C++26 std::atomic::fetch_min: atomically replaces
  // the stored value with min(current, operand) and returns the value
  // immediately before the operation. Implemented as a CAS loop until
  // fetch_min is available. NOTE: when no store is needed (current value
  // already <= operand), only an acquire load is performed.
  T FetchMin(T operand) {
    T prev = RelaxedAtomic<T>::v_.load(std::memory_order_acquire);
    // On CAS failure, `prev` is refreshed to the observed value, so the
    // loop condition re-checks whether a store is still needed.
    while (operand < prev &&
           !RelaxedAtomic<T>::v_.compare_exchange_weak(
               prev, operand, std::memory_order_acq_rel)) {
    }
    return prev;
  }
  // Acq/rel equivalent of C++26 std::atomic::fetch_max. See FetchMin.
  T FetchMax(T operand) {
    T prev = RelaxedAtomic<T>::v_.load(std::memory_order_acquire);
    while (prev < operand &&
           !RelaxedAtomic<T>::v_.compare_exchange_weak(
               prev, operand, std::memory_order_acq_rel)) {
    }
    return prev;
  }
};
} // namespace ROCKSDB_NAMESPACE