continuwuity/src/database/map/rev_stream_from.rs
Tom Foster b5a2e49ae4
fix: Resolve Clippy CI failures from elided lifetime warnings
The latest Rust nightly compiler (2025-08-27) introduced the
elided-named-lifetimes lint which causes Clippy CI checks to fail
when an elided lifetime ('_) resolves to a named lifetime that's
already in scope.

This commit fixes the Clippy warnings by:
- Making lifetime relationships explicit where 'a is already in scope
- Keeping elided lifetimes ('_) in functions without explicit
  lifetime parameters
- Ensuring proper lifetime handling in the database pool module

Affected files (17 total):
- Database map modules: Handle, Key, and KeyVal references in get,
  qry, keys, and stream operations
- Database pool module: into_recv_seek function

This change resolves the CI build failures without changing any
functionality, ensuring the codebase remains compatible with the
latest nightly Clippy checks.
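
For illustration only (this snippet is not from the affected files), a minimal
case that triggers the lint, and its fix:

    // Warns: the elided return lifetime silently resolves to the named 'a.
    fn first<'a>(items: &'a [String]) -> &'_ str {
        items[0].as_str()
    }

    // Fixed: name the lifetime so the relationship is explicit.
    fn first<'a>(items: &'a [String]) -> &'a str {
        items[0].as_str()
    }
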
2025-08-28 21:13:19 +01:00

use std::{convert::AsRef, fmt::Debug, sync::Arc};

use conduwuit::{Result, implement};
use futures::{FutureExt, Stream, StreamExt, TryFutureExt, TryStreamExt};
use rocksdb::Direction;
use serde::{Deserialize, Serialize};
use tokio::task;

use crate::{
    keyval::{KeyVal, result_deserialize, serialize_key},
    stream,
    util::is_incomplete,
};

/// Iterate key-value entries in the map starting from upper-bound.
///
/// - Query is serialized
/// - Result is deserialized
#[implement(super::Map)]
pub fn rev_stream_from<'a, K, V, P>(
    self: &'a Arc<Self>,
    from: &P,
) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + use<'a, K, V, P>
where
    P: Serialize + ?Sized + Debug,
    K: Deserialize<'a> + Send,
    V: Deserialize<'a> + Send,
{
    self.rev_stream_from_raw(from)
        .map(result_deserialize::<K, V>)
}

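// Hypothetical usage sketch for `rev_stream_from` (the `map` handle, the
// key/value types, and `upper_bound` below are illustrative, not part of this
// module): walk entries newest-first from an upper bound, deserializing each
// key-value pair.
//
//     let mut entries = map.rev_stream_from::<u64, &str, _>(&upper_bound);
//     while let Some((ts, body)) = entries.try_next().await? {
//         // newest-first processing here
//     }
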
/// Iterate key-value entries in the map starting from upper-bound.
///
/// - Query is serialized
/// - Result is raw
#[implement(super::Map)]
#[tracing::instrument(skip(self), level = "trace")]
pub fn rev_stream_from_raw<P>(
    self: &Arc<Self>,
    from: &P,
) -> impl Stream<Item = Result<KeyVal<'_>>> + Send + use<'_, P>
where
    P: Serialize + ?Sized + Debug,
{
    let key = serialize_key(from).expect("failed to serialize query key");
    self.rev_raw_stream_from(&key)
}

/// Iterate key-value entries in the map starting from upper-bound.
///
/// - Query is raw
/// - Result is deserialized
#[implement(super::Map)]
pub fn rev_stream_raw_from<'a, K, V, P>(
    self: &'a Arc<Self>,
    from: &P,
) -> impl Stream<Item = Result<KeyVal<'a, K, V>>> + Send + use<'a, K, V, P>
where
    P: AsRef<[u8]> + ?Sized + Debug + Sync,
    K: Deserialize<'a> + Send,
    V: Deserialize<'a> + Send,
{
    self.rev_raw_stream_from(from)
        .map(result_deserialize::<K, V>)
}

/// Iterate key-value entries in the map starting from upper-bound.
///
/// - Query is raw
/// - Result is raw
#[implement(super::Map)]
#[tracing::instrument(skip(self, from), fields(%self), level = "trace")]
pub fn rev_raw_stream_from<P>(
    self: &Arc<Self>,
    from: &P,
) -> impl Stream<Item = Result<KeyVal<'_>>> + Send + use<'_, P>
where
    P: AsRef<[u8]> + ?Sized + Debug,
{
    use crate::pool::Seek;

    let opts = super::iter_options_default(&self.db);
    let state = stream::State::new(self, opts);

    // Fast path: the iteration can be served from the cache, so consume a
    // unit of tokio's cooperative budget and yield items from this task.
    if is_cached(self, from) {
        let state = state.init_rev(from.as_ref().into());
        return task::consume_budget()
            .map(move |()| stream::ItemsRev::<'_>::from(state))
            .into_stream()
            .flatten()
            .boxed();
    }

    // Otherwise dispatch a reverse seek to the database pool and flatten the
    // resulting items into the returned stream.
    let seek = Seek {
        map: self.clone(),
        dir: Direction::Reverse,
        key: Some(from.as_ref().into()),
        state: crate::pool::into_send_seek(state),
        res: None,
    };

    self.db
        .pool
        .execute_iter(seek)
        .ok_into::<stream::ItemsRev<'_>>()
        .into_stream()
        .try_flatten()
        .boxed()
}

#[tracing::instrument(
    name = "cached",
    level = "trace",
    skip(map, from),
    fields(%map),
)]
pub(super) fn is_cached<P>(map: &Arc<super::Map>, from: &P) -> bool
where
    P: AsRef<[u8]> + ?Sized,
{
    // Probe a cache-only reverse iterator positioned at `from`; the range is
    // treated as cached unless the iterator reports an incomplete status.
    let cache_opts = super::cache_iter_options_default(&map.db);
    let cache_status = stream::State::new(map, cache_opts)
        .init_rev(from.as_ref().into())
        .status();

    !matches!(cache_status, Some(e) if is_incomplete(&e))
}