Init collection from #1364

Merged 11 commits on Jan 22, 2023
Changes from 1 commit
WIP: collection init from other collection
generall committed Jan 17, 2023
commit a619bff9f89b190599e0ae5eb1e65c9729ae5039
1 change: 1 addition & 0 deletions docs/grpc/docs.md
@@ -333,6 +333,7 @@
| vectors_config | [VectorsConfig](#qdrant-VectorsConfig) | optional | Configuration for vectors |
| replication_factor | [uint32](#uint32) | optional | Number of replicas of each shard that network tries to maintain, default = 1 |
| write_consistency_factor | [uint32](#uint32) | optional | How many replicas should apply the operation for us to consider it successful, default = 1 |
| init_from_collection | [string](#string) | optional | Specify name of the other collection to copy data from |



24 changes: 24 additions & 0 deletions docs/redoc/master/openapi.json
@@ -4261,6 +4261,18 @@
"nullable": true
}
]
},
"init_from": {
"description": "Specify other collection to copy data from.",
"default": null,
"anyOf": [
{
"$ref": "#/components/schemas/InitFrom"
},
{
"nullable": true
}
]
}
}
},
@@ -4391,6 +4403,18 @@
}
}
},
"InitFrom": {
"description": "Operation for creating new collection and (optionally) specify index params",
"type": "object",
"required": [
"collection"
],
"properties": {
"collection": {
"type": "string"
}
}
},
"UpdateCollection": {
"description": "Operation for updating parameters of the existing collection",
"type": "object",
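For reference, a minimal serde sketch of what these schema additions could look like on the Rust side. The names `InitFrom`, `init_from`, and `collection` come from the OpenAPI diff above (and mirror the gRPC `init_from_collection` field documented in docs.md); the derives and the surrounding struct name are assumptions for illustration, not the actual Qdrant definitions.

```rust
use serde::{Deserialize, Serialize};

/// Source collection to copy data from (mirrors the `InitFrom` schema above).
#[derive(Debug, Serialize, Deserialize)]
pub struct InitFrom {
    pub collection: String,
}

/// Hypothetical create-collection payload carrying the new optional field.
/// Only `init_from` is taken from the diff; other parameters are elided.
#[derive(Debug, Default, Serialize, Deserialize)]
pub struct CreateCollectionSketch {
    #[serde(default, skip_serializing_if = "Option::is_none")]
    pub init_from: Option<InitFrom>,
}

fn main() {
    let request = CreateCollectionSketch {
        init_from: Some(InitFrom {
            collection: "existing_collection".to_string(),
        }),
    };
    // Serializes to: {"init_from":{"collection":"existing_collection"}}
    println!("{}", serde_json::to_string(&request).unwrap());
}
```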
29 changes: 29 additions & 0 deletions lib/collection/src/collection.rs
@@ -21,6 +21,7 @@ use tokio::runtime::Handle;
use tokio::sync::{Mutex, RwLock};

use crate::collection_state::{ShardInfo, State};
use crate::common::is_ready::IsReady;
use crate::config::CollectionConfig;
use crate::hash_ring::HashRing;
use crate::operations::config_diff::{CollectionParamsDiff, DiffConfig, OptimizersConfigDiff};
@@ -81,6 +82,9 @@ pub struct Collection {
#[allow(dead_code)] //Might be useful in case of repartition implementation
notify_peer_failure_cb: OnPeerFailure,
init_time: Duration,
// One-way boolean flag that is set to true when the collection is fully initialized
// i.e. all shards are activated for the first time.
is_initialized: Arc<IsReady>,
}

impl Collection {
@@ -151,6 +155,7 @@ impl Collection {
request_shard_transfer_cb: request_shard_transfer.clone(),
notify_peer_failure_cb: on_replica_failure.clone(),
init_time: start_time.elapsed(),
is_initialized: Arc::new(Default::default()),
})
}

@@ -250,6 +255,7 @@ impl Collection {
request_shard_transfer_cb: request_shard_transfer.clone(),
notify_peer_failure_cb: on_replica_failure,
init_time: start_time.elapsed(),
is_initialized: Arc::new(Default::default()),
}
}

@@ -328,6 +334,25 @@ impl Collection {
}
}

if !self.is_initialized.check_ready() {
// If not initialized yet, we need to check if it was initialized by this call
let state = self.state().await;
let mut is_fully_active = true;
for (_shard_id, shard_info) in state.shards {
if shard_info
.replicas
.into_iter()
.any(|(_peer_id, state)| state != ReplicaState::Active)
{
is_fully_active = false;
break;
}
}
if is_fully_active {
self.is_initialized.make_ready();
}
}

// Try to request shard transfer if replicas on the current peer are dead
if state == ReplicaState::Dead && self.this_peer_id == peer_id {
let transfer_from = replica_set
@@ -1418,6 +1443,10 @@ impl Collection {

Ok(())
}

pub fn wait_collection_initiated(&self, timeout: Duration) -> bool {
self.is_initialized.await_ready_for_timeout(timeout)
}
}

impl Drop for Collection {
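The `is_initialized` flag above relies on an `IsReady` primitive exposing `check_ready`, `make_ready`, and `await_ready_for_timeout`, which this PR moves from the storage crate's consensus module into `collection::common::is_ready` (see the module changes below). Here is a minimal sketch of such a one-way flag using the standard library's `Mutex` and `Condvar`; the actual implementation in the repository may differ in detail.

```rust
use std::sync::{Condvar, Mutex};
use std::time::Duration;

/// One-way readiness flag: once made ready it stays ready, and waiters
/// can block on it with a timeout.
#[derive(Default)]
pub struct IsReady {
    ready: Mutex<bool>,
    condvar: Condvar,
}

impl IsReady {
    /// Non-blocking check of the flag.
    pub fn check_ready(&self) -> bool {
        *self.ready.lock().unwrap()
    }

    /// Set the flag and wake up all waiters.
    pub fn make_ready(&self) {
        let mut ready = self.ready.lock().unwrap();
        *ready = true;
        self.condvar.notify_all();
    }

    /// Block until the flag is set or the timeout expires; returns the flag value.
    pub fn await_ready_for_timeout(&self, timeout: Duration) -> bool {
        let guard = self.ready.lock().unwrap();
        if *guard {
            return true;
        }
        let (guard, _timeout_result) = self
            .condvar
            .wait_timeout_while(guard, timeout, |ready| !*ready)
            .unwrap();
        *guard
    }
}
```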
1 change: 1 addition & 0 deletions lib/collection/src/common/mod.rs
@@ -1,2 +1,3 @@
pub mod stoppable_task;
pub mod stoppable_task_async;
pub mod is_ready;
2 changes: 1 addition & 1 deletion lib/collection/src/lib.rs
@@ -3,7 +3,7 @@
pub mod collection;
pub mod collection_manager;
pub mod collection_state;
mod common;
pub mod common;
pub mod config;
pub mod hash_ring;
pub mod operations;
3 changes: 2 additions & 1 deletion lib/storage/src/content_manager/collections_ops.rs
@@ -2,10 +2,11 @@ use std::collections::HashMap;

use async_trait::async_trait;
use collection::collection::Collection;
use collection::shards::CollectionId;

use crate::content_manager::errors::StorageError;

pub type Collections = HashMap<String, Collection>;
pub type Collections = HashMap<CollectionId, Collection>;

#[async_trait]
pub trait Checker {
1 change: 0 additions & 1 deletion lib/storage/src/content_manager/consensus/mod.rs
@@ -1,5 +1,4 @@
pub mod consensus_wal;
pub mod entry_queue;
pub mod is_ready;
pub mod operation_sender;
pub mod persistent;
2 changes: 1 addition & 1 deletion lib/storage/src/content_manager/consensus_manager.rs
@@ -27,7 +27,7 @@ use super::errors::StorageError;
use super::CollectionContainer;
use crate::content_manager::consensus::consensus_wal::ConsensusOpWal;
use crate::content_manager::consensus::entry_queue::EntryId;
use crate::content_manager::consensus::is_ready::IsReady;
use collection::common::is_ready::IsReady;
use crate::content_manager::consensus::operation_sender::OperationSender;
use crate::content_manager::consensus::persistent::Persistent;
use crate::types::{
111 changes: 105 additions & 6 deletions lib/storage/src/content_manager/migration.rs
@@ -1,9 +1,20 @@
use std::collections::HashMap;
use std::sync::Arc;
use std::time::Duration;

use collection::collection::Collection;
use collection::operations::types::{CollectionError, CollectionResult};
use collection::operations::point_ops::{PointInsertOperations, PointOperations, PointStruct};
use collection::operations::types::{CollectionError, CollectionResult, ScrollRequest};
use collection::operations::CollectionUpdateOperations;
use collection::shards::replica_set::ReplicaState;
use collection::shards::shard::{PeerId, ShardId};
use collection::shards::CollectionId;
use segment::types::{WithPayloadInterface, WithVector};
use tokio::sync::RwLock;

use crate::content_manager::collections_ops::Collections;

const MIGRATION_BATCH_SIZE: usize = 1000;
const COLLECTION_INITIATION_TIMEOUT: Duration = Duration::from_secs(60);

/// Handlers for migrating data from one collection into another within a single cluster

@@ -24,7 +35,7 @@ async fn get_local_source_shards(
let responsible_shard_opt = shard_info
.replicas
.iter()
.filter(|(_, replica_state)| replica_state == ReplicaState::Active)
.filter(|(_, replica_state)| **replica_state == ReplicaState::Active)
.max_by_key(|(peer_id, _)| *peer_id)
.map(|(peer_id, _)| *peer_id);

@@ -46,14 +57,102 @@ async fn get_local_source_shards(
Ok(local_responsible_shards)
}

fn handle_get_collection(collection: Option<&Collection>) -> CollectionResult<&Collection> {
match collection {
Some(collection) => Ok(collection),
None => Err(CollectionError::service_error(
"Collection is not found".to_string(),
)),
}
}

async fn migrate_shard(
collections: Arc<RwLock<Collections>>,
source_collection: &CollectionId,
target_collection: &CollectionId,
shard_id: ShardId,
) -> CollectionResult<()> {
let mut offset = None;
let limit = MIGRATION_BATCH_SIZE;

loop {
let request = ScrollRequest {
offset,
limit: Some(limit),
filter: None,
with_payload: Some(WithPayloadInterface::Bool(true)),
with_vector: WithVector::Bool(true),
};

let scroll_result = {
let collections_read = collections.read().await;
let collection = handle_get_collection(collections_read.get(source_collection))?;
collection.scroll_by(request, Some(shard_id)).await?
};

offset = scroll_result.next_page_offset;

if offset.is_none() {
break;
}

let records = scroll_result
.points
.into_iter()
.map(|point| PointStruct {
id: point.id,
vector: point.vector.unwrap(),
payload: point.payload,
})
.collect();

let upsert_request = CollectionUpdateOperations::PointOperation(
PointOperations::UpsertPoints(PointInsertOperations::PointsList(records)),
);

let collections_read = collections.read().await;
let collection = handle_get_collection(collections_read.get(target_collection))?;

collection.update_from_client(upsert_request, false).await?;
}
Ok(())
}

/// Spawns a task which will retrieve data from the appropriate local shards of the `source` collection
/// into the target collection.
pub async fn migrate(
source: &Collection,
target: &Collection,
collections: Arc<RwLock<Collections>>,
source_collection: CollectionId,
target_collection: CollectionId,
this_peer_id: PeerId,
) -> CollectionResult<()> {
let local_responsible_shards = get_local_source_shards(source, this_peer_id).await?;
let collections_read = collections.read().await;
let collection = handle_get_collection(collections_read.get(&source_collection))?;
let local_responsible_shards = get_local_source_shards(collection, this_peer_id).await?;

// Wait for all shards to be active
{
let collections_read = collections.read().await;
let collection = handle_get_collection(collections_read.get(&target_collection))?;
let is_initialized = collection.wait_collection_initiated(COLLECTION_INITIATION_TIMEOUT);
if !is_initialized {
return Err(CollectionError::service_error(format!(
"Collection {} was not initialized within {} sec timeout",
target_collection,
COLLECTION_INITIATION_TIMEOUT.as_secs()
)));
}
}

for shard_id in local_responsible_shards {
migrate_shard(
collections.clone(),
&source_collection,
&target_collection,
shard_id,
)
.await?;
}

Ok(())
}
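To make the batching logic easier to follow in isolation, here is a simplified, self-contained sketch of the scroll-and-upsert loop that `migrate_shard` implements, using stand-in types instead of the real collection API. Note that in this sketch each fetched page is written to the target before deciding whether to stop, so a final partial batch is not dropped; the WIP code above breaks out of the loop before upserting once `next_page_offset` is `None`.

```rust
// Stand-in for a scrolled page of points; the real code uses ScrollRequest/Record.
struct Page {
    points: Vec<u64>,
    next_page_offset: Option<u64>,
}

// Stand-in for Collection::scroll_by over a source shard holding `TOTAL` points.
const TOTAL: u64 = 2_500;

fn scroll_source(offset: Option<u64>, limit: usize) -> Page {
    let start = offset.unwrap_or(0);
    let points: Vec<u64> = (start..TOTAL).take(limit).collect();
    let next_page_offset = match points.last() {
        Some(last) if last + 1 < TOTAL => Some(last + 1),
        _ => None,
    };
    Page { points, next_page_offset }
}

// Stand-in for update_from_client with an UpsertPoints operation on the target.
fn upsert_target(batch: &[u64]) {
    println!("upserting {} points into the target collection", batch.len());
}

fn migrate_shard_sketch(batch_size: usize) {
    let mut offset = None;
    loop {
        let page = scroll_source(offset, batch_size);
        offset = page.next_page_offset;
        if !page.points.is_empty() {
            // Write the batch before checking whether to stop,
            // so the last (possibly partial) page is migrated too.
            upsert_target(&page.points);
        }
        if offset.is_none() {
            break;
        }
    }
}

fn main() {
    // Mirrors MIGRATION_BATCH_SIZE = 1000 from the diff above.
    migrate_shard_sketch(1_000);
}
```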
20 changes: 19 additions & 1 deletion lib/storage/src/content_manager/toc.rs
@@ -48,6 +48,7 @@ use crate::content_manager::collection_meta_ops::{
use crate::content_manager::collections_ops::{Checker, Collections};
use crate::content_manager::consensus::operation_sender::OperationSender;
use crate::content_manager::errors::StorageError;
use crate::content_manager::migration::migrate;
use crate::content_manager::shard_distribution::ShardDistributionProposal;
use crate::types::{PeerAddressById, StorageConfig};
use crate::ConsensusOperations;
@@ -258,7 +259,7 @@ impl TableOfContent {
optimizers_config: optimizers_config_diff,
replication_factor,
write_consistency_factor,
init_from: _init_from,
init_from,
} = operation;

self.collections
@@ -356,9 +357,26 @@ impl TableOfContent {
self.on_peer_created(collection_name.to_string(), self.this_peer_id, shard_id)
.await?;
}

if let Some(init_from) = init_from {
self.run_migration(init_from.collection, collection_name.to_string()).await;
}

Ok(true)
}

pub async fn run_migration(&self, from_collection: CollectionId, to_collection: CollectionId) {
let collections = self.collections.clone();
let this_peer_id = self.this_peer_id;
// There should be no search requests during the initial update, so we can use the search runtime.
self.search_runtime.spawn(async move {
match migrate(collections, from_collection, to_collection, this_peer_id).await {
Ok(_) => log::info!("Migration completed"),
Err(err) => log::error!("Migration failed: {}", err),
}
});
}

async fn on_peer_created(
&self,
collection_name: String,
1 change: 1 addition & 0 deletions lib/storage/tests/alias_tests.rs
@@ -84,6 +84,7 @@ mod tests {
on_disk_payload: None,
replication_factor: None,
write_consistency_factor: None,
init_from: None,
},
)),
None,
2 changes: 1 addition & 1 deletion src/common/helpers.rs
@@ -37,7 +37,7 @@ mod tests {
use std::thread::sleep;
use std::time::Duration;

use storage::content_manager::consensus::is_ready::IsReady;
use collection::common::is_ready::IsReady;

#[test]
fn test_is_ready() {
1 change: 1 addition & 0 deletions src/consensus.rs
@@ -921,6 +921,7 @@ mod tests {
on_disk_payload: None,
replication_factor: None,
write_consistency_factor: None,
init_from: None,
},
)),
None,