refactor: excel parse

This commit is contained in:
Blizzard
2026-04-16 10:01:11 +08:00
parent 680ecc320f
commit f62f95ec02
7941 changed files with 2899112 additions and 0 deletions
@@ -0,0 +1,799 @@
syntax = "proto3";
package qdrant;
option csharp_namespace = "Qdrant.Client.Grpc";
import "json_with_int.proto";
import "qdrant_common.proto";
// Storage datatype for vector values (see VectorParams.datatype and
// SparseIndexConfig.datatype).
// NOTE(review): values are not prefixed with the enum name and 0 carries a
// real meaning ("Default") — a published contract, so left as-is.
enum Datatype {
Default = 0; // Engine-selected default datatype (enum zero value)
Float32 = 1; // 32-bit floating point
Uint8 = 2; // Unsigned 8-bit integer
Float16 = 3; // 16-bit floating point
}
// ---------------------------------------------
// ------------- Collection Config -------------
// ---------------------------------------------
message VectorParams {
uint64 size = 1; // Size of the vectors
Distance distance = 2; // Distance function used for comparing vectors
optional HnswConfigDiff hnsw_config = 3; // Configuration of vector HNSW graph. If omitted - the collection configuration will be used
optional QuantizationConfig quantization_config = 4; // Configuration of vector quantization config. If omitted - the collection configuration will be used
optional bool on_disk = 5; // If true - serve vectors from disk. If set to false, the vectors will be loaded in RAM.
optional Datatype datatype = 6; // Data type of the vectors
optional MultiVectorConfig multivector_config = 7; // Configuration for multi-vector search
}
message VectorParamsDiff {
optional HnswConfigDiff hnsw_config = 1; // Update params for HNSW index. If empty object - it will be unset
optional QuantizationConfigDiff quantization_config = 2; // Update quantization params. If none - it is left unchanged.
optional bool on_disk = 3; // If true - serve vectors from disk. If set to false, the vectors will be loaded in RAM.
}
message VectorParamsMap {
map<string, VectorParams> map = 1;
}
message VectorParamsDiffMap {
map<string, VectorParamsDiff> map = 1;
}
message VectorsConfig {
oneof config {
VectorParams params = 1;
VectorParamsMap params_map = 2;
}
}
message VectorsConfigDiff {
oneof config {
VectorParamsDiff params = 1;
VectorParamsDiffMap params_map = 2;
}
}
enum Modifier {
None = 0;
Idf = 1; // Apply Inverse Document Frequency
}
message SparseVectorParams {
optional SparseIndexConfig index = 1; // Configuration of sparse index
optional Modifier modifier = 2; // If set - apply modifier to the vector values
}
message SparseVectorConfig {
map<string, SparseVectorParams> map = 1;
}
enum MultiVectorComparator {
MaxSim = 0;
}
message MultiVectorConfig {
MultiVectorComparator comparator = 1; // Comparator for multi-vector search
}
message GetCollectionInfoRequest {
string collection_name = 1; // Name of the collection
}
message CollectionExistsRequest {
string collection_name = 1;
}
message CollectionExists {
bool exists = 1;
}
message CollectionExistsResponse {
CollectionExists result = 1;
double time = 2; // Time spent to process
}
message ListCollectionsRequest {}
message CollectionDescription {
string name = 1; // Name of the collection
}
message GetCollectionInfoResponse {
CollectionInfo result = 1;
double time = 2; // Time spent to process
}
message ListCollectionsResponse {
repeated CollectionDescription collections = 1;
double time = 2; // Time spent to process
}
enum Distance {
UnknownDistance = 0;
Cosine = 1;
Euclid = 2;
Dot = 3;
Manhattan = 4;
}
enum CollectionStatus {
UnknownCollectionStatus = 0;
Green = 1; // All segments are ready
Yellow = 2; // Optimization in process
Red = 3; // Something went wrong
Grey = 4; // Optimization is pending
}
enum PayloadSchemaType {
UnknownType = 0;
Keyword = 1;
Integer = 2;
Float = 3;
Geo = 4;
Text = 5;
Bool = 6;
Datetime = 7;
Uuid = 8;
}
enum QuantizationType {
UnknownQuantization = 0;
Int8 = 1;
}
enum CompressionRatio {
x4 = 0;
x8 = 1;
x16 = 2;
x32 = 3;
x64 = 4;
}
message MaxOptimizationThreads {
enum Setting {
Auto = 0;
}
oneof variant {
uint64 value = 1;
Setting setting = 2;
}
}
message OptimizerStatus {
bool ok = 1;
string error = 2;
}
message CollectionWarning {
string message = 1;
}
message HnswConfigDiff {
/*
Number of edges per node in the index graph. Larger the value - more accurate the search, more space required.
*/
optional uint64 m = 1;
/*
Number of neighbours to consider during the index building. Larger the value - more accurate the search, more time required to build the index.
*/
optional uint64 ef_construct = 2;
/*
Minimal size threshold (in KiloBytes) below which full-scan is preferred over HNSW search.
This measures the total size of vectors being queried against.
When the maximum estimated amount of points that a condition satisfies is smaller than
`full_scan_threshold`, the query planner will use full-scan search instead of HNSW index
traversal for better performance.
Note: 1Kb = 1 vector of size 256
*/
optional uint64 full_scan_threshold = 3;
/*
Number of parallel threads used for background index building.
If 0 - automatically select from 8 to 16.
Best to keep between 8 and 16 to prevent likelihood of building broken/inefficient HNSW graphs.
On small CPUs, less threads are used.
*/
optional uint64 max_indexing_threads = 4;
/*
Store HNSW index on disk. If set to false, the index will be stored in RAM.
*/
optional bool on_disk = 5;
/*
Number of additional payload-aware links per node in the index graph. If not set - regular M parameter will be used.
*/
optional uint64 payload_m = 6;
/*
Store copies of original and quantized vectors within the HNSW index file. Default: false.
Enabling this option will trade the search speed for disk usage by reducing amount of
random seeks during the search.
Requires quantized vectors to be enabled. Multi-vectors are not supported.
*/
optional bool inline_storage = 7;
}
message SparseIndexConfig {
/*
Prefer a full-scan search up to (excluding) this number of vectors.
Note: this is a number of vectors, not KiloBytes.
*/
optional uint64 full_scan_threshold = 1;
/*
Store inverted index on disk. If set to false, the index will be stored in RAM.
*/
optional bool on_disk = 2;
/*
Datatype used to store weights in the index.
*/
optional Datatype datatype = 3;
}
message WalConfigDiff {
optional uint64 wal_capacity_mb = 1; // Size of a single WAL block file
optional uint64 wal_segments_ahead = 2; // Number of segments to create in advance
optional uint64 wal_retain_closed = 3; // Number of closed segments to retain
}
message OptimizersConfigDiff {
/*
The minimal fraction of deleted vectors in a segment, required to perform segment optimization
*/
optional double deleted_threshold = 1;
/*
The minimal number of vectors in a segment, required to perform segment optimization
*/
optional uint64 vacuum_min_vector_number = 2;
/*
Target amount of segments the optimizer will try to keep.
Real amount of segments may vary depending on multiple parameters:
- Amount of stored points.
- Current write RPS.
It is recommended to select the default number of segments as a factor of the number of search threads,
so that each segment would be handled evenly by one of the threads.
*/
optional uint64 default_segment_number = 3;
/*
Deprecated:
Do not create segments larger this size (in kilobytes).
Large segments might require disproportionately long indexation times,
therefore it makes sense to limit the size of segments.
If indexing speed is more important - make this parameter lower.
If search speed is more important - make this parameter higher.
Note: 1Kb = 1 vector of size 256
If not set, will be automatically selected considering the number of available CPUs.
*/
optional uint64 max_segment_size = 4;
/*
Maximum size (in kilobytes) of vectors to store in-memory per segment.
Segments larger than this threshold will be stored as read-only memmapped file.
Memmap storage is disabled by default, to enable it, set this threshold to a reasonable value.
To disable memmap storage, set this to `0`.
Note: 1Kb = 1 vector of size 256
*/
optional uint64 memmap_threshold = 5;
/*
Maximum size (in kilobytes) of vectors allowed for plain index, exceeding this threshold will enable vector indexing
Default value is 20,000, based on <https://github.com/google-research/google-research/blob/master/scann/docs/algorithms.md>.
To disable vector indexing, set to `0`.
Note: 1kB = 1 vector of size 256.
*/
optional uint64 indexing_threshold = 6;
/*
Interval between forced flushes.
*/
optional uint64 flush_interval_sec = 7;
// Deprecated in favor of `max_optimization_threads`
optional uint64 deprecated_max_optimization_threads = 8;
/*
Max number of threads (jobs) for running optimizations per shard.
Note: each optimization job will also use `max_indexing_threads` threads by itself for index building.
If "auto" - have no limit and choose dynamically to saturate CPU.
If 0 - no optimization threads, optimizations will be disabled.
*/
optional MaxOptimizationThreads max_optimization_threads = 9;
}
message ScalarQuantization {
QuantizationType type = 1; // Type of quantization
optional float quantile = 2; // Quantile used to estimate quantization bounds, expected range [0.5, 1.0]; if not set, the whole range of values is used. (Previous comment said "Number of bits", which does not match the field's semantics.)
optional bool always_ram = 3; // If true - quantized vectors always will be stored in RAM, ignoring the config of main storage
}
message ProductQuantization {
CompressionRatio compression = 1; // Compression ratio
optional bool always_ram = 2; // If true - quantized vectors always will be stored in RAM, ignoring the config of main storage
}
enum BinaryQuantizationEncoding {
OneBit = 0;
TwoBits = 1;
OneAndHalfBits = 2;
}
// Encoding applied to query vectors when searching against binary-quantized
// storage (asymmetric quantization).
message BinaryQuantizationQueryEncoding {
enum Setting {
Default = 0; // Engine-selected default encoding (enum zero value)
Binary = 1; // Same binary encoding as stored vectors
Scalar4Bits = 2; // 4-bit scalar encoding of the query
Scalar8Bits = 3; // 8-bit scalar encoding of the query
}
oneof variant {
// NOTE(review): numbering starts at 4 while 1-3 are unused and not
// declared `reserved` — confirm they belong to removed/planned variants
// and consider reserving them to prevent accidental reuse.
Setting setting = 4;
}
}
message BinaryQuantization {
optional bool always_ram = 1; // If true - quantized vectors always will be stored in RAM, ignoring the config of main storage
optional BinaryQuantizationEncoding encoding = 2; // Binary quantization encoding method
/*
Asymmetric quantization configuration allows a query to have different quantization than stored vectors.
It can increase the accuracy of search at the cost of performance.
*/
optional BinaryQuantizationQueryEncoding query_encoding = 3;
}
message QuantizationConfig {
oneof quantization {
ScalarQuantization scalar = 1;
ProductQuantization product = 2;
BinaryQuantization binary = 3;
}
}
message Disabled {}
message QuantizationConfigDiff {
oneof quantization {
ScalarQuantization scalar = 1;
ProductQuantization product = 2;
Disabled disabled = 3;
BinaryQuantization binary = 4;
}
}
enum ShardingMethod {
Auto = 0; // Auto-sharding based on record ids
Custom = 1; // Shard by user-defined key
}
message StrictModeConfig {
optional bool enabled = 1; // Whether strict mode is enabled for a collection or not.
optional uint32 max_query_limit = 2; // Max allowed `limit` parameter for all APIs that don't have their own max limit.
optional uint32 max_timeout = 3; // Max allowed `timeout` parameter.
optional bool unindexed_filtering_retrieve = 4; // Allow usage of unindexed fields in retrieval based (e.g. search) filters.
optional bool unindexed_filtering_update = 5; // Allow usage of unindexed fields in filtered updates (e.g. delete by payload).
optional uint32 search_max_hnsw_ef = 6; // Max HNSW ef value allowed in search parameters.
optional bool search_allow_exact = 7; // Whether exact search is allowed.
optional float search_max_oversampling = 8; // Max oversampling value allowed in search
optional uint64 upsert_max_batchsize = 9; // Max batchsize when upserting
optional uint64 max_collection_vector_size_bytes = 10; // Max size of a collections vector storage in bytes, ignoring replicas.
optional uint32 read_rate_limit = 11; // Max number of read operations per minute per replica
optional uint32 write_rate_limit = 12; // Max number of write operations per minute per replica
optional uint64 max_collection_payload_size_bytes = 13; // Max size of a collections payload storage in bytes, ignoring replicas.
optional uint64 filter_max_conditions = 14; // Max conditions a filter can have.
optional uint64 condition_max_size = 15; // Max size of a condition, eg. items in `MatchAny`.
optional StrictModeMultivectorConfig multivector_config = 16; // Multivector strict mode configuration
optional StrictModeSparseConfig sparse_config = 17; // Sparse vector strict mode configuration
optional uint64 max_points_count = 18; // Max number of points estimated in a collection
optional uint64 max_payload_index_count = 19; // Max number of payload indexes in a collection
}
message StrictModeSparseConfig {
map<string, StrictModeSparse> sparse_config = 1;
}
// Strict-mode limits for a single sparse vector field.
message StrictModeSparse {
// NOTE(review): field number 10 is used while 1-9 are unused and not
// declared `reserved` — confirm they are intentionally skipped and
// consider reserving them.
optional uint64 max_length = 10; // Max length of sparse vector
}
message StrictModeMultivectorConfig {
map<string, StrictModeMultivector> multivector_config = 1;
}
message StrictModeMultivector {
optional uint64 max_vectors = 1; // Max number of vectors in a multivector
}
message CreateCollection {
string collection_name = 1; // Name of the collection
reserved 2; // Deprecated
reserved 3; // Deprecated
optional HnswConfigDiff hnsw_config = 4; // Configuration of vector index
optional WalConfigDiff wal_config = 5; // Configuration of the Write-Ahead-Log
optional OptimizersConfigDiff optimizers_config = 6; // Configuration of the optimizers
optional uint32 shard_number = 7; // Number of shards in the collection, default is 1 for standalone, otherwise equal to the number of nodes. Minimum is 1
optional bool on_disk_payload = 8; // If true - point's payload will not be stored in memory
optional uint64 timeout = 9; // Wait timeout for operation commit in seconds, if not specified - default value will be supplied
optional VectorsConfig vectors_config = 10; // Configuration for vectors
optional uint32 replication_factor = 11; // Number of replicas of each shard that network tries to maintain, default = 1
optional uint32 write_consistency_factor = 12; // How many replicas should apply the operation for us to consider it successful, default = 1
reserved 13; // Deprecated: init_from
optional QuantizationConfig quantization_config = 14; // Quantization configuration of vector
optional ShardingMethod sharding_method = 15; // Sharding method
optional SparseVectorConfig sparse_vectors_config = 16; // Configuration for sparse vectors
optional StrictModeConfig strict_mode_config = 17; // Configuration for strict mode
map<string, Value> metadata = 18; // Arbitrary JSON metadata for the collection
}
message UpdateCollection {
string collection_name = 1; // Name of the collection
optional OptimizersConfigDiff optimizers_config = 2; // New configuration parameters for the collection. This operation is blocking, it will only proceed once all current optimizations are complete
optional uint64 timeout = 3; // Wait timeout for operation commit in seconds if blocking, if not specified - default value will be supplied
optional CollectionParamsDiff params = 4; // New configuration parameters for the collection
optional HnswConfigDiff hnsw_config = 5; // New HNSW parameters for the collection index
optional VectorsConfigDiff vectors_config = 6; // New vector parameters
optional QuantizationConfigDiff quantization_config = 7; // Quantization configuration of vector
optional SparseVectorConfig sparse_vectors_config = 8; // New sparse vector parameters
optional StrictModeConfig strict_mode_config = 9; // New strict mode configuration
map<string, Value> metadata = 10; // Arbitrary JSON-like metadata for the collection, will be merged with already stored metadata
}
message DeleteCollection {
string collection_name = 1; // Name of the collection
optional uint64 timeout = 2; // Wait timeout for operation commit in seconds, if not specified - default value will be supplied
}
message CollectionOperationResponse {
bool result = 1; // if operation made changes
double time = 2; // Time spent to process
}
message CollectionParams {
reserved 1; // Deprecated
reserved 2; // Deprecated
uint32 shard_number = 3; // Number of shards in collection
bool on_disk_payload = 4; // If true - point's payload will not be stored in memory
optional VectorsConfig vectors_config = 5; // Configuration for vectors
optional uint32 replication_factor = 6; // Number of replicas of each shard that network tries to maintain
optional uint32 write_consistency_factor = 7; // How many replicas should apply the operation for us to consider it successful
optional uint32 read_fan_out_factor = 8; // Fan-out every read request to these many additional remote nodes (and return first available response)
optional ShardingMethod sharding_method = 9; // Sharding method
optional SparseVectorConfig sparse_vectors_config = 10; // Configuration for sparse vectors
}
message CollectionParamsDiff {
optional uint32 replication_factor = 1; // Number of replicas of each shard that network tries to maintain
optional uint32 write_consistency_factor = 2; // How many replicas should apply the operation for us to consider it successful
optional bool on_disk_payload = 3; // If true - point's payload will not be stored in memory
optional uint32 read_fan_out_factor = 4; // Fan-out every read request to these many additional remote nodes (and return first available response)
}
message CollectionConfig {
CollectionParams params = 1; // Collection parameters
HnswConfigDiff hnsw_config = 2; // Configuration of vector index
OptimizersConfigDiff optimizer_config = 3; // Configuration of the optimizers
WalConfigDiff wal_config = 4; // Configuration of the Write-Ahead-Log
optional QuantizationConfig quantization_config = 5; // Configuration of the vector quantization
optional StrictModeConfig strict_mode_config = 6; // Configuration of strict mode.
map<string, Value> metadata = 7; // Arbitrary JSON metadata for the collection
}
enum TokenizerType {
Unknown = 0;
Prefix = 1;
Whitespace = 2;
Word = 3;
Multilingual = 4;
}
message KeywordIndexParams {
optional bool is_tenant = 1; // If true - used for tenant optimization.
optional bool on_disk = 2; // If true - store index on disk.
}
message IntegerIndexParams {
optional bool lookup = 1; // If true - support direct lookups. Default is true.
optional bool range = 2; // If true - support ranges filters. Default is true.
optional bool is_principal = 3; // If true - use this key to organize storage of the collection data. This option assumes that this key will be used in majority of filtered requests. Default is false.
optional bool on_disk = 4; // If true - store index on disk. Default is false.
}
message FloatIndexParams {
optional bool on_disk = 1; // If true - store index on disk.
optional bool is_principal = 2; // If true - use this key to organize storage of the collection data. This option assumes that this key will be used in majority of filtered requests.
}
message GeoIndexParams {
optional bool on_disk = 1; // If true - store index on disk.
}
message StopwordsSet {
repeated string languages = 1; // List of languages to use stopwords from
repeated string custom = 2; // List of custom stopwords
}
message TextIndexParams {
TokenizerType tokenizer = 1; // Tokenizer type
optional bool lowercase = 2; // If true - all tokens will be lowercase
optional uint64 min_token_len = 3; // Minimal token length
optional uint64 max_token_len = 4; // Maximal token length
optional bool on_disk = 5; // If true - store index on disk.
optional StopwordsSet stopwords = 6; // Stopwords for the text index
optional bool phrase_matching = 7; // If true - support phrase matching.
optional StemmingAlgorithm stemmer = 8; // Set an algorithm for stemming.
optional bool ascii_folding = 9; // If true, normalize tokens by folding accented characters to ASCII (e.g., "ação" -> "acao"). Default: false.
}
message StemmingAlgorithm {
oneof stemming_params {
SnowballParams snowball = 1; // Parameters for snowball stemming
}
}
message SnowballParams {
string language = 1; // Which language the algorithm should stem.
}
message BoolIndexParams {
optional bool on_disk = 1; // If true - store index on disk.
}
message DatetimeIndexParams {
optional bool on_disk = 1; // If true - store index on disk.
optional bool is_principal = 2; // If true - use this key to organize storage of the collection data. This option assumes that this key will be used in majority of filtered requests.
}
message UuidIndexParams {
optional bool is_tenant = 1; // If true - used for tenant optimization.
optional bool on_disk = 2; // If true - store index on disk.
}
message PayloadIndexParams {
oneof index_params {
KeywordIndexParams keyword_index_params = 3; // Parameters for keyword index
IntegerIndexParams integer_index_params = 2; // Parameters for integer index
FloatIndexParams float_index_params = 4; // Parameters for float index
GeoIndexParams geo_index_params = 5; // Parameters for geo index
TextIndexParams text_index_params = 1; // Parameters for text index
BoolIndexParams bool_index_params = 6; // Parameters for bool index
DatetimeIndexParams datetime_index_params = 7; // Parameters for datetime index
UuidIndexParams uuid_index_params = 8; // Parameters for uuid index
}
}
message PayloadSchemaInfo {
PayloadSchemaType data_type = 1; // Field data type
optional PayloadIndexParams params = 2; // Field index parameters
optional uint64 points = 3; // Number of points indexed within this field indexed
}
message CollectionInfo {
CollectionStatus status = 1; // operating condition of the collection
OptimizerStatus optimizer_status = 2; // status of collection optimizers
reserved 3; // Deprecated
uint64 segments_count = 4; // Number of independent segments
reserved 5; // Deprecated
reserved 6; // Deprecated
CollectionConfig config = 7; // Configuration
map<string, PayloadSchemaInfo> payload_schema = 8; // Collection data types
optional uint64 points_count = 9; // Approximate number of points in the collection
optional uint64 indexed_vectors_count = 10; // Approximate number of indexed vectors in the collection.
repeated CollectionWarning warnings = 11; // Warnings related to the collection
}
message ChangeAliases {
repeated AliasOperations actions = 1; // List of actions
optional uint64 timeout = 2; // Wait timeout for operation commit in seconds, if not specified - default value will be supplied
}
message AliasOperations {
oneof action {
CreateAlias create_alias = 1;
RenameAlias rename_alias = 2;
DeleteAlias delete_alias = 3;
}
}
message CreateAlias {
string collection_name = 1; // Name of the collection
string alias_name = 2; // New name of the alias
}
message RenameAlias {
string old_alias_name = 1; // Name of the alias to rename
string new_alias_name = 2; // Name of the alias
}
message DeleteAlias {
string alias_name = 1; // Name of the alias
}
message ListAliasesRequest {}
message ListCollectionAliasesRequest {
string collection_name = 1; // Name of the collection
}
message AliasDescription {
string alias_name = 1; // Name of the alias
string collection_name = 2; // Name of the collection
}
message ListAliasesResponse {
repeated AliasDescription aliases = 1;
double time = 2; // Time spent to process
}
message CollectionClusterInfoRequest {
string collection_name = 1; // Name of the collection
}
// State of a single shard replica within the cluster.
// NOTE(review): zero value `Active` carries business meaning — a published
// contract, so left as-is.
enum ReplicaState {
Active = 0; // Active and sound
Dead = 1; // Failed for some reason
Partial = 2; // The shard is partially loaded and is currently receiving data from other shards
Initializing = 3; // Collection is being created
Listener = 4; // A shard which receives data, but is not used for search; Useful for backup shards
PartialSnapshot = 5; // Deprecated: snapshot shard transfer is in progress; Updates should not be sent to (and are ignored by) the shard
Recovery = 6; // Shard is being recovered by an external node; Normally rejects updates, accepts updates if force is true
Resharding = 7; // Points are being migrated to this shard as part of scale-up resharding
ReshardingScaleDown = 8; // Points are being migrated to this shard as part of scale-down resharding
ActiveRead = 9; // Active for readers, Partial for writers
}
message ShardKey {
oneof key {
string keyword = 1; // String key
uint64 number = 2; // Number key
}
}
// Information about a shard replica hosted on this (local) peer.
message LocalShardInfo {
uint32 shard_id = 1; // Local shard id
uint64 points_count = 2; // Number of points in the shard
ReplicaState state = 3; // Current state of the replica (see ReplicaState)
optional ShardKey shard_key = 4; // User-defined shard key
}
// Information about a shard replica hosted on another (remote) peer.
message RemoteShardInfo {
uint32 shard_id = 1; // Shard id — NOTE(review): comment previously said "Local shard id"; presumably the id of the shard on the remote peer — confirm
uint64 peer_id = 2; // Remote peer id
ReplicaState state = 3; // Current state of the replica (see ReplicaState)
optional ShardKey shard_key = 4; // User-defined shard key
}
// Description of an in-progress shard transfer between peers.
message ShardTransferInfo {
uint32 shard_id = 1; // Local shard id
optional uint32 to_shard_id = 5; // Target shard id — presumably set only for resharding transfers; TODO confirm
uint64 from = 2; // Source peer id — presumably mirrors MoveShard.from_peer_id; TODO confirm
uint64 to = 3; // Destination peer id — presumably mirrors MoveShard.to_peer_id; TODO confirm
bool sync = 4; // If `true` transfer is a synchronization of replicas; If `false` transfer is a moving of a shard from one peer to another
}
message ReshardingInfo {
uint32 shard_id = 1;
uint64 peer_id = 2;
optional ShardKey shard_key = 3;
ReshardingDirection direction = 4;
}
/*
Resharding direction, scale up or down in number of shards
*/
enum ReshardingDirection {
Up = 0; // Scale up, add a new shard
Down = 1; // Scale down, remove a shard
}
message CollectionClusterInfoResponse {
uint64 peer_id = 1; // ID of this peer
uint64 shard_count = 2; // Total number of shards
repeated LocalShardInfo local_shards = 3; // Local shards
repeated RemoteShardInfo remote_shards = 4; // Remote shards
repeated ShardTransferInfo shard_transfers = 5; // Shard transfers
repeated ReshardingInfo resharding_operations = 6; // Resharding operations
}
message MoveShard {
uint32 shard_id = 1; // Local shard id
optional uint32 to_shard_id = 5;
uint64 from_peer_id = 2;
uint64 to_peer_id = 3;
optional ShardTransferMethod method = 4;
}
message ReplicateShard {
uint32 shard_id = 1; // Local shard id
optional uint32 to_shard_id = 5;
uint64 from_peer_id = 2;
uint64 to_peer_id = 3;
optional ShardTransferMethod method = 4;
}
message AbortShardTransfer {
uint32 shard_id = 1; // Local shard id
optional uint32 to_shard_id = 4;
uint64 from_peer_id = 2;
uint64 to_peer_id = 3;
}
message RestartTransfer {
uint32 shard_id = 1; // Local shard id
optional uint32 to_shard_id = 5;
uint64 from_peer_id = 2;
uint64 to_peer_id = 3;
ShardTransferMethod method = 4;
}
message ReplicatePoints {
ShardKey from_shard_key = 1; // Source shard key
ShardKey to_shard_key = 2; // Target shard key
optional Filter filter = 3; // If set - only points matching the filter will be replicated
}
enum ShardTransferMethod {
StreamRecords = 0; // Stream shard records in batches
Snapshot = 1; // Snapshot the shard and recover it on the target peer
WalDelta = 2; // Resolve WAL delta between peers and transfer the difference
ReshardingStreamRecords = 3; // Stream shard records in batches for resharding
}
message Replica {
uint32 shard_id = 1;
uint64 peer_id = 2;
}
message CreateShardKey {
ShardKey shard_key = 1; // User-defined shard key
optional uint32 shards_number = 2; // Number of shards to create per shard key
optional uint32 replication_factor = 3; // Number of replicas of each shard to create
repeated uint64 placement = 4; // List of peer ids, allowed to create shards. If empty - all peers are allowed
optional ReplicaState initial_state = 5; // Initial state of created replicas. Warning: use with care.
}
message DeleteShardKey {
ShardKey shard_key = 1; // Shard key to delete
}
message UpdateCollectionClusterSetupRequest {
string collection_name = 1; // Name of the collection
oneof operation {
MoveShard move_shard = 2;
ReplicateShard replicate_shard = 3;
AbortShardTransfer abort_transfer = 4;
Replica drop_replica = 5;
CreateShardKey create_shard_key = 7;
DeleteShardKey delete_shard_key = 8;
RestartTransfer restart_transfer = 9;
ReplicatePoints replicate_points = 10;
}
optional uint64 timeout = 6; // Wait timeout for operation commit in seconds, if not specified - default value will be supplied
}
message UpdateCollectionClusterSetupResponse {
bool result = 1;
}
message CreateShardKeyRequest {
string collection_name = 1; // Name of the collection
CreateShardKey request = 2; // Request to create shard key
optional uint64 timeout = 3; // Wait timeout for operation commit in seconds, if not specified - default value will be supplied
}
message DeleteShardKeyRequest {
string collection_name = 1; // Name of the collection
DeleteShardKey request = 2; // Request to delete shard key
optional uint64 timeout = 3; // Wait timeout for operation commit in seconds, if not specified - default value will be supplied
}
message CreateShardKeyResponse {
bool result = 1;
}
message DeleteShardKeyResponse {
bool result = 1;
}
@@ -0,0 +1,61 @@
syntax = "proto3";
import "collections.proto";
package qdrant;
option csharp_namespace = "Qdrant.Client.Grpc";
// Collection management API: CRUD for collections, aliases, shard keys,
// and cluster setup.
service Collections {
/*
Get detailed information about specified existing collection
*/
rpc Get (GetCollectionInfoRequest) returns (GetCollectionInfoResponse) {}
/*
Get the names of all existing collections
*/
rpc List (ListCollectionsRequest) returns (ListCollectionsResponse) {}
/*
Create new collection with given parameters
*/
rpc Create (CreateCollection) returns (CollectionOperationResponse) {}
/*
Update parameters of the existing collection
*/
rpc Update (UpdateCollection) returns (CollectionOperationResponse) {}
/*
Drop collection and all associated data
*/
rpc Delete (DeleteCollection) returns (CollectionOperationResponse) {}
/*
Update Aliases of the existing collection
*/
rpc UpdateAliases (ChangeAliases) returns (CollectionOperationResponse) {}
/*
Get list of all aliases for a collection
*/
rpc ListCollectionAliases (ListCollectionAliasesRequest) returns (ListAliasesResponse) {}
/*
Get list of all aliases for all existing collections
*/
rpc ListAliases (ListAliasesRequest) returns (ListAliasesResponse) {}
/*
Get cluster information for a collection
*/
rpc CollectionClusterInfo (CollectionClusterInfoRequest) returns (CollectionClusterInfoResponse) {}
/*
Check the existence of a collection
*/
rpc CollectionExists (CollectionExistsRequest) returns (CollectionExistsResponse) {}
/*
Update cluster setup for a collection
*/
rpc UpdateCollectionClusterSetup (UpdateCollectionClusterSetupRequest) returns (UpdateCollectionClusterSetupResponse) {}
/*
Create shard key
*/
rpc CreateShardKey (CreateShardKeyRequest) returns (CreateShardKeyResponse) {}
/*
Delete shard key
*/
rpc DeleteShardKey (DeleteShardKeyRequest) returns (DeleteShardKeyResponse) {}
}
@@ -0,0 +1,62 @@
// Fork of the google.protobuf.Value with explicit support for integer values
syntax = "proto3";
package qdrant;
option csharp_namespace = "Qdrant.Client.Grpc";
// `Struct` represents a structured data value, consisting of fields
// which map to dynamically typed values. In some languages, `Struct`
// might be supported by a native representation. For example, in
// scripting languages like JS a struct is represented as an
// object. The details of that representation are described together
// with the proto support for the language.
//
// The JSON representation for `Struct` is a JSON object.
message Struct {
// Unordered map of dynamically typed values.
map<string, Value> fields = 1;
}
// `Value` represents a dynamically typed value which can be either
// null, a number, a string, a boolean, a recursive struct value, or a
// list of values. A producer of value is expected to set one of those
// variants, absence of any variant indicates an error.
//
// The JSON representation for `Value` is a JSON value.
message Value {
// The kind of value.
oneof kind {
// Represents a null value.
NullValue null_value = 1;
// Represents a double value.
double double_value = 2;
// Represents an integer value
int64 integer_value = 3;
// Represents a string value.
string string_value = 4;
// Represents a boolean value.
bool bool_value = 5;
// Represents a structured value.
Struct struct_value = 6;
// Represents a repeated `Value`.
ListValue list_value = 7;
}
}
// `NullValue` is a singleton enumeration to represent the null value for the
// `Value` type union.
//
// The JSON representation for `NullValue` is JSON `null`.
enum NullValue {
// Null value.
NULL_VALUE = 0;
}
// `ListValue` is a wrapper around a repeated field of values.
//
// The JSON representation for `ListValue` is a JSON array.
message ListValue {
// Repeated field of dynamically typed values.
repeated Value values = 1;
}
File diff suppressed because it is too large.
@@ -0,0 +1,136 @@
syntax = "proto3";
import "points.proto";
package qdrant;
option csharp_namespace = "Qdrant.Client.Grpc";
// gRPC service covering all point-level operations: point and vector CRUD,
// payload management, field indexes, and every flavor of search/query.
service Points {
  /*
  Perform insert + updates on points. If a point with a given ID already exists - it will be overwritten.
  */
  rpc Upsert (UpsertPoints) returns (PointsOperationResponse) {}
  /*
  Delete points
  */
  rpc Delete (DeletePoints) returns (PointsOperationResponse) {}
  /*
  Retrieve points
  */
  rpc Get (GetPoints) returns (GetResponse) {}
  /*
  Update named vectors for point
  */
  rpc UpdateVectors (UpdatePointVectors) returns (PointsOperationResponse) {}
  /*
  Delete named vectors for points
  */
  rpc DeleteVectors (DeletePointVectors) returns (PointsOperationResponse) {}
  /*
  Set payload for points
  */
  rpc SetPayload (SetPayloadPoints) returns (PointsOperationResponse) {}
  /*
  Overwrite payload for points
  */
  rpc OverwritePayload (SetPayloadPoints) returns (PointsOperationResponse) {}
  /*
  Delete specified key payload for points
  */
  rpc DeletePayload (DeletePayloadPoints) returns (PointsOperationResponse) {}
  /*
  Remove all payload for specified points
  */
  rpc ClearPayload (ClearPayloadPoints) returns (PointsOperationResponse) {}
  /*
  Create index for field in collection
  */
  rpc CreateFieldIndex (CreateFieldIndexCollection) returns (PointsOperationResponse) {}
  /*
  Delete field index for collection
  */
  rpc DeleteFieldIndex (DeleteFieldIndexCollection) returns (PointsOperationResponse) {}
  /*
  Retrieve closest points based on vector similarity and given filtering conditions
  */
  rpc Search (SearchPoints) returns (SearchResponse) {}
  /*
  Retrieve closest points based on vector similarity and given filtering conditions,
  for multiple search requests in a single call
  */
  rpc SearchBatch (SearchBatchPoints) returns (SearchBatchResponse) {}
  /*
  Retrieve closest points based on vector similarity and given filtering conditions, grouped by a given field
  */
  rpc SearchGroups (SearchPointGroups) returns (SearchGroupsResponse) {}
  /*
  Iterate over all or filtered points
  */
  rpc Scroll (ScrollPoints) returns (ScrollResponse) {}
  /*
  Look for the points which are closer to stored positive examples and at the same time further to negative examples.
  */
  rpc Recommend (RecommendPoints) returns (RecommendResponse) {}
  /*
  Look for the points which are closer to stored positive examples and at the same time further to negative examples,
  for multiple recommendation requests in a single call.
  */
  rpc RecommendBatch (RecommendBatchPoints) returns (RecommendBatchResponse) {}
  /*
  Look for the points which are closer to stored positive examples and at the same time further to negative examples, grouped by a given field
  */
  rpc RecommendGroups (RecommendPointGroups) returns (RecommendGroupsResponse) {}
  /*
  Use context and a target to find the most similar points to the target, constrained by the context.
  When using only the context (without a target), a special search - called context search - is performed where
  pairs of points are used to generate a loss that guides the search towards the zone where
  most positive examples overlap. This means that the score minimizes the scenario of
  finding a point closer to a negative than to a positive part of a pair.
  Since the score of a context relates to loss, the maximum score a point can get is 0.0,
  and it becomes normal that many points can have a score of 0.0.
  When using target (with or without context), the score behaves a little differently: The
  integer part of the score represents the rank with respect to the context, while the
  decimal part of the score relates to the distance to the target. The context part of the score for
  each pair is calculated +1 if the point is closer to a positive than to a negative part of a pair,
  and -1 otherwise.
  */
  rpc Discover (DiscoverPoints) returns (DiscoverResponse) {}
  /*
  Batch request points based on { positive, negative } pairs of examples, and/or a target
  */
  rpc DiscoverBatch (DiscoverBatchPoints) returns (DiscoverBatchResponse) {}
  /*
  Count points in collection with given filtering conditions
  */
  rpc Count (CountPoints) returns (CountResponse) {}
  /*
  Perform multiple update operations in one request
  */
  rpc UpdateBatch (UpdateBatchPoints) returns (UpdateBatchResponse) {}
  /*
  Universally query points. This endpoint covers all capabilities of search, recommend, discover, filters. But also enables hybrid and multi-stage queries.
  */
  rpc Query (QueryPoints) returns (QueryResponse) {}
  /*
  Universally query points in a batch fashion. This endpoint covers all capabilities of search, recommend, discover, filters. But also enables hybrid and multi-stage queries.
  */
  rpc QueryBatch (QueryBatchPoints) returns (QueryBatchResponse) {}
  /*
  Universally query points in a group fashion. This endpoint covers all capabilities of search, recommend, discover, filters. But also enables hybrid and multi-stage queries.
  */
  rpc QueryGroups (QueryPointGroups) returns (QueryGroupsResponse) {}
  /*
  Perform facet counts. For each value in the field, count the number of points that have this value and match the conditions.
  */
  rpc Facet (FacetCounts) returns (FacetResponse) {}
  /*
  Compute distance matrix for sampled points with a pair based output format
  */
  rpc SearchMatrixPairs (SearchMatrixPoints) returns (SearchMatrixPairsResponse) {}
  /*
  Compute distance matrix for sampled points with an offset based output format
  */
  rpc SearchMatrixOffsets (SearchMatrixPoints) returns (SearchMatrixOffsetsResponse) {}
}
@@ -0,0 +1,20 @@
syntax = "proto3";
import "collections_service.proto";
import "points_service.proto";
import "snapshots_service.proto";
package qdrant;
option csharp_namespace = "Qdrant.Client.Grpc";
// Top-level service exposing instance-wide endpoints.
service Qdrant {
  /*
  Check that the service is alive and retrieve its title, version, and
  (optionally) the build commit.
  */
  rpc HealthCheck (HealthCheckRequest) returns (HealthCheckReply) {}
}
message HealthCheckRequest {}
// Response of `Qdrant.HealthCheck`: basic identity of the running instance.
message HealthCheckReply {
  string title = 1; // Name of the service
  string version = 2; // Version of the running service
  optional string commit = 3; // Build commit hash, if known
}
@@ -0,0 +1,142 @@
syntax = "proto3";
package qdrant;
option csharp_namespace = "Qdrant.Client.Grpc";
option java_outer_classname = "Common";
import "google/protobuf/timestamp.proto";
// Unique identifier of a point: either a number or a UUID string.
message PointId {
  oneof point_id_options {
    uint64 num = 1; // Numerical ID of the point
    string uuid = 2; // UUID
  }
}
// Geographical coordinates of a point.
message GeoPoint {
  double lon = 1; // Longitude
  double lat = 2; // Latitude
}
// Boolean combination of conditions, applied recursively
// (a `Condition` may itself contain a nested `Filter`).
message Filter {
  repeated Condition should = 1; // At least one of those conditions should match
  repeated Condition must = 2; // All conditions must match
  repeated Condition must_not = 3; // All conditions must NOT match
  optional MinShould min_should = 4; // At least minimum amount of given conditions should match
}
// A set of conditions of which at least `min_count` must match.
message MinShould {
  repeated Condition conditions = 1; // Candidate conditions
  uint64 min_count = 2; // Minimum number of conditions that must match
}
// A single filtering condition. Exactly one variant is set.
message Condition {
  oneof condition_one_of {
    FieldCondition field = 1; // Constraint on a payload field value
    IsEmptyCondition is_empty = 2; // Match points where the field is empty
    HasIdCondition has_id = 3; // Match points by their IDs
    Filter filter = 4; // Nested filter - enables recursive boolean logic
    IsNullCondition is_null = 5; // Match points where the field value is null
    NestedCondition nested = 6; // Condition evaluated inside a nested object
    HasVectorCondition has_vector = 7; // Match points that have the given named vector
  }
}
// Matches points whose payload field `key` is empty.
message IsEmptyCondition {
  string key = 1; // Payload field to test
}
// Matches points whose payload field `key` is null.
message IsNullCondition {
  string key = 1; // Payload field to test
}
// Matches points whose ID is in the given list.
message HasIdCondition {
  repeated PointId has_id = 1; // IDs to match against
}
// Matches points that have the named vector set.
message HasVectorCondition {
  string has_vector = 1; // Name of the vector
}
// Applies a filter to elements of a nested payload object.
message NestedCondition {
  string key = 1; // Path to nested object
  Filter filter = 2; // Filter condition
}
// Constraint on a single payload field, addressed by `key`.
// Each sub-constraint below is an independent message field; unset
// constraints are simply not applied.
message FieldCondition {
  string key = 1; // Payload field the condition applies to
  Match match = 2; // Check if point has field with a given value
  Range range = 3; // Check if points value lies in a given range
  GeoBoundingBox geo_bounding_box = 4; // Check if points geolocation lies in a given area
  GeoRadius geo_radius = 5; // Check if geo point is within a given radius
  ValuesCount values_count = 6; // Check number of values for a specific field
  GeoPolygon geo_polygon = 7; // Check if geo point is within a given polygon
  DatetimeRange datetime_range = 8; // Check if datetime is within a given range
  optional bool is_empty = 9; // Check if field is empty
  optional bool is_null = 10; // Check if field is null
}
// Value-matching condition for a payload field. Exactly one variant is set.
message Match {
  oneof match_value {
    string keyword = 1; // Match string keyword
    int64 integer = 2; // Match integer
    bool boolean = 3; // Match boolean
    string text = 4; // Match text
    RepeatedStrings keywords = 5; // Match multiple keywords
    RepeatedIntegers integers = 6; // Match multiple integers
    RepeatedIntegers except_integers = 7; // Match any other value except those integers
    RepeatedStrings except_keywords = 8; // Match any other value except those keywords
    string phrase = 9; // Match phrase text
    string text_any = 10; // Match any word in the text
  }
}
// Wrapper for a list of strings (oneof members cannot be repeated directly).
message RepeatedStrings {
  repeated string strings = 1;
}
// Wrapper for a list of integers (oneof members cannot be repeated directly).
message RepeatedIntegers {
  repeated int64 integers = 1;
}
// Numeric range constraint. Unset bounds are not applied.
message Range {
  optional double lt = 1; // Strictly less than
  optional double gt = 2; // Strictly greater than
  optional double gte = 3; // Greater than or equal
  optional double lte = 4; // Less than or equal
}
// Datetime range constraint. Unset bounds are not applied.
message DatetimeRange {
  optional google.protobuf.Timestamp lt = 1; // Strictly before
  optional google.protobuf.Timestamp gt = 2; // Strictly after
  optional google.protobuf.Timestamp gte = 3; // At or after
  optional google.protobuf.Timestamp lte = 4; // At or before
}
// Rectangular geographic area defined by two corners.
message GeoBoundingBox {
  GeoPoint top_left = 1; // north-west corner
  GeoPoint bottom_right = 2; // south-east corner
}
// Circular geographic area.
message GeoRadius {
  GeoPoint center = 1; // Center of the circle
  float radius = 2; // In meters
}
// Open or closed sequence of geographic points forming a line.
message GeoLineString {
  repeated GeoPoint points = 1; // Ordered sequence of GeoPoints representing the line
}
// For a valid GeoPolygon, both the exterior and interior GeoLineStrings must consist of a minimum of 4 points.
// Additionally, the first and last points of each GeoLineString must be the same.
message GeoPolygon {
  GeoLineString exterior = 1; // The exterior line bounds the surface
  repeated GeoLineString interiors = 2; // Interior lines (if present) bound holes within the surface
}
// Constraint on the number of values a payload field holds.
// Unset bounds are not applied.
message ValuesCount {
  optional uint64 lt = 1; // Strictly fewer than
  optional uint64 gt = 2; // Strictly more than
  optional uint64 gte = 3; // At least
  optional uint64 lte = 4; // At most
}
@@ -0,0 +1,75 @@
syntax = "proto3";
package qdrant;
option csharp_namespace = "Qdrant.Client.Grpc";
import "google/protobuf/timestamp.proto";
// Service for managing snapshots, both per-collection and of the full storage.
service Snapshots {
  /*
  Create collection snapshot
  */
  rpc Create (CreateSnapshotRequest) returns (CreateSnapshotResponse) {}
  /*
  List collection snapshots
  */
  rpc List (ListSnapshotsRequest) returns (ListSnapshotsResponse) {}
  /*
  Delete collection snapshot
  */
  rpc Delete (DeleteSnapshotRequest) returns (DeleteSnapshotResponse) {}
  /*
  Create full storage snapshot
  */
  rpc CreateFull (CreateFullSnapshotRequest) returns (CreateSnapshotResponse) {}
  /*
  List full storage snapshots
  */
  rpc ListFull (ListFullSnapshotsRequest) returns (ListSnapshotsResponse) {}
  /*
  Delete full storage snapshot
  */
  rpc DeleteFull (DeleteFullSnapshotRequest) returns (DeleteSnapshotResponse) {}
}
// Request to create a full storage snapshot. Intentionally empty; kept as a
// dedicated message so fields can be added without breaking the RPC signature.
message CreateFullSnapshotRequest {}
// Request to list full storage snapshots. Intentionally empty (see above).
message ListFullSnapshotsRequest {}
// Request to delete a full storage snapshot.
message DeleteFullSnapshotRequest {
  string snapshot_name = 1; // Name of the full snapshot
}
// Request to create a snapshot of a single collection.
message CreateSnapshotRequest {
  string collection_name = 1; // Name of the collection
}
// Request to list snapshots of a single collection.
message ListSnapshotsRequest {
  string collection_name = 1; // Name of the collection
}
// Request to delete a snapshot of a single collection.
message DeleteSnapshotRequest {
  string collection_name = 1; // Name of the collection
  string snapshot_name = 2; // Name of the collection snapshot
}
// Metadata describing a single snapshot.
message SnapshotDescription {
  string name = 1; // Name of the snapshot
  google.protobuf.Timestamp creation_time = 2; // Creation time of the snapshot
  int64 size = 3; // Size of the snapshot in bytes
  optional string checksum = 4; // SHA256 digest of the snapshot file
}
// Result of a snapshot-creation request.
message CreateSnapshotResponse {
  SnapshotDescription snapshot_description = 1; // The snapshot that was created
  double time = 2; // Time spent to process
}
// Result of a snapshot-listing request.
message ListSnapshotsResponse {
  repeated SnapshotDescription snapshot_descriptions = 1; // All known snapshots
  double time = 2; // Time spent to process
}
// Result of a snapshot-deletion request.
message DeleteSnapshotResponse {
  double time = 1; // Time spent to process
}