Merge commit 'd803bfe2b1fe7f5e219e50ac20d6801a0a58ac75' as 'vendor/ruvector'

This commit is contained in:
ruv
2026-02-28 14:39:40 -05:00
7854 changed files with 3522914 additions and 0 deletions

View File

@@ -0,0 +1,472 @@
# IVFFlat Index Usage Examples
## Basic Setup
### 1. Create Table with Vector Column
```sql
CREATE TABLE products (
id serial PRIMARY KEY,
name text NOT NULL,
description text,
embedding vector(1536), -- OpenAI ada-002 embeddings
created_at timestamp DEFAULT now()
);
```
### 2. Insert Sample Data
```sql
-- Insert products with embeddings
INSERT INTO products (name, description, embedding) VALUES
('Laptop', 'High-performance laptop', '[0.1, 0.2, 0.3, ...]'),
('Mouse', 'Wireless mouse', '[0.4, 0.5, 0.6, ...]'),
('Keyboard', 'Mechanical keyboard', '[0.7, 0.8, 0.9, ...]');
-- Or insert from a data source
INSERT INTO products (name, description, embedding)
SELECT
name,
description,
get_embedding(description) -- Your embedding function
FROM source_table;
```
## Index Creation
### Default Configuration
```sql
-- Create index with default settings (100 lists, probe 1)
CREATE INDEX products_embedding_idx
ON products
USING ruivfflat (embedding vector_l2_ops);
```
### Optimized for Small Datasets (< 10K vectors)
```sql
CREATE INDEX products_embedding_idx
ON products
USING ruivfflat (embedding vector_l2_ops)
WITH (lists = 50);
```
### Optimized for Medium Datasets (10K - 100K vectors)
```sql
CREATE INDEX products_embedding_idx
ON products
USING ruivfflat (embedding vector_l2_ops)
WITH (lists = 100);
```
### Optimized for Large Datasets (> 100K vectors)
```sql
CREATE INDEX products_embedding_idx
ON products
USING ruivfflat (embedding vector_l2_ops)
WITH (lists = 500);
```
### Very Large Datasets (> 1M vectors)
```sql
CREATE INDEX products_embedding_idx
ON products
USING ruivfflat (embedding vector_l2_ops)
WITH (lists = 1000);
```
## Distance Metrics
### Euclidean Distance (L2)
```sql
-- Best for: General-purpose similarity search
CREATE INDEX products_embedding_l2_idx
ON products
USING ruivfflat (embedding vector_l2_ops)
WITH (lists = 100);
-- Query
SELECT name, embedding <-> '[0.1, 0.2, ...]' AS distance
FROM products
ORDER BY embedding <-> '[0.1, 0.2, ...]'
LIMIT 10;
```
### Cosine Distance
```sql
-- Best for: Normalized vectors, text embeddings
CREATE INDEX products_embedding_cosine_idx
ON products
USING ruivfflat (embedding vector_cosine_ops)
WITH (lists = 100);
-- Query
SELECT name, embedding <=> '[0.1, 0.2, ...]' AS distance
FROM products
ORDER BY embedding <=> '[0.1, 0.2, ...]'
LIMIT 10;
```
### Inner Product
```sql
-- Best for: Maximum similarity (negative distance)
CREATE INDEX products_embedding_ip_idx
ON products
USING ruivfflat (embedding vector_ip_ops)
WITH (lists = 100);
-- Query
SELECT name, embedding <#> '[0.1, 0.2, ...]' AS distance
FROM products
ORDER BY embedding <#> '[0.1, 0.2, ...]'
LIMIT 10;
```
## Search Queries
### Basic KNN Search
```sql
-- Find 10 most similar products
SELECT
id,
name,
description,
embedding <-> '[0.1, 0.2, ...]'::vector AS distance
FROM products
ORDER BY embedding <-> '[0.1, 0.2, ...]'::vector
LIMIT 10;
```
### Search with Filters
```sql
-- Find similar products in a category
SELECT
id,
name,
embedding <-> '[0.1, 0.2, ...]'::vector AS distance
FROM products
WHERE category = 'Electronics'
ORDER BY embedding <-> '[0.1, 0.2, ...]'::vector
LIMIT 10;
```
### Search with Multiple Conditions
```sql
-- Find recent similar products
SELECT
id,
name,
created_at,
embedding <=> '[0.1, 0.2, ...]'::vector AS distance
FROM products
WHERE
created_at > now() - interval '30 days'
AND price < 1000
ORDER BY embedding <=> '[0.1, 0.2, ...]'::vector
LIMIT 10;
```
## Performance Tuning
### Adjusting Probes
```sql
-- Fast search (lower recall ~70%)
SET ruvector.ivfflat_probes = 1;
-- Balanced search (medium recall ~85%)
SET ruvector.ivfflat_probes = 5;
-- Accurate search (high recall ~95%)
SET ruvector.ivfflat_probes = 10;
-- Very accurate search (very high recall ~98%)
SET ruvector.ivfflat_probes = 20;
```
### Session-Level Configuration
```sql
-- Set for current session
SET ruvector.ivfflat_probes = 10;
-- Verify setting
SHOW ruvector.ivfflat_probes;
-- Reset to default
RESET ruvector.ivfflat_probes;
```
### Transaction-Level Configuration
```sql
BEGIN;
SET LOCAL ruvector.ivfflat_probes = 15;
-- Query will use probes = 15
SELECT * FROM products ORDER BY embedding <-> '[...]' LIMIT 10;
COMMIT;
-- Back to session default
```
### Query-Level Configuration
```sql
-- PostgreSQL has no per-query SETTINGS clause; scope a setting to a
-- single query with SET LOCAL inside a transaction.
BEGIN;
SET LOCAL ruvector.ivfflat_probes = 10;
SELECT
id,
name,
embedding <-> '[0.1, 0.2, ...]'::vector AS distance
FROM products
ORDER BY embedding <-> '[0.1, 0.2, ...]'::vector
LIMIT 10;
COMMIT;
```
## Advanced Use Cases
### Semantic Search with Ranking
```sql
WITH similar_products AS (
SELECT
id,
name,
description,
embedding <-> query_embedding AS vector_distance,
ts_rank(to_tsvector('english', description),
to_tsquery('laptop')) AS text_rank
FROM products,
(SELECT '[0.1, 0.2, ...]'::vector AS query_embedding) q
ORDER BY embedding <-> query_embedding
LIMIT 100
)
SELECT
id,
name,
description,
vector_distance,
text_rank,
(0.7 * (1 - vector_distance) + 0.3 * text_rank) AS combined_score
FROM similar_products
ORDER BY combined_score DESC
LIMIT 10;
```
### Multi-Vector Search
```sql
-- Find products similar to multiple queries
WITH queries AS (
SELECT unnest(ARRAY[
'[0.1, 0.2, ...]'::vector,
'[0.4, 0.5, ...]'::vector,
'[0.7, 0.8, ...]'::vector
]) AS query_vec
),
all_results AS (
SELECT
p.id,
p.name,
MIN(p.embedding <-> q.query_vec) AS min_distance
FROM products p
CROSS JOIN queries q
GROUP BY p.id, p.name -- GROUP BY already deduplicates; DISTINCT would be redundant
)
SELECT id, name, min_distance
FROM all_results
ORDER BY min_distance
LIMIT 10;
```
### Batch Processing
```sql
-- Process embeddings in batches
DO $$
DECLARE
batch_size INT := 1000;
offset_val INT := 0;
total_count INT;
BEGIN
SELECT COUNT(*) INTO total_count FROM unprocessed_products;
WHILE offset_val < total_count LOOP
-- Process batch
WITH batch AS (
SELECT id, description
FROM unprocessed_products
ORDER BY id
LIMIT batch_size
OFFSET offset_val
)
UPDATE products p
SET embedding = get_embedding(b.description)
FROM batch b
WHERE p.id = b.id;
offset_val := offset_val + batch_size;
RAISE NOTICE 'Processed % of % vectors', offset_val, total_count;
END LOOP;
END $$;
```
## Monitoring and Maintenance
### Check Index Statistics
```sql
-- Get index metadata
SELECT * FROM ruvector_ivfflat_stats('products_embedding_idx');
-- Check index size
-- (pg_stat_user_indexes exposes relid/relname/indexrelname, not the
-- tablename/indexname columns of pg_indexes, so no join is needed)
SELECT
schemaname,
relname AS tablename,
indexrelname AS indexname,
pg_size_pretty(pg_relation_size(indexrelid)) AS index_size,
pg_size_pretty(pg_table_size(relid)) AS table_size
FROM pg_stat_user_indexes
WHERE indexrelname = 'products_embedding_idx';
```
### Analyze Query Performance
```sql
-- Enable timing
\timing on
-- Explain analyze
EXPLAIN (ANALYZE, BUFFERS)
SELECT id, name
FROM products
ORDER BY embedding <-> '[0.1, 0.2, ...]'::vector
LIMIT 10;
```
### Rebuild Index
```sql
-- After significant data changes
REINDEX INDEX products_embedding_idx;
-- Or rebuild concurrently (PostgreSQL 12+)
REINDEX INDEX CONCURRENTLY products_embedding_idx;
```
### Vacuum and Analyze
```sql
-- Update statistics
ANALYZE products;
-- Vacuum to reclaim space
VACUUM products;
-- Or full vacuum
VACUUM FULL products;
```
## Best Practices
### 1. Choose Appropriate Number of Lists
```sql
-- Rule of thumb: lists = sqrt(total_vectors)
-- Example for 100K vectors
CREATE INDEX ON products USING ruivfflat (embedding vector_l2_ops)
WITH (lists = 316); -- sqrt(100000) ≈ 316
-- Example for 1M vectors
CREATE INDEX ON products USING ruivfflat (embedding vector_l2_ops)
WITH (lists = 1000); -- sqrt(1000000) = 1000
```
### 2. Balance Speed vs Accuracy
```sql
-- Production: Start conservative, increase probes if needed
SET ruvector.ivfflat_probes = 5;
-- Development/Testing: Higher probes for better results
SET ruvector.ivfflat_probes = 10;
-- Critical queries: Maximum accuracy
SET ruvector.ivfflat_probes = 20;
```
### 3. Regular Maintenance
```sql
-- Weekly or after large data changes
VACUUM ANALYZE products;
REINDEX INDEX CONCURRENTLY products_embedding_idx;
```
### 4. Monitor Index Health
```sql
-- Create monitoring view
CREATE VIEW index_health AS
SELECT
indexrelname AS indexname, -- pg_stat_user_indexes has indexrelname, not indexname
pg_size_pretty(pg_relation_size(indexrelid)) AS size,
idx_scan AS scans,
idx_tup_read AS tuples_read,
idx_tup_fetch AS tuples_fetched,
(idx_tup_read::float / NULLIF(idx_scan, 0))::numeric(10,2) AS avg_tuples_per_scan
FROM pg_stat_user_indexes
WHERE indexrelname LIKE '%embedding%';
-- Check regularly
SELECT * FROM index_health;
```
## Troubleshooting
### Slow Queries
```sql
-- Increase probes
SET ruvector.ivfflat_probes = 10;
-- Check if index is being used
EXPLAIN SELECT * FROM products ORDER BY embedding <-> '[...]' LIMIT 10;
-- Rebuild index
REINDEX INDEX products_embedding_idx;
```
### Low Recall
```sql
-- Increase probes
SET ruvector.ivfflat_probes = 15;
-- Or rebuild with more lists
DROP INDEX products_embedding_idx;
CREATE INDEX products_embedding_idx ON products
USING ruivfflat (embedding vector_l2_ops)
WITH (lists = 500);
```
### Memory Issues
```sql
-- Reduce lists during build
CREATE INDEX products_embedding_idx ON products
USING ruivfflat (embedding vector_l2_ops)
WITH (lists = 100); -- Smaller lists = less memory
-- Or build in multiple steps
```

View File

@@ -0,0 +1,256 @@
-- Sparse Vectors Example Usage
-- This file demonstrates the sparse vector functionality
-- ============================================================================
-- Setup
-- ============================================================================
-- Create extension (assuming already installed)
-- CREATE EXTENSION IF NOT EXISTS ruvector_postgres;
-- Create sample tables
-- Demo table: each document stores its text plus a sparse embedding
-- (extension-provided `sparsevec` type).
CREATE TABLE IF NOT EXISTS sparse_documents (
    id SERIAL PRIMARY KEY,              -- surrogate key
    title TEXT,
    content TEXT,
    sparse_embedding sparsevec,         -- sparse vector payload
    created_at TIMESTAMP DEFAULT NOW()  -- NOTE(review): consider TIMESTAMPTZ for instants
);
-- ============================================================================
-- Inserting Data
-- ============================================================================
-- Method 1: literal sparsevec strings in '{index:value, ...}' form.
INSERT INTO sparse_documents (title, content, sparse_embedding)
VALUES
    ('Machine Learning Basics',
     'Introduction to neural networks and deep learning',
     '{1024:0.5, 2048:0.3, 4096:0.8, 8192:0.2}'::sparsevec),
    ('Natural Language Processing',
     'Text processing and language models',
     '{1024:0.3, 3072:0.7, 4096:0.4, 9216:0.6}'::sparsevec),
    ('Computer Vision',
     'Image recognition and object detection',
     '{2048:0.9, 5120:0.4, 6144:0.5, 7168:0.3}'::sparsevec);

-- Method 2: build from parallel index/value arrays plus a total dimension.
INSERT INTO sparse_documents (title, content, sparse_embedding)
VALUES
    ('Reinforcement Learning',
     'Q-learning and policy gradients',
     ruvector_to_sparse(
         ARRAY[1024, 4096, 10240]::int[],
         ARRAY[0.6, 0.8, 0.4]::real[],
         30000
     ));

-- Method 3: convert a dense real[] into a sparsevec.
INSERT INTO sparse_documents (title, sparse_embedding)
SELECT
    'From Dense Vector',
    ruvector_dense_to_sparse(
        ARRAY[0, 0.5, 0, 0.3, 0, 0, 0.8, 0, 0, 0.2]::real[]
    );
-- ============================================================================
-- Basic Queries
-- ============================================================================
-- Inspect every document's sparse-vector shape: count of stored
-- (non-zero) entries, declared dimension, and L2 norm.
SELECT
    id,
    title,
    ruvector_sparse_nnz(sparse_embedding) AS num_nonzero,
    ruvector_sparse_dim(sparse_embedding) AS dimension,
    ruvector_sparse_norm(sparse_embedding) AS l2_norm
FROM sparse_documents
ORDER BY id;  -- deterministic output for an example script
-- ============================================================================
-- Similarity Search
-- ============================================================================
-- Define the query vector once in a CTE, then score every document
-- under three metrics; rank by dot product (inner product).
WITH query AS (
    SELECT '{1024:0.5, 2048:0.3, 4096:0.8}'::sparsevec AS query_vec
)
SELECT
    d.id,
    d.title,
    ruvector_sparse_dot(d.sparse_embedding, q.query_vec) AS dot_product,
    ruvector_sparse_cosine(d.sparse_embedding, q.query_vec) AS cosine_sim,
    ruvector_sparse_euclidean(d.sparse_embedding, q.query_vec) AS euclidean_dist
FROM sparse_documents AS d
CROSS JOIN query AS q              -- explicit join instead of an implicit comma join
ORDER BY dot_product DESC, d.id    -- id tie-break keeps results stable
LIMIT 5;
-- Keep only documents whose cosine similarity to the query exceeds 0.3.
-- The expression is repeated in WHERE because SQL cannot reference a
-- SELECT alias there.
WITH query AS (
    SELECT '{1024:0.5, 4096:0.8}'::sparsevec AS query_vec
)
SELECT
    d.id,
    d.title,
    ruvector_sparse_cosine(d.sparse_embedding, query_vec) AS similarity
FROM sparse_documents AS d
CROSS JOIN query                     -- explicit join instead of comma join
WHERE ruvector_sparse_cosine(d.sparse_embedding, query_vec) > 0.3
ORDER BY similarity DESC, d.id;      -- id tie-break for stable output
-- ============================================================================
-- Sparsification Operations
-- ============================================================================
-- Keep only the top-k elements of each vector (selection criterion is
-- defined by the extension — presumably largest magnitude; confirm in docs).
SELECT
    id,
    title,
    sparse_embedding AS original,
    ruvector_sparse_top_k(sparse_embedding, 2) AS top_2_elements
FROM sparse_documents
ORDER BY id   -- LIMIT without ORDER BY returns arbitrary rows
LIMIT 3;

-- Prune entries below a threshold value.
SELECT
    id,
    title,
    sparse_embedding AS original,
    ruvector_sparse_prune(sparse_embedding, 0.4) AS pruned
FROM sparse_documents
ORDER BY id
LIMIT 3;
-- ============================================================================
-- BM25 Text Search Example
-- ============================================================================
-- Table for BM25 scoring: per-document term frequencies stored as a
-- sparse vector (term id -> TF), plus the document length used for
-- length normalization.
CREATE TABLE IF NOT EXISTS bm25_articles (
    id SERIAL PRIMARY KEY,
    title TEXT,
    content TEXT,
    term_frequencies sparsevec,  -- TF values keyed by term id
    doc_length REAL              -- document length for the BM25 formula
);

-- Seed three documents with toy term-frequency vectors.
INSERT INTO bm25_articles (title, content, term_frequencies, doc_length)
VALUES
    ('AI Research Paper',
     'Deep learning models for natural language processing',
     '{100:2.0, 200:1.0, 300:3.0, 400:1.0}'::sparsevec,
     7.0),
    ('Machine Learning Tutorial',
     'Introduction to supervised and unsupervised learning',
     '{100:1.0, 250:2.0, 300:1.0, 500:2.0}'::sparsevec,
     6.0),
    ('Data Science Guide',
     'Statistical analysis and data visualization techniques',
     '{150:1.0, 250:1.0, 350:2.0, 450:1.0}'::sparsevec,
     6.0);
-- BM25 ranking: query-side IDF weights combined with each document's
-- term frequencies and length, normalized by the collection's average
-- document length.
WITH query AS (
    -- Query with IDF weights (normally computed from corpus)
    SELECT '{100:1.5, 300:2.0, 400:1.2}'::sparsevec AS query_idf
),
collection_stats AS (
    SELECT AVG(doc_length) AS avg_doc_len
    FROM bm25_articles
)
SELECT
    a.id,
    a.title,
    ruvector_sparse_bm25(
        q.query_idf,
        a.term_frequencies,
        a.doc_length,
        cs.avg_doc_len,
        1.2,   -- k1: term-frequency saturation parameter
        0.75   -- b: document-length normalization parameter
    ) AS bm25_score
FROM bm25_articles AS a
CROSS JOIN query AS q              -- explicit joins instead of a three-way comma join
CROSS JOIN collection_stats AS cs  -- both CTEs yield exactly one row
ORDER BY bm25_score DESC, a.id     -- id tie-break keeps ranking stable
LIMIT 5;
-- ============================================================================
-- Hybrid Search (Dense + Sparse)
-- ============================================================================
-- Create hybrid table (requires vector extension)
-- Uncomment if you have dense vector support
/*
CREATE TABLE IF NOT EXISTS hybrid_documents (
id SERIAL PRIMARY KEY,
title TEXT,
dense_embedding vector(768),
sparse_embedding sparsevec
);
-- Hybrid search combining both signals
WITH query AS (
SELECT
random_vector(768) AS query_dense, -- Replace with actual query
'{1024:0.5, 2048:0.3}'::sparsevec AS query_sparse
)
SELECT id, title,
0.7 * (1 - (dense_embedding <=> query_dense)) + -- Dense similarity
0.3 * ruvector_sparse_dot(sparse_embedding, query_sparse) AS hybrid_score
FROM hybrid_documents, query
ORDER BY hybrid_score DESC
LIMIT 10;
*/
-- ============================================================================
-- Utility Operations
-- ============================================================================
-- Expand each sparse vector back into a dense real[] array.
SELECT
    id,
    title,
    ruvector_sparse_to_dense(sparse_embedding) AS dense_array
FROM sparse_documents
ORDER BY id   -- LIMIT without ORDER BY returns arbitrary rows
LIMIT 3;

-- Corpus-level sparsity statistics.
SELECT
    COUNT(*) AS num_documents,
    AVG(ruvector_sparse_nnz(sparse_embedding)) AS avg_nonzero,
    MIN(ruvector_sparse_nnz(sparse_embedding)) AS min_nonzero,
    MAX(ruvector_sparse_nnz(sparse_embedding)) AS max_nonzero,
    AVG(ruvector_sparse_norm(sparse_embedding)) AS avg_norm
FROM sparse_documents;
-- Rank documents by how close their non-zero count is to document 1's.
WITH target AS (
    -- Only the count is needed downstream; the vector itself was unused.
    SELECT ruvector_sparse_nnz(sparse_embedding) AS target_nnz
    FROM sparse_documents
    WHERE id = 1
)
SELECT
    d.id,
    d.title,
    ruvector_sparse_nnz(d.sparse_embedding) AS doc_nnz,
    ABS(ruvector_sparse_nnz(d.sparse_embedding) - t.target_nnz) AS nnz_diff
FROM sparse_documents AS d
CROSS JOIN target AS t       -- explicit join instead of comma join
WHERE d.id <> 1              -- ANSI inequality operator instead of !=
ORDER BY nnz_diff, d.id      -- id tie-break for stable output
LIMIT 5;
-- ============================================================================
-- Performance Analysis
-- ============================================================================
-- On-disk size of each stored sparse vector and bytes per stored element.
SELECT
    id,
    title,
    pg_column_size(sparse_embedding) AS sparse_bytes,
    ruvector_sparse_nnz(sparse_embedding) AS num_nonzero,
    -- GREATEST guards against division by zero for empty vectors
    pg_column_size(sparse_embedding)::float /
        GREATEST(ruvector_sparse_nnz(sparse_embedding), 1) AS bytes_per_element
FROM sparse_documents
ORDER BY sparse_bytes DESC, id;  -- id tie-break for stable output

-- Plan and timing for a batched similarity scan: one query vector
-- fanned out across three query ids.
EXPLAIN ANALYZE
WITH queries AS (
    SELECT
        generate_series(1, 3) AS query_id,
        '{1024:0.5, 2048:0.3}'::sparsevec AS query_vec
)
SELECT
    q.query_id,
    d.id,
    d.title,
    ruvector_sparse_dot(d.sparse_embedding, q.query_vec) AS score
FROM sparse_documents AS d
CROSS JOIN queries AS q
ORDER BY q.query_id, score DESC, d.id;  -- id tie-break for stable output
-- ============================================================================
-- Cleanup (optional)
-- ============================================================================
-- DROP TABLE IF EXISTS sparse_documents CASCADE;
-- DROP TABLE IF EXISTS bm25_articles CASCADE;
-- DROP TABLE IF EXISTS hybrid_documents CASCADE;