Tree:
				017e7d32cf
			
			
		
		add-ec-vacuum
			
				add-foundation-db
			
				add_fasthttp_client
			
				add_remote_storage
			
				adding-message-queue-integration-tests
			
				avoid_releasing_temp_file_on_write
			
				changing-to-zap
			
				collect-public-metrics
			
				create-table-snapshot-api-design
			
				data_query_pushdown
			
				dependabot/maven/other/java/client/com.google.protobuf-protobuf-java-3.25.5
			
				dependabot/maven/other/java/examples/org.apache.hadoop-hadoop-common-3.4.0
			
				detect-and-plan-ec-tasks
			
				do-not-retry-if-error-is-NotFound
			
				fasthttp
			
				filer1_maintenance_branch
			
				fix-GetObjectLockConfigurationHandler
			
				fix-versioning-listing-only
			
				ftp
			
				gh-pages
			
				improve-fuse-mount
			
				improve-fuse-mount2
			
				logrus
			
				master
			
				message_send
			
				mount2
			
				mq-subscribe
			
				mq2
			
				original_weed_mount
			
				random_access_file
			
				refactor-needle-read-operations
			
				refactor-volume-write
			
				remote_overlay
			
				revert-5134-patch-1
			
				revert-5819-patch-1
			
				revert-6434-bugfix-missing-s3-audit
			
				s3-select
			
				sub
			
				tcp_read
			
				test-reverting-lock-table
			
				test_udp
			
				testing
			
				testing-sdx-generation
			
				tikv
			
				track-mount-e2e
			
				volume_buffered_writes
			
				worker-execute-ec-tasks
			
			
				0.72
			
				0.72.release
			
				0.73
			
				0.74
			
				0.75
			
				0.76
			
				0.77
			
				0.90
			
				0.91
			
				0.92
			
				0.93
			
				0.94
			
				0.95
			
				0.96
			
				0.97
			
				0.98
			
				0.99
			
				1.00
			
				1.01
			
				1.02
			
				1.03
			
				1.04
			
				1.05
			
				1.06
			
				1.07
			
				1.08
			
				1.09
			
				1.10
			
				1.11
			
				1.12
			
				1.14
			
				1.15
			
				1.16
			
				1.17
			
				1.18
			
				1.19
			
				1.20
			
				1.21
			
				1.22
			
				1.23
			
				1.24
			
				1.25
			
				1.26
			
				1.27
			
				1.28
			
				1.29
			
				1.30
			
				1.31
			
				1.32
			
				1.33
			
				1.34
			
				1.35
			
				1.36
			
				1.37
			
				1.38
			
				1.40
			
				1.41
			
				1.42
			
				1.43
			
				1.44
			
				1.45
			
				1.46
			
				1.47
			
				1.48
			
				1.49
			
				1.50
			
				1.51
			
				1.52
			
				1.53
			
				1.54
			
				1.55
			
				1.56
			
				1.57
			
				1.58
			
				1.59
			
				1.60
			
				1.61
			
				1.61RC
			
				1.62
			
				1.63
			
				1.64
			
				1.65
			
				1.66
			
				1.67
			
				1.68
			
				1.69
			
				1.70
			
				1.71
			
				1.72
			
				1.73
			
				1.74
			
				1.75
			
				1.76
			
				1.77
			
				1.78
			
				1.79
			
				1.80
			
				1.81
			
				1.82
			
				1.83
			
				1.84
			
				1.85
			
				1.86
			
				1.87
			
				1.88
			
				1.90
			
				1.91
			
				1.92
			
				1.93
			
				1.94
			
				1.95
			
				1.96
			
				1.97
			
				1.98
			
				1.99
			
				1;70
			
				2.00
			
				2.01
			
				2.02
			
				2.03
			
				2.04
			
				2.05
			
				2.06
			
				2.07
			
				2.08
			
				2.09
			
				2.10
			
				2.11
			
				2.12
			
				2.13
			
				2.14
			
				2.15
			
				2.16
			
				2.17
			
				2.18
			
				2.19
			
				2.20
			
				2.21
			
				2.22
			
				2.23
			
				2.24
			
				2.25
			
				2.26
			
				2.27
			
				2.28
			
				2.29
			
				2.30
			
				2.31
			
				2.32
			
				2.33
			
				2.34
			
				2.35
			
				2.36
			
				2.37
			
				2.38
			
				2.39
			
				2.40
			
				2.41
			
				2.42
			
				2.43
			
				2.47
			
				2.48
			
				2.49
			
				2.50
			
				2.51
			
				2.52
			
				2.53
			
				2.54
			
				2.55
			
				2.56
			
				2.57
			
				2.58
			
				2.59
			
				2.60
			
				2.61
			
				2.62
			
				2.63
			
				2.64
			
				2.65
			
				2.66
			
				2.67
			
				2.68
			
				2.69
			
				2.70
			
				2.71
			
				2.72
			
				2.73
			
				2.74
			
				2.75
			
				2.76
			
				2.77
			
				2.78
			
				2.79
			
				2.80
			
				2.81
			
				2.82
			
				2.83
			
				2.84
			
				2.85
			
				2.86
			
				2.87
			
				2.88
			
				2.89
			
				2.90
			
				2.91
			
				2.92
			
				2.93
			
				2.94
			
				2.95
			
				2.96
			
				2.97
			
				2.98
			
				2.99
			
				3.00
			
				3.01
			
				3.02
			
				3.03
			
				3.04
			
				3.05
			
				3.06
			
				3.07
			
				3.08
			
				3.09
			
				3.10
			
				3.11
			
				3.12
			
				3.13
			
				3.14
			
				3.15
			
				3.16
			
				3.18
			
				3.19
			
				3.20
			
				3.21
			
				3.22
			
				3.23
			
				3.24
			
				3.25
			
				3.26
			
				3.27
			
				3.28
			
				3.29
			
				3.30
			
				3.31
			
				3.32
			
				3.33
			
				3.34
			
				3.35
			
				3.36
			
				3.37
			
				3.38
			
				3.39
			
				3.40
			
				3.41
			
				3.42
			
				3.43
			
				3.44
			
				3.45
			
				3.46
			
				3.47
			
				3.48
			
				3.50
			
				3.51
			
				3.52
			
				3.53
			
				3.54
			
				3.55
			
				3.56
			
				3.57
			
				3.58
			
				3.59
			
				3.60
			
				3.61
			
				3.62
			
				3.63
			
				3.64
			
				3.65
			
				3.66
			
				3.67
			
				3.68
			
				3.69
			
				3.71
			
				3.72
			
				3.73
			
				3.74
			
				3.75
			
				3.76
			
				3.77
			
				3.78
			
				3.79
			
				3.80
			
				3.81
			
				3.82
			
				3.83
			
				3.84
			
				3.85
			
				3.86
			
				3.87
			
				3.88
			
				3.89
			
				3.90
			
				3.91
			
				3.92
			
				3.93
			
				3.94
			
				3.95
			
				3.96
			
				3.97
			
				3.98
			
				dev
			
				helm-3.65.1
			
				v0.69
			
				v0.70beta
			
				v3.33
			
		${ noResults }
		
	
		
			1 Commits (017e7d32cfed196bf70e61257ae6ce8ab8cc374a)
		
	
	
		
	
    | Author | SHA1 | Message | Date | 
|---|---|---|---|
|  | a7fdc0d137 | Message Queue: Add sql querying (#7185) * feat: Phase 1 - Add SQL query engine foundation for MQ topics Implements core SQL infrastructure with metadata operations: New Components: - SQL parser integration using github.com/xwb1989/sqlparser - Query engine framework in weed/query/engine/ - Schema catalog mapping MQ topics to SQL tables - Interactive SQL CLI command 'weed sql' Supported Operations: - SHOW DATABASES (lists MQ namespaces) - SHOW TABLES (lists MQ topics) - SQL statement parsing and routing - Error handling and result formatting Key Design Decisions: - MQ namespaces ↔ SQL databases - MQ topics ↔ SQL tables - Parquet message storage ready for querying - Backward-compatible schema evolution support Testing: - Unit tests for core engine functionality - Command integration tests - Parse error handling validation Assumptions (documented in code): - All MQ messages stored in Parquet format - Schema evolution maintains backward compatibility - MySQL-compatible SQL syntax via sqlparser - Single-threaded usage per SQL session Next Phase: DDL operations (CREATE/ALTER/DROP TABLE) * feat: Phase 2 - Add DDL operations and real MQ broker integration Implements comprehensive DDL support for MQ topic management: New Components: - Real MQ broker connectivity via BrokerClient - CREATE TABLE → ConfigureTopic gRPC calls - DROP TABLE → DeleteTopic operations - DESCRIBE table → Schema introspection - SQL type mapping (SQL ↔ MQ schema types) Enhanced Features: - Live topic discovery from MQ broker - Fallback to cached/sample data when broker unavailable - MySQL-compatible DESCRIBE output - Schema validation and error handling - CREATE TABLE with column definitions Key Infrastructure: - broker_client.go: gRPC communication with MQ broker - sql_types.go: Bidirectional SQL/MQ type conversion - describe.go: Table schema introspection - Enhanced engine.go: Full DDL routing and execution Supported SQL Operations: ✅ SHOW DATABASES, SHOW TABLES (live + fallback) ✅ CREATE TABLE table_name (col1 INT, col2 VARCHAR(50), ...) ✅ DROP TABLE table_name ✅ DESCRIBE table_name / SHOW COLUMNS FROM table_name Known Limitations: - SQL parser issues with reserved keywords (e.g., 'timestamp') - Requires running MQ broker for full functionality - ALTER TABLE not yet implemented - DeleteTopic method needs broker-side implementation Architecture Decisions: - Broker discovery via filer lock mechanism (same as shell commands) - Graceful fallback when broker unavailable - ConfigureTopic for CREATE TABLE with 6 default partitions - Schema versioning ready for ALTER TABLE support Testing: - Unit tests updated with filer address parameter - Integration tests for DDL operations - Error handling for connection failures Next Phase: SELECT query execution with Parquet scanning * fix: Resolve High Priority TODOs - Real MQ Broker Integration ✅ COMPLETED HIGH PRIORITY TODOs: 🔧 **Real FilerClient Integration** (engine.go:131) - Implemented GetFilerClient() method in BrokerClient - Added filerClientImpl with full FilerClient interface compliance - Added AdjustedUrl() and GetDataCenter() methods - Real filerClient connection replaces nil fallback 🔧 **Partition Discovery via MQ Broker** (hybrid_message_scanner.go:116) - Added ListTopicPartitions() method using topic configuration - Implemented discoverTopicPartitions() in HybridMessageScanner - Reads actual partition count from BrokerPartitionAssignments - Generates proper partition ranges based on topic.PartitionCount 📋 **Technical Fixes:** - Fixed compilation errors with undefined variables - Proper error handling with filerClientErr variable - Corrected ConfigureTopicResponse field usage (BrokerPartitionAssignments vs PartitionCount) - Complete FilerClient interface implementation 🎯 **Impact:** - SQL engine now connects to real MQ broker infrastructure - Actual topic partition discovery instead of hardcoded defaults - Production-ready broker integration with graceful fallbacks - Maintains backward compatibility with sample data when broker unavailable ✅ All tests passing - High priority TODO resolution complete! Next: Schema-aware message parsing and time filter optimization. * feat: Time Filter Extraction - Complete Performance Optimization ✅ FOURTH HIGH PRIORITY TODO COMPLETED! ⏰ **Time Filter Extraction & Push-Down Optimization** (engine.go:198-199) - Replaced hardcoded StartTimeNs=0, StopTimeNs=0 with intelligent extraction - Added extractTimeFilters() with recursive WHERE clause analysis - Smart time column detection (\_timestamp_ns, created_at, timestamp, etc.) - Comprehensive time value parsing (nanoseconds, ISO dates, datetime formats) - Operator reversal handling (column op value vs value op column) 🧠 **Intelligent WHERE Clause Processing:** - AND expressions: Combine time bounds (intersection) ✅ - OR expressions: Skip extraction (safety) ✅ - Parentheses: Recursive unwrapping ✅ - Comparison operators: >, >=, <, <=, = ✅ - Multiple time formats: nanoseconds, RFC3339, date-only, datetime ✅ 🚀 **Performance Impact:** - Push-down filtering to hybrid scanner level - Reduced data scanning at source (live logs + Parquet files) - Time-based partition pruning potential - Significant performance gains for time-series queries 📊 **Comprehensive Testing (21 tests passing):** - ✅ Time filter extraction (6 test scenarios) - ✅ Time column recognition (case-insensitive) - ✅ Time value parsing (5 formats) - ✅ Full integration with SELECT queries - ✅ Backward compatibility maintained 💡 **Real-World Query Examples:** Before: Scans ALL data, filters in memory SELECT * FROM events WHERE \_timestamp_ns > 1672531200000000000; After: Scans ONLY relevant time range at source level → StartTimeNs=1672531200000000000, StopTimeNs=0 → Massive performance improvement for large datasets! 🎯 **Production Ready Features:** - Multiple time column formats supported - Graceful fallbacks for invalid dates - OR clause safety (avoids incorrect optimization) - Comprehensive error handling **ALL MEDIUM PRIORITY TODOs NOW READY FOR NEXT PHASEtest ./weed/query/engine/ -v* 🎉 * feat: Extended WHERE Operators - Complete Advanced Filtering ✅ **EXTENDED WHERE OPERATORS IMPLEMENTEDtest ./weed/query/engine/ -v | grep -E PASS * feat: Enhanced SQL CLI Experience ✅ COMPLETE ENHANCED CLI IMPLEMENTATION: 🚀 **Multiple Execution Modes:** - Interactive shell with enhanced prompts and context - Single query execution: --query 'SQL' --output format - Batch file processing: --file queries.sql --output csv - Database context switching: --database dbname 📊 **Multi-Format Output:** - Table format (ASCII) - default for interactive - JSON format - structured data for programmatic use - CSV format - spreadsheet-friendly output - Smart auto-detection based on execution mode ⚙️ **Enhanced Interactive Shell:** - Database context switching: USE database_name; - Output format switching: \format table|json|csv - Command history tracking (basic implementation) - Enhanced help with WHERE operator examples - Contextual prompts: seaweedfs:dbname> 🛠️ **Production Features:** - Comprehensive error handling (JSON + user-friendly) - Query execution timing and performance metrics - 30-second timeout protection with graceful handling - Real MQ integration with hybrid data scanning 📖 **Complete CLI Interface:** - Full flag support: --server, --interactive, --file, --output, --database, --query - Auto-detection of execution mode and output format - Structured help system with practical examples - Batch processing with multi-query file support 💡 **Advanced WHERE Integration:** All extended operators (<=, >=, !=, LIKE, IN) fully supported across all execution modes and output formats. 🎯 **Usage Examples:** - weed sql --interactive - weed sql --query 'SHOW DATABASES' --output json - weed sql --file queries.sql --output csv - weed sql --database analytics --interactive Enhanced CLI experience complete - production ready! 🚀 * Delete test_utils_test.go * fmt * integer conversion * show databases works * show tables works * Update describe.go * actual column types * Update .gitignore * scan topic messages * remove emoji * support aggregation functions * column name case insensitive, better auto column names * fmt * fix reading system fields * use parquet statistics for optimization * remove emoji * parquet file generate stats * scan all files * parquet file generation remember the sources also * fmt * sql * truncate topic * combine parquet results with live logs * explain * explain the execution plan * add tests * improve tests * skip * use mock for testing * add tests * refactor * fix after refactoring * detailed logs during explain. Fix bugs on reading live logs. * fix decoding data * save source buffer index start for log files * process buffer from brokers * filter out already flushed messages * dedup with buffer start index * explain with broker buffer * the parquet file should also remember the first buffer_start attribute from the sources * parquet file can query messages in broker memory, if log files do not exist * buffer start stored as 8 bytes * add jdbc * add postgres protocol * Revert "add jdbc" This reverts commit | 2 months ago |