Generating 100 Million Rows in a Single SQL Server Table: Testing and Optimization
Author: ych
### Building the Test Data

#### SQL Server Table Creation and Data Generation Scripts

Below is a script that creates a SQL Server table with ten data columns, together with a stored procedure that generates 100 million rows of widely varied data.

##### Table creation script

```
CREATE TABLE LargeDataTable (
    ID BIGINT PRIMARY KEY IDENTITY(1,1),
    CustomerName NVARCHAR(100),
    Email NVARCHAR(100),
    BirthDate DATE,
    RegistrationDate DATETIME,
    AccountBalance DECIMAL(18,2),
    CreditScore INT,
    IsActive BIT,
    LastPurchaseDate DATETIME2,
    PurchaseCount INT,
    LocationGeography GEOGRAPHY
);
```
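While the generation procedure in the next section is running, it helps to have a cheap way to check how many rows have landed. A minimal sketch using the standard `sys.dm_db_partition_stats` DMV, so the count comes from metadata rather than a scan of the 100-million-row table:

```
-- Approximate row count from partition metadata; does not scan the table
SELECT SUM(ps.row_count) AS ApproxRowCount
FROM sys.dm_db_partition_stats AS ps
WHERE ps.object_id = OBJECT_ID('dbo.LargeDataTable')
  AND ps.index_id IN (0, 1);   -- heap or clustered index only
```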
##### Data generation stored procedure

```
CREATE PROCEDURE GenerateLargeData
    @RecordCount INT = 100000000  -- generate 100 million rows by default
AS
BEGIN
    SET NOCOUNT ON;

    DECLARE @BatchSize INT = 10000;   -- rows inserted per batch
    DECLARE @BatchCount INT = @RecordCount / @BatchSize;
    DECLARE @CurrentBatch INT = 0;

    -- Temp table holding sample names and cities
    IF OBJECT_ID('tempdb..#RandomData') IS NOT NULL DROP TABLE #RandomData;
    CREATE TABLE #RandomData (
        FirstName NVARCHAR(50),
        LastName NVARCHAR(50),
        City NVARCHAR(50),
        Country NVARCHAR(50));

    -- Seed it with a mix of names and cities
    INSERT INTO #RandomData VALUES
    ('张', '伟', '北京', '中国'), ('王', '芳', '上海', '中国'), ('李', '娜', '广州', '中国'),
    ('赵', '勇', '深圳', '中国'), ('刘', '洋', '成都', '中国'), ('陈', '杰', '重庆', '中国'),
    ('杨', '敏', '武汉', '中国'), ('黄', '强', '南京', '中国'), ('周', '雪', '杭州', '中国'),
    ('吴', '磊', '西安', '中国'),
    ('James', 'Smith', 'New York', 'USA'), ('Maria', 'Garcia', 'Los Angeles', 'USA'),
    ('John', 'Johnson', 'Chicago', 'USA'), ('Robert', 'Brown', 'Houston', 'USA'),
    ('Michael', 'Davis', 'Phoenix', 'USA'), ('David', 'Miller', 'Philadelphia', 'USA'),
    ('William', 'Wilson', 'San Antonio', 'USA'), ('Richard', 'Moore', 'San Diego', 'USA'),
    ('Charles', 'Taylor', 'Dallas', 'USA'), ('Thomas', 'Anderson', 'San Jose', 'USA'),
    ('Hans', 'Müller', 'Berlin', 'Germany'), ('Peter', 'Schmidt', 'Hamburg', 'Germany'),
    ('Klaus', 'Schneider', 'Munich', 'Germany'), ('Wolfgang', 'Fischer', 'Cologne', 'Germany'),
    ('Helmut', 'Weber', 'Frankfurt', 'Germany');

    -- Numbers helper table
    IF OBJECT_ID('tempdb..#Numbers') IS NOT NULL DROP TABLE #Numbers;

    -- Generate a reliable number sequence with a stacked CTE
    ;WITH L0 AS (SELECT 1 AS c UNION ALL SELECT 1),
          L1 AS (SELECT 1 AS c FROM L0 a CROSS JOIN L0 b),
          L2 AS (SELECT 1 AS c FROM L1 a CROSS JOIN L1 b),
          L3 AS (SELECT 1 AS c FROM L2 a CROSS JOIN L2 b),
          Numbers AS (SELECT ROW_NUMBER() OVER(ORDER BY (SELECT NULL)) AS n FROM L3)
    SELECT TOP 1000 n INTO #Numbers FROM Numbers;

    -- Batched inserts
    WHILE @CurrentBatch < @BatchCount
    BEGIN
        BEGIN TRY
            BEGIN TRANSACTION;

            INSERT INTO LargeDataTable (
                CustomerName, Email, BirthDate, RegistrationDate, AccountBalance,
                CreditScore, IsActive, LastPurchaseDate, PurchaseCount, LocationGeography
            )
            SELECT TOP (@BatchSize)
                -- Customer name: random first/last name combination
                rd.FirstName + rd.LastName AS CustomerName,
                -- Email: name + random number + domain
                LOWER(rd.FirstName + CAST(ABS(CHECKSUM(NEWID())) % 100000 AS NVARCHAR(10))) +
                    CASE WHEN ABS(CHECKSUM(NEWID())) % 10 > 5 THEN '@gmail.com' ELSE '@hotmail.com' END AS Email,
                -- Birth date: random date between 1940 and 2010
                DATEADD(DAY, ABS(CHECKSUM(NEWID())) % 25550, '1940-01-01') AS BirthDate,
                -- Registration date: random datetime between 2010 and 2023
                DATEADD(SECOND, ABS(CHECKSUM(NEWID())) % 410240000, '2010-01-01') AS RegistrationDate,
                -- Account balance: random value up to 1,000,000, occasionally negative
                CASE WHEN ABS(CHECKSUM(NEWID())) % 100 = 1
                     THEN -1 * (ABS(CHECKSUM(NEWID())) % 100000)
                     ELSE ABS(CHECKSUM(NEWID())) % 1000000
                END / 100.0 AS AccountBalance,
                -- Credit score: 300-850, deliberately skewed distribution
                CASE WHEN ABS(CHECKSUM(NEWID())) % 100 < 10 THEN 300 + ABS(CHECKSUM(NEWID())) % 100
                     WHEN ABS(CHECKSUM(NEWID())) % 100 < 30 THEN 400 + ABS(CHECKSUM(NEWID())) % 150
                     WHEN ABS(CHECKSUM(NEWID())) % 100 < 60 THEN 550 + ABS(CHECKSUM(NEWID())) % 150
                     ELSE 700 + ABS(CHECKSUM(NEWID())) % 150
                END AS CreditScore,
                -- IsActive: active with ~70% probability
                CASE WHEN ABS(CHECKSUM(NEWID())) % 100 < 70 THEN 1 ELSE 0 END AS IsActive,
                -- Last purchase date: NULL with ~30% probability,
                -- otherwise a random date after the start of the registration window
                CASE WHEN ABS(CHECKSUM(NEWID())) % 100 < 30 THEN NULL
                     ELSE DATEADD(DAY, ABS(CHECKSUM(NEWID())) % 3650,
                                  DATEADD(SECOND, ABS(CHECKSUM(NEWID())) % 410240000, '2010-01-01'))
                END AS LastPurchaseDate,
                -- Purchase count: 0-1000, deliberately skewed distribution
                CASE WHEN ABS(CHECKSUM(NEWID())) % 100 < 40 THEN ABS(CHECKSUM(NEWID())) % 10
                     WHEN ABS(CHECKSUM(NEWID())) % 100 < 70 THEN ABS(CHECKSUM(NEWID())) % 100
                     WHEN ABS(CHECKSUM(NEWID())) % 100 < 90 THEN ABS(CHECKSUM(NEWID())) % 500
                     ELSE ABS(CHECKSUM(NEWID())) % 1000
                END AS PurchaseCount,
                -- Location: randomly generated geography point
                CASE WHEN ABS(CHECKSUM(NEWID())) % 100 < 80
                     THEN geography::Point(
                              20 + (ABS(CHECKSUM(NEWID())) % 50),  -- latitude  (20-70)
                              70 + (ABS(CHECKSUM(NEWID())) % 60),  -- longitude (70-130)
                              4326)
                     ELSE NULL
                END AS LocationGeography
            FROM #RandomData rd
            CROSS JOIN #Numbers nums
            CROSS JOIN (SELECT TOP 10 n FROM #Numbers) nums2
            ORDER BY NEWID();

            COMMIT TRANSACTION;

            -- Progress report
            IF @CurrentBatch % 100 = 0
                PRINT 'Inserted ' + CAST(@CurrentBatch * @BatchSize AS VARCHAR(20)) + ' rows';

            SET @CurrentBatch = @CurrentBatch + 1;
        END TRY
        BEGIN CATCH
            ROLLBACK TRANSACTION;
            PRINT 'Error: ' + ERROR_MESSAGE();
        END CATCH
    END

    PRINT 'Data generation finished, ' + CAST(@RecordCount AS VARCHAR(20)) + ' rows inserted in total';
END;
```

##### Executing the stored procedure

```
-- Generate 100 million rows
EXEC GenerateLargeData @RecordCount = 100000000;
```

##### Single-row insert test script

```
-- Insert a single row
INSERT INTO LargeDataTable (
    CustomerName, Email, BirthDate, RegistrationDate, AccountBalance,
    CreditScore, IsActive, LastPurchaseDate, PurchaseCount, LocationGeography
)
VALUES (
    -- Customer name (Chinese or English)
    CASE WHEN ABS(CHECKSUM(NEWID())) % 2 = 0 THEN '张伟' ELSE 'John Smith' END,
    -- Email (randomly generated)
    LOWER(
        CASE WHEN ABS(CHECKSUM(NEWID())) % 2 = 0 THEN 'zhangwei' ELSE 'john.smith' END +
        CAST(ABS(CHECKSUM(NEWID())) % 10000 AS NVARCHAR(10)) +
        CASE WHEN ABS(CHECKSUM(NEWID())) % 2 = 0 THEN '@gmail.com' ELSE '@company.com' END
    ),
    -- Birth date (random, 1940-2000)
    DATEADD(DAY, ABS(CHECKSUM(NEWID())) % 21900, '1940-01-01'),
    -- Registration date (random, 2010-2023)
    DATEADD(DAY, ABS(CHECKSUM(NEWID())) % 4745, '2010-01-01'),
    -- Account balance (random, -50000 to 500000)
    (ABS(CHECKSUM(NEWID())) % 550000 - 50000) / 100.0,
    -- Credit score (random, 300-850)
    300 + ABS(CHECKSUM(NEWID())) % 551,
    -- IsActive (active with ~70% probability)
    CASE WHEN ABS(CHECKSUM(NEWID())) % 100 < 70 THEN 1 ELSE 0 END,
    -- Last purchase date (NULL with ~30% probability)
    CASE WHEN ABS(CHECKSUM(NEWID())) % 100 < 30 THEN NULL
         ELSE DATEADD(DAY, ABS(CHECKSUM(NEWID())) % 3650, '2015-01-01') END,
    -- Purchase count (random, 0-1000)
    ABS(CHECKSUM(NEWID())) % 1001,
    -- Location (NULL with ~20% probability)
    CASE WHEN ABS(CHECKSUM(NEWID())) % 100 < 80
         THEN geography::Point(
                  20 + (ABS(CHECKSUM(NEWID())) % 50),  -- latitude  (20-70)
                  70 + (ABS(CHECKSUM(NEWID())) % 60),  -- longitude (70-130)
                  4326)
         ELSE NULL END
);

-- Inspect the inserted row
SELECT TOP 1 * FROM LargeDataTable ORDER BY ID DESC;
```
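Before moving on to multi-row inserts, it can be useful to put a rough number on per-row insert cost. A minimal sketch (the literal values are placeholders; the measured time depends heavily on hardware, logging, and indexes) that times 1,000 single-row inserts so you can compare it against the batched approaches:

```
-- Rough timing: 1,000 single-row inserts
DECLARE @t0 DATETIME2 = SYSDATETIME(), @i INT = 0;

WHILE @i < 1000
BEGIN
    INSERT INTO LargeDataTable (CustomerName, Email, BirthDate, RegistrationDate,
                                AccountBalance, CreditScore, IsActive,
                                LastPurchaseDate, PurchaseCount, LocationGeography)
    VALUES (N'计时测试', 'timing@example.com', '1980-01-01', '2020-01-01',
            100.00, 700, 1, NULL, 0, NULL);
    SET @i = @i + 1;
END

PRINT 'Row-by-row: ' + CAST(DATEDIFF(MILLISECOND, @t0, SYSDATETIME()) AS VARCHAR(20)) + ' ms';
```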
##### Inserting five rows at once

```
-- Insert five different rows
INSERT INTO LargeDataTable (CustomerName, Email, BirthDate, RegistrationDate, AccountBalance,
                            CreditScore, IsActive, LastPurchaseDate, PurchaseCount, LocationGeography)
SELECT TOP 5
    CASE WHEN n % 5 = 0 THEN '王芳'
         WHEN n % 5 = 1 THEN 'Maria Garcia'
         WHEN n % 5 = 2 THEN '李娜'
         WHEN n % 5 = 3 THEN 'Robert Brown'
         ELSE '杨敏' END,
    CASE WHEN n % 5 = 0 THEN 'wangfang' + CAST(n*100 AS VARCHAR) + '@163.com'
         WHEN n % 5 = 1 THEN 'maria.garcia' + CAST(n*50 AS VARCHAR) + '@gmail.com'
         WHEN n % 5 = 2 THEN 'lina' + CAST(n*20 AS VARCHAR) + '@qq.com'
         WHEN n % 5 = 3 THEN 'robert.brown' + CAST(n*10 AS VARCHAR) + '@company.com'
         ELSE 'yangmin' + CAST(n*5 AS VARCHAR) + '@hotmail.com' END,
    DATEADD(DAY, n*500, '1950-01-01'),
    DATEADD(DAY, n*100, '2015-01-01'),
    CASE WHEN n % 3 = 0 THEN 10000.00 + n*100
         WHEN n % 3 = 1 THEN -5000.00 + n*50
         ELSE 500.50 + n*10 END,
    CASE WHEN n % 10 = 0 THEN 300 + n*10
         WHEN n % 10 = 1 THEN 400 + n*5
         WHEN n % 10 = 2 THEN 500 + n*3
         ELSE 600 + n END,
    CASE WHEN n % 10 < 7 THEN 1 ELSE 0 END,
    CASE WHEN n % 10 < 3 THEN NULL ELSE DATEADD(DAY, n*30, '2018-01-01') END,
    n*10 % 1000,
    CASE WHEN n % 5 = 0 THEN NULL ELSE geography::Point(30 + n%20, 100 + n%30, 4326) END
FROM (SELECT ROW_NUMBER() OVER (ORDER BY (SELECT NULL)) AS n
      FROM master.dbo.spt_values WHERE number < 100) AS numbers
ORDER BY NEWID();

-- Inspect the five newest rows
SELECT TOP 5 * FROM LargeDataTable ORDER BY ID DESC;
```

#### Optimizing LIKE Queries in SQL Server

LIKE queries are a common source of performance problems in SQL Server, especially on large tables. The following covers the main options for speeding them up.

##### 1. Basic techniques

###### 1. Avoid leading wildcards

```
-- Slow: a leading wildcard prevents index use
SELECT * FROM Customers WHERE Name LIKE '%张%'

-- Better: use a trailing wildcard where possible
SELECT * FROM Customers WHERE Name LIKE '张%'
```

###### 2. Use a full-text index (best option)

```
-- Create a full-text catalog
CREATE FULLTEXT CATALOG ftCatalog AS DEFAULT;

-- Create a full-text index
CREATE FULLTEXT INDEX ON Products(ProductName)
KEY INDEX PK_Products ON ftCatalog;

-- Use CONTAINS instead of LIKE
SELECT * FROM Products
WHERE CONTAINS(ProductName, '"智能手机" OR "平板"')
```

##### 2. Advanced techniques

###### 1. Computed column plus index

```
-- Add a computed column holding the reversed string
-- (turns a leading % into a trailing % that can use an index)
ALTER TABLE Customers ADD NameReverse AS REVERSE(Name)

-- Index the computed column
CREATE INDEX IX_Customers_NameReverse ON Customers(NameReverse)

-- Rewritten query
SELECT * FROM Customers WHERE REVERSE(Name) LIKE REVERSE('%张')
```

###### 2. STRING_SPLIT (SQL Server 2016+)

```
-- Useful for multi-keyword searches
SELECT DISTINCT p.*
FROM Products p
CROSS APPLY STRING_SPLIT('手机,智能,5G', ',') s
WHERE p.ProductName LIKE '%' + s.value + '%'
```

#### Performance comparison

| Method | Query time on 1M rows | Pros | Cons |
| --- | --- | --- | --- |
| Plain LIKE with leading wildcard | 5-15 s | Simple | Poor performance |
| Trailing wildcard | 0.1-1 s | Can use an index | Limited matching |
| Full-text index | 0.01-0.1 s | High performance | Needs maintenance |
| Computed column | 0.1-0.5 s | Supports a leading % | Extra storage |
| External search engine | 0.001-0.01 s | Extremely fast | Added system complexity (separate Elasticsearch deployment) |

#### Full-Text Index vs. Ordinary Nonclustered Index

A full-text index and an ordinary nonclustered index are fundamentally different things in SQL Server. The main differences:

##### 1. Core differences

| Aspect | Full-text index | Ordinary nonclustered index |
| --- | --- | --- |
| Purpose | Advanced searching of text content | Fast lookup of specific values or ranges |
| Index content | Tokenized words and their positions | Exact, ordered representation of column values |
| Search style | Semantic search, fuzzy matching, thesaurus | Exact matches or range queries |
| Data types | Text types such as CHAR/VARCHAR/NVARCHAR/TEXT | Any indexable type |
| Wildcard support | Built in (no LIKE syntax needed) | Only prefix wildcards (e.g. 'abc%') |
| Language support | Word breaking and stemming for many languages | None |

##### 2. Implementation differences

###### 1. Full-text index workflow

```
-- The full-text index is maintained by a separate component
CREATE FULLTEXT CATALOG ftCatalog AS DEFAULT;
CREATE FULLTEXT INDEX ON Products(ProductName)
KEY INDEX PK_Products ON ftCatalog
WITH STOPLIST = SYSTEM;  -- use the system stoplist

-- Searches use dedicated predicates
SELECT * FROM Products
WHERE CONTAINS(ProductName, '"智能手机" NEAR "防水"');
```

###### 2. Ordinary nonclustered index workflow

```
-- Regular B-tree index
CREATE INDEX IX_Products_Name ON Products(ProductName);

-- Searches use standard syntax
SELECT * FROM Products
WHERE ProductName LIKE '智能%';  -- only prefix matches can use the index effectively
```

##### 3. Performance example

Searching for "高端智能手机" in a table with one million product rows:

| Query | Elapsed time | Access method | Uses an index |
| --- | --- | --- | --- |
| LIKE '%智能手机%' | 1200 ms | Full table scan | No |
| LIKE '智能%' | 15 ms | Index range scan | Yes |
| CONTAINS(..., '智能手机') | 8 ms | Full-text index lookup | Yes |
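The timings above are indicative; the simplest way to reproduce the comparison on your own data is to run the competing predicates with I/O and time statistics enabled. A minimal sketch against the Products examples above (numbers will vary with data size and hardware):

```
SET STATISTICS IO ON;
SET STATISTICS TIME ON;

-- Leading wildcard: expect a full scan
SELECT COUNT(*) FROM Products WHERE ProductName LIKE N'%智能手机%';

-- Prefix match: expect a seek/range scan if ProductName is indexed
SELECT COUNT(*) FROM Products WHERE ProductName LIKE N'智能%';

-- Full-text search: expect a full-text index lookup
SELECT COUNT(*) FROM Products WHERE CONTAINS(ProductName, N'"智能手机"');

SET STATISTICS IO OFF;
SET STATISTICS TIME OFF;
```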
### Optimization Case Study: the 100-Million-Row LargeDataTable

Based on the LargeDataTable structure defined above (100M+ rows), the following is a complete optimization approach covering index design, query optimization, and performance monitoring.

#### 1. Index design

```
-- 1. Clustered index (already exists: created by the primary key on ID)

-- 2. Nonclustered indexes for common predicates
CREATE INDEX IX_LargeDataTable_CustomerName ON LargeDataTable(CustomerName);
CREATE INDEX IX_LargeDataTable_Email ON LargeDataTable(Email);
CREATE INDEX IX_LargeDataTable_CreditScore ON LargeDataTable(CreditScore);
CREATE INDEX IX_LargeDataTable_IsActive ON LargeDataTable(IsActive)
WHERE IsActive = 1;  -- filtered index: only active customers

-- 3. Composite index for multi-column predicates
CREATE INDEX IX_LargeDataTable_ActiveCredit ON LargeDataTable(IsActive, CreditScore)
WHERE IsActive = 1;

-- 4. Date-range indexes
CREATE INDEX IX_LargeDataTable_RegistrationDate ON LargeDataTable(RegistrationDate);
CREATE INDEX IX_LargeDataTable_LastPurchaseDate ON LargeDataTable(LastPurchaseDate);

-- 5. Covering index with included columns
CREATE INDEX IX_LargeDataTable_Covering1 ON LargeDataTable(CreditScore)
INCLUDE (CustomerName, Email, AccountBalance);

-- 6. Spatial index
-- (BOUNDING_BOX is only valid for GEOMETRY columns; a GEOGRAPHY column uses a grid scheme)
CREATE SPATIAL INDEX SIX_LargeDataTable_Location ON LargeDataTable(LocationGeography)
USING GEOGRAPHY_AUTO_GRID;
```

#### 2. Typical query optimizations

##### Case 1: paging through active customers

```
-- Before (full scan)
SELECT * FROM LargeDataTable
WHERE IsActive = 1
ORDER BY CreditScore DESC
OFFSET 10000 ROWS FETCH NEXT 50 ROWS ONLY;

-- After (filtered composite index, forced via hint)
SELECT * FROM LargeDataTable WITH(INDEX(IX_LargeDataTable_ActiveCredit))
WHERE IsActive = 1
ORDER BY CreditScore DESC
OFFSET 10000 ROWS FETCH NEXT 50 ROWS ONLY;
```

##### Case 2: geospatial query

```
-- Customers within 100 km of a point (uses the spatial index)
DECLARE @center GEOGRAPHY = geography::Point(39.9, 116.4, 4326);  -- Beijing

SELECT
    CustomerName,
    Email,
    @center.STDistance(LocationGeography)/1000 AS DistanceKM
FROM LargeDataTable WITH(INDEX(SIX_LargeDataTable_Location))
WHERE LocationGeography IS NOT NULL
  AND @center.STDistance(LocationGeography) <= 100000  -- 100 km
ORDER BY DistanceKM;
```

##### Case 3: time-range statistics

```
-- Registered users per month (uses the date index)
SELECT
    YEAR(RegistrationDate)  AS RegYear,
    MONTH(RegistrationDate) AS RegMonth,
    COUNT(*)                AS UserCount,
    AVG(CreditScore)        AS AvgCreditScore
FROM LargeDataTable WITH(INDEX(IX_LargeDataTable_RegistrationDate))
WHERE RegistrationDate BETWEEN '2020-01-01' AND '2023-12-31'
GROUP BY YEAR(RegistrationDate), MONTH(RegistrationDate)
ORDER BY RegYear, RegMonth;
```

#### 3. Partitioning (for very large tables)

```
-- 1. Partition function (ranges on registration date)
CREATE PARTITION FUNCTION pf_RegistrationDateRange (DATETIME)
AS RANGE RIGHT FOR VALUES (
    '2015-01-01', '2016-01-01', '2017-01-01', '2018-01-01', '2019-01-01',
    '2020-01-01', '2021-01-01', '2022-01-01', '2023-01-01'
);

-- 2. Partition scheme
CREATE PARTITION SCHEME ps_RegistrationDate
AS PARTITION pf_RegistrationDateRange ALL TO ([PRIMARY]);

-- 3. Rebuild the table on the partition scheme (requires creating a new table and migrating the data)
-- Migration steps omitted here...
```
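Once the table has actually been rebuilt on the partition scheme (the migration itself is omitted above), it is worth confirming that rows land in the expected partitions. A minimal sketch using the `$PARTITION` function against the partition function defined above; note that, unlike a metadata query against sys.partitions, this version scans the table:

```
-- Row distribution and date boundaries per partition of pf_RegistrationDateRange
SELECT
    $PARTITION.pf_RegistrationDateRange(RegistrationDate) AS PartitionNumber,
    MIN(RegistrationDate) AS MinDate,
    MAX(RegistrationDate) AS MaxDate,
    COUNT_BIG(*)          AS RowsInPartition
FROM LargeDataTable
GROUP BY $PARTITION.pf_RegistrationDateRange(RegistrationDate)
ORDER BY PartitionNumber;
```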
#### 4. Performance monitoring scripts

```
-- 1. Find missing indexes
SELECT
    CONVERT(DECIMAL(10,2), migs.avg_total_user_cost * migs.avg_user_impact *
        (migs.user_seeks + migs.user_scans)) AS improvement_measure,
    DB_NAME(mid.database_id) AS database_name,
    OBJECT_NAME(mid.object_id, mid.database_id) AS table_name,
    'CREATE INDEX [IX_' + OBJECT_NAME(mid.object_id, mid.database_id) + '_'
        + REPLACE(REPLACE(REPLACE(ISNULL(mid.equality_columns,''),', ','_'),'[',''),']','')
        + CASE WHEN mid.equality_columns IS NOT NULL AND mid.inequality_columns IS NOT NULL THEN '_' ELSE '' END
        + REPLACE(REPLACE(REPLACE(ISNULL(mid.inequality_columns,''),', ','_'),'[',''),']','')
        + ']'
        + ' ON ' + mid.statement
        + ' (' + ISNULL(mid.equality_columns,'')
        + CASE WHEN mid.equality_columns IS NOT NULL AND mid.inequality_columns IS NOT NULL THEN ',' ELSE '' END
        + ISNULL(mid.inequality_columns,'') + ')'
        + ISNULL(' INCLUDE (' + mid.included_columns + ')', '') AS create_index_statement,
    migs.unique_compiles,
    migs.user_seeks,
    migs.user_scans,
    migs.last_user_seek,
    migs.last_user_scan,
    migs.avg_total_user_cost,
    migs.avg_user_impact
FROM sys.dm_db_missing_index_groups mig
INNER JOIN sys.dm_db_missing_index_group_stats migs ON migs.group_handle = mig.index_group_handle
INNER JOIN sys.dm_db_missing_index_details mid ON mig.index_handle = mid.index_handle
WHERE mid.database_id = DB_ID()
-- AND OBJECT_NAME(mid.object_id, mid.database_id) = 'LargeDataTable'  -- optionally restrict to one table
ORDER BY improvement_measure DESC;

-- 2. Monitor index usage
SELECT
    OBJECT_NAME(i.object_id) AS TableName,
    i.name AS IndexName,
    i.type_desc AS IndexType,
    user_seeks,
    user_scans,
    user_lookups,
    user_updates AS Writes,
    last_user_seek,
    last_user_scan
FROM sys.indexes i
LEFT JOIN sys.dm_db_index_usage_stats s
    ON s.object_id = i.object_id AND s.index_id = i.index_id
WHERE OBJECT_NAME(i.object_id) = 'LargeDataTable'
ORDER BY user_seeks + user_scans DESC;
```

#### 5. Maintenance plan recommendations

```
-- 1. Index rebuild (weekly)
ALTER INDEX ALL ON LargeDataTable REBUILD WITH (ONLINE = OFF, FILLFACTOR = 90);

-- 2. Statistics update (daily)
EXEC sp_updatestats;

-- 3. Fragmentation check
SELECT
    OBJECT_NAME(ind.object_id) AS TableName,
    ind.name AS IndexName,
    indexstats.avg_fragmentation_in_percent
FROM sys.dm_db_index_physical_stats(DB_ID(), OBJECT_ID('LargeDataTable'), NULL, NULL, NULL) indexstats
JOIN sys.indexes ind
    ON ind.object_id = indexstats.object_id AND ind.index_id = indexstats.index_id
WHERE indexstats.avg_fragmentation_in_percent > 30
ORDER BY indexstats.avg_fragmentation_in_percent DESC;
```

#### 6. Key takeaways

##### Index strategy

- Create dedicated indexes for high-frequency predicates
- Use included columns to avoid key lookups
- Create a spatial index for spatial data

##### Query optimization

- Avoid SELECT *; query only the columns you need
- Page large result sets
- Use index hints to force the best index for critical queries

##### Partitioning

- Partition time-series data by date
- Consider horizontal partitioning for very large volumes

##### Regular maintenance

- Rebuild indexes to reduce fragmentation
- Keep statistics current so the optimizer can build good plans
- Monitor index usage and drop indexes that are never used

### Dropping Indexes in SQL Server

To remove the indexes created on LargeDataTable above, use the following commands.

#### 1. Basic syntax

```
DROP INDEX [index_name] ON [table_name];
```

#### 2. Drop commands for the indexes created above

##### 1. Nonclustered indexes

```
DROP INDEX IX_LargeDataTable_CustomerName ON LargeDataTable;
DROP INDEX IX_LargeDataTable_Email ON LargeDataTable;
DROP INDEX IX_LargeDataTable_CreditScore ON LargeDataTable;
DROP INDEX IX_LargeDataTable_IsActive ON LargeDataTable;
```

##### 2. Composite index

```
DROP INDEX IX_LargeDataTable_ActiveCredit ON LargeDataTable;
```

##### 3. Date-range indexes

```
DROP INDEX IX_LargeDataTable_RegistrationDate ON LargeDataTable;
DROP INDEX IX_LargeDataTable_LastPurchaseDate ON LargeDataTable;
```

##### 4. Covering index

```
DROP INDEX IX_LargeDataTable_Covering1 ON LargeDataTable;
```

##### 5. Spatial index

```
DROP INDEX SIX_LargeDataTable_Location ON LargeDataTable;
```
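If you are not sure whether an index is still needed, disabling it is a reversible alternative to dropping it: the metadata stays in place, the index simply stops being maintained and used, and it can be brought back with a rebuild. A minimal sketch using one of the indexes above:

```
-- Disable the index (it is no longer maintained or used by queries)
ALTER INDEX IX_LargeDataTable_CustomerName ON LargeDataTable DISABLE;

-- If it turns out to be needed after all, rebuild to re-enable it
ALTER INDEX IX_LargeDataTable_CustomerName ON LargeDataTable REBUILD;
```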
#### 3. Dropping all non-primary-key indexes at once

To drop every nonclustered index on the table in one pass (keeping the clustered primary key index):

```
DECLARE @sql NVARCHAR(MAX) = '';

SELECT @sql = @sql + 'DROP INDEX ' + QUOTENAME(i.name) + ' ON '
    + QUOTENAME(SCHEMA_NAME(t.schema_id)) + '.' + QUOTENAME(t.name) + ';' + CHAR(13)
FROM sys.indexes i
JOIN sys.tables t ON i.object_id = t.object_id
WHERE i.name IS NOT NULL
  AND i.type_desc = 'NONCLUSTERED'
  AND t.name = 'LargeDataTable'
  AND i.is_primary_key = 0;

PRINT @sql;                   -- print the statements for review first
-- EXEC sp_executesql @sql;   -- execute once you have verified them
```

#### 4. Notes and caveats

Primary key index: to drop the clustered index created by the primary key, drop the primary key constraint first:

```
ALTER TABLE LargeDataTable DROP CONSTRAINT [PK_LargeDataTable];
```

System-generated names: if an index name was generated automatically, look it up first:

```
SELECT name, type_desc
FROM sys.indexes
WHERE object_id = OBJECT_ID('LargeDataTable');
```

Impact of dropping:

- Dropping an index does not delete any table data
- Queries that relied on the index may become slower
- Run drop operations during off-peak hours

Spatial index: a spatial index is dropped with the ordinary DROP INDEX syntax; no special parameters are needed.

Transactions: you can drop inside a transaction so the change can be rolled back:

```
BEGIN TRANSACTION;
DROP INDEX IX_LargeDataTable_CustomerName ON LargeDataTable;
-- after checking the impact, COMMIT or ROLLBACK
```

Before dropping an index, it is worth confirming how often it is actually used:

```
SELECT
    OBJECT_NAME(i.object_id) AS table_name,
    i.name AS index_name,
    s.user_seeks,
    s.user_scans,
    s.user_lookups,
    s.last_user_seek,
    s.last_user_scan
FROM sys.indexes i
LEFT JOIN sys.dm_db_index_usage_stats s
    ON s.object_id = i.object_id
   AND s.index_id = i.index_id
   AND s.database_id = DB_ID()
WHERE i.object_id = OBJECT_ID('LargeDataTable');
```

### Analyzing the Table's Workload

#### Query analysis

```
SELECT TOP 20
    qs.execution_count,
    qs.total_logical_reads/qs.execution_count AS avg_logical_reads,
    qs.total_elapsed_time/qs.execution_count AS avg_elapsed_time,
    SUBSTRING(qt.text, (qs.statement_start_offset/2)+1,
        ((CASE qs.statement_end_offset
              WHEN -1 THEN DATALENGTH(qt.text)
              ELSE qs.statement_end_offset
          END - qs.statement_start_offset)/2)+1) AS query_text,
    qp.query_plan
FROM sys.dm_exec_query_stats qs
CROSS APPLY sys.dm_exec_sql_text(qs.sql_handle) qt
CROSS APPLY sys.dm_exec_query_plan(qs.plan_handle) qp
WHERE qt.text LIKE '%LargeDataTable%'
ORDER BY qs.total_logical_reads DESC;
```

### Summary

This plan gives the 100M+ row LargeDataTable an end-to-end treatment, from index design through query optimization, and can improve query performance substantially. In practice, adjust it to your actual query patterns and data distribution.
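As a follow-up to the plan-cache query in the analysis section: on SQL Server 2016 and later, Query Store keeps a persisted history of query performance that survives cache eviction and restarts. A minimal sketch, assuming Query Store is not yet enabled on the database (the catalog views and options used are the standard Query Store ones):

```
-- Enable Query Store on the current database (SQL Server 2016+)
ALTER DATABASE CURRENT SET QUERY_STORE = ON;
ALTER DATABASE CURRENT SET QUERY_STORE (OPERATION_MODE = READ_WRITE);

-- Most expensive statements touching LargeDataTable, by total duration
SELECT TOP 20
    qt.query_sql_text,
    SUM(rs.count_executions)                   AS executions,
    SUM(rs.avg_duration * rs.count_executions) AS total_duration_us
FROM sys.query_store_query_text AS qt
JOIN sys.query_store_query AS q  ON q.query_text_id = qt.query_text_id
JOIN sys.query_store_plan  AS p  ON p.query_id = q.query_id
JOIN sys.query_store_runtime_stats AS rs ON rs.plan_id = p.plan_id
WHERE qt.query_sql_text LIKE '%LargeDataTable%'
GROUP BY qt.query_sql_text
ORDER BY total_duration_us DESC;
```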