2023年6月21日发布(作者:佚名)
// Article (reconstructed from a garbled scrape): "SQL Server: high-performance bulk
// insert using table-valued parameters".
//
// Summary of the original Chinese intro: the author previously wrote a tool that
// imported a large number of accounts by executing one INSERT per record, which took
// an entire afternoon. Facing an even larger import, the author compares SQL Server
// bulk-load techniques — SqlBulkCopy and table-valued parameters (a SQL Server 2008
// feature) — both of which vastly outperform row-by-row INSERTs, with TVPs giving
// the biggest improvement. The code below is the TVP example from the article.
//
// NOTE(review): identifiers in the scraped source were truncated; this is a faithful
// reconstruction, with the unrecoverable details hedged inline.
using System;
using System.Configuration;
using System.Data;
using System.Data.SqlClient;
using System.IO;
using System.Text;

namespace Account2DB
{
    class Program
    {
        /// <summary>
        /// Bulk-inserts the rows of <paramref name="dt"/> into dbo.NewAccount in a
        /// single round trip, by passing the whole DataTable as a table-valued
        /// parameter (@BulkTvp) to an INSERT ... SELECT statement.
        /// </summary>
        /// <param name="dt">Rows matching the schema from GetTableSchema()
        /// (Account, Ting, Jie). Null or empty tables are ignored.</param>
        public static void TableValuedToDB(DataTable dt)
        {
            // Guard before opening a connection — the original checked this only
            // after Open(), wasting a round trip for empty batches.
            if (dt == null || dt.Rows.Count == 0)
            {
                return;
            }

            const string TSqlStatement =
                "insert into NewAccount (Account,Ting,Jie)" +
                " SELECT nc.Account, nc.Ting, nc.Jie" +
                " FROM @BulkTvp AS nc";

            string connectionString =
                ConfigurationManager.ConnectionStrings["DefaultConnection"].ConnectionString;

            // using-statements guarantee the connection/command are disposed even on
            // failure; the original only closed the connection in a finally block and
            // rethrew with "throw ex;", destroying the stack trace.
            using (SqlConnection sqlConn = new SqlConnection(connectionString))
            using (SqlCommand cmd = new SqlCommand(TSqlStatement, sqlConn))
            {
                SqlParameter catParam = cmd.Parameters.AddWithValue("@BulkTvp", dt);
                catParam.SqlDbType = SqlDbType.Structured;
                // Per the article, the table-valued type was created as "BulkUdt" in
                // the test-environment setup SQL. NOTE(review): the scraped source
                // shows the literal fragment "count" here — confirm the real user-
                // defined table type name against the database before use.
                catParam.TypeName = "dbo.BulkUdt";

                sqlConn.Open();
                cmd.ExecuteNonQuery();
            }
        }

        /// <summary>
        /// Builds an empty DataTable whose columns mirror the table-valued type /
        /// NewAccount target: Account (string), Ting (DateTime), Jie (DateTime).
        /// </summary>
        public static DataTable GetTableSchema()
        {
            DataTable dt = new DataTable();
            dt.Columns.AddRange(new DataColumn[]
            {
                new DataColumn("Account", typeof(string)),
                new DataColumn("Ting", typeof(DateTime)),
                new DataColumn("Jie", typeof(DateTime))
            });
            return dt;
        }

        /// <summary>
        /// Reads a comma-separated account file (path in args[0]), skipping header
        /// lines, and inserts the rows in batches of 10,000 via the TVP. Malformed
        /// lines are echoed to the console and collected into an error file.
        /// </summary>
        static void Main(string[] args)
        {
            Console.WriteLine("正在插入数据,请稍后...");
            try
            {
                // NOTE(review): the scrape shows "t" where the encoding stood —
                // presumably Encoding.Default for a locally produced file; confirm.
                using (StreamReader sr = new StreamReader(args[0], Encoding.Default))
                {
                    StringBuilder error = new StringBuilder();
                    DataTable dt = GetTableSchema();
                    int count = 0;

                    while (!sr.EndOfStream)
                    {
                        string acc = sr.ReadLine();
                        // Skip the header row, separator rows, and blank lines.
                        if (string.IsNullOrEmpty(acc)
                            || acc.StartsWith("账号")
                            || acc.StartsWith("---"))
                        {
                            continue;
                        }

                        var data = acc.Split(',');
                        if (data.Length != 3)
                        {
                            Console.WriteLine("错误数据:" + acc);
                            error.AppendLine(acc);
                            // BUG FIX: the original fell through after logging, so a
                            // malformed line crashed on data[1]/data[2] below.
                            continue;
                        }

                        DataRow r = dt.NewRow();
                        r[0] = data[0].Trim();
                        r[1] = Convert.ToDateTime(data[1]);
                        r[2] = Convert.ToDateTime(data[2]);
                        dt.Rows.Add(r);
                        count++;

                        // Flush every 10,000 accounts as one TVP batch.
                        if (count % 10000 == 0)
                        {
                            TableValuedToDB(dt);
                            Console.WriteLine("已插入:" + count);
                            dt.Rows.Clear();
                        }
                    }

                    // Insert the final partial batch (TableValuedToDB ignores empty
                    // tables, so this is safe when count is an exact multiple).
                    TableValuedToDB(dt);
                    Console.WriteLine("已插入:" + count);
                    dt.Rows.Clear();

                    // NOTE(review): the error-file name was lost in the scrape;
                    // "error.txt" is an assumption — confirm against the original tool.
                    File.WriteAllText(
                        Path.Combine(AppDomain.CurrentDomain.BaseDirectory, "error.txt"),
                        error.ToString(),
                        Encoding.Default);
                }
            }
            catch (Exception ex)
            {
                Console.WriteLine(ex.ToString());
            }
            Console.WriteLine("执行结束,按任意键关闭...");
            Console.ReadLine();
        }
    }
}
2023年6月21日发(作者:)
SQLServer使⽤表值参数,⾼性能批量插⼊数据记得前段时间帮同事写了个解析账号并⼊库的⼩⼯具,来批量导⼊账号信息,账号量相当⼤,程序每读取⼀条记录便执⾏⼀次insert来插⼊数据,整整跑了⼀下午才把账号全部⼊库。今天⼜接到同事类似的需求,不过这次的账号量更⼤,考虑到上次遇到的问题,这次打算采⽤某种⽅案来提⾼插⼊数据的性能。了解了下SQLServer批量插⼊数据的技术,主要有两种:Bulk和表值参数(SQLServer 2008的特性),这两种⽅式相⽐循环使⽤insert插⼊数据,效率和性能明显上升。使⽤表值参数带来的提升更为显著。使⽤表值参数插⼊数据的⼀个例⼦82936373839464748495657using System;using c;using ;using ;using rExpressions;using ;using tions;using del;using ;using ent;using uration;using ;namespace Account2DB{ class Program { public static void TableValuedToDB(DataTable dt) { SqlConnection sqlConn = new SqlConnection(tionStrings["DefaultConnection"].ConnectionString const string TSqlStatement = "insert into NewAccount (Account,Ting,Jie)" + " SELECT t, ," + " FROM @BulkTvp AS nc"; SqlCommand cmd = new SqlCommand(TSqlStatement, sqlConn); SqlParameter catParam = hValue("@BulkTvp", dt); ype = ured; //表值参数的名字叫BulkUdt,在上⾯的建⽴测试环境的SQL中有。
me = "count"; try { (); if (dt != null && != 0) { eNonQuery(); } } catch (Exception ex) { throw ex; } finally { (); } } public static DataTable GetTableSchema() { DataTable dt = new DataTable(); ge(new DataColumn[]{
new DataColumn("Account",typeof(string)),
new DataColumn("Ting",typeof(DateTime)),
new DataColumn("Jie",typeof(DateTime))}); return dt; } 57585966676869767778798687888996979899108109110 }
static void Main(string[] args) { ("正在插⼊数据,请稍后..."); try { StreamReader sr = new StreamReader(args[0], t); StringBuilder error = new StringBuilder(); DataTable dt = GetTableSchema(); int count = 0; while (!tream) { string acc = ne(); if (With("账号") || With("---") || OrEmpty(acc)) { continue; } var data = (','); if (() != 3) { ("错误数据:" + acc); Line(acc); } DataRow r = (); r[0] = data[0].Trim(); r[1] = Time(data[1]); r[2] = Time(data[2]); (r); count++; // 每⼀万个账号作为⼀批账号插⼊ if (count % 10000 == 0) { TableValuedToDB(dt); ("已插⼊:" + count); (); } } // 插⼊剩余账号 TableValuedToDB(dt); ("已插⼊:" + count); (); AllText(e(rectory, ""), ng(), t } catch (Exception ex) { (ng()); } ("执⾏结束,按任意键关闭..."); ne();
} }}
发布评论