作为一名在武汉光谷摸爬滚打多年的C#开发者,最近接到一个看似简单实则暗藏杀机的需求:实现2GB大文件的分块上传功能。这个需求背后隐藏着几个技术痛点:
最初尝试使用WebUploader,但在实际测试中发现几个致命问题:
经过对比测试,最终选择了Uppy.io作为替代方案,主要基于以下考虑:
后端采用.NET Core 3.1+,主要考虑因素:
// Uppy instance: uploads start manually, single-file cap of 2 GB.
const uppy = new Uppy({
  autoProceed: false,
  restrictions: {
    maxFileSize: 2147483648, // 2 GB, in bytes
    allowedFileTypes: ['*/*']
  }
})

// Dashboard UI rendered inline into the page container.
uppy.use(Dashboard, {
  inline: true,
  target: '#upload-container',
  showProgressDetails: true
})

// Upload transport.
// NOTE(review): @uppy/xhr-upload does not document `chunkSize` or
// `retryDelays` options — chunked/resumable uploads are the Tus plugin's
// (@uppy/tus) feature. Confirm which plugin is actually bundled here,
// otherwise these two options are silently ignored.
uppy.use(XHRUpload, {
  endpoint: '/api/upload-chunk',
  chunkSize: 5 * 1024 * 1024, // intended 5 MB chunks
  retryDelays: [1000, 3000, 5000],
  headers: {
    'X-Requested-With': 'XMLHttpRequest'
  }
})
利用IndexedDB存储上传状态:
// Persist upload state in IndexedDB so an interrupted upload can resume
// after a page reload.
// NOTE(review): Uppy ships no official `IndexedDB` plugin or
// `restoreFilesFromIndexedDB()` API — the documented restore mechanism is
// @uppy/golden-retriever. Verify this plugin/method exists in the project
// before relying on it.
uppy.use(IndexedDB, {
  dbName: 'UploadDB',
  storeName: 'chunks'
})

// Restore any previously saved upload state.
uppy.restoreFilesFromIndexedDB().then(() => {
  console.log('恢复上传状态成功')
})
/// <summary>
/// Receives one chunk of a large upload and stores it as
/// uploads/{fileHash}/{chunkIndex}.part, then records progress.
/// </summary>
/// <param name="file">The chunk payload.</param>
/// <param name="fileHash">Content hash identifying the whole file; also used as the chunk directory name.</param>
/// <param name="chunkIndex">Zero-based index of this chunk.</param>
/// <param name="totalChunks">Total number of chunks expected for the file.</param>
[HttpPost("upload-chunk")]
public async Task<IActionResult> UploadChunk(
    IFormFile file,
    [FromForm] string fileHash,
    [FromForm] int chunkIndex,
    [FromForm] int totalChunks)
{
    // Local guard: fileHash is client-supplied and becomes a path segment,
    // so only a plain hex digest is acceptable (blocks "../" traversal).
    static bool IsHexDigest(string s)
    {
        if (string.IsNullOrEmpty(s)) return false;
        foreach (var c in s)
            if (!Uri.IsHexDigit(c)) return false;
        return true;
    }

    if (file == null || file.Length == 0)
        return BadRequest("无效的文件");
    if (!IsHexDigest(fileHash))
        return BadRequest("无效的文件标识");
    if (chunkIndex < 0 || totalChunks <= 0 || chunkIndex >= totalChunks)
        return BadRequest("无效的分片序号");

    try
    {
        // Ensure the per-file chunk directory exists.
        var chunkDir = Path.Combine("uploads", fileHash);
        Directory.CreateDirectory(chunkDir);

        // Persist the chunk; FileMode.Create overwrites a retried chunk.
        var chunkPath = Path.Combine(chunkDir, $"{chunkIndex}.part");
        await using var stream = new FileStream(chunkPath, FileMode.Create);
        await file.CopyToAsync(stream);

        // Record progress so the client can resume after interruption.
        await _uploadService.UpdateProgress(fileHash, chunkIndex, totalChunks);
        return Ok(new { success = true });
    }
    catch (Exception ex)
    {
        _logger.LogError(ex, "分片上传失败");
        return StatusCode(500, "分片上传失败");
    }
}
/// <summary>
/// Concatenates all uploaded chunks (uploads/{FileHash}/{i}.part, in index
/// order) into the final file, deletes the chunk directory, and marks the
/// upload task complete.
/// </summary>
[HttpPost("merge-chunks")]
public async Task<IActionResult> MergeChunks(
    [FromBody] MergeRequest request)
{
    try
    {
        var chunkDir = Path.Combine("uploads", request.FileHash);

        // FileName is client-supplied: strip any directory components so a
        // value like "..\..\evil.exe" cannot escape the uploads folder.
        var safeName = Path.GetFileName(request.FileName);
        var finalPath = Path.Combine("uploads", safeName);

        // Stream chunks sequentially into the target file. This keeps memory
        // flat (one copy buffer) regardless of total file size.
        // The original MemoryMappedFile.CreateFromFile(path, mode, FileSize)
        // call does not compile — the third parameter of that overload is the
        // map *name* (string), not the capacity — and a memory map buys
        // nothing for a purely sequential write anyway.
        await using (var output = new FileStream(
            finalPath, FileMode.Create, FileAccess.Write, FileShare.None))
        {
            for (int i = 0; i < request.TotalChunks; i++)
            {
                var chunkPath = Path.Combine(chunkDir, $"{i}.part");
                await using (var chunk = new FileStream(
                    chunkPath, FileMode.Open, FileAccess.Read, FileShare.Read))
                {
                    await chunk.CopyToAsync(output);
                }

                // Report merge progress after each chunk so the client can poll.
                await _uploadService.UpdateMergeProgress(
                    request.FileHash,
                    i,
                    request.TotalChunks);
            }
        }

        // All chunks merged: remove the staging directory and finalize.
        Directory.Delete(chunkDir, true);
        await _uploadService.CompleteUpload(request.FileHash);
        return Ok(new { success = true });
    }
    catch (Exception ex)
    {
        _logger.LogError(ex, "文件合并失败");
        return StatusCode(500, "文件合并失败");
    }
}
-- Upload task tracking: one row per file, identified by its content hash.
CREATE TABLE UploadTasks (
    Id UNIQUEIDENTIFIER PRIMARY KEY DEFAULT NEWID(),
    FileHash NVARCHAR(64) NOT NULL,
    FileName NVARCHAR(255) NOT NULL,
    FileSize BIGINT NOT NULL,
    TotalChunks INT NOT NULL,
    UploadedChunks INT NOT NULL DEFAULT 0,
    MergedChunks INT NOT NULL DEFAULT 0,
    Status TINYINT NOT NULL DEFAULT 0, -- 0=uploading, 1=upload done, 2=merging, 3=merge done, 4=failed
    CreatedAt DATETIME2 NOT NULL DEFAULT SYSUTCDATETIME(),
    UpdatedAt DATETIME2 NOT NULL DEFAULT SYSUTCDATETIME(),
    CONSTRAINT UQ_FileHash UNIQUE (FileHash)
);

CREATE INDEX IX_UploadTasks_Status ON UploadTasks (Status);
-- No separate index on FileHash: the UQ_FileHash unique constraint already
-- creates a unique index on that column, so IX_UploadTasks_FileHash was a
-- redundant duplicate and has been dropped.
-- Enable FILESTREAM access (2 = T-SQL and Win32 streaming access).
EXEC sp_configure 'filestream access level', 2;
RECONFIGURE;
GO

-- Filegroup that will hold FILESTREAM data.
ALTER DATABASE UploadDB
ADD FILEGROUP FileStreamGroup CONTAINS FILESTREAM;
GO

-- Point the filegroup at an on-disk directory.
ALTER DATABASE UploadDB
ADD FILE (
    NAME = 'UploadFS',
    FILENAME = 'C:\Data\UploadFS'
) TO FILEGROUP FileStreamGroup;
GO

-- FILESTREAM table: requires a ROWGUIDCOL column with a UNIQUE constraint.
-- DEFAULT NEWID() added so inserts need not supply the row GUID themselves.
CREATE TABLE FileRecords (
    FileId UNIQUEIDENTIFIER ROWGUIDCOL NOT NULL UNIQUE DEFAULT NEWID(),
    FileData VARBINARY(MAX) FILESTREAM NOT NULL,
    FileName NVARCHAR(255) NOT NULL,
    UploadTaskId UNIQUEIDENTIFIER NOT NULL,
    FOREIGN KEY (UploadTaskId) REFERENCES UploadTasks(Id)
);
处理大文件时,传统文件流操作可能导致内存压力过大。采用以下优化策略:
实现客户端速率限制中间件:
/// <summary>
/// Per-client-IP rate limiting for the upload API: at most 100 requests per
/// minute, counted in Redis under a key that expires one minute after the
/// first hit.
/// </summary>
public class RateLimitMiddleware
{
    private readonly RequestDelegate _next;
    private readonly IConnectionMultiplexer _redis;

    public RateLimitMiddleware(
        RequestDelegate next,
        IConnectionMultiplexer redis)
    {
        _next = next;
        _redis = redis;
    }

    public async Task InvokeAsync(HttpContext context)
    {
        // Only the upload endpoints are throttled; anything else passes straight through.
        if (!context.Request.Path.StartsWithSegments("/api/upload"))
        {
            await _next(context);
            return;
        }

        var clientIp = context.Connection.RemoteIpAddress?.ToString();
        if (string.IsNullOrEmpty(clientIp))
        {
            await _next(context);
            return;
        }

        var redisDb = _redis.GetDatabase();
        var counterKey = $"upload:rate:{clientIp}";
        var hits = await redisDb.StringIncrementAsync(counterKey);

        // First request in this window starts the one-minute expiry clock.
        if (hits == 1)
        {
            await redisDb.KeyExpireAsync(counterKey, TimeSpan.FromMinutes(1));
        }

        if (hits > 100) // at most 100 requests per minute per IP
        {
            context.Response.StatusCode = 429;
            await context.Response.WriteAsync("请求过于频繁,请稍后再试");
            return;
        }

        await _next(context);
    }
}
使用Hangfire处理耗时操作:
// Hangfire setup (Startup.ConfigureServices): jobs are persisted in
// SQL Server so queued work survives an application restart.
services.AddHangfire(config =>
    config.UseSqlServerStorage(Configuration.GetConnectionString("Hangfire")));
services.AddHangfireServer();

// Schedule the chunk merge to run in the background one minute from now,
// rather than performing the slow merge inside the HTTP request.
_backgroundJobClient.Schedule<UploadService>(
    s => s.MergeFileChunks(fileHash),
    TimeSpan.FromMinutes(1));
增大请求限制:
<!-- IIS request filtering: maxAllowedContentLength is in BYTES.
     2147483648 bytes = 2 GB, matching the client-side maxFileSize cap. -->
<system.webServer>
  <security>
    <requestFiltering>
      <requestLimits maxAllowedContentLength="2147483648" />
    </requestFiltering>
  </security>
</system.webServer>
调整超时设置:
<!-- ASP.NET runtime limits: executionTimeout is in seconds (3600 s = 1 h),
     maxRequestLength is in KILOBYTES (2097152 KB = 2 GB).
     Fix: <httpRuntime> is only valid under <system.web> — the original
     placed it under <system.webServer>, where it is invalid/ignored. -->
<system.web>
  <httpRuntime executionTimeout="3600" maxRequestLength="2097152" />
</system.web>
健康检查端点:
/// <summary>
/// Lightweight liveness probe: reports service status, the current UTC time
/// and the managed heap size in megabytes.
/// </summary>
[HttpGet("health")]
public IActionResult HealthCheck()
{
    // GC.GetTotalMemory(false): current managed heap bytes, no forced collection.
    var managedMegabytes = GC.GetTotalMemory(false) / 1024 / 1024;

    return Ok(new
    {
        Status = "Healthy",
        Timestamp = DateTime.UtcNow,
        Memory = managedMegabytes + "MB"
    });
}
日志记录策略:
问题现象:部分分片上传失败,导致最终无法合并
解决方案:
/// <summary>
/// Chunk verification endpoint (a controller action, not middleware despite
/// the original label): reports whether a chunk already exists on disk and
/// its size, so the client can skip re-uploading it when resuming.
/// </summary>
[HttpPost("verify-chunk")]
public IActionResult VerifyChunk(
    [FromQuery] string fileHash,
    [FromQuery] int chunkIndex)
{
    // Both values come straight from the query string and feed a file path:
    // reject non-hex hashes and negative indices to block path traversal
    // (e.g. fileHash = "../../etc"). A rejected probe simply reports
    // "not uploaded", which is safe — the client will re-send the chunk.
    if (string.IsNullOrEmpty(fileHash) || chunkIndex < 0)
        return Ok(new { exists = false });
    foreach (var c in fileHash)
        if (!Uri.IsHexDigit(c))
            return Ok(new { exists = false });

    var chunkPath = Path.Combine("uploads", fileHash, $"{chunkIndex}.part");
    if (System.IO.File.Exists(chunkPath))
    {
        var fileInfo = new FileInfo(chunkPath);
        return Ok(new
        {
            exists = true,
            size = fileInfo.Length
        });
    }
    return Ok(new { exists = false });
}
问题现象:合并大文件时耗时过长,客户端等待超时
解决方案:
/// <summary>
/// Merge-progress endpoint: returns the server-side merge progress for a
/// file, polled by the client while the long-running merge executes.
/// </summary>
[HttpGet("merge-progress")]
public IActionResult GetMergeProgress(
    [FromQuery] string fileHash)
    => Ok(_uploadService.GetMergeProgress(fileHash));
问题现象:某些浏览器下上传功能异常
解决方案:
// Browser capability check: verifies every API the uploader depends on is
// present. Returns true when all are available; otherwise alerts the user
// with the missing feature names and returns false.
function checkBrowserCompatibility() {
  // Bug fix: the global is `window.indexedDB` (lower-case "i");
  // `'IndexedDB' in window` is false in every browser, so the original
  // check always reported IndexedDB as missing.
  const requiredFeatures = [
    'File',
    'Blob',
    'FormData',
    'indexedDB',
    'FileReader'
  ];
  const missingFeatures = requiredFeatures.filter(
    feature => !(feature in window));
  if (missingFeatures.length > 0) {
    alert(`您的浏览器缺少以下必要功能: ${missingFeatures.join(', ')}`);
    return false;
  }
  return true;
}
Up6-ASP.NET/
├── ClientApp/ # 前端代码
│ ├── src/
│ │ ├── components/ # Vue组件
│ │ ├── stores/ # Pinia状态管理
│ │ ├── utils/ # 工具函数
│ │ └── main.js # 应用入口
├── Controllers/ # API控制器
│ ├── UploadController.cs # 上传相关API
├── Services/ # 业务逻辑
│ ├── UploadService.cs # 上传服务
├── Models/ # 数据模型
│ ├── UploadTask.cs # 上传任务模型
├── Migrations/ # 数据库迁移
├── wwwroot/ # 静态资源
├── appsettings.json # 配置文件
└── Program.cs # 应用入口
在实际开发过程中,我总结了以下几点重要经验:
分片大小选择:经过测试,5MB大小的分片在大多数场景下表现最佳,过小会增加请求数量,过大则容易因网络问题导致失败
状态管理:前端使用IndexedDB存储上传状态,后端使用Redis缓存当前任务,确保状态一致性
错误处理:对每个可能失败的环节都添加了详细的错误处理和日志记录,便于问题排查
进度反馈:实现细粒度的进度反馈机制,包括上传进度和合并进度,提升用户体验
资源清理:添加定时任务清理过期未完成的上传任务,避免存储空间浪费
测试策略:针对不同文件大小、网络条件和浏览器环境制定了详细的测试方案
这个方案在实际项目中成功支持了2GB以上文件的上传,并且在各种网络条件下表现稳定。通过合理的架构设计和性能优化,即使在资源有限的服务器上也能可靠运行。