Resumable File Upload/Download with Chunking
Chunked upload and download service that supports pausing and resuming large file transfers with checksum validation.
main.go
package main
import ( "crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"log"
"net/http"
"os"
"path/filepath"
"strconv"
"sync"
"time")
// ChunkMetadata holds metadata for a single file chunk
type ChunkMetadata struct {
FileID string `json:"file_id"` // Unique file identifier
ChunkID int `json:"chunk_id"` // Chunk index (0-based)
TotalChunks int `json:"total_chunks"` // Total number of chunks
Checksum string `json:"checksum"` // SHA256 checksum of chunk data
Size int64 `json:"size"` // Chunk size in bytes
}
// FileMetadata holds metadata for an uploaded file
type FileMetadata struct {
FileID string `json:"file_id"`
Filename string `json:"filename"`
TotalSize int64 `json:"total_size"`
TotalChunks int `json:"total_chunks"`
ChunksReceived map[int]bool `json:"chunks_received"` // Track received chunks
Checksum string `json:"checksum"` // SHA256 checksum of entire file
mu sync.Mutex
}
// FileTransferService manages resumable file uploads/downloads
type FileTransferService struct {
uploadDir string // Directory to store uploaded files and chunks
files map[string]*FileMetadata
mu sync.RWMutex
}
// NewFileTransferService creates a new service instance
func NewFileTransferService(uploadDir string) (*FileTransferService, error) {
// Create upload directory if it doesn't exist
if err := os.MkdirAll(uploadDir, 0755); err != nil {
return nil, fmt.Errorf("failed to create upload dir: %v", err)
}
return &FileTransferService{
uploadDir: uploadDir,
files: make(map[string]*FileMetadata),
}, nil
}
// calculateChecksum computes SHA256 checksum for a byte slice
func calculateChecksum(data []byte) string {
hash := sha256.Sum256(data)
return hex.EncodeToString(hash[:])
}
// InitUpload initializes a new file upload (called by client before sending chunks)
func (s *FileTransferService) InitUpload(w http.ResponseWriter, r *http.Request) {
var req struct {
Filename string `json:"filename"`
TotalSize int64 `json:"total_size"`
ChunkSize int64 `json:"chunk_size"` // Client-specified chunk size (e.g., 5MB)
}
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
http.Error(w, fmt.Sprintf("invalid request: %v", err), http.StatusBadRequest)
return
}
// Generate unique file ID
fileID := fmt.Sprintf("%x", sha256.Sum256([]byte(req.Filename+strconv.FormatInt(time.Now().UnixNano(), 10))))[:16]
	// Reject non-positive sizes to avoid a division by zero below
	if req.TotalSize <= 0 || req.ChunkSize <= 0 {
		http.Error(w, "total_size and chunk_size must be positive", http.StatusBadRequest)
		return
	}
	// Calculate total chunks (ceiling division)
	totalChunks := int((req.TotalSize + req.ChunkSize - 1) / req.ChunkSize)
	// Create file metadata; keep only the base name to prevent path traversal
	fileMeta := &FileMetadata{
		FileID:         fileID,
		Filename:       filepath.Base(req.Filename),
		TotalSize:      req.TotalSize,
		TotalChunks:    totalChunks,
		ChunksReceived: make(map[int]bool),
	}
s.mu.Lock()
s.files[fileID] = fileMeta
s.mu.Unlock()
// Create chunk directory for this file
chunkDir := filepath.Join(s.uploadDir, fileID)
if err := os.MkdirAll(chunkDir, 0755); err != nil {
http.Error(w, fmt.Sprintf("failed to create chunk dir: %v", err), http.StatusInternalServerError)
return
}
// Return file ID and total chunks to client
resp := struct {
FileID string `json:"file_id"`
TotalChunks int `json:"total_chunks"`
}{fileID, totalChunks}
w.Header().Set("Content-Type", "application/json")
json.NewEncoder(w).Encode(resp)
}
// UploadChunk handles upload of a single file chunk
func (s *FileTransferService) UploadChunk(w http.ResponseWriter, r *http.Request) {
// Parse chunk metadata from URL query
fileID := r.URL.Query().Get("file_id")
chunkIDStr := r.URL.Query().Get("chunk_id")
chunkID, err := strconv.Atoi(chunkIDStr)
if err != nil {
http.Error(w, "invalid chunk_id", http.StatusBadRequest)
return
}
s.mu.RLock()
fileMeta, exists := s.files[fileID]
s.mu.RUnlock()
	if !exists {
		http.Error(w, fmt.Sprintf("file %s not found", fileID), http.StatusNotFound)
		return
	}
	// Reject chunk indexes outside the expected range
	if chunkID < 0 || chunkID >= fileMeta.TotalChunks {
		http.Error(w, "chunk_id out of range", http.StatusBadRequest)
		return
	}
// Read chunk data from request body
chunkData, err := io.ReadAll(r.Body)
if err != nil {
http.Error(w, fmt.Sprintf("failed to read chunk: %v", err), http.StatusBadRequest)
return
}
// Validate checksum (client should send checksum in header)
clientChecksum := r.Header.Get("X-Chunk-Checksum")
serverChecksum := calculateChecksum(chunkData)
if clientChecksum != "" && clientChecksum != serverChecksum {
http.Error(w, "chunk checksum mismatch", http.StatusBadRequest)
return
}
// Save chunk to disk
chunkDir := filepath.Join(s.uploadDir, fileID)
chunkPath := filepath.Join(chunkDir, fmt.Sprintf("chunk_%04d", chunkID))
if err := os.WriteFile(chunkPath, chunkData, 0644); err != nil {
http.Error(w, fmt.Sprintf("failed to save chunk: %v", err), http.StatusInternalServerError)
return
}
	// Mark the chunk as received and check for completion in one critical
	// section so concurrent chunk uploads cannot race on the received map
	fileMeta.mu.Lock()
	fileMeta.ChunksReceived[chunkID] = true
	allReceived := true
	for i := 0; i < fileMeta.TotalChunks; i++ {
		if !fileMeta.ChunksReceived[i] {
			allReceived = false
			break
		}
	}
	fileMeta.mu.Unlock()
// If all chunks received, merge into final file
if allReceived {
if err := s.mergeChunks(fileMeta); err != nil {
http.Error(w, fmt.Sprintf("failed to merge chunks: %v", err), http.StatusInternalServerError)
return
}
}
	// Return success response
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(http.StatusOK)
	json.NewEncoder(w).Encode(map[string]interface{}{
"status": "success",
"chunk_id": chunkID,
"all_received": allReceived,
})
}
// mergeChunks combines all chunks into the final file
func (s *FileTransferService) mergeChunks(fileMeta *FileMetadata) error {
finalPath := filepath.Join(s.uploadDir, fileMeta.Filename)
finalFile, err := os.Create(finalPath)
if err != nil {
return err
}
defer finalFile.Close()
// Merge chunks in order
for i := 0; i < fileMeta.TotalChunks; i++ {
chunkPath := filepath.Join(s.uploadDir, fileMeta.FileID, fmt.Sprintf("chunk_%04d", i))
chunkData, err := os.ReadFile(chunkPath)
if err != nil {
return err
}
if _, err := finalFile.Write(chunkData); err != nil {
return err
}
// Delete chunk after merging (optional)
os.Remove(chunkPath)
}
// Calculate final file checksum
finalData, err := os.ReadFile(finalPath)
if err != nil {
return err
}
fileMeta.mu.Lock()
fileMeta.Checksum = calculateChecksum(finalData)
fileMeta.mu.Unlock()
// Delete chunk directory
os.RemoveAll(filepath.Join(s.uploadDir, fileMeta.FileID))
log.Printf("File %s merged successfully (size: %d bytes)", fileMeta.Filename, fileMeta.TotalSize)
return nil
}
// DownloadFile serves a file with resumable range requests
func (s *FileTransferService) DownloadFile(w http.ResponseWriter, r *http.Request) {
	// Keep only the base name so clients cannot escape the upload directory
	filename := filepath.Base(r.URL.Query().Get("filename"))
	filePath := filepath.Join(s.uploadDir, filename)
	// Check that the file exists before serving it
	if _, err := os.Stat(filePath); err != nil {
		http.Error(w, "file not found", http.StatusNotFound)
		return
	}
	// http.ServeFile honors Range headers, which makes downloads resumable
	http.ServeFile(w, r, filePath)
}
// Example Usage
func main() {
// Create file transfer service with upload directory
service, err := NewFileTransferService("./uploads")
if err != nil {
log.Fatalf("Failed to create service: %v", err)
}
// Register HTTP handlers
http.HandleFunc("/api/init-upload", service.InitUpload)
http.HandleFunc("/api/upload-chunk", service.UploadChunk)
http.HandleFunc("/api/download", service.DownloadFile)
log.Println("File transfer server starting on :8080")
log.Fatal(http.ListenAndServe(":8080", nil))
}
How It Works
The service splits files into numbered chunks, tracks per-file metadata for progress, exposes HTTP handlers to initialize an upload and receive individual chunks, validates each chunk's SHA-256 checksum, reassembles the file once every chunk has arrived, and serves range-aware downloads.
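A minimal client-side sketch of the upload flow, assuming the server above is running and reachable at some serverURL; the uploadFile helper, its sequential loop, and its lack of retries are illustrative choices, not part of the service itself.
// client.go: hypothetical client for the handlers above (illustrative only)
package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
	"path/filepath"
)

// uploadFile registers an upload, then sends each chunk with its checksum.
func uploadFile(serverURL, path string, chunkSize int64) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()
	info, err := f.Stat()
	if err != nil {
		return err
	}
	// Step 1: initialize the upload and obtain a file ID and chunk count.
	initBody, _ := json.Marshal(map[string]interface{}{
		"filename":   filepath.Base(path),
		"total_size": info.Size(),
		"chunk_size": chunkSize,
	})
	resp, err := http.Post(serverURL+"/api/init-upload", "application/json", bytes.NewReader(initBody))
	if err != nil {
		return err
	}
	var initResp struct {
		FileID      string `json:"file_id"`
		TotalChunks int    `json:"total_chunks"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&initResp); err != nil {
		resp.Body.Close()
		return err
	}
	resp.Body.Close()
	// Step 2: send each chunk; a resuming client would skip chunk IDs the
	// server has already confirmed instead of starting from zero.
	buf := make([]byte, chunkSize)
	for chunkID := 0; chunkID < initResp.TotalChunks; chunkID++ {
		n, err := io.ReadFull(f, buf)
		if err != nil && err != io.ErrUnexpectedEOF && err != io.EOF {
			return err
		}
		data := buf[:n]
		sum := sha256.Sum256(data)
		url := fmt.Sprintf("%s/api/upload-chunk?file_id=%s&chunk_id=%d", serverURL, initResp.FileID, chunkID)
		req, err := http.NewRequest(http.MethodPost, url, bytes.NewReader(data))
		if err != nil {
			return err
		}
		req.Header.Set("X-Chunk-Checksum", hex.EncodeToString(sum[:]))
		chunkResp, err := http.DefaultClient.Do(req)
		if err != nil {
			return err
		}
		chunkResp.Body.Close()
		if chunkResp.StatusCode != http.StatusOK {
			return fmt.Errorf("chunk %d rejected: %s", chunkID, chunkResp.Status)
		}
	}
	return nil
}
A paused transfer can be resumed by re-running the loop and skipping chunk IDs the server has already acknowledged.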
Key Concepts
- Chunk metadata tracks each chunk's index, size, and checksum, while file metadata records which chunks have arrived.
- The server can resume interrupted transfers without re-sending finished chunks (see the status-endpoint sketch after this list).
- Checksum validation protects against corrupted uploads.
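The listing above keeps received-chunk state only in memory and exposes no way for a client to query it. The sketch below adds a hypothetical GetUploadStatus handler (not part of main.go) that reports missing chunk indexes so an interrupted client can resume by re-sending only those; it assumes the FileTransferService and FileMetadata types defined above and would be registered alongside the other routes, e.g. at /api/upload-status.
// GetUploadStatus is a hypothetical resume endpoint: it lists the chunk
// indexes that have not been received yet for a given file ID.
func (s *FileTransferService) GetUploadStatus(w http.ResponseWriter, r *http.Request) {
	fileID := r.URL.Query().Get("file_id")

	s.mu.RLock()
	fileMeta, exists := s.files[fileID]
	s.mu.RUnlock()
	if !exists {
		http.Error(w, "file not found", http.StatusNotFound)
		return
	}

	// Collect the indexes of chunks the server is still waiting for.
	fileMeta.mu.Lock()
	missing := []int{}
	for i := 0; i < fileMeta.TotalChunks; i++ {
		if !fileMeta.ChunksReceived[i] {
			missing = append(missing, i)
		}
	}
	fileMeta.mu.Unlock()

	w.Header().Set("Content-Type", "application/json")
	json.NewEncoder(w).Encode(map[string]interface{}{
		"file_id":        fileID,
		"total_chunks":   fileMeta.TotalChunks,
		"missing_chunks": missing,
	})
}
Because this map lives in memory, a server restart still loses progress; persisting the manifest (the first best practice below) is what makes resumption survive restarts.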
When to Use This Pattern
- Large file transfers over unstable connections.
- Client-side resumable uploads for browsers or mobile apps.
- Backup tools that need restartable, verified transfers.
Best Practices
- Store chunk manifests durably so restarts do not lose progress.
- Validate chunk order and size to avoid malicious uploads.
- Stream reassembly to avoid loading full files into memory (see the streaming merge sketch below).
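The mergeChunks function in the listing reads every chunk, and then the finished file, fully into memory to compute the checksum. A streaming variant is sketched below under the same types: it copies each chunk with io.Copy and updates a running SHA-256 hash through io.MultiWriter, so memory use stays constant regardless of file size. The mergeChunksStreaming name is illustrative.
// mergeChunksStreaming is a sketch of a memory-friendly merge: each chunk is
// streamed into the final file while a running SHA-256 hash is updated,
// so neither the chunks nor the finished file are held in memory at once.
func (s *FileTransferService) mergeChunksStreaming(fileMeta *FileMetadata) error {
	finalPath := filepath.Join(s.uploadDir, filepath.Base(fileMeta.Filename))
	finalFile, err := os.Create(finalPath)
	if err != nil {
		return err
	}
	defer finalFile.Close()

	hash := sha256.New()
	// Everything written to the file also updates the hash.
	out := io.MultiWriter(finalFile, hash)

	for i := 0; i < fileMeta.TotalChunks; i++ {
		chunkPath := filepath.Join(s.uploadDir, fileMeta.FileID, fmt.Sprintf("chunk_%04d", i))
		chunk, err := os.Open(chunkPath)
		if err != nil {
			return err
		}
		if _, err := io.Copy(out, chunk); err != nil {
			chunk.Close()
			return err
		}
		chunk.Close()
	}

	fileMeta.mu.Lock()
	fileMeta.Checksum = hex.EncodeToString(hash.Sum(nil))
	fileMeta.mu.Unlock()

	// Remove the per-file chunk directory once the merge succeeds.
	return os.RemoveAll(filepath.Join(s.uploadDir, fileMeta.FileID))
}
Swapping this in for mergeChunks would not change the handler interface, only the memory profile of the merge step.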
Go Version: 1.16
Difficulty: advanced
Production Ready: Yes
Lines of Code: 254