main.go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"fmt"
	"io"
	"log"
	"net/http"
	"os"
	"path/filepath"
	"time"
)

// writeJSON encodes v as JSON and writes it with the given status code.
func writeJSON(w http.ResponseWriter, status int, v any) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(status)
	_ = json.NewEncoder(w).Encode(v)
}

// uploadHandler returns a handler that streams a single multipart "file"
// field to uploadDir, computing its SHA-256 as it is written.
func uploadHandler(uploadDir string) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if r.Method != http.MethodPost {
			http.Error(w, "method not allowed", http.StatusMethodNotAllowed)
			return
		}

		// Cap the request body at 25 MiB; MaxBytesReader makes further reads
		// fail once the limit is exceeded instead of buffering the excess.
		const maxBytes = 25 << 20
		r.Body = http.MaxBytesReader(w, r.Body, maxBytes)

		mr, err := r.MultipartReader()
		if err != nil {
			http.Error(w, "invalid multipart request", http.StatusBadRequest)
			return
		}

		if err := os.MkdirAll(uploadDir, 0o755); err != nil {
			http.Error(w, "failed to create upload dir", http.StatusInternalServerError)
			return
		}

		var savedPath string
		var written int64
		hasher := sha256.New()

		for {
			part, err := mr.NextPart()
			if err == io.EOF {
				break
			}
			if err != nil {
				http.Error(w, "failed to read multipart", http.StatusBadRequest)
				return
			}

			// Drain and skip any form field other than "file" so the reader
			// can advance to the next part.
			if part.FormName() != "file" {
				io.Copy(io.Discard, part)
				part.Close()
				continue
			}

			// filepath.Base strips any directory components, which blocks
			// path-traversal attempts in the client-supplied filename.
			name := filepath.Base(part.FileName())
			if name == "" || name == "." || name == "/" {
				part.Close()
				http.Error(w, "missing filename", http.StatusBadRequest)
				return
			}

			// Prefix the name with a nanosecond timestamp so concurrent
			// uploads with the same filename do not overwrite each other.
			dstPath := filepath.Join(uploadDir, fmt.Sprintf("%d-%s", time.Now().UnixNano(), name))
			f, err := os.Create(dstPath)
			if err != nil {
				part.Close()
				http.Error(w, "failed to create file", http.StatusInternalServerError)
				return
			}

			// Stream the part to disk and into the hasher in a single pass.
			n, copyErr := io.Copy(io.MultiWriter(f, hasher), part)
			part.Close()
			syncErr := f.Sync()
			closeErr := f.Close()

			// Sync and Close failures can also mean data loss, so treat them
			// like a failed copy and discard the partial file.
			if copyErr != nil || syncErr != nil || closeErr != nil {
				os.Remove(dstPath)
				http.Error(w, "failed to save file", http.StatusInternalServerError)
				return
			}

			// Only the first "file" part is stored; stop after it.
			savedPath = dstPath
			written = n
			break
		}

		if savedPath == "" {
			http.Error(w, "missing file field", http.StatusBadRequest)
			return
		}

		writeJSON(w, http.StatusOK, map[string]any{
			"path":   savedPath,
			"bytes":  written,
			"sha256": hex.EncodeToString(hasher.Sum(nil)),
		})
	}
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/upload", uploadHandler("uploads"))

	srv := &http.Server{Addr: ":8080", Handler: mux, ReadHeaderTimeout: 5 * time.Second}
	log.Printf("upload server on http://localhost:8080/upload")
	if err := srv.ListenAndServe(); err != nil {
		log.Fatalf("server failed: %v", err)
	}
}

How It Works

The handler processes multipart file uploads without buffering entire files in memory, enforcing a request-size limit and computing a SHA-256 checksum while streaming each upload to disk.

It wraps the body in MaxBytesReader to cap the request size, parses the multipart stream incrementally, streams the first "file" part to a uniquely named file in the upload directory while hashing it, and reports the saved path, byte count, and checksum as JSON.
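For reference, a minimal standalone client sketch (not part of the original listing) can exercise the endpoint by streaming a local file through an io.Pipe, so the client side avoids buffering the whole file as well; the file name report.pdf and the localhost address are placeholders.

package main

import (
	"fmt"
	"io"
	"log"
	"mime/multipart"
	"net/http"
	"os"
)

func main() {
	pr, pw := io.Pipe()
	mw := multipart.NewWriter(pw)

	// Build the multipart body in a goroutine and stream it through the pipe.
	go func() {
		defer pw.Close()
		defer mw.Close()
		part, err := mw.CreateFormFile("file", "report.pdf")
		if err != nil {
			pw.CloseWithError(err)
			return
		}
		f, err := os.Open("report.pdf")
		if err != nil {
			pw.CloseWithError(err)
			return
		}
		defer f.Close()
		if _, err := io.Copy(part, f); err != nil {
			pw.CloseWithError(err)
		}
	}()

	resp, err := http.Post("http://localhost:8080/upload", mw.FormDataContentType(), pr)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status, string(body))
}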

Key Concepts

  • Streaming writes avoid memory spikes for large uploads.
  • Size limits and input validation protect the server from abuse.
  • Computing the checksum while streaming lets clients verify file integrity without a second pass over the data (a test sketch follows this list).
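To make the integrity point concrete, here is a hedged test sketch (assumed to live in a main_test.go next to main.go; not part of the original) that uploads a small payload via net/http/httptest and compares the reported sha256 with one computed locally.

package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"mime/multipart"
	"net/http"
	"net/http/httptest"
	"testing"
)

func TestUploadChecksum(t *testing.T) {
	// Serve the handler with a throwaway upload directory.
	srv := httptest.NewServer(uploadHandler(t.TempDir()))
	defer srv.Close()

	payload := []byte("hello, streaming upload")

	// Build a small multipart body in memory for the test.
	var body bytes.Buffer
	mw := multipart.NewWriter(&body)
	part, err := mw.CreateFormFile("file", "hello.txt")
	if err != nil {
		t.Fatal(err)
	}
	if _, err := part.Write(payload); err != nil {
		t.Fatal(err)
	}
	if err := mw.Close(); err != nil {
		t.Fatal(err)
	}

	resp, err := http.Post(srv.URL, mw.FormDataContentType(), &body)
	if err != nil {
		t.Fatal(err)
	}
	defer resp.Body.Close()

	var out struct {
		SHA256 string `json:"sha256"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		t.Fatal(err)
	}

	want := sha256.Sum256(payload)
	if out.SHA256 != hex.EncodeToString(want[:]) {
		t.Fatalf("checksum mismatch: got %s", out.SHA256)
	}
}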

When to Use This Pattern

  • Uploading large assets or backups safely.
  • APIs for user uploads where resource control matters.
  • Ingestion services that must validate file contents.

Best Practices

  • Place temp files on disks with enough space and I/O budget.
  • Validate filenames and extensions before writing (a sketch follows this list).
  • Clean up temp files on errors or client disconnects.
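One possible shape for the filename and extension check (the allowlist below is an assumption for illustration, not something the original handler enforces) is a small helper that could be called right after filepath.Base:

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// allowedExt is an assumed allowlist; adjust it to whatever the service
// actually accepts.
var allowedExt = map[string]bool{
	".png": true, ".jpg": true, ".pdf": true, ".zip": true,
}

// validateFilename rejects empty or dot names, path separators, and any
// extension outside the allowlist.
func validateFilename(name string) error {
	if name == "" || name == "." || name == ".." || strings.ContainsAny(name, `/\`) {
		return fmt.Errorf("invalid filename")
	}
	if !allowedExt[strings.ToLower(filepath.Ext(name))] {
		return fmt.Errorf("unsupported extension %q", filepath.Ext(name))
	}
	return nil
}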
Go Version: 1.18+
Difficulty: advanced
Production Ready: Yes
Lines of Code: 116