diff --git a/Dockerfile b/Dockerfile index addad3f..c1a2f2e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -13,6 +13,8 @@ FROM alpine:3.8 COPY --from=build /go/bin/linx-server /usr/local/bin/linx-server ENV GOPATH /go +ENV SSL_CERT_FILE /etc/ssl/cert.pem + COPY static /go/src/github.com/andreimarcu/linx-server/static/ COPY templates /go/src/github.com/andreimarcu/linx-server/templates/ diff --git a/backends/localfs/localfs.go b/backends/localfs/localfs.go index b55c986..3f6f5ad 100644 --- a/backends/localfs/localfs.go +++ b/backends/localfs/localfs.go @@ -1,63 +1,149 @@ package localfs import ( - "errors" + "encoding/json" "io" "io/ioutil" - "net/http" "os" "path" + "time" "github.com/andreimarcu/linx-server/backends" + "github.com/andreimarcu/linx-server/helpers" ) type LocalfsBackend struct { - basePath string + metaPath string + filesPath string } -func (b LocalfsBackend) Delete(key string) error { - return os.Remove(path.Join(b.basePath, key)) +type MetadataJSON struct { + DeleteKey string `json:"delete_key"` + Sha256sum string `json:"sha256sum"` + Mimetype string `json:"mimetype"` + Size int64 `json:"size"` + Expiry int64 `json:"expiry"` + ArchiveFiles []string `json:"archive_files,omitempty"` +} + +func (b LocalfsBackend) Delete(key string) (err error) { + err = os.Remove(path.Join(b.filesPath, key)) + if err != nil { + return + } + err = os.Remove(path.Join(b.metaPath, key)) + return } func (b LocalfsBackend) Exists(key string) (bool, error) { - _, err := os.Stat(path.Join(b.basePath, key)) + _, err := os.Stat(path.Join(b.filesPath, key)) return err == nil, err } -func (b LocalfsBackend) Get(key string) ([]byte, error) { - return ioutil.ReadFile(path.Join(b.basePath, key)) +func (b LocalfsBackend) Head(key string) (metadata backends.Metadata, err error) { + f, err := os.Open(path.Join(b.metaPath, key)) + if os.IsNotExist(err) { + return metadata, backends.NotFoundErr + } else if err != nil { + return metadata, backends.BadMetadata + } + defer f.Close() + + 
decoder := json.NewDecoder(f) + + mjson := MetadataJSON{} + if err := decoder.Decode(&mjson); err != nil { + return metadata, backends.BadMetadata + } + + metadata.DeleteKey = mjson.DeleteKey + metadata.Mimetype = mjson.Mimetype + metadata.ArchiveFiles = mjson.ArchiveFiles + metadata.Sha256sum = mjson.Sha256sum + metadata.Expiry = time.Unix(mjson.Expiry, 0) + metadata.Size = mjson.Size + + return } -func (b LocalfsBackend) Put(key string, r io.Reader) (int64, error) { - dst, err := os.Create(path.Join(b.basePath, key)) +func (b LocalfsBackend) Get(key string) (metadata backends.Metadata, f io.ReadCloser, err error) { + metadata, err = b.Head(key) if err != nil { - return 0, err + return + } + + f, err = os.Open(path.Join(b.filesPath, key)) + if err != nil { + return + } + + return +} + +func (b LocalfsBackend) writeMetadata(key string, metadata backends.Metadata) error { + metaPath := path.Join(b.metaPath, key) + + mjson := MetadataJSON{ + DeleteKey: metadata.DeleteKey, + Mimetype: metadata.Mimetype, + ArchiveFiles: metadata.ArchiveFiles, + Sha256sum: metadata.Sha256sum, + Expiry: metadata.Expiry.Unix(), + Size: metadata.Size, + } + + dst, err := os.Create(metaPath) + if err != nil { + return err + } + defer dst.Close() + + encoder := json.NewEncoder(dst) + err = encoder.Encode(mjson) + if err != nil { + os.Remove(metaPath) + return err + } + + return nil +} + +func (b LocalfsBackend) Put(key string, r io.Reader, expiry time.Time, deleteKey string) (m backends.Metadata, err error) { + filePath := path.Join(b.filesPath, key) + + dst, err := os.Create(filePath) + if err != nil { + return } defer dst.Close() bytes, err := io.Copy(dst, r) if bytes == 0 { - b.Delete(key) - return bytes, errors.New("Empty file") + os.Remove(filePath) + return m, backends.FileEmptyError } else if err != nil { - b.Delete(key) - return bytes, err + os.Remove(filePath) + return m, err } - return bytes, err -} + m.Expiry = expiry + m.DeleteKey = deleteKey + m.Size = bytes + m.Mimetype, _ = 
helpers.DetectMime(dst) + m.Sha256sum, _ = helpers.Sha256sum(dst) + m.ArchiveFiles, _ = helpers.ListArchiveFiles(m.Mimetype, m.Size, dst) -func (b LocalfsBackend) Open(key string) (backends.ReadSeekCloser, error) { - return os.Open(path.Join(b.basePath, key)) -} + err = b.writeMetadata(key, m) + if err != nil { + os.Remove(filePath) + return + } -func (b LocalfsBackend) ServeFile(key string, w http.ResponseWriter, r *http.Request) { - filePath := path.Join(b.basePath, key) - http.ServeFile(w, r, filePath) + return } func (b LocalfsBackend) Size(key string) (int64, error) { - fileInfo, err := os.Stat(path.Join(b.basePath, key)) + fileInfo, err := os.Stat(path.Join(b.filesPath, key)) if err != nil { return 0, err } @@ -68,7 +154,7 @@ func (b LocalfsBackend) Size(key string) (int64, error) { func (b LocalfsBackend) List() ([]string, error) { var output []string - files, err := ioutil.ReadDir(b.basePath) + files, err := ioutil.ReadDir(b.filesPath) if err != nil { return nil, err } @@ -80,6 +166,9 @@ func (b LocalfsBackend) List() ([]string, error) { return output, nil } -func NewLocalfsBackend(basePath string) LocalfsBackend { - return LocalfsBackend{basePath: basePath} +func NewLocalfsBackend(metaPath string, filesPath string) LocalfsBackend { + return LocalfsBackend{ + metaPath: metaPath, + filesPath: filesPath, + } } diff --git a/backends/meta.go b/backends/meta.go index 27c3e41..7ba522d 100644 --- a/backends/meta.go +++ b/backends/meta.go @@ -5,11 +5,6 @@ import ( "time" ) -type MetaBackend interface { - Get(key string) (Metadata, error) - Put(key string, metadata *Metadata) error -} - type Metadata struct { DeleteKey string Sha256sum string diff --git a/backends/metajson/metajson.go b/backends/metajson/metajson.go deleted file mode 100644 index 8ec53c4..0000000 --- a/backends/metajson/metajson.go +++ /dev/null @@ -1,70 +0,0 @@ -package metajson - -import ( - "bytes" - "encoding/json" - "time" - - "github.com/andreimarcu/linx-server/backends" -) - -type 
MetadataJSON struct { - DeleteKey string `json:"delete_key"` - Sha256sum string `json:"sha256sum"` - Mimetype string `json:"mimetype"` - Size int64 `json:"size"` - Expiry int64 `json:"expiry"` - ArchiveFiles []string `json:"archive_files,omitempty"` -} - -type MetaJSONBackend struct { - storage backends.MetaStorageBackend -} - -func (m MetaJSONBackend) Put(key string, metadata *backends.Metadata) error { - mjson := MetadataJSON{} - mjson.DeleteKey = metadata.DeleteKey - mjson.Mimetype = metadata.Mimetype - mjson.ArchiveFiles = metadata.ArchiveFiles - mjson.Sha256sum = metadata.Sha256sum - mjson.Expiry = metadata.Expiry.Unix() - mjson.Size = metadata.Size - - byt, err := json.Marshal(mjson) - if err != nil { - return err - } - - if _, err := m.storage.Put(key, bytes.NewBuffer(byt)); err != nil { - return err - } - - return nil -} - -func (m MetaJSONBackend) Get(key string) (metadata backends.Metadata, err error) { - b, err := m.storage.Get(key) - if err != nil { - return metadata, backends.BadMetadata - } - - mjson := MetadataJSON{} - - err = json.Unmarshal(b, &mjson) - if err != nil { - return metadata, backends.BadMetadata - } - - metadata.DeleteKey = mjson.DeleteKey - metadata.Mimetype = mjson.Mimetype - metadata.ArchiveFiles = mjson.ArchiveFiles - metadata.Sha256sum = mjson.Sha256sum - metadata.Expiry = time.Unix(mjson.Expiry, 0) - metadata.Size = mjson.Size - - return -} - -func NewMetaJSONBackend(storage backends.MetaStorageBackend) MetaJSONBackend { - return MetaJSONBackend{storage: storage} -} diff --git a/backends/s3/s3.go b/backends/s3/s3.go new file mode 100644 index 0000000..7ae326c --- /dev/null +++ b/backends/s3/s3.go @@ -0,0 +1,192 @@ +package s3 + +import ( + "io" + "io/ioutil" + "os" + "strconv" + "time" + + "github.com/andreimarcu/linx-server/backends" + "github.com/andreimarcu/linx-server/helpers" + "github.com/aws/aws-sdk-go/aws" + "github.com/aws/aws-sdk-go/aws/awserr" + "github.com/aws/aws-sdk-go/aws/session" + 
"github.com/aws/aws-sdk-go/service/s3" + "github.com/aws/aws-sdk-go/service/s3/s3manager" +) + +type S3Backend struct { + bucket string + svc *s3.S3 +} + +func (b S3Backend) Delete(key string) error { + _, err := b.svc.DeleteObject(&s3.DeleteObjectInput{ + Bucket: aws.String(b.bucket), + Key: aws.String(key), + }) + if err != nil { + return err + } + return nil +} + +func (b S3Backend) Exists(key string) (bool, error) { + _, err := b.svc.HeadObject(&s3.HeadObjectInput{ + Bucket: aws.String(b.bucket), + Key: aws.String(key), + }) + return err == nil, err +} + +func (b S3Backend) Head(key string) (metadata backends.Metadata, err error) { + var result *s3.HeadObjectOutput + result, err = b.svc.HeadObject(&s3.HeadObjectInput{ + Bucket: aws.String(b.bucket), + Key: aws.String(key), + }) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + if aerr.Code() == s3.ErrCodeNoSuchKey || aerr.Code() == "NotFound" { + err = backends.NotFoundErr + } + } + return + } + + metadata, err = unmapMetadata(result.Metadata) + return +} + +func (b S3Backend) Get(key string) (metadata backends.Metadata, r io.ReadCloser, err error) { + var result *s3.GetObjectOutput + result, err = b.svc.GetObject(&s3.GetObjectInput{ + Bucket: aws.String(b.bucket), + Key: aws.String(key), + }) + if err != nil { + if aerr, ok := err.(awserr.Error); ok { + if aerr.Code() == s3.ErrCodeNoSuchKey || aerr.Code() == "NotFound" { + err = backends.NotFoundErr + } + } + return + } + + metadata, err = unmapMetadata(result.Metadata) + r = result.Body + return +} + +func mapMetadata(m backends.Metadata) map[string]*string { + return map[string]*string{ + "Expiry": aws.String(strconv.FormatInt(m.Expiry.Unix(), 10)), + "Delete_key": aws.String(m.DeleteKey), + "Size": aws.String(strconv.FormatInt(m.Size, 10)), + "Mimetype": aws.String(m.Mimetype), + "Sha256sum": aws.String(m.Sha256sum), + } +} + +func unmapMetadata(input map[string]*string) (m backends.Metadata, err error) { + expiry, err := 
strconv.ParseInt(aws.StringValue(input["Expiry"]), 10, 64) + if err != nil { + return m, err + } + m.Expiry = time.Unix(expiry, 0) + + m.Size, err = strconv.ParseInt(aws.StringValue(input["Size"]), 10, 64) + if err != nil { + return + } + + m.DeleteKey = aws.StringValue(input["Delete_key"]) + m.Mimetype = aws.StringValue(input["Mimetype"]) + m.Sha256sum = aws.StringValue(input["Sha256sum"]) + return +} + +func (b S3Backend) Put(key string, r io.Reader, expiry time.Time, deleteKey string) (m backends.Metadata, err error) { + tmpDst, err := ioutil.TempFile("", "linx-server-upload") + if err != nil { + return m, err + } + defer tmpDst.Close() + defer os.Remove(tmpDst.Name()) + + bytes, err := io.Copy(tmpDst, r) + if bytes == 0 { + return m, backends.FileEmptyError + } else if err != nil { + return m, err + } + + m.Expiry = expiry + m.DeleteKey = deleteKey + m.Size = bytes + m.Mimetype, _ = helpers.DetectMime(tmpDst) + m.Sha256sum, _ = helpers.Sha256sum(tmpDst) + // XXX: we may not be able to write this to AWS easily + //m.ArchiveFiles, _ = helpers.ListArchiveFiles(m.Mimetype, m.Size, tmpDst) + + uploader := s3manager.NewUploaderWithClient(b.svc) + input := &s3manager.UploadInput{ + Bucket: aws.String(b.bucket), + Key: aws.String(key), + Body: tmpDst, + Metadata: mapMetadata(m), + } + _, err = uploader.Upload(input) + if err != nil { + return + } + + return +} + +func (b S3Backend) Size(key string) (int64, error) { + input := &s3.HeadObjectInput{ + Bucket: aws.String(b.bucket), + Key: aws.String(key), + } + result, err := b.svc.HeadObject(input) + if err != nil { + return 0, err + } + + return *result.ContentLength, nil +} + +func (b S3Backend) List() ([]string, error) { + var output []string + input := &s3.ListObjectsInput{ + Bucket: aws.String(b.bucket), + } + + results, err := b.svc.ListObjects(input) + if err != nil { + return nil, err + } + + + for _, object := range results.Contents { + output = append(output, *object.Key) + } + + return output, nil +} + +func 
NewS3Backend(bucket string, region string, endpoint string) S3Backend { + awsConfig := &aws.Config{} + if region != "" { + awsConfig.Region = aws.String(region) + } + if endpoint != "" { + awsConfig.Endpoint = aws.String(endpoint) + } + + sess := session.Must(session.NewSession(awsConfig)) + svc := s3.New(sess) + return S3Backend{bucket: bucket, svc: svc} +} diff --git a/backends/storage.go b/backends/storage.go index 2b51a2c..d40a2b9 100644 --- a/backends/storage.go +++ b/backends/storage.go @@ -1,24 +1,17 @@ package backends import ( + "errors" "io" - "net/http" + "time" ) -type ReadSeekCloser interface { - io.Reader - io.Closer - io.Seeker - io.ReaderAt -} - type StorageBackend interface { Delete(key string) error Exists(key string) (bool, error) - Get(key string) ([]byte, error) - Put(key string, r io.Reader) (int64, error) - Open(key string) (ReadSeekCloser, error) - ServeFile(key string, w http.ResponseWriter, r *http.Request) + Head(key string) (Metadata, error) + Get(key string) (Metadata, io.ReadCloser, error) + Put(key string, r io.Reader, expiry time.Time, deleteKey string) (Metadata, error) Size(key string) (int64, error) } @@ -26,3 +19,6 @@ type MetaStorageBackend interface { StorageBackend List() ([]string, error) } + +var NotFoundErr = errors.New("File not found.") +var FileEmptyError = errors.New("Empty file") diff --git a/delete.go b/delete.go index 61c6fa8..38e36e3 100644 --- a/delete.go +++ b/delete.go @@ -3,8 +3,8 @@ package main import ( "fmt" "net/http" - "os" + "github.com/andreimarcu/linx-server/backends" "github.com/zenazn/goji/web" ) @@ -13,24 +13,19 @@ func deleteHandler(c web.C, w http.ResponseWriter, r *http.Request) { filename := c.URLParams["name"] - // Ensure requested file actually exists - if _, readErr := fileBackend.Exists(filename); os.IsNotExist(readErr) { + // Ensure that file exists and delete key is correct + metadata, err := storageBackend.Head(filename) + if err == backends.NotFoundErr { notFoundHandler(c, w, r) // 404 - 
file doesn't exist
 		return
-	}
-
-	// Ensure delete key is correct
-	metadata, err := metadataRead(filename)
-	if err != nil {
+	} else if err != nil {
 		unauthorizedHandler(c, w, r) // 401 - no metadata available
 		return
 	}
 
 	if metadata.DeleteKey == requestKey {
-		fileDelErr := fileBackend.Delete(filename)
-		metaDelErr := metaStorageBackend.Delete(filename)
-
-		if (fileDelErr != nil) || (metaDelErr != nil) {
+		err := storageBackend.Delete(filename)
+		if err != nil {
 			oopsHandler(c, w, r, RespPLAIN, "Could not delete")
 			return
 		}
diff --git a/display.go b/display.go
index 63ba765..7258904 100644
--- a/display.go
+++ b/display.go
@@ -2,6 +2,7 @@ package main
 
 import (
 	"encoding/json"
+	"io/ioutil"
 	"net/http"
 	"path/filepath"
 	"regexp"
@@ -9,6 +10,7 @@ import (
 	"strings"
 	"time"
 
+	"github.com/andreimarcu/linx-server/backends"
 	"github.com/andreimarcu/linx-server/expiry"
 	"github.com/dustin/go-humanize"
 	"github.com/flosch/pongo2"
@@ -29,14 +31,11 @@ func fileDisplayHandler(c web.C, w http.ResponseWriter, r *http.Request) {
 
 	fileName := c.URLParams["name"]
 
-	_, err := checkFile(fileName)
-	if err == NotFoundErr {
+	metadata, err := checkFile(fileName)
+	if err == backends.NotFoundErr {
 		notFoundHandler(c, w, r)
 		return
-	}
-
-	metadata, err := metadataRead(fileName)
-	if err != nil {
+	} else if err != nil {
 		oopsHandler(c, w, r, RespAUTO, "Corrupt metadata.")
 		return
 	}
@@ -78,8 +77,13 @@ func fileDisplayHandler(c web.C, w http.ResponseWriter, r *http.Request) {
 		tpl = Templates["display/pdf.html"]
 
 	} else if extension == "story" {
+		metadata, reader, err := storageBackend.Get(fileName)
+		if err != nil {
+			oopsHandler(c, w, r, RespHTML, err.Error())
+			return
+		}
 		if metadata.Size < maxDisplayFileSizeBytes {
-			bytes, err := fileBackend.Get(fileName)
+			bytes, err := ioutil.ReadAll(reader)
 			if err == nil {
 				extra["contents"] = string(bytes)
 				lines = strings.Split(extra["contents"], "\n")
@@ -88,8 +92,13 @@
 			}
 		}
 
 	} else if extension ==
"md" {
+		metadata, reader, err := storageBackend.Get(fileName)
+		if err != nil {
+			oopsHandler(c, w, r, RespHTML, err.Error())
+			return
+		}
 		if metadata.Size < maxDisplayFileSizeBytes {
-			bytes, err := fileBackend.Get(fileName)
+			bytes, err := ioutil.ReadAll(reader)
 			if err == nil {
 				unsafe := blackfriday.MarkdownCommon(bytes)
 				html := bluemonday.UGCPolicy().SanitizeBytes(unsafe)
@@ -100,8 +109,13 @@
 		}
 
 	} else if strings.HasPrefix(metadata.Mimetype, "text/") || supportedBinExtension(extension) {
+		metadata, reader, err := storageBackend.Get(fileName)
+		if err != nil {
+			oopsHandler(c, w, r, RespHTML, err.Error())
+			return
+		}
 		if metadata.Size < maxDisplayFileSizeBytes {
-			bytes, err := fileBackend.Get(fileName)
+			bytes, err := ioutil.ReadAll(reader)
 			if err == nil {
 				extra["extension"] = extension
 				extra["lang_hl"], extra["lang_ace"] = extensionToHlAndAceLangs(extension)
@@ -117,14 +131,14 @@
 	}
 
 	err = renderTemplate(tpl, pongo2.Context{
-		"mime":     metadata.Mimetype,
-		"filename": fileName,
-		"size":     sizeHuman,
-		"expiry":   expiryHuman,
+		"mime":       metadata.Mimetype,
+		"filename":   fileName,
+		"size":       sizeHuman,
+		"expiry":     expiryHuman,
 		"expirylist": listExpirationTimes(),
-		"extra":    extra,
-		"lines":    lines,
-		"files":    metadata.ArchiveFiles,
+		"extra":      extra,
+		"lines":      lines,
+		"files":      metadata.ArchiveFiles,
 	}, r, w)
 
 	if err != nil {
diff --git a/expiry.go b/expiry.go
index 6d8887d..63b7757 100644
--- a/expiry.go
+++ b/expiry.go
@@ -24,7 +24,7 @@ type ExpirationTime struct {
 
 // Determine if the given filename is expired
 func isFileExpired(filename string) (bool, error) {
-	metadata, err := metadataRead(filename)
+	metadata, err := storageBackend.Head(filename)
 	if err != nil {
 		return false, err
 	}
diff --git a/fileserve.go b/fileserve.go
index 3b20c4c..a3a249e 100644
--- a/fileserve.go
+++ b/fileserve.go
@@ -1,8 +1,10 @@
 package main
 
 import (
+
"io"
 	"net/http"
 	"net/url"
+	"strconv"
 	"strings"
 
 	"github.com/andreimarcu/linx-server/backends"
@@ -14,14 +16,11 @@ func fileServeHandler(c web.C, w http.ResponseWriter, r *http.Request) {
 	fileName := c.URLParams["name"]
 
 	metadata, err := checkFile(fileName)
-	if err == NotFoundErr {
+	if err == backends.NotFoundErr {
 		notFoundHandler(c, w, r)
 		return
-	} else if err == backends.BadMetadata {
-		oopsHandler(c, w, r, RespAUTO, "Corrupt metadata.")
-		return
 	} else if err != nil {
-		oopsHandler(c, w, r, RespAUTO, err.Error())
+		oopsHandler(c, w, r, RespAUTO, "Corrupt metadata.")
 		return
 	}
@@ -38,10 +37,23 @@ func fileServeHandler(c web.C, w http.ResponseWriter, r *http.Request) {
 	w.Header().Set("Content-Security-Policy", Config.fileContentSecurityPolicy)
 	w.Header().Set("Referrer-Policy", Config.fileReferrerPolicy)
 
+	_, reader, err := storageBackend.Get(fileName)
+	if err != nil {
+		oopsHandler(c, w, r, RespAUTO, err.Error())
+		return
+	}
+	defer reader.Close()
+
+	w.Header().Set("Content-Type", metadata.Mimetype)
+	w.Header().Set("Content-Length", strconv.FormatInt(metadata.Size, 10))
 	w.Header().Set("Etag", metadata.Sha256sum)
 	w.Header().Set("Cache-Control", "max-age=0")
 
-	fileBackend.ServeFile(fileName, w, r)
+	if r.Method != "HEAD" {
+		if _, err = io.CopyN(w, reader, metadata.Size); err != nil {
+			oopsHandler(c, w, r, RespAUTO, err.Error())
+		}
+	}
 }
 
 func staticHandler(c web.C, w http.ResponseWriter, r *http.Request) {
@@ -69,21 +81,14 @@ func staticHandler(c web.C, w http.ResponseWriter, r *http.Request) {
 }
 
 func checkFile(filename string) (metadata backends.Metadata, err error) {
-	_, err = fileBackend.Exists(filename)
-	if err != nil {
-		err = NotFoundErr
-		return
-	}
-
-	metadata, err = metadataRead(filename)
+	metadata, err = storageBackend.Head(filename)
 	if err != nil {
 		return
 	}
 
 	if expiry.IsTsExpired(metadata.Expiry) {
-		fileBackend.Delete(filename)
-		metaStorageBackend.Delete(filename)
-		err = NotFoundErr
+		storageBackend.Delete(filename)
+		err = backends.NotFoundErr
return } diff --git a/helpers/archive.go b/helpers/archive.go new file mode 100644 index 0000000..2a4380b --- /dev/null +++ b/helpers/archive.go @@ -0,0 +1,70 @@ +package helpers + +import ( + "archive/tar" + "archive/zip" + "compress/bzip2" + "compress/gzip" + "io" + "sort" +) + +type ReadSeekerAt interface { + io.Reader + io.Seeker + io.ReaderAt +} + +func ListArchiveFiles(mimetype string, size int64, r ReadSeekerAt) (files []string, err error) { + if mimetype == "application/x-tar" { + tReadr := tar.NewReader(r) + for { + hdr, err := tReadr.Next() + if err == io.EOF || err != nil { + break + } + if hdr.Typeflag == tar.TypeDir || hdr.Typeflag == tar.TypeReg { + files = append(files, hdr.Name) + } + } + sort.Strings(files) + } else if mimetype == "application/x-gzip" { + gzf, err := gzip.NewReader(r) + if err == nil { + tReadr := tar.NewReader(gzf) + for { + hdr, err := tReadr.Next() + if err == io.EOF || err != nil { + break + } + if hdr.Typeflag == tar.TypeDir || hdr.Typeflag == tar.TypeReg { + files = append(files, hdr.Name) + } + } + sort.Strings(files) + } + } else if mimetype == "application/x-bzip" { + bzf := bzip2.NewReader(r) + tReadr := tar.NewReader(bzf) + for { + hdr, err := tReadr.Next() + if err == io.EOF || err != nil { + break + } + if hdr.Typeflag == tar.TypeDir || hdr.Typeflag == tar.TypeReg { + files = append(files, hdr.Name) + } + } + sort.Strings(files) + } else if mimetype == "application/zip" { + zf, err := zip.NewReader(r, size) + if err == nil { + for _, f := range zf.File { + files = append(files, f.Name) + } + } + sort.Strings(files) + } + + return +} diff --git a/helpers/helpers.go b/helpers/helpers.go new file mode 100644 index 0000000..aef68ff --- /dev/null +++ b/helpers/helpers.go @@ -0,0 +1,67 @@ +package helpers + +import ( + "encoding/hex" + "io" + "unicode" + + "github.com/minio/sha256-simd" + "gopkg.in/h2non/filetype.v1" +) + +func DetectMime(r io.ReadSeeker) (string, error) { + // Get first 512 bytes for mimetype detection + 
header := make([]byte, 512) + + r.Seek(0, 0) + r.Read(header) + r.Seek(0, 0) + + kind, err := filetype.Match(header) + if err != nil { + return "application/octet-stream", err + } else if kind.MIME.Value != "" { + return kind.MIME.Value, nil + } + + // Check if the file seems anything like text + if printable(header) { + return "text/plain", nil + } else { + return "application/octet-stream", nil + } +} + +func Sha256sum(r io.ReadSeeker) (string, error) { + hasher := sha256.New() + + r.Seek(0, 0) + _, err := io.Copy(hasher, r) + if err != nil { + return "", err + } + + r.Seek(0, 0) + + return hex.EncodeToString(hasher.Sum(nil)), nil +} + +func printable(data []byte) bool { + for i, b := range data { + r := rune(b) + + // A null terminator that's not at the beginning of the file + if r == 0 && i == 0 { + return false + } else if r == 0 && i < 0 { + continue + } + + if r > unicode.MaxASCII { + return false + } + + } + + return true +} diff --git a/linx-cleanup/cleanup.go b/linx-cleanup/cleanup.go index 9ea89ae..88c2bce 100644 --- a/linx-cleanup/cleanup.go +++ b/linx-cleanup/cleanup.go @@ -5,7 +5,6 @@ import ( "log" "github.com/andreimarcu/linx-server/backends/localfs" - "github.com/andreimarcu/linx-server/backends/metajson" "github.com/andreimarcu/linx-server/expiry" ) @@ -22,17 +21,15 @@ func main() { "don't log deleted files") flag.Parse() - metaStorageBackend := localfs.NewLocalfsBackend(metaDir) - metaBackend := metajson.NewMetaJSONBackend(metaStorageBackend) - fileBackend := localfs.NewLocalfsBackend(filesDir) + fileBackend := localfs.NewLocalfsBackend(metaDir, filesDir) - files, err := metaStorageBackend.List() + files, err := fileBackend.List() if err != nil { panic(err) } for _, filename := range files { - metadata, err := metaBackend.Get(filename) + metadata, err := fileBackend.Head(filename) if err != nil { if !noLogs { log.Printf("Failed to find metadata for %s", filename) @@ -44,7 +41,6 @@ func main() { log.Printf("Delete %s", filename) } 
fileBackend.Delete(filename) - metaStorageBackend.Delete(filename) } } } diff --git a/meta.go b/meta.go deleted file mode 100644 index 2fc7b81..0000000 --- a/meta.go +++ /dev/null @@ -1,165 +0,0 @@ -package main - -import ( - "archive/tar" - "archive/zip" - "compress/bzip2" - "compress/gzip" - "encoding/hex" - "errors" - "io" - "sort" - "time" - "unicode" - - "github.com/andreimarcu/linx-server/backends" - "github.com/andreimarcu/linx-server/expiry" - "github.com/dchest/uniuri" - "github.com/minio/sha256-simd" - "gopkg.in/h2non/filetype.v1" -) - -var NotFoundErr = errors.New("File not found.") - -func generateMetadata(fName string, exp time.Time, delKey string) (m backends.Metadata, err error) { - file, err := fileBackend.Open(fName) - if err != nil { - return - } - defer file.Close() - - m.Size, err = fileBackend.Size(fName) - if err != nil { - return - } - - m.Expiry = exp - - if delKey == "" { - m.DeleteKey = uniuri.NewLen(30) - } else { - m.DeleteKey = delKey - } - - // Get first 512 bytes for mimetype detection - header := make([]byte, 512) - file.Read(header) - - kind, err := filetype.Match(header) - if err != nil { - m.Mimetype = "application/octet-stream" - } else { - m.Mimetype = kind.MIME.Value - } - - if m.Mimetype == "" { - // Check if the file seems anything like text - if printable(header) { - m.Mimetype = "text/plain" - } else { - m.Mimetype = "application/octet-stream" - } - } - - // Compute the sha256sum - hasher := sha256.New() - file.Seek(0, 0) - _, err = io.Copy(hasher, file) - if err == nil { - m.Sha256sum = hex.EncodeToString(hasher.Sum(nil)) - } - file.Seek(0, 0) - - // If archive, grab list of filenames - if m.Mimetype == "application/x-tar" { - tReadr := tar.NewReader(file) - for { - hdr, err := tReadr.Next() - if err == io.EOF || err != nil { - break - } - if hdr.Typeflag == tar.TypeDir || hdr.Typeflag == tar.TypeReg { - m.ArchiveFiles = append(m.ArchiveFiles, hdr.Name) - } - } - sort.Strings(m.ArchiveFiles) - } else if m.Mimetype == 
"application/x-gzip" { - gzf, err := gzip.NewReader(file) - if err == nil { - tReadr := tar.NewReader(gzf) - for { - hdr, err := tReadr.Next() - if err == io.EOF || err != nil { - break - } - if hdr.Typeflag == tar.TypeDir || hdr.Typeflag == tar.TypeReg { - m.ArchiveFiles = append(m.ArchiveFiles, hdr.Name) - } - } - sort.Strings(m.ArchiveFiles) - } - } else if m.Mimetype == "application/x-bzip" { - bzf := bzip2.NewReader(file) - tReadr := tar.NewReader(bzf) - for { - hdr, err := tReadr.Next() - if err == io.EOF || err != nil { - break - } - if hdr.Typeflag == tar.TypeDir || hdr.Typeflag == tar.TypeReg { - m.ArchiveFiles = append(m.ArchiveFiles, hdr.Name) - } - } - sort.Strings(m.ArchiveFiles) - } else if m.Mimetype == "application/zip" { - zf, err := zip.NewReader(file, m.Size) - if err == nil { - for _, f := range zf.File { - m.ArchiveFiles = append(m.ArchiveFiles, f.Name) - } - } - sort.Strings(m.ArchiveFiles) - } - - return -} - -func metadataWrite(filename string, metadata *backends.Metadata) error { - return metaBackend.Put(filename, metadata) -} - -func metadataRead(filename string) (metadata backends.Metadata, err error) { - metadata, err = metaBackend.Get(filename) - if err != nil { - // Metadata does not exist, generate one - newMData, err := generateMetadata(filename, expiry.NeverExpire, "") - if err != nil { - return metadata, err - } - metadataWrite(filename, &newMData) - - metadata, err = metaBackend.Get(filename) - } - - return -} - -func printable(data []byte) bool { - for i, b := range data { - r := rune(b) - - // A null terminator that's not at the beginning of the file - if r == 0 && i == 0 { - return false - } else if r == 0 && i < 0 { - continue - } - - if r > unicode.MaxASCII { - return false - } - - } - - return true -} diff --git a/pages.go b/pages.go index f58fa88..bb38f37 100644 --- a/pages.go +++ b/pages.go @@ -64,12 +64,10 @@ func oopsHandler(c web.C, w http.ResponseWriter, r *http.Request, rt RespType, m w.WriteHeader(500) 
renderTemplate(Templates["oops.html"], pongo2.Context{"msg": msg}, r, w) return - } else if rt == RespPLAIN { w.WriteHeader(500) fmt.Fprintf(w, "%s", msg) return - } else if rt == RespJSON { js, _ := json.Marshal(map[string]string{ "error": msg, @@ -79,7 +77,6 @@ func oopsHandler(c web.C, w http.ResponseWriter, r *http.Request, rt RespType, m w.WriteHeader(500) w.Write(js) return - } else if rt == RespAUTO { if strings.EqualFold("application/json", r.Header.Get("Accept")) { oopsHandler(c, w, r, RespJSON, msg) @@ -89,11 +86,33 @@ func oopsHandler(c web.C, w http.ResponseWriter, r *http.Request, rt RespType, m } } -func badRequestHandler(c web.C, w http.ResponseWriter, r *http.Request) { - w.WriteHeader(http.StatusBadRequest) - err := renderTemplate(Templates["400.html"], pongo2.Context{}, r, w) - if err != nil { - http.Error(w, err.Error(), http.StatusInternalServerError) +func badRequestHandler(c web.C, w http.ResponseWriter, r *http.Request, rt RespType, msg string) { + if rt == RespHTML { + w.WriteHeader(http.StatusBadRequest) + err := renderTemplate(Templates["400.html"], pongo2.Context{"msg": msg}, r, w) + if err != nil { + http.Error(w, err.Error(), http.StatusInternalServerError) + } + return + } else if rt == RespPLAIN { + w.WriteHeader(http.StatusBadRequest) + fmt.Fprintf(w, "%s", msg) + return + } else if rt == RespJSON { + js, _ := json.Marshal(map[string]string{ + "error": msg, + }) + + w.Header().Set("Content-Type", "application/json; charset=UTF-8") + w.WriteHeader(http.StatusBadRequest) + w.Write(js) + return + } else if rt == RespAUTO { + if strings.EqualFold("application/json", r.Header.Get("Accept")) { + badRequestHandler(c, w, r, RespJSON, msg) + } else { + badRequestHandler(c, w, r, RespHTML, msg) + } } } diff --git a/server.go b/server.go index c7b4342..851a7cf 100644 --- a/server.go +++ b/server.go @@ -16,7 +16,7 @@ import ( "github.com/GeertJohan/go.rice" "github.com/andreimarcu/linx-server/backends" 
"github.com/andreimarcu/linx-server/backends/localfs" - "github.com/andreimarcu/linx-server/backends/metajson" + "github.com/andreimarcu/linx-server/backends/s3" "github.com/flosch/pongo2" "github.com/vharitonsky/iniflags" "github.com/zenazn/goji/graceful" @@ -61,6 +61,9 @@ var Config struct { remoteAuthFile string addHeaders headerList noDirectAgents bool + s3Endpoint string + s3Region string + s3Bucket string } var Templates = make(map[string]*pongo2.Template) @@ -70,8 +73,7 @@ var timeStarted time.Time var timeStartedStr string var remoteAuthKeys []string var metaStorageBackend backends.MetaStorageBackend -var metaBackend backends.MetaBackend -var fileBackend backends.StorageBackend +var storageBackend backends.StorageBackend func setup() *web.Mux { mux := web.New() @@ -135,9 +137,11 @@ func setup() *web.Mux { Config.selifPath = Config.selifPath + "/" } - metaStorageBackend = localfs.NewLocalfsBackend(Config.metaDir) - metaBackend = metajson.NewMetaJSONBackend(metaStorageBackend) - fileBackend = localfs.NewLocalfsBackend(Config.filesDir) + if Config.s3Bucket != "" { + storageBackend = s3.NewS3Backend(Config.s3Bucket, Config.s3Region, Config.s3Endpoint) + } else { + storageBackend = localfs.NewLocalfsBackend(Config.metaDir, Config.filesDir) + } // Template setup p2l, err := NewPongo2TemplatesLoader() @@ -255,6 +259,12 @@ func main() { "Add an arbitrary header to the response. 
This option can be used multiple times.") flag.BoolVar(&Config.noDirectAgents, "nodirectagents", false, "disable serving files directly for wget/curl user agents") + flag.StringVar(&Config.s3Endpoint, "s3-endpoint", "", + "S3 endpoint") + flag.StringVar(&Config.s3Region, "s3-region", "", + "S3 region") + flag.StringVar(&Config.s3Bucket, "s3-bucket", "", + "S3 bucket to use for files and metadata") iniflags.Parse() diff --git a/server_test.go b/server_test.go index d9374ab..a1ec853 100644 --- a/server_test.go +++ b/server_test.go @@ -486,7 +486,6 @@ func TestPostJSONUploadMaxExpiry(t *testing.T) { var myjson RespOkJSON err = json.Unmarshal([]byte(w.Body.String()), &myjson) if err != nil { - fmt.Println(w.Body.String()) t.Fatal(err) } @@ -643,14 +642,45 @@ func TestPostEmptyUpload(t *testing.T) { mux.ServeHTTP(w, req) - if w.Code != 500 { + if w.Code != 400 { t.Log(w.Body.String()) - t.Fatalf("Status code is not 500, but %d", w.Code) + t.Fatalf("Status code is not 400, but %d", w.Code) + } +} + +func TestPostTooLargeUpload(t *testing.T) { + mux := setup() + oldMaxSize := Config.maxSize + Config.maxSize = 2 + w := httptest.NewRecorder() + + filename := generateBarename() + ".txt" + + var b bytes.Buffer + mw := multipart.NewWriter(&b) + fw, err := mw.CreateFormFile("file", filename) + if err != nil { + t.Fatal(err) } - if !strings.Contains(w.Body.String(), "Empty file") { - t.Fatal("Response did not contain 'Empty file'") + fw.Write([]byte("test content")) + mw.Close() + + req, err := http.NewRequest("POST", "/upload/", &b) + req.Header.Set("Content-Type", mw.FormDataContentType()) + req.Header.Set("Referer", Config.siteURL) + if err != nil { + t.Fatal(err) } + + mux.ServeHTTP(w, req) + + if w.Code != 400 { + t.Log(w.Body.String()) + t.Fatalf("Status code is not 400, but %d", w.Code) + } + + Config.maxSize = oldMaxSize } func TestPostEmptyJSONUpload(t *testing.T) { @@ -679,9 +709,9 @@ func TestPostEmptyJSONUpload(t *testing.T) { mux.ServeHTTP(w, req) - if w.Code != 500 
{ + if w.Code != 400 { t.Log(w.Body.String()) - t.Fatalf("Status code is not 500, but %d", w.Code) + t.Fatalf("Status code is not 400, but %d", w.Code) } var myjson RespErrJSON @@ -690,7 +720,7 @@ func TestPostEmptyJSONUpload(t *testing.T) { t.Fatal(err) } - if myjson.Error != "Could not upload file: Empty file" { + if myjson.Error != "Empty file" { t.Fatal("Json 'error' was not 'Empty file' but " + myjson.Error) } } @@ -768,11 +798,41 @@ func TestPutEmptyUpload(t *testing.T) { mux.ServeHTTP(w, req) - if !strings.Contains(w.Body.String(), "Empty file") { - t.Fatal("Response doesn't contain'Empty file'") + if w.Code != 400 { + t.Fatalf("Status code is not 400, but %d", w.Code) } } +func TestPutTooLargeUpload(t *testing.T) { + mux := setup() + oldMaxSize := Config.maxSize + Config.maxSize = 2 + + w := httptest.NewRecorder() + + filename := generateBarename() + ".file" + + req, err := http.NewRequest("PUT", "/upload/"+filename, strings.NewReader("File too big")) + if err != nil { + t.Fatal(err) + } + + req.Header.Set("Linx-Randomize", "yes") + + mux.ServeHTTP(w, req) + + if w.Code != 500 { + t.Log(w.Body.String()) + t.Fatalf("Status code is not 500, but %d", w.Code) + } + + if !strings.Contains(w.Body.String(), "request body too large") { + t.Fatal("Response did not contain 'request body too large'") + } + + Config.maxSize = oldMaxSize +} + func TestPutJSONUpload(t *testing.T) { var myjson RespOkJSON diff --git a/torrent.go b/torrent.go index 4155872..c5e7a58 100644 --- a/torrent.go +++ b/torrent.go @@ -2,65 +2,44 @@ package main import ( "bytes" - "crypto/sha1" "fmt" "io" "net/http" "time" "github.com/andreimarcu/linx-server/backends" + "github.com/andreimarcu/linx-server/expiry" + "github.com/andreimarcu/linx-server/torrent" "github.com/zeebo/bencode" "github.com/zenazn/goji/web" ) -const ( - TORRENT_PIECE_LENGTH = 262144 -) +func createTorrent(fileName string, f io.Reader, r *http.Request) ([]byte, error) { + url := getSiteURL(r) + Config.selifPath + fileName + 
chunk := make([]byte, torrent.TORRENT_PIECE_LENGTH) -type TorrentInfo struct { - PieceLength int `bencode:"piece length"` - Pieces string `bencode:"pieces"` - Name string `bencode:"name"` - Length int `bencode:"length"` -} - -type Torrent struct { - Encoding string `bencode:"encoding"` - Info TorrentInfo `bencode:"info"` - UrlList []string `bencode:"url-list"` -} - -func hashPiece(piece []byte) []byte { - h := sha1.New() - h.Write(piece) - return h.Sum(nil) -} - -func createTorrent(fileName string, f io.ReadCloser, r *http.Request) ([]byte, error) { - chunk := make([]byte, TORRENT_PIECE_LENGTH) - - torrent := Torrent{ + t := torrent.Torrent{ Encoding: "UTF-8", - Info: TorrentInfo{ - PieceLength: TORRENT_PIECE_LENGTH, + Info: torrent.TorrentInfo{ + PieceLength: torrent.TORRENT_PIECE_LENGTH, Name: fileName, }, - UrlList: []string{fmt.Sprintf("%s%s%s", getSiteURL(r), Config.selifPath, fileName)}, + UrlList: []string{url}, } for { - n, err := f.Read(chunk) + n, err := io.ReadFull(f, chunk) if err == io.EOF { break - } else if err != nil { + } else if err != nil && err != io.ErrUnexpectedEOF { return []byte{}, err } - torrent.Info.Length += n - torrent.Info.Pieces += string(hashPiece(chunk[:n])) + t.Info.Length += n + t.Info.Pieces += string(torrent.HashPiece(chunk[:n])) } - data, err := bencode.EncodeBytes(&torrent) + data, err := bencode.EncodeBytes(&t) if err != nil { return []byte{}, err } @@ -71,22 +50,25 @@ func createTorrent(fileName string, f io.ReadCloser, r *http.Request) ([]byte, e func fileTorrentHandler(c web.C, w http.ResponseWriter, r *http.Request) { fileName := c.URLParams["name"] - _, err := checkFile(fileName) - if err == NotFoundErr { + metadata, f, err := storageBackend.Get(fileName) + if err == backends.NotFoundErr { notFoundHandler(c, w, r) return } else if err == backends.BadMetadata { oopsHandler(c, w, r, RespAUTO, "Corrupt metadata.") return - } - - f, err := fileBackend.Open(fileName) - if err != nil { - oopsHandler(c, w, r, RespHTML, "Could 
not create torrent.") + } else if err != nil { + oopsHandler(c, w, r, RespAUTO, err.Error()) return } defer f.Close() + if expiry.IsTsExpired(metadata.Expiry) { + storageBackend.Delete(fileName) + notFoundHandler(c, w, r) + return + } + encoded, err := createTorrent(fileName, f, r) if err != nil { oopsHandler(c, w, r, RespHTML, "Could not create torrent.") diff --git a/torrent/torrent.go b/torrent/torrent.go new file mode 100644 index 0000000..a47d884 --- /dev/null +++ b/torrent/torrent.go @@ -0,0 +1,28 @@ +package torrent + +import ( + "crypto/sha1" +) + +const ( + TORRENT_PIECE_LENGTH = 262144 +) + +type TorrentInfo struct { + PieceLength int `bencode:"piece length"` + Pieces string `bencode:"pieces"` + Name string `bencode:"name"` + Length int `bencode:"length"` +} + +type Torrent struct { + Encoding string `bencode:"encoding"` + Info TorrentInfo `bencode:"info"` + UrlList []string `bencode:"url-list"` +} + +func HashPiece(piece []byte) []byte { + h := sha1.New() + h.Write(piece) + return h.Sum(nil) +} diff --git a/torrent_test.go b/torrent_test.go index b553231..1d227fd 100644 --- a/torrent_test.go +++ b/torrent_test.go @@ -5,12 +5,13 @@ import ( "os" "testing" + "github.com/andreimarcu/linx-server/torrent" "github.com/zeebo/bencode" ) func TestCreateTorrent(t *testing.T) { fileName := "server.go" - var decoded Torrent + var decoded torrent.Torrent f, err := os.Open("server.go") if err != nil { @@ -52,7 +53,7 @@ func TestCreateTorrent(t *testing.T) { } func TestCreateTorrentWithImage(t *testing.T) { - var decoded Torrent + var decoded torrent.Torrent f, err := os.Open("static/images/404.jpg") if err != nil { diff --git a/upload.go b/upload.go index acdd204..d46c4d5 100644 --- a/upload.go +++ b/upload.go @@ -22,6 +22,7 @@ import ( "gopkg.in/h2non/filetype.v1" ) +var FileTooLargeError = errors.New("File too large.") var fileBlacklist = map[string]bool{ "favicon.ico": true, "index.htm": true, @@ -34,10 +35,11 @@ var fileBlacklist = map[string]bool{ // Describes 
metadata directly from the user request type UploadRequest struct { src io.Reader + size int64 filename string expiry time.Duration // Seconds until expiry, 0 = never + deleteKey string // Empty string if not defined randomBarename bool - deletionKey string // Empty string if not defined } // Metadata associated with a file as it would actually be stored @@ -48,7 +50,7 @@ type Upload struct { func uploadPostHandler(c web.C, w http.ResponseWriter, r *http.Request) { if !strictReferrerCheck(r, getSiteURL(r), []string{"Linx-Delete-Key", "Linx-Expiry", "Linx-Randomize", "X-Requested-With"}) { - badRequestHandler(c, w, r) + badRequestHandler(c, w, r, RespAUTO, "") return } @@ -65,32 +67,39 @@ func uploadPostHandler(c web.C, w http.ResponseWriter, r *http.Request) { } defer file.Close() - r.ParseForm() - if r.Form.Get("randomize") == "true" { - upReq.randomBarename = true - } - upReq.expiry = parseExpiry(r.Form.Get("expires")) upReq.src = file + upReq.size = headers.Size upReq.filename = headers.Filename } else { - if r.FormValue("content") == "" { - oopsHandler(c, w, r, RespHTML, "Empty file") + if r.PostFormValue("content") == "" { + badRequestHandler(c, w, r, RespAUTO, "Empty file") return } - extension := r.FormValue("extension") + extension := r.PostFormValue("extension") if extension == "" { extension = "txt" } - upReq.src = strings.NewReader(r.FormValue("content")) - upReq.expiry = parseExpiry(r.FormValue("expires")) - upReq.filename = r.FormValue("filename") + "." + extension + content := r.PostFormValue("content") + + upReq.src = strings.NewReader(content) + upReq.size = int64(len(content)) + upReq.filename = r.PostFormValue("filename") + "." 
+ extension + } + + upReq.expiry = parseExpiry(r.PostFormValue("expires")) + + if r.PostFormValue("randomize") == "true" { + upReq.randomBarename = true } upload, err := processUpload(upReq) if strings.EqualFold("application/json", r.Header.Get("Accept")) { - if err != nil { + if err == FileTooLargeError || err == backends.FileEmptyError { + badRequestHandler(c, w, r, RespJSON, err.Error()) + return + } else if err != nil { oopsHandler(c, w, r, RespJSON, "Could not upload file: "+err.Error()) return } @@ -99,14 +108,16 @@ func uploadPostHandler(c web.C, w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json; charset=UTF-8") w.Write(js) } else { - if err != nil { + if err == FileTooLargeError || err == backends.FileEmptyError { + badRequestHandler(c, w, r, RespHTML, err.Error()) + return + } else if err != nil { oopsHandler(c, w, r, RespHTML, "Could not upload file: "+err.Error()) return } http.Redirect(w, r, Config.sitePath+upload.Filename, 303) } - } func uploadPutHandler(c web.C, w http.ResponseWriter, r *http.Request) { @@ -115,12 +126,15 @@ func uploadPutHandler(c web.C, w http.ResponseWriter, r *http.Request) { defer r.Body.Close() upReq.filename = c.URLParams["name"] - upReq.src = r.Body + upReq.src = http.MaxBytesReader(w, r.Body, Config.maxSize) upload, err := processUpload(upReq) if strings.EqualFold("application/json", r.Header.Get("Accept")) { - if err != nil { + if err == FileTooLargeError || err == backends.FileEmptyError { + badRequestHandler(c, w, r, RespJSON, err.Error()) + return + } else if err != nil { oopsHandler(c, w, r, RespJSON, "Could not upload file: "+err.Error()) return } @@ -129,7 +143,10 @@ func uploadPutHandler(c web.C, w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "application/json; charset=UTF-8") w.Write(js) } else { - if err != nil { + if err == FileTooLargeError || err == backends.FileEmptyError { + badRequestHandler(c, w, r, RespPLAIN, err.Error()) + return + } else 
if err != nil { oopsHandler(c, w, r, RespPLAIN, "Could not upload file: "+err.Error()) return } @@ -162,8 +179,8 @@ func uploadRemote(c web.C, w http.ResponseWriter, r *http.Request) { } upReq.filename = filepath.Base(grabUrl.Path) - upReq.src = resp.Body - upReq.deletionKey = r.FormValue("deletekey") + upReq.src = http.MaxBytesReader(w, resp.Body, Config.maxSize) + upReq.deleteKey = r.FormValue("deletekey") upReq.randomBarename = r.FormValue("randomize") == "yes" upReq.expiry = parseExpiry(r.FormValue("expiry")) @@ -193,15 +210,18 @@ func uploadHeaderProcess(r *http.Request, upReq *UploadRequest) { upReq.randomBarename = true } - upReq.deletionKey = r.Header.Get("Linx-Delete-Key") + upReq.deleteKey = r.Header.Get("Linx-Delete-Key") // Get seconds until expiry. Non-integer responses never expire. expStr := r.Header.Get("Linx-Expiry") upReq.expiry = parseExpiry(expStr) - } func processUpload(upReq UploadRequest) (upload Upload, err error) { + if upReq.size > Config.maxSize { + return upload, FileTooLargeError + } + // Determine the appropriate filename, then write to disk barename, extension := barePlusExt(upReq.filename) @@ -215,7 +235,7 @@ func processUpload(upReq UploadRequest) (upload Upload, err error) { header = make([]byte, 512) n, _ := upReq.src.Read(header) if n == 0 { - return upload, errors.New("Empty file") + return upload, backends.FileEmptyError } header = header[:n] @@ -231,13 +251,13 @@ func processUpload(upReq UploadRequest) (upload Upload, err error) { upload.Filename = strings.Join([]string{barename, extension}, ".") upload.Filename = strings.Replace(upload.Filename, " ", "", -1) - fileexists, _ := fileBackend.Exists(upload.Filename) + fileexists, _ := storageBackend.Exists(upload.Filename) // Check if the delete key matches, in which case overwrite if fileexists { - metad, merr := metadataRead(upload.Filename) + metad, merr := storageBackend.Head(upload.Filename) if merr == nil { - if upReq.deletionKey == metad.DeleteKey { + if upReq.deleteKey == 
metad.DeleteKey { fileexists = false } } @@ -252,7 +272,7 @@ func processUpload(upReq UploadRequest) (upload Upload, err error) { } upload.Filename = strings.Join([]string{barename, extension}, ".") - fileexists, err = fileBackend.Exists(upload.Filename) + fileexists, err = storageBackend.Exists(upload.Filename) } if fileBlacklist[strings.ToLower(upload.Filename)] { @@ -267,24 +287,15 @@ func processUpload(upReq UploadRequest) (upload Upload, err error) { fileExpiry = time.Now().Add(upReq.expiry) } - bytes, err := fileBackend.Put(upload.Filename, io.MultiReader(bytes.NewReader(header), upReq.src)) - if err != nil { - return upload, err - } else if bytes > Config.maxSize { - fileBackend.Delete(upload.Filename) - return upload, errors.New("File too large") + if upReq.deleteKey == "" { + upReq.deleteKey = uniuri.NewLen(30) } - upload.Metadata, err = generateMetadata(upload.Filename, fileExpiry, upReq.deletionKey) + upload.Metadata, err = storageBackend.Put(upload.Filename, io.MultiReader(bytes.NewReader(header), upReq.src), fileExpiry, upReq.deleteKey) if err != nil { - fileBackend.Delete(upload.Filename) - return - } - err = metadataWrite(upload.Filename, &upload.Metadata) - if err != nil { - fileBackend.Delete(upload.Filename) - return + return upload, err } + return }