Add S3 backend (#156)

This commit is contained in:
mutantmonkey 2019-01-25 07:33:11 +00:00 committed by Andrei Marcu
parent 0fb5fa1c51
commit 5d9a93b1e2
21 changed files with 738 additions and 441 deletions

View File

@ -13,6 +13,8 @@ FROM alpine:3.8
COPY --from=build /go/bin/linx-server /usr/local/bin/linx-server COPY --from=build /go/bin/linx-server /usr/local/bin/linx-server
ENV GOPATH /go ENV GOPATH /go
ENV SSL_CERT_FILE /etc/ssl/cert.pem
COPY static /go/src/github.com/andreimarcu/linx-server/static/ COPY static /go/src/github.com/andreimarcu/linx-server/static/
COPY templates /go/src/github.com/andreimarcu/linx-server/templates/ COPY templates /go/src/github.com/andreimarcu/linx-server/templates/

View File

@ -1,63 +1,149 @@
package localfs package localfs
import ( import (
"errors" "encoding/json"
"io" "io"
"io/ioutil" "io/ioutil"
"net/http"
"os" "os"
"path" "path"
"time"
"github.com/andreimarcu/linx-server/backends" "github.com/andreimarcu/linx-server/backends"
"github.com/andreimarcu/linx-server/helpers"
) )
type LocalfsBackend struct { type LocalfsBackend struct {
basePath string metaPath string
filesPath string
} }
func (b LocalfsBackend) Delete(key string) error { type MetadataJSON struct {
return os.Remove(path.Join(b.basePath, key)) DeleteKey string `json:"delete_key"`
Sha256sum string `json:"sha256sum"`
Mimetype string `json:"mimetype"`
Size int64 `json:"size"`
Expiry int64 `json:"expiry"`
ArchiveFiles []string `json:"archive_files,omitempty"`
}
func (b LocalfsBackend) Delete(key string) (err error) {
err = os.Remove(path.Join(b.filesPath, key))
if err != nil {
return
}
err = os.Remove(path.Join(b.metaPath, key))
return
} }
func (b LocalfsBackend) Exists(key string) (bool, error) { func (b LocalfsBackend) Exists(key string) (bool, error) {
_, err := os.Stat(path.Join(b.basePath, key)) _, err := os.Stat(path.Join(b.filesPath, key))
return err == nil, err return err == nil, err
} }
func (b LocalfsBackend) Get(key string) ([]byte, error) { func (b LocalfsBackend) Head(key string) (metadata backends.Metadata, err error) {
return ioutil.ReadFile(path.Join(b.basePath, key)) f, err := os.Open(path.Join(b.metaPath, key))
if os.IsNotExist(err) {
return metadata, backends.NotFoundErr
} else if err != nil {
return metadata, backends.BadMetadata
}
defer f.Close()
decoder := json.NewDecoder(f)
mjson := MetadataJSON{}
if err := decoder.Decode(&mjson); err != nil {
return metadata, backends.BadMetadata
}
metadata.DeleteKey = mjson.DeleteKey
metadata.Mimetype = mjson.Mimetype
metadata.ArchiveFiles = mjson.ArchiveFiles
metadata.Sha256sum = mjson.Sha256sum
metadata.Expiry = time.Unix(mjson.Expiry, 0)
metadata.Size = mjson.Size
return
} }
func (b LocalfsBackend) Put(key string, r io.Reader) (int64, error) { func (b LocalfsBackend) Get(key string) (metadata backends.Metadata, f io.ReadCloser, err error) {
dst, err := os.Create(path.Join(b.basePath, key)) metadata, err = b.Head(key)
if err != nil { if err != nil {
return 0, err return
}
f, err = os.Open(path.Join(b.filesPath, key))
if err != nil {
return
}
return
}
func (b LocalfsBackend) writeMetadata(key string, metadata backends.Metadata) error {
metaPath := path.Join(b.metaPath, key)
mjson := MetadataJSON{
DeleteKey: metadata.DeleteKey,
Mimetype: metadata.Mimetype,
ArchiveFiles: metadata.ArchiveFiles,
Sha256sum: metadata.Sha256sum,
Expiry: metadata.Expiry.Unix(),
Size: metadata.Size,
}
dst, err := os.Create(metaPath)
if err != nil {
return err
}
defer dst.Close()
encoder := json.NewEncoder(dst)
err = encoder.Encode(mjson)
if err != nil {
os.Remove(metaPath)
return err
}
return nil
}
func (b LocalfsBackend) Put(key string, r io.Reader, expiry time.Time, deleteKey string) (m backends.Metadata, err error) {
filePath := path.Join(b.filesPath, key)
dst, err := os.Create(filePath)
if err != nil {
return
} }
defer dst.Close() defer dst.Close()
bytes, err := io.Copy(dst, r) bytes, err := io.Copy(dst, r)
if bytes == 0 { if bytes == 0 {
b.Delete(key) os.Remove(filePath)
return bytes, errors.New("Empty file") return m, backends.FileEmptyError
} else if err != nil { } else if err != nil {
b.Delete(key) os.Remove(filePath)
return bytes, err return m, err
} }
return bytes, err m.Expiry = expiry
} m.DeleteKey = deleteKey
m.Size = bytes
m.Mimetype, _ = helpers.DetectMime(dst)
m.Sha256sum, _ = helpers.Sha256sum(dst)
m.ArchiveFiles, _ = helpers.ListArchiveFiles(m.Mimetype, m.Size, dst)
func (b LocalfsBackend) Open(key string) (backends.ReadSeekCloser, error) { err = b.writeMetadata(key, m)
return os.Open(path.Join(b.basePath, key)) if err != nil {
} os.Remove(filePath)
return
}
func (b LocalfsBackend) ServeFile(key string, w http.ResponseWriter, r *http.Request) { return
filePath := path.Join(b.basePath, key)
http.ServeFile(w, r, filePath)
} }
func (b LocalfsBackend) Size(key string) (int64, error) { func (b LocalfsBackend) Size(key string) (int64, error) {
fileInfo, err := os.Stat(path.Join(b.basePath, key)) fileInfo, err := os.Stat(path.Join(b.filesPath, key))
if err != nil { if err != nil {
return 0, err return 0, err
} }
@ -68,7 +154,7 @@ func (b LocalfsBackend) Size(key string) (int64, error) {
func (b LocalfsBackend) List() ([]string, error) { func (b LocalfsBackend) List() ([]string, error) {
var output []string var output []string
files, err := ioutil.ReadDir(b.basePath) files, err := ioutil.ReadDir(b.filesPath)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -80,6 +166,9 @@ func (b LocalfsBackend) List() ([]string, error) {
return output, nil return output, nil
} }
func NewLocalfsBackend(basePath string) LocalfsBackend { func NewLocalfsBackend(metaPath string, filesPath string) LocalfsBackend {
return LocalfsBackend{basePath: basePath} return LocalfsBackend{
metaPath: metaPath,
filesPath: filesPath,
}
} }

View File

@ -5,11 +5,6 @@ import (
"time" "time"
) )
type MetaBackend interface {
Get(key string) (Metadata, error)
Put(key string, metadata *Metadata) error
}
type Metadata struct { type Metadata struct {
DeleteKey string DeleteKey string
Sha256sum string Sha256sum string

View File

@ -1,70 +0,0 @@
package metajson
import (
"bytes"
"encoding/json"
"time"
"github.com/andreimarcu/linx-server/backends"
)
type MetadataJSON struct {
DeleteKey string `json:"delete_key"`
Sha256sum string `json:"sha256sum"`
Mimetype string `json:"mimetype"`
Size int64 `json:"size"`
Expiry int64 `json:"expiry"`
ArchiveFiles []string `json:"archive_files,omitempty"`
}
type MetaJSONBackend struct {
storage backends.MetaStorageBackend
}
func (m MetaJSONBackend) Put(key string, metadata *backends.Metadata) error {
mjson := MetadataJSON{}
mjson.DeleteKey = metadata.DeleteKey
mjson.Mimetype = metadata.Mimetype
mjson.ArchiveFiles = metadata.ArchiveFiles
mjson.Sha256sum = metadata.Sha256sum
mjson.Expiry = metadata.Expiry.Unix()
mjson.Size = metadata.Size
byt, err := json.Marshal(mjson)
if err != nil {
return err
}
if _, err := m.storage.Put(key, bytes.NewBuffer(byt)); err != nil {
return err
}
return nil
}
func (m MetaJSONBackend) Get(key string) (metadata backends.Metadata, err error) {
b, err := m.storage.Get(key)
if err != nil {
return metadata, backends.BadMetadata
}
mjson := MetadataJSON{}
err = json.Unmarshal(b, &mjson)
if err != nil {
return metadata, backends.BadMetadata
}
metadata.DeleteKey = mjson.DeleteKey
metadata.Mimetype = mjson.Mimetype
metadata.ArchiveFiles = mjson.ArchiveFiles
metadata.Sha256sum = mjson.Sha256sum
metadata.Expiry = time.Unix(mjson.Expiry, 0)
metadata.Size = mjson.Size
return
}
func NewMetaJSONBackend(storage backends.MetaStorageBackend) MetaJSONBackend {
return MetaJSONBackend{storage: storage}
}

192
backends/s3/s3.go Normal file
View File

@ -0,0 +1,192 @@
package s3
import (
"io"
"io/ioutil"
"os"
"strconv"
"time"
"github.com/andreimarcu/linx-server/backends"
"github.com/andreimarcu/linx-server/helpers"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/s3"
"github.com/aws/aws-sdk-go/service/s3/s3manager"
)
// S3Backend stores uploaded files as objects in a single S3 bucket.
// File metadata is carried on each object's user-metadata string map
// (see mapMetadata/unmapMetadata), so no separate metadata store is needed.
type S3Backend struct {
	bucket string // name of the target S3 bucket
	svc    *s3.S3 // configured S3 service client
}
// Delete removes the object stored under key from the bucket.
// Any SDK error is passed through to the caller unchanged.
func (b S3Backend) Delete(key string) error {
	deleteInput := s3.DeleteObjectInput{
		Bucket: aws.String(b.bucket),
		Key:    aws.String(key),
	}
	_, err := b.svc.DeleteObject(&deleteInput)
	return err
}
// Exists reports whether an object for key is present in the bucket.
// The SDK error from HeadObject is returned as-is, so for a missing key
// callers receive (false, <non-nil err>) rather than (false, nil).
func (b S3Backend) Exists(key string) (bool, error) {
	_, err := b.svc.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String(b.bucket),
		Key:    aws.String(key),
	})
	return err == nil, err
}
// Head fetches a stored file's metadata without downloading the object
// body. A missing key (ErrCodeNoSuchKey, or the "NotFound" code) is
// translated to backends.NotFoundErr; any other SDK error passes through
// unchanged.
func (b S3Backend) Head(key string) (metadata backends.Metadata, err error) {
	var result *s3.HeadObjectOutput
	result, err = b.svc.HeadObject(&s3.HeadObjectInput{
		Bucket: aws.String(b.bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok {
			if aerr.Code() == s3.ErrCodeNoSuchKey || aerr.Code() == "NotFound" {
				err = backends.NotFoundErr
			}
		}
		return
	}

	// Metadata is decoded from the object's user-metadata string map,
	// the inverse of mapMetadata used by Put.
	metadata, err = unmapMetadata(result.Metadata)
	return
}
// Get retrieves both the decoded metadata and a streaming reader for the
// object body under key. The caller is responsible for closing r.
// Missing keys map to backends.NotFoundErr, matching Head.
//
// NOTE(review): r is assigned even when unmapMetadata returns an error, so
// a caller seeing non-nil err may still hold an open body to close.
func (b S3Backend) Get(key string) (metadata backends.Metadata, r io.ReadCloser, err error) {
	var result *s3.GetObjectOutput
	result, err = b.svc.GetObject(&s3.GetObjectInput{
		Bucket: aws.String(b.bucket),
		Key:    aws.String(key),
	})
	if err != nil {
		if aerr, ok := err.(awserr.Error); ok {
			if aerr.Code() == s3.ErrCodeNoSuchKey || aerr.Code() == "NotFound" {
				err = backends.NotFoundErr
			}
		}
		return
	}

	metadata, err = unmapMetadata(result.Metadata)
	r = result.Body
	return
}
// mapMetadata flattens a backends.Metadata into the string map stored as
// S3 object user metadata. Expiry is encoded as decimal Unix seconds and
// Size as a decimal string; ArchiveFiles is deliberately not persisted
// (see the XXX note in Put).
func mapMetadata(m backends.Metadata) map[string]*string {
	return map[string]*string{
		"Expiry":     aws.String(strconv.FormatInt(m.Expiry.Unix(), 10)),
		"Delete_key": aws.String(m.DeleteKey),
		"Size":       aws.String(strconv.FormatInt(m.Size, 10)),
		"Mimetype":   aws.String(m.Mimetype),
		"Sha256sum":  aws.String(m.Sha256sum),
	}
}
// unmapMetadata rebuilds a backends.Metadata from the string map stored as
// S3 object user metadata, reversing mapMetadata. Expiry and Size are
// decoded from their decimal string encodings; a parse failure returns the
// strconv error to the caller.
func unmapMetadata(input map[string]*string) (m backends.Metadata, err error) {
	get := func(k string) string { return aws.StringValue(input[k]) }

	var expirySecs int64
	if expirySecs, err = strconv.ParseInt(get("Expiry"), 10, 64); err != nil {
		return
	}
	m.Expiry = time.Unix(expirySecs, 0)

	if m.Size, err = strconv.ParseInt(get("Size"), 10, 64); err != nil {
		return
	}

	m.DeleteKey = get("Delete_key")
	m.Mimetype = get("Mimetype")
	m.Sha256sum = get("Sha256sum")
	return
}
// Put streams r into a temporary local file, derives the file's metadata
// (size, mimetype, sha256sum), then uploads the file with that metadata
// attached to S3 under key. The upload is buffered on disk first because
// size and checksum must be known before the object is written.
// The temporary file is always closed and removed via the defers below.
func (b S3Backend) Put(key string, r io.Reader, expiry time.Time, deleteKey string) (m backends.Metadata, err error) {
	tmpDst, err := ioutil.TempFile("", "linx-server-upload")
	if err != nil {
		return m, err
	}
	defer tmpDst.Close()
	defer os.Remove(tmpDst.Name())

	bytes, err := io.Copy(tmpDst, r)
	if bytes == 0 {
		// NOTE(review): checked before err, so a copy that fails after zero
		// bytes reports FileEmptyError and masks the underlying error.
		return m, backends.FileEmptyError
	} else if err != nil {
		return m, err
	}

	m.Expiry = expiry
	m.DeleteKey = deleteKey
	m.Size = bytes
	// Both helpers seek tmpDst back to offset 0 when done, so the uploader
	// below reads the buffered file from the beginning.
	m.Mimetype, _ = helpers.DetectMime(tmpDst)
	m.Sha256sum, _ = helpers.Sha256sum(tmpDst)
	// XXX: we may not be able to write this to AWS easily
	//m.ArchiveFiles, _ = helpers.ListArchiveFiles(m.Mimetype, m.Size, tmpDst)

	uploader := s3manager.NewUploaderWithClient(b.svc)
	input := &s3manager.UploadInput{
		Bucket:   aws.String(b.bucket),
		Key:      aws.String(key),
		Body:     tmpDst,
		Metadata: mapMetadata(m),
	}
	_, err = uploader.Upload(input)
	if err != nil {
		return
	}

	return
}
// Size returns the stored object's size in bytes, as reported by the
// ContentLength of a HeadObject call (no body download).
func (b S3Backend) Size(key string) (int64, error) {
	input := &s3.HeadObjectInput{
		Bucket: aws.String(b.bucket),
		Key:    aws.String(key),
	}
	result, err := b.svc.HeadObject(input)
	if err != nil {
		return 0, err
	}

	return *result.ContentLength, nil
}
// List returns the keys of every object in the bucket.
//
// Results are walked with ListObjectsPages so buckets holding more than
// one page of keys (1000 by default) are enumerated completely; a single
// ListObjects call would silently truncate at the first page.
func (b S3Backend) List() ([]string, error) {
	var output []string
	input := &s3.ListObjectsInput{
		Bucket: aws.String(b.bucket),
	}

	err := b.svc.ListObjectsPages(input, func(page *s3.ListObjectsOutput, lastPage bool) bool {
		for _, object := range page.Contents {
			output = append(output, *object.Key)
		}
		return true // keep fetching until the last page
	})
	if err != nil {
		return nil, err
	}

	return output, nil
}
// NewS3Backend constructs an S3Backend for bucket, optionally overriding
// the SDK's default region and endpoint (empty strings keep the SDK
// defaults, e.g. values resolved from the environment).
func NewS3Backend(bucket string, region string, endpoint string) S3Backend {
	cfg := aws.Config{}
	if region != "" {
		cfg.Region = aws.String(region)
	}
	if endpoint != "" {
		cfg.Endpoint = aws.String(endpoint)
	}

	sess := session.Must(session.NewSession(&cfg))
	return S3Backend{
		bucket: bucket,
		svc:    s3.New(sess),
	}
}

View File

@ -1,24 +1,17 @@
package backends package backends
import ( import (
"errors"
"io" "io"
"net/http" "time"
) )
type ReadSeekCloser interface {
io.Reader
io.Closer
io.Seeker
io.ReaderAt
}
type StorageBackend interface { type StorageBackend interface {
Delete(key string) error Delete(key string) error
Exists(key string) (bool, error) Exists(key string) (bool, error)
Get(key string) ([]byte, error) Head(key string) (Metadata, error)
Put(key string, r io.Reader) (int64, error) Get(key string) (Metadata, io.ReadCloser, error)
Open(key string) (ReadSeekCloser, error) Put(key string, r io.Reader, expiry time.Time, deleteKey string) (Metadata, error)
ServeFile(key string, w http.ResponseWriter, r *http.Request)
Size(key string) (int64, error) Size(key string) (int64, error)
} }
@ -26,3 +19,6 @@ type MetaStorageBackend interface {
StorageBackend StorageBackend
List() ([]string, error) List() ([]string, error)
} }
var NotFoundErr = errors.New("File not found.")
var FileEmptyError = errors.New("Empty file")

View File

@ -3,8 +3,8 @@ package main
import ( import (
"fmt" "fmt"
"net/http" "net/http"
"os"
"github.com/andreimarcu/linx-server/backends"
"github.com/zenazn/goji/web" "github.com/zenazn/goji/web"
) )
@ -13,24 +13,19 @@ func deleteHandler(c web.C, w http.ResponseWriter, r *http.Request) {
filename := c.URLParams["name"] filename := c.URLParams["name"]
// Ensure requested file actually exists // Ensure that file exists and delete key is correct
if _, readErr := fileBackend.Exists(filename); os.IsNotExist(readErr) { metadata, err := storageBackend.Head(filename)
if err == backends.NotFoundErr {
notFoundHandler(c, w, r) // 404 - file doesn't exist notFoundHandler(c, w, r) // 404 - file doesn't exist
return return
} } else if err != nil {
// Ensure delete key is correct
metadata, err := metadataRead(filename)
if err != nil {
unauthorizedHandler(c, w, r) // 401 - no metadata available unauthorizedHandler(c, w, r) // 401 - no metadata available
return return
} }
if metadata.DeleteKey == requestKey { if metadata.DeleteKey == requestKey {
fileDelErr := fileBackend.Delete(filename) err := storageBackend.Delete(filename)
metaDelErr := metaStorageBackend.Delete(filename) if err != nil {
if (fileDelErr != nil) || (metaDelErr != nil) {
oopsHandler(c, w, r, RespPLAIN, "Could not delete") oopsHandler(c, w, r, RespPLAIN, "Could not delete")
return return
} }

View File

@ -2,6 +2,7 @@ package main
import ( import (
"encoding/json" "encoding/json"
"io/ioutil"
"net/http" "net/http"
"path/filepath" "path/filepath"
"regexp" "regexp"
@ -9,6 +10,7 @@ import (
"strings" "strings"
"time" "time"
"github.com/andreimarcu/linx-server/backends"
"github.com/andreimarcu/linx-server/expiry" "github.com/andreimarcu/linx-server/expiry"
"github.com/dustin/go-humanize" "github.com/dustin/go-humanize"
"github.com/flosch/pongo2" "github.com/flosch/pongo2"
@ -29,14 +31,11 @@ func fileDisplayHandler(c web.C, w http.ResponseWriter, r *http.Request) {
fileName := c.URLParams["name"] fileName := c.URLParams["name"]
_, err := checkFile(fileName) metadata, err := checkFile(fileName)
if err == NotFoundErr { if err == backends.NotFoundErr {
notFoundHandler(c, w, r) notFoundHandler(c, w, r)
return return
} } else if err != nil {
metadata, err := metadataRead(fileName)
if err != nil {
oopsHandler(c, w, r, RespAUTO, "Corrupt metadata.") oopsHandler(c, w, r, RespAUTO, "Corrupt metadata.")
return return
} }
@ -78,8 +77,13 @@ func fileDisplayHandler(c web.C, w http.ResponseWriter, r *http.Request) {
tpl = Templates["display/pdf.html"] tpl = Templates["display/pdf.html"]
} else if extension == "story" { } else if extension == "story" {
metadata, reader, err := storageBackend.Get(fileName)
if err != nil {
oopsHandler(c, w, r, RespHTML, err.Error())
}
if metadata.Size < maxDisplayFileSizeBytes { if metadata.Size < maxDisplayFileSizeBytes {
bytes, err := fileBackend.Get(fileName) bytes, err := ioutil.ReadAll(reader)
if err == nil { if err == nil {
extra["contents"] = string(bytes) extra["contents"] = string(bytes)
lines = strings.Split(extra["contents"], "\n") lines = strings.Split(extra["contents"], "\n")
@ -88,8 +92,13 @@ func fileDisplayHandler(c web.C, w http.ResponseWriter, r *http.Request) {
} }
} else if extension == "md" { } else if extension == "md" {
metadata, reader, err := storageBackend.Get(fileName)
if err != nil {
oopsHandler(c, w, r, RespHTML, err.Error())
}
if metadata.Size < maxDisplayFileSizeBytes { if metadata.Size < maxDisplayFileSizeBytes {
bytes, err := fileBackend.Get(fileName) bytes, err := ioutil.ReadAll(reader)
if err == nil { if err == nil {
unsafe := blackfriday.MarkdownCommon(bytes) unsafe := blackfriday.MarkdownCommon(bytes)
html := bluemonday.UGCPolicy().SanitizeBytes(unsafe) html := bluemonday.UGCPolicy().SanitizeBytes(unsafe)
@ -100,8 +109,13 @@ func fileDisplayHandler(c web.C, w http.ResponseWriter, r *http.Request) {
} }
} else if strings.HasPrefix(metadata.Mimetype, "text/") || supportedBinExtension(extension) { } else if strings.HasPrefix(metadata.Mimetype, "text/") || supportedBinExtension(extension) {
metadata, reader, err := storageBackend.Get(fileName)
if err != nil {
oopsHandler(c, w, r, RespHTML, err.Error())
}
if metadata.Size < maxDisplayFileSizeBytes { if metadata.Size < maxDisplayFileSizeBytes {
bytes, err := fileBackend.Get(fileName) bytes, err := ioutil.ReadAll(reader)
if err == nil { if err == nil {
extra["extension"] = extension extra["extension"] = extension
extra["lang_hl"], extra["lang_ace"] = extensionToHlAndAceLangs(extension) extra["lang_hl"], extra["lang_ace"] = extensionToHlAndAceLangs(extension)
@ -117,14 +131,14 @@ func fileDisplayHandler(c web.C, w http.ResponseWriter, r *http.Request) {
} }
err = renderTemplate(tpl, pongo2.Context{ err = renderTemplate(tpl, pongo2.Context{
"mime": metadata.Mimetype, "mime": metadata.Mimetype,
"filename": fileName, "filename": fileName,
"size": sizeHuman, "size": sizeHuman,
"expiry": expiryHuman, "expiry": expiryHuman,
"expirylist": listExpirationTimes(), "expirylist": listExpirationTimes(),
"extra": extra, "extra": extra,
"lines": lines, "lines": lines,
"files": metadata.ArchiveFiles, "files": metadata.ArchiveFiles,
}, r, w) }, r, w)
if err != nil { if err != nil {

View File

@ -24,7 +24,7 @@ type ExpirationTime struct {
// Determine if the given filename is expired // Determine if the given filename is expired
func isFileExpired(filename string) (bool, error) { func isFileExpired(filename string) (bool, error) {
metadata, err := metadataRead(filename) metadata, err := storageBackend.Head(filename)
if err != nil { if err != nil {
return false, err return false, err
} }

View File

@ -1,8 +1,10 @@
package main package main
import ( import (
"io"
"net/http" "net/http"
"net/url" "net/url"
"strconv"
"strings" "strings"
"github.com/andreimarcu/linx-server/backends" "github.com/andreimarcu/linx-server/backends"
@ -14,14 +16,11 @@ func fileServeHandler(c web.C, w http.ResponseWriter, r *http.Request) {
fileName := c.URLParams["name"] fileName := c.URLParams["name"]
metadata, err := checkFile(fileName) metadata, err := checkFile(fileName)
if err == NotFoundErr { if err == backends.NotFoundErr {
notFoundHandler(c, w, r) notFoundHandler(c, w, r)
return return
} else if err == backends.BadMetadata {
oopsHandler(c, w, r, RespAUTO, "Corrupt metadata.")
return
} else if err != nil { } else if err != nil {
oopsHandler(c, w, r, RespAUTO, err.Error()) oopsHandler(c, w, r, RespAUTO, "Corrupt metadata.")
return return
} }
@ -38,10 +37,23 @@ func fileServeHandler(c web.C, w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Security-Policy", Config.fileContentSecurityPolicy) w.Header().Set("Content-Security-Policy", Config.fileContentSecurityPolicy)
w.Header().Set("Referrer-Policy", Config.fileReferrerPolicy) w.Header().Set("Referrer-Policy", Config.fileReferrerPolicy)
_, reader, err := storageBackend.Get(fileName)
if err != nil {
oopsHandler(c, w, r, RespAUTO, err.Error())
}
w.Header().Set("Content-Type", metadata.Mimetype)
w.Header().Set("Content-Length", strconv.FormatInt(metadata.Size, 10))
w.Header().Set("Etag", metadata.Sha256sum) w.Header().Set("Etag", metadata.Sha256sum)
w.Header().Set("Cache-Control", "max-age=0") w.Header().Set("Cache-Control", "max-age=0")
fileBackend.ServeFile(fileName, w, r) if r.Method != "HEAD" {
defer reader.Close()
if _, err = io.CopyN(w, reader, metadata.Size); err != nil {
oopsHandler(c, w, r, RespAUTO, err.Error())
}
}
} }
func staticHandler(c web.C, w http.ResponseWriter, r *http.Request) { func staticHandler(c web.C, w http.ResponseWriter, r *http.Request) {
@ -69,21 +81,14 @@ func staticHandler(c web.C, w http.ResponseWriter, r *http.Request) {
} }
func checkFile(filename string) (metadata backends.Metadata, err error) { func checkFile(filename string) (metadata backends.Metadata, err error) {
_, err = fileBackend.Exists(filename) metadata, err = storageBackend.Head(filename)
if err != nil {
err = NotFoundErr
return
}
metadata, err = metadataRead(filename)
if err != nil { if err != nil {
return return
} }
if expiry.IsTsExpired(metadata.Expiry) { if expiry.IsTsExpired(metadata.Expiry) {
fileBackend.Delete(filename) storageBackend.Delete(filename)
metaStorageBackend.Delete(filename) err = backends.NotFoundErr
err = NotFoundErr
return return
} }

70
helpers/archive.go Normal file
View File

@ -0,0 +1,70 @@
package helpers
import (
"archive/tar"
"archive/zip"
"compress/bzip2"
"compress/gzip"
"io"
"sort"
)
type ReadSeekerAt interface {
io.Reader
io.Seeker
io.ReaderAt
}
func ListArchiveFiles(mimetype string, size int64, r ReadSeekerAt) (files []string, err error) {
if mimetype == "application/x-tar" {
tReadr := tar.NewReader(r)
for {
hdr, err := tReadr.Next()
if err == io.EOF || err != nil {
break
}
if hdr.Typeflag == tar.TypeDir || hdr.Typeflag == tar.TypeReg {
files = append(files, hdr.Name)
}
}
sort.Strings(files)
} else if mimetype == "application/x-gzip" {
gzf, err := gzip.NewReader(r)
if err == nil {
tReadr := tar.NewReader(gzf)
for {
hdr, err := tReadr.Next()
if err == io.EOF || err != nil {
break
}
if hdr.Typeflag == tar.TypeDir || hdr.Typeflag == tar.TypeReg {
files = append(files, hdr.Name)
}
}
sort.Strings(files)
}
} else if mimetype == "application/x-bzip" {
bzf := bzip2.NewReader(r)
tReadr := tar.NewReader(bzf)
for {
hdr, err := tReadr.Next()
if err == io.EOF || err != nil {
break
}
if hdr.Typeflag == tar.TypeDir || hdr.Typeflag == tar.TypeReg {
files = append(files, hdr.Name)
}
}
sort.Strings(files)
} else if mimetype == "application/zip" {
zf, err := zip.NewReader(r, size)
if err == nil {
for _, f := range zf.File {
files = append(files, f.Name)
}
}
sort.Strings(files)
}
return
}

67
helpers/helpers.go Normal file
View File

@ -0,0 +1,67 @@
package helpers
import (
"encoding/hex"
"io"
"unicode"
"github.com/minio/sha256-simd"
"gopkg.in/h2non/filetype.v1"
)
// DetectMime sniffs r's mimetype from its first 512 bytes. Detection order:
// magic-number match via the filetype package, then a plain-ASCII heuristic
// ("text/plain"), finally "application/octet-stream" as the fallback.
// r is rewound to offset 0 both before and after sniffing so the caller can
// re-read from the start. Read/seek errors are ignored; a file shorter than
// 512 bytes leaves the header buffer zero-padded (printable tolerates that).
func DetectMime(r io.ReadSeeker) (string, error) {
	// Get first 512 bytes for mimetype detection
	header := make([]byte, 512)
	r.Seek(0, 0)
	r.Read(header)
	r.Seek(0, 0)

	kind, err := filetype.Match(header)
	if err != nil {
		return "application/octet-stream", err
	} else if kind.MIME.Value != "" {
		return kind.MIME.Value, nil
	}

	// Check if the file seems anything like text
	if printable(header) {
		return "text/plain", nil
	} else {
		return "application/octet-stream", nil
	}
}
// Sha256sum returns the hex-encoded SHA-256 digest of r's entire contents.
// r is rewound to offset 0 before hashing and again afterwards, so the
// caller can continue reading from the start. Seek errors are ignored —
// NOTE(review): an unseekable or mid-stream reader would silently hash the
// wrong byte range; confirm all callers pass rewindable files.
func Sha256sum(r io.ReadSeeker) (string, error) {
	hasher := sha256.New()
	r.Seek(0, 0)

	_, err := io.Copy(hasher, r)
	if err != nil {
		return "", err
	}
	r.Seek(0, 0)

	return hex.EncodeToString(hasher.Sum(nil)), nil
}
// printable reports whether data looks like plain ASCII text.
//
// A NUL byte at the very start marks binary data; NUL bytes later on are
// tolerated because DetectMime always passes a fixed 512-byte header that
// is zero-padded when the file is shorter. Any byte above unicode.MaxASCII
// makes the data non-printable.
//
// The original also had an `else if r == 0 && i < 0` branch, which could
// never execute (range indices are non-negative); it has been removed with
// no change in behavior.
func printable(data []byte) bool {
	for i, b := range data {
		r := rune(b)
		if r == 0 && i == 0 {
			return false
		}
		if r > unicode.MaxASCII {
			return false
		}
	}
	return true
}

View File

@ -5,7 +5,6 @@ import (
"log" "log"
"github.com/andreimarcu/linx-server/backends/localfs" "github.com/andreimarcu/linx-server/backends/localfs"
"github.com/andreimarcu/linx-server/backends/metajson"
"github.com/andreimarcu/linx-server/expiry" "github.com/andreimarcu/linx-server/expiry"
) )
@ -22,17 +21,15 @@ func main() {
"don't log deleted files") "don't log deleted files")
flag.Parse() flag.Parse()
metaStorageBackend := localfs.NewLocalfsBackend(metaDir) fileBackend := localfs.NewLocalfsBackend(metaDir, filesDir)
metaBackend := metajson.NewMetaJSONBackend(metaStorageBackend)
fileBackend := localfs.NewLocalfsBackend(filesDir)
files, err := metaStorageBackend.List() files, err := fileBackend.List()
if err != nil { if err != nil {
panic(err) panic(err)
} }
for _, filename := range files { for _, filename := range files {
metadata, err := metaBackend.Get(filename) metadata, err := fileBackend.Head(filename)
if err != nil { if err != nil {
if !noLogs { if !noLogs {
log.Printf("Failed to find metadata for %s", filename) log.Printf("Failed to find metadata for %s", filename)
@ -44,7 +41,6 @@ func main() {
log.Printf("Delete %s", filename) log.Printf("Delete %s", filename)
} }
fileBackend.Delete(filename) fileBackend.Delete(filename)
metaStorageBackend.Delete(filename)
} }
} }
} }

165
meta.go
View File

@ -1,165 +0,0 @@
package main
import (
"archive/tar"
"archive/zip"
"compress/bzip2"
"compress/gzip"
"encoding/hex"
"errors"
"io"
"sort"
"time"
"unicode"
"github.com/andreimarcu/linx-server/backends"
"github.com/andreimarcu/linx-server/expiry"
"github.com/dchest/uniuri"
"github.com/minio/sha256-simd"
"gopkg.in/h2non/filetype.v1"
)
var NotFoundErr = errors.New("File not found.")
func generateMetadata(fName string, exp time.Time, delKey string) (m backends.Metadata, err error) {
file, err := fileBackend.Open(fName)
if err != nil {
return
}
defer file.Close()
m.Size, err = fileBackend.Size(fName)
if err != nil {
return
}
m.Expiry = exp
if delKey == "" {
m.DeleteKey = uniuri.NewLen(30)
} else {
m.DeleteKey = delKey
}
// Get first 512 bytes for mimetype detection
header := make([]byte, 512)
file.Read(header)
kind, err := filetype.Match(header)
if err != nil {
m.Mimetype = "application/octet-stream"
} else {
m.Mimetype = kind.MIME.Value
}
if m.Mimetype == "" {
// Check if the file seems anything like text
if printable(header) {
m.Mimetype = "text/plain"
} else {
m.Mimetype = "application/octet-stream"
}
}
// Compute the sha256sum
hasher := sha256.New()
file.Seek(0, 0)
_, err = io.Copy(hasher, file)
if err == nil {
m.Sha256sum = hex.EncodeToString(hasher.Sum(nil))
}
file.Seek(0, 0)
// If archive, grab list of filenames
if m.Mimetype == "application/x-tar" {
tReadr := tar.NewReader(file)
for {
hdr, err := tReadr.Next()
if err == io.EOF || err != nil {
break
}
if hdr.Typeflag == tar.TypeDir || hdr.Typeflag == tar.TypeReg {
m.ArchiveFiles = append(m.ArchiveFiles, hdr.Name)
}
}
sort.Strings(m.ArchiveFiles)
} else if m.Mimetype == "application/x-gzip" {
gzf, err := gzip.NewReader(file)
if err == nil {
tReadr := tar.NewReader(gzf)
for {
hdr, err := tReadr.Next()
if err == io.EOF || err != nil {
break
}
if hdr.Typeflag == tar.TypeDir || hdr.Typeflag == tar.TypeReg {
m.ArchiveFiles = append(m.ArchiveFiles, hdr.Name)
}
}
sort.Strings(m.ArchiveFiles)
}
} else if m.Mimetype == "application/x-bzip" {
bzf := bzip2.NewReader(file)
tReadr := tar.NewReader(bzf)
for {
hdr, err := tReadr.Next()
if err == io.EOF || err != nil {
break
}
if hdr.Typeflag == tar.TypeDir || hdr.Typeflag == tar.TypeReg {
m.ArchiveFiles = append(m.ArchiveFiles, hdr.Name)
}
}
sort.Strings(m.ArchiveFiles)
} else if m.Mimetype == "application/zip" {
zf, err := zip.NewReader(file, m.Size)
if err == nil {
for _, f := range zf.File {
m.ArchiveFiles = append(m.ArchiveFiles, f.Name)
}
}
sort.Strings(m.ArchiveFiles)
}
return
}
func metadataWrite(filename string, metadata *backends.Metadata) error {
return metaBackend.Put(filename, metadata)
}
func metadataRead(filename string) (metadata backends.Metadata, err error) {
metadata, err = metaBackend.Get(filename)
if err != nil {
// Metadata does not exist, generate one
newMData, err := generateMetadata(filename, expiry.NeverExpire, "")
if err != nil {
return metadata, err
}
metadataWrite(filename, &newMData)
metadata, err = metaBackend.Get(filename)
}
return
}
func printable(data []byte) bool {
for i, b := range data {
r := rune(b)
// A null terminator that's not at the beginning of the file
if r == 0 && i == 0 {
return false
} else if r == 0 && i < 0 {
continue
}
if r > unicode.MaxASCII {
return false
}
}
return true
}

View File

@ -64,12 +64,10 @@ func oopsHandler(c web.C, w http.ResponseWriter, r *http.Request, rt RespType, m
w.WriteHeader(500) w.WriteHeader(500)
renderTemplate(Templates["oops.html"], pongo2.Context{"msg": msg}, r, w) renderTemplate(Templates["oops.html"], pongo2.Context{"msg": msg}, r, w)
return return
} else if rt == RespPLAIN { } else if rt == RespPLAIN {
w.WriteHeader(500) w.WriteHeader(500)
fmt.Fprintf(w, "%s", msg) fmt.Fprintf(w, "%s", msg)
return return
} else if rt == RespJSON { } else if rt == RespJSON {
js, _ := json.Marshal(map[string]string{ js, _ := json.Marshal(map[string]string{
"error": msg, "error": msg,
@ -79,7 +77,6 @@ func oopsHandler(c web.C, w http.ResponseWriter, r *http.Request, rt RespType, m
w.WriteHeader(500) w.WriteHeader(500)
w.Write(js) w.Write(js)
return return
} else if rt == RespAUTO { } else if rt == RespAUTO {
if strings.EqualFold("application/json", r.Header.Get("Accept")) { if strings.EqualFold("application/json", r.Header.Get("Accept")) {
oopsHandler(c, w, r, RespJSON, msg) oopsHandler(c, w, r, RespJSON, msg)
@ -89,11 +86,33 @@ func oopsHandler(c web.C, w http.ResponseWriter, r *http.Request, rt RespType, m
} }
} }
func badRequestHandler(c web.C, w http.ResponseWriter, r *http.Request) { func badRequestHandler(c web.C, w http.ResponseWriter, r *http.Request, rt RespType, msg string) {
w.WriteHeader(http.StatusBadRequest) if rt == RespHTML {
err := renderTemplate(Templates["400.html"], pongo2.Context{}, r, w) w.WriteHeader(http.StatusBadRequest)
if err != nil { err := renderTemplate(Templates["400.html"], pongo2.Context{"msg": msg}, r, w)
http.Error(w, err.Error(), http.StatusInternalServerError) if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
return
} else if rt == RespPLAIN {
w.WriteHeader(http.StatusBadRequest)
fmt.Fprintf(w, "%s", msg)
return
} else if rt == RespJSON {
js, _ := json.Marshal(map[string]string{
"error": msg,
})
w.Header().Set("Content-Type", "application/json; charset=UTF-8")
w.WriteHeader(http.StatusBadRequest)
w.Write(js)
return
} else if rt == RespAUTO {
if strings.EqualFold("application/json", r.Header.Get("Accept")) {
badRequestHandler(c, w, r, RespJSON, msg)
} else {
badRequestHandler(c, w, r, RespHTML, msg)
}
} }
} }

View File

@ -16,7 +16,7 @@ import (
"github.com/GeertJohan/go.rice" "github.com/GeertJohan/go.rice"
"github.com/andreimarcu/linx-server/backends" "github.com/andreimarcu/linx-server/backends"
"github.com/andreimarcu/linx-server/backends/localfs" "github.com/andreimarcu/linx-server/backends/localfs"
"github.com/andreimarcu/linx-server/backends/metajson" "github.com/andreimarcu/linx-server/backends/s3"
"github.com/flosch/pongo2" "github.com/flosch/pongo2"
"github.com/vharitonsky/iniflags" "github.com/vharitonsky/iniflags"
"github.com/zenazn/goji/graceful" "github.com/zenazn/goji/graceful"
@ -61,6 +61,9 @@ var Config struct {
remoteAuthFile string remoteAuthFile string
addHeaders headerList addHeaders headerList
noDirectAgents bool noDirectAgents bool
s3Endpoint string
s3Region string
s3Bucket string
} }
var Templates = make(map[string]*pongo2.Template) var Templates = make(map[string]*pongo2.Template)
@ -70,8 +73,7 @@ var timeStarted time.Time
var timeStartedStr string var timeStartedStr string
var remoteAuthKeys []string var remoteAuthKeys []string
var metaStorageBackend backends.MetaStorageBackend var metaStorageBackend backends.MetaStorageBackend
var metaBackend backends.MetaBackend var storageBackend backends.StorageBackend
var fileBackend backends.StorageBackend
func setup() *web.Mux { func setup() *web.Mux {
mux := web.New() mux := web.New()
@ -135,9 +137,11 @@ func setup() *web.Mux {
Config.selifPath = Config.selifPath + "/" Config.selifPath = Config.selifPath + "/"
} }
metaStorageBackend = localfs.NewLocalfsBackend(Config.metaDir) if Config.s3Bucket != "" {
metaBackend = metajson.NewMetaJSONBackend(metaStorageBackend) storageBackend = s3.NewS3Backend(Config.s3Bucket, Config.s3Region, Config.s3Endpoint)
fileBackend = localfs.NewLocalfsBackend(Config.filesDir) } else {
storageBackend = localfs.NewLocalfsBackend(Config.metaDir, Config.filesDir)
}
// Template setup // Template setup
p2l, err := NewPongo2TemplatesLoader() p2l, err := NewPongo2TemplatesLoader()
@ -255,6 +259,12 @@ func main() {
"Add an arbitrary header to the response. This option can be used multiple times.") "Add an arbitrary header to the response. This option can be used multiple times.")
flag.BoolVar(&Config.noDirectAgents, "nodirectagents", false, flag.BoolVar(&Config.noDirectAgents, "nodirectagents", false,
"disable serving files directly for wget/curl user agents") "disable serving files directly for wget/curl user agents")
flag.StringVar(&Config.s3Endpoint, "s3-endpoint", "",
"S3 endpoint")
flag.StringVar(&Config.s3Region, "s3-region", "",
"S3 region")
flag.StringVar(&Config.s3Bucket, "s3-bucket", "",
"S3 bucket to use for files and metadata")
iniflags.Parse() iniflags.Parse()

View File

@ -486,7 +486,6 @@ func TestPostJSONUploadMaxExpiry(t *testing.T) {
var myjson RespOkJSON var myjson RespOkJSON
err = json.Unmarshal([]byte(w.Body.String()), &myjson) err = json.Unmarshal([]byte(w.Body.String()), &myjson)
if err != nil { if err != nil {
fmt.Println(w.Body.String())
t.Fatal(err) t.Fatal(err)
} }
@ -643,14 +642,45 @@ func TestPostEmptyUpload(t *testing.T) {
mux.ServeHTTP(w, req) mux.ServeHTTP(w, req)
if w.Code != 500 { if w.Code != 400 {
t.Log(w.Body.String()) t.Log(w.Body.String())
t.Fatalf("Status code is not 500, but %d", w.Code) t.Fatalf("Status code is not 400, but %d", w.Code)
}
}
func TestPostTooLargeUpload(t *testing.T) {
mux := setup()
oldMaxSize := Config.maxSize
Config.maxSize = 2
w := httptest.NewRecorder()
filename := generateBarename() + ".txt"
var b bytes.Buffer
mw := multipart.NewWriter(&b)
fw, err := mw.CreateFormFile("file", filename)
if err != nil {
t.Fatal(err)
} }
if !strings.Contains(w.Body.String(), "Empty file") { fw.Write([]byte("test content"))
t.Fatal("Response did not contain 'Empty file'") mw.Close()
req, err := http.NewRequest("POST", "/upload/", &b)
req.Header.Set("Content-Type", mw.FormDataContentType())
req.Header.Set("Referer", Config.siteURL)
if err != nil {
t.Fatal(err)
} }
mux.ServeHTTP(w, req)
if w.Code != 400 {
t.Log(w.Body.String())
t.Fatalf("Status code is not 400, but %d", w.Code)
}
Config.maxSize = oldMaxSize
} }
func TestPostEmptyJSONUpload(t *testing.T) { func TestPostEmptyJSONUpload(t *testing.T) {
@ -679,9 +709,9 @@ func TestPostEmptyJSONUpload(t *testing.T) {
mux.ServeHTTP(w, req) mux.ServeHTTP(w, req)
if w.Code != 500 { if w.Code != 400 {
t.Log(w.Body.String()) t.Log(w.Body.String())
t.Fatalf("Status code is not 500, but %d", w.Code) t.Fatalf("Status code is not 400, but %d", w.Code)
} }
var myjson RespErrJSON var myjson RespErrJSON
@ -690,7 +720,7 @@ func TestPostEmptyJSONUpload(t *testing.T) {
t.Fatal(err) t.Fatal(err)
} }
if myjson.Error != "Could not upload file: Empty file" { if myjson.Error != "Empty file" {
t.Fatal("Json 'error' was not 'Empty file' but " + myjson.Error) t.Fatal("Json 'error' was not 'Empty file' but " + myjson.Error)
} }
} }
@ -768,11 +798,41 @@ func TestPutEmptyUpload(t *testing.T) {
mux.ServeHTTP(w, req) mux.ServeHTTP(w, req)
if !strings.Contains(w.Body.String(), "Empty file") { if w.Code != 400 {
t.Fatal("Response doesn't contain'Empty file'") t.Fatalf("Status code is not 400, but %d", w.Code)
} }
} }
func TestPutTooLargeUpload(t *testing.T) {
mux := setup()
oldMaxSize := Config.maxSize
Config.maxSize = 2
w := httptest.NewRecorder()
filename := generateBarename() + ".file"
req, err := http.NewRequest("PUT", "/upload/"+filename, strings.NewReader("File too big"))
if err != nil {
t.Fatal(err)
}
req.Header.Set("Linx-Randomize", "yes")
mux.ServeHTTP(w, req)
if w.Code != 500 {
t.Log(w.Body.String())
t.Fatalf("Status code is not 500, but %d", w.Code)
}
if !strings.Contains(w.Body.String(), "request body too large") {
t.Fatal("Response did not contain 'request body too large'")
}
Config.maxSize = oldMaxSize
}
func TestPutJSONUpload(t *testing.T) { func TestPutJSONUpload(t *testing.T) {
var myjson RespOkJSON var myjson RespOkJSON

View File

@ -2,65 +2,44 @@ package main
import ( import (
"bytes" "bytes"
"crypto/sha1"
"fmt" "fmt"
"io" "io"
"net/http" "net/http"
"time" "time"
"github.com/andreimarcu/linx-server/backends" "github.com/andreimarcu/linx-server/backends"
"github.com/andreimarcu/linx-server/expiry"
"github.com/andreimarcu/linx-server/torrent"
"github.com/zeebo/bencode" "github.com/zeebo/bencode"
"github.com/zenazn/goji/web" "github.com/zenazn/goji/web"
) )
const ( func createTorrent(fileName string, f io.Reader, r *http.Request) ([]byte, error) {
TORRENT_PIECE_LENGTH = 262144 url := getSiteURL(r) + Config.selifPath + fileName
) chunk := make([]byte, torrent.TORRENT_PIECE_LENGTH)
type TorrentInfo struct { t := torrent.Torrent{
PieceLength int `bencode:"piece length"`
Pieces string `bencode:"pieces"`
Name string `bencode:"name"`
Length int `bencode:"length"`
}
type Torrent struct {
Encoding string `bencode:"encoding"`
Info TorrentInfo `bencode:"info"`
UrlList []string `bencode:"url-list"`
}
func hashPiece(piece []byte) []byte {
h := sha1.New()
h.Write(piece)
return h.Sum(nil)
}
func createTorrent(fileName string, f io.ReadCloser, r *http.Request) ([]byte, error) {
chunk := make([]byte, TORRENT_PIECE_LENGTH)
torrent := Torrent{
Encoding: "UTF-8", Encoding: "UTF-8",
Info: TorrentInfo{ Info: torrent.TorrentInfo{
PieceLength: TORRENT_PIECE_LENGTH, PieceLength: torrent.TORRENT_PIECE_LENGTH,
Name: fileName, Name: fileName,
}, },
UrlList: []string{fmt.Sprintf("%s%s%s", getSiteURL(r), Config.selifPath, fileName)}, UrlList: []string{url},
} }
for { for {
n, err := f.Read(chunk) n, err := io.ReadFull(f, chunk)
if err == io.EOF { if err == io.EOF {
break break
} else if err != nil { } else if err != nil && err != io.ErrUnexpectedEOF {
return []byte{}, err return []byte{}, err
} }
torrent.Info.Length += n t.Info.Length += n
torrent.Info.Pieces += string(hashPiece(chunk[:n])) t.Info.Pieces += string(torrent.HashPiece(chunk[:n]))
} }
data, err := bencode.EncodeBytes(&torrent) data, err := bencode.EncodeBytes(&t)
if err != nil { if err != nil {
return []byte{}, err return []byte{}, err
} }
@ -71,22 +50,25 @@ func createTorrent(fileName string, f io.ReadCloser, r *http.Request) ([]byte, e
func fileTorrentHandler(c web.C, w http.ResponseWriter, r *http.Request) { func fileTorrentHandler(c web.C, w http.ResponseWriter, r *http.Request) {
fileName := c.URLParams["name"] fileName := c.URLParams["name"]
_, err := checkFile(fileName) metadata, f, err := storageBackend.Get(fileName)
if err == NotFoundErr { if err == backends.NotFoundErr {
notFoundHandler(c, w, r) notFoundHandler(c, w, r)
return return
} else if err == backends.BadMetadata { } else if err == backends.BadMetadata {
oopsHandler(c, w, r, RespAUTO, "Corrupt metadata.") oopsHandler(c, w, r, RespAUTO, "Corrupt metadata.")
return return
} } else if err != nil {
oopsHandler(c, w, r, RespAUTO, err.Error())
f, err := fileBackend.Open(fileName)
if err != nil {
oopsHandler(c, w, r, RespHTML, "Could not create torrent.")
return return
} }
defer f.Close() defer f.Close()
if expiry.IsTsExpired(metadata.Expiry) {
storageBackend.Delete(fileName)
notFoundHandler(c, w, r)
return
}
encoded, err := createTorrent(fileName, f, r) encoded, err := createTorrent(fileName, f, r)
if err != nil { if err != nil {
oopsHandler(c, w, r, RespHTML, "Could not create torrent.") oopsHandler(c, w, r, RespHTML, "Could not create torrent.")

28
torrent/torrent.go Normal file
View File

@ -0,0 +1,28 @@
package torrent
import (
"crypto/sha1"
)
const (
TORRENT_PIECE_LENGTH = 262144
)
type TorrentInfo struct {
PieceLength int `bencode:"piece length"`
Pieces string `bencode:"pieces"`
Name string `bencode:"name"`
Length int `bencode:"length"`
}
type Torrent struct {
Encoding string `bencode:"encoding"`
Info TorrentInfo `bencode:"info"`
UrlList []string `bencode:"url-list"`
}
func HashPiece(piece []byte) []byte {
h := sha1.New()
h.Write(piece)
return h.Sum(nil)
}

View File

@ -5,12 +5,13 @@ import (
"os" "os"
"testing" "testing"
"github.com/andreimarcu/linx-server/torrent"
"github.com/zeebo/bencode" "github.com/zeebo/bencode"
) )
func TestCreateTorrent(t *testing.T) { func TestCreateTorrent(t *testing.T) {
fileName := "server.go" fileName := "server.go"
var decoded Torrent var decoded torrent.Torrent
f, err := os.Open("server.go") f, err := os.Open("server.go")
if err != nil { if err != nil {
@ -52,7 +53,7 @@ func TestCreateTorrent(t *testing.T) {
} }
func TestCreateTorrentWithImage(t *testing.T) { func TestCreateTorrentWithImage(t *testing.T) {
var decoded Torrent var decoded torrent.Torrent
f, err := os.Open("static/images/404.jpg") f, err := os.Open("static/images/404.jpg")
if err != nil { if err != nil {

View File

@ -22,6 +22,7 @@ import (
"gopkg.in/h2non/filetype.v1" "gopkg.in/h2non/filetype.v1"
) )
var FileTooLargeError = errors.New("File too large.")
var fileBlacklist = map[string]bool{ var fileBlacklist = map[string]bool{
"favicon.ico": true, "favicon.ico": true,
"index.htm": true, "index.htm": true,
@ -34,10 +35,11 @@ var fileBlacklist = map[string]bool{
// Describes metadata directly from the user request // Describes metadata directly from the user request
type UploadRequest struct { type UploadRequest struct {
src io.Reader src io.Reader
size int64
filename string filename string
expiry time.Duration // Seconds until expiry, 0 = never expiry time.Duration // Seconds until expiry, 0 = never
deleteKey string // Empty string if not defined
randomBarename bool randomBarename bool
deletionKey string // Empty string if not defined
} }
// Metadata associated with a file as it would actually be stored // Metadata associated with a file as it would actually be stored
@ -48,7 +50,7 @@ type Upload struct {
func uploadPostHandler(c web.C, w http.ResponseWriter, r *http.Request) { func uploadPostHandler(c web.C, w http.ResponseWriter, r *http.Request) {
if !strictReferrerCheck(r, getSiteURL(r), []string{"Linx-Delete-Key", "Linx-Expiry", "Linx-Randomize", "X-Requested-With"}) { if !strictReferrerCheck(r, getSiteURL(r), []string{"Linx-Delete-Key", "Linx-Expiry", "Linx-Randomize", "X-Requested-With"}) {
badRequestHandler(c, w, r) badRequestHandler(c, w, r, RespAUTO, "")
return return
} }
@ -65,32 +67,39 @@ func uploadPostHandler(c web.C, w http.ResponseWriter, r *http.Request) {
} }
defer file.Close() defer file.Close()
r.ParseForm()
if r.Form.Get("randomize") == "true" {
upReq.randomBarename = true
}
upReq.expiry = parseExpiry(r.Form.Get("expires"))
upReq.src = file upReq.src = file
upReq.size = headers.Size
upReq.filename = headers.Filename upReq.filename = headers.Filename
} else { } else {
if r.FormValue("content") == "" { if r.PostFormValue("content") == "" {
oopsHandler(c, w, r, RespHTML, "Empty file") badRequestHandler(c, w, r, RespAUTO, "Empty file")
return return
} }
extension := r.FormValue("extension") extension := r.PostFormValue("extension")
if extension == "" { if extension == "" {
extension = "txt" extension = "txt"
} }
upReq.src = strings.NewReader(r.FormValue("content")) content := r.PostFormValue("content")
upReq.expiry = parseExpiry(r.FormValue("expires"))
upReq.filename = r.FormValue("filename") + "." + extension upReq.src = strings.NewReader(content)
upReq.size = int64(len(content))
upReq.filename = r.PostFormValue("filename") + "." + extension
}
upReq.expiry = parseExpiry(r.PostFormValue("expires"))
if r.PostFormValue("randomize") == "true" {
upReq.randomBarename = true
} }
upload, err := processUpload(upReq) upload, err := processUpload(upReq)
if strings.EqualFold("application/json", r.Header.Get("Accept")) { if strings.EqualFold("application/json", r.Header.Get("Accept")) {
if err != nil { if err == FileTooLargeError || err == backends.FileEmptyError {
badRequestHandler(c, w, r, RespJSON, err.Error())
return
} else if err != nil {
oopsHandler(c, w, r, RespJSON, "Could not upload file: "+err.Error()) oopsHandler(c, w, r, RespJSON, "Could not upload file: "+err.Error())
return return
} }
@ -99,14 +108,16 @@ func uploadPostHandler(c web.C, w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8") w.Header().Set("Content-Type", "application/json; charset=UTF-8")
w.Write(js) w.Write(js)
} else { } else {
if err != nil { if err == FileTooLargeError || err == backends.FileEmptyError {
badRequestHandler(c, w, r, RespHTML, err.Error())
return
} else if err != nil {
oopsHandler(c, w, r, RespHTML, "Could not upload file: "+err.Error()) oopsHandler(c, w, r, RespHTML, "Could not upload file: "+err.Error())
return return
} }
http.Redirect(w, r, Config.sitePath+upload.Filename, 303) http.Redirect(w, r, Config.sitePath+upload.Filename, 303)
} }
} }
func uploadPutHandler(c web.C, w http.ResponseWriter, r *http.Request) { func uploadPutHandler(c web.C, w http.ResponseWriter, r *http.Request) {
@ -115,12 +126,15 @@ func uploadPutHandler(c web.C, w http.ResponseWriter, r *http.Request) {
defer r.Body.Close() defer r.Body.Close()
upReq.filename = c.URLParams["name"] upReq.filename = c.URLParams["name"]
upReq.src = r.Body upReq.src = http.MaxBytesReader(w, r.Body, Config.maxSize)
upload, err := processUpload(upReq) upload, err := processUpload(upReq)
if strings.EqualFold("application/json", r.Header.Get("Accept")) { if strings.EqualFold("application/json", r.Header.Get("Accept")) {
if err != nil { if err == FileTooLargeError || err == backends.FileEmptyError {
badRequestHandler(c, w, r, RespJSON, err.Error())
return
} else if err != nil {
oopsHandler(c, w, r, RespJSON, "Could not upload file: "+err.Error()) oopsHandler(c, w, r, RespJSON, "Could not upload file: "+err.Error())
return return
} }
@ -129,7 +143,10 @@ func uploadPutHandler(c web.C, w http.ResponseWriter, r *http.Request) {
w.Header().Set("Content-Type", "application/json; charset=UTF-8") w.Header().Set("Content-Type", "application/json; charset=UTF-8")
w.Write(js) w.Write(js)
} else { } else {
if err != nil { if err == FileTooLargeError || err == backends.FileEmptyError {
badRequestHandler(c, w, r, RespPLAIN, err.Error())
return
} else if err != nil {
oopsHandler(c, w, r, RespPLAIN, "Could not upload file: "+err.Error()) oopsHandler(c, w, r, RespPLAIN, "Could not upload file: "+err.Error())
return return
} }
@ -162,8 +179,8 @@ func uploadRemote(c web.C, w http.ResponseWriter, r *http.Request) {
} }
upReq.filename = filepath.Base(grabUrl.Path) upReq.filename = filepath.Base(grabUrl.Path)
upReq.src = resp.Body upReq.src = http.MaxBytesReader(w, resp.Body, Config.maxSize)
upReq.deletionKey = r.FormValue("deletekey") upReq.deleteKey = r.FormValue("deletekey")
upReq.randomBarename = r.FormValue("randomize") == "yes" upReq.randomBarename = r.FormValue("randomize") == "yes"
upReq.expiry = parseExpiry(r.FormValue("expiry")) upReq.expiry = parseExpiry(r.FormValue("expiry"))
@ -193,15 +210,18 @@ func uploadHeaderProcess(r *http.Request, upReq *UploadRequest) {
upReq.randomBarename = true upReq.randomBarename = true
} }
upReq.deletionKey = r.Header.Get("Linx-Delete-Key") upReq.deleteKey = r.Header.Get("Linx-Delete-Key")
// Get seconds until expiry. Non-integer responses never expire. // Get seconds until expiry. Non-integer responses never expire.
expStr := r.Header.Get("Linx-Expiry") expStr := r.Header.Get("Linx-Expiry")
upReq.expiry = parseExpiry(expStr) upReq.expiry = parseExpiry(expStr)
} }
func processUpload(upReq UploadRequest) (upload Upload, err error) { func processUpload(upReq UploadRequest) (upload Upload, err error) {
if upReq.size > Config.maxSize {
return upload, FileTooLargeError
}
// Determine the appropriate filename, then write to disk // Determine the appropriate filename, then write to disk
barename, extension := barePlusExt(upReq.filename) barename, extension := barePlusExt(upReq.filename)
@ -215,7 +235,7 @@ func processUpload(upReq UploadRequest) (upload Upload, err error) {
header = make([]byte, 512) header = make([]byte, 512)
n, _ := upReq.src.Read(header) n, _ := upReq.src.Read(header)
if n == 0 { if n == 0 {
return upload, errors.New("Empty file") return upload, backends.FileEmptyError
} }
header = header[:n] header = header[:n]
@ -231,13 +251,13 @@ func processUpload(upReq UploadRequest) (upload Upload, err error) {
upload.Filename = strings.Join([]string{barename, extension}, ".") upload.Filename = strings.Join([]string{barename, extension}, ".")
upload.Filename = strings.Replace(upload.Filename, " ", "", -1) upload.Filename = strings.Replace(upload.Filename, " ", "", -1)
fileexists, _ := fileBackend.Exists(upload.Filename) fileexists, _ := storageBackend.Exists(upload.Filename)
// Check if the delete key matches, in which case overwrite // Check if the delete key matches, in which case overwrite
if fileexists { if fileexists {
metad, merr := metadataRead(upload.Filename) metad, merr := storageBackend.Head(upload.Filename)
if merr == nil { if merr == nil {
if upReq.deletionKey == metad.DeleteKey { if upReq.deleteKey == metad.DeleteKey {
fileexists = false fileexists = false
} }
} }
@ -252,7 +272,7 @@ func processUpload(upReq UploadRequest) (upload Upload, err error) {
} }
upload.Filename = strings.Join([]string{barename, extension}, ".") upload.Filename = strings.Join([]string{barename, extension}, ".")
fileexists, err = fileBackend.Exists(upload.Filename) fileexists, err = storageBackend.Exists(upload.Filename)
} }
if fileBlacklist[strings.ToLower(upload.Filename)] { if fileBlacklist[strings.ToLower(upload.Filename)] {
@ -267,24 +287,15 @@ func processUpload(upReq UploadRequest) (upload Upload, err error) {
fileExpiry = time.Now().Add(upReq.expiry) fileExpiry = time.Now().Add(upReq.expiry)
} }
bytes, err := fileBackend.Put(upload.Filename, io.MultiReader(bytes.NewReader(header), upReq.src)) if upReq.deleteKey == "" {
if err != nil { upReq.deleteKey = uniuri.NewLen(30)
return upload, err
} else if bytes > Config.maxSize {
fileBackend.Delete(upload.Filename)
return upload, errors.New("File too large")
} }
upload.Metadata, err = generateMetadata(upload.Filename, fileExpiry, upReq.deletionKey) upload.Metadata, err = storageBackend.Put(upload.Filename, io.MultiReader(bytes.NewReader(header), upReq.src), fileExpiry, upReq.deleteKey)
if err != nil { if err != nil {
fileBackend.Delete(upload.Filename) return upload, err
return
}
err = metadataWrite(upload.Filename, &upload.Metadata)
if err != nil {
fileBackend.Delete(upload.Filename)
return
} }
return return
} }