
.github/workflows: adding golangci-lint as new job (#453)

In order to follow Go's best practices, we should lint the code base properly, going beyond the usual syntax mistakes.
Stefan Benten, 4 years ago
parent
commit 2fbd19365c
9 changed files with 98 additions and 53 deletions
  1. .github/workflows/test.yml (+14 -0)
  2. .golangci.yml (+20 -0)
  3. main.go (+10 -2)
  4. server/clamav.go (+2 -5)
  5. server/handlers.go (+18 -28)
  6. server/server.go (+11 -5)
  7. server/storage.go (+15 -8)
  8. server/utils.go (+6 -0)
  9. server/virustotal.go (+2 -5)
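
Most of the churn in the diffs below comes from satisfying errcheck, which golangci-lint enables by default and which reports error return values that are silently dropped. As an illustration only (this file is not part of the commit), a minimal Go sketch of the before/after pattern the commit applies throughout:

// errcheck_example.go: hypothetical illustration of the unchecked-error
// pattern that errcheck reports, and the explicit handling adopted below.
package main

import (
	"log"
	"os"
)

func main() {
	f, err := os.Create("example.txt")
	if err != nil {
		log.Fatal(err)
	}

	// Before: `f.WriteString("hello\n")` drops the returned error; errcheck flags it.
	// After: handle the error, or discard it explicitly with `_, _ =` as the
	// handler diffs do for http.ResponseWriter.Write.
	if _, err := f.WriteString("hello\n"); err != nil {
		log.Fatal(err)
	}

	// Close errors are reported too; check them (or route them through a
	// helper, as the new CloseCheck in server/utils.go does).
	if err := f.Close(); err != nil {
		log.Fatal(err)
	}
}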

+ 14 - 0
.github/workflows/test.yml

@@ -29,3 +29,17 @@ jobs:
           go version
           go vet ./...
           go test ./...
+  golangci:
+    name: Linting
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - uses: actions/setup-go@v1
+        with:
+          go-version: 1.17
+      - name: golangci-lint
+        uses: golangci/golangci-lint-action@v2
+        with:
+          version: latest
+          skip-go-installation: true
+          args: "--config .golangci.yml"

+ 20 - 0
.golangci.yml

@@ -0,0 +1,20 @@
+run:
+  deadline: 10m
+  issues-exit-code: 1
+  tests: true
+
+output:
+  format: colored-line-number
+  print-issued-lines: true
+  print-linter-name: true
+
+linters:
+  disable:
+    - deadcode
+    - unused
+
+issues:
+  max-issues-per-linter: 0
+  max-same-issues: 0
+  new: false
+  exclude-use-default: false
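
The only linters switched off here are deadcode and unused, which report package-level identifiers that are declared but never referenced. A minimal, hypothetical sketch (not taken from this repository) of the kind of code they would flag:

// unused_example.go: hypothetical; it compiles fine, but `unused`/`deadcode`
// would report both declarations because nothing references them.
package main

import "fmt"

// maxRetries is never read anywhere in the package.
const maxRetries = 3

// formatGreeting is never called anywhere in the package.
func formatGreeting(name string) string {
	return fmt.Sprintf("hello, %s", name)
}

func main() {
	fmt.Println("only main is reachable; the declarations above are dead code")
}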

+ 10 - 2
main.go

@@ -1,8 +1,16 @@
 package main
 
-import "github.com/dutchcoders/transfer.sh/cmd"
+import (
+	"log"
+	"os"
+
+	"github.com/dutchcoders/transfer.sh/cmd"
+)
 
 func main() {
 	app := cmd.New()
-	app.RunAndExitOnError()
+	err := app.Run(os.Args)
+	if err != nil {
+		log.Fatal(err)
+	}
 }

+ 2 - 5
server/clamav.go

@@ -31,7 +31,6 @@ import (
 	// _ "transfer.sh/app/utils"
 
 	"fmt"
-	"io"
 	"net/http"
 	"time"
 
@@ -50,9 +49,7 @@ func (s *Server) scanHandler(w http.ResponseWriter, r *http.Request) {
 
 	s.logger.Printf("Scanning %s %d %s", filename, contentLength, contentType)
 
-	var reader io.Reader
-
-	reader = r.Body
+	reader := r.Body
 
 	c := clamd.NewClamd(s.ClamAVDaemonHost)
 
@@ -67,7 +64,7 @@ func (s *Server) scanHandler(w http.ResponseWriter, r *http.Request) {
 
 	select {
 	case s := <-response:
-		w.Write([]byte(fmt.Sprintf("%v\n", s.Status)))
+		_, _ = w.Write([]byte(fmt.Sprintf("%v\n", s.Status)))
 	case <-time.After(time.Second * 60):
 		abort <- true
 	}

+ 18 - 28
server/handlers.go

@@ -462,14 +462,13 @@ func (s *Server) putHandler(w http.ResponseWriter, r *http.Request) {
 
 	reader = r.Body
 
-	defer r.Body.Close()
+	defer CloseCheck(r.Body.Close)
 
 	if contentLength == -1 {
 		// queue file to disk, because s3 needs content length
 		var err error
-		var f io.Reader
 
-		f = reader
+		f := reader
 
 		var b bytes.Buffer
 
@@ -575,13 +574,9 @@ func resolveURL(r *http.Request, u *url.URL, proxyPort string) string {
 }
 
 func resolveKey(key, proxyPath string) string {
-	if strings.HasPrefix(key, "/") {
-		key = key[1:]
-	}
+	key = strings.TrimPrefix(key, "/")
 
-	if strings.HasPrefix(key, proxyPath) {
-		key = key[len(proxyPath):]
-	}
+	key = strings.TrimPrefix(key, proxyPath)
 
 	key = strings.Replace(key, "\\", "/", -1)
 
@@ -660,7 +655,7 @@ func (metadata metadata) remainingLimitHeaderValues() (remainingDownloads, remai
 	if metadata.MaxDate.IsZero() {
 		remainingDays = "n/a"
 	} else {
-		timeDifference := metadata.MaxDate.Sub(time.Now())
+		timeDifference := time.Until(metadata.MaxDate)
 		remainingDays = strconv.Itoa(int(timeDifference.Hours()/24) + 1)
 	}
 
@@ -679,8 +674,6 @@ func (s *Server) lock(token, filename string) {
 	lock, _ := s.locks.LoadOrStore(key, &sync.Mutex{})
 
 	lock.(*sync.Mutex).Lock()
-
-	return
 }
 
 func (s *Server) unlock(token, filename string) {
@@ -702,7 +695,7 @@ func (s *Server) checkMetadata(token, filename string, increaseDownload bool) (m
 		return metadata, err
 	}
 
-	defer r.Close()
+	defer CloseCheck(r.Close)
 
 	if err := json.NewDecoder(r).Decode(&metadata); err != nil {
 		return metadata, err
@@ -740,7 +733,7 @@ func (s *Server) checkDeletionToken(deletionToken, token, filename string) error
 		return err
 	}
 
-	defer r.Close()
+	defer CloseCheck(r.Close)
 
 	if err := json.NewDecoder(r).Decode(&metadata); err != nil {
 		return err
@@ -755,9 +748,9 @@ func (s *Server) purgeHandler() {
 	ticker := time.NewTicker(s.purgeInterval)
 	go func() {
 		for {
-			select {
-			case <-ticker.C:
-				err := s.storage.Purge(s.purgeDays)
+			<-ticker.C
+			err := s.storage.Purge(s.purgeDays)
+			if err != nil {
 				s.logger.Printf("error cleaning up expired files: %v", err)
 			}
 		}
@@ -825,7 +818,7 @@ func (s *Server) zipHandler(w http.ResponseWriter, r *http.Request) {
 			return
 		}
 
-		defer reader.Close()
+		defer CloseCheck(reader.Close)
 
 		header := &zip.FileHeader{
 			Name:         strings.Split(key, "/")[1],
@@ -868,10 +861,10 @@ func (s *Server) tarGzHandler(w http.ResponseWriter, r *http.Request) {
 	w.Header().Set("Connection", "close")
 
 	os := gzip.NewWriter(w)
-	defer os.Close()
+	defer CloseCheck(os.Close)
 
 	zw := tar.NewWriter(os)
-	defer zw.Close()
+	defer CloseCheck(zw.Close)
 
 	for _, key := range strings.Split(files, ",") {
 		key = resolveKey(key, s.proxyPath)
@@ -896,7 +889,7 @@ func (s *Server) tarGzHandler(w http.ResponseWriter, r *http.Request) {
 			return
 		}
 
-		defer reader.Close()
+		defer CloseCheck(reader.Close)
 
 		header := &tar.Header{
 			Name: strings.Split(key, "/")[1],
@@ -930,7 +923,7 @@ func (s *Server) tarHandler(w http.ResponseWriter, r *http.Request) {
 	w.Header().Set("Connection", "close")
 
 	zw := tar.NewWriter(w)
-	defer zw.Close()
+	defer CloseCheck(zw.Close)
 
 	for _, key := range strings.Split(files, ",") {
 		key = resolveKey(key, s.proxyPath)
@@ -955,7 +948,7 @@ func (s *Server) tarHandler(w http.ResponseWriter, r *http.Request) {
 			return
 		}
 
-		defer reader.Close()
+		defer CloseCheck(reader.Close)
 
 		header := &tar.Header{
 			Name: strings.Split(key, "/")[1],
@@ -1037,7 +1030,7 @@ func (s *Server) getHandler(w http.ResponseWriter, r *http.Request) {
 		return
 	}
 
-	defer reader.Close()
+	defer CloseCheck(reader.Close)
 
 	var disposition string
 
@@ -1086,8 +1079,6 @@ func (s *Server) getHandler(w http.ResponseWriter, r *http.Request) {
 		http.Error(w, "Error occurred copying to output stream", 500)
 		return
 	}
-
-	return
 }
 
 // RedirectHandler handles redirect
@@ -1142,7 +1133,6 @@ func ipFilterHandler(h http.Handler, ipFilterOptions *IPFilterOptions) http.Hand
 		} else {
 			WrapIPFilter(h, *ipFilterOptions).ServeHTTP(w, r)
 		}
-		return
 	}
 }
 
@@ -1156,7 +1146,7 @@ func (s *Server) basicAuthHandler(h http.Handler) http.HandlerFunc {
 		w.Header().Set("WWW-Authenticate", "Basic realm=\"Restricted\"")
 
 		username, password, authOK := r.BasicAuth()
-		if authOK == false {
+		if !authOK {
 			http.Error(w, "Not authorized", 401)
 			return
 		}

+ 11 - 5
server/server.go

@@ -393,7 +393,7 @@ func (s *Server) Run() {
 		go func() {
 			s.logger.Println("Profiled listening at: :6060")
 
-			http.ListenAndServe(":6060", nil)
+			_ = http.ListenAndServe(":6060", nil)
 		}()
 	}
 
@@ -424,8 +424,14 @@ func (s *Server) Run() {
 				s.logger.Panicf("Unable to parse: path=%s, err=%s", path, err)
 			}
 
-			htmlTemplates.New(stripPrefix(path)).Parse(string(bytes))
-			textTemplates.New(stripPrefix(path)).Parse(string(bytes))
+			_, err = htmlTemplates.New(stripPrefix(path)).Parse(string(bytes))
+			if err != nil {
+				s.logger.Panicln("Unable to parse template")
+			}
+			_, err = textTemplates.New(stripPrefix(path)).Parse(string(bytes))
+			if err != nil {
+				s.logger.Panicln("Unable to parse template")
+			}
 		}
 	}
 
@@ -493,7 +499,7 @@ func (s *Server) Run() {
 
 	r.NotFoundHandler = http.HandlerFunc(s.notFoundHandler)
 
-	mime.AddExtensionType(".md", "text/x-markdown")
+	_ = mime.AddExtensionType(".md", "text/x-markdown")
 
 	s.logger.Printf("Transfer.sh server started.\nusing temp folder: %s\nusing storage provider: %s", s.tempPath, s.storage.Type())
 
@@ -532,7 +538,7 @@ func (s *Server) Run() {
 		s.logger.Printf("listening on port: %v\n", s.ListenerString)
 
 		go func() {
-			srvr.ListenAndServe()
+			_ = srvr.ListenAndServe()
 		}()
 	}
 

+ 15 - 8
server/storage.go

@@ -99,7 +99,7 @@ func (s *LocalStorage) Get(token string, filename string) (reader io.ReadCloser,
 // Delete removes a file from storage
 func (s *LocalStorage) Delete(token string, filename string) (err error) {
 	metadata := filepath.Join(s.basedir, token, fmt.Sprintf("%s.metadata", filename))
-	os.Remove(metadata)
+	_ = os.Remove(metadata)
 
 	path := filepath.Join(s.basedir, token, filename)
 	err = os.Remove(path)
@@ -152,7 +152,7 @@ func (s *LocalStorage) Put(token string, filename string, reader io.Reader, cont
 		return err
 	}
 
-	defer f.Close()
+	defer CloseCheck(f.Close)
 
 	if _, err = io.Copy(f, reader); err != nil {
 		return err
@@ -336,8 +336,9 @@ func NewGDriveStorage(clientJSONFilepath string, localConfigPath string, basedir
 	if err != nil {
 		return nil, err
 	}
-
-	srv, err := drive.New(getGDriveClient(config, localConfigPath, logger))
+	
+	// ToDo: Upgrade deprecated version
+	srv, err := drive.New(getGDriveClient(config, localConfigPath, logger)) // nolint: staticcheck
 	if err != nil {
 		return nil, err
 	}
@@ -493,6 +494,9 @@ func (s *GDrive) Get(token string, filename string) (reader io.ReadCloser, conte
 
 	var fi *drive.File
 	fi, err = s.service.Files.Get(fileID).Fields("size", "md5Checksum").Do()
+	if err != nil {
+		return
+	}
 	if !s.hasChecksum(fi) {
 		err = fmt.Errorf("Cannot find file %s/%s", token, filename)
 		return
@@ -515,7 +519,7 @@ func (s *GDrive) Get(token string, filename string) (reader io.ReadCloser, conte
 // Delete removes a file from storage
 func (s *GDrive) Delete(token string, filename string) (err error) {
 	metadata, _ := s.findID(fmt.Sprintf("%s.metadata", filename), token)
-	s.service.Files.Delete(metadata).Do()
+	_ = s.service.Files.Delete(metadata).Do()
 
 	var fileID string
 	fileID, err = s.findID(filename, token)
@@ -644,7 +648,7 @@ func getGDriveTokenFromWeb(config *oauth2.Config, logger *log.Logger) *oauth2.To
 // Retrieves a token from a local file.
 func gDriveTokenFromFile(file string) (*oauth2.Token, error) {
 	f, err := os.Open(file)
-	defer f.Close()
+	defer CloseCheck(f.Close)
 	if err != nil {
 		return nil, err
 	}
@@ -657,12 +661,15 @@ func gDriveTokenFromFile(file string) (*oauth2.Token, error) {
 func saveGDriveToken(path string, token *oauth2.Token, logger *log.Logger) {
 	logger.Printf("Saving credential file to: %s\n", path)
 	f, err := os.OpenFile(path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0600)
-	defer f.Close()
+	defer CloseCheck(f.Close)
 	if err != nil {
 		logger.Fatalf("Unable to cache oauth token: %v", err)
 	}
 
-	json.NewEncoder(f).Encode(token)
+	err = json.NewEncoder(f).Encode(token)
+	if err != nil {
+		logger.Fatalf("Unable to encode oauth token: %v", err)
+	}
 }
 
 // StorjStorage is a storage backed by Storj

+ 6 - 0
server/utils.go

@@ -279,3 +279,9 @@ func formatSize(size int64) string {
 	getSuffix := suffixes[int(math.Floor(base))]
 	return fmt.Sprintf("%s %s", strconv.FormatFloat(newVal, 'f', -1, 64), getSuffix)
 }
+
+func CloseCheck(f func() error) {
+	if err := f(); err != nil {
+		fmt.Println("Received close error:", err)
+	}
+}
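
CloseCheck takes the close function itself (not its result), so the error is produced only when the deferred call actually runs; this is the `defer CloseCheck(f.Close)` pattern used in handlers.go and storage.go above. A small usage sketch under that assumption (readExample is a hypothetical helper, not part of the commit):

// Same package as utils.go, so CloseCheck is in scope.
package server

import (
	"io"
	"os"
)

func readExample(path string) ([]byte, error) {
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	// Pass f.Close (the function) rather than f.Close() (its result): the
	// deferred CloseCheck invokes it on return and prints any close error.
	defer CloseCheck(f.Close)

	return io.ReadAll(f)
}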

+ 2 - 5
server/virustotal.go

@@ -26,7 +26,6 @@ package server
 
 import (
 	"fmt"
-	"io"
 	"net/http"
 
 	"github.com/gorilla/mux"
@@ -49,9 +48,7 @@ func (s *Server) virusTotalHandler(w http.ResponseWriter, r *http.Request) {
 		http.Error(w, err.Error(), 500)
 	}
 
-	var reader io.Reader
-
-	reader = r.Body
+	reader := r.Body
 
 	result, err := vt.Scan(filename, reader)
 	if err != nil {
@@ -59,5 +56,5 @@ func (s *Server) virusTotalHandler(w http.ResponseWriter, r *http.Request) {
 	}
 
 	s.logger.Println(result)
-	w.Write([]byte(fmt.Sprintf("%v\n", result.Permalink)))
+	_, _ = w.Write([]byte(fmt.Sprintf("%v\n", result.Permalink)))
 }