Package archive/helper
- add a package to compress/uncompress data through a reader or a writer (a standalone sketch of the idea follows this list)
- refactor to allow using the same kind of io source as the result: io.Reader or io.Writer
- optimize code and buffering to limit memory use
- rework state variables to be thread safe
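
As context for the diff further down, the reader side of the helper follows a common pattern: wrap a plain-data source so that reading from the wrapper yields compressed bytes, filling an internal buffer one chunk at a time. Below is a minimal, self-contained sketch of that pattern using only the standard library (gzip chosen arbitrarily; the type and constructor names are illustrative, not the package's API, whose constructors are not shown in this diff):

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"strings"
)

// gzipReader turns a plain-data io.Reader into an io.Reader of gzip data.
type gzipReader struct {
	src io.Reader    // plain-data source
	buf bytes.Buffer // holds compressed bytes waiting to be read
	zw  *gzip.Writer // compressor writing into buf
	eof bool         // true once src is drained and zw is closed
}

func newGzipReader(src io.Reader) *gzipReader {
	r := &gzipReader{src: src}
	r.zw = gzip.NewWriter(&r.buf)
	return r
}

func (r *gzipReader) Read(p []byte) (int, error) {
	// Refill the buffer one compressed chunk at a time.
	for r.buf.Len() == 0 && !r.eof {
		chunk := make([]byte, 32*1024)
		n, err := r.src.Read(chunk)
		if n > 0 {
			if _, werr := r.zw.Write(chunk[:n]); werr != nil {
				return 0, werr
			}
		}
		if err == io.EOF {
			r.eof = true
			// Closing the gzip writer flushes the remaining compressed data.
			if cerr := r.zw.Close(); cerr != nil {
				return 0, cerr
			}
		} else if err != nil {
			return 0, err
		}
	}
	if r.buf.Len() == 0 {
		return 0, io.EOF
	}
	return r.buf.Read(p)
}

func main() {
	src := strings.NewReader("hello hello hello")
	out, _ := io.ReadAll(newGzipReader(src))
	fmt.Println("compressed size:", len(out))
}

Keeping only one chunk in the buffer at a time is what bounds memory use, which is the point of the "optimize code and buffering" item above.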

Package archive/compress
- add function DetectOnly to detect the algorithm and return an updated reader, without wrapping it in the decompression reader
- update function Detect to build on DetectOnly and limit code duplication (a usage sketch follows this list)
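
A hedged usage sketch of the two functions as they appear in the diff below. The import path and the input file name are assumptions; the point is that Detect returns a reader that already decompresses, while DetectOnly returns the detected algorithm plus the still-compressed, buffered stream:

package main

import (
	"fmt"
	"io"
	"os"

	"github.com/nabbar/golib/archive/compress" // assumed import path
)

func main() {
	f, err := os.Open("data.bin") // hypothetical compressed input
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// Detect: algorithm + a ReadCloser that already decompresses.
	alg, rc, err := compress.Detect(f)
	if err != nil {
		panic(err)
	}
	defer rc.Close()

	fmt.Println("detected:", alg.String())
	_, _ = io.Copy(os.Stdout, rc) // plain data

	// DetectOnly would instead return the algorithm and the buffered,
	// still-compressed stream, leaving decompression to the caller:
	//   alg, raw, err := compress.DetectOnly(f)
}

In both cases the returned reader must be used in place of the original one, since detection peeks at the first bytes through a buffered reader.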

Other
- bump dependencies
swbo97 authored and nabbar committed Nov 27, 2024
1 parent 5e6e294 commit cd083bb
Showing 9 changed files with 5,396 additions and 821 deletions.
25 changes: 17 additions & 8 deletions archive/compress/interface.go
@@ -40,12 +40,27 @@ func Parse(s string) Algorithm {
}

func Detect(r io.Reader) (Algorithm, io.ReadCloser, error) {
	var (
		err error
		alg Algorithm
		rdr io.ReadCloser
	)

	if alg, rdr, err = DetectOnly(r); err != nil {
		return None, nil, err
	} else if rdr, err = alg.Reader(rdr); err != nil {
		return None, nil, err
	} else {
		return alg, rdr, nil
	}
}

func DetectOnly(r io.Reader) (Algorithm, io.ReadCloser, error) {
	var (
		err error
		alg Algorithm
		bfr = bufio.NewReader(r)
		buf []byte
		res io.ReadCloser
	)

	if buf, err = bfr.Peek(6); err != nil {
@@ -66,11 +81,5 @@ func Detect(r io.Reader) (Algorithm, io.ReadCloser, error) {
		alg = None
	}

	if err != nil {
		return None, nil, err
	} else if res, err = alg.Reader(bfr); err != nil {
		return None, nil, err
	} else {
		return alg, res, err
	}
	return alg, io.NopCloser(bfr), err
}
21 changes: 21 additions & 0 deletions archive/compress/types.go
@@ -37,6 +37,27 @@ const (
	XZ
)

func List() []Algorithm {
	return []Algorithm{
		None,
		Bzip2,
		Gzip,
		LZ4,
		XZ,
	}
}

func ListString() []string {
	var (
		lst = List()
		res = make([]string, len(lst))
	)
	for i := range lst {
		res[i] = lst[i].String()
	}
	return res
}

func (a Algorithm) IsNone() bool {
	return a == None
}
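
A possible use of the new List/ListString helpers together with the existing Parse (visible in the hunk header of the previous file), e.g. to validate a CLI flag. The import path is assumed, and Parse's behaviour for an unknown name (falling back to None) is an assumption not confirmed by this diff:

package main

import (
	"fmt"
	"strings"

	"github.com/nabbar/golib/archive/compress" // assumed import path
)

func main() {
	fmt.Println("accepted algorithms:", strings.Join(compress.ListString(), ", "))

	alg := compress.Parse("gzip") // hypothetical user input
	if alg.IsNone() {
		fmt.Println("unknown algorithm, no compression will be used")
	} else {
		fmt.Println("using:", alg.String())
	}
}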
157 changes: 157 additions & 0 deletions archive/helper/compressor.go
@@ -0,0 +1,157 @@
/*
* MIT License
*
* Copyright (c) 2024 Salim Amine BOU ARAM & Nicolas JUHEL
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/

package helper

import (
	"bytes"
	"io"
	"sync/atomic"
)

type compressWriter struct {
	dst io.WriteCloser
}

func (o *compressWriter) Read(p []byte) (n int, err error) {
	return 0, ErrInvalidSource
}

func (o *compressWriter) Write(p []byte) (n int, err error) {
	return o.dst.Write(p)
}

func (o *compressWriter) Close() error {
	return o.dst.Close()
}

// compressReader handles data compression in chunks.
type compressReader struct {
	src io.ReadCloser
	wrt io.WriteCloser
	buf *bytes.Buffer
	clo *atomic.Bool
}

// Read compresses data from the source and returns it from the internal buffer in chunks.
func (o *compressReader) Read(p []byte) (n int, err error) {
	if o.src == nil {
		return 0, ErrInvalidSource
	}

	var size int

	if s := cap(p); s < chunkSize {
		size = chunkSize
	} else {
		size = s
	}

	if o.clo.Load() && o.buf.Len() == 0 {
		return 0, io.EOF
	}

	if o.buf.Len() < size && !o.clo.Load() {
		if _, err = o.fill(size); err != nil {
			return 0, err
		}
	}

	n, err = o.buf.Read(p)

	if n > 0 {
		return n, nil
	} else if err == nil {
		err = io.EOF
	}

	return 0, err
}

// fill handles compressing data from the source and writing to the buffer.
func (o *compressReader) fill(size int) (n int, err error) {
	var (
		buf = make([]byte, size)
		errWrt error
		errclo error
	)

	for o.buf.Len() < size {
		if n, err = o.src.Read(buf); err != nil && err != io.EOF {
			return 0, err
		}

		if n > 0 {
			if _, errWrt = o.wrt.Write(buf[:n]); errWrt != nil {
				return 0, errWrt
			}
		}

		if err == io.EOF {
			o.clo.Store(true)

			errWrt = o.wrt.Close()
			errclo = o.src.Close()

			if errclo != nil {
				return 0, errclo
			} else if errWrt != nil {
				return 0, errWrt
			}

			return o.buf.Len(), nil
		} else if err != nil {
			return n, err
		}
	}

	data := o.buf.Bytes()
	o.buf.Reset()

	if _, err = o.buf.Write(data); err != nil {
		return 0, err
	}

	return o.buf.Len(), nil
}

// Close closes the compressor and underlying writer.
func (o *compressReader) Close() (err error) {
	a := o.clo.Swap(true)

	if o.buf != nil {
		o.buf.Reset()
	}

	if o.wrt != nil && !a {
		return o.wrt.Close()
	}

	return nil
}

func (o *compressReader) Write(p []byte) (n int, err error) {
	return 0, ErrInvalidSource
}