gocage fetch finished
parent 9e506145a8
commit 667c73216e

README.md (12)
@@ -189,10 +189,20 @@ Migrate jail filesystem dataset to fastdata/iocage/jails/srv-random/root: Done
 </pre></code>
 
 
+Fetch
+----------
+Files can be fetched from custom repository, or from local directory with "from" option.
+For example if you destroyed releases/12.3-RELEASE and still have the downloaded files in /iocage/download/12.3-RELEASE:
+<pre><code>
+gocage fetch -r 12.3 -o iocage --from file:/iocage/download
+</pre></code>
+
 
 TODO
 ----------
 gocage update
 gocage upgrade
 gocage create
 gocage destroy
+gocage init
+create default pool with defaults.json
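The "from" value carries its own scheme: the part before the first ":" selects the protocol, and a leading "file:" is stripped before the path is read locally. Below is a minimal sketch of that derivation, mirroring the strings.Split and strings.Replace calls in cmd/fetch.go further down; the example values and the sourceFor helper are illustrative, not gocage API.

<pre><code>
package main

import (
	"fmt"
	"strings"
)

// sourceFor mirrors how fetchRelease/fetchFile derive the protocol and path
// from the --from value; it is a standalone illustration, not gocage code.
func sourceFor(from string) (proto, path string) {
	proto = strings.Split(from, ":")[0]
	path = from
	if strings.EqualFold(proto, "file") {
		// local repository: drop the scheme and read from the filesystem
		path = strings.Replace(from, "file:", "", 1)
	}
	return proto, path
}

func main() {
	// "http://example.org/FreeBSD-releases" is a placeholder repository URL.
	for _, from := range []string{"file:/iocage/download", "http://example.org/FreeBSD-releases"} {
		p, s := sourceFor(from)
		fmt.Printf("proto=%s source=%s\n", p, s)
	}
}
</code></pre>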
cmd/fetch.go (304)
@@ -9,9 +9,11 @@ import (
 	//"errors"
 	"strings"
 	"net/http"
+	//"archive/tar"
 	"encoding/hex"
 	"crypto/sha256"
 
+	//"github.com/ulikunitz/xz"
 	log "github.com/sirupsen/logrus"
 )
 
@@ -25,18 +27,106 @@ var (
 )
 
 // TODO: Check if files already exist
-// Fetch release files, verify, put in datastore under ${datastore}/download, then extract into ${datastore}/releases
-// Only support http
-func fetchRelease(release string, proto string, arch string, datastore string) {
+// Fetch release files, verify, put in datastore under ${datastore}/download
+// Only support http and file protocols
+func fetchRelease(release string, proto string, arch string, datastore string, fetchFrom string) error {
 	var ds Datastore
 
 	log.SetReportCaller(true)
 
-	if false == strings.EqualFold(proto, "http") {
-		fmt.Printf("Unsupported protocol: %s\n", proto)
-		return
+	if len(fetchFrom) > 0 {
+		proto = strings.Split(fetchFrom, ":")[0]
 	}
 
+	if false == strings.EqualFold(proto, "http") &&
+		false == strings.EqualFold(proto, "file") {
+		return fmt.Errorf("Unsupported protocol: %s\n", proto)
+	}
+
+	for _, ds = range gDatastores {
+		if strings.EqualFold(datastore, ds.Name) {
+			break
+		}
+	}
+	if false == strings.EqualFold(datastore, ds.Name) {
+		return fmt.Errorf("Datastore not found: %s\n", datastore)
+	}
+
+	// Check datastore have a download dataset, and it is mounted
+	downloadDsName := fmt.Sprintf("%s/download", ds.ZFSDataset)
+	downloadDsMountPoint := fmt.Sprintf("%s/download", ds.Mountpoint)
+	exist, err := doZfsDatasetExist(downloadDsName)
+	if err != nil {
+		return fmt.Errorf("Error accessing dataset %s: %v\n", downloadDsName, err)
+	}
+	if false == exist {
+		// Then create dataset
+		if err := createZfsDataset(downloadDsName, downloadDsMountPoint, "lz4"); err != nil {
+			return fmt.Errorf("Error creating dataset %s: %v\n", downloadDsName, err)
+		}
+	}
+
+	// Create download/XX.X-RELEASE dataset if necessary
+	thisDownloadDsName := fmt.Sprintf("%s/%s-RELEASE", downloadDsName, release)
+	thisDownloadDsMountPoint := fmt.Sprintf("%s/%s-RELEASE", downloadDsMountPoint, release)
+	exist, err = doZfsDatasetExist(thisDownloadDsName)
+	if err != nil {
+		return fmt.Errorf("Error accessing dataset %s: %v\n", thisDownloadDsName, err)
+	}
+	if false == exist {
+		// Then create dataset
+		if err := createZfsDataset(thisDownloadDsName, thisDownloadDsMountPoint, "lz4"); err != nil {
+			return fmt.Errorf("Error creating dataset %s: %v\n", thisDownloadDsName, err)
+		}
+	}
+
+	var fetchUrl string
+	if len(fetchFrom) > 0 {
+		fetchUrl = fmt.Sprintf("%s/%s-RELEASE", fetchFrom, release)
+	} else {
+		fetchUrl = fmt.Sprintf("%s://%s/%s/%s/%s-RELEASE", proto, ReleaseServer, ReleaseRootDir, arch, release)
+	}
+	log.Debugf("FetchURL = %s", fetchUrl)
+
+	// check if proto/server/arch/release is available
+	if strings.EqualFold(proto, "http") {
+		resp, err := http.Get(fetchUrl)
+		if err != nil {
+			return fmt.Errorf("Can not get %s: %v\n", fetchUrl, err)
+		}
+		defer resp.Body.Close()
+		if resp.StatusCode != 200 {
+			return fmt.Errorf("Get %s returned %d, check release name\n", fetchUrl, resp.StatusCode)
+		}
+	}
+
+	// Fetch files
+	// Get MANIFEST so we get sha256 sums
+	if err := fetchFile(proto, fetchUrl, "MANIFEST", thisDownloadDsMountPoint, []byte{}); err != nil {
+		return fmt.Errorf("%v\n", err)
+	}
+	// Build an array of "file;checksum"
+	checksumMap, err := buildFileChecksumFromManifest(fmt.Sprintf("%s/MANIFEST", thisDownloadDsMountPoint), FetchFiles)
+	if err != nil {
+		return fmt.Errorf("%v\n", err)
+	}
+
+	// Fetch remaining files, verify integrity and write to disk
+	for f, c := range checksumMap {
+		if err := fetchFile(proto, fetchUrl, f, thisDownloadDsMountPoint, c); err != nil {
+			return fmt.Errorf("%v\n", err)
+		}
+	}
+
+	return nil
+}
+
+// Extract release files stored in iocage/download/$RELEASE/ to iocage/releases/$RELEASE/root/
+func extractRelease(release string, datastore string) {
+	log.SetReportCaller(true)
+
+	var ds Datastore
+
 	for _, ds = range gDatastores {
 		if strings.EqualFold(datastore, ds.Name) {
 			break
@@ -47,77 +137,138 @@ func fetchRelease(release string, proto string, arch string, datastore string) {
 		return
 	}
 
-	// Check datastore have a download dataset, and it is mounted
-	downloadDsName := fmt.Sprintf("%s/download", ds.ZFSDataset)
-	downloadDsMountPoint := fmt.Sprintf("%s/download", ds.Mountpoint)
-	exist, err := doZfsDatasetExist(downloadDsName)
+	// Check datastore have a releases dataset, and it is mounted
+	releaseDsName := fmt.Sprintf("%s/releases", ds.ZFSDataset)
+	releaseDsMountPoint := fmt.Sprintf("%s/releases", ds.Mountpoint)
+	exist, err := doZfsDatasetExist(releaseDsName)
 	if err != nil {
-		fmt.Printf("Error accessing dataset %s: %v\n", downloadDsName, err)
+		fmt.Printf("Error accessing dataset %s: %v\n", releaseDsName, err)
 		return
 	}
 	if false == exist {
 		// Then create dataset
-		if err := createZfsDataset(downloadDsName, downloadDsMountPoint, "lz4"); err != nil {
-			fmt.Printf("Error creating dataset %s: %v\n", downloadDsName, err)
+		if err := createZfsDataset(releaseDsName, releaseDsMountPoint, "lz4"); err != nil {
+			fmt.Printf("Error creating dataset %s: %v\n", releaseDsName, err)
 			return
 		}
 	}
 
-	// Create XX.X-RELEASE directory if necessary
-	fileDir := fmt.Sprintf("%s/%s-RELEASE", downloadDsMountPoint, release)
-	_, err = os.Stat(fileDir)
-	if os.IsNotExist(err) {
-		if err := os.Mkdir(fileDir, 0755); err != nil {
-			fmt.Printf("Error creating directory %s: %v\n", fileDir, err)
-			return
-		}
-	}
-
-
-	fetchUrl := fmt.Sprintf("%s://%s/%s/%s/%s-RELEASE", proto, ReleaseServer, ReleaseRootDir, arch, release)
-	log.Debugf("FetchURL = %s", fetchUrl)
-
-	// check if proto/server/arch/release is available
-	if strings.EqualFold(proto, "http") {
-		resp, err := http.Get(fetchUrl)
-		if err != nil {
-			fmt.Printf("Can not get %s: %v\n", fetchUrl, err)
-			return
-		}
-		defer resp.Body.Close()
-		if resp.StatusCode != 200 {
-			fmt.Printf("Get %s returned %d, check release name\n", fetchUrl, resp.StatusCode)
-			return
-		}
-	}
-
-	// Fetch files
-	// Get MANIFEST so we get sha256 sums
-	if err := fetchHTTPFile(fetchUrl, "MANIFEST", fileDir, []byte{}); err != nil {
-		fmt.Printf("%v\n", err)
-		return
-	}
-	// Build an array of "file;checksum"
-	checksumMap, err := buildFileChecksumFromManifest(fmt.Sprintf("%s/MANIFEST", fileDir), FetchFiles)
-	if err != nil {
-		fmt.Printf("%v\n", err)
-		return
-	}
-
-	// Fetch remaining files, verify integrity and write to disk
-	for f, c := range checksumMap {
-		if strings.EqualFold(proto, "http") {
-			if err := fetchHTTPFile(fetchUrl, f, fileDir, c); err != nil {
-				fmt.Printf("%v\n", err)
-				return
-			}
-		}
-	}
-
-	// TODO: Extract download/$RELEASE/files to releases/$RELEASE/
+	// Create releases/XX.X-RELEASE dataset if necessary
+	thisReleaseDsName := fmt.Sprintf("%s/%s-RELEASE", releaseDsName, release)
+	thisReleaseDsMountPoint := fmt.Sprintf("%s/%s-RELEASE", releaseDsMountPoint, release)
+	exist, err = doZfsDatasetExist(thisReleaseDsName)
+	if err != nil {
+		fmt.Printf("Error accessing dataset %s: %v\n", thisReleaseDsName, err)
+		return
+	}
+	if false == exist {
+		// Then create dataset
+		if err := createZfsDataset(thisReleaseDsName, thisReleaseDsMountPoint, "lz4"); err != nil {
+			fmt.Printf("Error creating dataset %s: %v\n", thisReleaseDsName, err)
+			return
+		}
+	}
+
+	// Create releases/XX.X-RELEASE/root dataset if necessary
+	thisReleaseRootDsName := fmt.Sprintf("%s/root", thisReleaseDsName)
+	thisReleaseRootDsMountPoint := fmt.Sprintf("%s/root", thisReleaseDsMountPoint)
+	exist, err = doZfsDatasetExist(thisReleaseRootDsName)
+	if err != nil {
+		fmt.Printf("Error accessing dataset %s: %v\n", thisReleaseRootDsName, err)
+		return
+	}
+	if false == exist {
+		// Then create dataset
+		if err := createZfsDataset(thisReleaseRootDsName, thisReleaseRootDsMountPoint, "lz4"); err != nil {
+			fmt.Printf("Error creating dataset %s: %v\n", thisReleaseRootDsName, err)
+			return
+		}
+	}
+
+	// Now extract download/$RELEASE/*.txz to releases/XX.X-RELEASE/root
+	downloadDsMountPoint := fmt.Sprintf("%s/download", ds.Mountpoint)
+	downloadDir := fmt.Sprintf("%s/%s-RELEASE", downloadDsMountPoint, release)
+
+	d, err := os.Open(downloadDir)
+	defer d.Close()
+	if err != nil {
+		fmt.Printf("Can not read %s directory: %v\n", downloadDir, err)
+		return
+	}
+	files, err := d.Readdir(0)
+	if err != nil {
+		fmt.Printf("Can not browse %s directory: %v\n", downloadDir, err)
+		return
+	}
+
+	// Extract every .txz files
+	for _, fi := range files {
+		if false == fi.IsDir() {
+			if strings.HasSuffix(fi.Name(), ".txz") {
+				ar := fmt.Sprintf("%s/%s", downloadDir, fi.Name())
+				fmt.Printf("Extracting file %s... ", ar)
+				// pure Go method, sorry this is so slow. Also I did not handle permissions in this
+				/* f, err := os.Open(ar)
+				defer f.Close()
+				if err != nil {
+					fmt.Printf("Can not open %s: %v\n", ar, err)
+					return
+				}
+				// xz reader
+				r, err := xz.NewReader(f)
+				if err != nil {
+					fmt.Printf("Can not read %s: %v\n", ar, err)
+					return
+				}
+				// tar reader
+				tr := tar.NewReader(r)
+				// Iterate through the files in the archive.
+				for {
+					hdr, err := tr.Next()
+					if err == io.EOF {
+						// end of tar archive
+						break
+					}
+					if err != nil {
+						log.Fatal(err)
+					}
+					switch hdr.Typeflag {
+					case tar.TypeDir:
+						// create a directory
+						dest := fmt.Sprintf("%s/%s", thisReleaseRootDsMountPoint, hdr.Name)
+						// FIXME: Access rights?
+						err = os.MkdirAll(dest, 0777)
+						if err != nil {
+							log.Fatal(err)
+						}
+					case tar.TypeReg, tar.TypeRegA:
+						// write a file
+						dest := fmt.Sprintf("%s/%s", thisReleaseRootDsMountPoint, hdr.Name)
+						w, err := os.Create(dest)
+						defer w.Close()
+						if err != nil {
+							log.Fatal(err)
+						}
+						_, err = io.Copy(w, tr)
+						if err != nil {
+							log.Fatal(err)
+						}
+					}
+				}
+				*/
+				cmd := fmt.Sprintf("/usr/bin/tar xpf %s -C %s", ar, thisReleaseRootDsMountPoint)
+				out, err := executeCommand(cmd)
+				if err != nil && len(out) > 0 {
+					fmt.Printf("Error: %v: %s\n", err, out)
+				} else {
+					fmt.Printf("Done\n")
+				}
+			}
+		}
+	}
+
 }
 
-func fetchHTTPFile(baseUrl, fileName, storeDir string, checksum []byte) error {
+func fetchFile(proto, baseUrl, fileName, storeDir string, checksum []byte) error {
 	// Check storeDir exist
 	_, err := os.Stat(storeDir)
 	if os.IsNotExist(err) {
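extractRelease shells out to tar(1) rather than using the commented-out pure-Go xz/tar path, which was slow and did not preserve permissions. executeCommand is an existing gocage helper that is not part of this diff; as a rough, illustrative equivalent of that one call using os/exec (assumed semantics, not the actual helper):

<pre><code>
package main

import (
	"fmt"
	"os/exec"
)

// extractTxz is an illustrative stand-in for the executeCommand() call above:
// it runs "/usr/bin/tar xpf <archive> -C <dest>" and surfaces any output on failure.
func extractTxz(archive, dest string) error {
	out, err := exec.Command("/usr/bin/tar", "xpf", archive, "-C", dest).CombinedOutput()
	if err != nil {
		return fmt.Errorf("tar failed: %v: %s", err, out)
	}
	return nil
}

func main() {
	// Paths follow the README example (12.3-RELEASE); base.txz is one of the FreeBSD release archives.
	err := extractTxz("/iocage/download/12.3-RELEASE/base.txz", "/iocage/releases/12.3-RELEASE/root")
	if err != nil {
		fmt.Println(err)
	}
}
</code></pre>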
@@ -126,15 +277,30 @@ func fetchHTTPFile(baseUrl, fileName, storeDir string, checksum []byte) error {
 
 	url := fmt.Sprintf("%s/%s", baseUrl, fileName)
 	fmt.Printf("Fetching %s...", url)
-	resp, err := http.Get(url)
-	if err != nil {
-		fmt.Printf(" Error\n")
-		return fmt.Errorf("Can not get %s: %v\n", url, err)
-	}
-	defer resp.Body.Close()
-	body, err := io.ReadAll(resp.Body)
-	if err != nil {
-		return fmt.Errorf("Can not read %s response body: %v\n", url, err)
+	var body []byte
+	if strings.EqualFold(proto, "http") {
+		resp, err := http.Get(url)
+		if err != nil {
+			fmt.Printf(" Error\n")
+			return fmt.Errorf("Can not get %s: %v\n", url, err)
+		}
+		defer resp.Body.Close()
+		body, err = io.ReadAll(resp.Body)
+		if err != nil {
+			return fmt.Errorf("Can not read %s response body: %v\n", url, err)
+		}
+	} else if strings.EqualFold(proto, "file") {
+		url = strings.Replace(url, "file:", "", 1)
+		f, err := os.Open(url)
+		if err != nil {
+			return fmt.Errorf("Error accessing file %s", url)
+		}
+		defer f.Close()
+		body, err = io.ReadAll(f)
+		if err != nil {
+			return fmt.Errorf("Can not read file %s: %v\n", url, err)
+		}
 	}
 
 	// Check integrity
@@ -153,7 +319,7 @@ func fetchHTTPFile(baseUrl, fileName, storeDir string, checksum []byte) error {
 	}
 	defer f.Close()
 	f.Write(body)
-	fmt.Printf(" Done.\n")
+	fmt.Printf(" Done\n")
 	return nil
 }
 
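fetchFile receives each file's expected checksum as a []byte and verifies the downloaded body against it in the "// Check integrity" step, whose body sits outside these hunks. Given the crypto/sha256 and encoding/hex imports and the "Get MANIFEST so we get sha256 sums" comment, the check presumably hashes the body and compares digests (the MANIFEST itself is fetched with an empty checksum, so that comparison is presumably skipped). A minimal sketch of such a verification; verifyChecksum is an illustrative helper, not the exact gocage code:

<pre><code>
package main

import (
	"bytes"
	"crypto/sha256"
	"encoding/hex"
	"fmt"
)

// verifyChecksum hashes body with SHA-256 and compares it to the expected
// hex-encoded digest, e.g. one parsed out of a release MANIFEST line.
func verifyChecksum(body []byte, expectedHex string) error {
	expected, err := hex.DecodeString(expectedHex)
	if err != nil {
		return fmt.Errorf("bad checksum: %v", err)
	}
	sum := sha256.Sum256(body)
	if !bytes.Equal(sum[:], expected) {
		return fmt.Errorf("checksum mismatch: got %x, want %s", sum, expectedHex)
	}
	return nil
}

func main() {
	body := []byte("example payload")
	sum := sha256.Sum256(body)
	fmt.Println(verifyChecksum(body, hex.EncodeToString(sum[:]))) // <nil>
}
</code></pre>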
cmd/root.go (11)
@@ -14,7 +14,7 @@ import (
 )
 
 const (
-	gVersion = "0.31"
+	gVersion = "0.32a"
 
 	// TODO : Get from $jail_zpool/defaults.json
 	MIN_DYN_DEVFS_RULESET = 1000
@@ -53,6 +53,7 @@ var (
 
 	gFetchRelease string
 	gFetchIntoDS string
+	gFetchFrom string
 
 
 	rootCmd = &cobra.Command{
@@ -293,7 +294,12 @@ You can specify multiple datastores.`,
 		Use:   "fetch",
 		Short: "Fetch FreeBSD release to local datastore",
 		Run: func(cmd *cobra.Command, args []string) {
-			fetchRelease(gFetchRelease, "http", gJailHost.arch, gFetchIntoDS)
+			err := fetchRelease(gFetchRelease, "http", gJailHost.arch, gFetchIntoDS, gFetchFrom)
+			if err != nil {
+				fmt.Printf("%v\n", err)
+			} else {
+				extractRelease(gFetchRelease, gFetchIntoDS)
+			}
 		},
 	}
 
@@ -352,6 +358,7 @@ func init() {
 
 	fetchCmd.Flags().StringVarP(&gFetchRelease, "release", "r", "", "Release to fetch (e.g.: \"13.1\"")
 	fetchCmd.Flags().StringVarP(&gFetchIntoDS, "datastore", "o", "", "Datastore release will be saved to")
+	fetchCmd.Flags().StringVarP(&gFetchFrom, "from", "d", "", "Repository to download from. Should contain XY.Z-RELEASE. File protocol supported")
 	fetchCmd.MarkFlagRequired("release")
 	fetchCmd.MarkFlagRequired("datastore")
 