diff --git a/drivers/115/driver.go b/drivers/115/driver.go index 22db74e38..f6d2b4a90 100644 --- a/drivers/115/driver.go +++ b/drivers/115/driver.go @@ -258,4 +258,51 @@ func (d *Pan115) GetDetails(ctx context.Context) (*model.StorageDetails, error) }, nil } +func (d *Pan115) BatchMove(ctx context.Context, srcDir model.Obj, srcObjs []model.Obj, dstDir model.Obj, args model.BatchArgs) error { + if err := d.WaitLimit(ctx); err != nil { + return err + } + + var srcObjIds []string + for _, srcObj := range srcObjs { + srcObjIds = append(srcObjIds, srcObj.GetID()) + } + + return d.client.Move(dstDir.GetID(), srcObjIds...) +} + +func (d *Pan115) BatchCopy(ctx context.Context, srcDir model.Obj, srcObjs []model.Obj, dstDir model.Obj, args model.BatchArgs) error { + if err := d.WaitLimit(ctx); err != nil { + return err + } + + var srcObjIds []string + for _, srcObj := range srcObjs { + srcObjIds = append(srcObjIds, srcObj.GetID()) + } + + return d.client.Copy(dstDir.GetID(), srcObjIds...) +} + +func (d *Pan115) BatchRemove(ctx context.Context, batchRemoveObj model.BatchRemoveObj, args model.BatchArgs) error { + if err := d.WaitLimit(ctx); err != nil { + return err + } + + var srcObjIds []string + for _, srcObj := range batchRemoveObj.RemoveObjs { + srcObjIds = append(srcObjIds, srcObj.GetID()) + } + + return d.client.Delete(srcObjIds...) 
+} + +func (d *Pan115) BatchRename(ctx context.Context, batchRenameObj model.BatchRenameObj, args model.BatchArgs) error { + if err := d.WaitLimit(ctx); err != nil { + return err + } + + return d.batchRename(batchRenameObj) +} + var _ driver.Driver = (*Pan115)(nil) diff --git a/drivers/115/util.go b/drivers/115/util.go index b000436b2..2a175d727 100644 --- a/drivers/115/util.go +++ b/drivers/115/util.go @@ -547,3 +547,23 @@ func SplitFileByPartSize(fileSize int64, chunkSize int64) ([]oss.FileChunk, erro return chunks, nil } + +func (d *Pan115) batchRename(renameObj model.BatchRenameObj) error { + + form := make(map[string]string) + + for _, obj := range renameObj.RenameObjs { + form[fmt.Sprintf("files_new_name[%s]", obj.GetID())] = obj.NewName + } + + result := driver115.BasicResp{} + + req := d.client.NewRequest(). + SetFormData(form). + ForceContentType("application/json;charset=UTF-8"). + SetResult(&result) + + resp, err := req.Post(driver115.ApiFileRename) + + return driver115.CheckErr(err, &result, resp) +} diff --git a/drivers/115_open/driver.go b/drivers/115_open/driver.go index afccb2a7e..18c14b9db 100644 --- a/drivers/115_open/driver.go +++ b/drivers/115_open/driver.go @@ -367,4 +367,57 @@ func (d *Open115) GetDetails(ctx context.Context) (*model.StorageDetails, error) // return nil, errs.NotSupport //} +func (d *Open115) BatchMove(ctx context.Context, srcDir model.Obj, srcObjs []model.Obj, dstDir model.Obj, args model.BatchArgs) error { + if err := d.WaitLimit(ctx); err != nil { + return err + } + + var srcObjIds []string + for _, srcObj := range srcObjs { + srcObjIds = append(srcObjIds, srcObj.GetID()) + } + + _, err := d.client.Move(ctx, &sdk.MoveReq{ + FileIDs: strings.Join(srcObjIds, ","), + ToCid: dstDir.GetID(), + }) + return err +} + +func (d *Open115) BatchCopy(ctx context.Context, srcDir model.Obj, srcObjs []model.Obj, dstDir model.Obj, args model.BatchArgs) error { + if err := d.WaitLimit(ctx); err != nil { + return err + } + + var srcObjIds 
[]string + for _, srcObj := range srcObjs { + srcObjIds = append(srcObjIds, srcObj.GetID()) + } + + _, err := d.client.Copy(ctx, &sdk.CopyReq{ + PID: dstDir.GetID(), + FileID: strings.Join(srcObjIds, ","), + NoDupli: "1", + }) + return err +} + +func (d *Open115) BatchRemove(ctx context.Context, batchRemoveObj model.BatchRemoveObj, args model.BatchArgs) error { + if err := d.WaitLimit(ctx); err != nil { + return err + } + + var srcObjIds []string + for _, srcObj := range batchRemoveObj.RemoveObjs { + srcObjIds = append(srcObjIds, srcObj.GetID()) + } + + _, err := d.client.DelFile(ctx, &sdk.DelFileReq{ + FileIDs: strings.Join(srcObjIds, ","), + ParentID: batchRemoveObj.Dir.GetID(), + }) + + return err +} + var _ driver.Driver = (*Open115)(nil) diff --git a/drivers/123_open/driver.go b/drivers/123_open/driver.go index ac75e51d7..8b2cabd01 100644 --- a/drivers/123_open/driver.go +++ b/drivers/123_open/driver.go @@ -120,7 +120,7 @@ func (d *Open123) MakeDir(ctx context.Context, parentDir model.Obj, dirName stri func (d *Open123) Move(ctx context.Context, srcObj, dstDir model.Obj) error { toParentFileID, _ := strconv.ParseInt(dstDir.GetID(), 10, 64) - return d.move(srcObj.(File).FileId, toParentFileID) + return d.move([]int64{srcObj.(File).FileId}, toParentFileID) } func (d *Open123) Rename(ctx context.Context, srcObj model.Obj, newName string) error { @@ -152,7 +152,7 @@ func (d *Open123) Copy(ctx context.Context, srcObj, dstDir model.Obj) error { func (d *Open123) Remove(ctx context.Context, obj model.Obj) error { fileId, _ := strconv.ParseInt(obj.GetID(), 10, 64) - return d.trash(fileId) + return d.trash([]int64{fileId}) } func (d *Open123) Put(ctx context.Context, dstDir model.Obj, file model.FileStreamer, up driver.UpdateProgress) (model.Obj, error) { @@ -237,6 +237,32 @@ func (d *Open123) OfflineDownloadProcess(ctx context.Context, taskID int) (float return d.queryOfflineDownloadStatus(ctx, taskID) } +func (d *Open123) BatchMove(ctx context.Context, srcDir 
model.Obj, srcObjs []model.Obj, dstDir model.Obj, args model.BatchArgs) error { + var ids []int64 + for _, obj := range srcObjs { + ids = append(ids, obj.(File).FileId) + } + + return d.move(ids, dstDir.(File).FileId) +} + +func (d *Open123) BatchRemove(ctx context.Context, batchRemoveObj model.BatchRemoveObj, args model.BatchArgs) error { + var ids []int64 + for _, obj := range batchRemoveObj.RemoveObjs { + ids = append(ids, obj.(File).FileId) + } + return d.trash(ids) +} + +func (d *Open123) BatchRename(ctx context.Context, batchRenameObj model.BatchRenameObj, args model.BatchArgs) error { + var renameList []string + for _, obj := range batchRenameObj.RenameObjs { + renameList = append(renameList, fmt.Sprintf("%d|%s", obj.Obj.(File).FileId, obj.NewName)) + } + + return d.batchRename(renameList) +} + var ( _ driver.Driver = (*Open123)(nil) _ driver.PutResult = (*Open123)(nil) diff --git a/drivers/123_open/util.go b/drivers/123_open/util.go index b09d9eb8b..f49aefa54 100644 --- a/drivers/123_open/util.go +++ b/drivers/123_open/util.go @@ -8,6 +8,7 @@ import ( "fmt" "net/http" "net/url" + "slices" "strconv" "strings" "time" @@ -31,6 +32,7 @@ var ( // 不同情况下获取的AccessTokenQPS限制不同 如下模块化易 Mkdir = InitApiInfo(Api+"/upload/v1/file/mkdir", 2) Move = InitApiInfo(Api+"/api/v1/file/move", 1) Rename = InitApiInfo(Api+"/api/v1/file/name", 1) + BatchRename = InitApiInfo(Api+"/api/v1/file/rename", 0) Trash = InitApiInfo(Api+"/api/v1/file/trash", 2) UploadCreate = InitApiInfo(Api+"/upload/v2/file/create", 2) UploadComplete = InitApiInfo(Api+"/upload/v2/file/upload_complete", 0) @@ -240,15 +242,18 @@ func (d *Open123) mkdir(parentID int64, name string) error { return nil } -func (d *Open123) move(fileID, toParentFileID int64) error { - _, err := d.Request(Move, http.MethodPost, func(req *resty.Request) { - req.SetBody(base.Json{ - "fileIDs": []int64{fileID}, - "toParentFileID": toParentFileID, - }) - }, nil) - if err != nil { - return err +func (d *Open123) move(fileIDs []int64, 
toParentFileID int64) error { + + for ids := range slices.Chunk(fileIDs, 100) { + _, err := d.Request(Move, http.MethodPost, func(req *resty.Request) { + req.SetBody(base.Json{ + "fileIDs": ids, + "toParentFileID": toParentFileID, + }) + }, nil) + if err != nil { + return err + } } return nil @@ -268,14 +273,31 @@ func (d *Open123) rename(fileId int64, fileName string) error { return nil } -func (d *Open123) trash(fileId int64) error { - _, err := d.Request(Trash, http.MethodPost, func(req *resty.Request) { - req.SetBody(base.Json{ - "fileIDs": []int64{fileId}, - }) - }, nil) - if err != nil { - return err +func (d *Open123) batchRename(renameList []string) error { + for names := range slices.Chunk(renameList, 30) { + _, err := d.Request(BatchRename, http.MethodPost, func(req *resty.Request) { + req.SetBody(base.Json{ + "renameList": names, + }) + }, nil) + if err != nil { + return err + } + } + + return nil +} + +func (d *Open123) trash(fileIDs []int64) error { + for cids := range slices.Chunk(fileIDs, 100) { + _, err := d.Request(Trash, http.MethodPost, func(req *resty.Request) { + req.SetBody(base.Json{ + "fileIDs": cids, + }) + }, nil) + if err != nil { + return err + } } return nil diff --git a/drivers/189/driver.go b/drivers/189/driver.go index 0489ef015..adf5b100f 100644 --- a/drivers/189/driver.go +++ b/drivers/189/driver.go @@ -204,4 +204,93 @@ func (d *Cloud189) GetDetails(ctx context.Context) (*model.StorageDetails, error }, nil } +func (d *Cloud189) BatchMove(ctx context.Context, srcDir model.Obj, srcObjs []model.Obj, dstDir model.Obj, args model.BatchArgs) error { + var taskInfos []base.Json + for _, srcObj := range srcObjs { + taskInfos = append(taskInfos, base.Json{ + "fileId": srcObj.GetID(), + "fileName": srcObj.GetName(), + "isFolder": func() int { + if srcObj.IsDir() { + return 1 + } + return 0 + }(), + }) + } + taskInfosBytes, err := utils.Json.Marshal(taskInfos) + if err != nil { + return err + } + form := map[string]string{ + "type": "MOVE", + 
"targetFolderId": dstDir.GetID(), + "taskInfos": string(taskInfosBytes), + } + _, err = d.request("https://cloud.189.cn/api/open/batch/createBatchTask.action", http.MethodPost, func(req *resty.Request) { + req.SetFormData(form) + }, nil) + return err +} + +func (d *Cloud189) BatchCopy(ctx context.Context, srcDir model.Obj, srcObjs []model.Obj, dstDir model.Obj, args model.BatchArgs) error { + var taskInfos []base.Json + for _, srcObj := range srcObjs { + taskInfos = append(taskInfos, base.Json{ + "fileId": srcObj.GetID(), + "fileName": srcObj.GetName(), + "isFolder": func() int { + if srcObj.IsDir() { + return 1 + } + return 0 + }(), + }) + } + + taskInfosBytes, err := utils.Json.Marshal(taskInfos) + if err != nil { + return err + } + form := map[string]string{ + "type": "COPY", + "targetFolderId": dstDir.GetID(), + "taskInfos": string(taskInfosBytes), + } + _, err = d.request("https://cloud.189.cn/api/open/batch/createBatchTask.action", http.MethodPost, func(req *resty.Request) { + req.SetFormData(form) + }, nil) + return err +} + +func (d *Cloud189) BatchRemove(ctx context.Context, batchRenameObj model.BatchRemoveObj, args model.BatchArgs) error { + + var taskInfos []base.Json + for _, srcObj := range batchRenameObj.RemoveObjs { + taskInfos = append(taskInfos, base.Json{ + "fileId": srcObj.GetID(), + "fileName": srcObj.GetName(), + "isFolder": func() int { + if srcObj.IsDir() { + return 1 + } + return 0 + }(), + }) + } + taskInfosBytes, err := utils.Json.Marshal(taskInfos) + if err != nil { + return err + } + form := map[string]string{ + "type": "DELETE", + "targetFolderId": "", + "taskInfos": string(taskInfosBytes), + } + _, err = d.request("https://cloud.189.cn/api/open/batch/createBatchTask.action", http.MethodPost, func(req *resty.Request) { + req.SetFormData(form) + }, nil) + return err +} + var _ driver.Driver = (*Cloud189)(nil) diff --git a/drivers/189_tv/driver.go b/drivers/189_tv/driver.go index aec56b3a6..f98f73afe 100644 --- a/drivers/189_tv/driver.go 
+++ b/drivers/189_tv/driver.go @@ -296,3 +296,72 @@ func (y *Cloud189TV) GetDetails(ctx context.Context) (*model.StorageDetails, err DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total), }, nil } + +func (y *Cloud189TV) BatchMove(ctx context.Context, srcDir model.Obj, srcObjs []model.Obj, dstDir model.Obj, args model.BatchArgs) error { + + isFamily := y.isFamily() + other := map[string]string{"targetFileName": dstDir.GetName()} + + var tasks []BatchTaskInfo + for _, obj := range srcObjs { + tasks = append(tasks, BatchTaskInfo{ + FileId: obj.GetID(), + FileName: obj.GetName(), + IsFolder: BoolToNumber(obj.IsDir()), + }) + } + + resp, err := y.CreateBatchTask("MOVE", IF(isFamily, y.FamilyID, ""), dstDir.GetID(), other, tasks...) + if err != nil { + return err + } + + if err = y.WaitBatchTask("MOVE", resp.TaskID, time.Millisecond*400); err != nil { + return err + } + return nil + +} + +func (y *Cloud189TV) BatchCopy(ctx context.Context, srcDir model.Obj, srcObjs []model.Obj, dstDir model.Obj, args model.BatchArgs) error { + isFamily := y.isFamily() + other := map[string]string{"targetFileName": dstDir.GetName()} + + var tasks []BatchTaskInfo + for _, obj := range srcObjs { + tasks = append(tasks, BatchTaskInfo{ + FileId: obj.GetID(), + FileName: obj.GetName(), + IsFolder: BoolToNumber(obj.IsDir()), + }) + } + + resp, err := y.CreateBatchTask("COPY", IF(isFamily, y.FamilyID, ""), dstDir.GetID(), other, tasks...) 
+ if err != nil { + return err + } + + if err = y.WaitBatchTask("COPY", resp.TaskID, time.Second); err != nil { + return err + } + return nil +} +func (y *Cloud189TV) BatchRemove(ctx context.Context, batchRenameObj model.BatchRemoveObj, args model.BatchArgs) error { + isFamily := y.isFamily() + + var tasks []BatchTaskInfo + for _, obj := range batchRenameObj.RemoveObjs { + tasks = append(tasks, BatchTaskInfo{ + FileId: obj.GetID(), + FileName: obj.GetName(), + IsFolder: BoolToNumber(obj.IsDir()), + }) + } + + resp, err := y.CreateBatchTask("DELETE", IF(isFamily, y.FamilyID, ""), "", nil, tasks...) + if err != nil { + return err + } + // 批量任务数量限制,过快会导致无法删除 + return y.WaitBatchTask("DELETE", resp.TaskID, time.Millisecond*200) +} diff --git a/drivers/189pc/driver.go b/drivers/189pc/driver.go index 5b04d58f6..101314e82 100644 --- a/drivers/189pc/driver.go +++ b/drivers/189pc/driver.go @@ -428,3 +428,72 @@ func (y *Cloud189PC) GetDetails(ctx context.Context) (*model.StorageDetails, err DiskUsage: driver.DiskUsageFromUsedAndTotal(used, total), }, nil } + +func (y *Cloud189PC) BatchMove(ctx context.Context, srcDir model.Obj, srcObjs []model.Obj, dstDir model.Obj, args model.BatchArgs) error { + + isFamily := y.isFamily() + other := map[string]string{"targetFileName": dstDir.GetName()} + + var tasks []BatchTaskInfo + for _, obj := range srcObjs { + tasks = append(tasks, BatchTaskInfo{ + FileId: obj.GetID(), + FileName: obj.GetName(), + IsFolder: BoolToNumber(obj.IsDir()), + }) + } + + resp, err := y.CreateBatchTask("MOVE", IF(isFamily, y.FamilyID, ""), dstDir.GetID(), other, tasks...) 
+ if err != nil { + return err + } + + if err = y.WaitBatchTask("MOVE", resp.TaskID, time.Millisecond*400); err != nil { + return err + } + return nil + +} + +func (y *Cloud189PC) BatchCopy(ctx context.Context, srcDir model.Obj, srcObjs []model.Obj, dstDir model.Obj, args model.BatchArgs) error { + isFamily := y.isFamily() + other := map[string]string{"targetFileName": dstDir.GetName()} + + var tasks []BatchTaskInfo + for _, obj := range srcObjs { + tasks = append(tasks, BatchTaskInfo{ + FileId: obj.GetID(), + FileName: obj.GetName(), + IsFolder: BoolToNumber(obj.IsDir()), + }) + } + + resp, err := y.CreateBatchTask("COPY", IF(isFamily, y.FamilyID, ""), dstDir.GetID(), other, tasks...) + if err != nil { + return err + } + + if err = y.WaitBatchTask("COPY", resp.TaskID, time.Second); err != nil { + return err + } + return nil +} +func (y *Cloud189PC) BatchRemove(ctx context.Context, batchRenameObj model.BatchRemoveObj, args model.BatchArgs) error { + isFamily := y.isFamily() + + var tasks []BatchTaskInfo + for _, obj := range batchRenameObj.RemoveObjs { + tasks = append(tasks, BatchTaskInfo{ + FileId: obj.GetID(), + FileName: obj.GetName(), + IsFolder: BoolToNumber(obj.IsDir()), + }) + } + + resp, err := y.CreateBatchTask("DELETE", IF(isFamily, y.FamilyID, ""), "", nil, tasks...) 
+ if err != nil { + return err + } + // 批量任务数量限制,过快会导致无法删除 + return y.WaitBatchTask("DELETE", resp.TaskID, time.Millisecond*200) +} diff --git a/drivers/baidu_netdisk/driver.go b/drivers/baidu_netdisk/driver.go index 25864a105..8d3d594cc 100644 --- a/drivers/baidu_netdisk/driver.go +++ b/drivers/baidu_netdisk/driver.go @@ -485,4 +485,57 @@ func (d *BaiduNetdisk) GetDetails(ctx context.Context) (*model.StorageDetails, e return &model.StorageDetails{DiskUsage: du}, nil } +func (d *BaiduNetdisk) BatchMove(ctx context.Context, srcDir model.Obj, srcObjs []model.Obj, dstDir model.Obj, args model.BatchArgs) error { + var data []base.Json + + for _, obj := range srcObjs { + data = append(data, base.Json{ + "path": obj.GetPath(), + "dest": dstDir.GetPath(), + "newname": obj.GetName(), + }) + } + + _, err := d.manage("move", data) + return err +} + +func (d *BaiduNetdisk) BatchCopy(ctx context.Context, srcDir model.Obj, srcObjs []model.Obj, dstDir model.Obj, args model.BatchArgs) error { + var data []base.Json + + for _, obj := range srcObjs { + data = append(data, base.Json{ + "path": obj.GetPath(), + "dest": dstDir.GetPath(), + "newname": obj.GetName(), + }) + } + + _, err := d.manage("copy", data) + return err +} + +func (d *BaiduNetdisk) BatchRemove(ctx context.Context, batchRemoveObj model.BatchRemoveObj, args model.BatchArgs) error { + var data []string + for _, obj := range batchRemoveObj.RemoveObjs { + data = append(data, obj.GetPath()) + } + + _, err := d.manage("delete", data) + return err +} + +func (d *BaiduNetdisk) BatchRename(ctx context.Context, batchRenameObj model.BatchRenameObj, args model.BatchArgs) error { + var data []base.Json + for _, ro := range batchRenameObj.RenameObjs { + data = append(data, base.Json{ + "path": ro.GetPath(), + "newname": ro.NewName, + }) + } + + _, err := d.manage("rename", data) + return err +} + var _ driver.Driver = (*BaiduNetdisk)(nil) diff --git a/drivers/crypt/driver.go b/drivers/crypt/driver.go index 1398ff1cb..3b34e6012 
100644 --- a/drivers/crypt/driver.go +++ b/drivers/crypt/driver.go @@ -194,24 +194,12 @@ func (d *Crypt) Get(ctx context.Context, path string) (model.Obj, error) { Path: "/", }, nil } - remoteFullPath := "" - var remoteObj model.Obj - var err, err2 error - firstTryIsFolder, secondTry := guessPath(path) - remoteFullPath = d.getPathForRemote(path, firstTryIsFolder) - remoteObj, err = fs.Get(ctx, remoteFullPath, &fs.GetArgs{NoLog: true}) + + remoteObj, err := d.getEncryptedObject(ctx, path) if err != nil { - if errs.IsObjectNotFound(err) && secondTry { - // try the opposite - remoteFullPath = d.getPathForRemote(path, !firstTryIsFolder) - remoteObj, err2 = fs.Get(ctx, remoteFullPath, &fs.GetArgs{NoLog: true}) - if err2 != nil { - return nil, err2 - } - } else { - return nil, err - } + return nil, err } + var size int64 = 0 name := "" if !remoteObj.IsDir() { @@ -425,4 +413,196 @@ func (d *Crypt) GetDetails(ctx context.Context) (*model.StorageDetails, error) { // return nil, errs.NotSupport //} +func (d *Crypt) BatchMove(ctx context.Context, srcDir model.Obj, srcObjs []model.Obj, dstDir model.Obj, args model.BatchArgs) error { + + batchMover, ok := d.remoteStorage.(driver.BatchMove) + if !ok { + return errs.NotImplement + } + + srcEncryptedObj, dstEncryptedObj, encryptedObjs, err := d.convertEncryptedObj(ctx, srcDir, srcObjs, args) + if err != nil { + return err + } + + err = batchMover.BatchMove(ctx, srcEncryptedObj, encryptedObjs, dstEncryptedObj, args) + if err != nil { + return err + } + + if remoteActualPath, err := d.getActualPathForRemote(srcDir.GetPath(), true); err != nil { + log.Warnf("Failed to get actual path for remote storage: %v", err) + } else { + op.Cache.DeleteDirectory(d.remoteStorage, remoteActualPath) + } + + if remoteActualPath, err := d.getActualPathForRemote(dstDir.GetPath(), true); err != nil { + log.Warnf("Failed to get actual path for remote storage: %v", err) + } else { + op.Cache.DeleteDirectory(d.remoteStorage, remoteActualPath) + } + + 
return nil + +} + +func (d *Crypt) BatchCopy(ctx context.Context, srcDir model.Obj, srcObjs []model.Obj, dstDir model.Obj, args model.BatchArgs) error { + + batchCopier, ok := d.remoteStorage.(driver.BatchCopy) + if !ok { + return errs.NotImplement + } + + srcEncryptedObj, dstEncryptedObj, encryptedObjs, err := d.convertEncryptedObj(ctx, srcDir, srcObjs, args) + if err != nil { + return err + } + + err = batchCopier.BatchCopy(ctx, srcEncryptedObj, encryptedObjs, dstEncryptedObj, args) + if err != nil { + return err + } + + if remoteActualPath, err := d.getActualPathForRemote(dstDir.GetPath(), true); err != nil { + log.Warnf("Failed to get actual path for remote storage: %v", err) + } else { + op.Cache.DeleteDirectory(d.remoteStorage, remoteActualPath) + } + + return nil + +} + +func (d *Crypt) BatchRemove(ctx context.Context, batchRemoveObj model.BatchRemoveObj, args model.BatchArgs) error { + + batchRemover, ok := d.remoteStorage.(driver.BatchRemove) + if !ok { + return errs.NotImplement + } + + srcEncryptedObj, _, encryptedObjs, err := d.convertEncryptedObj(ctx, batchRemoveObj.Dir, batchRemoveObj.RemoveObjs, args) + if err != nil { + return err + } + + removeObj := model.BatchRemoveObj{ + Dir: srcEncryptedObj, + } + + nameSetMap := make(map[string]bool, len(encryptedObjs)) + for _, obj := range batchRemoveObj.RemoveObjs { + nameSetMap[obj.GetName()] = true + } + + for _, obj := range encryptedObjs { + decryptedName := "" + if obj.IsDir() { + decryptedName, _ = d.cipher.DecryptDirName(obj.GetName()) + if decryptedName == "" || !nameSetMap[decryptedName] { + continue + } + removeObj.RemoveObjs = append(removeObj.RemoveObjs, obj) + } else { + decryptedName, _ = d.cipher.DecryptFileName(obj.GetName()) + if decryptedName == "" || !nameSetMap[decryptedName] { + continue + } + removeObj.RemoveObjs = append(removeObj.RemoveObjs, obj) + } + } + + err = batchRemover.BatchRemove(ctx, removeObj, args) + if err != nil { + return err + } + + if remoteActualPath, err := 
d.getActualPathForRemote(batchRemoveObj.Dir.GetPath(), true); err != nil { + log.Warnf("Failed to get actual path for remote storage: %v", err) + } else { + op.Cache.DeleteDirectory(d.remoteStorage, remoteActualPath) + } + + return nil +} + +func (d *Crypt) BatchRename(ctx context.Context, batchRenameObj model.BatchRenameObj, args model.BatchArgs) error { + + batchRenamer, ok := d.remoteStorage.(driver.BatchRename) + if !ok { + return errs.NotImplement + } + + convert, err := utils.SliceConvert(batchRenameObj.RenameObjs, func(src model.RenameObj) (model.Obj, error) { + return &src, nil + }) + if err != nil { + return err + } + + srcEncryptedObj, _, encryptedObjs, err := d.convertEncryptedObj(ctx, batchRenameObj.Dir, convert, args) + if err != nil { + return err + } + + renameObj := model.BatchRenameObj{ + Dir: srcEncryptedObj, + } + + nameSetMap := make(map[string]model.RenameObj, len(encryptedObjs)) + for _, obj := range batchRenameObj.RenameObjs { + nameSetMap[obj.GetName()] = obj + } + + for _, obj := range encryptedObjs { + decryptedName := "" + encryptedName := "" + if obj.IsDir() { + decryptedName, _ = d.cipher.DecryptDirName(obj.GetName()) + if decryptedName == "" { + continue + } + + newObj, exist := nameSetMap[decryptedName] + if !exist { + continue + } + + encryptedName = d.cipher.EncryptDirName(newObj.NewName) + renameObj.RenameObjs = append(renameObj.RenameObjs, model.RenameObj{ + Obj: obj, + NewName: encryptedName, + }) + } else { + decryptedName, _ = d.cipher.DecryptFileName(obj.GetName()) + if decryptedName == "" { + continue + } + newObj, exist := nameSetMap[decryptedName] + if !exist { + continue + } + + encryptedName = d.cipher.EncryptFileName(newObj.NewName) + renameObj.RenameObjs = append(renameObj.RenameObjs, model.RenameObj{ + Obj: obj, + NewName: encryptedName, + }) + } + } + + err = batchRenamer.BatchRename(ctx, renameObj, args) + if err != nil { + return err + } + + if remoteActualPath, err := 
d.getActualPathForRemote(batchRenameObj.Dir.GetPath(), true); err != nil { + log.Warnf("Failed to get actual path for remote storage: %v", err) + } else { + op.Cache.DeleteDirectory(d.remoteStorage, remoteActualPath) + } + + return nil + +} + var _ driver.Driver = (*Crypt)(nil) diff --git a/drivers/crypt/util.go b/drivers/crypt/util.go index 417059d38..95487eb11 100644 --- a/drivers/crypt/util.go +++ b/drivers/crypt/util.go @@ -1,11 +1,16 @@ package crypt import ( + "context" stdpath "path" "path/filepath" "strings" + "github.com/OpenListTeam/OpenList/v4/internal/errs" + "github.com/OpenListTeam/OpenList/v4/internal/fs" + "github.com/OpenListTeam/OpenList/v4/internal/model" "github.com/OpenListTeam/OpenList/v4/internal/op" + log "github.com/sirupsen/logrus" ) // will give the best guessing based on the path @@ -42,3 +47,77 @@ func (d *Crypt) getActualPathForRemote(path string, isFolder bool) (string, erro _, remoteActualPath, err := op.GetStorageAndActualPath(d.getPathForRemote(path, isFolder)) return remoteActualPath, err } + +func (d *Crypt) getEncryptedObject(ctx context.Context, path string) (model.Obj, error) { + remoteFullPath := "" + var remoteObj model.Obj + var err, err2 error + firstTryIsFolder, secondTry := guessPath(path) + remoteFullPath = d.getPathForRemote(path, firstTryIsFolder) + remoteObj, err = fs.Get(ctx, remoteFullPath, &fs.GetArgs{NoLog: true}) + if err != nil { + if errs.IsObjectNotFound(err) && secondTry { + // try the opposite + remoteFullPath = d.getPathForRemote(path, !firstTryIsFolder) + remoteObj, err2 = fs.Get(ctx, remoteFullPath, &fs.GetArgs{NoLog: true}) + if err2 != nil { + return nil, err2 + } + } else { + return nil, err + } + } + return remoteObj, nil +} + +func (d *Crypt) convertEncryptedObj(ctx context.Context, srcDir model.Obj, srcObjs []model.Obj, args model.BatchArgs) (model.Obj, model.Obj, []model.Obj, error) { + + srcEncryptedObj, err := d.getEncryptedObject(ctx, args.SrcDirActualPath) + if err != nil { + return nil, nil, 
nil, err + } + + var dstEncryptedObj model.Obj + if args.DstDirActualPath != "" { + dstEncryptedObj, err = d.getEncryptedObject(ctx, args.DstDirActualPath) + if err != nil { + return nil, nil, nil, err + } + } + + path := srcDir.GetPath() + + objs, err := fs.List(ctx, d.getPathForRemote(path, true), &fs.ListArgs{NoLog: true, Refresh: false}) + if err != nil { + return nil, nil, nil, err + } + + nameSet := make(map[string]bool) + for _, obj := range srcObjs { + nameSet[obj.GetName()] = true + } + + var encryptedObjs []model.Obj + for _, obj := range objs { + if obj.IsDir() { + dirName, err1 := d.cipher.DecryptDirName(obj.GetName()) + if err1 != nil { + log.Warnf("failed to decrypt dir name: %v", err1) + continue + } + if nameSet[dirName] { + encryptedObjs = append(encryptedObjs, obj) + } + } else { + fileName, err1 := d.cipher.DecryptFileName(obj.GetName()) + if err1 != nil { + log.Warnf("failed to decrypt file name: %v", err1) + continue + } + if nameSet[fileName] { + encryptedObjs = append(encryptedObjs, obj) + } + } + } + return srcEncryptedObj, dstEncryptedObj, encryptedObjs, nil +} diff --git a/drivers/doubao/driver.go b/drivers/doubao/driver.go index 6d4a4fc6e..0730e7800 100644 --- a/drivers/doubao/driver.go +++ b/drivers/doubao/driver.go @@ -4,6 +4,7 @@ import ( "context" "errors" "net/http" + "slices" "strconv" "strings" "time" @@ -305,6 +306,66 @@ func (d *Doubao) ArchiveDecompress(ctx context.Context, srcObj, dstDir model.Obj return nil, errs.NotImplement } +func (d *Doubao) BatchMove(ctx context.Context, srcDir model.Obj, srcObjs []model.Obj, dstDir model.Obj, args model.BatchArgs) error { + + var srcObjIds []base.Json + + for _, obj := range srcObjs { + srcObjIds = append(srcObjIds, base.Json{"id": obj.GetID()}) + } + + currentParentId := srcObjs[0].GetPath() + + // Doubao service limits the number of files that can be moved in one request + for movingFiles := range slices.Chunk(srcObjs, 50) { + if err := d.WaitLimit(ctx); err != nil { + return err + } + 
var r UploadNodeResp + _, err := d.request("/samantha/aispace/move_node", http.MethodPost, func(req *resty.Request) { + req.SetBody(base.Json{ + "node_list": movingFiles, + "current_parent_id": currentParentId, + "target_parent_id": dstDir.GetID(), + }) + }, &r) + if err != nil { + return err + } + } + + return nil + +} + +func (d *Doubao) BatchRemove(ctx context.Context, batchRemoveObj model.BatchRemoveObj, args model.BatchArgs) error { + if err := d.WaitLimit(ctx); err != nil { + return err + } + + var srcObjIds []base.Json + + for _, obj := range batchRemoveObj.RemoveObjs { + srcObjIds = append(srcObjIds, base.Json{"id": obj.GetID()}) + } + + for removingFiles := range slices.Chunk(srcObjIds, 50) { + if err := d.WaitLimit(ctx); err != nil { + return err + } + + var r BaseResp + _, err := d.request("/samantha/aispace/delete_node", http.MethodPost, func(req *resty.Request) { + req.SetBody(base.Json{"node_list": removingFiles}) + }, &r) + if err != nil { + return err + } + } + + return nil +} + //func (d *Doubao) Other(ctx context.Context, args model.OtherArgs) (interface{}, error) { // return nil, errs.NotSupport //} diff --git a/drivers/quark_open/driver.go b/drivers/quark_open/driver.go index 7b6b31331..fe9a27696 100644 --- a/drivers/quark_open/driver.go +++ b/drivers/quark_open/driver.go @@ -231,4 +231,40 @@ func (d *QuarkOpen) Put(ctx context.Context, dstDir model.Obj, stream model.File return d.upFinish(ctx, pre, partInfo, etags) } +func (d *QuarkOpen) BatchMove(ctx context.Context, srcDir model.Obj, srcObjs []model.Obj, dstDir model.Obj, args model.BatchArgs) error { + var srcObjIds []string + for _, srcObj := range srcObjs { + srcObjIds = append(srcObjIds, srcObj.GetID()) + } + + data := base.Json{ + "action_type": 1, + "fid_list": srcObjIds, + "to_pdir_fid": dstDir.GetID(), + } + _, err := d.request(ctx, "/open/v1/file/move", http.MethodPost, func(req *resty.Request) { + req.SetBody(data) + }, nil) + + return err +} + +func (d *QuarkOpen) BatchRemove(ctx 
context.Context, batchRemoveObj model.BatchRemoveObj, args model.BatchArgs) error { + + var srcObjIds []string + for _, srcObj := range batchRemoveObj.RemoveObjs { + srcObjIds = append(srcObjIds, srcObj.GetID()) + } + + data := base.Json{ + "action_type": 1, + "fid_list": srcObjIds, + } + _, err := d.request(ctx, "/open/v1/file/delete", http.MethodPost, func(req *resty.Request) { + req.SetBody(data) + }, nil) + + return err +} + var _ driver.Driver = (*QuarkOpen)(nil) diff --git a/drivers/quark_uc/driver.go b/drivers/quark_uc/driver.go index 68406ea9f..6a6ab4aac 100644 --- a/drivers/quark_uc/driver.go +++ b/drivers/quark_uc/driver.go @@ -224,4 +224,41 @@ func (d *QuarkOrUC) GetDetails(ctx context.Context) (*model.StorageDetails, erro }, nil } +func (d *QuarkOrUC) BatchMove(ctx context.Context, srcDir model.Obj, srcObjs []model.Obj, dstDir model.Obj, args model.BatchArgs) error { + var srcObjIds []string + for _, srcObj := range srcObjs { + srcObjIds = append(srcObjIds, srcObj.GetID()) + } + + data := base.Json{ + "action_type": 1, + "exclude_fids": []string{}, + "filelist": srcObjIds, + "to_pdir_fid": dstDir.GetID(), + } + _, err := d.request("/file/move", http.MethodPost, func(req *resty.Request) { + req.SetBody(data) + }, nil) + + return err +} + +func (d *QuarkOrUC) BatchRemove(ctx context.Context, batchRemoveObj model.BatchRemoveObj, args model.BatchArgs) error { + + var srcObjIds []string + for _, srcObj := range batchRemoveObj.RemoveObjs { + srcObjIds = append(srcObjIds, srcObj.GetID()) + } + + data := base.Json{ + "action_type": 1, + "exclude_fids": []string{}, + "filelist": srcObjIds, + } + _, err := d.request("/file/delete", http.MethodPost, func(req *resty.Request) { + req.SetBody(data) + }, nil) + return err +} + var _ driver.Driver = (*QuarkOrUC)(nil) diff --git a/drivers/s3/driver.go b/drivers/s3/driver.go index a9edef10b..fe2980386 100644 --- a/drivers/s3/driver.go +++ b/drivers/s3/driver.go @@ -243,4 +243,22 @@ func (d *S3) GetDirectUploadInfo(ctx 
context.Context, _ string, dstDir model.Obj }, nil } +func (d *S3) BatchRemove(ctx context.Context, batchRemoveObj model.BatchRemoveObj, args model.BatchArgs) error { + var keys []string + + for _, obj := range batchRemoveObj.RemoveObjs { + if obj.IsDir() { + subDirKeys, err := d.collectAllKeys(ctx, obj.GetPath()) + if err != nil { + return err + } + keys = append(keys, subDirKeys...) + } else { + keys = append(keys, getKey(obj.GetPath(), false)) + } + } + + return d.batchDelete(keys) +} + var _ driver.Driver = (*S3)(nil) diff --git a/drivers/s3/util.go b/drivers/s3/util.go index 8b0fe4b26..3e43b3fa9 100644 --- a/drivers/s3/util.go +++ b/drivers/s3/util.go @@ -6,6 +6,7 @@ import ( "net/http" "net/url" "path" + "slices" "strings" "github.com/OpenListTeam/OpenList/v4/internal/model" @@ -246,33 +247,83 @@ func (d *S3) copyDir(ctx context.Context, src string, dst string) error { return nil } +func (d *S3) removeFile(src string) error { + key := getKey(src, false) + input := &s3.DeleteObjectInput{ + Bucket: &d.Bucket, + Key: &key, + } + _, err := d.client.DeleteObject(input) + return err +} + func (d *S3) removeDir(ctx context.Context, src string) error { - objs, err := op.List(ctx, d, src, model.ListArgs{}) + + keys, err := d.collectAllKeys(ctx, src) if err != nil { return err } - for _, obj := range objs { - cSrc := path.Join(src, obj.GetName()) - if obj.IsDir() { - err = d.removeDir(ctx, cSrc) - } else { - err = d.removeFile(cSrc) + + err2 := d.batchDelete(keys) + if err2 != nil { + return err2 + } + + return nil +} + +func (d *S3) batchDelete(keys []string) error { + for deletingKeys := range slices.Chunk(keys, 1000) { + var objectsToDelete []*s3.ObjectIdentifier + for _, key := range deletingKeys { + objectsToDelete = append(objectsToDelete, &s3.ObjectIdentifier{Key: aws.String(key)}) } - if err != nil { + + input := &s3.DeleteObjectsInput{ + Bucket: aws.String(d.Bucket), + Delete: &s3.Delete{ + Objects: objectsToDelete, + Quiet: aws.Bool(true), + }, + } + + if _, 
err := d.client.DeleteObjects(input); err != nil { return err } } - _ = d.removeFile(path.Join(src, getPlaceholderName(d.Placeholder))) - _ = d.removeFile(path.Join(src, d.Placeholder)) return nil } -func (d *S3) removeFile(src string) error { - key := getKey(src, false) - input := &s3.DeleteObjectInput{ - Bucket: &d.Bucket, - Key: &key, +func (d *S3) collectAllKeys(ctx context.Context, prefix string) ([]string, error) { + var keys []string + + objs, err := op.List(ctx, d, prefix, model.ListArgs{}) + if err != nil { + return nil, err } - _, err := d.client.DeleteObject(input) - return err + + for _, obj := range objs { + cSrc := path.Join(prefix, obj.GetName()) + if obj.IsDir() { + subKeys, err := d.collectAllKeys(ctx, cSrc) + if err != nil { + return nil, err + } + keys = append(keys, subKeys...) + + keys = append(keys, + getKey(path.Join(cSrc, getPlaceholderName(d.Placeholder)), false), + getKey(path.Join(cSrc, d.Placeholder), false), + ) + } else { + keys = append(keys, getKey(cSrc, false)) + } + } + + keys = append(keys, + getKey(path.Join(prefix, getPlaceholderName(d.Placeholder)), false), + getKey(path.Join(prefix, d.Placeholder), false), + ) + + return keys, nil } diff --git a/internal/driver/driver.go b/internal/driver/driver.go index 1f73d35b6..a2d64adf8 100644 --- a/internal/driver/driver.go +++ b/internal/driver/driver.go @@ -227,3 +227,19 @@ type DirectUploader interface { // return errs.NotImplement if the driver does not support the given direct upload tool GetDirectUploadInfo(ctx context.Context, tool string, dstDir model.Obj, fileName string, fileSize int64) (any, error) } + +type BatchMove interface { + BatchMove(ctx context.Context, srcDir model.Obj, srcObjs []model.Obj, dstDir model.Obj, args model.BatchArgs) error +} + +type BatchCopy interface { + BatchCopy(ctx context.Context, srcDir model.Obj, srcObjs []model.Obj, dstDir model.Obj, args model.BatchArgs) error +} + +type BatchRemove interface { + BatchRemove(ctx context.Context, batchRemoveObj 
model.BatchRemoveObj, args model.BatchArgs) error +} + +type BatchRename interface { + BatchRename(ctx context.Context, batchRenameObj model.BatchRenameObj, args model.BatchArgs) error +} diff --git a/internal/fs/batch.go b/internal/fs/batch.go new file mode 100644 index 000000000..11c74089a --- /dev/null +++ b/internal/fs/batch.go @@ -0,0 +1,95 @@ +package fs + +import ( + "context" + + "github.com/OpenListTeam/OpenList/v4/internal/driver" + "github.com/OpenListTeam/OpenList/v4/internal/op" + "github.com/pkg/errors" +) + +func BatchMove(ctx context.Context, srcDirPath, dstDirPath string, objectNames []string) (bool, error) { + return batchFsOperate(ctx, srcDirPath, dstDirPath, objectNames, + func(d driver.Driver) bool { + _, ok := d.(driver.BatchMove) + return ok + }, + op.BatchMove, + ) +} + +func BatchCopy(ctx context.Context, srcDirPath, dstDirPath string, objectNames []string) (bool, error) { + return batchFsOperate(ctx, srcDirPath, dstDirPath, objectNames, + func(d driver.Driver) bool { + _, ok := d.(driver.BatchCopy) + return ok + }, + op.BatchCopy, + ) +} + +func BatchRemove(ctx context.Context, srcDirPath string, objectNames []string) (bool, error) { + + srcStorage, srcActualPath, err := op.GetStorageAndActualPath(srcDirPath) + if err != nil { + return false, errors.WithMessage(err, "failed to get source storage") + } + + if _, ok := srcStorage.(driver.BatchRemove); !ok { + return false, nil + } else { + err1 := op.BatchRemove(ctx, srcStorage, srcActualPath, objectNames) + if err1 != nil { + return false, err1 + } + return true, nil + } + +} + +func BatchRename(ctx context.Context, srcDirPath string, nameMapping map[string]string) (bool, error) { + srcStorage, srcActualPath, err := op.GetStorageAndActualPath(srcDirPath) + if err != nil { + return false, errors.WithMessage(err, "failed to get source storage") + } + + // must check the BatchRename capability here, not BatchRemove + if _, ok := srcStorage.(driver.BatchRename); !ok { + return false, nil + } else { + err1 := op.BatchRename(ctx, srcStorage, srcActualPath, 
nameMapping) + if err1 != nil { + return false, err1 + } + return true, nil + } + +} + +func batchFsOperate(ctx context.Context, srcDirPath, dstDirPath string, objectNames []string, + capabilityCheck func(d driver.Driver) bool, + operation func(ctx context.Context, storage driver.Driver, srcPath, dstPath string, names []string) error) (bool, error) { + + srcStorage, srcActualPath, err := op.GetStorageAndActualPath(srcDirPath) + if err != nil { + return false, errors.WithMessage(err, "failed to get source storage") + } + dstStorage, dstActualPath, err := op.GetStorageAndActualPath(dstDirPath) + if err != nil { + return false, errors.WithMessage(err, "failed to get destination storage") + } + + if srcStorage.GetStorage() != dstStorage.GetStorage() { + return false, nil + } + + if !capabilityCheck(srcStorage) { + return false, nil + } + + err = operation(ctx, srcStorage, srcActualPath, dstActualPath, objectNames) + if err != nil { + return false, err + } + + return true, nil +} diff --git a/internal/model/args.go b/internal/model/args.go index a1f811171..c93753ea3 100644 --- a/internal/model/args.go +++ b/internal/model/args.go @@ -120,3 +120,8 @@ func (r *RangeReadCloser) RangeRead(ctx context.Context, httpRange http_range.Ra r.Add(rc) return rc, err } + +type BatchArgs struct { + SrcDirActualPath string + DstDirActualPath string +} diff --git a/internal/model/object.go b/internal/model/object.go index 8e5cdf047..921c8382c 100644 --- a/internal/model/object.go +++ b/internal/model/object.go @@ -112,3 +112,18 @@ type ObjectProvider struct { Object Provider } + +type BatchRemoveObj struct { + Dir Obj + RemoveObjs []Obj +} + +type BatchRenameObj struct { + Dir Obj + RenameObjs []RenameObj +} + +type RenameObj struct { + Obj + NewName string +} diff --git a/internal/op/batch.go b/internal/op/batch.go new file mode 100644 index 000000000..10c4298e5 --- /dev/null +++ b/internal/op/batch.go @@ -0,0 +1,196 @@ +package op + +import ( + "context" + + 
"github.com/OpenListTeam/OpenList/v4/internal/driver" + "github.com/OpenListTeam/OpenList/v4/internal/errs" + "github.com/OpenListTeam/OpenList/v4/internal/model" + "github.com/OpenListTeam/OpenList/v4/pkg/utils" + "github.com/pkg/errors" +) + +func BatchMove(ctx context.Context, storage driver.Driver, srcDirPath, dstDirPath string, movingObjs []string) error { + + return batchOperate(ctx, storage, srcDirPath, dstDirPath, movingObjs, func(storage driver.Driver, srcDir, dstDir model.Obj, changingObjs []model.Obj) error { + batchOperator, ok := storage.(driver.BatchMove) + if !ok { + return errors.New("storage driver doesn't support batch move") + } + + return batchOperator.BatchMove(ctx, srcDir, changingObjs, dstDir, model.BatchArgs{ + SrcDirActualPath: srcDirPath, + DstDirActualPath: dstDirPath, + }) + }) + +} + +func BatchCopy(ctx context.Context, storage driver.Driver, srcDirPath, dstDirPath string, copingObjs []string) error { + + return batchOperate(ctx, storage, srcDirPath, dstDirPath, copingObjs, func(storage driver.Driver, srcDir, dstDir model.Obj, changingObjs []model.Obj) error { + batchOperator, ok := storage.(driver.BatchCopy) + if !ok { + return errors.New("storage driver doesn't support batch copy") + } + + return batchOperator.BatchCopy(ctx, srcDir, changingObjs, dstDir, model.BatchArgs{ + SrcDirActualPath: srcDirPath, + DstDirActualPath: dstDirPath, + }) + }) + +} + +func BatchRemove(ctx context.Context, storage driver.Driver, srcDirPath string, removingObjs []string) error { + + batchOperator, ok := storage.(driver.BatchRemove) + if !ok { + return errors.New("storage driver doesn't support batch remove") + } + + if storage.Config().CheckStatus && storage.GetStorage().Status != WORK { + return errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status) + } + + srcDirPath = utils.FixAndCleanPath(srcDirPath) + + srcDirFiles, err := List(ctx, storage, srcDirPath, model.ListArgs{}) + if err != nil { + return err + } + + 
srcDir, err := Get(ctx, storage, srcDirPath) + if err != nil { + return err + } + + removingNameSet := make(map[string]bool) + for _, obj := range removingObjs { + removingNameSet[obj] = true + } + + batchRemoveObj := model.BatchRemoveObj{ + Dir: srcDir, + } + for _, obj := range srcDirFiles { + if removingNameSet[obj.GetName()] { + batchRemoveObj.RemoveObjs = append(batchRemoveObj.RemoveObjs, obj) + } + } + + err = batchOperator.BatchRemove(ctx, batchRemoveObj, model.BatchArgs{ + SrcDirActualPath: srcDirPath, + }) + if err != nil { + return err + } + + Cache.DeleteDirectory(storage, srcDirPath) + + return nil +} + +func BatchRename(ctx context.Context, storage driver.Driver, srcDirPath string, nameMapping map[string]string) error { + + batchOperator, ok := storage.(driver.BatchRename) + if !ok { + return errors.New("storage driver doesn't support batch rename") + } + + if storage.Config().CheckStatus && storage.GetStorage().Status != WORK { + return errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status) + } + + srcDirPath = utils.FixAndCleanPath(srcDirPath) + + srcDirFiles, err := List(ctx, storage, srcDirPath, model.ListArgs{}) + if err != nil { + return err + } + + srcDir, err := Get(ctx, storage, srcDirPath) + if err != nil { + return err + } + + batchRenameObj := model.BatchRenameObj{ + Dir: srcDir, + } + for _, obj := range srcDirFiles { + if newName, exist := nameMapping[obj.GetName()]; exist { + batchRenameObj.RenameObjs = append(batchRenameObj.RenameObjs, model.RenameObj{ + Obj: obj, + NewName: newName, + }) + } + } + + err = batchOperator.BatchRename(ctx, batchRenameObj, model.BatchArgs{ + SrcDirActualPath: srcDirPath, + }) + if err != nil { + return err + } + + Cache.DeleteDirectory(storage, srcDirPath) + + return nil +} + +func batchOperate(ctx context.Context, storage driver.Driver, srcDirPath, dstDirPath string, changingObjs []string, + operation func(storage driver.Driver, srcDir, dstDir model.Obj, changingObjs 
[]model.Obj) error) error { + + if storage.Config().CheckStatus && storage.GetStorage().Status != WORK { + return errors.WithMessagef(errs.StorageNotInit, "storage status: %s", storage.GetStorage().Status) + } + + srcDirPath = utils.FixAndCleanPath(srcDirPath) + dstDirPath = utils.FixAndCleanPath(dstDirPath) + + if srcDirPath == dstDirPath { + return errors.New("src and dst can't be the same") + } + + srcDirFiles, err := List(ctx, storage, srcDirPath, model.ListArgs{}) + if err != nil { + return err + } + + srcDir, err := Get(ctx, storage, srcDirPath) + if err != nil { + return err + } + + dstDir, err := Get(ctx, storage, dstDirPath) + if err != nil { + return err + } + + changingObjMap := make(map[string]bool, len(changingObjs)) + for _, obj := range changingObjs { + changingObjMap[obj] = true + } + + var changingFiles []model.Obj + for _, file := range srcDirFiles { + if changingObjMap[file.GetName()] { + changingFiles = append(changingFiles, file) + } + } + + if len(changingFiles) == 0 { + return errors.New("file doesn't exist") + } + + err = operation(storage, srcDir, dstDir, changingFiles) + if err != nil { + return err + } + + Cache.DeleteDirectory(storage, srcDirPath) + Cache.DeleteDirectory(storage, dstDirPath) + + return nil + +} diff --git a/server/handles/fsbatch.go b/server/handles/fsbatch.go index 162419f7b..265a2f7f2 100644 --- a/server/handles/fsbatch.go +++ b/server/handles/fsbatch.go @@ -169,6 +169,9 @@ func FsBatchRename(c *gin.Context) { } } common.GinWithValue(c, conf.MetaKey, meta) + + nameMapping := make(map[string]string) + for _, renameObject := range req.RenameObjects { if renameObject.SrcName == "" || renameObject.NewName == "" { continue @@ -178,8 +181,25 @@ func FsBatchRename(c *gin.Context) { common.ErrorResp(c, err, 403) return } - filePath := fmt.Sprintf("%s/%s", reqPath, renameObject.SrcName) - if err := fs.Rename(c.Request.Context(), filePath, renameObject.NewName); err != nil { + + nameMapping[renameObject.SrcName] = 
renameObject.NewName + } + clear(req.RenameObjects) + + batchRename, err := fs.BatchRename(c.Request.Context(), reqPath, nameMapping) + if err != nil && !errors.Is(err, errs.NotImplement) { + common.ErrorResp(c, err, 500) + return + } else if batchRename { + common.SuccessResp(c, gin.H{ + "message": "Rename operations completed immediately", + }) + return + } + + for srcName, newName := range nameMapping { + filePath := fmt.Sprintf("%s/%s", reqPath, srcName) + if err := fs.Rename(c.Request.Context(), filePath, newName); err != nil { + common.ErrorResp(c, err, 500) + return + } diff --git a/server/handles/fsmanage.go b/server/handles/fsmanage.go index 3fe86726f..dab6d8796 100644 --- a/server/handles/fsmanage.go +++ b/server/handles/fsmanage.go @@ -90,14 +90,42 @@ func FsMove(c *gin.Context) { } if !req.Overwrite { + + // Previously, the existence check was done via the Get() method, which could trigger multiple API calls. + // If the target driver does not implement Get(), it would fallback to a List operation. + // When the target folder is empty, the cache mechanism cannot take effect, leading to a large number of List requests. + // The more files being operated on, the more requests are sent, resulting in very long response times. + // Therefore, we directly retrieve all files under the target folder to perform the existence check. 
+ dstDirFiles, err1 := fs.List(c.Request.Context(), dstDir, &fs.ListArgs{NoLog: true}) + if err1 != nil { + common.ErrorResp(c, err1, 500) + return + } + + nameSet := make(map[string]bool) + for _, file := range dstDirFiles { + nameSet[file.GetName()] = true + } + for _, name := range req.Names { - if res, _ := fs.Get(c.Request.Context(), stdpath.Join(dstDir, name), &fs.GetArgs{NoLog: true}); res != nil { + if nameSet[name] { common.ErrorStrResp(c, fmt.Sprintf("file [%s] exists", name), 403) return } } } + batchMove, err := fs.BatchMove(c.Request.Context(), req.SrcDir, req.DstDir, req.Names) + if err != nil && !errors.Is(err, errs.NotImplement) { + common.ErrorResp(c, err, 500) + return + } else if batchMove { + common.SuccessResp(c, gin.H{ + "message": "Move operations completed immediately", + }) + return + } + // Create all tasks immediately without any synchronous validation // All validation will be done asynchronously in the background var addedTasks []task.TaskExtensionInfo @@ -152,14 +180,37 @@ func FsCopy(c *gin.Context) { } if !req.Overwrite { + + dstDirFiles, err1 := fs.List(c.Request.Context(), dstDir, &fs.ListArgs{NoLog: true}) + if err1 != nil { + common.ErrorResp(c, err1, 500) + return + } + + nameSet := make(map[string]bool) + for _, file := range dstDirFiles { + nameSet[file.GetName()] = true + } + for _, name := range req.Names { - if res, _ := fs.Get(c.Request.Context(), stdpath.Join(dstDir, name), &fs.GetArgs{NoLog: true}); res != nil { + if nameSet[name] { common.ErrorStrResp(c, fmt.Sprintf("file [%s] exists", name), 403) return } } } + batchCopy, err := fs.BatchCopy(c.Request.Context(), req.SrcDir, req.DstDir, req.Names) + if err != nil && !errors.Is(err, errs.NotImplement) { + common.ErrorResp(c, err, 500) + return + } else if batchCopy { + common.SuccessResp(c, gin.H{ + "message": "Copy operations completed immediately", + }) + return + } + // Create all tasks immediately without any synchronous validation // All validation will be done 
asynchronously in the background var addedTasks []task.TaskExtensionInfo @@ -260,6 +310,18 @@ func FsRemove(c *gin.Context) { common.ErrorResp(c, err, 403) return } + + batchRemove, err := fs.BatchRemove(c.Request.Context(), reqDir, req.Names) + if err != nil && !errors.Is(err, errs.NotImplement) { + common.ErrorResp(c, err, 500) + return + } else if batchRemove { + common.SuccessResp(c, gin.H{ + "message": "Remove operations completed immediately", + }) + return + } + for _, name := range req.Names { err := fs.Remove(c.Request.Context(), stdpath.Join(reqDir, name)) if err != nil {