package logic

import (
	"context"
	"encoding/json"
	"time"

	"fusenapi/server/upload/internal/svc"
	"fusenapi/server/upload/internal/types"
	"fusenapi/utils/auth"
	"fusenapi/utils/basic"
	"fusenapi/utils/hash"

	"github.com/aws/aws-sdk-go/aws"
	"github.com/aws/aws-sdk-go/service/s3"
	"github.com/zeromicro/go-zero/core/logx"
	"github.com/zeromicro/go-zero/core/mr"
)

type UploadFilesFrontendLogic struct {
	logx.Logger
	ctx    context.Context
	svcCtx *svc.ServiceContext
}

func NewUploadFilesFrontendLogic(ctx context.Context, svcCtx *svc.ServiceContext) *UploadFilesFrontendLogic {
	return &UploadFilesFrontendLogic{
		Logger: logx.WithContext(ctx),
		ctx:    ctx,
		svcCtx: svcCtx,
	}
}

// Pre-handler hook (w, r), kept for reference:
// func (l *UploadFilesFrontendLogic) BeforeLogic(w http.ResponseWriter, r *http.Request) {
// }

// Post-handler hook (w, r), e.g. for redirects; resp must be re-processed here:
// func (l *UploadFilesFrontendLogic) AfterLogic(w http.ResponseWriter, r *http.Request, resp *basic.Response) {
// 	// httpx.OkJsonCtx(r.Context(), w, resp)
// }

func (l *UploadFilesFrontendLogic) UploadFilesFrontend(req *types.UploadFilesReq, userinfo *auth.UserInfo) (resp *basic.Response) {
	// The return value must be produced via SetStatus; resp may be a nil pointer when
	// calling resp.SetStatus(basic.CodeOK, data).
	// When userinfo is passed in, it is never nil.

	var uploadInfoList []UploadInfo
	err := json.Unmarshal([]byte(req.UploadInfo), &uploadInfoList)
	if err != nil {
		logx.Error(err)
		return resp.SetStatus(basic.CodeFileUploadErr, "file upload err, params unmarshal failed")
	}

	fileLen := len(uploadInfoList)
	if fileLen == 0 {
		return resp.SetStatus(basic.CodeFileUploadErr, "file upload err, no files")
	}
	if req.ApiType == 1 && fileLen > 100 {
		return resp.SetStatus(basic.CodeFileUploadErr, "file upload err, files count is beyond the maximum")
	}

	// Target bucket name.
	var bucketName *string

	// Choose the bucket by category.
	switch req.UploadBucket {
	case 2:
		bucketName = basic.TempfileBucketName
	default:
		bucketName = basic.StorageBucketName
	}

	// Set the region on the AWS session.
	l.svcCtx.AwsSession.Config.Region = aws.String("us-west-1")
	// Create a new S3 service client.
	svc := s3.New(l.svcCtx.AwsSession)

	result, err := mr.MapReduce(func(source chan<- interface{}) {
		for _, info := range uploadInfoList {
			if info.FileSize <= 1024*1024*500 {
				// Business checks go here: validate file type, file size, etc.
				hashKey := hash.JsonHashKey(info.FileKeys)
				source <- UploadData{
					FileKey:  info.FileKeys,
					FileSize: info.FileSize,
					Bucket:   bucketName,
					HashKey:  hashKey,
				}
			}
		}
	}, func(item interface{}, writer mr.Writer[interface{}], cancel func(error)) {
		uploadDataInfo := item.(UploadData)

		uploadUrl := UploadUrl{}
		uploadUrl.Key = uploadDataInfo.FileKey
		uploadUrl.ApiType = uploadDataInfo.ApiType
		uploadUrl.ResourceType = uploadDataInfo.FileType

		// PutObjectRequest returns the request and the output placeholder; the request
		// is presigned below instead of being sent.
		s3req, _ := svc.PutObjectRequest(
			&s3.PutObjectInput{
				Bucket:        uploadDataInfo.Bucket,
				Key:           &uploadDataInfo.HashKey,
				ContentLength: aws.Int64(uploadDataInfo.FileSize),
			},
		)
		url, err := s3req.Presign(time.Minute * 5)
		if err != nil {
			logx.Error(err)
			uploadUrl.Status = 0
		} else {
			// Log the presigned URL.
			logx.Info(url)
			uploadUrl.Status = 1
			uploadUrl.ResourceUrl = url
			uploadUrl.ResourceId = uploadDataInfo.HashKey
		}
		// Notice: this write is required, otherwise the reducer never receives the item.
		writer.Write(uploadUrl)
	}, func(pipe <-chan interface{}, writer mr.Writer[interface{}], cancel func(error)) {
		uploadUrlList := make(map[string][]*UploadUrl)
		var uploadUrlListFail []*UploadUrl
		var uploadUrlListSuccess []*UploadUrl
		for p := range pipe {
			uploadUrl := p.(UploadUrl)
			if uploadUrl.Status == 1 {
				uploadUrlListSuccess = append(uploadUrlListSuccess, &uploadUrl)
			} else {
				uploadUrlListFail = append(uploadUrlListFail, &uploadUrl)
			}
		}
		// Notice: this write is required, otherwise MapReduce returns no result.
		uploadUrlList["success"] = uploadUrlListSuccess
		uploadUrlList["fail"] = uploadUrlListFail
		writer.Write(uploadUrlList)
	})
	if err != nil {
		logx.Error(err)
	}

	// Return a success response carrying the presigned upload URLs.
	return resp.SetStatus(basic.CodeOK, map[string]interface{}{
		"upload_urls": result,
	})
}