Chunked file upload with the SSM stack


First, add the two required dependencies to the POM:

<dependency>
    <groupId>cn.hutool</groupId>
    <artifactId>hutool-all</artifactId>
    <version>4.0.12</version>
</dependency>

<dependency>
    <groupId>com.github.tobato</groupId>
    <artifactId>fastdfs-client</artifactId>
    <version>1.26.2</version>
</dependency>

The front end splits the file into chunks and uploads them one at a time; every chunk carries an index. Before anything is pushed to the FastDFS file server, a check step runs first: it looks up whether this file already exists or has been uploaded before. If a previous upload was interrupted, the check returns the chunk index where it stopped, so the front end can resume from there (a rough client-side sketch of the whole sequence follows the upload method further below).

public RestResponse<CheckFileResult> checkFile(@RequestParam Map<String, Object> paramMap,HttpServletRequest request) throws BusinessException  {

	String fileMd5 = (String) paramMap.get("fileMd5");
	Long userId = Optional.ofNullable(request.getHeader("userId")).map(Long::valueOf).orElse(1L);
	if (StrUtil.isEmpty(fileMd5)) {
		return new RestResponse<>(RestRespCode.PARAM_ERROR_md5, message(MessageKeys.PARAM_ERROR_md5), null);
	}
	CheckFileResult checkFileResult = new CheckFileResult();
	// look up files that have already finished uploading
    List<String> fileList= jedisClusterTemplate.lrange(UpLoadConstant.completedList,0,-1);
    if (CollUtil.isNotEmpty(fileList)){
        for (String e:fileList){
            JSONObject obj= JSONUtil.parseObj(e);
            if (obj.get("md5").equals(fileMd5)){
                checkFileResult.setTotalSize(obj.getLong("lenght"));
                checkFileResult.setViewPath(obj.getStr("url"));
                return new RestResponse<CheckFileResult>(RestRespCode.OK, message(MessageKeys.SYSTEM_SUCCESS), checkFileResult);
            }
        }
    }
	// check whether the upload lock for this file is already held
	String lockName = UpLoadConstant.currLocks + fileMd5;
	Long lock = jedisClusterTemplate.incrBy(lockName, 1);
	String lockOwner = UpLoadConstant.lockOwner + fileMd5;
	String chunkCurrkey = UpLoadConstant.chunkCurr + fileMd5;
	if (lock > 1) {
		checkFileResult.setLock(1);
		// check whether the caller is the owner of the lock; if so, let it through
		String owner = jedisClusterTemplate.get(lockOwner);

		if (StrUtil.isEmpty(owner)) {
			return new RestResponse<>(RestRespCode.UNABLE_TO_OBTAIN_FILE_LOCK_OWNER, message(MessageKeys.UNABLE_TO_OBTAIN_FILE_LOCK_OWNER), null);
		} else {
			if (Long.valueOf(owner).equals(userId)) {
				String chunkCurr = jedisClusterTemplate.get(chunkCurrkey);
				if (StrUtil.isEmpty(chunkCurr)) {
					return new RestResponse<>(RestRespCode.NOT_GET_CURRENT_FILE_CHUNKCURR, message(MessageKeys.NOT_GET_CURRENT_FILE_CHUNKCURR), null);
				}
				checkFileResult.setChunkCurr(Convert.toInt(chunkCurr));
				return new RestResponse<CheckFileResult>(RestRespCode.OK, message(MessageKeys.SYSTEM_SUCCESS),
						checkFileResult);
			} else {
				return new RestResponse<>(RestRespCode.PARAM_ERROR, message(MessageKeys.SYS_ERROR), null);
			}
		}
	} else {
		// initialize the lock and the chunk pointer

		saveRedisDataToJedis(lockOwner, String.valueOf(userId));
		saveRedisDataToJedis(chunkCurrkey, "0");
		checkFileResult.setChunkCurr(0);
		return new RestResponse<CheckFileResult>(RestRespCode.OK, message(MessageKeys.SYSTEM_SUCCESS), checkFileResult);
	}

}
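Both controller methods write values back to Redis through a small helper, saveRedisDataToJedis, which the listings here do not show. A minimal sketch of what it might look like, assuming jedisClusterTemplate exposes a plain set(String, String) operation the way JedisCluster does; the real helper may also set an expiry:

private void saveRedisDataToJedis(String key, String value) {
    // assumption: a plain SET is enough; add an expire if stale upload state should age out
    jedisClusterTemplate.set(key, value);
}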

The chunk-upload endpoint itself relies on Redis to record which chunk index is expected next, so duplicate or out-of-order chunks can be rejected.

public RestResponse<List<Map<String, Object>>> upload_do(@RequestParam Map<String, Object> paramMap,
		HttpServletRequest request) throws BusinessException  {

    List<Map<String, Object>> resultMap = new ArrayList<Map<String, Object>>();

    String noGroupPath; // path on FastDFS without the group prefix
    String fileMd5= (String) paramMap.get("fileMd5");
    String chunklockName= UpLoadConstant.chunkLock+fileMd5;

    String temOwner= RandomUtil.randomUUID();
    // whether this request actually acquired the chunk lock
    boolean currOwner=false;
    try {
        if (!paramMap.containsKey("chunk")){
            paramMap.put("chunk","0");
        }

        if (!paramMap.containsKey("chunks")){
            paramMap.put("chunks","1");
        }
      Long lock= jedisClusterTemplate.incrBy(chunklockName,1);
        if (lock>1){
            return new RestResponse<>(RestRespCode.PARAM_ERROR, message(MessageKeys.SYS_ERROR), null);
        }
        // this request now holds the chunk lock
        currOwner=true;

        List<MultipartFile> files = ((MultipartHttpServletRequest) request).getFiles("file");
        MultipartFile file = null;
        String chunk = (String) paramMap.get("chunk");
        String chunkCurrkey = UpLoadConstant.chunkCurr + fileMd5; // Redis key recording which chunk should arrive next (0-based)
        String chunkCurr = jedisClusterTemplate.get(chunkCurrkey);
        noGroupPath = "";
        Integer chunkSize = Convert.toInt(paramMap.get("chunkSize"));
        if (StrUtil.isEmpty(chunkCurr)){

            return new RestResponse<>(RestRespCode.NOT_GET_CURRENT_FILE_CHUNKCURR, message(MessageKeys.NOT_GET_CURRENT_FILE_CHUNKCURR), null);
        }
        Integer chunkCurr_int= Convert.toInt(chunkCurr);
        Integer chunk_int= Convert.toInt(chunk);


        if (chunk_int < chunkCurr_int) {
            return new RestResponse<>(RestRespCode.REPEAT_UPLOAD, message(MessageKeys.REPEAT_UPLOAD), null);
        } else if (chunk_int > chunkCurr_int) {
            return new RestResponse<>(RestRespCode.WAIT_A_MOMENT, message(MessageKeys.WAIT_A_MOMENT), null);
        }
		StorePath path = null;
		String name =null;
		// multi-file upload is not supported yet; only the first file is processed
		for (int i = 0; i < files.size(); ++i) {
			Map<String, Object> map = new HashMap<String, Object>();
			file = files.get(i);
			String originalFilename = file.getOriginalFilename();
			if (!file.isEmpty()) {
				try {

					// read how many bytes of this file have already been uploaded
					Long historyUpload = 0L;
					String historyUploadStr = jedisClusterTemplate.get(UpLoadConstant.historyUpload + fileMd5);
					if (StrUtil.isNotEmpty(historyUploadStr)) {
						historyUpload = Convert.toLong(historyUploadStr);
					}
					LOG.debug("historyUpload size: " + historyUpload);
					if (chunk_int == 0) {
						// first chunk: advance the chunk pointer, then create the appender file on FastDFS
						saveRedisDataToJedis(chunkCurrkey, Convert.toStr(chunkCurr_int + 1));
						LOG.debug(chunk + ": chunk pointer in Redis incremented");
						try {
							name = FileUtil.extName((String) paramMap.get("fileName"));
							path = appendFileStorageClient.uploadAppenderFile(UpLoadConstant.DEFAULT_GROUP, file.getInputStream(),
									file.getSize(), name);
							LOG.debug(chunk + ": FastDFS upload finished");
							if (path == null) {
								// roll back the chunk pointer so the client can retry this chunk
								saveRedisDataToJedis(chunkCurrkey, Convert.toStr(chunkCurr_int));
								return new RestResponse<>(RestRespCode.ERROR_GETTING_REMOTE_FILE_PATH, message(MessageKeys.ERROR_GETTING_REMOTE_FILE_PATH), null);
							}
						} catch (Exception e) {
							// roll back the chunk pointer so the client can retry this chunk
							saveRedisDataToJedis(chunkCurrkey, Convert.toStr(chunkCurr_int));
							LOG.error("error uploading the first chunk to the remote server", e);
							return new RestResponse<>(RestRespCode.FILE_FOR_THE_FIRST_TIME_ERROR, message(MessageKeys.FILE_FOR_THE_FIRST_TIME_ERROR),
									null);
						}
						noGroupPath = path.getPath();
						saveRedisDataToJedis(UpLoadConstant.fastDfsPath + fileMd5, path.getPath());
						LOG.debug("upload file result={}", noGroupPath);
					} else {
						saveRedisDataToJedis(chunkCurrkey, Convert.toStr(chunkCurr_int + 1));
						LOG.debug(chunk + ": chunk pointer in Redis incremented");
						noGroupPath = jedisClusterTemplate.get(UpLoadConstant.fastDfsPath + fileMd5);
						if (noGroupPath == null) {
							return new RestResponse<>(RestRespCode.UPLOADED_REMOTE_SERVER_FILE_ERROR, message(MessageKeys.UPLOADED_REMOTE_SERVER_FILE_ERROR),
									null);
						}
						try {
							// Plain append mode could append the same chunk twice after repeated failures,
							// so modifyFile writes at the historyUpload offset instead; even if a duplicate
							// chunk arrives, the assembled file stays correct.
							appendFileStorageClient.modifyFile(UpLoadConstant.DEFAULT_GROUP, noGroupPath,
									file.getInputStream(), file.getSize(), historyUpload);
							LOG.debug(chunk + ": FastDFS update finished");
						} catch (Exception e) {
							// roll back the chunk pointer so the client can retry this chunk
							saveRedisDataToJedis(chunkCurrkey, Convert.toStr(chunkCurr_int));
							LOG.error("error updating the remote file", e);
							return new RestResponse<>(RestRespCode.ERROR_UPDATING_REMOTE_FILE, message(MessageKeys.ERROR_UPDATING_REMOTE_FILE),
									null);
						}
					}
					// update the uploaded-bytes counter
					historyUpload = historyUpload + file.getSize();
					saveRedisDataToJedis(UpLoadConstant.historyUpload + fileMd5, Convert.toStr(historyUpload));
					// last chunk: clean up the upload keys and persist the completed file

					String fileName = (String) paramMap.get("name");
					Long size = Convert.toLong(paramMap.get("size"));
					Integer chunks_int = Convert.toInt(paramMap.get("chunks"));
					if (chunk_int + 1 == chunks_int) {

						// persist the completed upload record; this could also be stored in MySQL
						FileResult fileResult = new FileResult();
						fileResult.setMd5(fileMd5);
						fileResult.setName(fileName);
						fileResult.setLenght(size);
						fileResult.setUrl(UpLoadConstant.DEFAULT_GROUP+"/"+noGroupPath);
						// todo
						jedisClusterTemplate.lpush(UpLoadConstant.completedList, JSONUtil.toJsonStr(fileResult));


						jedisClusterTemplate.del(UpLoadConstant.chunkCurr + fileMd5);
						jedisClusterTemplate.del(UpLoadConstant.fastDfsPath + fileMd5);
						jedisClusterTemplate.del(UpLoadConstant.currLocks + fileMd5);
						jedisClusterTemplate.del(UpLoadConstant.lockOwner + fileMd5);

					}
					map.put("originalFilename", originalFilename);
					map.put("url", UpLoadConstant.DEFAULT_GROUP+"/"+noGroupPath);

					resultMap.add(map);

				} catch (Exception e) {
					LOG.error("上傳檔案錯誤", e);
					return new RestResponse<>(RestRespCode.ERROR_UPLOADING_FILE, message(MessageKeys.ERROR_UPLOADING_FILE), null);
				}
			}
			break;
		}
	} finally {
		// only the current owner of the chunk lock may release it
		if (currOwner) {
			saveRedisDataToJedis(chunklockName,"0");
		}

	}
	return new RestResponse<List<Map<String, Object>>>(RestRespCode.OK, message(MessageKeys.SYSTEM_SUCCESS),
			resultMap);

}
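To make the call sequence concrete, here is a rough client-side sketch in Java. The file path, chunk size, and the uploadChunk placeholder are all hypothetical (in practice the browser does this in JavaScript), but the order of operations is the one the two endpoints above expect: compute the file MD5, call the check endpoint, then send the chunks strictly in order, starting from the chunkCurr it returned.

import cn.hutool.crypto.SecureUtil;

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;

public class ChunkUploadClientSketch {

    // hypothetical placeholder for the multipart POST to the upload endpoint;
    // form fields: fileMd5, chunk, chunks, chunkSize, fileName, name, size and the binary "file" part
    static void uploadChunk(String fileMd5, int chunk, int chunks, byte[] data, String fileName, long totalSize) {
        // send the request here
    }

    public static void main(String[] args) throws IOException {
        File file = new File("/tmp/big-file.zip");           // hypothetical local file
        int chunkSize = 5 * 1024 * 1024;                      // hypothetical 5 MB chunks
        String fileMd5 = SecureUtil.md5(file);                // same MD5 the check endpoint receives
        long totalSize = file.length();
        int chunks = (int) ((totalSize + chunkSize - 1) / chunkSize);

        // 1. call the check endpoint with fileMd5; an unfinished upload returns the chunkCurr to resume from
        int chunkCurr = 0;                                    // assume a fresh upload here

        try (RandomAccessFile raf = new RandomAccessFile(file, "r")) {
            for (int chunk = chunkCurr; chunk < chunks; chunk++) {
                raf.seek((long) chunk * chunkSize);
                byte[] buf = new byte[(int) Math.min(chunkSize, totalSize - (long) chunk * chunkSize)];
                raf.readFully(buf);
                // 2. upload this chunk; the server only accepts the chunk index currently stored in Redis
                uploadChunk(fileMd5, chunk, chunks, buf, file.getName(), totalSize);
            }
        }
    }
}

Sending the chunks strictly in sequence matters: as the constants class below notes, FastDFS appends (or modifies at an offset), so the server cannot accept chunks out of order.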

Helper classes used by the chunked upload:

public class CheckFileResult {

    private String fileMd5;
    // 0: lock not held, 1: lock held
    private Integer lock;
    // number of chunks
    private Integer chunkNum;
    // size of each chunk
    private Integer chunkSize;
    // index of the chunk to upload next
    private Integer chunkCurr;
    // total file size
    private Long totalSize;
    // access path
    private String viewPath;

    // getters and setters omitted
}
public class FileResult {

    // access URL of the file
    private String url;
    // file MD5
    private String md5;
    // file name
    private String name;
    // file size (field name kept as "lenght" to match the JSON key read in checkFile)
    private Long lenght;

    // getters and setters omitted
}
public class UpLoadConstant {

    private UpLoadConstant() {
    }

    private final static String uploading = "Uploading:";
    private final static String lock = uploading + "lock:";
    private final static String file = uploading + "file:";

    // per-file lock (taken before an upload or re-upload, across different users)
    public final static String currLocks = lock + "currLocks:";
    // current owner of the per-file lock
    public final static String lockOwner = lock + "lockOwner:";

    // index of the chunk the file has been uploaded up to
    public final static String chunkCurr = file + "chunkCurr:";

    // path of the file on FastDFS
    public final static String fastDfsPath = file + "fastDfsPath:";

    // default group
    public final static String DEFAULT_GROUP = "group1";

    // list of uploads that have fully completed
    public final static String completedList = uploading + "completedList";

    // per-chunk lock. It guards against concurrent requests from the same user: for example,
    // while the back end is still writing a large chunk, the page may delete the file and start
    // uploading again; without this lock a chunk could be lost.
    /* FastDFS does not pre-allocate the whole file the way a download manager does. If the server
       could create the full file zero-filled when the first chunk arrives, chunks could be uploaded
       concurrently and out of order, multiplying upload speed; doing that would mean digging into
       the FastDFS source. */
    public final static String chunkLock = lock + "chunkLock:";

    public final static String historyUpload = "historyUpload:";

}
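To make the key layout concrete, a small illustrative snippet (the MD5 value is made up) that prints the Redis keys the two controller methods end up using for a single file:

public class RedisKeyLayoutExample {
    public static void main(String[] args) {
        String fileMd5 = "e10adc3949ba59abbe56e057f20f883e";        // hypothetical file MD5
        System.out.println(UpLoadConstant.currLocks + fileMd5);     // Uploading:lock:currLocks:<md5>
        System.out.println(UpLoadConstant.lockOwner + fileMd5);     // Uploading:lock:lockOwner:<md5>
        System.out.println(UpLoadConstant.chunkCurr + fileMd5);     // Uploading:file:chunkCurr:<md5>
        System.out.println(UpLoadConstant.fastDfsPath + fileMd5);   // Uploading:file:fastDfsPath:<md5>
        System.out.println(UpLoadConstant.chunkLock + fileMd5);     // Uploading:lock:chunkLock:<md5>
        System.out.println(UpLoadConstant.historyUpload + fileMd5); // historyUpload:<md5>
        System.out.println(UpLoadConstant.completedList);           // Uploading:completedList (list of FileResult JSON)
    }
}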
    

A Spring Boot project does not need the following helper. Configuration loading in an SSM project works differently from Spring Boot, so the chunked-upload code above cannot read the address of your FastDFS file server from the usual configuration. The class below injects the tracker server address programmatically; if you have several tracker servers, simply add them all to the list.

@Configuration
@Import(FdfsClientConfig.class)
// avoid duplicate JMX bean registration
@EnableMBeanExport(registration = RegistrationPolicy.IGNORE_EXISTING)
// the class name itself does not matter
@Component
public class FastClient implements BeanPostProcessor, InitializingBean, ApplicationContextAware {

    @Override
    public Object postProcessBeforeInitialization(Object o, String s) throws BeansException {
        if (o instanceof TrackerConnectionManager) {
            // inject the FastDFS tracker address(es); add more entries for multiple trackers
            List<String> list = new ArrayList<>();
            String ip = "106.52.47.126:22122";
            list.add(ip);
            ((TrackerConnectionManager) o).setTrackerList(list);
        }
        return o;
    }

    @Override
    public Object postProcessAfterInitialization(Object o, String s) throws BeansException {
        return o;
    }

    @Override
    public void afterPropertiesSet() throws Exception {
    }

    @Override
    public void setApplicationContext(ApplicationContext applicationContext) throws BeansException {
    }
}