
Autodesk Forge and 416 (Requested Range Not Satisfiable)

I am trying to upload a Revit file to my bucket in chunks. The file is almost 13 MB. Here is my code:

function handleFileSelect(evt) {
    var files = evt.target.files;
    var file = files[0];

    var segmentSize = 1024 * 1024 * 5; // 5 MB
    var startingByte = 0;
    var endingByte = startingByte + segmentSize - 1;
    var segments = Math.ceil(file.size / segmentSize);
    var session = Math.floor(100000000 + Math.random() * -900000000);

    for (var i = 0; i < segments; i++) {
        var blob = file.slice(startingByte, endingByte);
        var url = 'https://developer.api.autodesk.com/oss/v2/buckets/' + 'linked_model' + '/objects/' + file.name + '/resumable';
        //console.log(url);
        var contentRange = 'bytes ' + startingByte + '-' + endingByte + '/' + file.size;

        $.ajax({
            type: 'PUT',
            url: url,
            data: blob,
            headers: {
                'Authorization': 'Bearer ' + token,
                'Content-Type': 'application/octet-stream',
                'Content-Range': contentRange,
                'Session-Id': session
            },
            crossDomain: true,
            processData: false,
            success: function (data) {
                console.log(i);
                startingByte = endingByte + 1;
                endingByte = startingByte + segmentSize - 1;
            },
            error: function (XMLHttpRequest, textStatus, errorThrown) {
                alert("Status: " + textStatus);
                alert("Error: " + errorThrown);
                console.log(startingByte);
                console.log(endingByte);
                console.log(file.size);
            }
        });
    }
}

It gives me the error: 416 (Requested Range Not Satisfiable).

Can anyone help?


This method fires off a bunch of asynchronous calls at practically the same time. I imagine you need to call them one after another.
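A minimal sketch of what uploading the chunks one after another could look like, using fetch with async/await (the url, token, and session id follow the question's code; this is illustrative, not the commenter's code):

async function uploadSequentially(file, url, token, sessionId) {
    const chunkSize = 1024 * 1024 * 5; // 5 MB for every chunk except possibly the last
    const segments = Math.ceil(file.size / chunkSize);
    for (let i = 0; i < segments; i++) {
        const start = i * chunkSize;
        const end = Math.min(file.size, start + chunkSize) - 1;
        // await ensures the next request starts only after this one finishes
        const response = await fetch(url, {
            method: 'PUT',
            headers: {
                'Authorization': 'Bearer ' + token,
                'Content-Type': 'application/octet-stream',
                'Content-Range': 'bytes ' + start + '-' + end + '/' + file.size,
                'Session-Id': sessionId
            },
            body: file.slice(start, end + 1) // slice's second argument is exclusive
        });
        if (!response.ok) {
            throw new Error('Chunk ' + i + ' failed with status ' + response.status);
        }
    }
}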


13 MB? Why chunk it at all? You could upload it in a single request.
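For reference, a single-request upload is a plain PUT to the objects endpoint, without the /resumable suffix (a sketch reusing the question's bucket, file, and token):

var singleUrl = 'https://developer.api.autodesk.com/oss/v2/buckets/linked_model/objects/' + encodeURIComponent(file.name);

fetch(singleUrl, {
    method: 'PUT',
    headers: {
        'Authorization': 'Bearer ' + token,
        'Content-Type': 'application/octet-stream'
    },
    body: file // the whole file in one request
}).then(function (res) { console.log(res.status); });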


It is just a sample file; I won't be sending 1 GB here. I just need to understand why this doesn't work.

Answer


I ran into the same 416 error, but in my case the problem was that I was trying to upload chunks smaller than 2 MB, which is not allowed (except for the final chunk).

Once I increased the chunk size to 5 MB, it started working. I just wrote a blog post about it: https://forge.autodesk.com/blog/nailing-large-files-uploads-forge-resumable-api
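To make the constraint concrete, here is how a 13 MB file splits into valid Content-Range values at 5 MB per chunk; only the final chunk may fall below the minimum (chunkRanges is a hypothetical helper, not from the blog post):

function chunkRanges(fileSize, chunkSize) {
    chunkSize = chunkSize || 5 * 1024 * 1024;
    var n = Math.ceil(fileSize / chunkSize);
    return Array.from({ length: n }, function (_, i) {
        var start = i * chunkSize;
        var end = Math.min(fileSize, start + chunkSize) - 1;
        return 'bytes ' + start + '-' + end + '/' + fileSize;
    });
}

console.log(chunkRanges(13 * 1024 * 1024));
// [ 'bytes 0-5242879/13631488',
//   'bytes 5242880-10485759/13631488',
//   'bytes 10485760-13631487/13631488' ]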

Below is the core snippet that handles the chunking and uploading (in Node.js).

By the way, I strongly recommend against doing this client-side the way your snippet suggests: it means handing a write-enabled access token to the web page, which compromises your app's security. Upload the file to your own server first, then push it securely to Forge as described in the article and in my sample (a rough sketch of such a server-side route follows the snippet below).

/////////////////////////////////////////////////////////
// Uploads object to bucket using resumable endpoint
//
// Dependencies: fs (Node core) and eachLimit from the
// 'async' package, e.g.:
//   const fs = require('fs')
//   const { eachLimit } = require('async')
/////////////////////////////////////////////////////////
uploadObjectChunked (getToken, bucketKey, objectKey,
    file, opts = {}) {

  return new Promise((resolve, reject) => {

    const chunkSize = opts.chunkSize || 5 * 1024 * 1024

    const nbChunks = Math.ceil(file.size / chunkSize)

    const chunksMap = Array.from({
      length: nbChunks
    }, (e, i) => i)

    // generates unique session ID
    const sessionId = this.guid()

    // prepares the upload tasks, one per chunk
    const uploadTasks = chunksMap.map((chunkIdx) => {

      const start = chunkIdx * chunkSize

      const end = Math.min(
        file.size, (chunkIdx + 1) * chunkSize) - 1

      const range = `bytes ${start}-${end}/${file.size}`

      const length = end - start + 1

      // streams only this chunk's byte range from disk
      const readStream =
        fs.createReadStream(file.path, {
          start, end
        })

      const run = async () => {

        const token = await getToken()

        return this._objectsAPI.uploadChunk(
          bucketKey, objectKey,
          length, range, sessionId,
          readStream, {},
          { autoRefresh: false }, token)
      }

      return {
        chunkIndex: chunkIdx,
        run
      }
    })

    let progress = 0

    // runs the upload tasks asynchronously in parallel;
    // the number of simultaneous uploads is defined by
    // opts.concurrentUploads
    eachLimit(uploadTasks, opts.concurrentUploads || 3,
      (task, callback) => {

        task.run().then((res) => {

          if (opts.onProgress) {

            progress += 100.0 / nbChunks

            opts.onProgress({
              progress: Math.round(progress * 100) / 100,
              chunkIndex: task.chunkIndex
            })
          }

          callback()

        }, (err) => {

          console.log('error')
          console.log(err)

          callback(err)
        })

      }, (err) => {

        if (err) {

          return reject(err)
        }

        return resolve({
          fileSize: file.size,
          bucketKey,
          objectKey,
          nbChunks
        })
      })
  })
}
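As a rough illustration of the server-side route recommended above, a hypothetical Express endpoint could receive the browser's upload and hand it to uploadObjectChunked; express, multer, getToken, and the svc instance exposing the method are all assumptions here, not part of the sample:

const express = require('express')
const multer = require('multer')

const app = express()
const upload = multer({ dest: 'uploads/' })

// The page never sees the Forge token; the server owns the upload
app.post('/api/upload', upload.single('model'), async (req, res) => {
  try {
    // svc is an instance exposing uploadObjectChunked (above);
    // getToken returns a Promise resolving to a write-enabled token
    const result = await svc.uploadObjectChunked(
      getToken, 'linked_model', req.file.originalname, {
        path: req.file.path,
        size: req.file.size
      }, {
        concurrentUploads: 3,
        onProgress: ({ progress }) => console.log(progress + '%')
      })

    res.json(result)
  } catch (err) {
    res.status(500).json({ error: err.message || err })
  }
})

app.listen(3000)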