1

我们在使用 Swisscom S3 动态存储时遇到问题:在 5 个以上并行线程中对 CRUD 请求做并发测试时,Storage Service 会随机返回 403 Forbidden 而不是正确的响应;而把相同的请求逐个按顺序执行时,一切正常。我们使用的是 Swisscom S3 动态存储的并发访问。

我们使用的代码如下:

import com.amazonaws.ClientConfiguration; 
import com.amazonaws.auth.BasicAWSCredentials; 
import com.amazonaws.services.s3.AmazonS3Client; 
import com.amazonaws.services.s3.model.*; 
import com.amazonaws.util.StringInputStream; 
import org.apache.commons.io.IOUtils; 
import org.junit.Test; 

import java.io.ByteArrayInputStream; 
import java.io.IOException; 
import java.io.InputStream; 
import java.util.concurrent.CountDownLatch; 
import java.util.concurrent.ExecutorService; 
import java.util.concurrent.Executors; 
import java.util.concurrent.TimeUnit; 

/** 
* Tutorial https://javatutorial.net/java-s3-example 
*/ 
/**
 * Concurrency test against Swisscom S3 Dynamic Storage: runs the same CRUD
 * sequence from several parallel threads to try to reproduce sporadic
 * 403 Forbidden responses that do not occur under sequential execution.
 *
 * Tutorial https://javatutorial.net/java-s3-example
 *
 * NOTE: the checks below use {@code assert}, so the JVM must run with -ea
 * for them to have any effect.
 */
public class AmazonS3ManualTest {

    public static final String BUCKET_NAME = "??";
    private static String accessKey = "??";
    private static String secretKey = "??";

    /**
     * Spawns {@code maxCount} workers, each performing an independent
     * create / exists / copy / get / delete cycle under its own key prefix.
     *
     * @throws InterruptedException if the await on the completion latch is interrupted
     */
    @Test
    public void testOperations() throws IOException, InterruptedException {
        final int maxCount = 5;

        final AmazonS3Client amazonS3Client = getS3Client();

        final CountDownLatch latch = new CountDownLatch(maxCount);
        final ExecutorService executor = Executors.newFixedThreadPool(maxCount);
        try {
            for (int i = 0; i < maxCount; i++) {
                final int index = i;
                executor.submit(() -> {
                    try {
                        final String folderOne = "testFolderOne" + index;
                        final String folderTwo = "testFolderTwo" + index;
                        final String folderCopy = "copyFolder" + index;

                        // Use one consistent key spelling throughout. S3 keys should
                        // not start with '/': the original created "/folder/file.txt"
                        // but probed "folder/file.txt", which are DIFFERENT keys, so
                        // the exists() check always saw a 404.
                        createFile(amazonS3Client, folderOne + "/file.txt");
                        createFolder(amazonS3Client, folderTwo + "/");

                        exists(amazonS3Client, folderOne + "/file.txt");
                        exists(amazonS3Client, folderTwo + "/");

                        copy(amazonS3Client, folderOne + "/file.txt", folderCopy + "/filecopy.txt");

                        delete(amazonS3Client, folderOne);
                        delete(amazonS3Client, folderTwo);

                        get(amazonS3Client, folderCopy + "/filecopy.txt");
                        delete(amazonS3Client, folderCopy + "/filecopy.txt");

                        isEmptyFolder(amazonS3Client, folderCopy);
                        // BUG FIX: the original passed the literal string
                        // "/ + FolderCopy" (variable quoted into the literal),
                        // so the copy folder was never actually deleted.
                        delete(amazonS3Client, folderCopy);
                    } catch (Exception e) {
                        e.printStackTrace();
                    } finally {
                        // Count down even when the worker fails; otherwise a single
                        // assertion error makes latch.await() block for 300 s.
                        latch.countDown();
                    }
                });
            }

            if (!latch.await(300, TimeUnit.SECONDS)) {
                throw new RuntimeException("Waiting too long for the result");
            }
        } finally {
            // Previously leaked: non-daemon pool threads kept the JVM alive.
            executor.shutdown();
        }
    }

    /** Asserts that listing the given prefix returns no object summaries. */
    private void isEmptyFolder(AmazonS3Client amazonS3Client, String folder) {
        final ObjectListing objectListing = amazonS3Client.listObjects(BUCKET_NAME, folder);
        assert objectListing.getObjectSummaries().isEmpty() : "folder not empty: " + folder;
    }

    /** Downloads the object and asserts its content is non-empty. */
    private void get(AmazonS3Client amazonS3Client, String file) throws IOException {
        GetObjectRequest request = new GetObjectRequest(BUCKET_NAME, file);
        final S3Object object = amazonS3Client.getObject(request);
        // Close the content stream (try-with-resources) so the underlying HTTP
        // connection returns to the pool; leaking it starves concurrent requests.
        try (S3ObjectInputStream objectContent = object.getObjectContent()) {
            // Explicit charset: the no-charset overload uses the platform default.
            final String s = IOUtils.toString(objectContent, "UTF-8");
            assert s.length() > 0 : "empty content for " + file;
        }
    }

    /** Server-side copy within the same bucket. */
    private void copy(AmazonS3Client amazonS3Client, String source, String target) {
        CopyObjectRequest request = new CopyObjectRequest(BUCKET_NAME, source, BUCKET_NAME, target);
        amazonS3Client.copyObject(request);
    }

    /** Deletes the object at {@code path} and everything listed under it. */
    private void delete(AmazonS3Client amazonS3Client, String path) {
        deleteRecursive(amazonS3Client, path);
    }

    /**
     * Recursively deletes all objects whose keys start with {@code path},
     * then deletes {@code path} itself (folder placeholder objects end in "/").
     */
    private void deleteRecursive(AmazonS3Client amazonS3Client, String path) {
        ObjectListing objects = amazonS3Client.listObjects(BUCKET_NAME, path);

        for (S3ObjectSummary objectSummary : objects.getObjectSummaries()) {
            // Skip the prefix object itself; it is removed last, below.
            if (objectSummary.getKey().equals(path)) {
                continue;
            }

            if (objectSummary.getKey().endsWith("/")) {
                deleteRecursive(amazonS3Client, objectSummary.getKey());
            } else {
                amazonS3Client.deleteObject(BUCKET_NAME, objectSummary.getKey());
            }
        }

        amazonS3Client.deleteObject(BUCKET_NAME, path);
    }

    /**
     * Asserts the object exists via a metadata (HEAD) request.
     * A 404 fails the assertion; any other error — notably the sporadic 403
     * under investigation — is rethrown instead of being silently swallowed.
     */
    private void exists(AmazonS3Client amazonS3Client, String folder) {
        GetObjectMetadataRequest request = new GetObjectMetadataRequest(BUCKET_NAME, folder);
        try {
            final ObjectMetadata objectMetadata = amazonS3Client.getObjectMetadata(request);
            assert objectMetadata != null : "null metadata for " + folder;
        } catch (AmazonS3Exception e) {
            // Use the structured status code instead of scraping the message text.
            if (e.getStatusCode() == 404) {
                assert false : "object not found: " + folder;
            } else {
                throw e; // e.g. 403 — must surface, not vanish
            }
        }
    }

    /** Creates a zero-byte "folder" placeholder object with the given key. */
    private void createFolder(AmazonS3Client amazonS3Client, String folder) {
        final InputStream input = new ByteArrayInputStream(new byte[0]);
        ObjectMetadata metadata = new ObjectMetadata();
        metadata.setContentLength(0);

        amazonS3Client.putObject(new PutObjectRequest(BUCKET_NAME, folder, input, metadata));
    }

    /** Uploads a tiny private test file with server-side AES256 encryption. */
    private void createFile(AmazonS3Client amazonS3Client, String fileName) throws IOException {
        ObjectMetadata omd = new ObjectMetadata();
        omd.setHeader("filename", fileName);
        omd.setHeader("x-amz-server-side-encryption", "AES256");

        // try-with-resources guarantees the stream closes even if putObject throws.
        try (StringInputStream testFile = new StringInputStream("Test")) {
            final PutObjectRequest putObjectRequest =
                    new PutObjectRequest(BUCKET_NAME, fileName, testFile, omd);
            amazonS3Client.putObject(putObjectRequest.withCannedAcl(CannedAccessControlList.Private));
        }
    }

    /**
     * Builds a client pointed at the Swisscom endpoint with legacy S3 signing
     * and an enlarged connection pool for the concurrent workers.
     */
    private AmazonS3Client getS3Client() {

        ClientConfiguration opts = new ClientConfiguration();
        opts.setSignerOverride("S3SignerType"); // NOT "AWS3SignerType"
        opts.setMaxConnections(100);

        final AmazonS3Client s3 = new AmazonS3Client(new BasicAWSCredentials(accessKey, secretKey), opts);
        s3.setEndpoint("ds31s3.swisscom.com");

        return s3;
    }
}

例外是在这里:

com.amazonaws.services.s3.model.AmazonS3Exception: The AWS Access Key Id you provided does not exist in our records. (Service: Amazon S3; Status Code: 403; Error Code: InvalidAccessKeyId; Request ID: null), S3 Extended Request ID: null

能否请您分析一下为什么会出现这种情况?因为这既不正常,也无法扩展。

+0

请粘贴产生此错误的代码片断。描述你在做什么 –

+0

我用我使用的代码片段编辑了我的问题。 –

+0

我正在观察完全相同的行为。然而,这个问题并没有在即时的下载电话中提出,而是以非常快速和连续的方式上传。然后,我也得到HTTP 403 - 这是非常非常奇怪的,因为对象Id被创建! – gsmachado

回答

1

我重新创建了新的Dynamic Storage S3并重新运行上面的测试。现在例外情况没有升高。似乎以前创建的存储有问题。

0

我们针对Swisscom的S3 Dynamic Storage连续80次运行您的代码段,并且无法重现此问题。

但是,上传后直接访问对象时可能会遇到计时问题。 PUT请求可能会被平衡到另一个节点,而不是GET请求。因此,如果您在上传后立即想要下载对象,请实施短暂睡眠或重试。

+0

您是否在并发模式下运行此代码?让我们说5个平行线程。 –

+1

感谢您运行我的代码片段。真的很奇怪,它不能在你身边重现。你是否在并发模式下运行?让我们说5个平行线程。我再次运行测试,我总是得到这个例外。 '因此,如果您在上传后立即想要下载对象,请执行短暂睡眠或重试 - 此声明是否表示您的存储尚未准备好云? –

+0

官方答案是“请实施短暂睡眠或重试”?真?我认为这简直是不可接受的。然后,我宁愿支付Amazon S3。 :-( – gsmachado

相关问题