
oss c sdk multipart upload: reference code for uploading a file


Recently some users have asked how to upload a file via multipart upload. To make the SDK easier to use, I am sharing the code I wrote during development for everyone's reference. If anything is poorly written, please point it out:
(1) First, an interface that uploads a file with multipart upload; some of the details it relies on are explained later. The interface lets the caller specify an upload_id so an interrupted upload can be resumed: if upload_id is NULL (in practice, set with aos_str_null(&upload_id);), a new upload id is created; if the upload id already exists, the interface fetches the parts that have already been uploaded and sorts them. Note that when resuming, do not change the part size.



aos_status_t *oss_upload_file(oss_request_options_t *options,
    const aos_string_t *bucket, const aos_string_t *object, aos_string_t *upload_id,
    aos_string_t *filepath, int64_t part_size)
{
    aos_pool_t *subpool;
    aos_pool_t *parent_pool;
    int64_t start_pos;
    int64_t end_pos;
    int part_num;
    int part_count = 0;
    int res;
    aos_status_t *s;
    aos_status_t *ret;
    aos_table_t *init_multipart_headers;
    aos_table_t *init_multipart_resp_headers;
    aos_file_buf_t *fb;
    oss_upload_file_t *upload_file;
    aos_table_t *upload_part_resp_headers;
    char *part_num_str;
    char *etag;
    aos_list_t complete_part_list;
    oss_complete_part_content_t *complete_content;
    aos_table_t *complete_resp_headers;


    aos_list_init(&complete_part_list);
    parent_pool = options->pool;


    //get upload_id and uploaded part
    aos_pool_create(&subpool, options->pool);
    options->pool = subpool;
    if (NULL == upload_id->data) {
        init_multipart_headers = aos_table_make(subpool, 0);
        s = oss_init_multipart_upload(options, bucket, object,
            init_multipart_headers, upload_id, &init_multipart_resp_headers);
        if (!aos_status_is_ok(s)) {
            aos_error_log("Init multipart upload fail!\n");
            aos_status_copy(s, ret, parent_pool);
            aos_pool_destroy(subpool);
            options->pool = parent_pool;
            return ret;
        }
        /* upload_id is allocated from the subpool; duplicate it into parent_pool
         * so it is still valid after the subpool is destroyed below */
        aos_str_set(upload_id, apr_pstrdup(parent_pool, upload_id->data));
    } else {
        s = oss_get_sorted_uploaded_part(options, bucket, object, upload_id,
            &complete_part_list, &part_count);
        if (!aos_status_is_ok(s)) {
            aos_error_log("Get multipart uploaded part fail!\n");
            aos_status_copy(s, ret, parent_pool);
            aos_pool_destroy(subpool);
            options->pool = parent_pool;
            return ret;
        }
    }
    aos_pool_destroy(subpool);



    //get part size
    fb = aos_create_file_buf(parent_pool);
    res = aos_open_file_for_read(parent_pool, filepath->data, fb);
    if (res != AOSE_OK) {
        s = aos_status_create(parent_pool);
        aos_file_error_status_set(s, res);
        options->pool = parent_pool;
        return s;
    }
    oss_get_part_size(fb->file_last, &part_size);


    //upload part from file
    upload_file = oss_create_upload_file(parent_pool);
    aos_str_set(&upload_file->filename, filepath->data);
    start_pos = part_size * part_count;
    end_pos = start_pos + part_size;
    if (end_pos > fb->file_last) {
        end_pos = fb->file_last;   /* handle files smaller than a single part */
    }
    part_num = part_count + 1;


    while (1) {
        aos_pool_create(&subpool, parent_pool);
        options->pool = subpool;
        upload_file->file_pos = start_pos;
        upload_file->file_last = end_pos;


        s = oss_upload_part_from_file(options, bucket, object, upload_id,
            part_num, upload_file, &upload_part_resp_headers);
        if (!aos_status_is_ok(s)) {
            aos_error_log("upload file fail, upload_id is %.*s fail part_num:%d\n",
                upload_id->len, upload_id->data, part_num);
            aos_status_copy(s, ret, parent_pool);
            aos_pool_destroy(subpool);
            options->pool = parent_pool;
            return ret;
        }

        complete_content = oss_create_complete_part_content(parent_pool);
        part_num_str = apr_psprintf(parent_pool, "%d", part_num);
        aos_str_set(&complete_content->part_number, part_num_str);
        etag = apr_pstrdup(parent_pool, (char*)apr_table_get(upload_part_resp_headers, "ETag"));
        aos_str_set(&complete_content->etag, etag);
        aos_list_add_tail(&complete_content->node, &complete_part_list);
        aos_pool_destroy(subpool);
        if (end_pos >= fb->file_last) {
            break;
        }
        start_pos += part_size;
        end_pos += part_size;
        if (end_pos > fb->file_last)
            end_pos = fb->file_last;
        part_num += 1;
    }
    //complete multipart
    aos_pool_create(&subpool, parent_pool);
    options->pool = subpool;
    s = oss_complete_multipart_upload(options, bucket, object, upload_id,
        &complete_part_list, &complete_resp_headers);
    if (!aos_status_is_ok(s)) {
        aos_error_log("complete multipart upload fail, upload_id is %.*s\n",
            upload_id->len, upload_id->data);
    }
    aos_status_copy(s, ret, parent_pool);
    aos_pool_destroy(subpool);
    options->pool = parent_pool;
    return ret;
}
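For reference, calling this interface could look like the sketch below. This is only an illustration: the request options setup is omitted, and the bucket, object and file path are placeholders to adjust for your own environment.

/* A minimal sketch of calling oss_upload_file (placeholders, not a complete program).
 * `options` is assumed to be a fully initialized oss_request_options_t. */
aos_string_t bucket;
aos_string_t object;
aos_string_t filepath;
aos_string_t upload_id;
aos_status_t *s;
int64_t part_size = 1024 * 1024;   /* request 1 MB parts; may be enlarged by oss_get_part_size */

aos_str_set(&bucket, "your-bucket");
aos_str_set(&object, "your-object");
aos_str_set(&filepath, "/path/to/local/file");

aos_str_null(&upload_id);          /* NULL upload_id: start a brand new multipart upload */
s = oss_upload_file(options, &bucket, &object, &upload_id, &filepath, part_size);

/* To resume instead, pass the existing upload id and keep the same part_size:
 *     aos_str_set(&upload_id, "existing-upload-id");
 *     s = oss_upload_file(options, &bucket, &object, &upload_id, &filepath, part_size);
 */
if (!aos_status_is_ok(s)) {
    aos_error_log("oss_upload_file failed\n");
}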


(2) oss_get_sorted_uploaded_part fetches the parts already uploaded for the given upload id and sorts them by part number.
#define OSS_PER_RET_NUM 1000



typedef struct {
    int part_num;
    char *etag;
} oss_upload_part_t;



aos_status_t *oss_get_sorted_uploaded_part(oss_request_options_t *options,
    const aos_string_t *bucket, const aos_string_t *object, const aos_string_t *upload_id,
    aos_list_t *complete_part_list, int *part_count)
{
    aos_pool_t *subpool;
    aos_pool_t *parent_pool;
    aos_status_t *s;
    aos_status_t *ret;
    oss_upload_part_t part_arr[OSS_PER_RET_NUM];
    int part_index = 0;
    int index = 0;
    int uploaded_part_count = 0;
    oss_list_upload_part_params_t *params;
    oss_list_part_content_t *part_content;
    oss_complete_part_content_t *complete_content;
    aos_table_t *list_part_resp_headers;
    char *part_num_str;


    parent_pool = options->pool;
    params = oss_create_list_upload_part_params(parent_pool);
    while (params->truncated) {
        aos_pool_create(&subpool, parent_pool);
        options->pool = subpool;
        s = oss_list_upload_part(options, bucket, object,
            upload_id, params, &list_part_resp_headers);
        if (!aos_status_is_ok(s)) {
            aos_error_log("list upload fail, upload_id is %.*s\n",
                upload_id->len, upload_id->data);
            aos_status_copy(s, ret, parent_pool);
            aos_pool_destroy(subpool);
            options->pool = parent_pool;
            return ret;
        }

        if (!params->truncated) {
            aos_status_copy(s, ret, parent_pool);
        }
        aos_list_for_each_entry(part_content, &params->part_list, node) {
            oss_upload_part_t upload_part;
            upload_part.etag = part_content->etag.data;
            upload_part.part_num = atoi(part_content->part_number.data);
            part_arr[part_index++] = upload_part;
            uploaded_part_count++;
        }
        aos_list_init(&params->part_list);
        aos_str_set(&params->part_number_marker, params->next_part_number_marker.data);


        //sort the parts returned in this batch by part number
        qsort(part_arr, part_index, sizeof(part_arr[0]), part_sort_cmp);


        for (index = 0; index < part_index; ++index) {
            complete_content = oss_create_complete_part_content(parent_pool);
            part_num_str = apr_psprintf(parent_pool, "%d", part_arr[index].part_num);
            aos_str_set(&complete_content->part_number, part_num_str);
            aos_str_set(&complete_content->etag, part_arr[index].etag);
            aos_list_add_tail(&complete_content->node, complete_part_list);
        }
        part_index = 0;
        aos_pool_destroy(subpool);
    }


    *part_count = uploaded_part_count;
    options->pool = parent_pool;


    return ret;
}


(3) The sorting uses the C library function qsort, which needs a custom comparison function, implemented as follows:

int part_sort_cmp(const void *a, const void *b)
{
    return ((oss_upload_part_t*)a)->part_num - ((oss_upload_part_t*)b)->part_num;
}


(4) Derive the part size actually used for the upload from the part size supplied by the user.

void oss_get_part_size(int64_t filesize, int64_t *part_size)
{
    if (filesize > (*part_size) * OSS_MAX_PART_NUM) {
        *part_size = (filesize + OSS_MAX_PART_NUM - filesize % OSS_MAX_PART_NUM) / OSS_MAX_PART_NUM;
        aos_warn_log("Part number larger than max limit, part size changed to:%" APR_INT64_T_FMT "\n", *part_size);
    }
}
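As a quick illustration of this adjustment (assuming OSS_MAX_PART_NUM is 10000, the OSS limit on part numbers):

/* Illustration only: a 100 GB file with a requested 1 MB part size would need
 * about 102400 parts, so oss_get_part_size enlarges part_size to roughly 10.7 MB
 * and the whole file then fits within 10000 parts. */
int64_t part_size = 1024 * 1024;                    /* caller asks for 1 MB parts */
int64_t filesize  = 100LL * 1024 * 1024 * 1024;     /* 100 GB local file */
oss_get_part_size(filesize, &part_size);            /* part_size becomes ~10.7 MB */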


(5) Other small details
When uploading data with multipart upload, the file may be large, which can result in a very large number of parts. If a single memory pool is used from start to finish, memory consumption can become severe, so when using the oss c sdk it is recommended to use one memory pool per request and release it promptly after use. Because this implementation needs information from options such as access_id and access_key, it takes advantage of apr pools: a subpool is created on top of the parent memory pool (so to speak), and each concrete request runs on the subpool, which effectively avoids excessive memory use.
Also, always be clear about when a memory pool is released. We have seen users create a variable from a memory pool, destroy the pool, and then keep using the variable. If you are lucky the process crashes and the problem is found right away; if you are unlucky you read random values or whatever data was left behind, and all sorts of strange behavior follows. In this code, pay particular attention to aos_status_copy:

#define aos_status_copy(src, dst, p)\
    (dst) = aos_status_create(p);\
    (dst)->code = (src)->code;\
    (dst)->error_code = apr_pstrdup(p, (src)->error_code);\
    (dst)->error_msg = apr_pstrdup(p, (src)->error_msg);


Because the status is still needed after the memory pool is destroyed, its contents must be copied into the parent memory pool first, so that later accesses are safe.
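The per-request subpool pattern described above looks roughly like the sketch below. This is only an illustration: options, bucket and object are assumed to be set up elsewhere, and oss_head_object is just an example request.

/* A minimal sketch of the one-subpool-per-request pattern (illustration only). */
aos_pool_t *subpool;
aos_table_t *resp_headers;
aos_status_t *s;
aos_status_t *ret;

aos_pool_create(&subpool, parent_pool);   /* subpool hangs off the parent pool */
options->pool = subpool;                  /* this request allocates from the subpool */
s = oss_head_object(options, bucket, object, NULL, &resp_headers);
aos_status_copy(s, ret, parent_pool);     /* copy the status into parent_pool ... */
aos_pool_destroy(subpool);                /* ... before everything in the subpool is freed */
options->pool = parent_pool;              /* ret is still valid here */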

yjseu 2015-11-12 17:26:48
3 replies
  • Re: #2 (有一天)
    Did you find a problem?

    -------------------------

    Re: #5 (有一天)
    Take a look at the code in the aos_buf.c file of the oss c sdk source.
    2015-11-13 14:58:13
  • Re: oss c sdk multipart upload: reference code for uploading a file

    -------------------------

    Re: #3 (yjseu)
    No problem found, I just wanted to post an emoji.

    -------------------------

    Re: #3 (yjseu)
    I could not find any comments on the aos_buf_pack function in the demo. Could you introduce aos_buf_pack?

    -------------------------

    Re: #6 (yjseu)
    Found it, thanks.
    2015-11-13 13:50:05
  • Re: oss c sdk multipart upload: reference code for uploading a file
    Great stuff.
    2015-11-12 19:05:24