╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮ │ /opt/conda/lib/python3.7/site-packages/modelscope/utils/registry.py:210 in build_from_cfg │ │ │ │ 207 │ │ │ f'type must be a str or valid type, but got {type(obj_type)}') │ │ 208 │ try: │ │ 209 │ │ if hasattr(obj_cls, '_instantiate'): │ │ ❱ 210 │ │ │ return obj_cls._instantiate(**args) │ │ 211 │ │ else: │ │ 212 │ │ │ return obj_cls(**args) │ │ 213 │ except Exception as e: │ │ │ │ /opt/conda/lib/python3.7/site-packages/modelscope/models/base/base_model.py:66 in _instantiate │ │ │ │ 63 │ │ │ process in constructor of a task model, a load_model method is │ │ 64 │ │ │ added, and thus this method is overloaded │ │ 65 │ │ """ │ │ ❱ 66 │ │ return cls(**kwargs) │ │ 67 │ │ │ 68 │ @classmethod │ │ 69 │ def from_pretrained(cls, │ │ │ │ /opt/conda/lib/python3.7/site-packages/modelscope/models/multi_modal/video_synthesis/text_to_vid │ │ eo_synthesis_model.py:82 in __init__ │ │ │ │ 79 │ │ │ temporal_attention=cfg['temporal_attention']) │ │ 80 │ │ self.sd_model.load_state_dict( │ │ 81 │ │ │ torch.load( │ │ ❱ 82 │ │ │ │ osp.join(model_dir, self.config.model.model_args.ckpt_unet)), │ │ 83 │ │ │ strict=True) │ │ 84 │ │ self.sd_model.eval() │ │ 85 │ │ self.sd_model.to(self.device) │ │ │ │ /opt/conda/lib/python3.7/site-packages/torch/serialization.py:712 in load │ │ │ │ 709 │ │ │ │ │ │ │ │ " silence this warning)", UserWarning) │ │ 710 │ │ │ │ │ opened_file.seek(orig_position) │ │ 711 │ │ │ │ │ return torch.jit.load(opened_file) │ │ ❱ 712 │ │ │ │ return _load(opened_zipfile, map_location, pickle_module, **pickle_load │ │ 713 │ │ return _legacy_load(opened_file, map_location, pickle_module, **pickle_load_args │ │ 714 │ │ 715 │ │ │ │ /opt/conda/lib/python3.7/site-packages/torch/serialization.py:1046 in _load │ │ │ │ 1043 │ │ │ 1044 │ unpickler = UnpicklerWrapper(data_file, **pickle_load_args) │ │ 1045 │ unpickler.persistent_load = persistent_load │ │ ❱ 1046 │ result = unpickler.load() │ │ 
1047 │ │ │ 1048 │ torch._utils._validate_loaded_sparse_tensors() │ │ 1049 │ │ │ │ /opt/conda/lib/python3.7/site-packages/torch/serialization.py:1016 in persistent_load │ │ │ │ 1013 │ │ │ │ 1014 │ │ if key not in loaded_storages: │ │ 1015 │ │ │ nbytes = numel * torch._utils._element_size(dtype) │ │ ❱ 1016 │ │ │ load_tensor(dtype, nbytes, key, _maybe_decode_ascii(location)) │ │ 1017 │ │ │ │ 1018 │ │ return loaded_storages[key] │ │ 1019 │ │ │ │ /opt/conda/lib/python3.7/site-packages/torch/serialization.py:1001 in load_tensor │ │ │ │ 998 │ │ # TODO: Once we decide to break serialization FC, we can │ │ 999 │ │ # stop wrapping with _TypedStorage │ │ 1000 │ │ loaded_storages[key] = torch.storage._TypedStorage( │ │ ❱ 1001 │ │ │ wrap_storage=restore_location(storage, location), │ │ 1002 │ │ │ dtype=dtype) │ │ 1003 │ │ │ 1004 │ def persistent_load(saved_id): │ │ │ │ /opt/conda/lib/python3.7/site-packages/torch/serialization.py:176 in default_restore_location │ │ │ │ 173 │ │ 174 def default_restore_location(storage, location): │ │ 175 │ for _, _, fn in _package_registry: │ │ ❱ 176 │ │ result = fn(storage, location) │ │ 177 │ │ if result is not None: │ │ │ return result │ │ 179 │ raise RuntimeError("don't know how to restore data location of " │ │ │ │ /opt/conda/lib/python3.7/site-packages/torch/serialization.py:158 in _cuda_deserialize │ │ │ │ 155 │ │ │ with torch.cuda.device(device): │ │ 156 │ │ │ │ return storage_type(obj.nbytes()) │ │ 157 │ │ else: │ │ ❱ 158 │ │ │ return obj.cuda(device) │ │ 159 │ │ 160 │ │ 161 register_package(10, _cpu_tag, _cpu_deserialize) │ │ │ │ /opt/conda/lib/python3.7/site-packages/torch/_utils.py:79 in _cuda │ │ │ │ 76 │ │ │ return new_type(indices, values, self.size()) │ │ 77 │ │ else: │ │ 78 │ │ │ new_type = getattr(torch.cuda, self.__class__.__name__) │ │ ❱ 79 │ │ │ return new_type(self.size()).copy_(self, non_blocking) │ │ 80 │ │ 81 │ │ 82 def _get_async_or_non_blocking(function_name, non_blocking, kwargs): │ │ │ │ 
/opt/conda/lib/python3.7/site-packages/torch/cuda/__init__.py:661 in _lazy_new │ │ │ │ 658 │ _lazy_init() │ │ 659 │ # We may need to call lazy init again if we are a forked child │ │ 660 │ # del _CudaBase.__new__ │ │ ❱ 661 │ return super(_CudaBase, cls).__new__(cls, *args, **kwargs) │ │ 662 │ │ 663 │ │ 664 class _CudaBase(object): │ ╰──────────────────────────────────────────────────────────────────────────────────────────────────╯ RuntimeError: CUDA out of memory. Tried to allocate 114.00 MiB (GPU 0; 15.90 GiB total capacity; 13.83 GiB already allocated; 50.75 MiB free; 13.95 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
During handling of the above exception, another exception occurred:
╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮ │ /opt/conda/lib/python3.7/site-packages/modelscope/utils/registry.py:212 in build_from_cfg │ │ │ │ 209 │ │ if hasattr(obj_cls, '_instantiate'): │ │ 210 │ │ │ return obj_cls._instantiate(**args) │ │ 211 │ │ else: │ │ ❱ 212 │ │ │ return obj_cls(**args) │ │ 213 │ except Exception as e: │ │ 214 │ │ # Normal TypeError does not print class name. │ │ 215 │ │ raise type(e)(f'{obj_cls.__name__}: {e}') │ │ │ │ /opt/conda/lib/python3.7/site-packages/modelscope/pipelines/multi_modal/text_to_video_synthesis_ │ │ pipeline.py:46 in __init__ │ │ │ │ 43 │ │ Args: │ │ 44 │ │ │ model: model id on modelscope hub. │ │ 45 │ │ """ │ │ ❱ 46 │ │ super().__init__(model=model, **kwargs) │ │ 47 │ │ │ 48 │ def preprocess(self, input: Input, **preprocess_params) -> Dict[str, Any]: │ │ 49 │ │ self.model.clip_encoder.to(self.model.device) │ │ │ │ /opt/conda/lib/python3.7/site-packages/modelscope/pipelines/base.py:94 in __init__ │ │ │ │ 91 │ │ self.device_name = device │ │ 92 │ │ │ │ 93 │ │ if not isinstance(model, List): │ │ ❱ 94 │ │ │ self.model = self.initiate_single_model(model) │ │ 95 │ │ │ self.models = [self.model] │ │ 96 │ │ else: │ │ 97 │ │ │ self.model = None │ │ │ │ /opt/conda/lib/python3.7/site-packages/modelscope/pipelines/base.py:57 in initiate_single_model │ │ │ │ 54 │ │ │ │ model, │ │ 55 │ │ │ │ device=self.device_name, │ │ 56 │ │ │ │ model_prefetched=True, │ │ ❱ 57 │ │ │ │ invoked_by=Invoke.PIPELINE) if is_model(model) else model │ │ 58 │ │ else: │ │ 59 │ │ │ return model │ │ 60 │ │ │ │ /opt/conda/lib/python3.7/site-packages/modelscope/models/base/base_model.py:144 in │ │ from_pretrained │ │ │ │ 141 │ │ │ model_cfg.init_backbone = True │ │ 142 │ │ │ model = build_backbone(model_cfg) │ │ 143 │ │ else: │ │ ❱ 144 │ │ │ model = build_model(model_cfg, task_name=task_name) │ │ 145 │ │ │ │ 146 │ │ # dynamically add pipeline info to model for pipeline inference │ │ 147 │ │ if hasattr(cfg, 'pipeline'): │ │ 
│ │ /opt/conda/lib/python3.7/site-packages/modelscope/models/builder.py:36 in build_model │ │ │ │ 33 │ """ │ │ 34 │ try: │ │ 35 │ │ model = build_from_cfg( │ │ ❱ 36 │ │ │ cfg, MODELS, group_key=task_name, default_args=default_args) │ │ 37 │ except KeyError as e: │ │ 38 │ │ # Handle subtask with a backbone model that hasn't been registered │ │ 39 │ │ # All the subtask with a parent task should have a task model, otherwise it is n │ │ │ │ /opt/conda/lib/python3.7/site-packages/modelscope/utils/registry.py:215 in build_from_cfg │ │ │ │ 212 │ │ │ return obj_cls(**args) │ │ 213 │ except Exception as e: │ │ 214 │ │ # Normal TypeError does not print class name. │ │ ❱ 215 │ │ raise type(e)(f'{obj_cls.__name__}: {e}') │ │ 216 │ ╰──────────────────────────────────────────────────────────────────────────────────────────────────╯ RuntimeError: TextToVideoSynthesis: CUDA out of memory. Tried to allocate 114.00 MiB (GPU 0; 15.90 GiB total capacity; 13.83 GiB already allocated; 50.75 MiB free; 13.95 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF
During handling of the above exception, another exception occurred:
╭─────────────────────────────── Traceback (most recent call last) ────────────────────────────────╮ │ in <module>:4 │ │ │ │ 1 from modelscope.pipelines import pipeline │ │ 2 from modelscope.utils.constant import Tasks │ │ 3 │ │ ❱ 4 p = pipeline('text-to-video-synthesis', 'damo/text-to-video-synthesis') │ │ 5 p({'text':'A panda eating bamboo on a rock.'}) │ │ 6 │ │ │ │ /opt/conda/lib/python3.7/site-packages/modelscope/pipelines/builder.py:140 in pipeline │ │ │ │ 137 │ if preprocessor is not None: │ │ 138 │ │ cfg.preprocessor = preprocessor │ │ 139 │ │ │ ❱ 140 │ return build_pipeline(cfg, task_name=task) │ │ 141 │ │ 142 │ │ 143 def add_default_pipeline_info(task: str, │ │ │ │ /opt/conda/lib/python3.7/site-packages/modelscope/pipelines/builder.py:57 in build_pipeline │ │ │ │ 54 │ │ default_args (dict, optional): Default initialization arguments. │ │ 55 │ """ │ │ 56 │ return build_from_cfg( │ │ ❱ 57 │ │ cfg, PIPELINES, group_key=task_name, default_args=default_args) │ │ 58 │ │ 59 │ │ 60 def pipeline(task: str = None, │ │ │ │ /opt/conda/lib/python3.7/site-packages/modelscope/utils/registry.py:215 in build_from_cfg │ │ │ │ 212 │ │ │ return obj_cls(**args) │ │ 213 │ except Exception as e: │ │ 214 │ │ # Normal TypeError does not print class name. │ │ ❱ 215 │ │ raise type(e)(f'{obj_cls.__name__}: {e}') │ │ 216 │ ╰──────────────────────────────────────────────────────────────────────────────────────────────────╯ RuntimeError: TextToVideoSynthesisPipeline: TextToVideoSynthesis: CUDA out of memory. Tried to allocate 114.00 MiB (GPU 0; 15.90 GiB total capacity; 13.83 GiB already allocated; 50.75 MiB free; 13.95 GiB reserved in total by PyTorch) If reserved memory is >> allocated memory try setting max_split_size_mb to avoid fragmentation. See documentation for Memory Management and PYTORCH_CUDA_ALLOC_CONF