Resolve message formatting issues (#13095)
Signed-off-by: Emmanuel Ferdman <emmanuelferdman@gmail.com>
parent 35b49e4d91
commit 1e4e1353a0

6 changed files with 8 additions and 8 deletions
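All eight hunks below fix the same class of bug: an error message built from adjacent string literals in which only some literals carried the `f` prefix. Python's implicit string concatenation does not propagate the prefix, so placeholders in the unprefixed parts were emitted verbatim (literal braces and all) instead of being interpolated. A minimal sketch of the failure mode, using made-up values rather than anything from the patched files:

# Hypothetical values, not taken from this commit.
num_gpus = 2
gpus = "0,1"

# Only the first literal is an f-string; the second keeps its braces.
broken = (f"Larger --num-gpus "
          "({num_gpus}) than --gpus {gpus}!")

# Prefixing every literal interpolates both parts.
fixed = (f"Larger --num-gpus "
         f"({num_gpus}) than --gpus {gpus}!")

print(broken)  # Larger --num-gpus ({num_gpus}) than --gpus {gpus}!
print(fixed)   # Larger --num-gpus (2) than --gpus 0,1!

Accordingly, each hunk is the one-character change of adding the missing `f` to a continuation literal.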
@@ -470,7 +470,7 @@ if __name__ == "__main__":
     if args.gpus:
         invalidInputError(len(args.gpus.split(",")) > args.num_gpus, f"Larger --num-gpus "
-                          "({args.num_gpus}) than --gpus {args.gpus}!")
+                          f"({args.num_gpus}) than --gpus {args.gpus}!")
         os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
 
     gptq_config = GptqConfig(
@@ -672,7 +672,7 @@ class _BaseAutoModelClass:
                 else:
                     invalidInputError(False,
                                       f'`torch_dtype` can be either `torch.dtype` or `"auto"`,'
-                                      'but received {torch_dtype}')
+                                      f'but received {torch_dtype}')
             dtype_orig = model_class._set_default_torch_dtype(torch_dtype)
 
         # Pretrained Model
@@ -217,7 +217,7 @@ class _BaseAutoModelClass:
                     max_prompt_len < max_context_len,
                     (
                         f"max_prompt_len ({max_prompt_len}) should be less"
-                        " than max_context_len ({max_context_len})"
+                        f" than max_context_len ({max_context_len})"
                     ),
                 )
                 optimize_kwargs = {
@@ -553,7 +553,7 @@ class _BaseAutoModelClass:
                     invalidInputError(
                         False,
                         f'`torch_dtype` can be either `torch.dtype` or `"auto"`,'
-                        "but received {torch_dtype}",
+                        f"but received {torch_dtype}",
                     )
             dtype_orig = model_class._set_default_torch_dtype(torch_dtype)
 
@@ -588,7 +588,7 @@ class _BaseAutoModelClass:
                 max_prompt_len < max_context_len,
                 (
                     f"max_prompt_len ({max_prompt_len}) should be less"
-                    " than max_context_len ({max_context_len})"
+                    f" than max_context_len ({max_context_len})"
                 ),
             )
             from ipex_llm.transformers.npu_models.convert_mp import optimize_llm_pre
@@ -127,7 +127,7 @@ def phi3_attention_forward(
                 invalidInputError(
                     False,
                     f"Attention mask should be of size {(bsz, 1, q_len, kv_seq_len)},"
-                    " but is {attention_mask.size()}"
+                    f" but is {attention_mask.size()}"
                 )
             attn_weights = attn_weights + attention_mask
 
@@ -92,7 +92,7 @@ def load_state_dict(checkpoint_file: Union[str, os.PathLike]):
     except Exception as e:
         invalidInputError(False,
                           f"Unable to load weights"
-                          "from pytorch checkpoint file for '{checkpoint_file}' "
+                          f"from pytorch checkpoint file for '{checkpoint_file}' "
                           f"at '{checkpoint_file}'. ")
 
@@ -112,7 +112,7 @@ def _load(pickle_fp, map_location, picklemoudle, pickle_file='data.pkl', zip_fil
                 data = fp.read(size)
                 return torch.frombuffer(bytearray(data), dtype=dtype)
             description = f'storage data_type={data_type} ' \
-                          'path-in-zip={filename} path={self.zip_file.filename}'
+                          f'path-in-zip={filename} path={self.zip_file.filename}'
             return LazyStorage(load=load, kind=pid[1], description=description)
 
         @staticmethod
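For context, most patched call sites go through `invalidInputError`. The sketch below is a hypothetical stand-in for that helper, assuming only that it raises when its condition is falsy; the real ipex-llm implementation may log or wrap the error differently. It shows why the missing prefixes mattered: before this commit, the raised message contained literal `{torch_dtype}` rather than the offending value.

def invalidInputError(condition, errMsg):
    # Assumed contract: raise when the condition does not hold.
    # Adjacent literals at the call site concatenate into one errMsg.
    if not condition:
        raise RuntimeError(errMsg)

torch_dtype = "bfloat16"  # hypothetical offending value
try:
    invalidInputError(False,
                      f'`torch_dtype` can be either `torch.dtype` or `"auto"`, '
                      f'but received {torch_dtype}')
except RuntimeError as e:
    print(e)  # ... but received bfloat16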