Fix IPEX auto importer (#11192)
* Fix the ipex auto importer using Python builtins.
* Raise an error if the user imports ipex manually before importing ipex_llm; do nothing if they import ipex after ipex_llm.
* Remove the manual import of ipex from the examples.
parent 711fa0199e
commit ce3f08b25a
19 changed files with 29 additions and 28 deletions
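The contract this commit establishes, sketched as an illustrative usage example (the exact exception surfaced by log4Error.invalidInputError is an implementation detail of ipex_llm's error helper, not shown in this diff):

# Correct order: importing ipex_llm is enough on XPU installs. The auto
# importer loads intel_extension_for_pytorch and publishes it as the bare
# name ipex through Python builtins.
import ipex_llm
print(ipex.__version__)  # resolves via builtins; no explicit import needed

# Wrong order: importing IPEX first is now rejected when ipex_llm loads.
#   import intel_extension_for_pytorch as ipex
#   import ipex_llm  # logs an error and fails via log4Error.invalidInputError

# Importing IPEX after ipex_llm stays harmless: the module is already in
# sys.modules, so the import simply returns the cached module object.
import intel_extension_for_pytorch as ipex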
@@ -51,7 +51,6 @@ import json
 # code change to import from IPEX-LLM API instead of using transformers API
 from ipex_llm.transformers import AutoModelForCausalLM
 from transformers import LlamaTokenizer
-import intel_extension_for_pytorch as ipex
 
 
 def load(model_name_or_path):

@@ -47,7 +47,6 @@ from transformers import AutoModelForCausalLM, LlamaTokenizer, AutoTokenizer
 import deepspeed
 from ipex_llm import optimize_model
 import torch
-import intel_extension_for_pytorch as ipex
 import time
 import argparse
 

@@ -274,9 +274,6 @@ if __name__ == "__main__":
     multi_turn = args.multi_turn
     max_context = args.max_context
 
-    if device == 'xpu':
-        import intel_extension_for_pytorch as ipex
-
     model = model.to(device)
 
     model.generation_config = GenerationConfig.from_pretrained(

@@ -20,7 +20,6 @@ import argparse
 
 from transformers import AutoTokenizer
 from ipex_llm import optimize_model
-import intel_extension_for_pytorch as ipex
 
 # you could tune the prompt based on your own model,
 # here the prompt tuning refers to https://huggingface.co/internlm/internlm-chat-7b/blob/main/modeling_internlm.py#L1053

@@ -20,7 +20,6 @@ import argparse
 import numpy as np
 
 from transformers import AutoTokenizer, GenerationConfig
-import intel_extension_for_pytorch as ipex
 
 
 # you could tune the prompt based on your own model,

@@ -20,7 +20,6 @@ import argparse
 
 from transformers import AutoTokenizer
 from ipex_llm import optimize_model
-import intel_extension_for_pytorch as ipex
 import numpy as np
 
 

@@ -15,7 +15,6 @@
 #
 
 import torch
-import intel_extension_for_pytorch as ipex
 import time
 import argparse
 

@@ -16,7 +16,6 @@
 
 import torch, transformers
 import sys, os, time
-import intel_extension_for_pytorch as ipex
 import argparse
 from transformers import LlamaTokenizer
 from ipex_llm.transformers import AutoModelForCausalLM

@@ -1,7 +1,6 @@
 from torch import nn
 import torch
 import torch.distributed as dist
-import intel_extension_for_pytorch as ipex
 
 from typing import List, Optional, Tuple, Union, Iterator
 import time

@@ -2,7 +2,6 @@ from pipeline_models import ModelRunner
 import torch.nn.parallel
 import torch.distributed as dist
 import os
-import intel_extension_for_pytorch as ipex
 
 import oneccl_bindings_for_pytorch
 

@@ -16,7 +16,6 @@
 #
 
 import torch
-import intel_extension_for_pytorch as ipex
 import time
 import argparse
 

@@ -20,7 +20,6 @@ import argparse
 
 from transformers import AutoTokenizer
 from ipex_llm import optimize_model
-import intel_extension_for_pytorch as ipex
 
 # you could tune the prompt based on your own model,
 # here the prompt tuning refers to https://huggingface.co/internlm/internlm-chat-7b/blob/main/modeling_internlm.py#L1053

@@ -17,7 +17,6 @@
 import argparse
 import time
 import torch
-import intel_extension_for_pytorch as ipex
 from ipex_llm import optimize_model
 from transformers import AutoTokenizer
 

@@ -20,7 +20,6 @@ import argparse
 import numpy as np
 
 from transformers import AutoTokenizer, GenerationConfig
-import intel_extension_for_pytorch as ipex
 from ipex_llm import optimize_model
 
 

@@ -20,7 +20,6 @@ import argparse
 
 from transformers import AutoTokenizer
 from ipex_llm import optimize_model
-import intel_extension_for_pytorch as ipex
 import numpy as np
 
 

@@ -15,7 +15,6 @@
 #
 
 import torch
-import intel_extension_for_pytorch as ipex
 import time
 import argparse
 

@@ -16,7 +16,6 @@
 
 import torch, transformers
 import sys, os, time
-import intel_extension_for_pytorch as ipex
 import argparse
 from transformers import LlamaTokenizer, AutoModelForCausalLM
 from ipex_llm import optimize_model

@@ -47,7 +47,6 @@ from eagle.model.ea_model import EaModel
 from eagle.model.utils import *
 from eagle.model.kv_cache import initialize_past_key_values
 from eagle.model.choices import *
-import intel_extension_for_pytorch as ipex
 from ipex_llm import optimize_model
 
 def ea_forward(input_ids, model, tokenizer, tree_choices, logits_processor=None, max_steps=512):

@@ -16,6 +16,9 @@
 
 from importlib.metadata import distribution, PackageNotFoundError
+import logging
+import builtins
+import sys
 from ipex_llm.utils.common import log4Error
 
 
 class IPEXImporter:

@@ -51,15 +54,36 @@ class IPEXImporter:
 
     def import_ipex(self):
         """
-        Try to import Intel Extension for PyTorch as ipex
+        Try to import Intel Extension for PyTorch as ipex for XPU
 
-        Raises ImportError if failed
+        Raises ImportError and invalidInputError if failed
         """
         if self.is_xpu_version_installed():
-            import intel_extension_for_pytorch as ipex
+            # Check if user import ipex manually
+            if 'ipex' in sys.modules or 'intel_extension_for_pytorch' in sys.modules:
+                logging.error("ipex_llm will automatically import intel_extension_for_pytorch.")
+                log4Error.invalidInputError(False,
+                                            "Please import ipex_llm before importing \
+                                            intel_extension_for_pytorch!")
+            self.directly_import_ipex()
             self.ipex_version = ipex.__version__
+            logging.info("intel_extension_for_pytorch auto imported")
 
+    def directly_import_ipex(self):
+        """
+        Try to import Intel Extension for PyTorch as ipex
+
+        Raises ImportError and invalidInputError if failed
+        """
+        # import ipex
+        import intel_extension_for_pytorch as ipex
+        if ipex is not None:
+            # Expose ipex to Python builtins
+            builtins.ipex = ipex
+        else:
+            log4Error.invalidInputError(False,
+                                        "Can not import intel_extension_for_pytorch.")
+
     def get_ipex_version(self):
         """
         Get ipex version

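For context, directly_import_ipex() leans on standard Python name resolution: a name not found in the local or global scope is looked up in the builtins module last, so assigning a module object to an attribute of builtins makes it usable as a bare name everywhere in the process. A minimal standalone sketch of the same technique, with json standing in for intel_extension_for_pytorch:

import builtins
import json

# Publish the module under a bare name, as directly_import_ipex() does
# with builtins.ipex = ipex.
builtins.ipex = json

def elsewhere():
    # There is no local or global binding for ipex here; the lookup falls
    # through to builtins and finds the module published above.
    return ipex.dumps({"ok": True})

print(elsewhere())  # prints {"ok": true}

This is also why import_ipex() can read ipex.__version__ right after calling self.directly_import_ipex() without a local import statement.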
@@ -69,11 +93,8 @@
         if self.ipex_version is not None:
             return self.ipex_version
         # try to import Intel Extension for PyTorch and get version
-        try:
-            import intel_extension_for_pytorch as ipex
-            self.ipex_version = ipex.__version__
-        except ImportError:
-            self.ipex_version = None
+        self.directly_import_ipex()
+        self.ipex_version = ipex.__version__
         return self.ipex_version
 
 
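Note that get_ipex_version() no longer swallows ImportError itself: directly_import_ipex() either succeeds (publishing ipex into builtins, which is how the bare ipex.__version__ on the next line resolves) or reports the failure through log4Error.invalidInputError. A hedged usage sketch follows; the module path ipex_llm.utils.ipex_importer and the no-argument constructor are assumptions based on the class shown here, not confirmed by this diff:

# Assumed import path for the class in this diff (illustration only).
from ipex_llm.utils.ipex_importer import IPEXImporter

importer = IPEXImporter()             # assumed no-argument constructor
importer.import_ipex()                # auto-imports IPEX on XPU installations
print(importer.get_ipex_version())    # e.g. "2.1.10+xpu" on an XPU build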