optimize lookahead init time (#11769)

parent 05989ad0f9
commit 8db34057b4

2 changed files with 3 additions and 4 deletions
@@ -19,7 +19,6 @@ import time
 import argparse
 
 from transformers import AutoTokenizer
-from ipex_llm import optimize_model
 import numpy as np
 
 
@@ -36,7 +35,7 @@ if __name__ == '__main__':
     args = parser.parse_args()
     model_path = args.repo_id_or_model_path
 
-    
+
     from ipex_llm.transformers import AutoModelForCausalLM
     # Load model in 4 bit,
     # which convert the relevant layers in the model into INT4 format
@@ -45,7 +44,7 @@ if __name__ == '__main__':
                                                  optimize_model=True,
                                                  trust_remote_code=True,
                                                  use_cache=True)
-    model = model.to("xpu")
+    model = model.half().to("xpu")
 
     # Load tokenizer
     tokenizer = AutoTokenizer.from_pretrained(model_path,
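
The example script change above casts the model to half precision before moving it to the XPU. A minimal loading sketch of that pattern, assuming ipex_llm is installed and an Intel GPU (XPU) is available; the model id and the load_in_4bit argument are illustrative assumptions based on the surrounding comments, not lines copied from the full script:

    # Minimal sketch, not the full example script; model_path and load_in_4bit
    # are placeholders/assumptions.
    from transformers import AutoTokenizer
    from ipex_llm.transformers import AutoModelForCausalLM

    model_path = "meta-llama/Llama-2-7b-chat-hf"  # placeholder model id

    # Load in 4 bit: the relevant layers are converted into INT4 format.
    model = AutoModelForCausalLM.from_pretrained(model_path,
                                                 load_in_4bit=True,
                                                 optimize_model=True,
                                                 trust_remote_code=True,
                                                 use_cache=True)
    # Cast the remaining FP32 weights to FP16 before moving to the XPU so that
    # generation runs in half precision on the Intel GPU.
    model = model.half().to("xpu")

    # Load tokenizer
    tokenizer = AutoTokenizer.from_pretrained(model_path, trust_remote_code=True)
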
@@ -149,7 +149,7 @@ class PromptLookupCandidateGenerator():
                            input_ids: torch.LongTensor):
         for ngram_size in range(self.max_matching_ngram_size, 0, -1):
             # Create sliding windows of size ngram_size
-            windows = input_ids.unfold(dimension=1, size=ngram_size, step=1)
+            windows = input_ids.cpu().unfold(dimension=1, size=ngram_size, step=1)
             for idx in range(windows.size(1)):
                 window = tensor2key(windows[0, idx])
                 if window not in self.lookup_table:
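
The one-line change above is what speeds up lookahead initialization: input_ids is copied to the CPU once before unfold(), so the per-window key construction below no longer triggers a separate XPU-to-CPU transfer for every n-gram in the prompt. A standalone sketch of that init pattern, assuming PyTorch; build_ngram_lookup_table and the tuple-based key are illustrative stand-ins for the class method and its tensor2key helper, not the repository's exact code:

    import torch

    def build_ngram_lookup_table(input_ids: torch.Tensor,
                                 max_matching_ngram_size: int = 2) -> dict:
        # Sketch only: builds the n-gram -> position table used for prompt lookup.
        lookup_table = {}
        # Single device-to-host copy up front; every windows[0, idx] read below
        # then stays on the CPU instead of synchronizing with the XPU per window.
        input_ids = input_ids.cpu()
        for ngram_size in range(max_matching_ngram_size, 0, -1):
            # Sliding windows of size ngram_size along the sequence dimension
            windows = input_ids.unfold(dimension=1, size=ngram_size, step=1)
            for idx in range(windows.size(1)):
                key = tuple(windows[0, idx].tolist())  # hashable stand-in for tensor2key
                lookup_table.setdefault(key, idx)
        return lookup_table

For a long prompt that already lives on the XPU, this replaces many tiny synchronizing copies (one per window) with a single bulk transfer, which is most likely where the init-time saving in this commit comes from.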