optimize lookahead init time (#11769)
parent 05989ad0f9
commit 8db34057b4

2 changed files with 3 additions and 4 deletions
@@ -19,7 +19,6 @@ import time
 import argparse
 
 from transformers import AutoTokenizer
 from ipex_llm import optimize_model
-import numpy as np
 
 
@@ -45,7 +44,7 @@ if __name__ == '__main__':
                                                  optimize_model=True,
                                                  trust_remote_code=True,
                                                  use_cache=True)
-    model = model.to("xpu")
+    model = model.half().to("xpu")
 
     # Load tokenizer
     tokenizer = AutoTokenizer.from_pretrained(model_path,
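This hunk casts the example's model to FP16 before moving it to the XPU device, halving the weight-transfer volume and the on-device memory footprint. Below is a minimal sketch of the resulting load path, assuming an XPU-enabled PyTorch build and a hypothetical model_path; whether the script loads through ipex_llm's AutoModelForCausalLM wrapper or through transformers plus the optimize_model function imported in the first hunk is not visible here, so the sketch uses the function form:

    from transformers import AutoModelForCausalLM, AutoTokenizer
    from ipex_llm import optimize_model

    model_path = "meta-llama/Llama-2-7b-chat-hf"  # hypothetical model id

    model = AutoModelForCausalLM.from_pretrained(model_path,
                                                 trust_remote_code=True,
                                                 use_cache=True)
    model = optimize_model(model)    # ipex_llm optimization pass
    model = model.half().to("xpu")   # the changed line: cast to FP16 first

    tokenizer = AutoTokenizer.from_pretrained(model_path,
                                              trust_remote_code=True)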
@@ -149,7 +149,7 @@ class PromptLookupCandidateGenerator():
                            input_ids: torch.LongTensor):
         for ngram_size in range(self.max_matching_ngram_size, 0, -1):
             # Create sliding windows of size ngram_size
-            windows = input_ids.unfold(dimension=1, size=ngram_size, step=1)
+            windows = input_ids.cpu().unfold(dimension=1, size=ngram_size, step=1)
             for idx in range(windows.size(1)):
                 window = tensor2key(windows[0, idx])
                 if window not in self.lookup_table: