fix in transformers 4.36 (#10150)
This commit is contained in:
		
							parent
							
								
									43dac97e03
								
							
						
					
					
						commit
						6fb65bb9d2
					
				
					 1 changed file with 1 addition and 1 deletion
				
			
		| 
						 | 
					@ -288,7 +288,7 @@ if __name__ == "__main__":
 | 
				
			||||||
 | 
					
 | 
				
			||||||
    if model.config.architectures is not None and model.config.architectures[0] == "QWenLMHeadModel":
 | 
					    if model.config.architectures is not None and model.config.architectures[0] == "QWenLMHeadModel":
 | 
				
			||||||
        stop_words = get_stop_words_ids("Qwen", tokenizer=tokenizer)
 | 
					        stop_words = get_stop_words_ids("Qwen", tokenizer=tokenizer)
 | 
				
			||||||
        kv_cache = StartRecentKVCache(start_size=start_size)
 | 
					        kv_cache = StartRecentKVCache(start_size=start_size, k_seq_dim=1, v_seq_dim=1)
 | 
				
			||||||
        qwen_stream_chat(model=model, tokenizer=tokenizer,kv_cache=kv_cache, stop_words=stop_words)
 | 
					        qwen_stream_chat(model=model, tokenizer=tokenizer,kv_cache=kv_cache, stop_words=stop_words)
 | 
				
			||||||
    elif model.config.architectures is not None and model.config.architectures[0] == "ChatGLMModel":
 | 
					    elif model.config.architectures is not None and model.config.architectures[0] == "ChatGLMModel":
 | 
				
			||||||
        chatglm3_stream_chat(model=model, tokenizer=tokenizer)
 | 
					        chatglm3_stream_chat(model=model, tokenizer=tokenizer)
 | 
				
			||||||
| 
						 | 
					
 | 
				
			||||||
		Loading…
	
		Reference in a new issue