fix issue 1407 (#11171)
parent bfa1367149
commit f6d5c6af78
1 changed file with 1 addition and 1 deletion
@@ -822,7 +822,7 @@ def ggml_convert_low_bit(model, qtype, optimize_model=True,
     if optimize_model:
         model = _optimize_post(model, lightweight_bmm)

-    if hasattr(model, "config") and \
+    if hasattr(model, "config") and hasattr(model.config, "model_type") and \
             model.config.model_type == "qwen" and hasattr(model.config, "visual"):
         # for Qwen-VL-Chat
         # Due to issue https://github.com/intel/intel-extension-for-pytorch/issues/454,
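The added hasattr(model.config, "model_type") guard presumably protects models whose config object does not define model_type: without it, the comparison model.config.model_type == "qwen" would raise AttributeError. A minimal sketch of the failure mode, using hypothetical BareConfig/SimpleNamespace stand-ins rather than anything from the repository:

    # Sketch only: BareConfig and the SimpleNamespace "model" are illustrative assumptions.
    from types import SimpleNamespace


    class BareConfig:
        """A config object that, like some custom model configs, has no `model_type`."""
        pass


    model = SimpleNamespace(config=BareConfig())

    # Old check: accessing model.config.model_type raises AttributeError
    # when the config has no `model_type` attribute.
    try:
        is_qwen_vl = hasattr(model, "config") and \
            model.config.model_type == "qwen" and hasattr(model.config, "visual")
    except AttributeError as e:
        print("old check fails:", e)

    # New check: the extra hasattr() short-circuits before the attribute access.
    is_qwen_vl = hasattr(model, "config") and hasattr(model.config, "model_type") and \
        model.config.model_type == "qwen" and hasattr(model.config, "visual")
    print("new check:", is_qwen_vl)  # False, no exception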