{"id":3432,"date":"2024-04-28T15:10:57","date_gmt":"2024-04-28T07:10:57","guid":{"rendered":"https:\/\/www.aqwu.net\/wp\/?p=3432"},"modified":"2024-05-01T18:21:49","modified_gmt":"2024-05-01T10:21:49","slug":"%e5%9c%a8%e5%8d%95%e4%b8%aa-4gb-gpu-%e4%b8%8a%e8%bf%90%e8%a1%8c-llama-3-70b-%e4%bd%bf%e7%94%a8-airllm-%e5%92%8c%e5%88%86%e5%b1%82%e6%8e%a8%e7%90%86","status":"publish","type":"post","link":"https:\/\/www.aqwu.net\/wp\/?p=3432","title":{"rendered":"\u5728\u5355\u4e2a 4GB GPU \u4e0a\u8fd0\u884c Meta-Llama-3-8B-Instruct &#8211; \u4f7f\u7528 airllm \u548c\u5206\u5c42\u63a8\u7406"},"content":{"rendered":"\n<h2 class=\"wp-block-heading\"><strong>0. \u539f\u7406<\/strong><\/h2>\n\n\n\n<p>\u5206\u5c42\u63a8\u7406\u672c\u8d28\u4e0a\u662f\u201c\u5206\u800c\u6cbb\u4e4b\u201d\u7684\u65b9\u6cd5<\/p>\n\n\n\n<p>\u8fd9\u6ca1\u6709\u4f7f\u7528\u91cf\u5316\u3001\u84b8\u998f\u3001\u4fee\u526a\u6216\u5176\u4ed6\u6a21\u578b\u538b\u7f29\u6280\u672f<\/p>\n\n\n\n<p>\u5927\u578b\u8bed\u8a00\u6a21\u578b\u4e4b\u6240\u4ee5\u4f53\u79ef\u5927\uff0c\u5360\u7528\u5927\u91cf\u5185\u5b58\uff0c\u4e3b\u8981\u662f\u56e0\u4e3a\u5b83\u4eec\u7684\u7ed3\u6784\u5305\u542b\u8bb8\u591a\u201c\u5c42\u201d\u3002 \u4eceLLM\u5d4c\u5165\u6295\u5f71\u5c42\u5f00\u59cb\uff0c\u7136\u540e\u662f\u8bb8\u591a\u76f8\u540c\u7684\u53d8\u538b\u5668\u5c42\u3002 8B \u578b\u53f7\u670932 \u5c42\u3002\u4f46\u662f\u5728\u63a8\u7406\u8fc7\u7a0b\u4e2d\uff0c\u6bcf\u4e00\u5c42\u90fd\u662f\u72ec\u7acb\u7684\uff0c\u53ea\u4f9d\u8d56\u4e8e\u524d\u4e00\u5c42\u7684\u8f93\u51fa\u3002 \u56e0\u6b64\uff0c\u5728\u8fd0\u884c\u5c42\u540e\uff0c\u53ef\u4ee5\u91ca\u653e\u5176\u5185\u5b58\uff0c\u4ec5\u4fdd\u7559\u5c42\u7684\u8f93\u51fa\u3002\u57fa\u4e8e\u8fd9\u4e00\u6982\u5ff5\uff0cAirLLM\u5b9e\u73b0\u4e86\u5206\u5c42\u63a8\u7406\u3002 \u5982\u4f55\u5728\u57fa\u4e8eLLMTransformer\u7684\u63a8\u7406\u8fc7\u7a0b\u4e2d\uff0c\u5404\u5c42\u662f\u6309\u987a\u5e8f\u6267\u884c\u7684\u3002\u4e0a\u4e00\u5c42\u7684\u8f93\u51fa\u662f\u4e0b\u4e00\u5c42\u7684\u8f93\u5165\u3002\u4e00\u6b21\u53ea\u6267\u884c\u4e00\u4e2a\u56fe\u5c42\u3002 \u56e0\u6b64\uff0c\u5b8c\u5168\u6ca1\u6709\u5fc5\u8981\u5c06\u6240\u6709\u5c42\u90fd\u4fdd\u7559\u5728 GPU \u5185\u5b58\u4e2d\u3002\u6211\u4eec\u53ef\u4ee5\u5728\u6267\u884c\u8be5\u5c42\u65f6\u4ece\u78c1\u76d8\u52a0\u8f7d\u6240\u9700\u7684\u4efb\u4f55\u5c42\uff0c\u6267\u884c\u6240\u6709\u8ba1\u7b97\uff0c\u7136\u540e\u5b8c\u5168\u91ca\u653e\u5185\u5b58\u3002 \u8fd9\u6837\u4e00\u6765\uff0c\u6bcf\u5c42\u6240\u9700\u7684 GPU \u5185\u5b58\u4ec5\u4e3a\u4e00\u4e2a transformer \u5c42\u7684\u53c2\u6570\u5927\u5c0f\uff0c\u5373\u5b8c\u6574\u578b\u53f7\u7684 1\/32\uff0c\u7ea6\u4e3a 417MB\u3002<\/p>\n\n\n\n<p>\u7136\u540e\u4f7f\u7528\u95ea\u5b58\u6df1\u5ea6\u4f18\u5316cuda\u5185\u5b58\u8bbf\u95ee\uff0c\u5b9e\u73b0\u591a\u500d\u52a0\u901f\u6309\u5c42\u5206\u7247\u6a21\u578b\u6587\u4ef6\u3002<\/p>\n\n\n\n<p>\u4f7f\u7528 HuggingFace Accelerate \u63d0\u4f9b\u7684\u5143\u8bbe\u5907\u529f\u80fd\u3002\u5f53\u60a8\u901a\u8fc7\u5143\u8bbe\u5907\u52a0\u8f7d\u6a21\u578b\u65f6\uff0c\u5b9e\u9645\u4e0a\u4e0d\u4f1a\u8bfb\u53d6\u6a21\u578b\u6570\u636e\uff0c\u53ea\u4f1a\u52a0\u8f7d\u4ee3\u7801\u3002\u5185\u5b58\u4f7f\u7528\u7387\u4e3a 0\u3002<\/p>\n\n\n\n<p>\u63d0\u4f9b\u4f7f\u7528\u201c\u538b\u7f29\u201d\u53c2\u6570\u8fdb\u884c\u91cf\u5316\u7684\u9009\u9879 \u201c\u538b\u7f29\u201d\uff1a\u652f\u6301\u7684\u9009\u9879\uff1a4 \u4f4d\u30018 \u4f4d\u7528\u4e8e 4 \u4f4d\u6216 8 \u4f4d\u5757\u7ea7\u91cf\u5316<\/p>\n\n\n\n<p>\u5f15\u7528\u8fde\u63a5\uff1a<a 
href=\"https:\/\/twitter.com\/rohanpaul_ai\/status\/1784349737899982943\">https:\/\/github.com\/lyogavin\/Anima<\/a><\/p>\n\n\n\n<h2 class=\"wp-block-heading\"><strong>1. \u63a8\u7406 Meta-Llama-3-8B-Instruct<\/strong><\/h2>\n\n\n\n<h3 class=\"wp-block-heading\"><strong>1.1 \u67e5\u770b config.json<\/strong><\/h3>\n\n\n\n<div class=\"wp-block-urvanov-syntax-highlighter-code-block\"><pre class=\"lang:python decode:true \">{\n  \"architectures\": [\n    \"LlamaForCausalLM\"\n  ],\n  \"attention_bias\": false,\n  \"attention_dropout\": 0.0,\n  \"bos_token_id\": 128000,\n  \"eos_token_id\": 128001,\n  \"hidden_act\": \"silu\",\n  \"hidden_size\": 4096,\n  \"initializer_range\": 0.02,\n  \"intermediate_size\": 14336,\n  \"max_position_embeddings\": 8192,\n  \"model_type\": \"llama\",\n  \"num_attention_heads\": 32,\n  \"num_hidden_layers\": 32,\n  \"num_key_value_heads\": 8,\n  \"pretraining_tp\": 1,\n  \"rms_norm_eps\": 1e-05,\n  \"rope_scaling\": null,\n  \"rope_theta\": 500000.0,\n  \"tie_word_embeddings\": false,\n  \"torch_dtype\": \"bfloat16\",\n  \"transformers_version\": \"4.40.0.dev0\",\n  \"use_cache\": true,\n  \"vocab_size\": 128256\n}\n<\/pre><\/div>\n\n\n\n<ol class=\"wp-block-list\">\n<li><strong>\u6a21\u578b\u7c7b\u578b (model_type)<\/strong>: llama<\/li>\n\n\n\n<li><strong>\u9690\u85cf\u5c42\u6fc0\u6d3b\u51fd\u6570 (hidden_act)<\/strong>: silu<\/li>\n\n\n\n<li><strong>\u9690\u85cf\u5c42\u5927\u5c0f (hidden_size)<\/strong>: 4096<\/li>\n\n\n\n<li><strong>\u4e2d\u95f4\u5c42\u5927\u5c0f (intermediate_size)<\/strong>: 14336<\/li>\n\n\n\n<li><strong>\u6700\u5927\u4f4d\u7f6e\u5d4c\u5165\u6570 (max_position_embeddings)<\/strong>: 8192<\/li>\n\n\n\n<li><strong>\u6ce8\u610f\u529b\u5934\u6570 (num_attention_heads)<\/strong>: 64<\/li>\n\n\n\n<li><strong>\u9690\u85cf\u5c42\u6570 (num_hidden_layers)<\/strong>: 32<\/li>\n\n\n\n<li><strong>\u952e\u503c\u5934\u6570 (num_key_value_heads)<\/strong>: 8<\/li>\n\n\n\n<li><strong>\u8bcd\u6c47\u8868\u5927\u5c0f (vocab_size)<\/strong>: 128256<\/li>\n\n\n\n<li><strong>\u521d\u59cb\u5316\u8303\u56f4 (initializer_range)<\/strong>: 0.02<\/li>\n\n\n\n<li><strong>\u6ce8\u610f\u529b\u4e22\u5931 (attention_dropout)<\/strong>: 0.0<\/li>\n\n\n\n<li><strong>\u6807\u51c6\u5316\u4f59\u5f26\u9608\u503c (rms_norm_eps)<\/strong>: 1e-05<\/li>\n\n\n\n<li><strong>\u7ed1\u5b9a\u8bcd\u5d4c\u5165 (tie_word_embeddings)<\/strong>: false<\/li>\n\n\n\n<li><strong>\u5f20\u91cf\u6570\u636e\u7c7b\u578b (torch_dtype)<\/strong>: bfloat16<\/li>\n\n\n\n<li><strong>Transformers\u7248\u672c (transformers_version)<\/strong>: 4.40.0.dev0<\/li>\n\n\n\n<li><strong>\u662f\u5426\u4f7f\u7528\u7f13\u5b58 (use_cache)<\/strong>: true<\/li>\n\n\n\n<li><strong>\u8d77\u59cb\u6807\u8bb0ID (bos_token_id)<\/strong>: 128000<\/li>\n\n\n\n<li><strong>\u7ed3\u675f\u6807\u8bb0ID (eos_token_id)<\/strong>: 128001<\/li>\n\n\n\n<li><strong>Rope theta\u503c (rope_theta)<\/strong>: 500000.0<\/li>\n<\/ol>\n\n\n\n<p>\u4ece\u914d\u7f6e\u6587\u4ef6\u91cc\u7684\u53c2\u6570 num_hidden_layers \u4e3a 80\uff0c\u6807\u8bc6\u6a21\u578b\u6743\u91cd\u670980\u5c42<\/p>\n\n\n\n<h3 class=\"wp-block-heading\"><strong>1.2 \u5c1d\u8bd5\u52a0\u8f7d<\/strong><\/h3>\n\n\n\n<p>\u53c2\u7167 airllm \u7684\u4f8b\u5b50\uff0c\u52a0\u8f7d Meta-Llama-3-8B-Instruct<\/p>\n\n\n\n<div class=\"wp-block-urvanov-syntax-highlighter-code-block\"><pre class=\"lang:python decode:true \" >from airllm import AutoModel\n\nMAX_LENGTH = 128\n# could use hugging face model repo id:\nmodel = 
AutoModel.from_pretrained(\"meta-llama\/Meta-Llama-3-8B-Instruct\")\ninput_text = [\n        'What is the capital of United States?',\n        #'I like',\n    ]\n\ninput_tokens = model.tokenizer(input_text,\n    return_tensors=\"pt\", \n    return_attention_mask=False, \n    truncation=True, \n    max_length=MAX_LENGTH, \n    padding=False)\n           \nprint(f'input_tokens:{len(input_tokens.input_ids[0])}')\n           \ngeneration_output = model.generate(\n    input_tokens['input_ids'].cuda(), \n    max_new_tokens=20,\n    use_cache=True,\n    return_dict_in_generate=True)\n\nprint(f'output_tokens:{len(generation_output.sequences[0])}')\noutput = model.tokenizer.decode(generation_output.sequences[0])\nprint(output)\n<\/pre><\/div>\n\n\n\n<p>\u8fd0\u884c\u7ed3\u679c<\/p>\n\n\n\n<div class=\"wp-block-urvanov-syntax-highlighter-code-block\"><pre class=\"lang:python decode:true \" >python test_airllm_8B.py\n&gt;&gt;&gt;&gt; bitsandbytes installed\n&gt;&gt;&gt;&gt; cache_utils installed\nfound index file...\n  0%|                                      | 0\/35 [00:00&lt;?, ?it\/s]Loading shard 1\/4\nsaved as: meta-llama\/Meta-Llama-3-8B-Instruct\/splitted_model\/model.embed_tokens.safetensors\n  3%|\u2588\u2588\u2588\u258b                                  | 1\/35 [00:39&lt;22:19, 39.39s\/it]\nsaved as: meta-llama\/Meta-Llama-3-8B-Instruct\/splitted_model\/model.layers.0.safetensors\n  6%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588                               | 2\/35 [00:55&lt;14:09, 25.74s\/it]\nsaved as: meta-llama\/Meta-Llama-3-8B-Instruct\/splitted_model\/model.layers.1.safetensors\n  9%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2589                           | 3\/35 [01:12&lt;11:40, 21.88s\/it]\nsaved as: meta-llama\/Meta-Llama-3-8B-Instruct\/splitted_model\/model.layers.2.safetensors\n 11%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u258c                       | 4\/35 [01:29&lt;10:16, 19.87s\/it]\nsaved as: meta-llama\/Meta-Llama-3-8B-Instruct\/splitted_model\/model.layers.3.safetensors\n\n...\n\nsaved as: meta-llama\/Meta-Llama-3-8B-Instruct\/splitted_model\/lm_head.safetensors\n100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588 | 35\/35 [10:02&lt;00:00, 17.23s\/it]\n\nSpecial tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.\nnew version of transfomer, no need to use BetterTransformer, try setting attn impl to sdpa...\ninput_tokens:8\nThe attention mask and the pad token id were not set. As a consequence, you may observe unexpected behavior. 
Run results:

```
python test_airllm_8B.py
>>>> bitsandbytes installed
>>>> cache_utils installed
found index file...
  0%|                                      | 0/35 [00:00<?, ?it/s]Loading shard 1/4
saved as: meta-llama/Meta-Llama-3-8B-Instruct/splitted_model/model.embed_tokens.safetensors
  3%|███▋                                  | 1/35 [00:39<22:19, 39.39s/it]
saved as: meta-llama/Meta-Llama-3-8B-Instruct/splitted_model/model.layers.0.safetensors
  6%|███████                               | 2/35 [00:55<14:09, 25.74s/it]
saved as: meta-llama/Meta-Llama-3-8B-Instruct/splitted_model/model.layers.1.safetensors

...

saved as: meta-llama/Meta-Llama-3-8B-Instruct/splitted_model/lm_head.safetensors
100%|███████████████████ | 35/35 [10:02<00:00, 17.23s/it]

Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
new version of transfomer, no need to use BetterTransformer, try setting attn impl to sdpa...
input_tokens:8
The attention mask and the pad token id were not set. As a consequence, you may observe unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results.
Setting `pad_token_id` to `eos_token_id`:128001 for open-end generation.
new version of transfomer, no need to use BetterTransformer, try setting attn impl to sdpa...
attn imp: <class 'transformers.models.llama.modeling_llama.LlamaSdpaAttention'>
running layers(self.running_device): 100%|█████████████████████████████| 35/35 [02:18<00:00,  3.95s/it]
... (this pass repeats for each of the 20 generated tokens, each preceded by the same two log lines and taking between 02:13 and 02:24) ...
running layers(self.running_device): 100%|█████████████████████████████| 35/35 [02:17<00:00,  3.94s/it]
output_tokens:28
What is the capital of United States? A) Washington D.C. B) New York C) Los Angeles D) Chicago
Answer:
```
Looking at the process, airllm simply saves each layer into the splitted_model directory: 35 shards in all, i.e. the embedding layer, the 32 transformer layers, the final norm, and the lm_head.

Listing the files:

```sh
ls -l
total 15684244
-rwxrwxrwx 1 tony tony 1050673248 Apr 28 22:19 lm_head.safetensors
-rwxrwxrwx 1 tony tony          0 Apr 28 22:19 lm_head.safetensors.done
-rwxrwxrwx 1 tony tony 1050673264 Apr 28 22:09 model.embed_tokens.safetensors
-rwxrwxrwx 1 tony tony          0 Apr 28 22:09 model.embed_tokens.safetensors.done
-rwxrwxrwx 1 tony tony  436225016 Apr 28 22:10 model.layers.0.safetensors
-rwxrwxrwx 1 tony tony          0 Apr 28 22:10 model.layers.0.safetensors.done
-rwxrwxrwx 1 tony tony  436225016 Apr 28 22:10 model.layers.1.safetensors
-rwxrwxrwx 1 tony tony          0 Apr 28 22:10 model.layers.1.safetensors.done
-rwxrwxrwx 1 tony tony  436225024 Apr 28 22:12 model.layers.10.safetensors
-rwxrwxrwx 1 tony tony          0 Apr 28 22:12 model.layers.10.safetensors.done
-rwxrwxrwx 1 tony tony  436225024 Apr 28 22:13 model.layers.11.safetensors
-rwxrwxrwx 1 tony tony          0 Apr 28 22:13 model.layers.11.safetensors.done
-rwxrwxrwx 1 tony tony  436225024 Apr 28 22:13 model.layers.12.safetensors
-rwxrwxrwx 1 tony tony          0 Apr 28 22:13 model.layers.12.safetensors.done
-rwxrwxrwx 1 tony tony  436225024 Apr 28 22:13 model.layers.13.safetensors
-rwxrwxrwx 1 tony tony          0 Apr 28 22:13 model.layers.13.safetensors.done
-rwxrwxrwx 1 tony tony  436225024 Apr 28 22:13 model.layers.14.safetensors
-rwxrwxrwx 1 tony tony          0 Apr 28 22:13 model.layers.14.safetensors.done
-rwxrwxrwx 1 tony tony  436225024 Apr 28 22:14 model.layers.15.safetensors
-rwxrwxrwx 1 tony tony          0 Apr 28 22:14 model.layers.15.safetensors.done
-rwxrwxrwx 1 tony tony  436225024 Apr 28 22:14 model.layers.16.safetensors
-rwxrwxrwx 1 tony tony          0 Apr 28 22:14 model.layers.16.safetensors.done
-rwxrwxrwx 1 tony tony  436225024 Apr 28 22:14 model.layers.17.safetensors
-rwxrwxrwx 1 tony tony          0 Apr 28 22:14 model.layers.17.safetensors.done
-rwxrwxrwx 1 tony tony  436225024 Apr 28 22:14 model.layers.18.safetensors
-rwxrwxrwx 1 tony tony          0 Apr 28 22:14 model.layers.18.safetensors.done
-rwxrwxrwx 1 tony tony  436225024 Apr 28 22:15 model.layers.19.safetensors
-rwxrwxrwx 1 tony tony          0 Apr 28 22:15 model.layers.19.safetensors.done
-rwxrwxrwx 1 tony tony  436225016 Apr 28 22:10 model.layers.2.safetensors
-rwxrwxrwx 1 tony tony          0 Apr 28 22:10 model.layers.2.safetensors.done
-rwxrwxrwx 1 tony tony  436225024 Apr 28 22:15 model.layers.20.safetensors
-rwxrwxrwx 1 tony tony          0 Apr 28 22:15 model.layers.20.safetensors.done
-rwxrwxrwx 1 tony tony  436225024 Apr 28 22:15 model.layers.21.safetensors
-rwxrwxrwx 1 tony tony          0 Apr 28 22:15 model.layers.21.safetensors.done
-rwxrwxrwx 1 tony tony  436225024 Apr 28 22:16 model.layers.22.safetensors
-rwxrwxrwx 1 tony tony          0 Apr 28 22:16 model.layers.22.safetensors.done
-rwxrwxrwx 1 tony tony  436225024 Apr 28 22:16 model.layers.23.safetensors
-rwxrwxrwx 1 tony tony          0 Apr 28 22:16 model.layers.23.safetensors.done
-rwxrwxrwx 1 tony tony  436225024 Apr 28 22:16 model.layers.24.safetensors
-rwxrwxrwx 1 tony tony          0 Apr 28 22:16 model.layers.24.safetensors.done
-rwxrwxrwx 1 tony tony  436225024 Apr 28 22:16 model.layers.25.safetensors
-rwxrwxrwx 1 tony tony          0 Apr 28 22:16 model.layers.25.safetensors.done
-rwxrwxrwx 1 tony tony  436225024 Apr 28 22:17 model.layers.26.safetensors
-rwxrwxrwx 1 tony tony          0 Apr 28 22:17 model.layers.26.safetensors.done
-rwxrwxrwx 1 tony tony  436225024 Apr 28 22:17 model.layers.27.safetensors
-rwxrwxrwx 1 tony tony          0 Apr 28 22:17 model.layers.27.safetensors.done
-rwxrwxrwx 1 tony tony  436225024 Apr 28 22:17 model.layers.28.safetensors
-rwxrwxrwx 1 tony tony          0 Apr 28 22:17 model.layers.28.safetensors.done
-rwxrwxrwx 1 tony tony  436225024 Apr 28 22:17 model.layers.29.safetensors
-rwxrwxrwx 1 tony tony          0 Apr 28 22:17 model.layers.29.safetensors.done
-rwxrwxrwx 1 tony tony  436225016 Apr 28 22:10 model.layers.3.safetensors
-rwxrwxrwx 1 tony tony          0 Apr 28 22:10 model.layers.3.safetensors.done
-rwxrwxrwx 1 tony tony  436225024 Apr 28 22:18 model.layers.30.safetensors
-rwxrwxrwx 1 tony tony          0 Apr 28 22:18 model.layers.30.safetensors.done
-rwxrwxrwx 1 tony tony  436225024 Apr 28 22:18 model.layers.31.safetensors
-rwxrwxrwx 1 tony tony          0 Apr 28 22:18 model.layers.31.safetensors.done
-rwxrwxrwx 1 tony tony  436225016 Apr 28 22:11 model.layers.4.safetensors
-rwxrwxrwx 1 tony tony          0 Apr 28 22:11 model.layers.4.safetensors.done
-rwxrwxrwx 1 tony tony  436225016 Apr 28 22:11 model.layers.5.safetensors
-rwxrwxrwx 1 tony tony          0 Apr 28 22:11 model.layers.5.safetensors.done
-rwxrwxrwx 1 tony tony  436225016 Apr 28 22:11 model.layers.6.safetensors
-rwxrwxrwx 1 tony tony          0 Apr 28 22:11 model.layers.6.safetensors.done
-rwxrwxrwx 1 tony tony  436225016 Apr 28 22:11 model.layers.7.safetensors
-rwxrwxrwx 1 tony tony          0 Apr 28 22:11 model.layers.7.safetensors.done
-rwxrwxrwx 1 tony tony  436225016 Apr 28 22:12 model.layers.8.safetensors
-rwxrwxrwx 1 tony tony          0 Apr 28 22:12 model.layers.8.safetensors.done
-rwxrwxrwx 1 tony tony  436225016 Apr 28 22:12 model.layers.9.safetensors
-rwxrwxrwx 1 tony tony          0 Apr 28 22:12 model.layers.9.safetensors.done
-rwxrwxrwx 1 tony tony       8280 Apr 28 22:18 model.norm.safetensors
-rwxrwxrwx 1 tony tony          0 Apr 28 22:18 model.norm.safetensors.done
```

The first run has to save all these files, so it is very slow, taking over two hours; each layer file is about 417 MB.

The second run simply checks whether these files already exist, so it is naturally much faster; the sketch below shows the marker pattern, and the timing follows.
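The zero-byte `.done` files in the listing are completion markers: a shard counts as saved only once its marker exists, so an interrupted split can resume safely. A minimal sketch of this skip-if-done pattern (illustrative, not airllm's exact code; `save_shard` is a hypothetical helper):

```python
import os
from safetensors.torch import save_file

def shard_is_ready(shard_dir, name):
    # Trust a shard only if its zero-byte completion marker exists.
    return os.path.exists(os.path.join(shard_dir, f"{name}.safetensors.done"))

def save_shard(shard_dir, name, tensors):
    if shard_is_ready(shard_dir, name):
        return  # already split on a previous (possibly interrupted) run
    path = os.path.join(shard_dir, f"{name}.safetensors")
    save_file(tensors, path)
    open(path + ".done", "w").close()  # write the marker only after a full save
```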
```
output_tokens:28
What is the capital of United States? A) Washington D.C. B) New York C) Los Angeles D) Chicago
Answer:

real    47m12.458s
user    18m15.109s
sys     20m54.825s
```

As before, the input is 8 tokens and the output totals 28 tokens, so 28 - 8 = 20 generation rounds in all, each taking a bit over two minutes.

### 1.3 Try 4-bit loading

All it takes is adding compression='4bit' ('8bit' selects 8-bit compression):

```python
from airllm import AutoModel

MAX_LENGTH = 128
# could use hugging face model repo id:
model = AutoModel.from_pretrained(
            "meta-llama/Meta-Llama-3-8B-Instruct",
            compression='4bit',
            prefetching=False)

input_text = [
        'What is the capital of United States?',
        #'I like',
    ]

input_tokens = model.tokenizer(input_text,
    return_tensors="pt",
    return_attention_mask=False,
    truncation=True,
    max_length=MAX_LENGTH,
    padding=False)

print(f'input_tokens:{len(input_tokens.input_ids[0])}')

generation_output = model.generate(
    input_tokens['input_ids'].cuda(),
    max_new_tokens=20,
    use_cache=True,
    return_dict_in_generate=True)

print(f'output_tokens:{len(generation_output.sequences[0])}')
output = model.tokenizer.decode(generation_output.sequences[0])

print(output)
```

```sh
time python test_airllm_8B.py
```
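Before looking at the files, we can predict the new shard size. Assuming bitsandbytes-style block-wise quantization with 4-bit weights plus one fp32 absmax scale per 64-weight block (an assumption; the logs do show airllm pulling in bitsandbytes), each weight costs about 4.5 bits:

```python
params = 218_112_000              # parameters per layer, from the section 1.1 estimate
bits_per_weight = 4 + 32 / 64     # 4-bit weight + fp32 absmax per 64-weight block (assumed)
print(params * bits_per_weight / 8 / 2**20)   # ~117 MiB, vs ~416 MiB unquantized
```

This lines up with the 122,693,729-byte layer files in the listing below.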
After the run, a new 4-bit directory is produced; generating its files takes about 10 minutes:

```sh
ls -la
total 4411404
drwxrwxrwx 1 tony tony      4096 Apr 29 11:32 .
drwxrwxrwx 1 tony tony      4096 Apr 29 11:22 ..
-rwxrwxrwx 1 tony tony 295502380 Apr 29 11:32 lm_head.safetensors
-rwxrwxrwx 1 tony tony         0 Apr 29 11:32 lm_head.safetensors.done
-rwxrwxrwx 1 tony tony 295502420 Apr 29 11:23 model.embed_tokens.safetensors
-rwxrwxrwx 1 tony tony         0 Apr 29 11:23 model.embed_tokens.safetensors.done
-rwxrwxrwx 1 tony tony 122693697 Apr 29 11:23 model.layers.0.safetensors
-rwxrwxrwx 1 tony tony         0 Apr 29 11:23 model.layers.0.safetensors.done
-rwxrwxrwx 1 tony tony 122693697 Apr 29 11:23 model.layers.1.safetensors
-rwxrwxrwx 1 tony tony         0 Apr 29 11:23 model.layers.1.safetensors.done
-rwxrwxrwx 1 tony tony 122693729 Apr 29 11:26 model.layers.10.safetensors
-rwxrwxrwx 1 tony tony         0 Apr 29 11:26 model.layers.10.safetensors.done
-rwxrwxrwx 1 tony tony 122693729 Apr 29 11:26 model.layers.11.safetensors
-rwxrwxrwx 1 tony tony         0 Apr 29 11:26 model.layers.11.safetensors.done
-rwxrwxrwx 1 tony tony 122693729 Apr 29 11:26 model.layers.12.safetensors
-rwxrwxrwx 1 tony tony         0 Apr 29 11:26 model.layers.12.safetensors.done
-rwxrwxrwx 1 tony tony 122693729 Apr 29 11:27 model.layers.13.safetensors
-rwxrwxrwx 1 tony tony         0 Apr 29 11:27 model.layers.13.safetensors.done
-rwxrwxrwx 1 tony tony 122693729 Apr 29 11:27 model.layers.14.safetensors
-rwxrwxrwx 1 tony tony         0 Apr 29 11:27 model.layers.14.safetensors.done
-rwxrwxrwx 1 tony tony 122693729 Apr 29 11:27 model.layers.15.safetensors
-rwxrwxrwx 1 tony tony         0 Apr 29 11:27 model.layers.15.safetensors.done
-rwxrwxrwx 1 tony tony 122693729 Apr 29 11:28 model.layers.16.safetensors
-rwxrwxrwx 1 tony tony         0 Apr 29 11:28 model.layers.16.safetensors.done
-rwxrwxrwx 1 tony tony 122693729 Apr 29 11:28 model.layers.17.safetensors
-rwxrwxrwx 1 tony tony         0 Apr 29 11:28 model.layers.17.safetensors.done
-rwxrwxrwx 1 tony tony 122693729 Apr 29 11:28 model.layers.18.safetensors
-rwxrwxrwx 1 tony tony         0 Apr 29 11:28 model.layers.18.safetensors.done
-rwxrwxrwx 1 tony tony 122693729 Apr 29 11:28 model.layers.19.safetensors
-rwxrwxrwx 1 tony tony         0 Apr 29 11:28 model.layers.19.safetensors.done
-rwxrwxrwx 1 tony tony 122693697 Apr 29 11:23 model.layers.2.safetensors
-rwxrwxrwx 1 tony tony         0 Apr 29 11:23 model.layers.2.safetensors.done
-rwxrwxrwx 1 tony tony 122693729 Apr 29 11:29 model.layers.20.safetensors
-rwxrwxrwx 1 tony tony         0 Apr 29 11:29 model.layers.20.safetensors.done
-rwxrwxrwx 1 tony tony 122693729 Apr 29 11:29 model.layers.21.safetensors
-rwxrwxrwx 1 tony tony         0 Apr 29 11:29 model.layers.21.safetensors.done
-rwxrwxrwx 1 tony tony 122693729 Apr 29 11:29 model.layers.22.safetensors
-rwxrwxrwx 1 tony tony         0 Apr 29 11:29 model.layers.22.safetensors.done
-rwxrwxrwx 1 tony tony 122693729 Apr 29 11:30 model.layers.23.safetensors
-rwxrwxrwx 1 tony tony         0 Apr 29 11:30 model.layers.23.safetensors.done
-rwxrwxrwx 1 tony tony 122693729 Apr 29 11:30 model.layers.24.safetensors
-rwxrwxrwx 1 tony tony         0 Apr 29 11:30 model.layers.24.safetensors.done
-rwxrwxrwx 1 tony tony 122693729 Apr 29 11:30 model.layers.25.safetensors
-rwxrwxrwx 1 tony tony         0 Apr 29 11:30 model.layers.25.safetensors.done
-rwxrwxrwx 1 tony tony 122693729 Apr 29 11:30 model.layers.26.safetensors
-rwxrwxrwx 1 tony tony         0 Apr 29 11:30 model.layers.26.safetensors.done
-rwxrwxrwx 1 tony tony 122693729 Apr 29 11:31 model.layers.27.safetensors
-rwxrwxrwx 1 tony tony         0 Apr 29 11:31 model.layers.27.safetensors.done
-rwxrwxrwx 1 tony tony 122693729 Apr 29 11:31 model.layers.28.safetensors
-rwxrwxrwx 1 tony tony         0 Apr 29 11:31 model.layers.28.safetensors.done
-rwxrwxrwx 1 tony tony 122693729 Apr 29 11:31 model.layers.29.safetensors
-rwxrwxrwx 1 tony tony         0 Apr 29 11:31 model.layers.29.safetensors.done
-rwxrwxrwx 1 tony tony 122693697 Apr 29 11:24 model.layers.3.safetensors
-rwxrwxrwx 1 tony tony         0 Apr 29 11:24 model.layers.3.safetensors.done
-rwxrwxrwx 1 tony tony 122693729 Apr 29 11:32 model.layers.30.safetensors
-rwxrwxrwx 1 tony tony         0 Apr 29 11:32 model.layers.30.safetensors.done
-rwxrwxrwx 1 tony tony 122693729 Apr 29 11:32 model.layers.31.safetensors
-rwxrwxrwx 1 tony tony         0 Apr 29 11:32 model.layers.31.safetensors.done
-rwxrwxrwx 1 tony tony 122693697 Apr 29 11:24 model.layers.4.safetensors
-rwxrwxrwx 1 tony tony         0 Apr 29 11:24 model.layers.4.safetensors.done
-rwxrwxrwx 1 tony tony 122693697 Apr 29 11:24 model.layers.5.safetensors
-rwxrwxrwx 1 tony tony         0 Apr 29 11:24 model.layers.5.safetensors.done
-rwxrwxrwx 1 tony tony 122693697 Apr 29 11:25 model.layers.6.safetensors
-rwxrwxrwx 1 tony tony         0 Apr 29 11:25 model.layers.6.safetensors.done
-rwxrwxrwx 1 tony tony 122693697 Apr 29 11:25 model.layers.7.safetensors
-rwxrwxrwx 1 tony tony         0 Apr 29 11:25 model.layers.7.safetensors.done
-rwxrwxrwx 1 tony tony 122693697 Apr 29 11:25 model.layers.8.safetensors
-rwxrwxrwx 1 tony tony         0 Apr 29 11:25 model.layers.8.safetensors.done
-rwxrwxrwx 1 tony tony 122693697 Apr 29 11:25 model.layers.9.safetensors
-rwxrwxrwx 1 tony tony         0 Apr 29 11:25 model.layers.9.safetensors.done
-rwxrwxrwx 1 tony tony      2820 Apr 29 11:32 model.norm.safetensors
-rwxrwxrwx 1 tony tony         0 Apr 29 11:32 model.norm.safetensors.done
```

Each layer file is now about 118 MB, roughly a quarter of the uncompressed version.

Total elapsed time:

```
real    49m44.333s
user    3m19.002s
sys     1m52.240s
```
### 1.4 Keep layers in memory after 4-bit loading

The original code reloads every layer from disk on every pass. Let's change it so that each layer stays in memory once loaded. First clone the project (the version at the time of writing is 2.8.3):

```sh
git clone https://github.com/lyogavin/Anima/
```

#### 1.4.1 Bump the version number

```sh
cd Anima/air_llm
```

Edit the version number in setup.py:

```python
...
setuptools.setup(
    name="airllm",
    version="2.8.5",
...
```

#### 1.4.2 Modify the load_layer_to_cpu function

Make two changes in airllm/airllm_base.py. First, add a module-level variable loaded_layers to hold each layer after it has been loaded:

```python
...
loaded_layers = {}

class AirLLMBaseModel(GenerationMixin):

...
```

The modified load_layer_to_cpu function:

```python
    def load_layer_to_cpu(self, layer_name):
        t = time.time()

        # Check whether the layer is already in the module-level dict
        if layer_name in loaded_layers:
            state_dict = loaded_layers[layer_name]
        else:
            load_layer_output = load_layer(self.checkpoint_path, layer_name, self.profiling_mode)
            elapsed_time = time.time() - t

            if self.profiling_mode:
                state_dict, compression_time = load_layer_output
                disk_loading_time = elapsed_time - compression_time

                self.profiler.add_profiling_time('load_safe_tensor', disk_loading_time)
                self.profiler.add_profiling_time('compression_time', compression_time)
            else:
                state_dict = load_layer_output

            # Store it in the module-level dict
            loaded_layers[layer_name] = state_dict

        # pin memory:
        if self.prefetching:
            t = time.time()
            for k in state_dict.keys():
                state_dict[k].pin_memory()

            elapsed_time = time.time() - t
            if self.profiling_mode:
                self.profiler.add_profiling_time('pin_memory_to_trigger_load', elapsed_time)

        return state_dict
```
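Note that this cache is deliberately unbounded, so after the first full pass the entire 4-bit model sits in host RAM. The listing from section 1.3 lets us check what that costs:

```python
# Host RAM held by loaded_layers after one full pass (shard sizes from ls -la above).
layers = 32 * 122_693_729                     # model.layers.0 .. model.layers.31
embed_and_head = 295_502_420 + 295_502_380    # model.embed_tokens + lm_head
total = layers + embed_and_head               # model.norm (2.8 KB) is negligible
print(f"{total / 2**30:.1f} GiB")             # ~4.2 GiB, matching "total 4411404" (1K blocks)
```

About 4.2 GiB of host RAM buys the speedup measured below, while GPU memory still holds only one layer at a time. An LRU-style cap on the dict would not help here, because the layers are visited cyclically, so any cache smaller than the full stack would keep evicting the layer needed next.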
#### 1.4.3 Install it into your environment

```sh
pip install -e .
```

#### 1.4.4 Test again

```
time python test_airllm_8B.py
>>>> bitsandbytes installed
>>>> cache_utils installed
found index file...
found_layers:{'model.embed_tokens.': True, 'model.layers.0.': True, 'model.layers.1.': True, 'model.layers.2.': True, 'model.layers.3.': True, 'model.layers.4.': True, 'model.layers.5.': True, 'model.layers.6.': True, 'model.layers.7.': True, 'model.layers.8.': True, 'model.layers.9.': True, 'model.layers.10.': True, 'model.layers.11.': True, 'model.layers.12.': True, 'model.layers.13.': True, 'model.layers.14.': True, 'model.layers.15.': True, 'model.layers.16.': True, 'model.layers.17.': True, 'model.layers.18.': True, 'model.layers.19.': True, 'model.layers.20.': True, 'model.layers.21.': True, 'model.layers.22.': True, 'model.layers.23.': True, 'model.layers.24.': True, 'model.layers.25.': True, 'model.layers.26.': True, 'model.layers.27.': True, 'model.layers.28.': True, 'model.layers.29.': True, 'model.layers.30.': True, 'model.layers.31.': True, 'model.norm.': True, 'lm_head.': True}
saved layers already found in meta-llama/Meta-Llama-3-8B-Instruct/splitted_model.4bit
Special tokens have been added in the vocabulary, make sure the associated word embeddings are fine-tuned or trained.
new version of transfomer, no need to use BetterTransformer, try setting attn impl to sdpa...
attn imp: <class 'transformers.models.llama.modeling_llama.LlamaSdpaAttention'>
not support prefetching for compression for now. loading with no prepetching mode.
input_tokens:8
The attention mask and the pad token id were not set. As a consequence, you may observe unexpected behavior. Please pass your input's `attention_mask` to obtain reliable results.
Setting `pad_token_id` to `eos_token_id`:128001 for open-end generation.
new version of transfomer, no need to use BetterTransformer, try setting attn impl to sdpa...
attn imp: <class 'transformers.models.llama.modeling_llama.LlamaSdpaAttention'>
running layers(self.running_device): 100%|█████████████████| 35/35 [01:59<00:00,  3.41s/it]
running layers(self.running_device): 100%|█████████████████| 35/35 [00:07<00:00,  4.45it/s]
... (the remaining passes, each preceded by the same two log lines, all finish in 6-7 s at 4.4-5.5 it/s) ...
running layers(self.running_device): 100%|█████████████████| 35/35 [00:06<00:00,  5.10it/s]
output_tokens:28
What is the capital of United States? A) Washington D.C. B) New York C) Los Angeles D) Chicago
Answer:

real    4m27.649s
user    2m48.139s
sys     0m10.896s
```

As you can see, the first inference pass is unchanged at 1 minute 59 seconds, but the following 19 passes each take only about 7 seconds, cutting the overall time by roughly a factor of 10.
center","background-size":"auto","background-attachment":"scroll","background-type":"","background-media":"","overlay-type":"","overlay-color":"","overlay-opacity":"","overlay-gradient":""},"mobile":{"background-color":"var(--ast-global-color-5)","background-image":"","background-repeat":"repeat","background-position":"center center","background-size":"auto","background-attachment":"scroll","background-type":"","background-media":"","overlay-type":"","overlay-color":"","overlay-opacity":"","overlay-gradient":""}},"_jetpack_memberships_contains_paid_content":false,"footnotes":""},"categories":[444,443,442],"tags":[242,440,314,439],"class_list":["post-3432","post","type-post","status-publish","format-standard","hentry","category-ai","category-llm","category-llms","tag-chatgpt","tag-llama-3-70b","tag-openai-api","tag-439"],"views":3807,"jetpack_sharing_enabled":true,"jetpack_featured_media_url":"","_links":{"self":[{"href":"https:\/\/www.aqwu.net\/wp\/index.php?rest_route=\/wp\/v2\/posts\/3432","targetHints":{"allow":["GET"]}}],"collection":[{"href":"https:\/\/www.aqwu.net\/wp\/index.php?rest_route=\/wp\/v2\/posts"}],"about":[{"href":"https:\/\/www.aqwu.net\/wp\/index.php?rest_route=\/wp\/v2\/types\/post"}],"author":[{"embeddable":true,"href":"https:\/\/www.aqwu.net\/wp\/index.php?rest_route=\/wp\/v2\/users\/1"}],"replies":[{"embeddable":true,"href":"https:\/\/www.aqwu.net\/wp\/index.php?rest_route=%2Fwp%2Fv2%2Fcomments&post=3432"}],"version-history":[{"count":58,"href":"https:\/\/www.aqwu.net\/wp\/index.php?rest_route=\/wp\/v2\/posts\/3432\/revisions"}],"predecessor-version":[{"id":3504,"href":"https:\/\/www.aqwu.net\/wp\/index.php?rest_route=\/wp\/v2\/posts\/3432\/revisions\/3504"}],"wp:attachment":[{"href":"https:\/\/www.aqwu.net\/wp\/index.php?rest_route=%2Fwp%2Fv2%2Fmedia&parent=3432"}],"wp:term":[{"taxonomy":"category","embeddable":true,"href":"https:\/\/www.aqwu.net\/wp\/index.php?rest_route=%2Fwp%2Fv2%2Fcategories&post=3432"},{"taxonomy":"post_tag","embeddable":true,"href":"https:\/\/www.aqwu.net\/wp\/index.php?rest_route=%2Fwp%2Fv2%2Ftags&post=3432"}],"curies":[{"name":"wp","href":"https:\/\/api.w.org\/{rel}","templated":true}]}}