 # This source code is licensed under the BSD-style license found in the
 # LICENSE file in the root directory of this source tree.
 import csv
-import io
 import itertools
 import json
 import logging
@@ -8517,207 +8516,6 @@ def test_whisper(self): |
                 self.assertLessEqual(msg["wer"], 0.25)


-class TestExampleQaihubScript(TestQNN):
-    def test_utils_export(self):
-        with tempfile.TemporaryDirectory() as tmp_dir:
-            module = ContextBinaryExample()  # noqa: F405
-            generate_context_binary(
-                module=module,
-                inputs=module.example_inputs(),
-                quantized=True,
-                artifact_dir=tmp_dir,
-            )
-            ctx_path = f"{tmp_dir}/model_ctx.bin"
-            fpath = f"{self.executorch_root}/examples/qualcomm/qaihub_scripts/utils/export.py"
-
-            # do compilation
-            compile_cmds = [
-                "python",
-                fpath,
-                "compile",
-                "-a",
-                ctx_path,
-                "-m",
-                self.soc_model,
-                "-l",
-                "False",
-                "-b",
-                self.build_folder,
-                "-o",
-                f"{tmp_dir}/output_pte",
-            ]
-            compile_process = subprocess.Popen(
-                compile_cmds, stdout=subprocess.DEVNULL, cwd=self.executorch_root
-            )
-            output_pte_dir = f"{tmp_dir}/output_pte/model_ctx"
-            compile_process.communicate()
-
-            # check artifacts are correctly generated
-            self.assertTrue(
-                all(
-                    [
-                        Path(output_pte_dir).exists(),
-                        Path(f"{output_pte_dir}/model_ctx.json").exists(),
-                        Path(f"{output_pte_dir}/model_ctx.svg").exists(),
-                    ]
-                )
-            )
-
-            # prepare input files
-            input_list, inputs = [], module.example_inputs()
-            for name, tensor in inputs.items():
-                tensor_path = f"{output_pte_dir}/{name}.pt"
-                torch.save(tensor, tensor_path)
-                input_list.append(tensor_path)
-
-            # do execution
-            output_data_dir = f"{tmp_dir}/output_data"
-            execute_cmds = [
-                "python",
-                fpath,
-                "execute",
-                "-p",
-                output_pte_dir,
-                "-i",
-                *input_list,
-                "-s",
-                self.device,
-                "-z",
-                "-b",
-                self.build_folder,
-                "-o",
-                output_data_dir,
-            ]
-            if self.host is not None:
-                execute_cmds.append(f"-H {self.host}")
-            execute_process = subprocess.Popen(execute_cmds, cwd=self.executorch_root)
-            execute_process.communicate()
-
-            # read outputs
-            with open(f"{output_pte_dir}/model_ctx.json", "r") as f:
-                graph_info = json.load(f)
-
-            device_output = []
-            for output in graph_info["outputs"]:
-                with open(f"{output_data_dir}/{output['name']}.pt", "rb") as f:
-                    buffer = io.BytesIO(f.read())
-                device_output.append(torch.load(buffer, weights_only=False))
-
-            # validate outputs
-            golden_output = module.forward(inputs["x"], inputs["y"])
-            self.atol, self.rtol = 1e-1, 1
-            self._assert_outputs_equal(golden_output, device_output)
-
-    def test_llama2_7b(self):
-        if not self.required_envs():
-            self.skipTest("missing required envs")
-
-        prompt = "Explain the rules of baseball"
-        cmds = [
-            "python",
-            f"{self.executorch_root}/examples/qualcomm/qaihub_scripts/llama/llama2/qaihub_llama2_7b.py",
-            "--artifact",
-            self.artifact_dir,
-            "--build_folder",
-            self.build_folder,
-            "--tokenizer_bin",
-            f"{self.artifact_dir}/tokenizer.bin",
-            "--context_binaries",
-            f"{self.artifact_dir}",
-            "--prompt",
-            f"{prompt}",
-        ]
-        self.add_default_cmds(cmds)
-
-        p = subprocess.Popen(cmds, stdout=subprocess.DEVNULL)
-        with Listener((self.ip, self.port)) as listener:
-            conn = listener.accept()
-            p.communicate()
-            msg = json.loads(conn.recv())
-            if "Error" in msg:
-                self.fail(msg["Error"])
-            else:
-                model_out = msg["result"]
-                self.assertTrue(model_out.startswith(prompt))
-
-    def test_llama3_8b(self):
-        if not self.required_envs():
-            self.skipTest("missing required envs")
-
-        prompt = "Explain the rules of baseball"
-        cmds = [
-            "python",
-            f"{self.executorch_root}/examples/qualcomm/qaihub_scripts/llama/llama3/qaihub_llama3_8b.py",
-            "--artifact",
-            self.artifact_dir,
-            "--build_folder",
-            self.build_folder,
-            "--tokenizer_model",
-            f"{self.artifact_dir}/tokenizer.model",
-            "--context_binaries",
-            f"{self.artifact_dir}",
-            "--prompt",
-            f"{prompt}",
-        ]
-        self.add_default_cmds(cmds)
-
-        p = subprocess.Popen(cmds, stdout=subprocess.DEVNULL)
-        with Listener((self.ip, self.port)) as listener:
-            conn = listener.accept()
-            p.communicate()
-            msg = json.loads(conn.recv())
-            if "Error" in msg:
-                self.fail(msg["Error"])
-            else:
-                model_out = msg["result"]
-                expected_result = (
-                    "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n"
-                    + prompt
-                    + "<|eot_id|><|start_header_id|>assistant<|end_header_id|>"
-                )
-                self.assertTrue(model_out.startswith(expected_result))
-
-    def test_stable_diffusion(self):
-        if not self.required_envs():
-            self.skipTest("missing required envs")
-
-        prompt = "a photo of an astronaut riding a horse on mars"
-        cmds = [
-            "python",
-            f"{self.executorch_root}/examples/qualcomm/qaihub_scripts/stable_diffusion/qaihub_stable_diffusion.py",
-            "--artifact",
-            self.artifact_dir,
-            "--build_folder",
-            self.build_folder,
-            "--text_encoder_bin",
-            f"{self.artifact_dir}/text_encoder.serialized.bin",
-            "--unet_bin",
-            f"{self.artifact_dir}/unet.serialized.bin",
-            "--vae_bin",
-            f"{self.artifact_dir}/vae.serialized.bin",
-            "--vocab_json",
-            f"{self.artifact_dir}/vocab.json",
-            "--num_time_steps",
-            "20",
-            "--prompt",
-            f"{prompt}",
-            "--fix_latents",
-        ]
-        self.add_default_cmds(cmds)
-
-        p = subprocess.Popen(cmds, stdout=subprocess.DEVNULL)
-        with Listener((self.ip, self.port)) as listener:
-            conn = listener.accept()
-            p.communicate()
-            msg = json.loads(conn.recv())
-            if "Error" in msg:
-                self.fail(msg["Error"])
-            else:
-                # For the default settings and prompt, the expected results will be {PSNR: 23.258, SSIM: 0.852}
-                self.assertGreaterEqual(msg["PSNR"], 20)
-                self.assertGreaterEqual(msg["SSIM"], 0.8)
-
-
 class TestExampleScript(TestQNN):
     def test_mobilenet_v2(self):
         if not self.required_envs([self.image_dataset]):
|