7.1.3. Example: Inference With A Model
Sample program that performs inference with a randomly initialized model
Execution Method
$ cd /opt/pfn/pfcomp/codegen/examples/
$ ./exec_with_env.sh python3 infer.py
Expected Output
Inference results using the randomly initialized model
tensor([[-0.3188, 0.6279, -0.5000, -1.2148],
[-0.3188, 0.6279, -0.5000, -1.2148],
[-0.3188, 0.6279, -0.5000, -1.2148],
[-0.3188, 0.6279, -0.5000, -1.2148]])
Sample Program
import torch
from mlsdk import Context, MNDevice, set_tensor_name_in_module, storage

# Fix the RNG seed so the randomly initialized Linear weights (and hence
# the printed inference result) are reproducible across runs.
torch.manual_seed(0)
def run_infer():
    """Compile a randomly initialized 4x4 Linear model for the MN-Core
    device and run one inference step, printing the result on the CPU.
    """
    # Select the accelerator and make its context the active one.
    device = MNDevice("mncore2:auto")
    context = Context(device)
    Context.switch_context(context)

    # Build the model in eval mode and register its tensors with the
    # context so the compiler can track parameters/buffers by name.
    model = torch.nn.Linear(4, 4)
    model.eval()
    set_tensor_name_in_module(model, "model")
    for p in model.parameters():
        context.register_param(p)
    for b in model.buffers():
        context.register_buffer(b)

    # The function to be compiled: maps a dict of named input tensors to
    # a dict of named output tensors.
    # NOTE(review): the parameter shadows the builtin `input`; kept as-is
    # in case the compiler inspects the signature.
    def infer(input: dict[str, torch.Tensor]) -> dict[str, torch.Tensor]:
        x = input["x"]
        y = model(x)
        return {"out": y}

    # Sample input — presumably used by the compiler to trace shapes and
    # dtypes; verify against the mlsdk compile documentation.
    sample = {"x": torch.randn(4, 4)}

    compiled_infer = context.compile(
        infer,
        sample,
        storage.path("/tmp/infer"),
        options={"float_dtype": "mixed"},
    )
    # Run the compiled function and copy the result back to the CPU
    # before printing.
    result = compiled_infer({"x": torch.ones(4, 4)})
    result_on_cpu = result["out"].cpu()
    print(result_on_cpu)
# Script entry point: run the inference example when executed directly.
if __name__ == "__main__":
    run_infer()