Compare commits

..

42 commits

Author SHA1 Message Date
Erin bf78cc751a initial work to adhere spec in handling memory access faults 2023-06-21 01:56:26 +02:00
Erin c95deefcb2 change spec 2023-06-21 00:12:43 +02:00
Erin 2c20c7c859 removed UB 2023-06-20 19:00:23 +02:00
Erin f85e3eb062 updates spec 2023-06-19 19:51:48 +02:00
Erin 7f2676af91 Reimplemented memory instructions, deal with it. 2023-06-19 00:42:57 +02:00
Erin 4dd2052634 fix 2023-06-12 00:40:24 +02:00
Erin 8afb251950 a 2023-06-12 00:39:53 +02:00
Erin e4f84aadc0 assembly 2023-06-12 00:39:50 +02:00
Erin bdb596befb spec 2023-06-12 00:32:42 +02:00
Erin 9ee9d6e266 Start working on spec
(also i wanted to test if my comming signing works)
2023-06-11 23:34:53 +02:00
Erin c7b5512ada fixed label handling 2023-06-11 23:24:16 +02:00
Erin 059e2b4b66 Merged division with remainder 2023-06-11 23:06:31 +02:00
Erin b7ff456808 fix 2023-06-11 22:43:02 +02:00
Erin 1ec4a7a653 gone 2023-06-11 15:54:44 +02:00
Erin ed3bcba42e a 2023-06-11 15:42:53 +02:00
Erin 86df8ddc64 uhh oohh i guess it works 2023-06-11 14:47:49 +02:00
Erin 1263941560 c 2023-06-11 14:05:57 +02:00
Erin efdcd826ee fixed typo 2023-06-11 13:55:05 +02:00
Erin c873945681 stores 2023-06-11 13:47:33 +02:00
Erin 2e6e6b7939 Basic paging 2023-06-11 13:26:16 +02:00
Erin 2144c055d1 added safety notice 2023-06-10 16:46:04 +02:00
Erin f832f6a04a asdasdasdasdasdasdasdasd 2023-06-09 21:58:59 +02:00
Erin 36f4d31fb2 sus 2023-06-09 18:35:34 +02:00
Erin e1499fd5a1 CMP, CMPU, CMPI, CMPUI 2023-06-09 18:25:37 +02:00
Erin d32b9e7fba I guess I improved local labels? 2023-06-09 18:07:08 +02:00
Erin 8ba86db561 daily 2023-06-09 17:38:36 +02:00
Erin 642488cb64 fixed 2023-06-09 15:42:13 +02:00
Erin 82160af7af fixed labels, added comments 2023-06-09 13:33:17 +02:00
Erin 2a08362b52 Very quick and dirty labels.
I wanna sleep now, so... will fix later™
2023-06-09 02:42:00 +02:00
Erin 48353db26f added conditional jumps 2023-06-09 01:55:09 +02:00
Erin 417047806b Program interrupts, immediate binary ops and bitshifts. 2023-06-09 00:10:46 +02:00
Erin da6ad6d2c7 Small improvements 2023-06-08 23:23:23 +02:00
Erin a34f2fc9f8 Added zero register + relative jump 2023-06-08 00:52:24 +02:00
Erin fce7a96e50 VM exceptions 2023-06-08 00:27:53 +02:00
Erin 859e14daa6 Memory access 2023-06-08 00:25:38 +02:00
Erin 0fd3aee6b5 Assembler program 2023-06-07 15:17:45 +02:00
Erin cbb0ac2abe Assembla. 2023-06-07 15:07:37 +02:00
Erin 8675965ef5 Implement copy + docs for instructions 2023-06-07 00:02:27 +02:00
Erin 8d6e7af9d8 Validated VM creator 2023-06-06 23:08:26 +02:00
Erin bb3a472eeb Added valider 2023-06-06 22:56:28 +02:00
Erin e67d512f89 Implemented some portion of VM, missing validation 2023-06-06 22:03:37 +02:00
Erin a4b22e2053 Instruction redefinition 2023-05-31 01:51:04 +02:00
70 changed files with 1530 additions and 4678 deletions

View file

@ -1,2 +0,0 @@
[alias]
xtask = "r -p xtask --"

522
Cargo.lock generated
View file

@ -2,101 +2,22 @@
# It is not intended for manual editing.
version = 3
[[package]]
name = "addr2line"
version = "0.21.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8a30b2e23b9e17a9f90641c7ab1549cd9b44f296d3ccbf309d2863cfe398a0cb"
dependencies = [
"gimli",
]
[[package]]
name = "adler"
version = "1.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f26201604c87b1e01bd3d98f8d5d9a8fcbb815e8cedb41ffccbeb4bf593a35fe"
[[package]]
name = "ahash"
version = "0.8.5"
version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cd7d5a2cecb58716e47d67d5703a249964b14c7be1ec3cad3affc295b2d1c35d"
checksum = "2c99f64d1e06488f620f932677e24bc6e2897582980441ae90a671415bd7ec2f"
dependencies = [
"cfg-if",
"const-random",
"getrandom",
"once_cell",
"version_check",
"zerocopy",
]
[[package]]
name = "argh"
version = "0.1.12"
name = "beef"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7af5ba06967ff7214ce4c7419c7d185be7ecd6cc4965a8f6e1d8ce0398aad219"
dependencies = [
"argh_derive",
"argh_shared",
]
[[package]]
name = "argh_derive"
version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "56df0aeedf6b7a2fc67d06db35b09684c3e8da0c95f8f27685cb17e08413d87a"
dependencies = [
"argh_shared",
"proc-macro2",
"quote",
"syn 2.0.38",
]
[[package]]
name = "argh_shared"
version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5693f39141bda5760ecc4111ab08da40565d1771038c4a0250f03457ec707531"
dependencies = [
"serde",
]
[[package]]
name = "autocfg"
version = "1.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa"
[[package]]
name = "backtrace"
version = "0.3.69"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837"
dependencies = [
"addr2line",
"cc",
"cfg-if",
"libc",
"miniz_oxide",
"object",
"rustc-demangle",
]
[[package]]
name = "bitflags"
version = "2.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "327762f6e5a765692301e5bb513e0d9fef63be86bbc14528052b1cd3e6f03e07"
[[package]]
name = "cc"
version = "1.0.83"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f1174fb0b6ec23863f8b971027804a42614e347eafb0a95bf0b12cdae21fc4d0"
dependencies = [
"libc",
]
checksum = "3a8241f3ebb85c056b509d4327ad0358fbbba6ffb340bf388f26350aeda225b1"
[[package]]
name = "cfg-if"
@ -105,324 +26,142 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd"
[[package]]
name = "color-eyre"
version = "0.6.2"
name = "delegate"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5a667583cca8c4f8436db8de46ea8233c42a7d9ae424a82d338f2e4675229204"
checksum = "d358e0ec5c59a5e1603b933def447096886121660fc680dc1e64a0753981fe3c"
dependencies = [
"backtrace",
"color-spantrace",
"eyre",
"indenter",
"once_cell",
"owo-colors",
"tracing-error",
"proc-macro2",
"quote",
"syn 1.0.109",
]
[[package]]
name = "color-spantrace"
version = "0.2.0"
name = "fnv"
version = "1.0.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1ba75b3d9449ecdccb27ecbc479fdc0b87fa2dd43d2f8298f9bf0e59aacc8dce"
checksum = "3f9eec918d3f24069decb9af1554cad7c880e2da24a9afd88aca000531ab82c1"
[[package]]
name = "hashbrown"
version = "0.13.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "43a3c133739dddd0d2990f9a4bdf8eb4b21ef50e4851ca85ab661199821d510e"
dependencies = [
"once_cell",
"owo-colors",
"tracing-core",
"tracing-error",
"ahash",
]
[[package]]
name = "const-random"
version = "0.1.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "11df32a13d7892ec42d51d3d175faba5211ffe13ed25d4fb348ac9e9ce835593"
dependencies = [
"const-random-macro",
]
[[package]]
name = "const-random-macro"
version = "0.1.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e"
dependencies = [
"getrandom",
"once_cell",
"tiny-keccak",
]
[[package]]
name = "crunchy"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a81dae078cea95a014a339291cec439d2f232ebe854a9d672b796c6afafa9b7"
[[package]]
name = "eyre"
version = "0.6.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4c2b6b5a29c02cdc822728b7d7b8ae1bab3e3b05d44522770ddd49722eeac7eb"
dependencies = [
"indenter",
"once_cell",
]
[[package]]
name = "getrandom"
version = "0.2.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "be4136b2a15dd319360be1c07d9933517ccf0be8f16bf62a3bee4f0d618df427"
dependencies = [
"cfg-if",
"libc",
"wasi",
]
[[package]]
name = "gimli"
version = "0.28.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6fb8d784f27acf97159b40fc4db5ecd8aa23b9ad5ef69cdd136d3bc80665f0c0"
[[package]]
name = "hbasm"
version = "0.1.0"
dependencies = [
"hbbytecode",
"lasso",
"logos",
"paste",
"rhai",
"with_builtin_macros",
]
[[package]]
name = "hbbytecode"
version = "0.1.0"
dependencies = [
"with_builtin_macros",
]
[[package]]
name = "hbvm"
version = "0.1.0"
dependencies = [
"delegate",
"hashbrown",
"hbbytecode",
"log",
"paste",
"static_assertions",
]
[[package]]
name = "hbvm_aos_on_linux"
version = "0.1.0"
dependencies = [
"hbvm",
"nix",
]
[[package]]
name = "hbxrt"
version = "0.1.0"
dependencies = [
"hbvm",
"memmap2",
]
[[package]]
name = "indenter"
version = "0.3.3"
name = "lasso"
version = "0.7.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ce23b50ad8242c51a442f3ff322d56b02f08852c77e4c0b4d3fd684abc89c683"
checksum = "4644821e1c3d7a560fe13d842d13f587c07348a1a05d3a797152d41c90c56df2"
dependencies = [
"hashbrown",
]
[[package]]
name = "instant"
version = "0.1.12"
name = "log"
version = "0.4.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7a5bbe824c507c5da5956355e86a746d82e0e1464f65d862cc5e71da70e94b2c"
checksum = "abb12e687cfb44aa40f41fc3978ef76448f9b6038cad6aef4259d3c095a2382e"
dependencies = [
"cfg-if",
]
[[package]]
name = "lazy_static"
version = "1.4.0"
name = "logos"
version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e2abad23fbc42b3700f2f279844dc832adb2b2eb069b2df918f455c4e18cc646"
[[package]]
name = "libc"
version = "0.2.149"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a08173bc88b7955d1b3145aa561539096c421ac8debde8cbc3612ec635fee29b"
[[package]]
name = "memchr"
version = "2.6.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f665ee40bc4a3c5590afb1e9677db74a508659dfd71e126420da8274909a0167"
[[package]]
name = "memmap2"
version = "0.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "deaba38d7abf1d4cca21cc89e932e542ba2b9258664d2a9ef0e61512039c9375"
checksum = "c000ca4d908ff18ac99b93a062cb8958d331c3220719c52e77cb19cc6ac5d2c1"
dependencies = [
"libc",
"logos-derive",
]
[[package]]
name = "miniz_oxide"
version = "0.7.1"
name = "logos-codegen"
version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e7810e0be55b428ada41041c41f32c9f1a42817901b4ccf45fa3d4b6561e74c7"
checksum = "dc487311295e0002e452025d6b580b77bb17286de87b57138f3b5db711cded68"
dependencies = [
"adler",
"beef",
"fnv",
"proc-macro2",
"quote",
"regex-syntax",
"syn 2.0.18",
]
[[package]]
name = "nix"
version = "0.27.1"
name = "logos-derive"
version = "0.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053"
checksum = "dbfc0d229f1f42d790440136d941afd806bc9e949e2bcb8faa813b0f00d1267e"
dependencies = [
"bitflags",
"cfg-if",
"libc",
]
[[package]]
name = "num-traits"
version = "0.2.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "39e3200413f237f41ab11ad6d161bc7239c84dcb631773ccd7de3dfe4b5c267c"
dependencies = [
"autocfg",
]
[[package]]
name = "object"
version = "0.32.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9cf5f9dd3933bd50a9e1f149ec995f39ae2c496d31fd772c1fd45ebc27e902b0"
dependencies = [
"memchr",
"logos-codegen",
]
[[package]]
name = "once_cell"
version = "1.18.0"
version = "1.17.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dd8b5dd2ae5ed71462c540258bedcb51965123ad7e7ccf4b9a8cafaa4a63576d"
[[package]]
name = "owo-colors"
version = "3.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c1b04fb49957986fdce4d6ee7a65027d55d4b6d2265e5848bbb507b58ccfdb6f"
checksum = "b7e5500299e16ebb147ae15a00a942af264cf3688f47923b8fc2cd5858f23ad3"
[[package]]
name = "paste"
version = "1.0.14"
version = "1.0.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "de3145af08024dea9fa9914f381a17b8fc6034dfb00f3a84013f7ff43f29ed4c"
[[package]]
name = "pin-project-lite"
version = "0.2.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58"
checksum = "9f746c4065a8fa3fe23974dd82f15431cc8d40779821001404d10d2e79ca7d79"
[[package]]
name = "proc-macro2"
version = "1.0.69"
version = "1.0.59"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "134c189feb4956b20f6f547d2cf727d4c0fe06722b20a0eec87ed445a97f92da"
checksum = "6aeca18b86b413c660b781aa319e4e2648a3e6f9eadc9b47e9038e6fe9f3451b"
dependencies = [
"unicode-ident",
]
[[package]]
name = "quote"
version = "1.0.33"
version = "1.0.28"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5267fca4496028628a95160fc423a33e8b2e6af8a5302579e322e4b520293cae"
checksum = "1b9ab9c7eadfd8df19006f1cf1a4aed13540ed5cbc047010ece5826e10825488"
dependencies = [
"proc-macro2",
]
[[package]]
name = "rhai"
version = "1.16.2"
name = "regex-syntax"
version = "0.6.29"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "206cee941730eaf90a22c84235b25193df661393688162e15551164f92f09eca"
dependencies = [
"ahash",
"bitflags",
"instant",
"num-traits",
"once_cell",
"rhai_codegen",
"smallvec",
"smartstring",
]
[[package]]
name = "rhai_codegen"
version = "1.6.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "853977598f084a492323fe2f7896b4100a86284ee8473612de60021ea341310f"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.38",
]
[[package]]
name = "rustc-demangle"
version = "0.1.23"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d626bb9dae77e28219937af045c257c28bfd3f69333c512553507f5f9798cb76"
[[package]]
name = "serde"
version = "1.0.189"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e422a44e74ad4001bdc8eede9a4570ab52f71190e9c076d14369f38b9200537"
dependencies = [
"serde_derive",
]
[[package]]
name = "serde_derive"
version = "1.0.189"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e48d1f918009ce3145511378cf68d613e3b3d9137d67272562080d68a2b32d5"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.38",
]
[[package]]
name = "sharded-slab"
version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f40ca3c46823713e0d4209592e8d6e826aa57e928f09752619fc696c499637f6"
dependencies = [
"lazy_static",
]
[[package]]
name = "smallvec"
version = "1.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "942b4a808e05215192e39f4ab80813e599068285906cc91aa64f923db842bd5a"
[[package]]
name = "smartstring"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fb72c633efbaa2dd666986505016c32c3044395ceaf881518399d2f4127ee29"
dependencies = [
"autocfg",
"static_assertions",
"version_check",
]
checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1"
[[package]]
name = "static_assertions"
@ -443,144 +182,23 @@ dependencies = [
[[package]]
name = "syn"
version = "2.0.38"
version = "2.0.18"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e96b79aaa137db8f61e26363a0c9b47d8b4ec75da28b7d1d614c2303e232408b"
checksum = "32d41677bcbe24c20c52e7c70b0d8db04134c5d1066bf98662e2871ad200ea3e"
dependencies = [
"proc-macro2",
"quote",
"unicode-ident",
]
[[package]]
name = "thread_local"
version = "1.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3fdd6f064ccff2d6567adcb3873ca630700f00b5ad3f060c25b5dcfd9a4ce152"
dependencies = [
"cfg-if",
"once_cell",
]
[[package]]
name = "tiny-keccak"
version = "2.0.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2c9d3793400a45f954c52e73d068316d76b6f4e36977e3fcebb13a2721e80237"
dependencies = [
"crunchy",
]
[[package]]
name = "tracing"
version = "0.1.40"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c3523ab5a71916ccf420eebdf5521fcef02141234bbc0b8a49f2fdc4544364ef"
dependencies = [
"pin-project-lite",
"tracing-core",
]
[[package]]
name = "tracing-core"
version = "0.1.32"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c06d3da6113f116aaee68e4d601191614c9053067f9ab7f6edbcb161237daa54"
dependencies = [
"once_cell",
"valuable",
]
[[package]]
name = "tracing-error"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d686ec1c0f384b1277f097b2f279a2ecc11afe8c133c1aabf036a27cb4cd206e"
dependencies = [
"tracing",
"tracing-subscriber",
]
[[package]]
name = "tracing-subscriber"
version = "0.3.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "30a651bc37f915e81f087d86e62a18eec5f79550c7faff886f7090b4ea757c77"
dependencies = [
"sharded-slab",
"thread_local",
"tracing-core",
]
[[package]]
name = "unicode-ident"
version = "1.0.12"
version = "1.0.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3354b9ac3fae1ff6755cb6db53683adb661634f67557942dea4facebec0fee4b"
[[package]]
name = "valuable"
version = "0.1.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "830b7e5d4d90034032940e4ace0d9a9a057e7a45cd94e6c007832e39edb82f6d"
checksum = "b15811caf2415fb889178633e7724bad2509101cde276048e013b9def5e51fa0"
[[package]]
name = "version_check"
version = "0.9.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49874b5167b65d7193b8aba1567f5c7d93d001cafc34600cee003eda787e483f"
[[package]]
name = "wasi"
version = "0.11.0+wasi-snapshot-preview1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423"
[[package]]
name = "with_builtin_macros"
version = "0.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a59d55032495429b87f9d69954c6c8602e4d3f3e0a747a12dea6b0b23de685da"
dependencies = [
"with_builtin_macros-proc_macros",
]
[[package]]
name = "with_builtin_macros-proc_macros"
version = "0.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "15bd7679c15e22924f53aee34d4e448c45b674feb6129689af88593e129f8f42"
dependencies = [
"proc-macro2",
"quote",
"syn 1.0.109",
]
[[package]]
name = "xtask"
version = "0.1.0"
dependencies = [
"argh",
"color-eyre",
"once_cell",
]
[[package]]
name = "zerocopy"
version = "0.7.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ffc046c9f849405a42c87e82e2f2f861d1f0a06b855910c76c2bd1e87be900c"
dependencies = [
"zerocopy-derive",
]
[[package]]
name = "zerocopy-derive"
version = "0.7.13"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "246c000cfc5f942bac7ff99fde24a9ebb589d92e024bc758c6c733c15a02a73e"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.38",
]

View file

@ -1,3 +1,2 @@
[workspace]
resolver = "2"
members = ["hbasm", "hbbytecode", "hbvm", "hbvm_aos_on_linux", "hbxrt", "xtask"]
members = ["hbasm", "hbbytecode", "hbvm"]

4
assets/example.asm Normal file
View file

@ -0,0 +1,4 @@
load 0 a0 ;; 05 00 A0
load 10 a1 ;; 05 10 A1
add a0 1 a0 ;; 01 A0 01 A0
jump_neq a0 a1 0 ;; a1 A0 A1 0

4
assets/example.bytes Normal file
View file

@ -0,0 +1,4 @@
load 10 A1
load 0 A0
add A0 1
jump_less_than A0 A1 0

View file

@ -1,100 +0,0 @@
jmp entry
puts:
-- Write string to console
-- r2: [IN] *const u8 String pointer
-- r3: [IN] usize String length
li8 r1, 0x1 -- Write syscall
brc r2, r3, 2 -- Copy parameters
li8 r2, 0x1 -- STDOUT
eca
jal r0, r31, 0
gets:
-- Read string until end of buffer or LF
-- r2: [IN] *mut u8 Buffer
-- r3: [IN] usize Buffer length
-- Register allocations:
-- r33: *mut u8 Buffer end
-- r34: u8 Immediate char
-- r35: u8 Const [0x0A = LF]
li8 r35, 0x0A
add64 r33, r2, r3
-- Setup syscall
li8 r2, 0x1 -- Stdin
cp r3, r2
li8 r4, 0x1 -- Read one char
jeq r3, r33, end
loop:
li8 r1, 0x1 -- Read syscall
eca
addi64 r3, r3, 1
ld r34, r3, 0, 1
jeq r34, r35, end
jne r3, r33, loop
end:
-- Set copied amount
sub64 r1, r33, r3
addi64 r1, -1
jal r0, r31, 0
alloc-pages:
-- Allocate pages
-- r1: [OUT] *mut u8 Pointer to page
-- r2: [IN] u16 Page count
muli16 r3, r2, 4096 -- Length in bytes (page count × 4096)
li8 r1, 0x9 -- mmap syscall
li8 r2, 0x0 -- no address set, kernel chosen
li8 r4, 0x2 -- PROT_WRITE
li8 r5, 0x20 -- MAP_ANONYMOUS
li64 r6, -1 -- No file descriptor (anonymous mapping)
li8 r7, 0x0 -- File offset (unused)
eca
jal r0, r31, 0
entry:
-- Program entrypoint
-- Register allocations:
-- r32: *mut u8 Buffer
-- r36: usize Read buffer length
-- Allocate one page (4 KiB)
li8 r2, 1
jal r31, 0, alloc-pages
cp r32, r1
-- Print message
lra16 r2, r0, #enter-your-name
li8 r3, 17
jal r31, r0, puts
-- Read name
cp r2, r32
li16 r3, 4096
jal r31, r0, gets
cp r36, r1
-- Print your name is
lra16 r2, r0, #your-name-is
li8 r3, 15
jal r31, r0, puts
-- And now print the name
cp r2, r32
cp r3, r36
jal r31, r0, puts
tx
#enter-your-name: "Enter your name: "
#your-name-is : "\nYour name is: "

Binary file not shown.

Binary file not shown.

View file

@ -4,6 +4,11 @@ version = "0.1.0"
edition = "2021"
[dependencies]
hbbytecode = { path = "../hbbytecode" }
lasso = "0.7"
paste = "1.0"
rhai = "1.16"
with_builtin_macros = "0.0.3"
[dependencies.logos]
version = "0.13"
default-features = false
features = ["export_derive"]

View file

@ -1,13 +0,0 @@
import "hbasm/examples/ableos/std" as std;
fn main(){
std::Error(":+)");
std::Warn("Your mom fell in a well!");
std::Info("Hello, world!");
std::Debug("ABC");
std::Trace("Trace Deez");
tx();
}
main();

View file

@ -1,24 +0,0 @@
fn ipc_send(buffer_id, mem_addr, length){
// set the ecall
li8(r1, 3);
// Set the buffer ID to be the BufferID
li64(r2, buffer_id);
lra(r3, r0, mem_addr);
// set the length
li64(r4, length);
// ecall
eca();
}
private fn log(log_level, string){
let str = data::str(string);
ipc_send(1, str, str.len);
}
fn Error(string) {log(0, string);}
fn Warn(string) {log(1, string);}
fn Info(string) {log(2, string);}
// Due to rhai limitations this function cannot be named `debug`,
// so all of the log-level functions are capitalised.
fn Debug(string) {log(3, string);}
fn Trace(string) {log(4, string);}

View file

@ -1,9 +0,0 @@
let hello = data::str("Hello, world!");
li8 (r1, 1); // Write syscall
li8 (r2, 1); // Stdout FD
lra16 (r3, r0, hello); // String buffer
li8 (r4, hello.len); // String length
eca (); // System call
tx (); // End program

View file

@ -1,33 +0,0 @@
li8(r1, 69);
li8(r2, 0);
if_eq(r1, r2,
|| puts("Equals!"),
|| puts("Not equals!"),
);
tx(); // END OF MAIN
/// Inline function: writes text to stdout
fn puts(string) {
let d = data::str(string);
li8 (r1, 1); // Write syscall
li8 (r2, 1); // Stdout handle
lra16 (r3, r0, d);
li64 (r4, d.len);
eca ();
}
fn if_eq(a, b, thenblk, elseblk) {
let elselbl = declabel();
let endlbl = declabel();
jne(a, b, elselbl);
thenblk.call();
jmp16(endlbl);
elselbl.here();
elseblk.call();
endlbl.here();
}

View file

@ -1,85 +0,0 @@
use rhai::{CustomType, Engine, ImmutableString};
use {
crate::{object::SymbolRef, SharedObject},
rhai::Module,
};
macro_rules! gen_data_insertions {
($module:expr, $obj:expr, [$($ty:ident),* $(,)?] $(,)?) => {{
let (module, obj) = ($module, $obj);
$({
let obj = ::std::rc::Rc::clone(obj);
let hash = module.set_native_fn(stringify!($ty), move |arr: ::rhai::Array| {
let obj = &mut *obj.borrow_mut();
let symbol = obj.symbol($crate::object::Section::Data);
obj.sections
.data
.reserve(arr.len() * ::std::mem::size_of::<$ty>());
for item in arr {
obj.sections.data.extend(
match item.as_int() {
Ok(num) => $ty::try_from(num).map_err(|_| "i64".to_owned()),
Err(ty) => Err(ty.to_owned()),
}
.map_err(|err| {
::rhai::EvalAltResult::ErrorMismatchDataType(
stringify!($ty).to_owned(),
err,
::rhai::Position::NONE,
)
})?
.to_le_bytes(),
);
}
Ok(DataRef {
symbol,
len: obj.sections.data.len() - symbol.0,
})
});
module.update_fn_namespace(hash, ::rhai::FnNamespace::Global);
})*
}};
}
#[derive(Clone, Copy, Debug)]
pub struct DataRef {
pub symbol: SymbolRef,
pub len: usize,
}
impl CustomType for DataRef {
fn build(mut builder: rhai::TypeBuilder<Self>) {
builder
.with_name("DataRef")
.with_get("symbol", |this: &mut Self| this.symbol)
.with_get("len", |this: &mut Self| this.len as u64 as i64);
}
}
pub fn module(engine: &mut Engine, obj: SharedObject) -> Module {
let mut module = Module::new();
gen_data_insertions!(&mut module, &obj, [i8, i16, i32, i64]);
{
let hash = module.set_native_fn("str", move |s: ImmutableString| {
let obj = &mut *obj.borrow_mut();
let symbol = obj.symbol(crate::object::Section::Data);
obj.sections.data.extend(s.as_bytes());
Ok(DataRef {
symbol,
len: s.len(),
})
});
module.update_fn_namespace(hash, rhai::FnNamespace::Global);
}
engine.build_type::<DataRef>();
module
}

View file

@ -1,226 +0,0 @@
use {
crate::object::Object,
rhai::{FnNamespace, Module},
std::{cell::RefCell, rc::Rc},
};
mod optypes {
use {
crate::{
label::UnboundLabel,
object::{Object, RelocKey, RelocType, SymbolRef},
},
rhai::{Dynamic, EvalAltResult, ImmutableString, Position},
};
pub type R = u8;
pub type B = i8;
pub type H = i16;
pub type W = i32;
pub type D = i64;
pub type A = Dynamic;
pub type O = Dynamic;
pub type P = Dynamic;
pub fn insert_reloc(
obj: &mut Object,
ty: RelocType,
val: &Dynamic,
) -> Result<(), EvalAltResult> {
match () {
_ if val.is::<SymbolRef>() => {
obj.relocation(RelocKey::Symbol(val.clone_cast::<SymbolRef>().0), ty)
}
_ if val.is::<UnboundLabel>() => {
obj.relocation(RelocKey::Symbol(val.clone_cast::<UnboundLabel>().0), ty)
}
_ if val.is::<DataRef>() => {
obj.relocation(RelocKey::Symbol(val.clone_cast::<DataRef>().symbol.0), ty)
}
_ if val.is_string() => {
obj.relocation(RelocKey::Label(val.clone_cast::<ImmutableString>()), ty)
}
_ if val.is_int() => {
let int = val.clone_cast::<i64>();
match ty {
RelocType::Rel32 => obj.sections.text.extend((int as i32).to_le_bytes()),
RelocType::Rel16 => obj.sections.text.extend((int as i16).to_le_bytes()),
RelocType::Abs64 => obj.sections.text.extend(int.to_le_bytes()),
}
}
_ => {
return Err(EvalAltResult::ErrorMismatchDataType(
"SybolRef, UnboundLabel, String or Int".to_owned(),
val.type_name().to_owned(),
Position::NONE,
))
}
}
Ok(())
}
macro_rules! gen_insert {
(le_bytes: [$($lety:ident),* $(,)?]) => {
macro_rules! insert {
$(($thing:expr, $obj: expr, $lety) => {
$obj.sections.text.extend($thing.to_le_bytes());
};)*
($thing:expr, $obj:expr, A) => {
$crate::ins::optypes::insert_reloc(
$obj,
$crate::object::RelocType::Abs64,
$thing
)?
};
($thing:expr, $obj:expr, O) => {
$crate::ins::optypes::insert_reloc(
$obj,
$crate::object::RelocType::Rel32,
$thing
)?
};
($thing:expr, $obj:expr, P) => {
$crate::ins::optypes::insert_reloc(
$obj,
$crate::object::RelocType::Rel16,
$thing
)?
};
}
};
}
gen_insert!(le_bytes: [R, B, H, W, D]);
#[allow(clippy::single_component_path_imports)]
pub(super) use insert;
use crate::data::DataRef;
}
mod rity {
pub use super::optypes::{A, O, P, R};
pub type B = i64;
pub type H = i64;
pub type W = i64;
pub type D = i64;
}
mod generic {
use {crate::object::Object, rhai::EvalAltResult};
pub(super) fn convert_op<A, B>(from: A) -> Result<B, EvalAltResult>
where
B: TryFrom<A>,
<B as TryFrom<A>>::Error: std::error::Error + Sync + Send + 'static,
{
B::try_from(from).map_err(|e| {
EvalAltResult::ErrorSystem("Data conversion error".to_owned(), Box::new(e))
})
}
macro_rules! gen_ins {
($($($name:ident : $ty:ty),*;)*) => {
paste::paste! {
$(#[inline]
pub fn [<$($ty:lower)*>](
obj: &mut Object,
opcode: u8,
$($name: $crate::ins::optypes::$ty),*,
) -> Result<(), EvalAltResult> {
obj.sections.text.push(opcode);
$($crate::ins::optypes::insert!(&$name, obj, $ty);)*
Ok(())
})*
macro_rules! gen_ins_fn {
$(($obj:expr, $opcode:expr, [<$($ty)*>]) => {
move |$($name: $crate::ins::rity::$ty),*| {
$crate::ins::generic::[<$($ty:lower)*>](
&mut *$obj.borrow_mut(),
$opcode,
$(
$crate::ins::generic::convert_op::<
_,
$crate::ins::optypes::$ty
>($name)?
),*
)?;
Ok(())
}
};)*
($obj:expr, $opcode:expr, N) => {
move || {
$crate::ins::generic::n(&mut *$obj.borrow_mut(), $opcode);
Ok(())
}
};
}
}
};
}
#[inline]
pub fn n(obj: &mut Object, opcode: u8) {
obj.sections.text.push(opcode);
}
gen_ins! {
o0: R, o1: R;
o0: R, o1: R, o2: R;
o0: R, o1: R, o2: R, o3: R;
o0: R, o1: R, o2: B;
o0: R, o1: R, o2: H;
o0: R, o1: R, o2: W;
o0: R, o1: R, o2: D;
o0: R, o1: B;
o0: R, o1: H;
o0: R, o1: W;
o0: R, o1: D;
o0: R, o1: R, o2: A;
o0: R, o1: R, o2: A, o3: H;
o0: R, o1: R, o2: O, o3: H;
o0: R, o1: R, o2: P, o3: H;
o0: R, o1: R, o2: O;
o0: R, o1: R, o2: P;
o0: O;
o0: P;
}
#[allow(clippy::single_component_path_imports)]
pub(super) use gen_ins_fn;
}
macro_rules! instructions {
(
($module:expr, $obj:expr $(,)?)
{ $($opcode:expr, $mnemonic:ident, $ops:ident, $doc:literal;)* }
) => {{
let (module, obj) = ($module, $obj);
$({
let obj = Rc::clone(&obj);
let hash = module.set_native_fn(
paste::paste!(stringify!([<$mnemonic:lower>])),
generic::gen_ins_fn!(
obj,
$opcode,
$ops
)
);
module.update_fn_namespace(hash, FnNamespace::Global);
})*
}};
}
pub fn setup(module: &mut Module, obj: Rc<RefCell<Object>>) {
with_builtin_macros::with_builtin! {
let $spec = include_from_root!("../hbbytecode/instructions.in") in {
instructions!((module, obj) { $spec });
}
}
}
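(Note: the spec file included here follows the `opcode, MNEMONIC, TYPE, "doc";` line format matched by the `instructions!` macro above — for example `0x03, ADD8, RRR, "Addition (8b)";`, as in the instructions.in listing shown further down in this comparison.)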

View file

@ -1,76 +0,0 @@
use {
crate::SharedObject,
rhai::{Engine, ImmutableString, Module},
};
#[derive(Clone, Copy, Debug)]
pub struct UnboundLabel(pub usize);
pub fn setup(engine: &mut Engine, module: &mut Module, object: SharedObject) {
{
let object = SharedObject::clone(&object);
let hash = module.set_native_fn("label", move || {
let mut obj = object.borrow_mut();
let symbol = obj.symbol(crate::object::Section::Text);
Ok(symbol)
});
module.update_fn_namespace(hash, rhai::FnNamespace::Global);
}
{
let object = SharedObject::clone(&object);
let hash = module.set_native_fn("label", move |label: ImmutableString| {
let mut obj = object.borrow_mut();
let symbol = obj.symbol(crate::object::Section::Text);
obj.labels.insert(label, symbol.0);
Ok(symbol)
});
module.update_fn_namespace(hash, rhai::FnNamespace::Global);
}
{
let object = SharedObject::clone(&object);
let hash = module.set_native_fn("declabel", move || {
let mut obj = object.borrow_mut();
let index = obj.symbols.len();
obj.symbols.push(None);
Ok(UnboundLabel(index))
});
module.update_fn_namespace(hash, rhai::FnNamespace::Global);
}
{
let object = SharedObject::clone(&object);
let hash = module.set_native_fn("declabel", move |label: ImmutableString| {
let mut obj = object.borrow_mut();
let index = obj.symbols.len();
obj.symbols.push(None);
obj.labels.insert(label, index);
Ok(UnboundLabel(index))
});
module.update_fn_namespace(hash, rhai::FnNamespace::Global);
}
{
module.set_native_fn("here", move |label: UnboundLabel| {
let mut obj = object.borrow_mut();
obj.symbols[label.0] = Some(crate::object::SymbolEntry {
location: crate::object::Section::Text,
offset: obj.sections.text.len(),
});
Ok(())
});
}
engine.register_type_with_name::<UnboundLabel>("UnboundLabel");
}

View file

@ -1,45 +1,269 @@
mod data;
mod ins;
mod label;
mod linker;
mod object;
use std::collections::HashMap;
use {
object::Object,
rhai::{Engine, Module},
std::{cell::RefCell, rc::Rc},
lasso::{Rodeo, Spur},
logos::{Lexer, Logos, Span},
std::fmt::{Display, Formatter},
};
type SharedObject = Rc<RefCell<Object>>;
macro_rules! tokendef {
($($opcode:literal),* $(,)?) => {
paste::paste! {
#[derive(Clone, Copy, Debug, PartialEq, Eq, Logos)]
#[logos(extras = Rodeo)]
#[logos(skip r"[ \t\f]+")]
#[logos(skip r"-- .*")]
pub enum Token {
$(#[token($opcode, |_| hbbytecode::opcode::[<$opcode:upper>])])*
OpCode(u8),
pub fn assembler(
linkout: &mut impl std::io::Write,
loader: impl FnOnce(&mut Engine) -> Result<(), Box<rhai::EvalAltResult>>,
) -> Result<(), Box<dyn std::error::Error>> {
let mut engine = Engine::new();
let mut module = Module::new();
let obj = Rc::new(RefCell::new(Object::default()));
ins::setup(&mut module, Rc::clone(&obj));
label::setup(&mut engine, &mut module, Rc::clone(&obj));
#[regex("[0-9]+", |l| l.slice().parse().ok())]
#[regex(
"-[0-9]+",
|lexer| {
Some(u64::from_ne_bytes(lexer.slice().parse::<i64>().ok()?.to_ne_bytes()))
},
)] Integer(u64),
// Registers
for n in 0_u8..=255 {
module.set_var(format!("r{n}"), n);
#[regex(
"r[0-9]+",
|lexer| match lexer.slice()[1..].parse() {
Ok(n) => Some(n),
_ => None
},
)] Register(u8),
#[regex(
r"\p{XID_Start}\p{XID_Continue}*:",
|lexer| lexer.extras.get_or_intern(&lexer.slice()[..lexer.slice().len() - 1]),
)] Label(Spur),
#[regex(
r"\p{XID_Start}\p{XID_Continue}*",
|lexer| lexer.extras.get_or_intern(lexer.slice()),
)] Symbol(Spur),
#[token("\n")]
#[token(";")] ISep,
#[token(",")] PSep,
}
}
};
}
module.set_native_fn("reg", |n: i64| {
Ok(u8::try_from(n).map_err(|_| {
rhai::EvalAltResult::ErrorRuntime("Invalid register value".into(), rhai::Position::NONE)
})?)
});
#[rustfmt::skip]
tokendef![
"nop", "add", "sub", "mul", "and", "or", "xor", "sl", "sr", "srs", "cmp", "cmpu",
"dir", "neg", "not", "addi", "muli", "andi", "ori", "xori", "sli", "sri", "srsi",
"cmpi", "cmpui", "cp", "swa", "li", "ld", "st", "bmc", "brc", "jmp", "jeq", "jne",
"jlt", "jgt", "jltu", "jgtu", "ecall", "addf", "mulf", "dirf", "addfi", "mulfi",
];
module.set_native_fn("as_i64", |n: u8| Ok(n as i64));
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum ErrorKind {
UnexpectedToken,
InvalidToken,
UnexpectedEnd,
InvalidSymbol,
}
let datamod = Rc::new(data::module(&mut engine, SharedObject::clone(&obj)));
engine.register_global_module(Rc::new(module));
engine.register_static_module("data", datamod);
engine.register_type_with_name::<object::SymbolRef>("SymbolRef");
loader(&mut engine)?;
linker::link(obj, linkout)?;
#[derive(Clone, Debug, PartialEq, Eq)]
pub struct Error {
pub kind: ErrorKind,
pub span: Span,
}
impl Display for Error {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
write!(f, "Error {:?} at {:?}", self.kind, self.span)
}
}
impl std::error::Error for Error {}
macro_rules! expect_matches {
($self:expr, $($pat:pat),* $(,)?) => {$(
let $pat = $self.next()?
else { return Err(ErrorKind::UnexpectedToken) };
)*}
}
pub fn assembly(code: &str, buf: &mut Vec<u8>) -> Result<(), Error> {
struct Assembler<'a> {
lexer: Lexer<'a, Token>,
buf: &'a mut Vec<u8>,
label_map: HashMap<Spur, u64>,
to_sub_label: HashMap<usize, Spur>,
}
impl<'a> Assembler<'a> {
fn next(&mut self) -> Result<Token, ErrorKind> {
match self.lexer.next() {
Some(Ok(t)) => Ok(t),
Some(Err(())) => Err(ErrorKind::InvalidToken),
None => Err(ErrorKind::UnexpectedEnd),
}
}
fn assemble(&mut self) -> Result<(), ErrorKind> {
use hbbytecode::opcode::*;
loop {
match self.lexer.next() {
Some(Ok(Token::OpCode(op))) => {
self.buf.push(op);
match op {
NOP | ECALL => Ok(()),
DIR | DIRF => {
expect_matches!(
self,
Token::Register(r0),
Token::PSep,
Token::Register(r1),
Token::PSep,
Token::Register(r2),
Token::PSep,
Token::Register(r3),
);
self.buf.extend([r0, r1, r2, r3]);
Ok(())
}
ADD..=CMPU | ADDF..=MULF => {
expect_matches!(
self,
Token::Register(r0),
Token::PSep,
Token::Register(r1),
Token::PSep,
Token::Register(r2),
);
self.buf.extend([r0, r1, r2]);
Ok(())
}
BRC => {
expect_matches!(
self,
Token::Register(r0),
Token::PSep,
Token::Register(r1),
Token::PSep,
Token::Integer(count),
);
self.buf.extend([
r0,
r1,
u8::try_from(count).map_err(|_| ErrorKind::UnexpectedToken)?,
]);
Ok(())
}
NEG..=NOT | CP..=SWA => {
expect_matches!(
self,
Token::Register(r0),
Token::PSep,
Token::Register(r1),
);
self.buf.extend([r0, r1]);
Ok(())
}
LI | JMP => {
expect_matches!(self, Token::Register(r0), Token::PSep);
self.buf.push(r0);
self.insert_imm()?;
Ok(())
}
ADDI..=CMPUI | BMC | JEQ..=JGTU | ADDFI..=MULFI => {
expect_matches!(
self,
Token::Register(r0),
Token::PSep,
Token::Register(r1),
Token::PSep,
);
self.buf.extend([r0, r1]);
self.insert_imm()?;
Ok(())
}
LD..=ST => {
expect_matches!(
self,
Token::Register(r0),
Token::PSep,
Token::Register(r1),
Token::PSep,
Token::Integer(offset),
Token::PSep,
Token::Integer(len),
);
self.buf.extend([r0, r1]);
self.buf.extend(offset.to_le_bytes());
self.buf.extend(
u16::try_from(len)
.map_err(|_| ErrorKind::InvalidToken)?
.to_le_bytes(),
);
Ok(())
}
_ => unreachable!(),
}?;
match self.next() {
Ok(Token::ISep) => (),
Ok(_) => return Err(ErrorKind::UnexpectedToken),
Err(ErrorKind::UnexpectedEnd) => return Ok(()),
Err(e) => return Err(e),
}
}
Some(Ok(Token::Label(lbl))) => {
self.label_map.insert(lbl, self.buf.len() as u64);
}
Some(Ok(Token::ISep)) => (),
Some(Ok(_)) => return Err(ErrorKind::UnexpectedToken),
Some(Err(())) => return Err(ErrorKind::InvalidToken),
None => return Ok(()),
}
}
}
fn link_local_syms(&mut self) -> Result<(), ErrorKind> {
for (ix, sym) in &self.to_sub_label {
self.label_map
.get(sym)
.ok_or(ErrorKind::InvalidSymbol)?
.to_le_bytes()
.iter()
.enumerate()
.for_each(|(i, b)| {
self.buf[ix + i] = *b;
});
}
Ok(())
}
fn insert_imm(&mut self) -> Result<(), ErrorKind> {
let imm = match self.next()? {
Token::Integer(i) => i.to_le_bytes(),
Token::Symbol(s) => {
self.to_sub_label.insert(self.buf.len(), s);
[0; 8]
}
_ => return Err(ErrorKind::UnexpectedToken),
};
self.buf.extend(imm);
Ok(())
}
}
let mut asm = Assembler {
lexer: Token::lexer(code),
label_map: Default::default(),
to_sub_label: Default::default(),
buf,
};
asm.assemble().map_err(|kind| Error {
kind,
span: asm.lexer.span(),
})?;
asm.link_local_syms()
.map_err(|kind| Error { kind, span: 0..0 })
}
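A minimal usage sketch (not part of this change) of the `assembly` entry point defined above, matching the token grammar it accepts: registers written `rN`, operands separated by commas, instructions by newlines or `;`, labels ending with `:`, and `--` line comments. The program itself is illustrative:

fn main() -> Result<(), hbasm::Error> {
    // Hypothetical three-instruction program; `li`, `addi` and `jmp` come from
    // the mnemonic list above, and the `start` reference is patched by
    // `link_local_syms` with the label's byte offset.
    let src = "start:\n    li r1, 10;\n    addi r1, r1, 5;\n    jmp r0, start";
    let mut buf = Vec::new();
    hbasm::assembly(src, &mut buf)?;
    // `buf` now holds raw bytecode: one opcode byte per instruction, register
    // bytes, and 64-bit little-endian immediates.
    Ok(())
}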

View file

@ -1,35 +0,0 @@
use {
crate::{
object::{RelocKey, RelocType, Section},
SharedObject,
},
std::io::Write,
};
pub fn link(object: SharedObject, out: &mut impl Write) -> std::io::Result<()> {
let obj = &mut *object.borrow_mut();
for (&loc, entry) in &obj.relocs {
let value = match &entry.key {
RelocKey::Symbol(sym) => obj.symbols[*sym],
RelocKey::Label(label) => obj.symbols[obj.labels[label]],
}
.ok_or_else(|| std::io::Error::other("Invalid symbol"))?;
let offset = match value.location {
Section::Text => value.offset,
Section::Data => value.offset + obj.sections.text.len(),
};
match entry.ty {
RelocType::Rel32 => obj.sections.text[loc..loc + 4]
.copy_from_slice(&((offset as isize - loc as isize) as i32).to_le_bytes()),
RelocType::Rel16 => obj.sections.text[loc..loc + 2]
.copy_from_slice(&((offset as isize - loc as isize) as i16).to_le_bytes()),
RelocType::Abs64 => obj.sections.text[loc..loc + 8]
.copy_from_slice(&(offset as isize - loc as isize).to_le_bytes()),
}
}
out.write_all(&obj.sections.text)?;
out.write_all(&obj.sections.data)
}

View file

@ -1,8 +1,21 @@
use std::{io::stdout, path::PathBuf};
use std::{
error::Error,
io::{stdin, stdout, Read, Write},
};
fn main() -> Result<(), Box<dyn std::error::Error>> {
let path = PathBuf::from(std::env::args().nth(1).ok_or("Missing path")?);
hbasm::assembler(&mut stdout(), |engine| engine.run_file(path))?;
fn main() -> Result<(), Box<dyn Error>> {
let mut code = String::new();
stdin().read_to_string(&mut code)?;
let mut buf = vec![];
if let Err(e) = hbasm::assembly(&code, &mut buf) {
eprintln!(
"Error {:?} at {:?} (`{}`)",
e.kind,
e.span.clone(),
&code[e.span],
);
}
stdout().write_all(&buf)?;
Ok(())
}
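With this change the assembler is a plain filter: it reads assembly source from standard input, writes raw bytecode to standard output, and reports any error together with the offending source span on stderr. Assuming the binary keeps the crate name, something like `hbasm < program.asm > program.bin` assembles a file.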

View file

@ -1,77 +0,0 @@
use {rhai::ImmutableString, std::collections::HashMap};
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum Section {
Text,
Data,
}
#[derive(Clone, Copy, Debug)]
pub struct SymbolEntry {
pub location: Section,
pub offset: usize,
}
#[derive(Clone, Debug)]
pub enum RelocKey {
Symbol(usize),
Label(ImmutableString),
}
#[derive(Clone, Copy, Debug)]
pub enum RelocType {
Rel32,
Rel16,
Abs64,
}
#[derive(Clone, Debug)]
pub struct RelocEntry {
pub key: RelocKey,
pub ty: RelocType,
}
#[derive(Clone, Debug, Default)]
pub struct Sections {
pub text: Vec<u8>,
pub data: Vec<u8>,
}
#[derive(Clone, Debug, Default)]
pub struct Object {
pub sections: Sections,
pub symbols: Vec<Option<SymbolEntry>>,
pub labels: HashMap<ImmutableString, usize>,
pub relocs: HashMap<usize, RelocEntry>,
}
#[derive(Clone, Copy, Debug)]
#[repr(transparent)]
pub struct SymbolRef(pub usize);
impl Object {
pub fn symbol(&mut self, section: Section) -> SymbolRef {
let section_buf = match section {
Section::Text => &mut self.sections.text,
Section::Data => &mut self.sections.data,
};
self.symbols.push(Some(SymbolEntry {
location: section,
offset: section_buf.len(),
}));
SymbolRef(self.symbols.len() - 1)
}
pub fn relocation(&mut self, key: RelocKey, ty: RelocType) {
self.relocs
.insert(self.sections.text.len(), RelocEntry { key, ty });
self.sections.text.extend(match ty {
RelocType::Rel32 => &[0_u8; 4] as &[u8],
RelocType::Rel16 => &[0; 2],
RelocType::Abs64 => &[0; 8],
});
}
}

View file

@ -1,7 +1,6 @@
[package]
name = "hbbytecode"
version = "0.1.0"
edition = "2018"
edition = "2021"
[dependencies]
with_builtin_macros = "0.0.3"

View file

@ -1,75 +1,60 @@
/* HoleyBytes Bytecode representation in C
* Requires C23 compiler or better
*
* Uses MSVC pack pragma extension,
* proved to work with Clang and GNU® GCC.
*/
#pragma once
#include <assert.h>
#include <limits.h>
#include <stdint.h>
static_assert(CHAR_BIT == 8, "Cursed architectures are not supported");
enum hbbc_Opcode: uint8_t {
hbbc_Op_UN , hbbc_Op_TX , hbbc_Op_NOP , hbbc_Op_ADD , hbbc_Op_SUB , hbbc_Op_MUL ,
hbbc_Op_AND , hbbc_Op_OR , hbbc_Op_XOR , hbbc_Op_SL , hbbc_Op_SR , hbbc_Op_SRS ,
hbbc_Op_CMP , hbbc_Op_CMPU , hbbc_Op_DIR , hbbc_Op_NEG , hbbc_Op_NOT , hbbc_Op_ADDI ,
hbbc_Op_MULI , hbbc_Op_ANDI , hbbc_Op_ORI , hbbc_Op_XORI , hbbc_Op_SLI , hbbc_Op_SRI ,
hbbc_Op_SRSI , hbbc_Op_CMPI , hbbc_Op_CMPUI , hbbc_Op_CP , hbbc_Op_SWA , hbbc_Op_LI ,
hbbc_Op_LRA , hbbc_Op_LD , hbbc_Op_ST , hbbc_Op_LDR , hbbc_Op_STR , hbbc_Op_BMC ,
hbbc_Op_BRC , hbbc_Op_JMP , hbbc_Op_JMPR , hbbc_Op_JAL , hbbc_Op_JALR , hbbc_Op_JEQ ,
hbbc_Op_JNE , hbbc_Op_JLT , hbbc_Op_JGT , hbbc_Op_JLTU , hbbc_Op_JGTU , hbbc_Op_ECALL ,
hbbc_Op_ADDF , hbbc_Op_SUBF , hbbc_Op_MULF , hbbc_Op_DIRF , hbbc_Op_FMAF , hbbc_Op_NEGF ,
hbbc_Op_ITF , hbbc_Op_FTI , hbbc_Op_ADDFI , hbbc_Op_MULFI ,
} typedef hbbc_Opcode;
typedef enum hbbc_Opcode: uint8_t {
hbbc_Op_NOP, hbbc_Op_ADD, hbbc_Op_MUL, hbbc_Op_AND, hbbc_Op_OR, hbbc_Op_XOR, hbbc_Op_SL,
hbbc_Op_SR, hbbc_Op_SRS, hbbc_Op_CMP, hbbc_Op_CMPU, hbbc_Op_DIR, hbbc_Op_NEG, hbbc_Op_NOT,
hbbc_Op_ADDI, hbbc_Op_MULI, hbbc_Op_ANDI, hbbc_Op_ORI, hbbc_Op_XORI, hbbc_Op_SLI, hbbc_Op_SRI,
hbbc_Op_SRSI, hbbc_Op_CMPI, hbbc_Op_CMPUI, hbbc_Op_CP, hbbc_Op_SWA, hbbc_Op_LI, hbbc_Op_LD,
hbbc_Op_ST, hbbc_Op_BMC, hbbc_Op_BRC, hbbc_Op_JMP, hbbc_Op_JEQ, hbbc_Op_JNE, hbbc_Op_JLT,
hbbc_Op_JGT, hbbc_Op_JLTU, hbbc_Op_JGTU, hbbc_Op_ECALL, hbbc_Op_ADDF, hbbc_Op_MULF,
hbbc_Op_DIRF, hbbc_Op_ADDFI, hbbc_Op_MULFI,
} hbbc_Opcode;
static_assert(sizeof(hbbc_Opcode) == 1);
#pragma pack(push, 1)
struct hbbc_ParamBBBB
typedef struct hbbc_ParamBBBB
{ uint8_t _0; uint8_t _1; uint8_t _2; uint8_t _3; }
typedef hbbc_ParamBBBB;
static_assert(sizeof(hbbc_ParamBBBB) == 32 / 8);
hbbc_ParamBBBB;
static_assert(sizeof(hbbc_ParamBBBB) == 4);
struct hbbc_ParamBBB
typedef struct hbbc_ParamBBB
{ uint8_t _0; uint8_t _1; uint8_t _2; }
typedef hbbc_ParamBBB;
static_assert(sizeof(hbbc_ParamBBB) == 24 / 8);
hbbc_ParamBBB;
static_assert(sizeof(hbbc_ParamBBB) == 3);
struct hbbc_ParamBBDH
typedef struct hbbc_ParamBBDH
{ uint8_t _0; uint8_t _1; uint64_t _2; uint16_t _3; }
typedef hbbc_ParamBBDH;
static_assert(sizeof(hbbc_ParamBBDH) == 96 / 8);
hbbc_ParamBBDH;
static_assert(sizeof(hbbc_ParamBBDH) == 12);
struct hbbc_ParamBBWH
{ uint8_t _0; uint8_t _1; uint32_t _2; uint16_t _3; }
typedef hbbc_ParamBBWH;
static_assert(sizeof(hbbc_ParamBBWH) == 64 / 8);
typedef struct hbbc_ParamBBDB
{ uint8_t _0; uint8_t _1; uint64_t _2; uint8_t _3; }
hbbc_ParamBBDB;
static_assert(sizeof(hbbc_ParamBBDB) == 11);
struct hbbc_ParamBBD
typedef struct hbbc_ParamBBD
{ uint8_t _0; uint8_t _1; uint64_t _2; }
typedef hbbc_ParamBBD;
static_assert(sizeof(hbbc_ParamBBD) == 80 / 8);
hbbc_ParamBBD;
static_assert(sizeof(hbbc_ParamBBD) == 10);
struct hbbc_ParamBBW
{ uint8_t _0; uint8_t _1; uint32_t _2; }
typedef hbbc_ParamBBW;
static_assert(sizeof(hbbc_ParamBBW) == 48 / 8);
struct hbbc_ParamBB
typedef struct hbbc_ParamBB
{ uint8_t _0; uint8_t _1; }
typedef hbbc_ParamBB;
static_assert(sizeof(hbbc_ParamBB) == 16 / 8);
hbbc_ParamBB;
static_assert(sizeof(hbbc_ParamBB) == 2);
struct hbbc_ParamBD
typedef struct hbbc_ParamBD
{ uint8_t _0; uint64_t _1; }
typedef hbbc_ParamBD;
static_assert(sizeof(hbbc_ParamBD) == 72 / 8);
hbbc_ParamBD;
static_assert(sizeof(hbbc_ParamBD) == 9);
typedef uint64_t hbbc_ParamD;
static_assert(sizeof(hbbc_ParamD) == 64 / 8);
static_assert(sizeof(hbbc_ParamD) == 8);
#pragma pack(pop)

View file

@ -1,120 +0,0 @@
0x00, UN, N, "Cause an unreachable code trap" ;
0x01, TX, N, "Terminate execution" ;
0x02, NOP, N, "Do nothing" ;
0x03, ADD8, RRR, "Addition (8b)" ;
0x04, ADD16, RRR, "Addition (16b)" ;
0x05, ADD32, RRR, "Addition (32b)" ;
0x06, ADD64, RRR, "Addition (64b)" ;
0x07, SUB8, RRR, "Subtraction (8b)" ;
0x08, SUB16, RRR, "Subtraction (16b)" ;
0x09, SUB32, RRR, "Subtraction (32b)" ;
0x0A, SUB64, RRR, "Subtraction (64b)" ;
0x0B, MUL8, RRR, "Multiplication (8b)" ;
0x0C, MUL16, RRR, "Multiplication (16b)" ;
0x0D, MUL32, RRR, "Multiplication (32b)" ;
0x0E, MUL64, RRR, "Multiplication (64b)" ;
0x0F, AND, RRR, "Bitand" ;
0x10, OR, RRR, "Bitor" ;
0x11, XOR, RRR, "Bitxor" ;
0x12, SLU8, RRR, "Unsigned left bitshift (8b)" ;
0x13, SLU16, RRR, "Unsigned left bitshift (16b)" ;
0x14, SLU32, RRR, "Unsigned left bitshift (32b)" ;
0x15, SLU64, RRR, "Unsigned left bitshift (64b)" ;
0x16, SRU8, RRR, "Unsigned right bitshift (8b)" ;
0x17, SRU16, RRR, "Unsigned right bitshift (16b)" ;
0x18, SRU32, RRR, "Unsigned right bitshift (32b)" ;
0x19, SRU64, RRR, "Unsigned right bitshift (64b)" ;
0x1A, SRS8, RRR, "Signed right bitshift (8b)" ;
0x1B, SRS16, RRR, "Signed right bitshift (16b)" ;
0x1C, SRS32, RRR, "Signed right bitshift (32b)" ;
0x1D, SRS64, RRR, "Signed right bitshift (64b)" ;
0x1E, CMPU, RRR, "Unsigned comparison" ;
0x1F, CMPS, RRR, "Signed comparison" ;
0x20, DIRU8, RRRR, "Merged divide-remainder (unsigned 8b)" ;
0x21, DIRU16, RRRR, "Merged divide-remainder (unsigned 16b)" ;
0x22, DIRU32, RRRR, "Merged divide-remainder (unsigned 32b)" ;
0x23, DIRU64, RRRR, "Merged divide-remainder (unsigned 64b)" ;
0x24, DIRS8, RRRR, "Merged divide-remainder (signed 8b)" ;
0x25, DIRS16, RRRR, "Merged divide-remainder (signed 16b)" ;
0x26, DIRS32, RRRR, "Merged divide-remainder (signed 32b)" ;
0x27, DIRS64, RRRR, "Merged divide-remainder (signed 64b)" ;
0x28, NEG, RR, "Bit negation" ;
0x29, NOT, RR, "Logical negation" ;
0x2A, SXT8, RR, "Sign extend 8b to 64b" ;
0x2B, SXT16, RR, "Sign extend 16b to 64b" ;
0x2C, SXT32, RR, "Sign extend 32b to 64b" ;
0x2D, ADDI8, RRB, "Addition with immediate (8b)" ;
0x2E, ADDI16, RRH, "Addition with immediate (16b)" ;
0x2F, ADDI32, RRW, "Addition with immediate (32b)" ;
0x30, ADDI64, RRD, "Addition with immediate (64b)" ;
0x31, MULI8, RRB, "Multiplication with immediate (8b)" ;
0x32, MULI16, RRH, "Multiplication with immediate (16b)" ;
0x33, MULI32, RRW, "Multiplication with immediate (32b)" ;
0x34, MULI64, RRD, "Multiplication with immediate (64b)" ;
0x35, ANDI, RRD, "Bitand with immediate" ;
0x36, ORI, RRD, "Bitor with immediate" ;
0x37, XORI, RRD, "Bitxor with immediate" ;
0x38, SLUI8, RRB, "Unsigned left bitshift with immediate (8b)" ;
0x39, SLUI16, RRB, "Unsigned left bitshift with immediate (16b)";
0x3A, SLUI32, RRB, "Unsigned left bitshift with immediate (32b)";
0x3B, SLUI64, RRB, "Unsigned left bitshift with immediate (64b)";
0x3C, SRUI8, RRB, "Unsigned right bitshift with immediate (8b)" ;
0x3D, SRUI16, RRB, "Unsigned right bitshift with immediate (16b)";
0x3E, SRUI32, RRB, "Unsigned right bitshift with immediate (32b)";
0x3F, SRUI64, RRB, "Unsigned right bitshift with immediate (64b)";
0x40, SRSI8, RRB, "Signed right bitshift with immediate (8b)" ;
0x41, SRSI16, RRB, "Signed right bitshift with immediate (16b)" ;
0x42, SRSI32, RRB, "Signed right bitshift with immediate (32b)" ;
0x43, SRSI64, RRB, "Signed right bitshift with immediate (64b)" ;
0x44, CMPUI, RRD, "Unsigned compare with immediate" ;
0x45, CMPSI, RRD, "Signed compare with immediate" ;
0x46, CP, RR, "Copy register" ;
0x47, SWA, RR, "Swap registers" ;
0x48, LI8, RB, "Load immediate (8b)" ;
0x49, LI16, RH, "Load immediate (16b)" ;
0x4A, LI32, RW, "Load immediate (32b)" ;
0x4B, LI64, RD, "Load immediate (64b)" ;
0x4C, LRA, RRO, "Load relative address" ;
0x4D, LD, RRAH, "Load from absolute address" ;
0x4E, ST, RRAH, "Store to absolute address" ;
0x4F, LDR, RROH, "Load from relative address" ;
0x50, STR, RROH, "Store to relative address" ;
0x51, BMC, RRH, "Copy block of memory" ;
0x52, BRC, RRB, "Copy register block" ;
0x53, JMP, O, "Relative jump" ;
0x54, JAL, RRO, "Linking relative jump" ;
0x55, JALA, RRA, "Linking absolute jump" ;
0x56, JEQ, RRP, "Branch on equal" ;
0x57, JNE, RRP, "Branch on nonequal" ;
0x58, JLTU, RRP, "Branch on lesser-than (unsigned)" ;
0x59, JGTU, RRP, "Branch on greater-than (unsigned)" ;
0x5A, JLTS, RRP, "Branch on lesser-than (signed)" ;
0x5B, JGTS, RRP, "Branch on greater-than (signed)" ;
0x5C, ECA, N, "Environment call trap" ;
0x5D, EBP, N, "Environment breakpoint" ;
0x5E, FADD32, RRR, "Floating point addition (32b)" ;
0x5F, FADD64, RRR, "Floating point addition (64b)" ;
0x60, FSUB32, RRR, "Floating point subtraction (32b)" ;
0x61, FSUB64, RRR, "Floating point subtraction (64b)" ;
0x62, FMUL32, RRR, "Floating point multiply (32b)" ;
0x63, FMUL64, RRR, "Floating point multiply (64b)" ;
0x64, FDIV32, RRR, "Floating point division (32b)" ;
0x65, FDIV64, RRR, "Floating point division (64b)" ;
0x66, FMA32, RRRR, "Float fused multiply-add (32b)" ;
0x67, FMA64, RRRR, "Float fused multiply-add (64b)" ;
0x68, FINV32, RR, "Float reciprocal (32b)" ;
0x69, FINV64, RR, "Float reciprocal (64b)" ;
0x6A, FCMPLT32, RRR, "Float compare less than (32b)" ;
0x6B, FCMPLT64, RRR, "Float compare less than (64b)" ;
0x6C, FCMPGT32, RRR, "Float compare greater than (32b)" ;
0x6D, FCMPGT64, RRR, "Float compare greater than (64b)" ;
0x6E, ITF32, RR, "Int to 32 bit float" ;
0x6F, ITF64, RR, "Int to 64 bit float" ;
0x70, FTI32, RRB, "Float 32 to int" ;
0x71, FTI64, RRB, "Float 64 to int" ;
0x72, FC32T64, RR, "Float 32 to Float 64" ;
0x73, FC64T32, RRB, "Float 64 to Float 32" ;
0x74, LRA16, RRO, "Load relative address (16 bit)" ;
0x75, LDR16, RRPH, "Load from relative address (16 bit)" ;
0x76, STR16, RRPH, "Store to relative address (16 bit)" ;
0x77, JMP16, P, "Relative jump (16 bit)" ;

View file

@ -1,119 +1,106 @@
#![no_std]
use core::convert::TryFrom;
macro_rules! constmod {
($vis:vis $mname:ident($repr:ty) {
$(#![doc = $mdoc:literal])?
$($cname:ident = $val:expr $(,$doc:literal)?;)*
}) => {
$(#[doc = $mdoc])?
$vis mod $mname {
$(
$(#[doc = $doc])?
pub const $cname: $repr = $val;
)*
}
};
}
type OpR = u8;
constmod!(pub opcode(u8) {
//! Opcode constant module
type OpA = u64;
type OpO = i32;
type OpP = i16;
NOP = 0, "N; Do nothing";
type OpB = u8;
type OpH = u16;
type OpW = u32;
type OpD = u64;
ADD = 1, "BBB; #0 ← #1 + #2";
SUB = 2, "BBB; #0 ← #1 - #2";
MUL = 3, "BBB; #0 ← #1 × #2";
AND = 4, "BBB; #0 ← #1 & #2";
OR = 5, "BBB; #0 ← #1 | #2";
XOR = 6, "BBB; #0 ← #1 ^ #2";
SL = 7, "BBB; #0 ← #1 « #2";
SR = 8, "BBB; #0 ← #1 » #2";
SRS = 9, "BBB; #0 ← #1 » #2 (signed)";
CMP = 10, "BBB; #0 ← #1 <=> #2";
CMPU = 11, "BBB; #0 ← #1 <=> #2 (unsigned)";
DIR = 12, "BBBB; #0 ← #2 / #3, #1 ← #2 % #3";
NEG = 13, "BB; #0 ← ~#1";
NOT = 14, "BB; #0 ← !#1";
ADDI = 15, "BBD; #0 ← #1 + imm #2";
MULI = 16, "BBD; #0 ← #1 × imm #2";
ANDI = 17, "BBD; #0 ← #1 & imm #2";
ORI = 18, "BBD; #0 ← #1 | imm #2";
XORI = 19, "BBD; #0 ← #1 ^ imm #2";
SLI = 20, "BBD; #0 ← #1 « imm #2";
SRI = 21, "BBD; #0 ← #1 » imm #2";
SRSI = 22, "BBD; #0 ← #1 » imm #2 (signed)";
CMPI = 23, "BBD; #0 ← #1 <=> imm #2";
CMPUI = 24, "BBD; #0 ← #1 <=> imm #2 (unsigned)";
CP = 25, "BB; Copy #0 ← #1";
SWA = 26, "BB; Swap #0 and #1";
LI = 27, "BD; #0 ← imm #1";
LD = 28, "BBDB; #0 ← [#1 + imm #2], imm #3 bytes, overflowing";
ST = 29, "BBDB; [#1 + imm #2] ← #0, imm #3 bytes, overflowing";
BMC = 30, "BBD; [#0] ← [#1], imm #2 bytes";
BRC = 31, "BBB; #0 ← #1, imm #2 registers";
JMP = 32, "BD; Unconditional jump [#0 + imm #1]";
JEQ = 33, "BBD; if #0 = #1 → jump imm #2";
JNE = 34, "BBD; if #0 ≠ #1 → jump imm #2";
JLT = 35, "BBD; if #0 < #1 → jump imm #2";
JGT = 36, "BBD; if #0 > #1 → jump imm #2";
JLTU = 37, "BBD; if #0 < #1 → jump imm #2 (unsigned)";
JGTU = 38, "BBD; if #0 > #1 → jump imm #2 (unsigned)";
ECALL = 39, "N; Issue system call";
ADDF = 40, "BBB; #0 ← #1 +. #2";
MULF = 41, "BBB; #0 ← #1 *. #2";
DIRF = 42, "BBBB; #0 ← #2 / #3, #1 ← #2 % #3";
ADDFI = 43, "BBD; #0 ← #1 +. imm #2";
MULFI = 44, "BBD; #0 ← #1 *. imm #2";
});
#[repr(packed)]
pub struct ParamBBBB(pub u8, pub u8, pub u8, pub u8);
#[repr(packed)]
pub struct ParamBBB(pub u8, pub u8, pub u8);
#[repr(packed)]
pub struct ParamBBDH(pub u8, pub u8, pub u64, pub u16);
#[repr(packed)]
pub struct ParamBBDB(pub u8, pub u8, pub u64, pub u8);
#[repr(packed)]
pub struct ParamBBD(pub u8, pub u8, pub u64);
#[repr(packed)]
pub struct ParamBB(pub u8, pub u8);
#[repr(packed)]
pub struct ParamBD(pub u8, pub u64);
/// # Safety
/// The type has to be valid for any bit pattern, so it can be decoded directly from bytecode.
pub unsafe trait BytecodeItem {}
macro_rules! define_items {
($($name:ident ($($item:ident),* $(,)?)),* $(,)?) => {
$(
#[derive(Clone, Copy, Debug)]
#[repr(packed)]
pub struct $name($(pub $item),*);
unsafe impl BytecodeItem for $name {}
)*
};
}
define_items! {
OpsRR (OpR, OpR ),
OpsRRR (OpR, OpR, OpR ),
OpsRRRR (OpR, OpR, OpR, OpR),
OpsRRB (OpR, OpR, OpB ),
OpsRRH (OpR, OpR, OpH ),
OpsRRW (OpR, OpR, OpW ),
OpsRRD (OpR, OpR, OpD ),
OpsRB (OpR, OpB ),
OpsRH (OpR, OpH ),
OpsRW (OpR, OpW ),
OpsRD (OpR, OpD ),
OpsRRA (OpR, OpR, OpA ),
OpsRRAH (OpR, OpR, OpA, OpH),
OpsRROH (OpR, OpR, OpO, OpH),
OpsRRPH (OpR, OpR, OpP, OpH),
OpsRRO (OpR, OpR, OpO ),
OpsRRP (OpR, OpR, OpP ),
OpsO (OpO, ),
OpsP (OpP, ),
OpsN ( ),
}
unsafe impl BytecodeItem for u8 {}
::with_builtin_macros::with_builtin! {
let $spec = include_from_root!("instructions.in") in {
/// Invoke macro with bytecode definition
///
/// # Format
/// ```text
/// Opcode, Mnemonic, Type, Docstring;
/// ```
///
/// # Type
/// ```text
/// Types consist of letters meaning a single field
/// | Type | Size (B) | Meaning |
/// |:-----|:---------|:------------------------|
/// | N | 0 | Empty |
/// | R | 1 | Register |
/// | A | 8 | Absolute address |
/// | O | 4 | Relative address offset |
/// | P | 2 | Relative address offset |
/// | B | 1 | Immediate |
/// | H | 2 | Immediate |
/// | W | 4 | Immediate |
/// | D | 8 | Immediate |
/// ```
#[macro_export]
macro_rules! invoke_with_def {
($macro:path) => {
$macro! { $spec }
};
}
}
}
macro_rules! gen_opcodes {
($($opcode:expr, $mnemonic:ident, $_ty:ident, $doc:literal;)*) => {
pub mod opcode {
$(
#[doc = $doc]
pub const $mnemonic: u8 = $opcode;
)*
}
};
}
/// Rounding mode
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(u8)]
pub enum RoundingMode {
NearestEven = 0,
Truncate = 1,
Up = 2,
Down = 3,
}
impl TryFrom<u8> for RoundingMode {
type Error = ();
fn try_from(value: u8) -> Result<Self, Self::Error> {
(value <= 3)
.then(|| unsafe { core::mem::transmute(value) })
.ok_or(())
}
}
invoke_with_def!(gen_opcodes);
pub unsafe trait OpParam {}
unsafe impl OpParam for ParamBBBB {}
unsafe impl OpParam for ParamBBB {}
unsafe impl OpParam for ParamBBDB {}
unsafe impl OpParam for ParamBBDH {}
unsafe impl OpParam for ParamBBD {}
unsafe impl OpParam for ParamBB {}
unsafe impl OpParam for ParamBD {}
unsafe impl OpParam for u64 {}
unsafe impl OpParam for () {}
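To make the parameter layouts concrete, a small hand-encoding sketch follows (an illustration, not code from this comparison). It assumes the constmod-style `opcode` module shown above and the `BBD` layout (`ParamBBD`, 10 bytes, matching the static_assert in the C header), with the immediate written little-endian as the assembler does:

fn encode_addi(dst: u8, src: u8, imm: u64, out: &mut Vec<u8>) {
    out.push(hbbytecode::opcode::ADDI); // 15; "BBD; #0 ← #1 + imm #2"
    out.push(dst);                      // B: destination register
    out.push(src);                      // B: source register
    out.extend(imm.to_le_bytes());      // D: 64-bit immediate
}
// encode_addi(1, 1, 5, &mut buf) appends 11 bytes:
// [15, 1, 1, 5, 0, 0, 0, 0, 0, 0, 0]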

View file

@ -6,10 +6,10 @@ edition = "2021"
[profile.release]
lto = true
[features]
default = ["alloc"]
alloc = []
nightly = []
[dependencies]
delegate = "0.9"
hashbrown = "0.13"
hbbytecode.path = "../hbbytecode"
log = "0.4"
paste = "1.0"
static_assertions = "1.0"

View file

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

Binary file not shown.

View file

@ -1,5 +0,0 @@
target
artifacts
corpus
coverage
Cargo.lock

View file

@ -1,30 +0,0 @@
[package]
name = "hbvm-fuzz"
version = "0.0.0"
publish = false
edition = "2021"
[package.metadata]
cargo-fuzz = true
[dependencies]
libfuzzer-sys = "0.4"
[dependencies.hbvm]
path = ".."
[dependencies.hbbytecode]
path = "../../hbbytecode"
# Prevent this from interfering with workspaces
[workspace]
members = ["."]
[profile.release]
debug = 1
[[bin]]
name = "vm"
path = "fuzz_targets/vm.rs"
test = false
doc = false

View file

@ -1,82 +0,0 @@
#![no_main]
use {
hbvm::{
mem::{
softpaging::{
paging::{PageTable, Permission},
HandlePageFault, PageSize, SoftPagedMem,
},
Address, MemoryAccessReason,
},
Vm,
},
libfuzzer_sys::fuzz_target,
};
fuzz_target!(|data: &[u8]| {
let mut vm = unsafe {
Vm::<_, 16384>::new(
SoftPagedMem::<_, true> {
pf_handler: TestTrapHandler,
program: data,
root_pt: Box::into_raw(Default::default()),
icache: Default::default(),
},
Address::new(4),
)
};
// Alloc and map some memory
let pages = [
alloc_and_map(&mut vm.memory, 0),
alloc_and_map(&mut vm.memory, 4096),
];
// Run VM
let _ = vm.run();
// Unmap and dealloc the memory
for (i, page) in pages.into_iter().enumerate() {
unmap_and_dealloc(&mut vm.memory, page, i as u64 * 4096);
}
let _ = unsafe { Box::from_raw(vm.memory.root_pt) };
});
fn alloc_and_map(memory: &mut SoftPagedMem<TestTrapHandler>, at: u64) -> *mut u8 {
let ptr = Box::into_raw(Box::<Page>::default()).cast();
unsafe {
memory
.map(ptr, Address::new(at), Permission::Write, PageSize::Size4K)
.unwrap()
};
ptr
}
fn unmap_and_dealloc(memory: &mut SoftPagedMem<TestTrapHandler>, ptr: *mut u8, from: u64) {
memory.unmap(Address::new(from)).unwrap();
let _ = unsafe { Box::from_raw(ptr.cast::<Page>()) };
}
#[repr(align(4096))]
struct Page([u8; 4096]);
impl Default for Page {
fn default() -> Self {
unsafe { std::mem::MaybeUninit::zeroed().assume_init() }
}
}
struct TestTrapHandler;
impl HandlePageFault for TestTrapHandler {
fn page_fault(
&mut self,
_: MemoryAccessReason,
_: &mut PageTable,
_: Address,
_: PageSize,
_: *mut u8,
) -> bool {
false
}
}

View file

@ -1,141 +0,0 @@
//! Block memory copier state machine
use {
super::{mem::MemoryAccessReason, Memory, VmRunError},
crate::mem::Address,
core::{mem::MaybeUninit, task::Poll},
};
/// Buffer size (defaults to 4 KiB, the smallest page size on most platforms)
const BUF_SIZE: usize = 4096;
/// Buffer of possibly uninitialised bytes, aligned to [`BUF_SIZE`]
#[repr(align(4096))]
struct AlignedBuf([MaybeUninit<u8>; BUF_SIZE]);
/// State for block memory copy
pub struct BlockCopier {
/// Source address
src: Address,
/// Destination address
dst: Address,
/// How many buffer sizes to copy?
n_buffers: usize,
/// …and what remains after?
rem: usize,
}
impl BlockCopier {
/// Construct a new one
#[inline]
pub fn new(src: Address, dst: Address, count: usize) -> Self {
Self {
src,
dst,
n_buffers: count / BUF_SIZE,
rem: count % BUF_SIZE,
}
}
/// Copy one block
///
/// # Safety
/// - Same as for [`Memory::load`] and [`Memory::store`]
pub unsafe fn poll(&mut self, memory: &mut impl Memory) -> Poll<Result<(), BlkCopyError>> {
// Safety: Assuming uninit of array of MaybeUninit is sound
let mut buf = AlignedBuf(unsafe { MaybeUninit::uninit().assume_init() });
// We have at least one buffer size to copy
if self.n_buffers != 0 {
if let Err(e) = unsafe {
act(
memory,
self.src,
self.dst,
buf.0.as_mut_ptr().cast(),
BUF_SIZE,
)
} {
return Poll::Ready(Err(e));
}
// Bump source and destination address
self.src += BUF_SIZE;
self.dst += BUF_SIZE;
self.n_buffers -= 1;
return if self.n_buffers + self.rem == 0 {
// If there is nothing left, we are done
Poll::Ready(Ok(()))
} else {
// Otherwise advise the caller to poll again
Poll::Pending
};
}
if self.rem != 0 {
if let Err(e) = unsafe {
act(
memory,
self.src,
self.dst,
buf.0.as_mut_ptr().cast(),
self.rem,
)
} {
return Poll::Ready(Err(e));
}
}
Poll::Ready(Ok(()))
}
}
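// Usage sketch (assumption, not in this diff): drive a `BlockCopier` to completion
// in one go. `Pending` only means a full buffer-sized chunk was copied, so a real
// caller could check timers or interrupts between iterations.
//
// # Safety
// Same conditions as for [`Memory::load`] and [`Memory::store`].
unsafe fn copy_all(
    memory: &mut impl Memory,
    src: Address,
    dst: Address,
    count: usize,
) -> Result<(), BlkCopyError> {
    let mut copier = BlockCopier::new(src, dst, count);
    loop {
        match unsafe { copier.poll(memory) } {
            Poll::Ready(result) => return result,
            Poll::Pending => continue,
        }
    }
}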
/// Load to buffer and store from buffer
#[inline]
unsafe fn act(
memory: &mut impl Memory,
src: Address,
dst: Address,
buf: *mut u8,
count: usize,
) -> Result<(), BlkCopyError> {
unsafe {
// Load to buffer
memory
.load(src, buf, count)
.map_err(|super::mem::LoadError(addr)| BlkCopyError {
access_reason: MemoryAccessReason::Load,
addr,
})?;
// Store from buffer
memory
.store(dst, buf, count)
.map_err(|super::mem::StoreError(addr)| BlkCopyError {
access_reason: MemoryAccessReason::Store,
addr,
})?;
}
Ok(())
}
/// Error that occurred when copying a block of memory
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct BlkCopyError {
/// Kind of access
access_reason: MemoryAccessReason,
/// VM Address
addr: Address,
}
impl From<BlkCopyError> for VmRunError {
fn from(value: BlkCopyError) -> Self {
match value.access_reason {
MemoryAccessReason::Load => Self::LoadAccessEx(value.addr),
MemoryAccessReason::Store => Self::StoreAccessEx(value.addr),
}
}
}

View file

@ -1,66 +0,0 @@
use {core::arch::asm, hbbytecode::RoundingMode};
macro_rules! fnsdef {
{$(
$(#[$attr:meta])*
$vis:vis fn $name:ident[$inreg:ident -> $outreg:ident]($from:ident -> $to:ident): $ins:literal;
)*} => {$(
$(#[$attr])*
$vis fn $name(val: $from, mode: RoundingMode) -> $to {
let result: $to;
unsafe {
set_rounding_mode(mode);
asm!(
$ins,
out($outreg) result,
in($inreg) val,
);
default_rounding_mode();
}
result
}
)*};
}
fnsdef! {
/// Convert [`f64`] to [`f32`] with chosen rounding mode
pub fn conv64to32[vreg -> vreg](f64 -> f32): "fcvt {:s}, {:d}";
/// Convert [`f32`] to [`i64`] with chosen rounding mode
pub fn f32toint[vreg -> reg](f32 -> i64): "fcvtzs {}, {:s}";
/// Convert [`f64`] to [`i64`] with chosen rounding mode
pub fn f64toint[vreg -> reg](f64 -> i64): "fcvtzs {}, {:d}";
}
/// Set rounding mode
///
/// # Safety
/// - Do not call if rounding mode isn't [`RoundingMode::NearestEven`]
/// - Do not perform any Rust FP operations until reset using
/// [`default_rounding_mode`], you have to rely on inline assembly
#[inline(always)]
unsafe fn set_rounding_mode(mode: RoundingMode) {
if mode == RoundingMode::NearestEven {
return;
}
let fpcr: u64;
unsafe { asm!("mrs {}, fpcr", out(reg) fpcr) };
let fpcr = fpcr & !(0b11 << 22)
| (match mode {
RoundingMode::NearestEven => 0b00,
RoundingMode::Truncate => 0b11,
RoundingMode::Up => 0b01,
RoundingMode::Down => 0b10,
}) << 22;
unsafe { asm!("msr fpcr, {}", in(reg) fpcr) };
}
#[inline(always)]
unsafe fn default_rounding_mode() {
// I hope so much it gets optimised
set_rounding_mode(RoundingMode::NearestEven);
}

View file

@ -1,17 +0,0 @@
macro_rules! arch_specific {
{
$({$($cfg:tt)*} : $mod:ident;)*
} => {$(
#[cfg($($cfg)*)]
mod $mod;
#[cfg($($cfg)*)]
pub use $mod::*;
)*};
}
arch_specific! {
{target_arch = "x86_64" }: x86_64;
{target_arch = "riscv64"}: riscv64;
{target_arch = "aarch64"}: aarch64;
}

View file

@ -1,59 +0,0 @@
use {core::arch::asm, hbbytecode::RoundingMode};
macro_rules! roundm_op_litmode_internal {
($ins:literal, $in:expr, $out:expr => $outy:ident, $mode:literal $(,)?) => {
asm!(
concat!($ins, " {}, {}, ", $mode),
out($outy) $out,
in(freg) $in,
)
};
}
macro_rules! gen_roundm_op_litmode {
[$($ty:ident => $reg:ident),* $(,)?] => {
macro_rules! roundm_op_litmode {
$(
($ins:literal, $in:expr, $out:expr => $ty, $mode:literal) => {
roundm_op_litmode_internal!($ins, $in, $out => $reg, $mode)
};
)*
}
};
}
gen_roundm_op_litmode![
f32 => freg,
f64 => freg,
i64 => reg,
];
macro_rules! fnsdef {
{$(
$(#[$attr:meta])*
$vis:vis fn $name:ident($from:ident -> $to:ident): $ins:literal;
)*} => {$(
$(#[$attr])*
$vis fn $name(val: $from, mode: RoundingMode) -> $to {
let result: $to;
unsafe {
match mode {
RoundingMode::NearestEven => roundm_op_litmode!($ins, val, result => $to, "rne"),
RoundingMode::Truncate => roundm_op_litmode!($ins, val, result => $to, "rtz"),
RoundingMode::Up => roundm_op_litmode!($ins, val, result => $to, "rup"),
RoundingMode::Down => roundm_op_litmode!($ins, val, result => $to, "rdn"),
}
}
result
}
)*};
}
fnsdef! {
/// Convert [`f64`] to [`f32`] with chosen rounding mode
pub fn conv64to32(f64 -> f32): "fcvt.s.d";
/// Convert [`f32`] to [`i64`] with chosen rounding mode
pub fn f32toint(f32 -> i64): "fcvt.l.s";
/// Convert [`f64`] to [`i64`] with chosen rounding mode
pub fn f64toint(f64 -> i64): "fcvt.l.d";
}

View file

@ -1,73 +0,0 @@
use {
core::arch::{asm, x86_64 as arin},
hbbytecode::RoundingMode,
};
macro_rules! gen_op {
[$($ty:ident => $reg:ident),* $(,)?] => {
macro_rules! op {
$(
($ins:literal, $in:expr, $out:expr => $ty) => {
asm!(concat!($ins, " {}, {}"), out($reg) $out, in(xmm_reg) $in)
};
)*
}
};
}
gen_op![
f32 => xmm_reg,
f64 => xmm_reg,
i64 => reg,
];
macro_rules! fnsdef {
{$(
$(#[$attr:meta])*
$vis:vis fn $name:ident($from:ident -> $to:ident): $ins:literal;
)*} => {$(
$(#[$attr])*
$vis fn $name(val: $from, mode: RoundingMode) -> $to {
let result: $to;
unsafe {
set_rounding_mode(mode);
op!($ins, val, result => $to);
default_rounding_mode();
}
result
}
)*};
}
fnsdef! {
/// Convert [`f64`] to [`f32`] with chosen rounding mode
pub fn conv64to32(f64 -> f32): "cvtsd2ss";
/// Convert [`f32`] to [`i64`] with chosen rounding mode
pub fn f32toint(f32 -> i64): "cvttss2si";
/// Convert [`f64`] to [`i64`] with chosen rounding mode
pub fn f64toint(f64 -> i64): "cvttsd2si";
}
/// Set rounding mode
///
/// # Safety
/// - Do not call if rounding mode isn't [`RoundingMode::NearestEven`]
/// - Do not perform any Rust FP operations until reset using
/// [`default_rounding_mode`], you have to rely on inline assembly
#[inline(always)]
unsafe fn set_rounding_mode(mode: RoundingMode) {
unsafe {
arin::_MM_SET_ROUNDING_MODE(match mode {
RoundingMode::NearestEven => return,
RoundingMode::Truncate => arin::_MM_ROUND_TOWARD_ZERO,
RoundingMode::Up => arin::_MM_ROUND_UP,
RoundingMode::Down => arin::_MM_ROUND_DOWN,
})
}
}
#[inline(always)]
fn default_rounding_mode() {
// SAFETY: This is said to be the default mode, do not trust me.
unsafe { arin::_MM_SET_ROUNDING_MODE(arin::_MM_ROUND_NEAREST) };
}

View file

@ -1,141 +1,22 @@
//! HoleyBytes Virtual Machine
//!
//! # Alloc feature
//! - Enabled by default
//! - Provides mapping / unmapping, as well as [`Default`] and [`Drop`]
//! implementations for soft-paged memory implementation
// # General safety notice:
// - Validation has to assure there is 256 registers (r0 - r255)
// - Instructions have to be valid as specified (values and sizes)
// - Mapped pages should be at least 4 KiB
#![no_std]
#![cfg_attr(feature = "nightly", feature(fn_align))]
#![deny(unsafe_op_in_unsafe_fn)]
#[cfg(feature = "alloc")]
extern crate alloc;
pub mod mem;
pub mod value;
pub mod validate;
pub mod vm;
mod bmc;
mod float;
mod utils;
mod vmrun;
use {
bmc::BlockCopier,
mem::{Address, Memory},
value::{Value, ValueVariant},
};
/// HoleyBytes Virtual Machine
pub struct Vm<Mem, const TIMER_QUOTIENT: usize> {
/// Holds 256 registers
///
/// Writing to register 0 is considered undefined behaviour
/// in terms of HoleyBytes program execution
pub registers: [Value; 256],
/// Memory implementation
pub memory: Mem,
/// Program counter
pub pc: Address,
/// Program timer
timer: usize,
/// Saved block copier
copier: Option<BlockCopier>,
#[derive(Debug, PartialEq)]
pub enum RuntimeErrors {
InvalidOpcodePair(u8, u8),
RegisterTooSmall,
HostError(u64),
PageNotMapped(u64),
InvalidJumpAddress(u64),
InvalidSystemCall(u8),
}
impl<Mem, const TIMER_QUOTIENT: usize> Vm<Mem, TIMER_QUOTIENT>
where
Mem: Memory,
{
/// Create a new VM with program and trap handler
///
/// # Safety
/// Program code has to be validated
pub unsafe fn new(memory: Mem, entry: Address) -> Self {
Self {
registers: [Value::from(0_u64); 256],
memory,
pc: entry,
timer: 0,
copier: None,
}
}
/// Read register
#[inline(always)]
pub fn read_reg(&self, n: u8) -> Value {
unsafe { *self.registers.get_unchecked(n as usize) }
}
/// Write a register.
/// Writing to register 0 is no-op.
#[inline(always)]
pub fn write_reg<T: ValueVariant>(&mut self, n: u8, value: T) {
if n != 0 {
unsafe {
core::ptr::copy_nonoverlapping(
(&value as *const T).cast::<u8>(),
self.registers.as_mut_ptr().add(n.into()).cast::<u8>(),
core::mem::size_of::<T>(),
);
};
}
}
}
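// Sketch of the register-zero contract (illustrative; assumes `vm` is any
// constructed `Vm` over some `Memory` implementation): writes to r0 are
// silently dropped, so it always reads back as zero.
//
// vm.write_reg(0, 1234_u64);
// assert_eq!(vm.read_reg(0).cast::<u64>(), 0);
// vm.write_reg(1, 1234_u64);
// assert_eq!(vm.read_reg(1).cast::<u64>(), 1234);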
/// Virtual machine halt error
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[repr(u8)]
pub enum VmRunError {
/// Tried to execute invalid instruction
InvalidOpcode(u8),
/// Unhandled load access exception
LoadAccessEx(Address),
/// Unhandled instruction load access exception
ProgramFetchLoadEx(Address),
/// Unhandled store access exception
StoreAccessEx(Address),
/// Register out-of-bounds access
RegOutOfBounds,
/// Address out-of-bounds
AddrOutOfBounds,
/// Reached unreachable code
Unreachable,
/// Invalid operand
InvalidOperand,
/// Unimplemented feature
Unimplemented,
}
/// Virtual machine halt ok
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum VmRunOk {
/// Program has reached its end
End,
/// Program was interrupted by a timer
Timer,
/// Environment call
Ecall,
/// Breakpoint
Breakpoint,
// If you solve the halting problem feel free to remove this
#[derive(PartialEq, Debug)]
pub enum HaltStatus {
Halted,
Running,
}

26
hbvm/src/main.rs Normal file
View file

@ -0,0 +1,26 @@
use {
hbvm::{validate::validate, vm::Vm},
std::io::{stdin, Read},
};
fn main() -> Result<(), Box<dyn std::error::Error>> {
let mut prog = vec![];
stdin().read_to_end(&mut prog)?;
if let Err(e) = validate(&prog) {
eprintln!("Program validation error: {e:?}");
return Ok(());
} else {
unsafe {
let mut vm = Vm::new_unchecked(&prog);
vm.memory.insert_test_page();
println!("Program interrupt: {:?}", vm.run());
println!("{:?}", vm.registers);
}
}
Ok(())
}
pub fn time() -> u32 {
9
}

View file

@ -1,131 +0,0 @@
//! Virtual(?) memory address
use {
crate::utils::impl_display,
core::{fmt::Debug, ops},
};
/// Memory address
#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct Address(u64);
impl Address {
/// A null address
pub const NULL: Self = Self(0);
/// Saturating integer addition. Computes self + rhs, saturating at the numeric bounds instead of overflowing.
#[inline]
pub fn saturating_add<T: AddressOp>(self, rhs: T) -> Self {
Self(self.0.saturating_add(rhs.cast_u64()))
}
/// Saturating integer subtraction. Computes self - rhs, saturating at the numeric bounds instead of overflowing.
#[inline]
pub fn saturating_sub<T: AddressOp>(self, rhs: T) -> Self {
Self(self.0.saturating_sub(rhs.cast_u64()))
}
/// Wrapping integer addition. Computes self + rhs, wrapping the numeric bounds.
#[inline]
pub fn wrapping_add<T: AddressOp>(self, rhs: T) -> Self {
Self(self.0.wrapping_add(rhs.cast_u64()))
}
/// Wrapping integer subtraction. Computes self - rhs, wrapping the numeric bounds.
#[inline]
pub fn wrapping_sub<T: AddressOp>(self, rhs: T) -> Self {
Self(self.0.wrapping_sub(rhs.cast_u64()))
}
/// Cast or if smaller, truncate to [`usize`]
pub fn truncate_usize(self) -> usize {
self.0 as _
}
/// Get inner value
#[inline(always)]
pub fn get(self) -> u64 {
self.0
}
/// Construct new address
#[inline(always)]
pub fn new(val: u64) -> Self {
Self(val)
}
/// Do something with inner value
#[inline(always)]
pub fn map(self, f: impl Fn(u64) -> u64) -> Self {
Self(f(self.0))
}
}
impl_display!(for Address =>
|Address(a)| "{a:0x}"
);
impl<T: AddressOp> ops::Add<T> for Address {
type Output = Self;
#[inline]
fn add(self, rhs: T) -> Self::Output {
Self(self.0.wrapping_add(rhs.cast_u64()))
}
}
impl<T: AddressOp> ops::Sub<T> for Address {
type Output = Self;
#[inline]
fn sub(self, rhs: T) -> Self::Output {
Self(self.0.wrapping_sub(rhs.cast_u64()))
}
}
impl<T: AddressOp> ops::AddAssign<T> for Address {
fn add_assign(&mut self, rhs: T) {
self.0 = self.0.wrapping_add(rhs.cast_u64())
}
}
impl<T: AddressOp> ops::SubAssign<T> for Address {
fn sub_assign(&mut self, rhs: T) {
self.0 = self.0.wrapping_sub(rhs.cast_u64())
}
}
impl From<Address> for u64 {
#[inline(always)]
fn from(value: Address) -> Self {
value.0
}
}
impl Debug for Address {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
write!(f, "[{:0x}]", self.0)
}
}
/// Can perform address operations with
pub trait AddressOp {
/// Cast to u64, truncating or extending
fn cast_u64(self) -> u64;
}
macro_rules! impl_address_ops_u(($($ty:ty),* $(,)?) => {
$(impl AddressOp for $ty {
#[inline(always)]
fn cast_u64(self) -> u64 { self as _ }
})*
});
macro_rules! impl_address_ops_i(($($ty:ty),* $(,)?) => {
$(impl AddressOp for $ty {
#[inline(always)]
fn cast_u64(self) -> u64 { self as i64 as u64 }
})*
});
impl_address_ops_u!(u8, u16, u32, u64, usize);
impl_address_ops_i!(i8, i16, i32, i64, isize);
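// Illustrative sketch (not in the original file): `Address` arithmetic wraps by
// default and signed operands are sign-extended to u64, so a negative offset
// walks the address backwards.
#[cfg(test)]
#[test]
fn address_arithmetic_sketch() {
    let a = Address::new(0x1000);
    assert_eq!((a + 0x10_u64).get(), 0x1010);
    assert_eq!((a + (-0x10_i64)).get(), 0x0ff0);
    assert_eq!(Address::new(u64::MAX).wrapping_add(1_u64), Address::NULL);
}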

View file

@ -1,80 +0,0 @@
//! Memory implementations
pub mod softpaging;
pub(crate) mod addr;
pub use addr::Address;
use crate::utils::impl_display;
/// Load-store memory access
pub trait Memory {
/// Load data from memory on address
///
/// # Safety
/// - Shall not overrun the buffer
unsafe fn load(
&mut self,
addr: Address,
target: *mut u8,
count: usize,
) -> Result<(), LoadError>;
/// Store data to memory on address
///
/// # Safety
/// - Shall not overrun the buffer
unsafe fn store(
&mut self,
addr: Address,
source: *const u8,
count: usize,
) -> Result<(), StoreError>;
/// Read from program memory to execute
///
/// # Safety
/// - Data read have to be valid
unsafe fn prog_read<T: Copy>(&mut self, addr: Address) -> T;
}
/// Unhandled load access trap
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct LoadError(pub Address);
impl_display!(for LoadError =>
|LoadError(a)| "Load access error at address {a}",
);
/// Unhandled store access trap
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct StoreError(pub Address);
impl_display!(for StoreError =>
|StoreError(a)| "Store access error at address {a}",
);
/// Reason to access memory
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum MemoryAccessReason {
/// Memory was accessed for load (read)
Load,
/// Memory was accessed for store (write)
Store,
}
impl_display!(for MemoryAccessReason => match {
Self::Load => const "Load";
Self::Store => const "Store";
});
impl From<LoadError> for crate::VmRunError {
fn from(value: LoadError) -> Self {
Self::LoadAccessEx(value.0)
}
}
impl From<StoreError> for crate::VmRunError {
fn from(value: StoreError) -> Self {
Self::StoreAccessEx(value.0)
}
}
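// Sketch (illustration only, assumes the `alloc` feature): the simplest possible
// `Memory` implementation, backed by a single flat `Vec<u8>`. Handy for unit
// tests where software paging is not needed.
#[cfg(feature = "alloc")]
struct FlatMem(alloc::vec::Vec<u8>);

#[cfg(feature = "alloc")]
impl Memory for FlatMem {
    unsafe fn load(&mut self, addr: Address, target: *mut u8, count: usize) -> Result<(), LoadError> {
        let start = addr.truncate_usize();
        let src = start
            .checked_add(count)
            .and_then(|end| self.0.get(start..end))
            .ok_or(LoadError(addr))?;
        // Caller guarantees `target` can hold `count` bytes
        unsafe { core::ptr::copy_nonoverlapping(src.as_ptr(), target, count) };
        Ok(())
    }

    unsafe fn store(&mut self, addr: Address, source: *const u8, count: usize) -> Result<(), StoreError> {
        let start = addr.truncate_usize();
        let dst = start
            .checked_add(count)
            .and_then(|end| self.0.get_mut(start..end))
            .ok_or(StoreError(addr))?;
        unsafe { core::ptr::copy_nonoverlapping(source, dst.as_mut_ptr(), count) };
        Ok(())
    }

    unsafe fn prog_read<T: Copy>(&mut self, addr: Address) -> T {
        // Unaligned read straight out of the backing buffer; validation has to
        // keep the read in bounds.
        unsafe { self.0.as_ptr().add(addr.truncate_usize()).cast::<T>().read_unaligned() }
    }
}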

View file

@ -1,111 +0,0 @@
//! Program instruction cache
use crate::mem::Address;
use {
super::{lookup::AddrPageLookuper, paging::PageTable, PageSize},
core::{
mem::{size_of, MaybeUninit},
ptr::{copy_nonoverlapping, NonNull},
},
};
/// Instruction cache
#[derive(Clone, Debug)]
pub struct ICache {
/// Current page address base
base: Address,
/// Current page pointer
data: Option<NonNull<u8>>,
/// Current page size
size: PageSize,
/// Address mask
mask: u64,
}
impl Default for ICache {
fn default() -> Self {
Self {
base: Address::NULL,
data: Default::default(),
size: PageSize::Size4K,
mask: Default::default(),
}
}
}
impl ICache {
/// Fetch instruction from cache
///
/// # Safety
/// `T` should be valid to read from instruction memory
pub(super) unsafe fn fetch<T>(
&mut self,
addr: Address,
root_pt: *const PageTable,
) -> Option<T> {
let mut ret = MaybeUninit::<T>::uninit();
let pbase = self
.data
.or_else(|| unsafe { self.fetch_page(self.base + self.size, root_pt) })?;
// Get address base
let base = addr.map(|x| x & self.mask);
// Base not matching, fetch anew
if base != self.base {
unsafe { self.fetch_page(base, root_pt) }?;
};
let offset = addr.get() & !self.mask;
let requ_size = size_of::<T>();
// Page overflow
let rem = (offset as usize)
.saturating_add(requ_size)
.saturating_sub(self.size as _);
let first_copy = requ_size.saturating_sub(rem);
// Copy non-overflowing part
unsafe { copy_nonoverlapping(pbase.as_ptr(), ret.as_mut_ptr().cast::<u8>(), first_copy) };
// Copy overflow
if rem != 0 {
let pbase = unsafe { self.fetch_page(self.base + self.size, root_pt) }?;
// Unlikely, unsupported scenario
if rem > self.size as _ {
return None;
}
unsafe {
copy_nonoverlapping(
pbase.as_ptr(),
ret.as_mut_ptr().cast::<u8>().add(first_copy),
rem,
)
};
}
Some(unsafe { ret.assume_init() })
}
/// Fetch a page
unsafe fn fetch_page(&mut self, addr: Address, pt: *const PageTable) -> Option<NonNull<u8>> {
let res = AddrPageLookuper::new(addr, 0, pt).next()?.ok()?;
if !super::perm_check::executable(res.perm) {
return None;
}
(self.size, self.mask) = match res.size {
4096 => (PageSize::Size4K, !((1 << 8) - 1)),
2097152 => (PageSize::Size2M, !((1 << (8 * 2)) - 1)),
1073741824 => (PageSize::Size1G, !((1 << (8 * 3)) - 1)),
_ => return None,
};
self.data = Some(NonNull::new(res.ptr)?);
self.base = addr.map(|x| x & self.mask);
self.data
}
}

View file

@ -1,126 +0,0 @@
//! Address lookup
use crate::mem::addr::Address;
use super::{
addr_extract_index,
paging::{PageTable, Permission},
PageSize,
};
/// Good result from address split
pub struct AddrPageLookupOk {
/// Virtual address
pub vaddr: Address,
/// Pointer to the start of the region to operate on
pub ptr: *mut u8,
/// Size to the end of the page, or to the end of the requested range (whichever is smaller)
pub size: usize,
/// Page permission
pub perm: Permission,
}
/// Erroneous address split result
pub struct AddrPageLookupError {
/// Address of failure
pub addr: Address,
/// Requested page size
pub size: PageSize,
}
/// Address splitter into pages
pub struct AddrPageLookuper {
/// Current address
addr: Address,
/// Size left
size: usize,
/// Page table
pagetable: *const PageTable,
}
impl AddrPageLookuper {
/// Create a new page lookuper
#[inline]
pub const fn new(addr: Address, size: usize, pagetable: *const PageTable) -> Self {
Self {
addr,
size,
pagetable,
}
}
/// Bump address by size X
pub fn bump(&mut self, page_size: PageSize) {
self.addr += page_size;
self.size = self.size.saturating_sub(page_size as _);
}
}
impl Iterator for AddrPageLookuper {
type Item = Result<AddrPageLookupOk, AddrPageLookupError>;
fn next(&mut self) -> Option<Self::Item> {
// The end, everything is fine
if self.size == 0 {
return None;
}
let (base, perm, size, offset) = 'a: {
let mut current_pt = self.pagetable;
// Walk the page table
for lvl in (0..5).rev() {
// Get an entry
unsafe {
let entry = (*current_pt)
.table
.get_unchecked(addr_extract_index(self.addr, lvl));
let ptr = entry.ptr();
match entry.permission() {
// No page → page fault
Permission::Empty => {
return Some(Err(AddrPageLookupError {
addr: self.addr,
size: PageSize::from_lvl(lvl)?,
}))
}
// Node → proceed walking
Permission::Node => current_pt = ptr as _,
// Leaf → return relevant data
perm => {
break 'a (
// Pointer in host memory
ptr as *mut u8,
perm,
PageSize::from_lvl(lvl)?,
// In-page offset
addr_extract_index(self.addr, lvl),
);
}
}
}
}
return None; // Reached the end (should not happen)
};
// Get available byte count in the selected page with offset
let avail = (size as usize).saturating_sub(offset).clamp(0, self.size);
self.bump(size);
Some(Ok(AddrPageLookupOk {
vaddr: self.addr,
ptr: unsafe { base.add(offset) }, // Return pointer to the start of region
size: avail,
perm,
}))
}
}

View file

@ -1,172 +0,0 @@
//! Automatic memory mapping
use crate::{mem::addr::Address, utils::impl_display};
use {
super::{
addr_extract_index,
paging::{PageTable, Permission, PtEntry, PtPointedData},
PageSize, SoftPagedMem,
},
alloc::boxed::Box,
};
impl<'p, A, const OUT_PROG_EXEC: bool> SoftPagedMem<'p, A, OUT_PROG_EXEC> {
/// Maps host's memory into VM's memory
///
/// # Safety
/// - Your faith in the gods of UB
/// - Addr-san claims it's fine but who knows if she isn't lying :ferrisSus:
/// - Alright, Miri-sama is also fine with this, who knows why
pub unsafe fn map(
&mut self,
host: *mut u8,
target: Address,
perm: Permission,
pagesize: PageSize,
) -> Result<(), MapError> {
let mut current_pt = self.root_pt;
// Decide what level depth we are going to
let lookup_depth = match pagesize {
PageSize::Size4K => 0,
PageSize::Size2M => 1,
PageSize::Size1G => 2,
};
// Walk pagetable levels
for lvl in (lookup_depth + 1..5).rev() {
let entry = unsafe {
(*current_pt)
.table
.get_unchecked_mut(addr_extract_index(target, lvl))
};
let ptr = entry.ptr();
match entry.permission() {
// Still not on target and already seeing empty entry?
// No worries! Let's create one (allocates).
Permission::Empty => {
// Increase children count
unsafe { (*current_pt).childen += 1 };
let table = Box::into_raw(Box::new(PtPointedData {
pt: PageTable::default(),
}));
unsafe { core::ptr::write(entry, PtEntry::new(table, Permission::Node)) };
current_pt = table as _;
}
// Continue walking
Permission::Node => current_pt = ptr as _,
// There is some entry in place of a node
_ => return Err(MapError::PageOnNode),
}
}
let node = unsafe {
(*current_pt)
.table
.get_unchecked_mut(addr_extract_index(target, lookup_depth))
};
// Check if node is not mapped
if node.permission() != Permission::Empty {
return Err(MapError::AlreadyMapped);
}
// Write entry
unsafe {
(*current_pt).childen += 1;
core::ptr::write(node, PtEntry::new(host.cast(), perm));
}
Ok(())
}
/// Unmaps pages from VM's memory
///
/// If this errors, it only means there is no entry to unmap and in most cases
/// it should just be ignored.
pub fn unmap(&mut self, addr: Address) -> Result<(), NothingToUnmap> {
let mut current_pt = self.root_pt;
let mut page_tables = [core::ptr::null_mut(); 5];
// Walk page table in reverse
for lvl in (0..5).rev() {
let entry = unsafe {
(*current_pt)
.table
.get_unchecked_mut(addr_extract_index(addr, lvl))
};
let ptr = entry.ptr();
match entry.permission() {
// Nothing is there, throw an error, not critical!
Permission::Empty => return Err(NothingToUnmap),
// Node → save to visited page tables and continue walking
Permission::Node => {
page_tables[lvl as usize] = entry;
current_pt = ptr as _
}
// Page entry → zero it out!
// Zero page entry is completely valid entry with
// empty permission - no UB here!
_ => unsafe {
core::ptr::write_bytes(entry, 0, 1);
break;
},
}
}
// Now walk the visited page tables in order
for entry in page_tables.into_iter() {
// Level not visited, skip.
if entry.is_null() {
continue;
}
unsafe {
let children = &mut (*(*entry).ptr()).pt.childen;
*children -= 1; // Decrease children count
// If there are no children, deallocate.
if *children == 0 {
let _ = Box::from_raw((*entry).ptr() as *mut PageTable);
// Zero visited entry
core::ptr::write_bytes(entry, 0, 1);
} else {
break;
}
}
}
Ok(())
}
}
/// Error mapping
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum MapError {
/// Entry was already mapped
AlreadyMapped,
/// While walking, a page entry was encountered
/// where a node was expected.
PageOnNode,
}
impl_display!(for MapError => match {
Self::AlreadyMapped => "There is already a page mapped on specified address";
Self::PageOnNode => "There was a page mapped on the way instead of node";
});
/// There was no entry in page table to unmap
///
/// No worry, don't panic, nothing bad has happened,
/// but if you are 120% sure there should be something,
/// double-check your addresses.
#[derive(Clone, Copy, Debug)]
pub struct NothingToUnmap;
impl_display!(for NothingToUnmap => "There is no entry to unmap");

View file

@ -1,282 +0,0 @@
//! Platform independent, software paged memory implementation
pub mod icache;
pub mod lookup;
pub mod paging;
#[cfg(feature = "alloc")]
pub mod mapping;
use {
super::{addr::Address, LoadError, Memory, MemoryAccessReason, StoreError},
core::mem::size_of,
icache::ICache,
lookup::{AddrPageLookupError, AddrPageLookupOk, AddrPageLookuper},
paging::{PageTable, Permission},
};
/// HoleyBytes software paged memory
///
/// - `OUT_PROG_EXEC`: set to `false` to disable executing code outside
///   of the initially provided program, even if the pages are executable
#[derive(Clone, Debug)]
pub struct SoftPagedMem<'p, PfH, const OUT_PROG_EXEC: bool = true> {
/// Root page table
pub root_pt: *mut PageTable,
/// Page fault handler
pub pf_handler: PfH,
/// Program memory segment
pub program: &'p [u8],
/// Program instruction cache
pub icache: ICache,
}
impl<'p, PfH: HandlePageFault, const OUT_PROG_EXEC: bool> Memory
for SoftPagedMem<'p, PfH, OUT_PROG_EXEC>
{
/// Load value from an address
///
/// # Safety
/// Applies same conditions as for [`core::ptr::copy_nonoverlapping`]
unsafe fn load(
&mut self,
addr: Address,
target: *mut u8,
count: usize,
) -> Result<(), LoadError> {
self.memory_access(
MemoryAccessReason::Load,
addr,
target,
count,
perm_check::readable,
|src, dst, count| unsafe { core::ptr::copy_nonoverlapping(src, dst, count) },
)
.map_err(LoadError)
}
/// Store value to an address
///
/// # Safety
/// Applies same conditions as for [`core::ptr::copy_nonoverlapping`]
unsafe fn store(
&mut self,
addr: Address,
source: *const u8,
count: usize,
) -> Result<(), StoreError> {
self.memory_access(
MemoryAccessReason::Store,
addr,
source.cast_mut(),
count,
perm_check::writable,
|dst, src, count| unsafe { core::ptr::copy_nonoverlapping(src, dst, count) },
)
.map_err(StoreError)
}
#[inline(always)]
unsafe fn prog_read<T>(&mut self, addr: Address) -> T {
if OUT_PROG_EXEC && addr.truncate_usize() > self.program.len() {
return unsafe { self.icache.fetch::<T>(addr, self.root_pt) }
.unwrap_or_else(|| unsafe { core::mem::zeroed() });
}
let addr = addr.truncate_usize();
self.program
.get(addr..addr + size_of::<T>())
.map(|x| unsafe { x.as_ptr().cast::<T>().read() })
.unwrap_or_else(|| unsafe { core::mem::zeroed() })
}
}
impl<'p, PfH: HandlePageFault, const OUT_PROG_EXEC: bool> SoftPagedMem<'p, PfH, OUT_PROG_EXEC> {
// Everyone behold, the holy function, the god of HBVM memory accesses!
/// Split address to pages, check their permissions and feed pointers with offset
/// to a specified function.
///
/// If page is not found, execute page fault trap handler.
#[allow(clippy::too_many_arguments)] // Silence peasant
fn memory_access(
&mut self,
reason: MemoryAccessReason,
src: Address,
mut dst: *mut u8,
len: usize,
permission_check: fn(Permission) -> bool,
action: fn(*mut u8, *mut u8, usize),
) -> Result<(), Address> {
// Memory load from program section
let (src, len) = if src.truncate_usize() < self.program.len() as _ {
// Allow only loads
if reason != MemoryAccessReason::Load {
return Err(src);
}
// Determine how much data to copy from here
let to_copy = len.clamp(0, self.program.len().saturating_sub(src.truncate_usize()));
// Perform action
action(
unsafe { self.program.as_ptr().add(src.truncate_usize()).cast_mut() },
dst,
to_copy,
);
// Return the source and length shifted past what we've already copied
(
src.saturating_add(to_copy as u64),
len.saturating_sub(to_copy),
)
} else {
(src, len) // Nothing weird!
};
// Nothing to copy? Don't bother doing anything, bail.
if len == 0 {
return Ok(());
}
// Create new splitter
let mut pspl = AddrPageLookuper::new(src, len, self.root_pt);
loop {
match pspl.next() {
// Page is found
Some(Ok(AddrPageLookupOk {
vaddr,
ptr,
size,
perm,
})) => {
if !permission_check(perm) {
return Err(vaddr);
}
// Perform specified memory action and bump destination pointer
action(ptr, dst, size);
dst = unsafe { dst.add(size) };
}
// No page found
Some(Err(AddrPageLookupError { addr, size })) => {
// Attempt to execute page fault handler
if self.pf_handler.page_fault(
reason,
unsafe { &mut *self.root_pt },
addr,
size,
dst,
) {
// Shift the splitter address
pspl.bump(size);
// Bump dst pointer
dst = unsafe { dst.add(size as _) };
} else {
return Err(addr); // Unhandleable, VM will yield.
}
}
// No remaining pages, we are done!
None => return Ok(()),
}
}
}
}
/// Extract index in page table on specified level
///
/// The level shall not be larger than 4, otherwise
/// the output of the function is unspecified (yes, it can also panic :)
pub fn addr_extract_index(addr: Address, lvl: u8) -> usize {
debug_assert!(lvl <= 4);
let addr = addr.get();
usize::try_from((addr >> (lvl * 8 + 12)) & ((1 << 8) - 1)).expect("?conradluget a better CPU")
}
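// Worked example (illustrative): each level consumes 8 bits of the address,
// starting above the 12-bit in-page offset. For address 0x1_2345_6789:
//
// assert_eq!(addr_extract_index(Address::new(0x1_2345_6789), 0), 0x56); // bits 12..20
// assert_eq!(addr_extract_index(Address::new(0x1_2345_6789), 1), 0x34); // bits 20..28
// assert_eq!(addr_extract_index(Address::new(0x1_2345_6789), 2), 0x12); // bits 28..36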
/// Page size
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum PageSize {
/// 4 KiB page (on level 0)
Size4K = 4096,
/// 2 MiB page (on level 1)
Size2M = 1024 * 1024 * 2,
/// 1 GiB page (on level 2)
Size1G = 1024 * 1024 * 1024,
}
impl PageSize {
/// Convert page table level to size of page
const fn from_lvl(lvl: u8) -> Option<Self> {
match lvl {
0 => Some(PageSize::Size4K),
1 => Some(PageSize::Size2M),
2 => Some(PageSize::Size1G),
_ => None,
}
}
}
impl core::ops::Add<PageSize> for Address {
type Output = Self;
#[inline(always)]
fn add(self, rhs: PageSize) -> Self::Output {
self + (rhs as u64)
}
}
impl core::ops::AddAssign<PageSize> for Address {
#[inline(always)]
fn add_assign(&mut self, rhs: PageSize) {
*self = Self::new(self.get().wrapping_add(rhs as u64));
}
}
/// Permission checks
pub mod perm_check {
use super::paging::Permission;
/// Page is readable
#[inline(always)]
pub const fn readable(perm: Permission) -> bool {
matches!(
perm,
Permission::Readonly | Permission::Write | Permission::Exec
)
}
/// Page is writable
#[inline(always)]
pub const fn writable(perm: Permission) -> bool {
matches!(perm, Permission::Write)
}
/// Page is executable
#[inline(always)]
pub const fn executable(perm: Permission) -> bool {
matches!(perm, Permission::Exec)
}
}
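// Sketch of the permission checks in action (illustrative): every mapped leaf
// is readable, but only `Write` pages may be stored to and only `Exec` pages
// may be fetched from.
//
// assert!(perm_check::readable(Permission::Exec));
// assert!(perm_check::writable(Permission::Write));
// assert!(!perm_check::writable(Permission::Readonly));
// assert!(perm_check::executable(Permission::Exec));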
/// Handle VM traps
pub trait HandlePageFault {
/// Handle page fault
///
/// Return true if handling was successful,
/// otherwise the program will be interrupted and will
/// yield an error.
fn page_fault(
&mut self,
reason: MemoryAccessReason,
pagetable: &mut PageTable,
vaddr: Address,
size: PageSize,
dataptr: *mut u8,
) -> bool
where
Self: Sized;
}

View file

@ -1,86 +0,0 @@
//! Page table and associated structures implementation
use core::{fmt::Debug, mem::MaybeUninit};
/// Page entry permission
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
#[repr(u8)]
pub enum Permission {
/// No page present
#[default]
Empty,
/// Points to another pagetable
Node,
/// Page is read only
Readonly,
/// Page is readable and writable
Write,
/// Page is readable and executable
Exec,
}
/// Page table entry
#[derive(Clone, Copy, Default, PartialEq, Eq)]
pub struct PtEntry(u64);
impl PtEntry {
/// Create new
///
/// # Safety
/// - `ptr` has to point to valid data and shall not be deallocated
/// throughout the entry lifetime
#[inline]
pub unsafe fn new(ptr: *mut PtPointedData, permission: Permission) -> Self {
Self(ptr as u64 | permission as u64)
}
/// Get permission
#[inline]
pub fn permission(&self) -> Permission {
unsafe { core::mem::transmute(self.0 as u8 & 0b111) }
}
/// Get pointer to the data (leaf) or next page table (node)
#[inline]
pub fn ptr(&self) -> *mut PtPointedData {
(self.0 & !((1 << 12) - 1)) as _
}
}
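// Sketch (illustrative): an entry packs a 4 KiB-aligned pointer together with
// its permission in the low bits, and `ptr()` masks those bits back off.
//
// let data: *mut PtPointedData = /* some 4096-aligned allocation */;
// let entry = unsafe { PtEntry::new(data, Permission::Readonly) };
// assert_eq!(entry.permission(), Permission::Readonly);
// assert_eq!(entry.ptr(), data);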
impl Debug for PtEntry {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
f.debug_struct("PtEntry")
.field("ptr", &self.ptr())
.field("permission", &self.permission())
.finish()
}
}
/// Page table
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(align(4096))]
pub struct PageTable {
/// How many entries are in use
pub childen: u8,
/// Entries
pub table: [PtEntry; 256],
}
impl Default for PageTable {
fn default() -> Self {
// SAFETY: It's fine, zeroed page table entry is valid (= empty)
Self {
childen: 0,
table: unsafe { MaybeUninit::zeroed().assume_init() },
}
}
}
/// Data page table entry can possibly point to
#[derive(Clone, Copy)]
#[repr(C, align(4096))]
pub union PtPointedData {
/// Node - next page table
pub pt: PageTable,
/// Leaf
pub page: u8,
}

View file

@ -1,53 +0,0 @@
macro_rules! impl_display {
(for $ty:ty => $(|$selfty:pat_param|)? $fmt:literal $(, $($param:expr),+)? $(,)?) => {
impl ::core::fmt::Display for $ty {
fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
$(let $selfty = self;)?
write!(f, $fmt, $($param),*)
}
}
};
(for $ty:ty => $str:literal) => {
impl ::core::fmt::Display for $ty {
fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
f.write_str($str)
}
}
};
(for $ty:ty => match {$(
$bind:pat => $($const:ident)? $fmt:literal $(,$($params:tt)*)?;
)*}) => {
impl ::core::fmt::Display for $ty {
fn fmt(&self, f: &mut ::core::fmt::Formatter<'_>) -> ::core::fmt::Result {
match self {
$(
$bind => $crate::utils::internal::impl_display_match_fragment!($($const,)? f, $fmt $(, $($params)*)?)
),*
}
}
}
}
}
#[doc(hidden)]
pub(crate) mod internal {
macro_rules! impl_display_match_fragment {
(const, $f:expr, $lit:literal) => {
$f.write_str($lit)
};
($f:expr, $fmt:literal $(, $($params:tt)*)?) => {
write!($f, $fmt, $($($params)*)?)
};
}
pub(crate) use impl_display_match_fragment;
}
macro_rules! static_assert(($expr:expr $(,)?) => {
const _: [(); !$expr as usize] = [];
});
pub(crate) use {impl_display, static_assert};

52
hbvm/src/validate.rs Normal file
View file

@ -0,0 +1,52 @@
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum ErrorKind {
InvalidInstruction,
Unimplemented,
RegisterArrayOverflow,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub struct Error {
pub kind: ErrorKind,
pub index: usize,
}
pub fn validate(mut program: &[u8]) -> Result<(), Error> {
use hbbytecode::opcode::*;
let start = program;
loop {
program = match program {
[] => return Ok(()),
[LD..=ST, reg, _, _, _, _, _, _, _, _, _, count, ..]
if usize::from(*reg) * 8 + usize::from(*count) > 2048 =>
{
return Err(Error {
kind: ErrorKind::RegisterArrayOverflow,
index: (program.as_ptr() as usize) - (start.as_ptr() as usize),
})
}
[BRC, src, dst, count, ..]
if src.checked_add(*count).is_none() || dst.checked_add(*count).is_none() =>
{
return Err(Error {
kind: ErrorKind::RegisterArrayOverflow,
index: (program.as_ptr() as usize) - (start.as_ptr() as usize),
})
}
[NOP | ECALL, rest @ ..]
| [DIR | DIRF, _, _, _, _, rest @ ..]
| [ADD..=CMPU | BRC | ADDF..=MULF, _, _, _, rest @ ..]
| [NEG..=NOT | CP..=SWA, _, _, rest @ ..]
| [LI | JMP, _, _, _, _, _, _, _, _, _, rest @ ..]
| [ADDI..=CMPUI | BMC | JEQ..=JGTU | ADDFI..=MULFI, _, _, _, _, _, _, _, _, _, _, rest @ ..]
| [LD..=ST, _, _, _, _, _, _, _, _, _, _, _, _, rest @ ..] => rest,
_ => {
return Err(Error {
kind: ErrorKind::InvalidInstruction,
index: (program.as_ptr() as usize) - (start.as_ptr() as usize),
})
}
}
}
}
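// Usage sketch (illustrative): the error carries the byte offset of the
// offending instruction, which makes diagnostics straightforward.
//
// match validate(&program) {
//     Ok(()) => { /* safe to hand to the unsafe VM constructor */ }
//     Err(Error { kind, index }) => eprintln!("{kind:?} at byte offset {index}"),
// }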

View file

@ -1,88 +0,0 @@
//! HoleyBytes register value definition
use crate::utils::static_assert;
/// Define [`Value`] »union« (it's fake)
///
/// # Safety
/// Its variants have to be sound to byte-reinterpret
/// between each other. Otherwise the behaviour is undefined.
macro_rules! value_def {
($($ty:ident),* $(,)?) => {
/// HBVM register value
#[derive(Copy, Clone)]
#[repr(transparent)]
pub struct Value(pub u64);
$(
impl From<$ty> for Value {
#[inline]
fn from(value: $ty) -> Self {
let mut new = core::mem::MaybeUninit::<u64>::zeroed();
unsafe {
new.as_mut_ptr().cast::<$ty>().write(value);
Self(new.assume_init())
}
}
}
static_assert!(core::mem::size_of::<$ty>() <= core::mem::size_of::<Value>());
impl private::Sealed for $ty {}
unsafe impl ValueVariant for $ty {}
)*
};
}
impl Value {
/// Byte reinterpret value to target variant
#[inline]
pub fn cast<V: ValueVariant>(self) -> V {
unsafe { core::mem::transmute_copy(&self.0) }
}
}
/// # Safety
/// - N/A, not to be implemented manually
pub unsafe trait ValueVariant: private::Sealed + Copy + Into<Value> {}
impl private::Sealed for Value {}
unsafe impl ValueVariant for Value {}
mod private {
pub trait Sealed {}
}
value_def!(u8, u16, u32, u64, i8, i16, i32, i64, f32, f64);
static_assert!(core::mem::size_of::<Value>() == 8);
impl core::fmt::Debug for Value {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
// Print formatted as hexadecimal, unsigned integer
write!(f, "{:x}", self.cast::<u64>())
}
}
pub(crate) trait CheckedDivRem {
fn checked_div(self, other: Self) -> Option<Self>
where
Self: Sized;
fn checked_rem(self, other: Self) -> Option<Self>
where
Self: Sized;
}
macro_rules! impl_checked_div_rem {
($($ty:ty),* $(,)?) => {
$(impl CheckedDivRem for $ty {
#[inline(always)]
fn checked_div(self, another: Self) -> Option<Self>
{ self.checked_div(another) }
#[inline(always)]
fn checked_rem(self, another: Self) -> Option<Self>
{ self.checked_rem(another) }
})*
};
}
impl_checked_div_rem!(u8, u16, u32, u64, i8, i16, i32, i64);
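// Sketch (illustrative): `cast` reinterprets bytes, it never converts
// numerically, so a float round-trips through a register bit-exactly.
//
// let v = Value::from(1.5_f64);
// assert_eq!(v.cast::<u64>(), 1.5_f64.to_bits());
// assert_eq!(v.cast::<f64>(), 1.5_f64);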

238
hbvm/src/vm/mem/mod.rs Normal file
View file

@ -0,0 +1,238 @@
mod paging;
use self::paging::{PageTable, Permission, PtEntry};
use alloc::boxed::Box;
#[derive(Clone, Debug)]
pub struct Memory {
root_pt: *mut PageTable,
}
impl Default for Memory {
fn default() -> Self {
Self {
root_pt: Box::into_raw(Box::default()),
}
}
}
impl Drop for Memory {
fn drop(&mut self) {
let _ = unsafe { Box::from_raw(self.root_pt) };
}
}
impl Memory {
// HACK: Just for allocation testing, will be removed when proper memory interfaces
// are implemented.
pub fn insert_test_page(&mut self) {
unsafe {
let mut entry = PtEntry::new(
{
let layout = alloc::alloc::Layout::from_size_align_unchecked(4096, 4096);
let ptr = alloc::alloc::alloc_zeroed(layout);
if ptr.is_null() {
alloc::alloc::handle_alloc_error(layout);
}
core::ptr::write_bytes(ptr, 69, 10);
ptr.cast()
},
Permission::Write,
);
for _ in 0..4 {
let mut pt = Box::<PageTable>::default();
pt[0] = entry;
entry = PtEntry::new(Box::into_raw(pt) as _, Permission::Node);
}
self.root_pt_mut()[0] = entry;
}
}
/// Load value from an address
pub unsafe fn load(&self, addr: u64, target: *mut u8, count: usize) -> Result<(), AccessFault> {
self.memory_access(
addr,
target,
count,
|perm| {
matches!(
perm,
Permission::Readonly | Permission::Write | Permission::Exec
)
},
|src, dst, count| core::ptr::copy_nonoverlapping(src, dst, count),
)
}
/// Store value to an address
pub unsafe fn store(
&mut self,
addr: u64,
source: *const u8,
count: usize,
) -> Result<(), AccessFault> {
self.memory_access(
addr,
source.cast_mut(),
count,
|perm| perm == Permission::Write,
|dst, src, count| core::ptr::copy_nonoverlapping(src, dst, count),
)
}
/// Copy a block of memory
pub unsafe fn block_copy(&mut self, src: u64, dst: u64, count: u64) -> Result<(), ()> {
/* let count = usize::try_from(count).expect("?conradluget a better CPU");
let mut srcs = PageSplitter::new(src, count, self.root_pt);
let mut dsts = PageSplitter::new(dst, count, self.root_pt);
let mut c_src = srcs.next().ok_or(())?;
let mut c_dst = dsts.next().ok_or(())?;
loop {
let min_size = c_src.size.min(c_dst.size);
unsafe {
core::ptr::copy(c_src.ptr, c_dst.ptr, min_size);
}
match (
match c_src.size.saturating_sub(min_size) {
0 => srcs.next(),
size => Some(PageSplitResult { size, ..c_src }),
},
match c_dst.size.saturating_sub(min_size) {
0 => dsts.next(),
size => Some(PageSplitResult { size, ..c_dst }),
},
) {
(None, None) => return Ok(()),
(Some(src), Some(dst)) => (c_src, c_dst) = (src, dst),
_ => return Err(()),
}
} */
todo!("Block memory copy")
}
#[inline]
pub fn root_pt(&self) -> &PageTable {
unsafe { &*self.root_pt }
}
#[inline]
pub fn root_pt_mut(&mut self) -> &mut PageTable {
unsafe { &mut *self.root_pt }
}
fn memory_access(
&self,
src: u64,
mut dst: *mut u8,
len: usize,
permission_check: impl Fn(Permission) -> bool,
action: impl Fn(*mut u8, *mut u8, usize),
) -> Result<(), AccessFault> {
for item in PageSplitter::new(src, len, self.root_pt) {
let PageSplitResult { ptr, size, perm } = item?;
if !permission_check(perm) {
return Err(AccessFault::Permission);
}
action(ptr, dst, size);
dst = unsafe { dst.add(size) };
}
Ok(())
}
}
#[derive(Debug)]
struct PageSplitResult {
ptr: *mut u8,
size: usize,
perm: Permission,
}
struct PageSplitter {
addr: u64,
size: usize,
pagetable: *const PageTable,
}
impl PageSplitter {
pub const fn new(addr: u64, size: usize, pagetable: *const PageTable) -> Self {
Self {
addr,
size,
pagetable,
}
}
}
impl Iterator for PageSplitter {
type Item = Result<PageSplitResult, AccessFault>;
fn next(&mut self) -> Option<Self::Item> {
if self.size == 0 {
return None;
}
let (base, perm, size, offset) = 'a: {
let mut current_pt = self.pagetable;
for lvl in (0..5).rev() {
unsafe {
let entry = (*current_pt).get_unchecked(
usize::try_from((self.addr >> (lvl * 9 + 12)) & ((1 << 9) - 1))
.expect("?conradluget a better CPU"),
);
let ptr = entry.ptr();
match entry.permission() {
Permission::Empty => {
return Some(Err(AccessFault::NoPage {
addr: self.addr,
remaining: self.size,
}))
}
Permission::Node => current_pt = ptr as _,
perm => {
break 'a (
ptr as *mut u8,
perm,
match lvl {
0 => 4096,
1 => 1024_usize.pow(2) * 2,
2 => 1024_usize.pow(3),
_ => return Some(Err(AccessFault::TooShallow)),
},
self.addr as usize & ((1 << (lvl * 9 + 12)) - 1),
)
}
}
}
}
return Some(Err(AccessFault::TooDeep));
};
extern crate std;
let avail = (size - offset).clamp(0, self.size);
self.addr += size as u64;
self.size = self.size.saturating_sub(size);
std::dbg!(Some(Ok(PageSplitResult {
ptr: unsafe { base.add(offset) },
size: avail,
perm,
})))
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
pub enum AccessFault {
NoPage { addr: u64, remaining: usize },
Permission,
TooShallow,
TooDeep,
}

101
hbvm/src/vm/mem/paging.rs Normal file
View file

@ -0,0 +1,101 @@
use core::{
fmt::Debug,
mem::MaybeUninit,
ops::{Index, IndexMut},
slice::SliceIndex,
};
use delegate::delegate;
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
#[repr(u8)]
pub enum Permission {
#[default]
Empty,
Node,
Readonly,
Write,
Exec,
}
#[derive(Clone, Copy, Default, PartialEq, Eq)]
pub struct PtEntry(u64);
impl PtEntry {
#[inline]
pub unsafe fn new(ptr: *mut PtPointedData, permission: Permission) -> Self {
Self(ptr as u64 | permission as u64)
}
#[inline]
pub fn permission(&self) -> Permission {
unsafe { core::mem::transmute(self.0 as u8 & 0b111) }
}
#[inline]
pub fn ptr(&self) -> *mut PtPointedData {
(self.0 & !((1 << 12) - 1)) as _
}
}
impl Debug for PtEntry {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
f.debug_struct("PtEntry")
.field("ptr", &self.ptr())
.field("permission", &self.permission())
.finish()
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
#[repr(align(4096))]
pub struct PageTable([PtEntry; 512]);
impl PageTable {
delegate!(to self.0 {
pub unsafe fn get<I>(&self, ix: I) -> Option<&I::Output>
where I: SliceIndex<[PtEntry]>;
pub unsafe fn get_mut<I>(&mut self, ix: I) -> Option<&mut I::Output>
where I: SliceIndex<[PtEntry]>;
pub unsafe fn get_unchecked<I>(&self, index: I) -> &I::Output
where I: SliceIndex<[PtEntry]>;
pub unsafe fn get_unchecked_mut<I>(&mut self, index: I) -> &mut I::Output
where I: SliceIndex<[PtEntry]>;
});
}
impl<Idx> Index<Idx> for PageTable
where
Idx: SliceIndex<[PtEntry]>,
{
type Output = Idx::Output;
#[inline(always)]
fn index(&self, index: Idx) -> &Self::Output {
&self.0[index]
}
}
impl<Idx> IndexMut<Idx> for PageTable
where
Idx: SliceIndex<[PtEntry]>,
{
#[inline(always)]
fn index_mut(&mut self, index: Idx) -> &mut Self::Output {
&mut self.0[index]
}
}
impl Default for PageTable {
fn default() -> Self {
Self(unsafe { MaybeUninit::zeroed().assume_init() })
}
}
#[derive(Clone, Copy)]
#[repr(C, align(4096))]
pub union PtPointedData {
pub pt: PageTable,
pub page: u8,
}

302
hbvm/src/vm/mod.rs Normal file
View file

@ -0,0 +1,302 @@
//! HoleyBytes Virtual Machine
//!
//! All unsafe code here should be sound, if input bytecode passes validation.
// # General safety notice:
// - Validation has to assure there is 256 registers (r0 - r255)
// - Instructions have to be valid as specified (values and sizes)
// - Mapped pages should be at least 4 KiB
// - Yes, I am aware of the UB when jumping into the middle of an instruction where
// the read byte corresponds to an instruction whose length exceeds the
// program size. If you are (rightfully) worried about the UB, for now just
// append your program with 11 zeroes.
use self::mem::AccessFault;
mod mem;
mod value;
use {
crate::validate,
core::ops,
hbbytecode::{OpParam, ParamBB, ParamBBB, ParamBBBB, ParamBBD, ParamBBDH, ParamBD},
mem::Memory,
static_assertions::assert_impl_one,
value::Value,
};
macro_rules! param {
($self:expr, $ty:ty) => {{
assert_impl_one!($ty: OpParam);
let data = $self
.program
.as_ptr()
.add($self.pc + 1)
.cast::<$ty>()
.read();
$self.pc += 1 + core::mem::size_of::<$ty>();
data
}};
}
macro_rules! binary_op {
($self:expr, $ty:ident, $handler:expr) => {{
let ParamBBB(tg, a0, a1) = param!($self, ParamBBB);
$self.write_reg(
tg,
$handler(
Value::$ty(&$self.read_reg(a0)),
Value::$ty(&$self.read_reg(a1)),
)
.into(),
);
}};
}
macro_rules! binary_op_imm {
($self:expr, $ty:ident, $handler:expr) => {{
let ParamBBD(tg, a0, imm) = param!($self, ParamBBD);
$self.write_reg(
tg,
$handler(Value::$ty(&$self.read_reg(a0)), Value::$ty(&imm.into())).into(),
);
}};
}
macro_rules! cond_jump {
($self:expr, $ty:ident, $expected:ident) => {{
let ParamBBD(a0, a1, jt) = param!($self, ParamBBD);
if core::cmp::Ord::cmp(&$self.read_reg(a0).as_u64(), &$self.read_reg(a1).as_u64())
== core::cmp::Ordering::$expected
{
$self.pc = jt as usize;
}
}};
}
pub struct Vm<'a> {
pub registers: [Value; 256],
pub memory: Memory,
pc: usize,
program: &'a [u8],
}
impl<'a> Vm<'a> {
/// # Safety
/// Program code has to be validated
pub unsafe fn new_unchecked(program: &'a [u8]) -> Self {
Self {
registers: [Value::from(0_u64); 256],
memory: Default::default(),
pc: 0,
program,
}
}
pub fn new_validated(program: &'a [u8]) -> Result<Self, validate::Error> {
validate::validate(program)?;
Ok(unsafe { Self::new_unchecked(program) })
}
pub fn run(&mut self) -> HaltReason {
use hbbytecode::opcode::*;
loop {
let Some(&opcode) = self.program.get(self.pc)
else { return HaltReason::ProgramEnd };
unsafe {
match opcode {
NOP => param!(self, ()),
ADD => binary_op!(self, as_u64, u64::wrapping_add),
SUB => binary_op!(self, as_u64, u64::wrapping_sub),
MUL => binary_op!(self, as_u64, u64::wrapping_mul),
AND => binary_op!(self, as_u64, ops::BitAnd::bitand),
OR => binary_op!(self, as_u64, ops::BitOr::bitor),
XOR => binary_op!(self, as_u64, ops::BitXor::bitxor),
SL => binary_op!(self, as_u64, ops::Shl::shl),
SR => binary_op!(self, as_u64, ops::Shr::shr),
SRS => binary_op!(self, as_i64, ops::Shr::shr),
CMP => {
let ParamBBB(tg, a0, a1) = param!(self, ParamBBB);
self.write_reg(
tg,
(self.read_reg(a0).as_i64().cmp(&self.read_reg(a1).as_i64()) as i64)
.into(),
);
}
CMPU => {
let ParamBBB(tg, a0, a1) = param!(self, ParamBBB);
self.write_reg(
tg,
(self.read_reg(a0).as_u64().cmp(&self.read_reg(a1).as_u64()) as i64)
.into(),
);
}
NOT => {
let param = param!(self, ParamBB);
self.write_reg(param.0, (!self.read_reg(param.1).as_u64()).into());
}
NEG => {
let param = param!(self, ParamBB);
self.write_reg(
param.0,
match self.read_reg(param.1).as_u64() {
0 => 1_u64,
_ => 0,
}
.into(),
);
}
DIR => {
let ParamBBBB(dt, rt, a0, a1) = param!(self, ParamBBBB);
let a0 = self.read_reg(a0).as_u64();
let a1 = self.read_reg(a1).as_u64();
self.write_reg(dt, (a0.checked_div(a1).unwrap_or(u64::MAX)).into());
self.write_reg(rt, (a0.checked_rem(a1).unwrap_or(u64::MAX)).into());
}
ADDI => binary_op_imm!(self, as_u64, ops::Add::add),
MULI => binary_op_imm!(self, as_u64, ops::Mul::mul),
ANDI => binary_op_imm!(self, as_u64, ops::BitAnd::bitand),
ORI => binary_op_imm!(self, as_u64, ops::BitOr::bitor),
XORI => binary_op_imm!(self, as_u64, ops::BitXor::bitxor),
SLI => binary_op_imm!(self, as_u64, ops::Shl::shl),
SRI => binary_op_imm!(self, as_u64, ops::Shr::shr),
SRSI => binary_op_imm!(self, as_i64, ops::Shr::shr),
CMPI => {
let ParamBBD(tg, a0, imm) = param!(self, ParamBBD);
self.write_reg(
tg,
(self.read_reg(a0).as_i64().cmp(&Value::from(imm).as_i64()) as i64)
.into(),
);
}
CMPUI => {
let ParamBBD(tg, a0, imm) = param!(self, ParamBBD);
self.write_reg(tg, (self.read_reg(a0).as_u64().cmp(&imm) as i64).into());
}
CP => {
let param = param!(self, ParamBB);
self.write_reg(param.0, self.read_reg(param.1));
}
SWA => {
let ParamBB(src, dst) = param!(self, ParamBB);
if src + dst != 0 {
core::ptr::swap(
self.registers.get_unchecked_mut(usize::from(src)),
self.registers.get_unchecked_mut(usize::from(dst)),
);
}
}
LI => {
let param = param!(self, ParamBD);
self.write_reg(param.0, param.1.into());
}
LD => {
let ParamBBDH(dst, base, off, count) = param!(self, ParamBBDH);
let n: usize = match dst {
0 => 1,
_ => 0,
};
if let Err(e) = self.memory.load(
self.read_reg(base).as_u64() + off + n as u64,
self.registers.as_mut_ptr().add(usize::from(dst) + n).cast(),
usize::from(count).saturating_sub(n),
) {
return HaltReason::LoadAccessEx(e);
}
}
ST => {
let ParamBBDH(dst, base, off, count) = param!(self, ParamBBDH);
if let Err(e) = self.memory.store(
self.read_reg(base).as_u64() + off,
self.registers.as_ptr().add(usize::from(dst)).cast(),
count.into(),
) {
return HaltReason::StoreAccessEx(e);
}
}
BMC => {
let ParamBBD(src, dst, count) = param!(self, ParamBBD);
if self
.memory
.block_copy(
self.read_reg(src).as_u64(),
self.read_reg(dst).as_u64(),
count,
)
.is_err()
{
todo!("Block memory copy fault");
}
}
BRC => {
let ParamBBB(src, dst, count) = param!(self, ParamBBB);
core::ptr::copy(
self.registers.get_unchecked(usize::from(src)),
self.registers.get_unchecked_mut(usize::from(dst)),
usize::from(count * 8),
);
}
JMP => {
let ParamBD(reg, offset) = param!(self, ParamBD);
self.pc = (self.read_reg(reg).as_u64() + offset) as usize;
}
JEQ => cond_jump!(self, int, Equal),
JNE => {
let ParamBBD(a0, a1, jt) = param!(self, ParamBBD);
if self.read_reg(a0).as_u64() != self.read_reg(a1).as_u64() {
self.pc = jt as usize;
}
}
JLT => cond_jump!(self, int, Less),
JGT => cond_jump!(self, int, Greater),
JLTU => cond_jump!(self, sint, Less),
JGTU => cond_jump!(self, sint, Greater),
ECALL => {
param!(self, ());
return HaltReason::Ecall;
}
ADDF => binary_op!(self, as_f64, ops::Add::add),
MULF => binary_op!(self, as_f64, ops::Mul::mul),
DIRF => {
let ParamBBBB(dt, rt, a0, a1) = param!(self, ParamBBBB);
let a0 = self.read_reg(a0).as_f64();
let a1 = self.read_reg(a1).as_f64();
self.write_reg(dt, (a0 / a1).into());
self.write_reg(rt, (a0 % a1).into());
}
ADDFI => binary_op_imm!(self, as_f64, ops::Add::add),
MULFI => binary_op_imm!(self, as_f64, ops::Mul::mul),
op => return HaltReason::InvalidOpEx(op),
}
}
}
}
#[inline]
unsafe fn read_reg(&self, n: u8) -> Value {
if n == 0 {
0_u64.into()
} else {
*self.registers.get_unchecked(n as usize)
}
}
#[inline]
unsafe fn write_reg(&mut self, n: u8, value: Value) {
if n != 0 {
*self.registers.get_unchecked_mut(n as usize) = value;
}
}
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
#[repr(u8)]
pub enum HaltReason {
ProgramEnd,
Ecall,
InvalidOpEx(u8),
LoadAccessEx(AccessFault),
StoreAccessEx(AccessFault),
}

37
hbvm/src/vm/value.rs Normal file
View file

@ -0,0 +1,37 @@
use core::fmt::Debug;
macro_rules! value_def {
($($ty:ident),* $(,)?) => {
#[derive(Copy, Clone)]
#[repr(packed)]
pub union Value {
$(pub $ty: $ty),*
}
paste::paste! {
impl Value {$(
#[inline]
pub fn [<as_ $ty>](&self) -> $ty {
unsafe { self.$ty }
}
)*}
}
$(
impl From<$ty> for Value {
#[inline]
fn from(value: $ty) -> Self {
Self { $ty: value }
}
}
)*
};
}
value_def!(u64, i64, f64);
impl Debug for Value {
fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
self.as_u64().fmt(f)
}
}

View file

@ -1,578 +0,0 @@
//! Welcome to the land of The Great Dispatch Loop
//!
//! Have fun
use {
super::{bmc::BlockCopier, mem::Memory, value::ValueVariant, Vm, VmRunError, VmRunOk},
crate::{
mem::{addr::AddressOp, Address},
value::CheckedDivRem,
},
core::{cmp::Ordering, ops},
hbbytecode::{
OpsN, OpsO, OpsP, OpsRB, OpsRD, OpsRH, OpsRR, OpsRRA, OpsRRAH, OpsRRB, OpsRRD, OpsRRH,
OpsRRO, OpsRROH, OpsRRP, OpsRRPH, OpsRRR, OpsRRRR, OpsRRW, OpsRW, RoundingMode,
},
};
macro_rules! handler {
($self:expr, |$ty:ident ($($ident:pat),* $(,)?)| $expr:expr) => {{
#[allow(unused_unsafe)]
let $ty($($ident),*) = unsafe { $self.decode::<$ty>() };
#[allow(clippy::no_effect)] let e = $expr;
$self.bump_pc::<$ty>();
e
}};
}
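// Rough expansion sketch (illustrative) of `handler!(self, |OpsRR(tg, a0)| ...)`
// for the CP instruction: decode the operand struct at the current PC, run the
// body, then bump the PC past the just-decoded operands.
//
// let OpsRR(tg, a0) = unsafe { self.decode::<OpsRR>() };
// self.write_reg(tg, self.read_reg(a0));
// self.bump_pc::<OpsRR>();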
impl<Mem, const TIMER_QUOTIENT: usize> Vm<Mem, TIMER_QUOTIENT>
where
Mem: Memory,
{
/// Execute program
///
/// Program can return [`VmRunError`] if a trap handling failed
#[cfg_attr(feature = "nightly", repr(align(4096)))]
pub fn run(&mut self) -> Result<VmRunOk, VmRunError> {
use hbbytecode::opcode::*;
loop {
// Big match
//
// Contribution guide:
// - Zero register shall never be overwritten. Its value has to always be 0.
// - Prefer `Self::read_reg` and `Self::write_reg` functions
// - Try to use `handler!` macro for decoding and then bumping program counter
// - Prioritise speed over code size
// - Memory is cheap, CPUs not that much
// - Do not heap allocate at any cost
// - Yes, user-provided trap handler may allocate,
// but that is not our »fault«.
// - Unsafe is kinda must, but be sure you have validated everything
// - Your contributions have to pass sanitizers, fuzzer and Miri
// - Strictly follow the spec
// - The spec does not specify how you perform actions, in what order,
// just that the observable effects have to be performed in order and
// correctly.
// - Yes, we assume you run a 64-bit CPU. Else ?conradluget a better CPU
// sorry 8 bit fans, HBVM won't run on your Speccy :(
unsafe {
match self.memory.prog_read::<u8>(self.pc as _) {
UN => {
self.bump_pc::<OpsN>();
return Err(VmRunError::Unreachable);
}
TX => {
self.bump_pc::<OpsN>();
return Ok(VmRunOk::End);
}
NOP => handler!(self, |OpsN()| ()),
ADD8 => self.binary_op(u8::wrapping_add),
ADD16 => self.binary_op(u16::wrapping_add),
ADD32 => self.binary_op(u32::wrapping_add),
ADD64 => self.binary_op(u64::wrapping_add),
SUB8 => self.binary_op(u8::wrapping_sub),
SUB16 => self.binary_op(u16::wrapping_sub),
SUB32 => self.binary_op(u32::wrapping_sub),
SUB64 => self.binary_op(u64::wrapping_sub),
MUL8 => self.binary_op(u8::wrapping_mul),
MUL16 => self.binary_op(u16::wrapping_mul),
MUL32 => self.binary_op(u32::wrapping_mul),
MUL64 => self.binary_op(u64::wrapping_mul),
AND => self.binary_op::<u64>(ops::BitAnd::bitand),
OR => self.binary_op::<u64>(ops::BitOr::bitor),
XOR => self.binary_op::<u64>(ops::BitXor::bitxor),
SLU8 => self.binary_op_shift::<u8>(u8::wrapping_shl),
SLU16 => self.binary_op_shift::<u16>(u16::wrapping_shl),
SLU32 => self.binary_op_shift::<u32>(u32::wrapping_shl),
SLU64 => self.binary_op_shift::<u64>(u64::wrapping_shl),
SRU8 => self.binary_op_shift::<u8>(u8::wrapping_shr),
SRU16 => self.binary_op_shift::<u16>(u16::wrapping_shr),
SRU32 => self.binary_op_shift::<u32>(u32::wrapping_shr),
SRU64 => self.binary_op_shift::<u64>(u64::wrapping_shr),
SRS8 => self.binary_op_shift::<i8>(i8::wrapping_shr),
SRS16 => self.binary_op_shift::<i16>(i16::wrapping_shr),
SRS32 => self.binary_op_shift::<i32>(i32::wrapping_shr),
SRS64 => self.binary_op_shift::<i64>(i64::wrapping_shr),
CMPU => handler!(self, |OpsRRR(tg, a0, a1)| self.cmp(
tg,
a0,
self.read_reg(a1).cast::<u64>()
)),
CMPS => handler!(self, |OpsRRR(tg, a0, a1)| self.cmp(
tg,
a0,
self.read_reg(a1).cast::<i64>()
)),
DIRU8 => self.dir::<u8>(),
DIRU16 => self.dir::<u16>(),
DIRU32 => self.dir::<u32>(),
DIRU64 => self.dir::<u64>(),
DIRS8 => self.dir::<i8>(),
DIRS16 => self.dir::<i16>(),
DIRS32 => self.dir::<i32>(),
DIRS64 => self.dir::<i64>(),
NEG => handler!(self, |OpsRR(tg, a0)| {
// Bit negation
self.write_reg(tg, !self.read_reg(a0).cast::<u64>())
}),
NOT => handler!(self, |OpsRR(tg, a0)| {
// Logical negation
self.write_reg(tg, u64::from(self.read_reg(a0).cast::<u64>() == 0));
}),
SXT8 => handler!(self, |OpsRR(tg, a0)| {
self.write_reg(tg, self.read_reg(a0).cast::<i8>() as i64)
}),
SXT16 => handler!(self, |OpsRR(tg, a0)| {
self.write_reg(tg, self.read_reg(a0).cast::<i16>() as i64)
}),
SXT32 => handler!(self, |OpsRR(tg, a0)| {
self.write_reg(tg, self.read_reg(a0).cast::<i32>() as i64)
}),
ADDI8 => self.binary_op_imm(u8::wrapping_add),
ADDI16 => self.binary_op_imm(u16::wrapping_add),
ADDI32 => self.binary_op_imm(u32::wrapping_add),
ADDI64 => self.binary_op_imm(u64::wrapping_add),
MULI8 => self.binary_op_imm(u8::wrapping_mul),
MULI16 => self.binary_op_imm(u16::wrapping_mul),
MULI32 => self.binary_op_imm(u32::wrapping_mul),
MULI64 => self.binary_op_imm(u64::wrapping_mul),
ANDI => self.binary_op_imm::<u64>(ops::BitAnd::bitand),
ORI => self.binary_op_imm::<u64>(ops::BitOr::bitor),
XORI => self.binary_op_imm::<u64>(ops::BitXor::bitxor),
SLUI8 => self.binary_op_ims::<u8>(u8::wrapping_shl),
SLUI16 => self.binary_op_ims::<u16>(u16::wrapping_shl),
SLUI32 => self.binary_op_ims::<u32>(u32::wrapping_shl),
SLUI64 => self.binary_op_ims::<u64>(u64::wrapping_shl),
SRUI8 => self.binary_op_ims::<u8>(u8::wrapping_shr),
SRUI16 => self.binary_op_ims::<u16>(u16::wrapping_shr),
SRUI32 => self.binary_op_ims::<u32>(u32::wrapping_shr),
SRUI64 => self.binary_op_ims::<u64>(u64::wrapping_shr),
SRSI8 => self.binary_op_ims::<i8>(i8::wrapping_shr),
SRSI16 => self.binary_op_ims::<i16>(i16::wrapping_shr),
SRSI32 => self.binary_op_ims::<i32>(i32::wrapping_shr),
SRSI64 => self.binary_op_ims::<i64>(i64::wrapping_shr),
CMPUI => handler!(self, |OpsRRD(tg, a0, imm)| { self.cmp(tg, a0, imm) }),
CMPSI => handler!(self, |OpsRRD(tg, a0, imm)| { self.cmp(tg, a0, imm as i64) }),
CP => handler!(self, |OpsRR(tg, a0)| self.write_reg(tg, self.read_reg(a0))),
SWA => handler!(self, |OpsRR(r0, r1)| {
// Swap registers
match (r0, r1) {
(0, 0) => (),
(dst, 0) | (0, dst) => self.write_reg(dst, 0_u64),
(r0, r1) => {
core::ptr::swap(
self.registers.get_unchecked_mut(usize::from(r0)),
self.registers.get_unchecked_mut(usize::from(r1)),
);
}
}
}),
LI8 => handler!(self, |OpsRB(tg, imm)| self.write_reg(tg, imm)),
LI16 => handler!(self, |OpsRH(tg, imm)| self.write_reg(tg, imm)),
LI32 => handler!(self, |OpsRW(tg, imm)| self.write_reg(tg, imm)),
LI64 => handler!(self, |OpsRD(tg, imm)| self.write_reg(tg, imm)),
LRA => handler!(self, |OpsRRO(tg, reg, off)| self.write_reg(
tg,
self.pcrel(off, 3)
.wrapping_add(self.read_reg(reg).cast::<i64>())
.get(),
)),
// Load. If loading more than register size, continue on adjacent registers
LD => handler!(self, |OpsRRAH(dst, base, off, count)| self
.load(dst, base, off, count)?),
// Store. Same rules apply as to LD
ST => handler!(self, |OpsRRAH(dst, base, off, count)| self
.store(dst, base, off, count)?),
LDR => handler!(self, |OpsRROH(dst, base, off, count)| self.load(
dst,
base,
self.pcrel(off, 3).get(),
count
)?),
STR => handler!(self, |OpsRROH(dst, base, off, count)| self.store(
dst,
base,
self.pcrel(off, 3).get(),
count
)?),
BMC => {
// Block memory copy
match if let Some(copier) = &mut self.copier {
// There is some copier, poll.
copier.poll(&mut self.memory)
} else {
// There is none, make one!
let OpsRRH(src, dst, count) = self.decode();
self.copier = Some(BlockCopier::new(
Address::new(self.read_reg(src).cast()),
Address::new(self.read_reg(dst).cast()),
count as _,
));
self.copier
.as_mut()
.unwrap_unchecked() // SAFETY: We just assigned there
.poll(&mut self.memory)
} {
// We are done, shift program counter
core::task::Poll::Ready(Ok(())) => {
self.copier = None;
self.bump_pc::<OpsRRH>();
}
// Error, shift program counter (for consistency)
// and yield error
core::task::Poll::Ready(Err(e)) => {
return Err(e.into());
}
// Not done yet, proceed to next cycle
core::task::Poll::Pending => (),
}
}
BRC => handler!(self, |OpsRRB(src, dst, count)| {
// Block register copy
if src.checked_add(count).is_none() || dst.checked_add(count).is_none() {
return Err(VmRunError::RegOutOfBounds);
}
core::ptr::copy(
self.registers.get_unchecked(usize::from(src)),
self.registers.get_unchecked_mut(usize::from(dst)),
usize::from(count),
);
}),
JMP => {
let OpsO(off) = self.decode();
self.pc = self.pc.wrapping_add(off).wrapping_add(1);
}
JAL => {
// Jump and link. Save PC after this instruction to
// specified register and jump to reg + relative offset.
let OpsRRO(save, reg, offset) = self.decode();
self.write_reg(save, self.pc.get());
self.pc = self
.pcrel(offset, 3)
.wrapping_add(self.read_reg(reg).cast::<i64>());
}
JALA => {
// Jump and link. Save PC after this instruction to
// specified register and jump to reg
let OpsRRA(save, reg, offset) = self.decode();
self.write_reg(save, self.pc.get());
self.pc =
Address::new(self.read_reg(reg).cast::<u64>().wrapping_add(offset));
}
// Conditional jumps, jump only to immediates
JEQ => self.cond_jmp::<u64>(Ordering::Equal),
JNE => {
let OpsRRP(a0, a1, ja) = self.decode();
if self.read_reg(a0).cast::<u64>() != self.read_reg(a1).cast::<u64>() {
self.pc = self.pcrel(ja, 3);
} else {
self.bump_pc::<OpsRRP>();
}
}
JLTS => self.cond_jmp::<i64>(Ordering::Less),
JGTS => self.cond_jmp::<i64>(Ordering::Greater),
JLTU => self.cond_jmp::<u64>(Ordering::Less),
JGTU => self.cond_jmp::<u64>(Ordering::Greater),
ECA => {
// So we don't get timer interrupt after ECALL
if TIMER_QUOTIENT != 0 {
self.timer = self.timer.wrapping_add(1);
}
self.bump_pc::<OpsN>();
return Ok(VmRunOk::Ecall);
}
EBP => {
self.bump_pc::<OpsN>();
return Ok(VmRunOk::Breakpoint);
}
FADD32 => self.binary_op::<f32>(ops::Add::add),
FADD64 => self.binary_op::<f64>(ops::Add::add),
FSUB32 => self.binary_op::<f32>(ops::Sub::sub),
FSUB64 => self.binary_op::<f64>(ops::Sub::sub),
FMUL32 => self.binary_op::<f32>(ops::Mul::mul),
FMUL64 => self.binary_op::<f64>(ops::Mul::mul),
FDIV32 => self.binary_op::<f32>(ops::Div::div),
FDIV64 => self.binary_op::<f64>(ops::Div::div),
FMA32 => self.fma::<f32>(),
FMA64 => self.fma::<f64>(),
FINV32 => handler!(self, |OpsRR(tg, reg)| self
.write_reg(tg, 1. / self.read_reg(reg).cast::<f32>())),
FINV64 => handler!(self, |OpsRR(tg, reg)| self
.write_reg(tg, 1. / self.read_reg(reg).cast::<f64>())),
FCMPLT32 => self.fcmp::<f32>(Ordering::Less),
FCMPLT64 => self.fcmp::<f64>(Ordering::Less),
FCMPGT32 => self.fcmp::<f32>(Ordering::Greater),
FCMPGT64 => self.fcmp::<f64>(Ordering::Greater),
ITF32 => handler!(self, |OpsRR(tg, reg)| self
.write_reg(tg, self.read_reg(reg).cast::<i64>() as f32)),
ITF64 => handler!(self, |OpsRR(tg, reg)| self
.write_reg(tg, self.read_reg(reg).cast::<i64>() as f64)),
FTI32 => handler!(self, |OpsRRB(tg, reg, mode)| self.write_reg(
tg,
crate::float::f32toint(
self.read_reg(reg).cast::<f32>(),
RoundingMode::try_from(mode)
.map_err(|()| VmRunError::InvalidOperand)?,
),
)),
FTI64 => handler!(self, |OpsRRB(tg, reg, mode)| self.write_reg(
tg,
crate::float::f64toint(
self.read_reg(reg).cast::<f64>(),
RoundingMode::try_from(mode)
.map_err(|()| VmRunError::InvalidOperand)?,
),
)),
FC32T64 => handler!(self, |OpsRR(tg, reg)| self
.write_reg(tg, self.read_reg(reg).cast::<f32>() as f64)),
FC64T32 => handler!(self, |OpsRRB(tg, reg, mode)| self.write_reg(
tg,
crate::float::conv64to32(
self.read_reg(reg).cast(),
RoundingMode::try_from(mode)
.map_err(|()| VmRunError::InvalidOperand)?,
),
)),
LRA16 => handler!(self, |OpsRRP(tg, reg, imm)| self.write_reg(
tg,
(self.pc + self.read_reg(reg).cast::<u64>() + imm + 3_u16).get(),
)),
LDR16 => handler!(self, |OpsRRPH(dst, base, off, count)| self.load(
dst,
base,
self.pcrel(off, 3).get(),
count
)?),
STR16 => handler!(self, |OpsRRPH(dst, base, off, count)| self.store(
dst,
base,
self.pcrel(off, 3).get(),
count
)?),
JMP16 => {
let OpsP(off) = self.decode();
self.pc = self.pcrel(off, 1);
}
op => return Err(VmRunError::InvalidOpcode(op)),
}
}
if TIMER_QUOTIENT != 0 {
self.timer = self.timer.wrapping_add(1);
if self.timer % TIMER_QUOTIENT == 0 {
return Ok(VmRunOk::Timer);
}
}
}
}
/// Bump instruction pointer
#[inline(always)]
fn bump_pc<T: Copy>(&mut self) {
self.pc = self
.pc
.wrapping_add(core::mem::size_of::<T>())
.wrapping_add(1);
}
/// Decode instruction operands
#[inline(always)]
unsafe fn decode<T: Copy>(&mut self) -> T {
unsafe { self.memory.prog_read::<T>(self.pc + 1_u64) }
}
/// Load
#[inline(always)]
unsafe fn load(
&mut self,
dst: u8,
base: u8,
offset: u64,
count: u16,
) -> Result<(), VmRunError> {
let n: u8 = match dst {
0 => 1,
_ => 0,
};
unsafe {
self.memory.load(
self.ldst_addr_uber(dst, base, offset, count, n)?,
self.registers
.as_mut_ptr()
.add(usize::from(dst) + usize::from(n))
.cast(),
usize::from(count).saturating_sub(n.into()),
)
}?;
Ok(())
}
/// Store
#[inline(always)]
unsafe fn store(
&mut self,
dst: u8,
base: u8,
offset: u64,
count: u16,
) -> Result<(), VmRunError> {
unsafe {
self.memory.store(
self.ldst_addr_uber(dst, base, offset, count, 0)?,
self.registers.as_ptr().add(usize::from(dst)).cast(),
count.into(),
)
}?;
Ok(())
}
/// Three-way comparison
#[inline(always)]
unsafe fn cmp<T: ValueVariant + Ord>(&mut self, to: u8, reg: u8, val: T) {
self.write_reg(to, self.read_reg(reg).cast::<T>().cmp(&val) as i64);
}
/// Perform binary operation over two registers
#[inline(always)]
unsafe fn binary_op<T: ValueVariant>(&mut self, op: impl Fn(T, T) -> T) {
let OpsRRR(tg, a0, a1) = unsafe { self.decode() };
self.write_reg(
tg,
op(self.read_reg(a0).cast::<T>(), self.read_reg(a1).cast::<T>()),
);
self.bump_pc::<OpsRRR>();
}
/// Perform binary operation over register and immediate
#[inline(always)]
unsafe fn binary_op_imm<T: ValueVariant>(&mut self, op: impl Fn(T, T) -> T) {
#[derive(Clone, Copy)]
#[repr(packed)]
struct OpsRRImm<I>(OpsRR, I);
let OpsRRImm::<T>(OpsRR(tg, reg), imm) = unsafe { self.decode() };
self.write_reg(tg, op(self.read_reg(reg).cast::<T>(), imm));
self.bump_pc::<OpsRRImm<T>>();
}
/// Perform binary operation over two registers, the second one supplying the shift amount
#[inline(always)]
unsafe fn binary_op_shift<T: ValueVariant>(&mut self, op: impl Fn(T, u32) -> T) {
let OpsRRR(tg, a0, a1) = unsafe { self.decode() };
self.write_reg(
tg,
op(
self.read_reg(a0).cast::<T>(),
self.read_reg(a1).cast::<u32>(),
),
);
self.bump_pc::<OpsRRR>();
}
/// Perform binary operation over register and shift immediate
#[inline(always)]
unsafe fn binary_op_ims<T: ValueVariant>(&mut self, op: impl Fn(T, u32) -> T) {
let OpsRRB(tg, reg, imm) = unsafe { self.decode() };
self.write_reg(tg, op(self.read_reg(reg).cast::<T>(), imm.into()));
self.bump_pc::<OpsRRB>();
}
/// Fused division-remainder
#[inline(always)]
unsafe fn dir<T: ValueVariant + CheckedDivRem>(&mut self) {
handler!(self, |OpsRRRR(td, tr, a0, a1)| {
let a0 = self.read_reg(a0).cast::<T>();
let a1 = self.read_reg(a1).cast::<T>();
if let Some(div) = a0.checked_div(a1) {
self.write_reg(td, div);
} else {
self.write_reg(td, -1_i64);
}
if let Some(rem) = a0.checked_rem(a1) {
self.write_reg(tr, rem);
} else {
self.write_reg(tr, a0);
}
});
}
/// Fused multiply-add
#[inline(always)]
unsafe fn fma<T>(&mut self)
where
T: ValueVariant + core::ops::Mul<Output = T> + core::ops::Add<Output = T>,
{
handler!(self, |OpsRRRR(tg, a0, a1, a2)| {
let a0 = self.read_reg(a0).cast::<T>();
let a1 = self.read_reg(a1).cast::<T>();
let a2 = self.read_reg(a2).cast::<T>();
self.write_reg(tg, a0 * a1 + a2)
});
}
/// Float comparison
#[inline(always)]
unsafe fn fcmp<T: PartialOrd + ValueVariant>(&mut self, nan: Ordering) {
handler!(self, |OpsRRR(tg, a0, a1)| {
let a0 = self.read_reg(a0).cast::<T>();
let a1 = self.read_reg(a1).cast::<T>();
self.write_reg(tg, (a0.partial_cmp(&a1).unwrap_or(nan) as i8 + 1) as u8)
});
}
/// Calculate pc-relative address
#[inline(always)]
fn pcrel(&self, offset: impl AddressOp, pos: u8) -> Address {
self.pc.wrapping_add(pos).wrapping_add(offset)
}
/// Jump at `pc + $2` if ordering on `#0 <=> #1` is equal to expected
#[inline(always)]
unsafe fn cond_jmp<T: ValueVariant + Ord>(&mut self, expected: Ordering) {
let OpsRRP(a0, a1, ja) = unsafe { self.decode() };
if self
.read_reg(a0)
.cast::<T>()
.cmp(&self.read_reg(a1).cast::<T>())
== expected
{
self.pc = self.pcrel(ja, 3);
} else {
self.bump_pc::<OpsRRP>();
}
}
/// Load / Store Address check-computation überfunction
#[inline(always)]
unsafe fn ldst_addr_uber(
&self,
dst: u8,
base: u8,
offset: u64,
size: u16,
adder: u8,
) -> Result<Address, VmRunError> {
let reg = dst.checked_add(adder).ok_or(VmRunError::RegOutOfBounds)?;
if usize::from(reg) * 8 + usize::from(size) > 2048 {
Err(VmRunError::RegOutOfBounds)
} else {
self.read_reg(base)
.cast::<u64>()
.checked_add(offset)
.and_then(|x| x.checked_add(adder.into()))
.ok_or(VmRunError::AddrOutOfBounds)
.map(Address::new)
}
}
}

View file

@ -1,9 +0,0 @@
[package]
name = "hbvm_aos_on_linux"
version = "0.1.0"
edition = "2021"
default-run = "hbvm_aos_on_linux"
[dependencies]
hbvm.path = "../hbvm"
nix = { version = "0.27", features = ["mman", "signal"] }

View file

@ -1,3 +0,0 @@
As close to the AbleOS runtime as possible,
useful for me to spec out things on my laptop.

View file

@ -1,96 +0,0 @@
//! Holey Bytes Experimental Runtime
mod mem;
use {
hbvm::{mem::Address, Vm, VmRunOk},
nix::sys::mman::{mmap, MapFlags, ProtFlags},
std::{env::args, fs::File, num::NonZeroUsize, process::exit},
};
fn main() -> Result<(), Box<dyn std::error::Error>> {
eprintln!("== HB×RT (Holey Bytes Linux Runtime) v0.1 ==");
eprintln!("[W] Currently supporting only flat images");
let Some(image_path) = args().nth(1) else {
eprintln!("[E] Missing image path");
exit(1);
};
// Load program
eprintln!("[I] Loading image from \"{image_path}\"");
let file = File::open(image_path)?;
let ptr = unsafe {
mmap(
None,
NonZeroUsize::new(file.metadata()?.len() as usize).ok_or("File is empty")?,
ProtFlags::PROT_READ,
MapFlags::MAP_PRIVATE,
Some(&file),
0,
)?
};
eprintln!("[I] Image loaded at {ptr:p}");
// Execute program
let mut vm = unsafe { Vm::<_, 0>::new(mem::HostMemory, Address::new(ptr as u64)) };
// Memory access fault handling
unsafe {
use nix::sys::signal;
extern "C" fn action(
_: std::ffi::c_int,
info: *mut nix::libc::siginfo_t,
_: *mut std::ffi::c_void,
) {
unsafe {
eprintln!("[E] Memory access fault at {:p}", (*info).si_addr());
}
}
signal::sigaction(
signal::Signal::SIGSEGV,
&nix::sys::signal::SigAction::new(
signal::SigHandler::SigAction(action),
signal::SaFlags::SA_NODEFER,
nix::sys::signalfd::SigSet::empty(),
),
)?;
}
let stat = loop {
match vm.run() {
Ok(VmRunOk::Breakpoint) => eprintln!(
"[I] Hit breakpoint\nIP: {}\n== Registers ==\n{:?}",
vm.pc, vm.registers
),
Ok(VmRunOk::Timer) => (),
Ok(VmRunOk::Ecall) => {
// unsafe {
// std::arch::asm!(
// "syscall",
// inlateout("rax") vm.registers[1].0,
// in("rdi") vm.registers[2].0,
// in("rsi") vm.registers[3].0,
// in("rdx") vm.registers[4].0,
// in("r10") vm.registers[5].0,
// in("r8") vm.registers[6].0,
// in("r9") vm.registers[7].0,
// )
// }
}
Ok(VmRunOk::End) => break Ok(()),
Err(e) => break Err(e),
}
};
eprintln!("\n== Registers ==\n{:?}", vm.registers);
if let Err(e) = stat {
eprintln!("\n[E] Runtime error: {e:?}");
exit(2);
}
Ok(())
}

View file

@ -1,31 +0,0 @@
use hbvm::mem::{Address, LoadError, Memory, StoreError};
pub struct HostMemory;
impl Memory for HostMemory {
#[inline]
unsafe fn load(
&mut self,
addr: Address,
target: *mut u8,
count: usize,
) -> Result<(), LoadError> {
unsafe { core::ptr::copy(addr.get() as *const u8, target, count) }
Ok(())
}
#[inline]
unsafe fn store(
&mut self,
addr: Address,
source: *const u8,
count: usize,
) -> Result<(), StoreError> {
unsafe { core::ptr::copy(source, addr.get() as *mut u8, count) }
Ok(())
}
#[inline]
unsafe fn prog_read<T: Copy>(&mut self, addr: Address) -> T {
core::ptr::read(addr.get() as *const T)
}
}

View file

@ -1,9 +0,0 @@
[package]
name = "hbxrt"
version = "0.1.0"
edition = "2021"
default-run = "hbxrt"
[dependencies]
hbvm.path = "../hbvm"
memmap2 = "0.9"

View file

@ -1,83 +0,0 @@
//! Holey Bytes Experimental Runtime
#![deny(unsafe_op_in_unsafe_fn)]
mod mem;
use {
hbvm::{mem::Address, Vm, VmRunOk},
memmap2::Mmap,
std::{env::args, fs::File, mem::MaybeUninit, process::exit},
};
fn main() -> Result<(), Box<dyn std::error::Error>> {
eprintln!("== HB×RT (Holey Bytes Experimental Runtime) v0.1 ==");
eprintln!("[W] Currently supporting only flat images");
let mut args = args().skip(1);
let Some(image_path) = args.next() else {
eprintln!("[E] Missing image path");
exit(1);
};
let dsls = args.next().as_deref() == Some("-L");
if cfg!(not(target_os = "linux")) && dsls {
eprintln!("[E] Unsupported platform for Direct Linux syscall mode");
exit(1);
}
if dsls {
eprintln!("[I] Direct Linux syscall mode activated")
}
// Allocate stack
let mut stack = Box::new(MaybeUninit::<[u8; 1024 * 1024 * 2]>::uninit());
eprintln!("[I] Stack allocated at {:p}", stack.as_ptr());
// Load program
eprintln!("[I] Loading image from \"{image_path}\"");
let file_handle = File::open(image_path)?;
let mmap = unsafe { Mmap::map(&file_handle) }?;
eprintln!("[I] Image loaded at {:p}", mmap.as_ptr());
let mut vm = unsafe { Vm::<_, 0>::new(mem::HostMemory, Address::new(mmap.as_ptr() as u64)) };
vm.write_reg(254, stack.as_mut_ptr() as u64);
// Execute program
let stat = loop {
match vm.run() {
Ok(VmRunOk::Breakpoint) => eprintln!(
"[I] Hit breakpoint\nIP: {}\n== Registers ==\n{:?}",
vm.pc, vm.registers
),
Ok(VmRunOk::Timer) => (),
Ok(VmRunOk::Ecall) if dsls => unsafe {
std::arch::asm!(
"syscall",
inlateout("rax") vm.registers[1].0,
in("rdi") vm.registers[2].0,
in("rsi") vm.registers[3].0,
in("rdx") vm.registers[4].0,
in("r10") vm.registers[5].0,
in("r8") vm.registers[6].0,
in("r9") vm.registers[7].0,
)
},
Ok(VmRunOk::Ecall) => {
eprintln!("[E] General environment calls not supported");
exit(1);
}
Ok(VmRunOk::End) => break Ok(()),
Err(e) => break Err(e),
}
};
eprintln!("\n== Registers ==\n{:?}", vm.registers);
if let Err(e) = stat {
eprintln!("\n[E] Runtime error: {e:?}");
exit(2);
}
Ok(())
}

View file

@ -1,31 +0,0 @@
use hbvm::mem::{Address, LoadError, Memory, StoreError};
pub struct HostMemory;
impl Memory for HostMemory {
#[inline]
unsafe fn load(
&mut self,
addr: Address,
target: *mut u8,
count: usize,
) -> Result<(), LoadError> {
unsafe { core::ptr::copy(addr.get() as *const u8, target, count) }
Ok(())
}
#[inline]
unsafe fn store(
&mut self,
addr: Address,
source: *const u8,
count: usize,
) -> Result<(), StoreError> {
unsafe { core::ptr::copy(source, addr.get() as *mut u8, count) }
Ok(())
}
#[inline]
unsafe fn prog_read<T: Copy>(&mut self, addr: Address) -> T {
unsafe { core::ptr::read(addr.get() as *const T) }
}
}

View file

@ -1 +0,0 @@
nightly

View file

@ -1,4 +1,3 @@
hex_literal_case = "Upper"
imports_granularity = "One"
struct_field_align_threshold = 8
enum_discrim_align_threshold = 8
struct_field_align_threshold = 5

645
spec.md
View file

@ -1,505 +1,272 @@
# HoleyBytes ISA Specification
# Bytecode format
- Image format is not specified, though ELF is recommended
- All numbers are encoded little-endian
- There are 256 registers, represented by a byte
- Immediate values are 8, 16, 32 or 64 bit
- Immediate values are 64 bit
## Instruction encoding
- Instruction operands are packed (no alignment)
- [opcode, operand 0, operand 1, …]
### Instruction encoding
- Instruction parameters are packed (no alignment)
- [opcode, …parameters…]
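A non-normative Rust sketch of how a decoder might read such a packed operand group; the struct name `OpsRrd` and the helper are illustrative, though the reference VM uses similar `#[repr(packed)]` operand structs:
```rust
/// Illustrative only: a register-register-doubleword operand group, packed with no padding.
#[derive(Clone, Copy)]
#[repr(packed)]
struct OpsRrd(u8, u8, u64);

/// Read the operand group that starts right after the opcode byte.
/// The caller must guarantee `code` points at `1 + size_of::<OpsRrd>()` readable bytes.
unsafe fn decode_rrd(code: *const u8) -> OpsRrd {
    core::ptr::read_unaligned(code.add(1).cast::<OpsRrd>())
}
```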
## Instruction parameter types
- `R`: Register (8 bits)
- Relative program-counter offset immediates:
- `O`: 32 bit (Si32)
- `P`: 16 bit (Si16)
- Immediates:
- `B`: Byte, 8 bit (Xi8)
- `H`: Half-word, 16 bit (Xi16)
- `W`: Word, 32 bit (Xi32)
- `D`: Double-word, 64 bit (Xi64)
- `A`: Absolute address immediate, 64 bit (Ui64)
### Instruction parameter types
- B = Byte
- D = Doubleword (64 bits)
- H = Halfword (16 bits)
## Types
- Si*n*: Signed integer of size *n* bits (Si8, Si16, Si32, Si64)
- Ui*n*: Unsigned integer of size *n* bits (Ui8, Ui16, Ui32, Ui64)
- Xi*n*: Sign-agnostic integer of size *n* bits (Xi8, Xi16, Xi32, Xi64)
- Fl*n*: Floating point number of size *n* bits (Fl32, Fl64)
# Behaviour
- There is only one type of register, a general-purpose one.
Used for both integers and floats.
- Integer operations are wrapping, including signed numbers
- Bitshifts are truncating
- Two's complement
- Floats as specified by IEEE 754
- Execution model is implementation-defined as long as all observable
  effects are performed in the correct order
## Relative addressing
Relative addresses are computed from the address of the first byte
of the offset in the code, not from the beginning of the current or the following instruction.
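For illustration, a non-normative Rust sketch of this computation (mirroring the `pcrel` helper in the reference VM; parameter names are illustrative):
```rust
/// Compute a pc-relative target. `pc` is the address of the opcode byte and
/// `offset_pos` is the byte position of the offset field within the instruction
/// (e.g. 3 for an instruction laid out as opcode, R, R, offset).
fn pcrel(pc: u64, offset_pos: u64, offset: i32) -> u64 {
    // The base is the address of the offset field itself.
    pc.wrapping_add(offset_pos).wrapping_add(offset as i64 as u64)
}
```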
## Zero register
- Register 0
- Cannot be clobbered
- Write is no-op
- Load always yields 0
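A minimal, non-normative sketch of the zero-register rule (register file layout and names are illustrative):
```rust
struct Registers([u64; 256]);

impl Registers {
    fn read(&self, n: u8) -> u64 {
        // Register 0 always reads as zero.
        if n == 0 { 0 } else { self.0[usize::from(n)] }
    }

    fn write(&mut self, n: u8, value: u64) {
        // Writes to register 0 are silently discarded.
        if n != 0 {
            self.0[usize::from(n)] = value;
        }
    }
}
```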
## Rounding modes
| Rounding mode | Value |
|:-------------------------|:------|
| To nearest, ties to even | 0b00 |
| Towards 0 (truncate) | 0b01 |
| Towards +∞ (up) | 0b10 |
| Towards -∞ (down) | 0b11 |
- Remaining values in the byte trap with an invalid operand exception
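A non-normative sketch of decoding the rounding-mode byte according to the table above (the enum is illustrative; the reference VM exposes a similar `RoundingMode::try_from`):
```rust
enum Rounding {
    NearestTiesToEven, // 0b00
    TowardZero,        // 0b01
    Up,                // 0b10
    Down,              // 0b11
}

fn decode_rounding(byte: u8) -> Result<Rounding, ()> {
    match byte {
        0b00 => Ok(Rounding::NearestTiesToEven),
        0b01 => Ok(Rounding::TowardZero),
        0b10 => Ok(Rounding::Up),
        0b11 => Ok(Rounding::Down),
        // Any other value must raise the invalid operand exception.
        _ => Err(()),
    }
}
```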
# Memory
- Memory implementation is implementation-defined
- Zero address (`0x0`) is considered invalid
# Traps
- Environment call
- Environment breakpoint
Program counter goes to the following instruction
## Exceptions
- Memory access fault
- Invalid operand
- Unknown opcode
Program counter stays on the currently executed instruction
| Name | Size |
|:----:|:--------|
| BBBB | 32 bits |
| BBB | 24 bits |
| BBDH | 96 bits |
| BBDB | 88 bits |
| BBD | 80 bits |
| BB | 16 bits |
| BD | 72 bits |
| D | 64 bits |
| N | 0 bits |
# Instructions
- `#n`: register in parameter *n*
- `$n`: for immediate in parameter *n*
- `#P ← V`: Set register P to value V
- `imm #n`: for immediate in parameter *n*
- `P ← V`: Set register P to value V
- `[x]`: Address x
- `XY`: X bytes from location Y
- `pc`: Program counter
- `<XYZ>`: Placeholder
- `Type(X)`: Cast
## Program execution control
- Type `N`
## No-op
- N type
| Opcode | Mnemonic | Action |
|:-------|:---------|:--------------------------------------------|
| 0x00 | UN | Throw unreachable code exception |
| 0x01 | TX | Terminate execution (e.g. at end of program) |
| 0x02 | NOP | Do nothing |
| Opcode | Name | Action |
|:------:|:----:|:----------:|
| 0 | NOP | Do nothing |
## Binary register-register ops
- Type `RRR`
- Action: `#0 ← #1 <OP> #2`
## Integer binary ops.
- BBB type
- `#0 ← #1 <op> #2`
## Addition (`+`)
| Opcode | Mnemonic | Type |
|:-------|:---------|:-----|
| 0x03 | ADD8 | Xi8 |
| 0x04 | ADD16 | Xi16 |
| 0x05 | ADD32 | Xi32 |
| 0x06 | ADD64 | Xi64 |
| Opcode | Name | Action |
|:------:|:----:|:-----------------------:|
| 1 | ADD | Wrapping addition |
| 2 | SUB | Wrapping subtraction |
| 3 | MUL | Wrapping multiplication |
| 4 | AND | Bitand |
| 5 | OR | Bitor |
| 6 | XOR | Bitxor |
| 7 | SL | Unsigned left bitshift |
| 8 | SR | Unsigned right bitshift |
| 9 | SRS | Signed right bitshift |
## Subtraction (`-`)
| Opcode | Mnemonic | Type |
|:-------|:---------|:-----|
| 0x07 | SUB8 | Xi8 |
| 0x08 | SUB16 | Xi16 |
| 0x09 | SUB32 | Xi32 |
| 0x0A | SUB64 | Xi64 |
### Comparison
| Opcode | Name | Action |
|:------:|:----:|:-------------------:|
| 10 | CMP | Signed comparison |
| 11 | CMPU | Unsigned comparison |
## Multiplication (`*`)
| Opcode | Mnemonic | Type |
|:-------|:---------|:-----|
| 0x0B | MUL8 | Xi8 |
| 0x0C | MUL16 | Xi16 |
| 0x0D | MUL32 | Xi32 |
| 0x0E | MUL64 | Xi64 |
## Bitwise ops (type: Xi64)
| Opcode | Mnemonic | Operation |
|:-------|:---------|:--------------------|
| 0x0F | AND | Conjunction (&) |
| 0x10 | OR | Disjunction (\|) |
| 0x11 | XOR | Non-equivalence (^) |
## Unsigned left bitshift (`<<`)
| Opcode | Mnemonic | Type |
|:-------|:---------|:-----|
| 0x12 | SLU8 | Ui8 |
| 0x13 | SLU16 | Ui16 |
| 0x14 | SLU32 | Ui32 |
| 0x15 | SLU64 | Ui64 |
## Unsigned right bitshift (`>>`)
| Opcode | Mnemonic | Type |
|:-------|:---------|:-----|
| 0x16 | SRU8 | Ui8 |
| 0x17 | SRU16 | Ui16 |
| 0x18 | SRU32 | Ui32 |
| 0x19 | SRU64 | Ui64 |
## Signed right bitshift (`>>`)
| Opcode | Mnemonic | Type |
|:-------|:---------|:-----|
| 0x1A | SRS8 | Si8 |
| 0x1B | SRS16 | Si16 |
| 0x1C | SRS32 | Si32 |
| 0x1D | SRS64 | Si64 |
## Comparison
- Compares two numbers and saves the result to a register
- Operation: `#0 ← #1 <=> #2`
| Ordering | Number |
|:---------|:-------|
#### Comparison table
| #1 *op* #2 | Result |
|:----------:|:------:|
| < | -1 |
| = | 0 |
| > | 1 |
| Opcode | Mnemonic | Type |
|:-------|:---------|:-----|
| 0x1E | CMPU | Ui64 |
| 0x1F | CMPS | Si64 |
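A non-normative sketch of the result encoding, matching the comparison table above:
```rust
use core::cmp::Ordering;

/// Map a three-way comparison onto the value written to the target register.
fn cmp_result(a: u64, b: u64) -> i64 {
    match a.cmp(&b) {
        Ordering::Less => -1,
        Ordering::Equal => 0,
        Ordering::Greater => 1,
    }
}
```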
# Merged divide-remainder
- Type `RRRR`
- Operation:
- `#0 ← #2 / #3`
### Division-remainder
- Type BBBB
- In case `#3` is zero, the resulting value is all-ones
- `#0 ← #2 ÷ #3`
- `#1 ← #2 % #3`
- If dividing by zero:
- `#0 ← Ui64(-1)`
- `#1 ← #2`
| Opcode | Name | Action |
|:------:|:----:|:-------------------------------:|
| 12 | DIR | Divide and remainder combined |
| Opcode | Mnemonic | Type |
|:-------|:---------|:-----|
| 0x20 | DIRU8 | Ui8 |
| 0x21 | DIRU16 | Ui16 |
| 0x22 | DIRU32 | Ui32 |
| 0x23 | DIRU64 | Ui64 |
| 0x24 | DIRS8 | Si8 |
| 0x25 | DIRS16 | Si16 |
| 0x26 | DIRS32 | Si32 |
| 0x27 | DIRS64 | Si64 |
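A non-normative sketch of the divide-by-zero rule, using checked division as the reference dispatch loop does (function name illustrative):
```rust
/// Fused divide-remainder for the unsigned 64-bit variant.
fn dir_u64(dividend: u64, divisor: u64) -> (u64, u64) {
    // All-ones quotient and unchanged dividend as remainder when dividing by zero.
    let quotient = dividend.checked_div(divisor).unwrap_or(u64::MAX);
    let remainder = dividend.checked_rem(divisor).unwrap_or(dividend);
    (quotient, remainder)
}
```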
### Negations
- Type BB
- `#0 ← <op> #1`
# Unary register operations (type: Xi64)
- Type: `RR`
- Operation: `#0 ← <OP> #1`
| Opcode | Name | Action |
|:------:|:----:|:----------------:|
| 13 | NEG | Bit negation |
| 14 | NOT | Logical negation |
| Opcode | Mnemonic | Operation |
|:-------|:---------|:-------------------------|
| 0x28 | NEG | Bitwise complement (`~`) |
| 0x29 | NOT | Logical negation (`!`) |
## Integer immediate binary ops.
- Type BBD
- `#0 ← #1 <op> imm #2`
## Sign extensions
- Operation: `#0 ← Si64(#1)`
| Opcode | Name | Action |
|:------:|:----:|:-----------------------:|
| 15 | ADDI | Wrapping addition |
| 16 | MULI | Wrapping multiplication |
| 17 | ANDI | Bitand |
| 18 | ORI | Bitor |
| 19 | XORI | Bitxor |
| 20 | SLI | Unsigned left bitshift |
| 21 | SRI | Unsigned right bitshift |
| 22 | SRSI | Signed right bitshift |
| Opcode | Mnemonic | Source type |
|:-------|:---------|:------------|
| 0x2A | SXT8 | Si8 |
| 0x2B | SXT16 | Si16 |
| 0x2C | SXT32 | Si32 |
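For example, SXT8 interprets the low 8 bits as signed and widens them to 64 bits; a non-normative Rust sketch:
```rust
/// SXT8 semantics: take the low byte, treat it as signed, sign-extend to 64 bits.
fn sxt8(value: u64) -> u64 {
    value as u8 as i8 as i64 as u64
}
```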
### Comparison
- Comparison is the same as with the RRR type
# Binary register-immediate operations
- Type: `RR<IMM>`
- Operation: `#0 ← #1 <OP> $2`
| Opcode | Name | Action |
|:------:|:-----:|:-------------------:|
| 23 | CMPI | Signed comparison |
| 24 | CMPUI | Unsigned comparison |
## Addition (`+`)
| Opcode | Mnemonic | Type |
|:-------|:---------|:-----|
| 0x2D | ADDI8 | Xi8 |
| 0x2E | ADDI16 | Xi16 |
| 0x2F | ADDI32 | Xi32 |
| 0x30 | ADDI64 | Xi64 |
## Register value set / copy
## Multiplication (`*`)
| Opcode | Mnemonic | Type |
|:-------|:---------|:-----|
| 0x31 | MULI8 | Xi8 |
| 0x32 | MULI16 | Xi16 |
| 0x33 | MULI32 | Xi32 |
| 0x34 | MULI64 | Xi64 |
### Copy
- Type BB
- `#0 ← #1`
## Bitwise ops (type: Xi64)
| Opcode | Mnemonic | Operation |
|:-------|:---------|:--------------------|
| 0x35 | ANDI | Conjunction (&) |
| 0x36 | ORI | Disjunction (\|) |
| 0x37 | XORI | Non-equivalence (^) |
| Opcode | Name | Action |
|:------:|:----:|:------:|
| 25 | CP | Copy |
# Register-immediate bitshifts
- Type: `RRB`
- Operation: `#0 ← #1 <OP> $2`
### Swap
- Type BB
- Swap #0 and #1
## Unsigned left bitshift (`<<`)
| Opcode | Mnemonic | Type |
|:-------|:---------|:-----|
| 0x38 | SLUI8 | Ui8 |
| 0x39 | SLUI16 | Ui16 |
| 0x3A | SLUI32 | Ui32 |
| 0x3B | SLUI64 | Ui64 |
| Opcode | Name | Action |
|:------:|:----:|:------:|
| 26 | SWA | Swap |
## Unsigned right bitshift (`>>`)
| Opcode | Mnemonic | Type |
|:-------|:---------|:-----|
| 0x3C | SRUI8 | Ui8 |
| 0x3D | SRUI16 | Ui16 |
| 0x3E | SRUI32 | Ui32 |
| 0x3F | SRUI64 | Ui64 |
### Load immediate
- Type BD
- `#0 ← #1`
## Signed right bitshift (`>>`)
| Opcode | Mnemonic | Type |
|:-------|:---------|:-----|
| 0x40 | SRSI8 | Si8 |
| 0x41 | SRSI16 | Si16 |
| 0x42 | SRSI32 | Si32 |
| 0x43 | SRSI64 | Si64 |
| Opcode | Name | Action |
|:------:|:----:|:--------------:|
| 27 | LI | Load immediate |
## Comparison
- Compares two numbers and saves the result to a register
- Operation: `#0 ← #1 <=> $2`
- Comparison table is the same as for the register-register variant
## Memory operations
- Type BBDH
- If the loaded/stored value exceeds one register size, continue accessing the following registers
| Opcode | Mnemonic | Type |
|:-------|:---------|:-----|
| 0x44 | CMPUI | Ui64 |
| 0x45 | CMPSI | Si64 |
### Load / Store
| Opcode | Name | Action |
|:------:|:----:|:---------------------------------------:|
| 28 | LD | `#0 ← [#1 + imm #2], copy imm #3 bytes` |
| 29 | ST | `[#1 + imm #2] ← #0, copy imm #3 bytes` |
# Register copies
- Type: `RR`
## Block copy
- Block copy source and target can overlap
| Opcode | Mnemonic | Operation |
|:-------|:---------|:---------------------------------|
| 0x46 | CP | Copy register value (`#0 ← #1`) |
| 0x47 | SWA | Swap register values (`#0 ⇆ #1`) |
### Memory copy
- Type BBD
# Load immediate
- Load immediate value from code to register
- Type: `R<IMM>`
- Operation: `#0 ← $1`
| Opcode | Name | Action |
|:------:|:----:|:--------------------------------:|
| 30 | BMC | `[#0] ← [#1], copy imm #2 bytes` |
| Opcode | Mnemonic | Type |
|:-------|:---------|:-----|
| 0x48 | LI8 | Xi8 |
| 0x49 | LI16 | Xi16 |
| 0x4A | LI32 | Xi32 |
| 0x4B | LI64 | Xi64 |
### Register copy
- Type BBB
- Copy a block of registers to another location (again, overflowing to the following registers)
# Load relative address
- Compute value from program counter, register value and offset
- Type: `RRO`
- Operation: `#0 ← pc + #1 + $2`
| Opcode | Name | Action |
|:------:|:----:|:--------------------------------:|
| 31 | BRC | `#0 ← #1, copy imm #2 registers` |
| Opcode | Mnemonic |
|:-------|:---------|
| 0x4C | LRA |
## Control flow
# Memory access operations
- Immediate `$3` specifies size
- If size is greater than register size,
it overflows to adjacent registers
(e.g. copying 16 bytes to register `r1` copies the first 8 bytes to it
and the remaining 8 to `r2`)
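A non-normative sketch of this register-overflow rule for loads, assuming a little-endian 256 × 64-bit register file (names are illustrative; bounds are assumed to have been checked beforehand):
```rust
/// Write `bytes` loaded from memory into registers starting at `dst`,
/// spilling into the following registers once 8 bytes are exceeded.
fn write_loaded_bytes(registers: &mut [u64; 256], dst: u8, bytes: &[u8]) {
    for (i, chunk) in bytes.chunks(8).enumerate() {
        let idx = usize::from(dst) + i;
        // Preserve the upper bytes of a register that is only partially written.
        let mut word = registers[idx].to_le_bytes();
        word[..chunk.len()].copy_from_slice(chunk);
        registers[idx] = u64::from_le_bytes(word);
    }
}
```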
### Unconditional jump
- Type BD
## Absolute addressing
- Type: `RRAH`
- Computes address from base register and absolute offset
| Opcode | Name | Action |
|:------:|:----:|:---------------------:|
| 32 | JMP | Jump at `#0 + imm #1` |
| Opcode | Mnemonic | Operation |
|:-------|:---------|:-------------------|
| 0x4D | LD | `#0 ← $3[#1 + $2]` |
| 0x4E | ST | `$3[#1 + $2] ← #0` |
### Conditional jumps
- Type BBD
- Jump at `imm #2` if `#0 <op> #1`
## Relative addressing
- Type: `RROH`
- Computes address from register and offset from program counter
| Opcode | Name | Comparison |
|:------:|:----:|:------------:|
| 33 | JEQ | = |
| 34 | JNE | ≠ |
| 35 | JLT | < (signed) |
| 36 | JGT | > (signed) |
| 37 | JLTU | < (unsigned) |
| 38 | JGTU | > (unsigned) |
| Opcode | Mnemonic | Operation |
|:-------|:---------|:------------------------|
| 0x4F | LDR | `#0 ← $3[pc + #1 + $2]` |
| 0x50 | STR | `$3[pc + #1 + $2] ← #0` |
### Environment call
- Type N
# Block memory copy
- Type: `RRH`
- Copies a block of `$2` bytes from the address in `#0` to the address in `#1`
| Opcode | Name | Action |
|:------:|:-----:|:-------------------------------------:|
| 39 | ECALL | Cause an trap to the host environment |
| Opcode | Mnemonic | Operation |
|:-------|:---------|:------------------|
| 0x51 | BMC | `$2[#1] ← $2[#0]` |
## Floating point operations
- Type BBB
- `#0 ← #1 <op> #2`
# Block register copy
- Type: `RRB`
- Copy a block of `$2` registers starting at `#0` to `#1`
- Copying past the last register (255) causes an exception
| Opcode | Name | Action |
|:------:|:----:|:--------------:|
| 40 | ADDF | Addition |
| 41 | MULF | Multiplication |
| Opcode | Mnemonic | Operation |
|:-------|:---------|:--------------|
| 0x52 | BRC | `$2#1 ← $2#0` |
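A non-normative sketch of the bounds rule: the copy must not run past the last register, otherwise an exception is raised (error type illustrative):
```rust
/// Copy `count` registers from `src` to `dst`, rejecting copies that would
/// run past the register file (overlap is allowed, as `copy_within` is a memmove).
fn brc(registers: &mut [u64; 256], src: u8, dst: u8, count: u8) -> Result<(), &'static str> {
    if src.checked_add(count).is_none() || dst.checked_add(count).is_none() {
        return Err("register copy out of bounds");
    }
    registers.copy_within(
        usize::from(src)..usize::from(src) + usize::from(count),
        usize::from(dst),
    );
    Ok(())
}
```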
### Division-remainder
- Type BBBB
# Relative jump
- Type: `O`
| Opcode | Name | Action |
|:------:|:----:|:--------------------------------------:|
| 42 | DIRF | Same flow applies as for integer `DIR` |
| Opcode | Mnemonic | Operation |
|:-------|:---------|:---------------|
| 0x53 | JMP | `pc ← pc + $0` |
## Floating point immediate operations
- Type BBD
- `#0 ← #1 <op> imm #2`
# Linking jump
- Operation:
- Save address of following instruction to `#0`
- `#0 ← pc+<instruction size>`
- Jump to specified address
| Opcode | Name | Action |
|:------:|:-----:|:--------------:|
| 43 | ADDFI | Addition |
| 44 | MULFI | Multiplication |
| Opcode | Mnemonic | Instruction type | Address |
|:-------|:---------|:------------------|:-------------------------|
| 0x54 | JAL | RRO (size = 6 B) | Relative, `pc + #1 + $2` |
| 0x55 | JALA | RRA (size = 10 B) | Absolute, `#1 + $2` |
# Registers
- There are 255 registers + one zero register (with index 0)
- Reading from zero register yields zero
- Writing to zero register is a no-op
# Conditional jump
- Perform a comparison; if the condition is met, jump to a relative address
- Type: `RRP`
- Operation: `if #0 <CMP> #1 { pc ← pc + $2 }`
# Memory
- Addresses are 64 bit
- Memory implementation is arbitrary
- In case of accessing an invalid address:
- Program shall trap (LoadAccessEx, StoreAccessEx) with the accessed address as a parameter
- The value of the register when trapped is undefined
| Opcode | Mnemonic | Condition | Type |
|:-------|:---------|:-------------------|:-----|
| 0x56 | JEQ | Equals (`=`) | Xi64 |
| 0x57 | JNE | Not-equals (`≠`) | Xi64 |
| 0x58 | JLTU | Less-than (`<`) | Ui64 |
| 0x59 | JGTU | Greater-than (`>`) | Ui64 |
| 0x5A | JLTS | Less-than (`<`) | Si64 |
| 0x5B | JGTS | Greater-than (`>`) | Si64 |
## Recommendations
- Leave address `0x0` as invalid
- If paging used:
- Leave first page invalid
- Pages should be at least 4 KiB
# Environment traps
- Traps to the environment
- Type: `N`
# Program execution
- The way of program execution is implementation-defined
- The order of instructions is arbitrary, as long as all observable
effects are applied in program order
| Opcode | Mnemonic | Trap type |
|:-------|:---------|:-----------------|
| 0x5C | ECA | Environment call |
| 0x5D | EBP | Breakpoint |
# Program validation
- An invalid program should cause a runtime error:
- The form of the error is arbitrary; it can be a trap or an interpreter-specified error
- It shall not be handleable from within the program
- Executing an invalid opcode should trap
- A program can be validated either before execution or during execution
# Floating point binary operations
- Type: `RRR`
- Operation: `#0 ← #1 <OP> #2`
# Traps
The implementation should at least provide these traps:
- Environment call
- Invalid instruction exception
- Load address exception
- Store address exception
| Opcode | Mnemonic | Operation | Type |
|:-------|:---------|:---------------------|:-----|
| 0x5E | FADD32 | Addition (`+`) | Fl32 |
| 0x5F | FADD64 | Addition (`+`) | Fl64 |
| 0x60 | FSUB32 | Subtraction (`-`) | Fl32 |
| 0x61 | FSUB64 | Subtraction (`-`) | Fl64 |
| 0x62 | FMUL32 | Multiplication (`*`) | Fl32 |
| 0x63 | FMUL64 | Multiplication (`*`) | Fl64 |
| 0x64 | FDIV32 | Division (`/`) | Fl32 |
| 0x65 | FDIV64 | Division (`/`) | Fl64 |
and the executing environment should be able to get information about them,
such as the opcode of the invalid instruction or the address of the attempted load/store.
Details are left as an implementation detail.
# Fused multiply-add
- Type: `RRRR`
- Operation: `#0 ← (#1 * #2) + #3`
# Assembly
The HoleyBytes assembly format is not formally defined; this is just a loose description
of `hbasm` syntax.
| Opcode | Mnemonic | Type |
|:-------|:---------|:-----|
| 0x66 | FMA32 | Fl32 |
| 0x67 | FMA64 | Fl64 |
# Comparisons
- Type: `RRR`
- Operation: `#0 ← #1 <=> #2`
- Comparison table is the same as for `CMPx`/`CMPxI`
- Whether NaN is less-than or greater-than depends on the variant
| Opcode | Mnemonic | Type | NaN is |
|:-------|:---------|:-----|:-------|
| 0x6A | FCMPLT32 | Fl32 | < |
| 0x6B | FCMPLT64 | Fl64 | < |
| 0x6C | FCMPGT32 | Fl32 | > |
| 0x6D | FCMPGT64 | Fl64 | > |
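A non-normative sketch of the NaN rule: when the operands are unordered, the variant's fixed ordering is substituted (as the reference dispatch loop does with `partial_cmp`):
```rust
use core::cmp::Ordering;

/// Float comparison where an unordered result (NaN) takes the variant's
/// fixed ordering: `Ordering::Less` for FCMPLT*, `Ordering::Greater` for FCMPGT*.
fn fcmp32(a: f32, b: f32, nan_is: Ordering) -> Ordering {
    a.partial_cmp(&b).unwrap_or(nan_is)
}
```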
# Int to float
- Type: `RR`
- Converts from `Si64`
- Operation: `#0 ← Fl<SIZE>(#1)`
| Opcode | Mnemonic | Type |
|:-------|:---------|:-----|
| 0x6E | ITF32 | Fl32 |
| 0x6F | ITF64 | Fl64 |
# Float to int
- Type: `RRB`
- Operation: `#0 ← Si64(#1)`
- Immediate `$2` specifies rounding mode
| Opcode | Mnemonic | Type |
|:-------|:---------|:-----|
| 0x70 | FTI32 | Fl32 |
| 0x71 | FTI64 | Fl64 |
# Fl32 to Fl64
- Type: `RR`
- Operation: `#0 ← Fl64(#1)`
| Opcode | Mnemonic |
|:-------|:---------|
| 0x72 | FC32T64 |
# Fl64 to Fl32
- Type: `RRB`
- Operation: `#0 ← Fl32(#1)`
- Immediate `$2` specifies the rounding mode
| Opcode | Mnemonic |
|:-------|:---------|
| 0x73 | FC64T32 |
# 16-bit relative address instruction variants
| Opcode | Mnemonic | Type | Variant of |
|:-------|:---------|:-----|:-----------|
| 0x74 | LRA16 | RRP | LRA |
| 0x75 | LDR16 | RRPH | LDR |
| 0x76 | STR16 | RRPH | STR |
| 0x77 | JMP16 | P | JMP |
# psABI
## C datatypes and alignment
- One byte is 8 bits
| C Type | Description | Byte sizes |
|:------------|:-------------------------|:-----------|
| char | Character / byte | 1 |
| short | Short integer | 2 |
| int | Integer | 4 |
| long | Long integer | 8 |
| long long | Long long integer | 8 |
| float | Single-precision float | 4 |
| double | Double-precision float | 8 |
| long double | Extended-precision float | 8 |
- Bikeshedding note: `long double` is now 8 bytes as
the base ISA does not support `f128`. An extension
for that should be made.
## Call convention
- Registers r1 - r31 are caller saved
- Registers r32 - r255 are callee saved
| Register | Description | Saver |
|:-----------|:--------------------|:-------|
| r0 | Hard-wired zero | N/A |
| r1 - r2 | Return values | Caller |
| r2 - r11 | Function parameters | Caller |
| r12 - r30 | General purpose | Caller |
| r31 | Return address | Caller |
| r32 - r253 | General purpose | Callee |
| r254 | Stack pointer | Callee |
| r255 | Thread pointer | N/A |
- If the return value is too big to fit in r1, r2 is also used.
- Values larger than two double-words are passed by reference
- Opcode names correspond to specified opcode names, lowercase (`nop`)
- Parameters are separated by comma (`addi r0, r0, 1`)
- Instructions are separated by either line feed or semicolon
- Registers are represented by `r` followed by the number (`r10`)
- Labels are defined by label name followed with colon (`loop:`)
- Labels are referenced simply by their name (`print`)
- Immediates are entered plainly. Negative numbers are supported.

View file

@ -1,9 +0,0 @@
[package]
name = "xtask"
version = "0.1.0"
edition = "2021"
[dependencies]
argh = "0.1"
color-eyre = "0.6"
once_cell = "1.18"

View file

@ -1,93 +0,0 @@
use {
crate::{utils::IterExt, ROOT},
argh::FromArgs,
color_eyre::{eyre::eyre, Result},
std::{
fs::File,
io::{BufRead, BufReader, BufWriter, Seek, Write},
},
};
/// Format `instructions.in`
#[derive(Debug, FromArgs, PartialEq)]
#[argh(subcommand, name = "fmt")]
pub struct Command {
/// renumber instructions in their definition order
#[argh(switch, short = 'r')]
renumber: bool,
}
pub fn command(args: Command) -> Result<()> {
let mut file = File::options()
.read(true)
.write(true)
.open(ROOT.join("hbbytecode/instructions.in"))?;
// Extract records
let reader = BufReader::new(&file);
let mut recs = vec![];
let mut lens = [0_usize; 4];
for rec in reader.split(b';').filter_map(|r| {
r.map(|ln| {
let s = String::from_utf8_lossy(&ln);
let s = s.trim_matches('\n');
if s.is_empty() {
return None;
}
s.split(',')
.map(|s| Box::<str>::from(s.trim()))
.collect_array::<4>()
.map(Ok::<_, ()>)
})
.transpose()
}) {
let rec = rec?.map_err(|_| eyre!("Invalid record format"))?;
for (current, next) in lens.iter_mut().zip(rec.iter()) {
*current = (*current).max(next.len());
}
recs.push(rec);
}
// Clear file!
file.set_len(0)?;
file.seek(std::io::SeekFrom::Start(0))?;
let mut writer = BufWriter::new(file);
let ord_opco_len = digit_count(recs.len()) as usize;
for (n, rec) in recs.iter().enumerate() {
// Write opcode number
if args.renumber {
let n = format!("{n:#04X}");
write!(writer, "{n}, {}", padding(ord_opco_len, &n))?;
} else {
write!(writer, "{}, {}", rec[0], padding(lens[0], &rec[0]))?;
}
// Write other fields
writeln!(
writer,
"{}, {}{},{} {}{};",
rec[1],
padding(lens[1], &rec[1]),
rec[2],
padding(lens[2], &rec[2]),
rec[3],
padding(lens[3], &rec[3]),
)?;
}
Ok(())
}
fn padding(req: usize, s: &str) -> Box<str> {
" ".repeat(req.saturating_sub(s.len())).into()
}
#[inline]
fn digit_count(n: usize) -> u32 {
n.checked_ilog10().unwrap_or(0) + 1
}

View file

@ -1,25 +0,0 @@
mod fmt;
mod utils;
use {argh::FromArgs, color_eyre::Result, once_cell::sync::Lazy, std::path::Path};
static ROOT: Lazy<&Path> = Lazy::new(|| Path::new(env!("CARGO_MANIFEST_DIR")).parent().unwrap());
/// xTask for Holey Bytes project
#[derive(FromArgs)]
struct Command {
#[argh(subcommand)]
subcom: Subcommands,
}
#[derive(FromArgs)]
#[argh(subcommand)]
enum Subcommands {
Format(fmt::Command),
}
fn main() -> Result<()> {
match argh::from_env::<Command>().subcom {
Subcommands::Format(com) => fmt::command(com),
}
}

View file

@ -1,19 +0,0 @@
use std::mem::MaybeUninit;
pub trait IterExt: Iterator {
fn collect_array<const N: usize>(&mut self) -> Option<[Self::Item; N]>
where
Self: Sized,
{
let mut array: [MaybeUninit<Self::Item>; N] =
unsafe { MaybeUninit::uninit().assume_init() };
for item in &mut array {
item.write(self.next()?);
}
Some(array.map(|item| unsafe { item.assume_init() }))
}
}
impl<T: Iterator> IterExt for T {}