Compare commits

...

42 Commits

Author SHA1 Message Date
rcy17 96bf183f43 Fix CI 2023-03-03 00:55:43 +08:00
hw0505 87e3fc5033 avoid template repo create page 2023-02-28 15:45:28 +08:00
hw0505 ca22127a71 Merge pull request #9 from os-lecture/ch8: update CI to support rank list 2023-02-28 14:38:14 +08:00
hw0505 7e7ec8ee07 update CI to support rank list 2023-02-28 11:47:42 +08:00
rcy c93fca5868 Add base passwd 2023-02-15 16:57:51 +08:00
holder 35794468f0 Fix SYS_PIPE 'break' bug 2023-02-10 14:25:32 +08:00
holder 0154baaf90 Add github action for ch8 2023-02-10 13:30:50 +08:00
Campbell He 9dd330e6bd chore: upgrade rustsbi-qemu and add gitlab mirror action 2022-10-06 21:15:42 +08:00
Campbell He 25e220dd5f docs: add info of user 2022-10-06 13:56:33 +08:00
Campbell He cef887a29d ci: add gitlab-ci.yml 2022-10-05 09:40:24 +08:00
rcy17 05709d3153 Remove useless extern 2022-05-21 16:58:41 +08:00
rcy17 63247009a4 Update LAB5 remarks 2022-05-21 10:21:17 +08:00
rcy17 f1a5deb88a Add LAB5 remarks 2022-05-20 21:21:47 +08:00
rcy17 b1cb5f9cec Remove initproc.S 2022-05-20 19:18:04 +08:00
rcy17 915ad9898d Fix fetch null task 2022-05-20 19:17:08 +08:00
rcy17 63e3839155 Add logs 2022-05-20 16:23:13 +08:00
rcy17 fcefd480ee Fix proc exit code 2022-05-20 15:26:48 +08:00
rcy17 fcaa65b576 Change MAX_STR_LEN from 200 to 300 2022-05-20 12:53:19 +08:00
rcy17 afb6638efa Finish locks 2022-05-20 10:01:06 +08:00
youyuyang be3829b512 add more garbage when releasing threads 2022-05-19 17:05:31 +08:00
youyuyang f8cc3c5bcd add multithread support 2022-05-19 13:57:08 +08:00
youyuyang e9c7627b36 Merge branch 'ch6' into ch8 2022-05-15 15:34:41 +08:00
rcy17 d933c3731d Add some tips 2022-05-02 13:14:49 +08:00
许善朴 87ce9568f8 ch6 2022-05-01 22:17:34 +08:00
许善朴 332e27e912 ch6 2022-05-01 18:49:45 +08:00
许善朴 b2524741d6 123 2022-05-01 18:37:29 +08:00
rcy17 1db741ea95 Merge ch5 2022-05-01 12:37:34 +08:00
许善朴 960a6881c2 123 2022-04-27 22:21:20 +08:00
许善朴 bee7993bdc 123 2022-04-27 22:11:36 +08:00
许善朴 9899a33a5f 123 2022-04-27 22:00:07 +08:00
许善朴 c790078a32 123 2022-04-25 15:39:52 +08:00
Gallium70 038b95e02f Merge branch 'thu-ch5' into ch5 2022-04-19 19:54:25 +08:00
许善朴 431f75b804 123 2022-04-15 19:01:59 +08:00
KaitoD e4a4932988 update ch5 2022-04-14 16:50:15 +08:00
azd19 ced96aad2a update ch5 code 2022-04-14 14:12:19 +08:00
rcy17 0c797b2101 ch7 2022-03-09 20:39:08 +08:00
rcy17 d141f72716 ch5 2022-03-09 20:39:08 +08:00
rcy17 23107794c8 ch6 2022-03-09 20:39:08 +08:00
rcy17 b125038fbd ch4 2022-03-09 20:39:08 +08:00
rcy17 3ec22ee3d3 ch3 2022-03-09 20:39:08 +08:00
rcy17 235e2ca5f5 ch2 2022-03-09 20:39:08 +08:00
rcy17 8645ae06c1 ch1 2022-03-09 20:38:26 +08:00
65 changed files with 6314 additions and 0 deletions

561
.clang-format Normal file

@ -0,0 +1,561 @@
# SPDX-License-Identifier: GPL-2.0
#
# clang-format configuration file. Intended for clang-format >= 4.
#
# For more information, see:
#
# Documentation/process/clang-format.rst
# https://clang.llvm.org/docs/ClangFormat.html
# https://clang.llvm.org/docs/ClangFormatStyleOptions.html
#
---
AccessModifierOffset: -4
AlignAfterOpenBracket: Align
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: false
#AlignEscapedNewlines: Left # Unknown to clang-format-4.0
AlignOperands: true
AlignTrailingComments: false
AllowAllParametersOfDeclarationOnNextLine: false
AllowShortBlocksOnASingleLine: false
AllowShortCaseLabelsOnASingleLine: false
AllowShortFunctionsOnASingleLine: None
AllowShortIfStatementsOnASingleLine: false
AllowShortLoopsOnASingleLine: false
AlwaysBreakAfterDefinitionReturnType: None
AlwaysBreakAfterReturnType: None
AlwaysBreakBeforeMultilineStrings: false
AlwaysBreakTemplateDeclarations: false
BinPackArguments: true
BinPackParameters: true
BraceWrapping:
AfterClass: false
AfterControlStatement: false
AfterEnum: false
AfterFunction: true
AfterNamespace: true
AfterObjCDeclaration: false
AfterStruct: false
AfterUnion: false
#AfterExternBlock: false # Unknown to clang-format-5.0
BeforeCatch: false
BeforeElse: false
IndentBraces: false
#SplitEmptyFunction: true # Unknown to clang-format-4.0
#SplitEmptyRecord: true # Unknown to clang-format-4.0
#SplitEmptyNamespace: true # Unknown to clang-format-4.0
BreakBeforeBinaryOperators: None
BreakBeforeBraces: Custom
#BreakBeforeInheritanceComma: false # Unknown to clang-format-4.0
BreakBeforeTernaryOperators: false
BreakConstructorInitializersBeforeComma: false
#BreakConstructorInitializers: BeforeComma # Unknown to clang-format-4.0
BreakAfterJavaFieldAnnotations: false
BreakStringLiterals: false
ColumnLimit: 80
CommentPragmas: '^ IWYU pragma:'
#CompactNamespaces: false # Unknown to clang-format-4.0
ConstructorInitializerAllOnOneLineOrOnePerLine: false
ConstructorInitializerIndentWidth: 8
ContinuationIndentWidth: 8
Cpp11BracedListStyle: false
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
#FixNamespaceComments: false # Unknown to clang-format-4.0
# Taken from:
# git grep -h '^#define [^[:space:]]*for_each[^[:space:]]*(' include/ \
# | sed "s,^#define \([^[:space:]]*for_each[^[:space:]]*\)(.*$, - '\1'," \
# | sort | uniq
ForEachMacros:
- 'apei_estatus_for_each_section'
- 'ata_for_each_dev'
- 'ata_for_each_link'
- '__ata_qc_for_each'
- 'ata_qc_for_each'
- 'ata_qc_for_each_raw'
- 'ata_qc_for_each_with_internal'
- 'ax25_for_each'
- 'ax25_uid_for_each'
- '__bio_for_each_bvec'
- 'bio_for_each_bvec'
- 'bio_for_each_bvec_all'
- 'bio_for_each_integrity_vec'
- '__bio_for_each_segment'
- 'bio_for_each_segment'
- 'bio_for_each_segment_all'
- 'bio_list_for_each'
- 'bip_for_each_vec'
- 'bitmap_for_each_clear_region'
- 'bitmap_for_each_set_region'
- 'blkg_for_each_descendant_post'
- 'blkg_for_each_descendant_pre'
- 'blk_queue_for_each_rl'
- 'bond_for_each_slave'
- 'bond_for_each_slave_rcu'
- 'bpf_for_each_spilled_reg'
- 'btree_for_each_safe128'
- 'btree_for_each_safe32'
- 'btree_for_each_safe64'
- 'btree_for_each_safel'
- 'card_for_each_dev'
- 'cgroup_taskset_for_each'
- 'cgroup_taskset_for_each_leader'
- 'cpufreq_for_each_entry'
- 'cpufreq_for_each_entry_idx'
- 'cpufreq_for_each_valid_entry'
- 'cpufreq_for_each_valid_entry_idx'
- 'css_for_each_child'
- 'css_for_each_descendant_post'
- 'css_for_each_descendant_pre'
- 'device_for_each_child_node'
- 'displayid_iter_for_each'
- 'dma_fence_chain_for_each'
- 'do_for_each_ftrace_op'
- 'drm_atomic_crtc_for_each_plane'
- 'drm_atomic_crtc_state_for_each_plane'
- 'drm_atomic_crtc_state_for_each_plane_state'
- 'drm_atomic_for_each_plane_damage'
- 'drm_client_for_each_connector_iter'
- 'drm_client_for_each_modeset'
- 'drm_connector_for_each_possible_encoder'
- 'drm_for_each_bridge_in_chain'
- 'drm_for_each_connector_iter'
- 'drm_for_each_crtc'
- 'drm_for_each_crtc_reverse'
- 'drm_for_each_encoder'
- 'drm_for_each_encoder_mask'
- 'drm_for_each_fb'
- 'drm_for_each_legacy_plane'
- 'drm_for_each_plane'
- 'drm_for_each_plane_mask'
- 'drm_for_each_privobj'
- 'drm_mm_for_each_hole'
- 'drm_mm_for_each_node'
- 'drm_mm_for_each_node_in_range'
- 'drm_mm_for_each_node_safe'
- 'flow_action_for_each'
- 'for_each_acpi_dev_match'
- 'for_each_active_dev_scope'
- 'for_each_active_drhd_unit'
- 'for_each_active_iommu'
- 'for_each_aggr_pgid'
- 'for_each_available_child_of_node'
- 'for_each_bio'
- 'for_each_board_func_rsrc'
- 'for_each_bvec'
- 'for_each_card_auxs'
- 'for_each_card_auxs_safe'
- 'for_each_card_components'
- 'for_each_card_dapms'
- 'for_each_card_pre_auxs'
- 'for_each_card_prelinks'
- 'for_each_card_rtds'
- 'for_each_card_rtds_safe'
- 'for_each_card_widgets'
- 'for_each_card_widgets_safe'
- 'for_each_cgroup_storage_type'
- 'for_each_child_of_node'
- 'for_each_clear_bit'
- 'for_each_clear_bit_from'
- 'for_each_cmsghdr'
- 'for_each_compatible_node'
- 'for_each_component_dais'
- 'for_each_component_dais_safe'
- 'for_each_comp_order'
- 'for_each_console'
- 'for_each_cpu'
- 'for_each_cpu_and'
- 'for_each_cpu_not'
- 'for_each_cpu_wrap'
- 'for_each_dapm_widgets'
- 'for_each_dev_addr'
- 'for_each_dev_scope'
- 'for_each_dma_cap_mask'
- 'for_each_dpcm_be'
- 'for_each_dpcm_be_rollback'
- 'for_each_dpcm_be_safe'
- 'for_each_dpcm_fe'
- 'for_each_drhd_unit'
- 'for_each_dss_dev'
- 'for_each_dtpm_table'
- 'for_each_efi_memory_desc'
- 'for_each_efi_memory_desc_in_map'
- 'for_each_element'
- 'for_each_element_extid'
- 'for_each_element_id'
- 'for_each_endpoint_of_node'
- 'for_each_evictable_lru'
- 'for_each_fib6_node_rt_rcu'
- 'for_each_fib6_walker_rt'
- 'for_each_free_mem_pfn_range_in_zone'
- 'for_each_free_mem_pfn_range_in_zone_from'
- 'for_each_free_mem_range'
- 'for_each_free_mem_range_reverse'
- 'for_each_func_rsrc'
- 'for_each_hstate'
- 'for_each_if'
- 'for_each_iommu'
- 'for_each_ip_tunnel_rcu'
- 'for_each_irq_nr'
- 'for_each_link_codecs'
- 'for_each_link_cpus'
- 'for_each_link_platforms'
- 'for_each_lru'
- 'for_each_matching_node'
- 'for_each_matching_node_and_match'
- 'for_each_member'
- 'for_each_memcg_cache_index'
- 'for_each_mem_pfn_range'
- '__for_each_mem_range'
- 'for_each_mem_range'
- '__for_each_mem_range_rev'
- 'for_each_mem_range_rev'
- 'for_each_mem_region'
- 'for_each_migratetype_order'
- 'for_each_msi_entry'
- 'for_each_msi_entry_safe'
- 'for_each_msi_vector'
- 'for_each_net'
- 'for_each_net_continue_reverse'
- 'for_each_netdev'
- 'for_each_netdev_continue'
- 'for_each_netdev_continue_rcu'
- 'for_each_netdev_continue_reverse'
- 'for_each_netdev_feature'
- 'for_each_netdev_in_bond_rcu'
- 'for_each_netdev_rcu'
- 'for_each_netdev_reverse'
- 'for_each_netdev_safe'
- 'for_each_net_rcu'
- 'for_each_new_connector_in_state'
- 'for_each_new_crtc_in_state'
- 'for_each_new_mst_mgr_in_state'
- 'for_each_new_plane_in_state'
- 'for_each_new_private_obj_in_state'
- 'for_each_node'
- 'for_each_node_by_name'
- 'for_each_node_by_type'
- 'for_each_node_mask'
- 'for_each_node_state'
- 'for_each_node_with_cpus'
- 'for_each_node_with_property'
- 'for_each_nonreserved_multicast_dest_pgid'
- 'for_each_of_allnodes'
- 'for_each_of_allnodes_from'
- 'for_each_of_cpu_node'
- 'for_each_of_pci_range'
- 'for_each_old_connector_in_state'
- 'for_each_old_crtc_in_state'
- 'for_each_old_mst_mgr_in_state'
- 'for_each_oldnew_connector_in_state'
- 'for_each_oldnew_crtc_in_state'
- 'for_each_oldnew_mst_mgr_in_state'
- 'for_each_oldnew_plane_in_state'
- 'for_each_oldnew_plane_in_state_reverse'
- 'for_each_oldnew_private_obj_in_state'
- 'for_each_old_plane_in_state'
- 'for_each_old_private_obj_in_state'
- 'for_each_online_cpu'
- 'for_each_online_node'
- 'for_each_online_pgdat'
- 'for_each_pci_bridge'
- 'for_each_pci_dev'
- 'for_each_pci_msi_entry'
- 'for_each_pcm_streams'
- 'for_each_physmem_range'
- 'for_each_populated_zone'
- 'for_each_possible_cpu'
- 'for_each_present_cpu'
- 'for_each_prime_number'
- 'for_each_prime_number_from'
- 'for_each_process'
- 'for_each_process_thread'
- 'for_each_prop_codec_conf'
- 'for_each_prop_dai_codec'
- 'for_each_prop_dai_cpu'
- 'for_each_prop_dlc_codecs'
- 'for_each_prop_dlc_cpus'
- 'for_each_prop_dlc_platforms'
- 'for_each_property_of_node'
- 'for_each_registered_fb'
- 'for_each_requested_gpio'
- 'for_each_requested_gpio_in_range'
- 'for_each_reserved_mem_range'
- 'for_each_reserved_mem_region'
- 'for_each_rtd_codec_dais'
- 'for_each_rtd_components'
- 'for_each_rtd_cpu_dais'
- 'for_each_rtd_dais'
- 'for_each_set_bit'
- 'for_each_set_bit_from'
- 'for_each_set_clump8'
- 'for_each_sg'
- 'for_each_sg_dma_page'
- 'for_each_sg_page'
- 'for_each_sgtable_dma_page'
- 'for_each_sgtable_dma_sg'
- 'for_each_sgtable_page'
- 'for_each_sgtable_sg'
- 'for_each_sibling_event'
- 'for_each_subelement'
- 'for_each_subelement_extid'
- 'for_each_subelement_id'
- '__for_each_thread'
- 'for_each_thread'
- 'for_each_unicast_dest_pgid'
- 'for_each_vsi'
- 'for_each_wakeup_source'
- 'for_each_zone'
- 'for_each_zone_zonelist'
- 'for_each_zone_zonelist_nodemask'
- 'fwnode_for_each_available_child_node'
- 'fwnode_for_each_child_node'
- 'fwnode_graph_for_each_endpoint'
- 'gadget_for_each_ep'
- 'genradix_for_each'
- 'genradix_for_each_from'
- 'hash_for_each'
- 'hash_for_each_possible'
- 'hash_for_each_possible_rcu'
- 'hash_for_each_possible_rcu_notrace'
- 'hash_for_each_possible_safe'
- 'hash_for_each_rcu'
- 'hash_for_each_safe'
- 'hctx_for_each_ctx'
- 'hlist_bl_for_each_entry'
- 'hlist_bl_for_each_entry_rcu'
- 'hlist_bl_for_each_entry_safe'
- 'hlist_for_each'
- 'hlist_for_each_entry'
- 'hlist_for_each_entry_continue'
- 'hlist_for_each_entry_continue_rcu'
- 'hlist_for_each_entry_continue_rcu_bh'
- 'hlist_for_each_entry_from'
- 'hlist_for_each_entry_from_rcu'
- 'hlist_for_each_entry_rcu'
- 'hlist_for_each_entry_rcu_bh'
- 'hlist_for_each_entry_rcu_notrace'
- 'hlist_for_each_entry_safe'
- 'hlist_for_each_entry_srcu'
- '__hlist_for_each_rcu'
- 'hlist_for_each_safe'
- 'hlist_nulls_for_each_entry'
- 'hlist_nulls_for_each_entry_from'
- 'hlist_nulls_for_each_entry_rcu'
- 'hlist_nulls_for_each_entry_safe'
- 'i3c_bus_for_each_i2cdev'
- 'i3c_bus_for_each_i3cdev'
- 'ide_host_for_each_port'
- 'ide_port_for_each_dev'
- 'ide_port_for_each_present_dev'
- 'idr_for_each_entry'
- 'idr_for_each_entry_continue'
- 'idr_for_each_entry_continue_ul'
- 'idr_for_each_entry_ul'
- 'in_dev_for_each_ifa_rcu'
- 'in_dev_for_each_ifa_rtnl'
- 'inet_bind_bucket_for_each'
- 'inet_lhash2_for_each_icsk_rcu'
- 'key_for_each'
- 'key_for_each_safe'
- 'klp_for_each_func'
- 'klp_for_each_func_safe'
- 'klp_for_each_func_static'
- 'klp_for_each_object'
- 'klp_for_each_object_safe'
- 'klp_for_each_object_static'
- 'kunit_suite_for_each_test_case'
- 'kvm_for_each_memslot'
- 'kvm_for_each_vcpu'
- 'list_for_each'
- 'list_for_each_codec'
- 'list_for_each_codec_safe'
- 'list_for_each_continue'
- 'list_for_each_entry'
- 'list_for_each_entry_continue'
- 'list_for_each_entry_continue_rcu'
- 'list_for_each_entry_continue_reverse'
- 'list_for_each_entry_from'
- 'list_for_each_entry_from_rcu'
- 'list_for_each_entry_from_reverse'
- 'list_for_each_entry_lockless'
- 'list_for_each_entry_rcu'
- 'list_for_each_entry_reverse'
- 'list_for_each_entry_safe'
- 'list_for_each_entry_safe_continue'
- 'list_for_each_entry_safe_from'
- 'list_for_each_entry_safe_reverse'
- 'list_for_each_entry_srcu'
- 'list_for_each_prev'
- 'list_for_each_prev_safe'
- 'list_for_each_safe'
- 'llist_for_each'
- 'llist_for_each_entry'
- 'llist_for_each_entry_safe'
- 'llist_for_each_safe'
- 'mci_for_each_dimm'
- 'media_device_for_each_entity'
- 'media_device_for_each_intf'
- 'media_device_for_each_link'
- 'media_device_for_each_pad'
- 'nanddev_io_for_each_page'
- 'netdev_for_each_lower_dev'
- 'netdev_for_each_lower_private'
- 'netdev_for_each_lower_private_rcu'
- 'netdev_for_each_mc_addr'
- 'netdev_for_each_uc_addr'
- 'netdev_for_each_upper_dev_rcu'
- 'netdev_hw_addr_list_for_each'
- 'nft_rule_for_each_expr'
- 'nla_for_each_attr'
- 'nla_for_each_nested'
- 'nlmsg_for_each_attr'
- 'nlmsg_for_each_msg'
- 'nr_neigh_for_each'
- 'nr_neigh_for_each_safe'
- 'nr_node_for_each'
- 'nr_node_for_each_safe'
- 'of_for_each_phandle'
- 'of_property_for_each_string'
- 'of_property_for_each_u32'
- 'pci_bus_for_each_resource'
- 'pcl_for_each_chunk'
- 'pcl_for_each_segment'
- 'pcm_for_each_format'
- 'ping_portaddr_for_each_entry'
- 'plist_for_each'
- 'plist_for_each_continue'
- 'plist_for_each_entry'
- 'plist_for_each_entry_continue'
- 'plist_for_each_entry_safe'
- 'plist_for_each_safe'
- 'pnp_for_each_card'
- 'pnp_for_each_dev'
- 'protocol_for_each_card'
- 'protocol_for_each_dev'
- 'queue_for_each_hw_ctx'
- 'radix_tree_for_each_slot'
- 'radix_tree_for_each_tagged'
- 'rb_for_each'
- 'rbtree_postorder_for_each_entry_safe'
- 'rdma_for_each_block'
- 'rdma_for_each_port'
- 'rdma_umem_for_each_dma_block'
- 'resource_list_for_each_entry'
- 'resource_list_for_each_entry_safe'
- 'rhl_for_each_entry_rcu'
- 'rhl_for_each_rcu'
- 'rht_for_each'
- 'rht_for_each_entry'
- 'rht_for_each_entry_from'
- 'rht_for_each_entry_rcu'
- 'rht_for_each_entry_rcu_from'
- 'rht_for_each_entry_safe'
- 'rht_for_each_from'
- 'rht_for_each_rcu'
- 'rht_for_each_rcu_from'
- '__rq_for_each_bio'
- 'rq_for_each_bvec'
- 'rq_for_each_segment'
- 'scsi_for_each_prot_sg'
- 'scsi_for_each_sg'
- 'sctp_for_each_hentry'
- 'sctp_skb_for_each'
- 'shdma_for_each_chan'
- '__shost_for_each_device'
- 'shost_for_each_device'
- 'sk_for_each'
- 'sk_for_each_bound'
- 'sk_for_each_entry_offset_rcu'
- 'sk_for_each_from'
- 'sk_for_each_rcu'
- 'sk_for_each_safe'
- 'sk_nulls_for_each'
- 'sk_nulls_for_each_from'
- 'sk_nulls_for_each_rcu'
- 'snd_array_for_each'
- 'snd_pcm_group_for_each_entry'
- 'snd_soc_dapm_widget_for_each_path'
- 'snd_soc_dapm_widget_for_each_path_safe'
- 'snd_soc_dapm_widget_for_each_sink_path'
- 'snd_soc_dapm_widget_for_each_source_path'
- 'tb_property_for_each'
- 'tcf_exts_for_each_action'
- 'udp_portaddr_for_each_entry'
- 'udp_portaddr_for_each_entry_rcu'
- 'usb_hub_for_each_child'
- 'v4l2_device_for_each_subdev'
- 'v4l2_m2m_for_each_dst_buf'
- 'v4l2_m2m_for_each_dst_buf_safe'
- 'v4l2_m2m_for_each_src_buf'
- 'v4l2_m2m_for_each_src_buf_safe'
- 'virtio_device_for_each_vq'
- 'while_for_each_ftrace_op'
- 'xa_for_each'
- 'xa_for_each_marked'
- 'xa_for_each_range'
- 'xa_for_each_start'
- 'xas_for_each'
- 'xas_for_each_conflict'
- 'xas_for_each_marked'
- 'xbc_array_for_each_value'
- 'xbc_for_each_key_value'
- 'xbc_node_for_each_array_value'
- 'xbc_node_for_each_child'
- 'xbc_node_for_each_key_value'
- 'zorro_for_each_dev'
#IncludeBlocks: Preserve # Unknown to clang-format-5.0
IncludeCategories:
- Regex: '.*'
Priority: 1
IncludeIsMainRegex: '(Test)?$'
IndentCaseLabels: false
#IndentPPDirectives: None # Unknown to clang-format-5.0
IndentWidth: 8
IndentWrappedFunctionNames: false
JavaScriptQuotes: Leave
JavaScriptWrapImports: true
KeepEmptyLinesAtTheStartOfBlocks: false
MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
#ObjCBinPackProtocolList: Auto # Unknown to clang-format-5.0
ObjCBlockIndentWidth: 8
ObjCSpaceAfterProperty: true
ObjCSpaceBeforeProtocolList: true
# Taken from git's rules
#PenaltyBreakAssignment: 10 # Unknown to clang-format-4.0
PenaltyBreakBeforeFirstCallParameter: 30
PenaltyBreakComment: 10
PenaltyBreakFirstLessLess: 0
PenaltyBreakString: 10
PenaltyExcessCharacter: 100
PenaltyReturnTypeOnItsOwnLine: 60
PointerAlignment: Right
ReflowComments: false
SortIncludes: false
#SortUsingDeclarations: false # Unknown to clang-format-4.0
SpaceAfterCStyleCast: false
SpaceAfterTemplateKeyword: true
SpaceBeforeAssignmentOperators: true
#SpaceBeforeCtorInitializerColon: true # Unknown to clang-format-5.0
#SpaceBeforeInheritanceColon: true # Unknown to clang-format-5.0
SpaceBeforeParens: ControlStatements
#SpaceBeforeRangeBasedForLoopColon: true # Unknown to clang-format-5.0
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 1
SpacesInAngles: false
SpacesInContainerLiterals: false
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Standard: Cpp03
TabWidth: 8
UseTab: Always
...

7
.gdbinit Normal file

@ -0,0 +1,7 @@
set confirm off
set architecture riscv:rv64
target remote 127.0.0.1:15234
symbol-file build/kernel
display/12i $pc-8
set riscv use-compressed-breakpoints yes
break *0x1000

44
.github/workflows/github-autotest.yml vendored Normal file

@ -0,0 +1,44 @@
name: auto-test
on:
push:
jobs:
base-test:
runs-on: ubuntu-latest
outputs:
points: ${{ steps.end.outputs.points}}
container:
image: duskmoon/dev-env:ucore-ci
steps:
- uses: actions/checkout@v3
- run: git clone https://github.com/LearningOS/uCore-Tutorial-Checker-2023S.git ucore-tutorial-ci
- run: git clone https://github.com/LearningOS/uCore-Tutorial-Test-2023S.git ucore-tutorial-ci/workplace/user
- name: run test
id: tester
run: cd ucore-tutorial-ci && make test passwd=${{ secrets.BASE_TEST_TOKEN }} CHAPTER=`echo ${GITHUB_REF##*/} | grep -oP 'ch\K[0-9]'` | tee ../output.txt
- name: end
id: end
run: cat output.txt | grep "Test passed" | grep -oP "\d{1,}/\d{1,}" | xargs -i echo "points={}" >> $GITHUB_OUTPUT
deploy:
if: github.repository != 'LearningOS/uCore-Tutorial-Code-2023S'
name: Deploy to pages
needs: base-test
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
continue-on-error: true
with:
ref: 'gh-pages'
- name: Save Log File
uses: yfblock/multi-rank-log@main
with:
points: ${{ needs.base-test.outputs.points }}
- name: GitHub Pages
uses: crazy-max/ghaction-github-pages@v3
with:
target_branch: gh-pages
build_dir: ./public
keep_history: true
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

22
.github/workflows/gitlab-mirror.yml vendored Normal file

@ -0,0 +1,22 @@
name: Mirror and run GitLab CI
on:
push:
branches:
- 'ch[0-9]'
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v1
- name: Mirror + trigger CI
uses: Gallium70/gitlab-mirror-and-ci-action@master
with:
args: "https://git.tsinghua.edu.cn/os-lab/2023s/public/ucore-tutorial-code-2023s"
env:
GITLAB_HOSTNAME: "git.tsinghua.edu.cn"
GITLAB_PROJECT_ID: "20789"
GITLAB_PROJECT_NAME: "ucore-tutorial-code-2023s"
GITLAB_PROJECT_TOKEN: ${{secrets.GITLAB_PROJECT_TOKEN}}
GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}

15
.gitignore vendored Normal file

@ -0,0 +1,15 @@
.DS_Store
.vscode
.idea
build
target
/user
link_app.S
initproc.S
kernel_app.ld
*.o
*.d
*.asm
*.sym
nfs/*.img
nfs/fs

12
.gitlab-ci.yml Normal file

@ -0,0 +1,12 @@
default:
image: duskmoon/dev-env:ucore-ci
stages:
- test
test-code-job:
stage: test
script:
- git clone https://token:${UCORE_CHECKER_REPO_READ_TOKEN_2023S}@git.tsinghua.edu.cn/os-lab/2023s/ta/ucore-tutorial-checker-2023s.git ucore-tutorial-ci
- git clone https://token:${UCORE_TEST_REPO_READ_TOKEN_2023S}@git.tsinghua.edu.cn/os-lab/2023s/public/ucore-tutorial-test-2023s.git ucore-tutorial-ci/workplace/user
- cd ucore-tutorial-ci && make test CHAPTER=`echo $CI_COMMIT_REF_NAME | grep -oP 'ch\K[0-9]'`

137
Makefile Normal file

@ -0,0 +1,137 @@
.PHONY: clean build user run debug test .FORCE
all: build
K = os
U = user
F = nfs
TOOLPREFIX = riscv64-unknown-elf-
CC = $(TOOLPREFIX)gcc
AS = $(TOOLPREFIX)gcc
LD = $(TOOLPREFIX)ld
OBJCOPY = $(TOOLPREFIX)objcopy
OBJDUMP = $(TOOLPREFIX)objdump
PY = python3
GDB = $(TOOLPREFIX)gdb
CP = cp
BUILDDIR = build
C_SRCS = $(wildcard $K/*.c)
AS_SRCS = $(wildcard $K/*.S)
C_OBJS = $(addprefix $(BUILDDIR)/, $(addsuffix .o, $(basename $(C_SRCS))))
AS_OBJS = $(addprefix $(BUILDDIR)/, $(addsuffix .o, $(basename $(AS_SRCS))))
OBJS = $(C_OBJS) $(AS_OBJS)
HEADER_DEP = $(addsuffix .d, $(basename $(C_OBJS)))
ifeq (,$(findstring initproc.o,$(OBJS)))
AS_OBJS += $(BUILDDIR)/$K/initproc.o
endif
INIT_PROC ?= usershell
$(K)/initproc.o: $K/initproc.S
$(K)/initproc.S: scripts/initproc.py .FORCE
@$(PY) scripts/initproc.py $(INIT_PROC)
CFLAGS = -Wall -Werror -O -fno-omit-frame-pointer -ggdb
CFLAGS += -MD
CFLAGS += -mcmodel=medany
CFLAGS += -ffreestanding -fno-common -nostdlib -mno-relax
CFLAGS += -I$K
CFLAGS += $(shell $(CC) -fno-stack-protector -E -x c /dev/null >/dev/null 2>&1 && echo -fno-stack-protector)
LOG ?= error
ifeq ($(LOG), error)
CFLAGS += -D LOG_LEVEL_ERROR
else ifeq ($(LOG), warn)
CFLAGS += -D LOG_LEVEL_WARN
else ifeq ($(LOG), info)
CFLAGS += -D LOG_LEVEL_INFO
else ifeq ($(LOG), debug)
CFLAGS += -D LOG_LEVEL_DEBUG
else ifeq ($(LOG), trace)
CFLAGS += -D LOG_LEVEL_TRACE
endif
# Disable PIE when possible (for Ubuntu 16.10 toolchain)
ifneq ($(shell $(CC) -dumpspecs 2>/dev/null | grep -e '[^f]no-pie'),)
CFLAGS += -fno-pie -no-pie
endif
ifneq ($(shell $(CC) -dumpspecs 2>/dev/null | grep -e '[^f]nopie'),)
CFLAGS += -fno-pie -nopie
endif
# empty target
.FORCE:
LDFLAGS = -z max-page-size=4096
$(AS_OBJS): $(BUILDDIR)/$K/%.o : $K/%.S
@mkdir -p $(@D)
$(CC) $(CFLAGS) -c $< -o $@
$(C_OBJS): $(BUILDDIR)/$K/%.o : $K/%.c $(BUILDDIR)/$K/%.d
@mkdir -p $(@D)
$(CC) $(CFLAGS) -c $< -o $@
$(HEADER_DEP): $(BUILDDIR)/$K/%.d : $K/%.c
@mkdir -p $(@D)
@set -e; rm -f $@; $(CC) -MM $< $(INCLUDEFLAGS) > $@.$$$$; \
sed 's,\($*\)\.o[ :]*,\1.o $@ : ,g' < $@.$$$$ > $@; \
rm -f $@.$$$$
INIT_PROC ?= usershell
build: build/kernel
build/kernel: $(OBJS) os/kernel.ld
$(LD) $(LDFLAGS) -T os/kernel.ld -o $(BUILDDIR)/kernel $(OBJS)
$(OBJDUMP) -S $(BUILDDIR)/kernel > $(BUILDDIR)/kernel.asm
$(OBJDUMP) -t $(BUILDDIR)/kernel | sed '1,/SYMBOL TABLE/d; s/ .* / /; /^$$/d' > $(BUILDDIR)/kernel.sym
@echo 'Build kernel done'
clean:
rm -rf $(BUILDDIR) os/initproc.S
rm $(F)/*.img
# BOARD
BOARD ?= qemu
SBI ?= rustsbi
BOOTLOADER := ./bootloader/rustsbi-qemu.bin
QEMU = qemu-system-riscv64
QEMUOPTS = \
-nographic \
-machine virt \
-bios $(BOOTLOADER) \
-kernel build/kernel \
-drive file=$(F)/fs-copy.img,if=none,format=raw,id=x0 \
-device virtio-blk-device,drive=x0,bus=virtio-mmio-bus.0
$(F)/fs.img:
make -C $(F)
$(F)/fs-copy.img: $(F)/fs.img
@$(CP) $< $@
run: build/kernel $(F)/fs-copy.img
$(QEMU) $(QEMUOPTS)
# QEMU's gdb stub command line changed in 0.11
QEMUGDB = $(shell if $(QEMU) -help | grep -q '^-gdb'; \
then echo "-gdb tcp::15234"; \
else echo "-s -p 15234"; fi)
debug: build/kernel .gdbinit $(F)/fs-copy.img
$(QEMU) $(QEMUOPTS) -S $(QEMUGDB) &
sleep 1
$(GDB)
CHAPTER ?= $(shell git rev-parse --abbrev-ref HEAD | grep -oP 'ch\K[0-9]')
user:
make -C user CHAPTER=$(CHAPTER) BASE=$(BASE)
test: user run

README.md

@ -9,3 +9,20 @@ Course project for THU-OS.
The baseline code for labs lab1-lab5 lives on the ch3-ch8 branches respectively.
Note: to stay compatible with Tsinghua Git, keep students from writing code on the main branch, and make the main branch's purpose explicit, a separate master branch containing only the README and LICENSE was created. When doing the course labs, push the master branch to Tsinghua Git first after cloning the repository, then switch to the branch you need for development.
## Local development and testing
When developing and testing locally, you need to pull uCore-Tutorial-Test-2022A into the `user` folder. Choose one of the following depending on your network situation and personal preference:
```bash
# Tsinghua Git over HTTPS
git clone https://git.tsinghua.edu.cn/os-lab/public/ucore-tutorial-test-2022a.git user
# Tsinghua Git over SSH
git clone git@git.tsinghua.edu.cn:os-lab/public/ucore-tutorial-test-2022a.git user
# GitHub over HTTPS
git clone https://github.com/LearningOS/uCore-Tutorial-Test-2022A.git user
# GitHub over SSH
git clone git@github.com:LearningOS/uCore-Tutorial-Test-2022A.git user
```
Note: `user` has already been added to `.gitignore`; you do not need to commit it, and CI will not use it.

BIN
bootloader/rustsbi-qemu.bin Normal file

Binary file not shown.

14
nfs/Makefile Normal file

@ -0,0 +1,14 @@
.DEFAULT_GOAL = fs.img
U := ../user
USER_BIN_DIR := target/bin
FS_FUSE := fs
$(FS_FUSE): fs.c fs.h types.h
fs.img: $(FS_FUSE)
./$(FS_FUSE) $@ $(wildcard $(U)/$(USER_BIN_DIR)/*)
clean:
rm *.img $(FS_FUSE)

267
nfs/fs.c Normal file

@ -0,0 +1,267 @@
#include <assert.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "fs.h"
#ifndef static_assert
#define static_assert(a, b) \
do { \
switch (0) \
case 0: \
case (a):; \
} while (0)
#endif
#define NINODES 200
// Disk layout:
// [ boot block | sb block | inode blocks | free bit map | data blocks ]
int nbitmap = FSSIZE / (BSIZE * 8) + 1;
int ninodeblocks = NINODES / IPB + 1;
int nmeta; // Number of meta blocks (boot, sb, inode, bitmap)
int nblocks; // Number of data blocks
int fsfd;
struct superblock sb;
char zeroes[BSIZE];
uint freeinode = 1;
uint freeblock;
char *basename(char *);
void balloc(int);
void wsect(uint, void *);
void winode(uint, struct dinode *);
void rinode(uint inum, struct dinode *ip);
void rsect(uint sec, void *buf);
uint ialloc(ushort type);
void iappend(uint inum, void *p, int n);
// convert to intel byte order
ushort xshort(ushort x)
{
ushort y;
uchar *a = (uchar *)&y;
a[0] = x;
a[1] = x >> 8;
return y;
}
uint xint(uint x)
{
uint y;
uchar *a = (uchar *)&y;
a[0] = x;
a[1] = x >> 8;
a[2] = x >> 16;
a[3] = x >> 24;
return y;
}
int main(int argc, char *argv[])
{
int i, cc, fd;
uint rootino, inum, off;
struct dirent de;
char buf[BSIZE];
struct dinode din;
static_assert(sizeof(int) == 4, "Integers must be 4 bytes!");
if (argc < 2) {
fprintf(stderr, "Usage: mkfs fs.img files...\n");
exit(1);
}
assert((BSIZE % sizeof(struct dinode)) == 0);
fsfd = open(argv[1], O_RDWR | O_CREAT | O_TRUNC, 0666);
if (fsfd < 0) {
perror(argv[1]);
exit(1);
}
// 1 fs block = 1 disk sector
nmeta = 2 + ninodeblocks + nbitmap;
nblocks = FSSIZE - nmeta;
sb.magic = FSMAGIC;
sb.size = xint(FSSIZE);
sb.nblocks = xint(nblocks);
sb.ninodes = xint(NINODES);
sb.inodestart = xint(2);
sb.bmapstart = xint(2 + ninodeblocks);
printf("nmeta %d (boot, super, inode blocks %u, bitmap blocks %u) blocks %d "
"total %d\n",
nmeta, ninodeblocks, nbitmap, nblocks, FSSIZE);
freeblock = nmeta; // the first free block that we can allocate
for (i = 0; i < FSSIZE; i++)
wsect(i, zeroes);
memset(buf, 0, sizeof(buf));
memmove(buf, &sb, sizeof(sb));
wsect(1, buf);
rootino = ialloc(T_DIR);
for (i = 2; i < argc; i++) {
char *shortname = basename(argv[i]);
assert(index(shortname, '/') == 0);
if ((fd = open(argv[i], 0)) < 0) {
perror(argv[i]);
exit(1);
}
inum = ialloc(T_FILE);
bzero(&de, sizeof(de));
de.inum = xshort(inum);
strncpy(de.name, shortname, DIRSIZ);
iappend(rootino, &de, sizeof(de));
while ((cc = read(fd, buf, sizeof(buf))) > 0)
iappend(inum, buf, cc);
close(fd);
}
// fix size of root inode dir
rinode(rootino, &din);
off = xint(din.size);
off = ((off / BSIZE) + 1) * BSIZE;
din.size = xint(off);
winode(rootino, &din);
balloc(freeblock);
return 0;
}
char *basename(char *path)
{
while (index(path, '/') != 0) {
path = index(path, '/') + 1;
}
return path;
}
void wsect(uint sec, void *buf)
{
if (lseek(fsfd, sec * BSIZE, 0) != sec * BSIZE) {
perror("lseek");
exit(1);
}
if (write(fsfd, buf, BSIZE) != BSIZE) {
perror("write");
exit(1);
}
}
void winode(uint inum, struct dinode *ip)
{
char buf[BSIZE];
uint bn;
struct dinode *dip;
bn = IBLOCK(inum, sb);
rsect(bn, buf);
dip = ((struct dinode *)buf) + (inum % IPB);
*dip = *ip;
wsect(bn, buf);
}
void rinode(uint inum, struct dinode *ip)
{
char buf[BSIZE];
uint bn;
struct dinode *dip;
bn = IBLOCK(inum, sb);
rsect(bn, buf);
dip = ((struct dinode *)buf) + (inum % IPB);
*ip = *dip;
}
void rsect(uint sec, void *buf)
{
if (lseek(fsfd, sec * BSIZE, 0) != sec * BSIZE) {
perror("lseek");
exit(1);
}
if (read(fsfd, buf, BSIZE) != BSIZE) {
perror("read");
exit(1);
}
}
uint ialloc(ushort type)
{
uint inum = freeinode++;
struct dinode din;
bzero(&din, sizeof(din));
din.type = xshort(type);
din.size = xint(0);
// LAB4: You may want to init link count here
winode(inum, &din);
return inum;
}
void balloc(int used)
{
uchar buf[BSIZE];
int i;
assert(used < BSIZE * 8);
bzero(buf, BSIZE);
for (i = 0; i < used; i++) {
buf[i / 8] = buf[i / 8] | (0x1 << (i % 8));
}
wsect(sb.bmapstart, buf);
}
#define min(a, b) ((a) < (b) ? (a) : (b))
void iappend(uint inum, void *xp, int n)
{
char *p = (char *)xp;
uint fbn, off, n1;
struct dinode din;
char buf[BSIZE];
uint indirect[NINDIRECT];
uint x;
rinode(inum, &din);
off = xint(din.size);
while (n > 0) {
fbn = off / BSIZE;
assert(fbn < MAXFILE);
if (fbn < NDIRECT) {
if (xint(din.addrs[fbn]) == 0) {
din.addrs[fbn] = xint(freeblock++);
}
x = xint(din.addrs[fbn]);
} else {
if (xint(din.addrs[NDIRECT]) == 0) {
din.addrs[NDIRECT] = xint(freeblock++);
}
rsect(xint(din.addrs[NDIRECT]), (char *)indirect);
if (indirect[fbn - NDIRECT] == 0) {
indirect[fbn - NDIRECT] = xint(freeblock++);
wsect(xint(din.addrs[NDIRECT]),
(char *)indirect);
}
x = xint(indirect[fbn - NDIRECT]);
}
n1 = min(n, (fbn + 1) * BSIZE - off);
rsect(x, buf);
bcopy(p, buf + off - (fbn * BSIZE), n1);
wsect(x, buf);
n -= n1;
off += n1;
p += n1;
}
din.size = xint(off);
winode(inum, &din);
}

73
nfs/fs.h Normal file

@ -0,0 +1,73 @@
#ifndef __FS_H__
#define __FS_H__
#include "types.h"
// On-disk file system format.
// Both the kernel and user programs use this header file.
#define NFILE 100 // open files per system
#define NINODE 50 // maximum number of active i-nodes
#define NDEV 10 // maximum major device number
#define ROOTDEV 1 // device number of file system root disk
#define MAXOPBLOCKS 10 // max # of blocks any FS op writes
#define NBUF (MAXOPBLOCKS * 3) // size of disk block cache
#define FSSIZE 1000 // size of file system in blocks
#define MAXPATH 128 // maximum file path name
#define ROOTINO 1 // root i-number
#define BSIZE 1024 // block size
// Disk layout:
// [ boot block | super block | inode blocks | free bit map | data blocks]
//
// mkfs computes the super block and builds an initial file system. The
// super block describes the disk layout:
struct superblock {
uint magic; // Must be FSMAGIC
uint size; // Size of file system image (blocks)
uint nblocks; // Number of data blocks
uint ninodes; // Number of inodes.
uint inodestart; // Block number of first inode block
uint bmapstart; // Block number of first free map block
};
#define FSMAGIC 0x10203040
#define NDIRECT 12
#define NINDIRECT (BSIZE / sizeof(uint))
#define MAXFILE (NDIRECT + NINDIRECT)
// File type
#define T_DIR 1 // Directory
#define T_FILE 2 // File
// LAB4: Keep it the same as dinode in os/fs.h after you change it
// On-disk inode structure
struct dinode {
short type; // File type
short pad[3];
uint size; // Size of file (bytes)
uint addrs[NDIRECT + 1]; // Data block addresses
};
// Inodes per block.
#define IPB (BSIZE / sizeof(struct dinode))
// Block containing inode i
#define IBLOCK(i, sb) ((i) / IPB + sb.inodestart)
// Bitmap bits per block
#define BPB (BSIZE * 8)
// Block of free map containing bit for block b
#define BBLOCK(b, sb) ((b) / BPB + sb.bmapstart)
// Directory is a file containing a sequence of dirent structures.
#define DIRSIZ 14
struct dirent {
ushort inum;
char name[DIRSIZ];
};
#endif //!__FS_H__
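
The layout macros above (IPB, IBLOCK, BBLOCK) are easiest to sanity-check with concrete numbers. Below is a small host-side sketch, not part of the repository; it plugs in the values mkfs computes in nfs/fs.c (inodestart = 2, and bmapstart = 15 for NINODES = 200) and prints where a few inodes and bitmap bits land.

```c
/* check.c - hypothetical host program, compiled inside nfs/ with: cc check.c -o check
 * With BSIZE = 1024 and sizeof(struct dinode) = 64, IPB = 16 inodes per block. */
#include <assert.h>
#include <stdio.h>
#include "fs.h"

int main(void)
{
	/* inodestart/bmapstart as mkfs (nfs/fs.c) computes them for NINODES = 200 */
	struct superblock sb = { .inodestart = 2, .bmapstart = 15 };

	assert(sizeof(struct dinode) == 64);
	assert(IPB == 16);
	printf("inode 7   -> block %lu\n", (unsigned long)IBLOCK(7, sb));  /* 2 + 7/16  = 2 */
	printf("inode 20  -> block %lu\n", (unsigned long)IBLOCK(20, sb)); /* 2 + 20/16 = 3 */
	printf("block 999 -> bitmap block %lu\n", (unsigned long)BBLOCK(999, sb)); /* 15 */
	return 0;
}
```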

12
nfs/types.h Normal file

@ -0,0 +1,12 @@
#ifndef TYPES_H
#define TYPES_H
typedef unsigned int uint;
typedef unsigned short ushort;
typedef unsigned char uchar;
typedef unsigned char uint8;
typedef unsigned short uint16;
typedef unsigned int uint32;
typedef unsigned long uint64;
#endif // TYPES_H

114
os/bio.c Normal file

@ -0,0 +1,114 @@
// Buffer cache.
//
// The buffer cache is a linked list of buf structures holding
// cached copies of disk block contents. Caching disk blocks
// in memory reduces the number of disk reads and also provides
// a synchronization point for disk blocks used by multiple processes.
//
// Interface:
// * To get a buffer for a particular disk block, call bread.
// * After changing buffer data, call bwrite to write it to disk.
// * When done with the buffer, call brelse.
// * Do not use the buffer after calling brelse.
// * Only one process at a time can use a buffer,
// so do not keep them longer than necessary.
#include "bio.h"
#include "defs.h"
#include "fs.h"
#include "riscv.h"
#include "types.h"
#include "virtio.h"
struct {
struct buf buf[NBUF];
struct buf head;
} bcache;
void binit()
{
struct buf *b;
// Create linked list of buffers
bcache.head.prev = &bcache.head;
bcache.head.next = &bcache.head;
for (b = bcache.buf; b < bcache.buf + NBUF; b++) {
b->next = bcache.head.next;
b->prev = &bcache.head;
bcache.head.next->prev = b;
bcache.head.next = b;
}
}
// Look through buffer cache for block on device dev.
// If not found, allocate a buffer.
static struct buf *bget(uint dev, uint blockno)
{
struct buf *b;
// Is the block already cached?
for (b = bcache.head.next; b != &bcache.head; b = b->next) {
if (b->dev == dev && b->blockno == blockno) {
b->refcnt++;
return b;
}
}
// Not cached.
// Recycle the least recently used (LRU) unused buffer.
for (b = bcache.head.prev; b != &bcache.head; b = b->prev) {
if (b->refcnt == 0) {
b->dev = dev;
b->blockno = blockno;
b->valid = 0;
b->refcnt = 1;
return b;
}
}
panic("bget: no buffers");
return 0;
}
const int R = 0;
const int W = 1;
// Return a buf with the contents of the indicated block.
struct buf *bread(uint dev, uint blockno)
{
struct buf *b;
b = bget(dev, blockno);
if (!b->valid) {
virtio_disk_rw(b, R);
b->valid = 1;
}
return b;
}
// Write b's contents to disk.
void bwrite(struct buf *b)
{
virtio_disk_rw(b, W);
}
// Release a buffer.
// Move to the head of the most-recently-used list.
void brelse(struct buf *b)
{
b->refcnt--;
if (b->refcnt == 0) {
// no one is waiting for it.
b->next->prev = b->prev;
b->prev->next = b->next;
b->next = bcache.head.next;
b->prev = &bcache.head;
bcache.head.next->prev = b;
bcache.head.next = b;
}
}
void bpin(struct buf *b)
{
b->refcnt++;
}
void bunpin(struct buf *b)
{
b->refcnt--;
}
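
The interface comment at the top of bio.c is the whole contract: bread to get a block, bwrite after changing its data, brelse when done, and never touch the buffer afterwards. A minimal sketch of that pattern from a kernel-side caller follows; zero_block is illustrative and not a function in this repository (memset is reachable through defs.h, as elsewhere in the kernel).

```c
#include "bio.h"
#include "defs.h"

// Illustrative helper following the bread/bwrite/brelse discipline above.
void zero_block(uint dev, uint blockno)
{
	struct buf *b = bread(dev, blockno); // fetch the block (reads from disk if not cached)
	memset(b->data, 0, BSIZE);           // modify the in-memory copy
	bwrite(b);                           // write the change back to disk
	brelse(b);                           // release; b must not be used after this
}
```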

25
os/bio.h Normal file

@ -0,0 +1,25 @@
#ifndef BUF_H
#define BUF_H
#include "fs.h"
#include "types.h"
struct buf {
int valid; // has data been read from disk?
int disk; // does disk "own" buf?
uint dev;
uint blockno;
uint refcnt;
struct buf *prev; // LRU cache list
struct buf *next;
uchar data[BSIZE];
};
void binit(void);
struct buf *bread(uint, uint);
void brelse(struct buf *);
void bwrite(struct buf *);
void bpin(struct buf *);
void bunpin(struct buf *);
#endif // BUF_H

17
os/console.c Normal file

@ -0,0 +1,17 @@
#include "console.h"
#include "sbi.h"
void consputc(int c)
{
console_putchar(c);
}
void console_init()
{
// DO NOTHING
}
int consgetc()
{
return console_getchar();
}

8
os/console.h Normal file

@ -0,0 +1,8 @@
#ifndef CONSOLE_H
#define CONSOLE_H
void consputc(int);
int consgetc();
void console_init();
#endif // CONSOLE_H

31
os/const.h Normal file

@ -0,0 +1,31 @@
#ifndef CONST_H
#define CONST_H
#define PAGE_SIZE (0x1000)
// memory layout
// the kernel expects there to be RAM
// for use by the kernel and user pages
// from physical address 0x80000000 to PHYSTOP.
#define KERNBASE 0x80200000L
#define PHYSTOP (0x80000000 + 128 * 1024 * 1024) // we have 128M of memory
// one beyond the highest possible virtual address.
// MAXVA is actually one bit less than the max allowed by
// Sv39, to avoid having to sign-extend virtual addresses
// that have the high bit set.
#define MAXVA (1L << (9 + 9 + 9 + 12 - 1))
// map the trampoline page to the highest address,
// in both user and kernel space.
#define USER_TOP (MAXVA)
#define TRAMPOLINE (USER_TOP - PGSIZE)
#define TRAPFRAME (TRAMPOLINE - PGSIZE)
#define MAX_APP_NUM (32)
#define MAX_STR_LEN (300)
#define IDLE_PID (0)
#define MAX_ARG_NUM (32) // max exec arguments
#endif // CONST_H
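
For reference, the layout constants above work out to the following concrete Sv39 addresses. This is just the arithmetic spelled out, assuming PGSIZE equals PAGE_SIZE (0x1000); PGSIZE itself comes from riscv.h, which is not part of this diff.

```c
// MAXVA      = 1L << (9 + 9 + 9 + 12 - 1)    = 0x40_0000_0000
// TRAMPOLINE = MAXVA - PGSIZE                = 0x3f_ffff_f000
// TRAPFRAME  = TRAMPOLINE - PGSIZE           = 0x3f_ffff_e000
// PHYSTOP    = 0x8000_0000 + 128 MiB         = 0x8800_0000
```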

23
os/defs.h Normal file

@ -0,0 +1,23 @@
#ifndef DEFS_H
#define DEFS_H
#include "const.h"
#include "file.h"
#include "kalloc.h"
#include "log.h"
#include "printf.h"
#include "proc.h"
#include "riscv.h"
#include "sbi.h"
#include "string.h"
#include "types.h"
#include "vm.h"
// number of elements in fixed-size array
#define NELEM(x) (sizeof(x) / sizeof((x)[0]))
#define MIN(a, b) ((a) < (b) ? (a) : (b))
#define MAX(a, b) ((a) > (b) ? (a) : (b))
#define NULL ((void *)0)
#endif // DEFS_H

12
os/entry.S Normal file

@ -0,0 +1,12 @@
.section .text.entry
.globl _entry
_entry:
la sp, boot_stack_top
call main
.section .bss.stack
.globl boot_stack
boot_stack:
.space 4096 * 16
.globl boot_stack_top
boot_stack_top:

10
os/fcntl.h Normal file

@ -0,0 +1,10 @@
#ifndef FCNTL_H
#define FCNTL_H
#define O_RDONLY 0x000
#define O_WRONLY 0x001
#define O_RDWR 0x002
#define O_CREATE 0x200
#define O_TRUNC 0x400
#endif // FCNTL_H

159
os/file.c Normal file

@ -0,0 +1,159 @@
#include "file.h"
#include "defs.h"
#include "fcntl.h"
#include "fs.h"
#include "proc.h"
//This is a system-level open file table that holds the open files of all processes.
struct file filepool[FILEPOOLSIZE];
//Abstract the stdio into a file.
struct file *stdio_init(int fd)
{
struct file *f = filealloc();
f->type = FD_STDIO;
f->ref = 1;
f->readable = (fd == STDIN || fd == STDERR);
f->writable = (fd == STDOUT || fd == STDERR);
return f;
}
//The operation performed on the system-level open file table entry after some process closes a file.
void fileclose(struct file *f)
{
if (f->ref < 1)
panic("fileclose");
if (--f->ref > 0) {
return;
}
switch (f->type) {
case FD_STDIO:
// Do nothing
break;
case FD_PIPE:
pipeclose(f->pipe, f->writable);
break;
case FD_INODE:
iput(f->ip);
break;
default:
panic("unknown file type %d\n", f->type);
}
f->off = 0;
f->readable = 0;
f->writable = 0;
f->ref = 0;
f->type = FD_NONE;
}
//Add a new system-level table entry for the open file table
struct file *filealloc()
{
for (int i = 0; i < FILEPOOLSIZE; ++i) {
if (filepool[i].ref == 0) {
filepool[i].ref = 1;
return &filepool[i];
}
}
return 0;
}
//Show names of all files in the root_dir.
int show_all_files()
{
return dirls(root_dir());
}
//Create a new empty file based on path and type and return its inode;
//if the file under the path exists, return its inode;
//returns 0 if the type of the file to be created is not T_FILE
static struct inode *create(char *path, short type)
{
struct inode *ip, *dp;
//Remember that the root inode is opened in this step, so it must be closed afterwards.
dp = root_dir();
ivalid(dp);
if ((ip = dirlookup(dp, path, 0)) != 0) {
warnf("create an existing file\n");
iput(dp); //Close the root_inode
ivalid(ip);
if (type == T_FILE && ip->type == T_FILE)
return ip;
iput(ip);
return 0;
}
if ((ip = ialloc(dp->dev, type)) == 0)
panic("create: ialloc");
tracef("create dinode and inode type = %d\n", type);
ivalid(ip);
iupdate(ip);
if (dirlink(dp, path, ip->inum) < 0)
panic("create: dirlink");
iput(dp);
return ip;
}
//A process creates or opens a file according to its path, returning the file descriptor of the created or opened file.
//If omode is O_CREATE, create a new file
//otherwise, open an existing file.
int fileopen(char *path, uint64 omode)
{
int fd;
struct file *f;
struct inode *ip;
if (omode & O_CREATE) {
ip = create(path, T_FILE);
if (ip == 0) {
return -1;
}
} else {
if ((ip = namei(path)) == 0) {
return -1;
}
ivalid(ip);
}
if (ip->type != T_FILE)
panic("unsupported file inode type\n");
if ((f = filealloc()) == 0 || (fd = fdalloc(f)) < 0) {
//Assign a system-level table entry to a newly created or opened file
//and then create a file descriptor that points to it
if (f)
fileclose(f);
iput(ip);
return -1;
}
// only support FD_INODE
f->type = FD_INODE;
f->off = 0;
f->ip = ip;
f->readable = !(omode & O_WRONLY);
f->writable = (omode & O_WRONLY) || (omode & O_RDWR);
if ((omode & O_TRUNC) && ip->type == T_FILE) {
itrunc(ip);
}
return fd;
}
// Write data to inode.
uint64 inodewrite(struct file *f, uint64 va, uint64 len)
{
int r;
ivalid(f->ip);
if ((r = writei(f->ip, 1, va, f->off, len)) > 0)
f->off += r;
return r;
}
//Read data from inode.
uint64 inoderead(struct file *f, uint64 va, uint64 len)
{
int r;
ivalid(f->ip);
if ((r = readi(f->ip, 1, va, f->off, len)) > 0)
f->off += r;
return r;
}

65
os/file.h Normal file

@ -0,0 +1,65 @@
#ifndef FILE_H
#define FILE_H
#include "fs.h"
#include "proc.h"
#include "types.h"
#define PIPESIZE (512)
#define FILEPOOLSIZE (NPROC * FD_BUFFER_SIZE)
// in-memory copy of an inode; it can be used to quickly locate the file's data on disk
struct inode {
uint dev; // Device number
uint inum; // Inode number
int ref; // Reference count
int valid; // inode has been read from disk?
short type; // copy of disk inode
uint size;
uint addrs[NDIRECT + 1];
// LAB4: You may need to add link count here
};
//a struct for pipe
struct pipe {
char data[PIPESIZE];
uint nread; // number of bytes read
uint nwrite; // number of bytes written
int readopen; // read fd is still open
int writeopen; // write fd is still open
};
// file.h
// Defines a file in memory that provides information about the current use of the file and the corresponding inode location
struct file {
enum { FD_NONE = 0, FD_PIPE, FD_INODE, FD_STDIO } type;
int ref; // reference count
char readable;
char writable;
struct pipe *pipe; // FD_PIPE
struct inode *ip; // FD_INODE
uint off;
};
//A few specific fd
enum {
STDIN = 0,
STDOUT = 1,
STDERR = 2,
};
extern struct file filepool[FILEPOOLSIZE];
int pipealloc(struct file *, struct file *);
void pipeclose(struct pipe *, int);
int piperead(struct pipe *, uint64, int);
int pipewrite(struct pipe *, uint64, int);
void fileclose(struct file *);
struct file *filealloc();
int fileopen(char *, uint64);
uint64 inodewrite(struct file *, uint64, uint64);
uint64 inoderead(struct file *, uint64, uint64);
struct file *stdio_init(int);
int show_all_files();
#endif // FILE_H
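
file.h describes the system-level open file table (filepool) and the FD_STDIO abstraction built on top of it. The sketch below shows how a process's first three descriptors could be wired up and torn down with stdio_init and fileclose; it is illustrative only, since the real per-process fd table lives in struct proc (os/proc.h, not shown in this diff), so a local array stands in for it here.

```c
#include "file.h"

// Illustrative only: wire up stdin/stdout/stderr as FD_STDIO files and release them.
void stdio_sketch(void)
{
	struct file *files[3]; // stand-in for the per-process fd table in struct proc

	files[STDIN] = stdio_init(STDIN);   // readable, not writable
	files[STDOUT] = stdio_init(STDOUT); // writable, not readable
	files[STDERR] = stdio_init(STDERR); // both, per the flags set in file.c

	// ... the process reads and writes through these entries ...

	for (int fd = STDIN; fd <= STDERR; fd++)
		fileclose(files[fd]); // drops ref; the filepool slot is reused once ref hits 0
}
```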

454
os/fs.c Normal file

@ -0,0 +1,454 @@
// File system implementation. Five layers:
// + Blocks: allocator for raw disk blocks.
// + Log: crash recovery for multi-step updates.
// + Files: inode allocator, reading, writing, metadata.
// + Directories: inode with special contents (list of other inodes!)
// + Names: paths like /usr/rtm/xv6/fs.c for convenient naming.
//
// This file contains the low-level file system manipulation
// routines. The (higher-level) system call implementations
// are in sysfile.c.
#include "fs.h"
#include "bio.h"
#include "defs.h"
#include "file.h"
#include "proc.h"
#include "riscv.h"
#include "types.h"
// there should be one superblock per disk device, but we run with
// only one device
struct superblock sb;
// Read the super block.
static void readsb(int dev, struct superblock *sb)
{
struct buf *bp;
bp = bread(dev, 1);
memmove(sb, bp->data, sizeof(*sb));
brelse(bp);
}
// Init fs
void fsinit()
{
int dev = ROOTDEV;
readsb(dev, &sb);
if (sb.magic != FSMAGIC) {
panic("invalid file system");
}
}
// Zero a block.
static void bzero(int dev, int bno)
{
struct buf *bp;
bp = bread(dev, bno);
memset(bp->data, 0, BSIZE);
bwrite(bp);
brelse(bp);
}
// Blocks.
// Allocate a zeroed disk block.
static uint balloc(uint dev)
{
int b, bi, m;
struct buf *bp;
bp = 0;
for (b = 0; b < sb.size; b += BPB) {
bp = bread(dev, BBLOCK(b, sb));
for (bi = 0; bi < BPB && b + bi < sb.size; bi++) {
m = 1 << (bi % 8);
if ((bp->data[bi / 8] & m) == 0) { // Is block free?
bp->data[bi / 8] |= m; // Mark block in use.
bwrite(bp);
brelse(bp);
bzero(dev, b + bi);
return b + bi;
}
}
brelse(bp);
}
panic("balloc: out of blocks");
return 0;
}
// Free a disk block.
static void bfree(int dev, uint b)
{
struct buf *bp;
int bi, m;
bp = bread(dev, BBLOCK(b, sb));
bi = b % BPB;
m = 1 << (bi % 8);
if ((bp->data[bi / 8] & m) == 0)
panic("freeing free block");
bp->data[bi / 8] &= ~m;
bwrite(bp);
brelse(bp);
}
//The inode table in memory
struct {
struct inode inode[NINODE];
} itable;
static struct inode *iget(uint dev, uint inum);
// Allocate an inode on device dev.
// Mark it as allocated by giving it type `type`.
// Returns an allocated and referenced inode.
struct inode *ialloc(uint dev, short type)
{
int inum;
struct buf *bp;
struct dinode *dip;
for (inum = 1; inum < sb.ninodes; inum++) {
bp = bread(dev, IBLOCK(inum, sb));
dip = (struct dinode *)bp->data + inum % IPB;
if (dip->type == 0) { // a free inode
memset(dip, 0, sizeof(*dip));
dip->type = type;
bwrite(bp);
brelse(bp);
return iget(dev, inum);
}
brelse(bp);
}
panic("ialloc: no inodes");
return 0;
}
// Copy a modified in-memory inode to disk.
// Must be called after every change to an ip->xxx field
// that lives on disk.
void iupdate(struct inode *ip)
{
struct buf *bp;
struct dinode *dip;
bp = bread(ip->dev, IBLOCK(ip->inum, sb));
dip = (struct dinode *)bp->data + ip->inum % IPB;
dip->type = ip->type;
dip->size = ip->size;
// LAB4: you may need to update link count here
memmove(dip->addrs, ip->addrs, sizeof(ip->addrs));
bwrite(bp);
brelse(bp);
}
// Find the inode with number inum on device dev
// and return the in-memory copy. Does not read
// it from disk.
static struct inode *iget(uint dev, uint inum)
{
struct inode *ip, *empty;
// Is the inode already in the table?
empty = 0;
for (ip = &itable.inode[0]; ip < &itable.inode[NINODE]; ip++) {
if (ip->ref > 0 && ip->dev == dev && ip->inum == inum) {
ip->ref++;
return ip;
}
if (empty == 0 && ip->ref == 0) // Remember empty slot.
empty = ip;
}
// Recycle an inode entry.
if (empty == 0)
panic("iget: no inodes");
ip = empty;
ip->dev = dev;
ip->inum = inum;
ip->ref = 1;
ip->valid = 0;
return ip;
}
// Increment reference count for ip.
// Returns ip to enable ip = idup(ip1) idiom.
struct inode *idup(struct inode *ip)
{
ip->ref++;
return ip;
}
// Reads the inode from disk if necessary.
void ivalid(struct inode *ip)
{
struct buf *bp;
struct dinode *dip;
if (ip->valid == 0) {
bp = bread(ip->dev, IBLOCK(ip->inum, sb));
dip = (struct dinode *)bp->data + ip->inum % IPB;
ip->type = dip->type;
ip->size = dip->size;
// LAB4: You may need to get link count here
memmove(ip->addrs, dip->addrs, sizeof(ip->addrs));
brelse(bp);
ip->valid = 1;
if (ip->type == 0)
panic("ivalid: no type");
}
}
// Drop a reference to an in-memory inode.
// If that was the last reference, the inode table entry can
// be recycled.
// If that was the last reference and the inode has no links
// to it, free the inode (and its content) on disk.
// All calls to iput() must be inside a transaction in
// case it has to free the inode.
void iput(struct inode *ip)
{
// LAB4: Uncomment the condition and change the link count variable name (nlink) if needed
if (ip->ref == 1 && ip->valid && 0 /*&& ip->nlink == 0*/) {
// inode has no links and no other references: truncate and free.
itrunc(ip);
ip->type = 0;
iupdate(ip);
ip->valid = 0;
}
ip->ref--;
}
// Inode content
//
// The content (data) associated with each inode is stored
// in blocks on the disk. The first NDIRECT block numbers
// are listed in ip->addrs[]. The next NINDIRECT blocks are
// listed in block ip->addrs[NDIRECT].
// Return the disk block address of the nth block in inode ip.
// If there is no such block, bmap allocates one.
static uint bmap(struct inode *ip, uint bn)
{
uint addr, *a;
struct buf *bp;
if (bn < NDIRECT) {
if ((addr = ip->addrs[bn]) == 0)
ip->addrs[bn] = addr = balloc(ip->dev);
return addr;
}
bn -= NDIRECT;
if (bn < NINDIRECT) {
// Load indirect block, allocating if necessary.
if ((addr = ip->addrs[NDIRECT]) == 0)
ip->addrs[NDIRECT] = addr = balloc(ip->dev);
bp = bread(ip->dev, addr);
a = (uint *)bp->data;
if ((addr = a[bn]) == 0) {
a[bn] = addr = balloc(ip->dev);
bwrite(bp);
}
brelse(bp);
return addr;
}
panic("bmap: out of range");
return 0;
}
// Truncate inode (discard contents).
void itrunc(struct inode *ip)
{
int i, j;
struct buf *bp;
uint *a;
for (i = 0; i < NDIRECT; i++) {
if (ip->addrs[i]) {
bfree(ip->dev, ip->addrs[i]);
ip->addrs[i] = 0;
}
}
if (ip->addrs[NDIRECT]) {
bp = bread(ip->dev, ip->addrs[NDIRECT]);
a = (uint *)bp->data;
for (j = 0; j < NINDIRECT; j++) {
if (a[j])
bfree(ip->dev, a[j]);
}
brelse(bp);
bfree(ip->dev, ip->addrs[NDIRECT]);
ip->addrs[NDIRECT] = 0;
}
ip->size = 0;
iupdate(ip);
}
// Read data from inode.
// If user_dst==1, then dst is a user virtual address;
// otherwise, dst is a kernel address.
int readi(struct inode *ip, int user_dst, uint64 dst, uint off, uint n)
{
uint tot, m;
struct buf *bp;
if (off > ip->size || off + n < off)
return 0;
if (off + n > ip->size)
n = ip->size - off;
for (tot = 0; tot < n; tot += m, off += m, dst += m) {
bp = bread(ip->dev, bmap(ip, off / BSIZE));
m = MIN(n - tot, BSIZE - off % BSIZE);
if (either_copyout(user_dst, dst,
(char *)bp->data + (off % BSIZE), m) == -1) {
brelse(bp);
tot = -1;
break;
}
brelse(bp);
}
return tot;
}
// Write data to inode.
// Caller must hold ip->lock.
// If user_src==1, then src is a user virtual address;
// otherwise, src is a kernel address.
// Returns the number of bytes successfully written.
// If the return value is less than the requested n,
// there was an error of some kind.
int writei(struct inode *ip, int user_src, uint64 src, uint off, uint n)
{
uint tot, m;
struct buf *bp;
if (off > ip->size || off + n < off)
return -1;
if (off + n > MAXFILE * BSIZE)
return -1;
for (tot = 0; tot < n; tot += m, off += m, src += m) {
bp = bread(ip->dev, bmap(ip, off / BSIZE));
m = MIN(n - tot, BSIZE - off % BSIZE);
if (either_copyin(user_src, src,
(char *)bp->data + (off % BSIZE), m) == -1) {
brelse(bp);
break;
}
bwrite(bp);
brelse(bp);
}
if (off > ip->size)
ip->size = off;
// write the i-node back to disk even if the size didn't change
// because the loop above might have called bmap() and added a new
// block to ip->addrs[].
iupdate(ip);
return tot;
}
// Look for a directory entry in a directory.
// If found, set *poff to byte offset of entry.
struct inode *dirlookup(struct inode *dp, char *name, uint *poff)
{
uint off, inum;
struct dirent de;
if (dp->type != T_DIR)
panic("dirlookup not DIR");
for (off = 0; off < dp->size; off += sizeof(de)) {
if (readi(dp, 0, (uint64)&de, off, sizeof(de)) != sizeof(de))
panic("dirlookup read");
if (de.inum == 0)
continue;
if (strncmp(name, de.name, DIRSIZ) == 0) {
// entry matches path element
if (poff)
*poff = off;
inum = de.inum;
return iget(dp->dev, inum);
}
}
return 0;
}
//Show the filenames of all files in the directory
int dirls(struct inode *dp)
{
uint64 off, count;
struct dirent de;
if (dp->type != T_DIR)
panic("dirlookup not DIR");
count = 0;
for (off = 0; off < dp->size; off += sizeof(de)) {
if (readi(dp, 0, (uint64)&de, off, sizeof(de)) != sizeof(de))
panic("dirlookup read");
if (de.inum == 0)
continue;
printf("%s\n", de.name);
count++;
}
return count;
}
// Write a new directory entry (name, inum) into the directory dp.
int dirlink(struct inode *dp, char *name, uint inum)
{
int off;
struct dirent de;
struct inode *ip;
// Check that name is not present.
if ((ip = dirlookup(dp, name, 0)) != 0) {
iput(ip);
return -1;
}
// Look for an empty dirent.
for (off = 0; off < dp->size; off += sizeof(de)) {
if (readi(dp, 0, (uint64)&de, off, sizeof(de)) != sizeof(de))
panic("dirlink read");
if (de.inum == 0)
break;
}
strncpy(de.name, name, DIRSIZ);
de.inum = inum;
if (writei(dp, 0, (uint64)&de, off, sizeof(de)) != sizeof(de))
panic("dirlink");
return 0;
}
// LAB4: You may want to add dirunlink here
//Return the inode of the root directory
struct inode *root_dir()
{
struct inode *r = iget(ROOTDEV, ROOTINO);
ivalid(r);
return r;
}
//Find the corresponding inode according to the path
struct inode *namei(char *path)
{
int skip = 0;
// if(path[0] == '.' && path[1] == '/')
// skip = 2;
// if (path[0] == '/') {
// skip = 1;
// }
struct inode *dp = root_dir();
if (dp == 0)
panic("fs dumped.\n");
return dirlookup(dp, path + skip, 0);
}
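
The header comment of fs.c lists the layers (blocks, inodes, directories, names); the sketch below walks the common path through them: namei to resolve a name in the single root directory, ivalid to pull the dinode in, readi to copy bytes out, iput to drop the reference. dump_file is illustrative and not part of the repository.

```c
#include "defs.h"

// Illustrative only: read the first bytes of a file in the root directory
// into a kernel buffer, using the routines defined above.
int dump_file(char *name)
{
	struct inode *ip = namei(name); // single-level lookup in the root dir
	if (ip == 0)
		return -1;              // no such file
	ivalid(ip);                     // read the on-disk inode if not yet valid

	char buf[64];
	// user_dst = 0: the destination is a kernel address, not a user virtual address
	int n = readi(ip, 0, (uint64)buf, 0, sizeof(buf));
	iput(ip);                       // drop the reference taken by namei()
	return n;                       // bytes actually read (the file may be shorter)
}
```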

95
os/fs.h Normal file

@ -0,0 +1,95 @@
#ifndef __FS_H__
#define __FS_H__
#include "types.h"
// On-disk file system format.
// Both the kernel and user programs use this header file.
#define NFILE 100 // open files per system
#define NINODE 50 // maximum number of active i-nodes
#define NDEV 10 // maximum major device number
#define ROOTDEV 1 // device number of file system root disk
#define MAXOPBLOCKS 10 // max # of blocks any FS op writes
#define NBUF (MAXOPBLOCKS * 3) // size of disk block cache
#define FSSIZE 1000 // size of file system in blocks
#define MAXPATH 128 // maximum file path name
#define ROOTINO 1 // root i-number
#define BSIZE 1024 // block size
// Disk layout:
// [ boot block | super block | inode blocks | free bit map | data blocks]
//
// mkfs computes the super block and builds an initial file system. The
// super block describes the disk layout:
struct superblock {
uint magic; // Must be FSMAGIC
uint size; // Size of file system image (blocks)
uint nblocks; // Number of data blocks
uint ninodes; // Number of inodes.
uint inodestart; // Block number of first inode block
uint bmapstart; // Block number of first free map block
};
#define FSMAGIC 0x10203040
#define NDIRECT 12
#define NINDIRECT (BSIZE / sizeof(uint))
#define MAXFILE (NDIRECT + NINDIRECT)
// File type
#define T_DIR 1 // Directory
#define T_FILE 2 // File
// On-disk inode structure
struct dinode {
short type; // File type
short pad[3];
// LAB4: you can reduce the size of the pad array and add a link count below,
// or simply treat one pad entry as the link count.
// Keep in mind that sizeof(struct dinode) should stay unchanged.
uint size; // Size of file (bytes)
uint addrs[NDIRECT + 1]; // Data block addresses
};
// Inodes per block.
#define IPB (BSIZE / sizeof(struct dinode))
// Block containing inode i
#define IBLOCK(i, sb) ((i) / IPB + sb.inodestart)
// Bitmap bits per block
#define BPB (BSIZE * 8)
// Block of free map containing bit for block b
#define BBLOCK(b, sb) ((b) / BPB + sb.bmapstart)
// Directory is a file containing a sequence of dirent structures.
#define DIRSIZ 14
struct dirent {
ushort inum;
char name[DIRSIZ];
};
// file.h
struct inode;
void fsinit();
int dirlink(struct inode *, char *, uint);
struct inode *dirlookup(struct inode *, char *, uint *);
struct inode *ialloc(uint, short);
struct inode *idup(struct inode *);
void iinit();
void ivalid(struct inode *);
void iput(struct inode *);
void iunlock(struct inode *);
void iunlockput(struct inode *);
void iupdate(struct inode *);
struct inode *namei(char *);
struct inode *root_dir();
int readi(struct inode *, int, uint64, uint, uint);
int writei(struct inode *, int, uint64, uint, uint);
void itrunc(struct inode *);
int dirls(struct inode *);
#endif //!__FS_H__

57
os/kalloc.c Normal file

@ -0,0 +1,57 @@
#include "kalloc.h"
#include "defs.h"
#include "riscv.h"
extern char ekernel[];
struct linklist {
struct linklist *next;
};
struct {
struct linklist *freelist;
} kmem;
void freerange(void *pa_start, void *pa_end)
{
char *p;
p = (char *)PGROUNDUP((uint64)pa_start);
for (; p + PGSIZE <= (char *)pa_end; p += PGSIZE)
kfree(p);
}
void kinit()
{
freerange(ekernel, (void *)PHYSTOP);
}
// Free the page of physical memory pointed at by v,
// which normally should have been returned by a
// call to kalloc(). (The exception is when
// initializing the allocator; see kinit above.)
void kfree(void *pa)
{
struct linklist *l;
if (((uint64)pa % PGSIZE) != 0 || (char *)pa < ekernel ||
(uint64)pa >= PHYSTOP)
panic("kfree");
// Fill with junk to catch dangling refs.
memset(pa, 1, PGSIZE);
l = (struct linklist *)pa;
l->next = kmem.freelist;
kmem.freelist = l;
}
// Allocate one 4096-byte page of physical memory.
// Returns a pointer that the kernel can use.
// Returns 0 if the memory cannot be allocated.
void *kalloc()
{
struct linklist *l;
l = kmem.freelist;
if (l) {
kmem.freelist = l->next;
memset((char *)l, 5, PGSIZE); // fill with junk
}
return (void *)l;
}
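A short round-trip sketch of the allocator above, assuming it sits in a file that already includes defs.h and riscv.h (so panic, memset and PGSIZE are visible):

void kalloc_demo()
{
	void *page = kalloc(); // pops one 4096-byte page off kmem.freelist, or 0
	if (page == 0)
		panic("out of physical pages");
	memset(page, 0, PGSIZE); // callers must clear the junk fill themselves
	kfree(page); // the page becomes the new head of the free list
}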

8
os/kalloc.h Normal file
View File

@ -0,0 +1,8 @@
#ifndef KALLOC_H
#define KALLOC_H
void *kalloc();
void kfree(void *);
void kinit();
#endif // KALLOC_H

51
os/kernel.ld Normal file
View File

@ -0,0 +1,51 @@
OUTPUT_ARCH(riscv)
ENTRY(_entry)
BASE_ADDRESS = 0x80200000;
SECTIONS
{
. = BASE_ADDRESS;
skernel = .;
s_text = .;
.text : {
*(.text.entry)
*(.text .text.*)
. = ALIGN(0x1000);
*(trampsec)
. = ALIGN(0x1000);
}
. = ALIGN(4K);
e_text = .;
s_rodata = .;
.rodata : {
*(.rodata .rodata.*)
}
. = ALIGN(4K);
e_rodata = .;
s_data = .;
.data : {
*(.data.apps)
*(.data .data.*)
*(.sdata .sdata.*)
}
. = ALIGN(4K);
e_data = .;
.bss : {
*(.bss.stack)
s_bss = .;
*(.bss .bss.*)
*(.sbss .sbss.*)
}
. = ALIGN(4K);
e_bss = .;
ekernel = .;
/DISCARD/ : {
*(.eh_frame)
}
}

68
os/kernelld.py Normal file
View File

@ -0,0 +1,68 @@
import os
TARGET_DIR = "../user/target/"
if __name__ == '__main__':
f = open("kernel_app.ld", mode="w")
apps = os.listdir(TARGET_DIR)
f.write(
'''OUTPUT_ARCH(riscv)
ENTRY(_entry)
BASE_ADDRESS = 0x80200000;
SECTIONS
{
. = BASE_ADDRESS;
skernel = .;
s_text = .;
.text : {
*(.text.entry)
*(.text .text.*)
. = ALIGN(0x1000);
*(trampsec)
. = ALIGN(0x1000);
}
. = ALIGN(4K);
e_text = .;
s_rodata = .;
.rodata : {
*(.rodata .rodata.*)
}
. = ALIGN(4K);
e_rodata = .;
s_data = .;
.data : {
*(.data)
''')
for (idx, _) in enumerate(apps):
f.write(' . = ALIGN(0x8);\n')
f.write(' *(.data.app{})\n'.format(idx))
f.write(
'''
*(.data.*)
*(.sdata .sdata.*)
}
. = ALIGN(4K);
e_data = .;
.bss : {
*(.bss.stack)
s_bss = .;
*(.bss .bss.*)
*(.sbss .sbss.*)
}
. = ALIGN(4K);
e_bss = .;
ekernel = .;
/DISCARD/ : {
*(.eh_frame)
}
}
''')
f.close()

86
os/kernelvec.S Normal file
View File

@ -0,0 +1,86 @@
#
# interrupts and exceptions while in supervisor
# mode come here.
#
# push all registers, call kerneltrap(), restore, return.
#
.globl kerneltrap
.globl kernelvec
.align 4
kernelvec:
// make room to save registers.
addi sp, sp, -256
// save the registers.
sd ra, 0(sp)
sd sp, 8(sp)
sd gp, 16(sp)
sd tp, 24(sp)
sd t0, 32(sp)
sd t1, 40(sp)
sd t2, 48(sp)
sd s0, 56(sp)
sd s1, 64(sp)
sd a0, 72(sp)
sd a1, 80(sp)
sd a2, 88(sp)
sd a3, 96(sp)
sd a4, 104(sp)
sd a5, 112(sp)
sd a6, 120(sp)
sd a7, 128(sp)
sd s2, 136(sp)
sd s3, 144(sp)
sd s4, 152(sp)
sd s5, 160(sp)
sd s6, 168(sp)
sd s7, 176(sp)
sd s8, 184(sp)
sd s9, 192(sp)
sd s10, 200(sp)
sd s11, 208(sp)
sd t3, 216(sp)
sd t4, 224(sp)
sd t5, 232(sp)
sd t6, 240(sp)
// call the C trap handler in trap.c
call kerneltrap
kernelret:
// restore registers.
ld ra, 0(sp)
ld sp, 8(sp)
ld gp, 16(sp)
// not this, in case we moved CPUs: ld tp, 24(sp)
ld t0, 32(sp)
ld t1, 40(sp)
ld t2, 48(sp)
ld s0, 56(sp)
ld s1, 64(sp)
ld a0, 72(sp)
ld a1, 80(sp)
ld a2, 88(sp)
ld a3, 96(sp)
ld a4, 104(sp)
ld a5, 112(sp)
ld a6, 120(sp)
ld a7, 128(sp)
ld s2, 136(sp)
ld s3, 144(sp)
ld s4, 152(sp)
ld s5, 160(sp)
ld s6, 168(sp)
ld s7, 176(sp)
ld s8, 184(sp)
ld s9, 192(sp)
ld s10, 200(sp)
ld s11, 208(sp)
ld t3, 216(sp)
ld t4, 224(sp)
ld t5, 232(sp)
ld t6, 240(sp)
addi sp, sp, 256
// return to whatever we were doing in the kernel.
sret

62
os/loader.c Normal file
View File

@ -0,0 +1,62 @@
#include "loader.h"
#include "defs.h"
#include "file.h"
#include "trap.h"
extern char INIT_PROC[];
int bin_loader(struct inode *ip, struct proc *p)
{
ivalid(ip);
void *page;
uint64 length = ip->size;
uint64 va_start = BASE_ADDRESS;
uint64 va_end = PGROUNDUP(BASE_ADDRESS + length);
for (uint64 va = va_start, off = 0; va < va_end;
va += PGSIZE, off += PAGE_SIZE) {
page = kalloc();
if (page == 0) {
panic("bin_loader: kalloc failed");
}
readi(ip, 0, (uint64)page, off, PAGE_SIZE);
if (off + PAGE_SIZE > length) {
memset(page + (length - off), 0,
PAGE_SIZE - (length - off));
}
if (mappages(p->pagetable, va, PGSIZE, (uint64)page,
PTE_U | PTE_R | PTE_W | PTE_X) != 0)
panic("bin_loader: mappages failed");
}
p->max_page = va_end / PAGE_SIZE;
p->ustack_base = va_end + PAGE_SIZE;
// alloc main thread
if (allocthread(p, va_start, 1) != 0) {
panic("proc %d alloc main thread failed!", p->pid);
}
debugf("bin loader fin");
return 0;
}
// load the init app and initialize the corresponding `proc` structure.
int load_init_app()
{
struct inode *ip;
struct proc *p = allocproc();
init_stdio(p);
if ((ip = namei(INIT_PROC)) == 0) {
errorf("invalid init proc name\n");
return -1;
}
debugf("load init app %s", INIT_PROC);
bin_loader(ip, p);
iput(ip);
char *argv[2];
argv[0] = INIT_PROC;
argv[1] = NULL;
struct thread *t = &p->threads[0];
t->trapframe->a0 = push_argv(p, argv);
t->state = RUNNABLE;
add_task(t);
return 0;
}

17
os/loader.h Normal file
View File

@ -0,0 +1,17 @@
#ifndef LOADER_H
#define LOADER_H
#include "const.h"
#include "file.h"
#include "proc.h"
#include "types.h"
int load_init_app();
int bin_loader(struct inode *, struct proc *);
#define BASE_ADDRESS (0x1000)
#define USTACK_SIZE (PAGE_SIZE)
#define KSTACK_SIZE (PAGE_SIZE)
#define TRAP_PAGE_SIZE (PAGE_SIZE)
#endif // LOADER_H

122
os/log.h Normal file
View File

@ -0,0 +1,122 @@
#ifndef LOG_H
#define LOG_H
extern void printf(char *, ...);
extern int procid();
extern int threadid();
extern void dummy(int, ...);
extern void shutdown();
#if defined(LOG_LEVEL_ERROR)
#define USE_LOG_ERROR
#endif // LOG_LEVEL_ERROR
#if defined(LOG_LEVEL_WARN)
#define USE_LOG_ERROR
#define USE_LOG_WARN
#endif // LOG_LEVEL_WARN
#if defined(LOG_LEVEL_INFO)
#define USE_LOG_ERROR
#define USE_LOG_WARN
#define USE_LOG_INFO
#endif // LOG_LEVEL_INFO
#if defined(LOG_LEVEL_DEBUG)
#define USE_LOG_ERROR
#define USE_LOG_WARN
#define USE_LOG_INFO
#define USE_LOG_DEBUG
#endif // LOG_LEVEL_DEBUG
#if defined(LOG_LEVEL_TRACE)
#define USE_LOG_ERROR
#define USE_LOG_WARN
#define USE_LOG_INFO
#define USE_LOG_DEBUG
#define USE_LOG_TRACE
#endif // LOG_LEVEL_TRACE
enum LOG_COLOR {
RED = 31,
GREEN = 32,
BLUE = 34,
GRAY = 90,
YELLOW = 93,
};
#if defined(USE_LOG_ERROR)
#define errorf(fmt, ...) \
do { \
int pid = procid(), tid = threadid(); \
printf("\x1b[%dm[%s %d-%d]" fmt "\x1b[0m\n", RED, "ERROR", \
pid, tid, ##__VA_ARGS__); \
} while (0)
#else
#define errorf(fmt, ...) dummy(0, ##__VA_ARGS__)
#endif // USE_LOG_ERROR
#if defined(USE_LOG_WARN)
#define warnf(fmt, ...) \
do { \
int pid = procid(), tid = threadid(); \
printf("\x1b[%dm[%s %d-%d]" fmt "\x1b[0m\n", YELLOW, "WARN", \
pid, tid, ##__VA_ARGS__); \
} while (0)
#else
#define warnf(fmt, ...) dummy(0, ##__VA_ARGS__)
#endif // USE_LOG_WARN
#if defined(USE_LOG_INFO)
#define infof(fmt, ...) \
do { \
int pid = procid(), tid = threadid(); \
printf("\x1b[%dm[%s %d-%d]" fmt "\x1b[0m\n", BLUE, "INFO", \
pid, tid, ##__VA_ARGS__); \
} while (0)
#else
#define infof(fmt, ...) dummy(0, ##__VA_ARGS__)
#endif // USE_LOG_INFO
#if defined(USE_LOG_DEBUG)
#define debugf(fmt, ...) \
do { \
int pid = procid(), tid = threadid(); \
printf("\x1b[%dm[%s %d-%d]" fmt "\x1b[0m\n", GREEN, "DEBUG", \
pid, tid, ##__VA_ARGS__); \
} while (0)
#else
#define debugf(fmt, ...) dummy(0, ##__VA_ARGS__)
#endif // USE_LOG_DEBUG
#if defined(USE_LOG_TRACE)
#define tracef(fmt, ...) \
do { \
int pid = procid(), tid = threadid(); \
printf("\x1b[%dm[%s %d-%d]" fmt "\x1b[0m\n", GRAY, "TRACE", \
pid, tid, ##__VA_ARGS__); \
} while (0)
#else
#define tracef(fmt, ...) dummy(0, ##__VA_ARGS__)
#endif // USE_LOG_TRACE
#define panic(fmt, ...) \
do { \
int pid = procid(), tid = threadid(); \
printf("\x1b[%dm[%s %d-%d] %s:%d: " fmt "\x1b[0m\n", RED, \
"PANIC", pid, tid, __FILE__, __LINE__, ##__VA_ARGS__); \
shutdown(); \
__builtin_unreachable(); \
} while (0)
#endif //! LOG_H
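The level macros above cascade: defining one LOG_LEVEL_* enables that level and every level above it, while disabled levels collapse into dummy() so the arguments still typecheck. A small usage sketch, assuming the build passes the level as a -D flag (how the Makefile actually does this is not shown here):

// Compiled with e.g. -DLOG_LEVEL_INFO:
//   errorf/warnf/infof print a colored "[LEVEL pid-tid]" line,
//   debugf/tracef expand to dummy(0, ...) and emit nothing.
void log_demo(int fd)
{
	infof("opened fd %d", fd); // visible in INFO, DEBUG and TRACE builds
	debugf("checking fd %d", fd); // compiled out below DEBUG
}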

33
os/main.c Normal file
View File

@ -0,0 +1,33 @@
#include "console.h"
#include "defs.h"
#include "loader.h"
#include "plic.h"
#include "timer.h"
#include "trap.h"
#include "virtio.h"
void clean_bss()
{
extern char s_bss[];
extern char e_bss[];
memset(s_bss, 0, e_bss - s_bss);
}
void main()
{
clean_bss();
printf("hello world!\n");
proc_init();
kinit();
kvm_init();
trap_init();
plicinit();
virtio_disk_init();
binit();
fsinit();
timer_init();
load_init_app();
infof("start scheduler!");
show_all_files();
scheduler();
}

99
os/pipe.c Normal file
View File

@ -0,0 +1,99 @@
#include "defs.h"
#include "proc.h"
#include "riscv.h"
int pipealloc(struct file *f0, struct file *f1)
{
struct pipe *pi;
pi = 0;
if ((pi = (struct pipe *)kalloc()) == 0)
goto bad;
pi->readopen = 1;
pi->writeopen = 1;
pi->nwrite = 0;
pi->nread = 0;
f0->type = FD_PIPE;
f0->readable = 1;
f0->writable = 0;
f0->pipe = pi;
f1->type = FD_PIPE;
f1->readable = 0;
f1->writable = 1;
f1->pipe = pi;
return 0;
bad:
if (pi)
kfree((char *)pi);
return -1;
}
void pipeclose(struct pipe *pi, int writable)
{
if (writable) {
pi->writeopen = 0;
} else {
pi->readopen = 0;
}
if (pi->readopen == 0 && pi->writeopen == 0) {
kfree((char *)pi);
}
}
int pipewrite(struct pipe *pi, uint64 addr, int n)
{
int w = 0;
uint64 size;
struct proc *p = curr_proc();
if (n <= 0) {
panic("invalid read num");
}
while (w < n) {
if (pi->readopen == 0) {
return -1;
}
if (pi->nwrite == pi->nread + PIPESIZE) { // DOC: pipewrite-full
yield();
} else {
size = MIN(MIN(n - w,
pi->nread + PIPESIZE - pi->nwrite),
PIPESIZE - (pi->nwrite % PIPESIZE));
if (copyin(p->pagetable,
&pi->data[pi->nwrite % PIPESIZE], addr + w,
size) < 0) {
panic("copyin");
}
pi->nwrite += size;
w += size;
}
}
return w;
}
int piperead(struct pipe *pi, uint64 addr, int n)
{
int r = 0;
uint64 size = -1;
struct proc *p = curr_proc();
if (n <= 0) {
panic("invalid read num");
}
while (pi->nread == pi->nwrite) {
if (pi->writeopen)
yield();
else
return -1;
}
while (r < n && size != 0) { // DOC: piperead-copy
if (pi->nread == pi->nwrite)
break;
size = MIN(MIN(n - r, pi->nwrite - pi->nread),
PIPESIZE - (pi->nread % PIPESIZE));
if (copyout(p->pagetable, addr + r,
&pi->data[pi->nread % PIPESIZE], size) < 0) {
panic("copyout");
}
pi->nread += size;
r += size;
}
return r;
}
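Note that nread and nwrite above are monotonically increasing counters rather than wrapped indices: the ring is empty when nread == nwrite and full when nwrite == nread + PIPESIZE. The nested MIN() in pipewrite clips each copy by the remaining request, the free space, and the distance to the wrap point. A hypothetical helper with example numbers, assuming for illustration that PIPESIZE were 512:

// Illustration only; pipewrite computes the same value inline.
uint64 pipe_copy_size(uint64 nread, uint64 nwrite, uint64 want)
{
	uint64 free_space = nread + 512 - nwrite; // bytes of room left in the ring
	uint64 to_end = 512 - (nwrite % 512); // bytes before the index wraps
	// e.g. want = 300, nread = 100, nwrite = 400:
	// MIN(MIN(300, 212), 112) = 112, so that pass copies 112 bytes.
	return MIN(MIN(want, free_space), to_end);
}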

35
os/plic.c Normal file
View File

@ -0,0 +1,35 @@
#include "plic.h"
#include "log.h"
#include "proc.h"
#include "riscv.h"
#include "types.h"
//
// the riscv Platform Level Interrupt Controller (PLIC).
//
void plicinit()
{
// set desired IRQ priorities non-zero (otherwise disabled).
int hart = cpuid();
*(uint32 *)(PLIC + VIRTIO0_IRQ * 4) = 1;
// set the virtio disk's enable bit for this hart's S-mode.
*(uint32 *)PLIC_SENABLE(hart) = (1 << VIRTIO0_IRQ);
// set this hart's S-mode priority threshold to 0.
*(uint32 *)PLIC_SPRIORITY(hart) = 0;
}
// ask the PLIC what interrupt we should serve.
int plic_claim()
{
int hart = cpuid();
int irq = *(uint32 *)PLIC_SCLAIM(hart);
return irq;
}
// tell the PLIC we've served this IRQ.
void plic_complete(int irq)
{
int hart = cpuid();
*(uint32 *)PLIC_SCLAIM(hart) = irq;
}

27
os/plic.h Normal file
View File

@ -0,0 +1,27 @@
#ifndef PLIC_H
#define PLIC_H
// qemu puts UART registers here in physical memory.
#define UART0 0x10000000L
#define UART0_IRQ 10
// virtio mmio interface
#define VIRTIO0 0x10001000
#define VIRTIO0_IRQ 1
// qemu puts platform-level interrupt controller (PLIC) here.
#define PLIC 0x0c000000L
#define PLIC_PRIORITY (PLIC + 0x0)
#define PLIC_PENDING (PLIC + 0x1000)
#define PLIC_MENABLE(hart) (PLIC + 0x2000 + (hart)*0x100)
#define PLIC_SENABLE(hart) (PLIC + 0x2080 + (hart)*0x100)
#define PLIC_MPRIORITY(hart) (PLIC + 0x200000 + (hart)*0x2000)
#define PLIC_SPRIORITY(hart) (PLIC + 0x201000 + (hart)*0x2000)
#define PLIC_MCLAIM(hart) (PLIC + 0x200004 + (hart)*0x2000)
#define PLIC_SCLAIM(hart) (PLIC + 0x201004 + (hart)*0x2000)
void plicinit();
int plic_claim();
void plic_complete(int);
#endif // PLIC_H

83
os/printf.c Normal file
View File

@ -0,0 +1,83 @@
#include <stdarg.h>
#include "console.h"
#include "defs.h"
static char digits[] = "0123456789abcdef";
static void printint(int xx, int base, int sign)
{
char buf[16];
int i;
uint x;
if (sign && (sign = xx < 0))
x = -xx;
else
x = xx;
i = 0;
do {
buf[i++] = digits[x % base];
} while ((x /= base) != 0);
if (sign)
buf[i++] = '-';
while (--i >= 0)
consputc(buf[i]);
}
static void printptr(uint64 x)
{
int i;
consputc('0');
consputc('x');
for (i = 0; i < (sizeof(uint64) * 2); i++, x <<= 4)
consputc(digits[x >> (sizeof(uint64) * 8 - 4)]);
}
// Print to the console. only understands %d, %x, %p, %s.
void printf(char *fmt, ...)
{
va_list ap;
int i, c;
char *s;
if (fmt == 0)
panic("null fmt");
va_start(ap, fmt);
for (i = 0; (c = fmt[i] & 0xff) != 0; i++) {
if (c != '%') {
consputc(c);
continue;
}
c = fmt[++i] & 0xff;
if (c == 0)
break;
switch (c) {
case 'd':
printint(va_arg(ap, int), 10, 1);
break;
case 'x':
printint(va_arg(ap, int), 16, 1);
break;
case 'p':
printptr(va_arg(ap, uint64));
break;
case 's':
if ((s = va_arg(ap, char *)) == 0)
s = "(null)";
for (; *s; s++)
consputc(*s);
break;
case '%':
consputc('%');
break;
default:
// Print unknown % sequence to draw attention.
consputc('%');
consputc(c);
break;
}
}
}

6
os/printf.h Normal file
View File

@ -0,0 +1,6 @@
#ifndef PRINTF_H
#define PRINTF_H
void printf(char *, ...);
#endif // PRINTF_H

481
os/proc.c Normal file
View File

@ -0,0 +1,481 @@
#include "proc.h"
#include "defs.h"
#include "loader.h"
#include "trap.h"
#include "vm.h"
#include "queue.h"
struct proc pool[NPROC];
__attribute__((aligned(16))) char kstack[NPROC][NTHREAD][KSTACK_SIZE];
__attribute__((aligned(4096))) char trapframe[NPROC][NTHREAD][TRAP_PAGE_SIZE];
extern char boot_stack_top[];
struct thread *current_thread;
struct thread idle;
struct queue task_queue;
int procid()
{
return curr_proc()->pid;
}
int threadid()
{
return curr_thread()->tid;
}
int cpuid()
{
return 0;
}
struct proc *curr_proc()
{
return current_thread->process;
}
struct thread *curr_thread()
{
return current_thread;
}
// initialize the proc table at boot time.
void proc_init()
{
struct proc *p;
for (p = pool; p < &pool[NPROC]; p++) {
p->state = P_UNUSED;
for (int tid = 0; tid < NTHREAD; ++tid) {
struct thread *t = &p->threads[tid];
t->state = T_UNUSED;
}
}
idle.kstack = (uint64)boot_stack_top;
current_thread = &idle;
// for procid() and threadid()
idle.process = pool;
idle.tid = -1;
init_queue(&task_queue, QUEUE_SIZE, process_queue_data);
}
int allocpid()
{
static int PID = 1;
return PID++;
}
int alloctid(const struct proc *process)
{
for (int i = 0; i < NTHREAD; ++i) {
if (process->threads[i].state == T_UNUSED)
return i;
}
return -1;
}
// get task by unique task id
struct thread *id_to_task(int index)
{
if (index < 0) {
return NULL;
}
int pool_id = index / NTHREAD;
int tid = index % NTHREAD;
struct thread *t = &pool[pool_id].threads[tid];
return t;
}
// encode a unique task id for each thread
int task_to_id(struct thread *t)
{
int pool_id = t->process - pool;
int task_id = pool_id * NTHREAD + t->tid;
return task_id;
}
struct thread *fetch_task()
{
int index = pop_queue(&task_queue);
struct thread *t = id_to_task(index);
if (t == NULL) {
debugf("No task to fetch\n");
return t;
}
int tid = t->tid;
int pid = t->process->pid;
tracef("fetch index %d(pid=%d, tid=%d, addr=%p) from task queue", index,
pid, tid, (uint64)t);
return t;
}
void add_task(struct thread *t)
{
int task_id = task_to_id(t);
int pid = t->process->pid;
push_queue(&task_queue, task_id);
tracef("add index %d(pid=%d, tid=%d, addr=%p) to task queue", task_id,
pid, t->tid, (uint64)t);
}
// Look in the process table for an UNUSED proc.
// If found, initialize state required to run in the kernel.
// If there are no free procs, or a memory allocation fails, return 0.
struct proc *allocproc()
{
struct proc *p;
for (p = pool; p < &pool[NPROC]; p++) {
if (p->state == P_UNUSED) {
goto found;
}
}
return 0;
found:
// init proc
p->pid = allocpid();
p->state = P_USED;
p->max_page = 0;
p->parent = NULL;
p->exit_code = 0;
p->pagetable = uvmcreate();
memset((void *)p->files, 0, sizeof(struct file *) * FD_BUFFER_SIZE);
p->next_mutex_id = 0;
p->next_semaphore_id = 0;
p->next_condvar_id = 0;
// LAB5: (1) you may initialize your new proc variables here
return p;
}
inline uint64 get_thread_trapframe_va(int tid)
{
return TRAPFRAME - tid * TRAP_PAGE_SIZE;
}
inline uint64 get_thread_ustack_base_va(struct thread *t)
{
return t->process->ustack_base + t->tid * USTACK_SIZE;
}
int allocthread(struct proc *p, uint64 entry, int alloc_user_res)
{
int tid;
struct thread *t;
for (tid = 0; tid < NTHREAD; ++tid) {
t = &p->threads[tid];
if (t->state == T_UNUSED) {
goto found;
}
}
return -1;
found:
t->tid = tid;
t->state = T_USED;
t->process = p;
t->exit_code = 0;
// kernel stack
t->kstack = (uint64)kstack[p - pool][tid];
// don't clear kstack now for exec()
// memset((void *)t->kstack, 0, KSTACK_SIZE);
// user stack
t->ustack = get_thread_ustack_base_va(t);
if (alloc_user_res != 0) {
if (uvmmap(p->pagetable, t->ustack, USTACK_SIZE / PAGE_SIZE,
PTE_U | PTE_R | PTE_W) < 0) {
panic("map ustack fail");
}
p->max_page =
MAX(p->max_page,
PGROUNDUP(t->ustack + USTACK_SIZE - 1) / PAGE_SIZE);
}
// trap frame
t->trapframe = (struct trapframe *)trapframe[p - pool][tid];
memset((void *)t->trapframe, 0, TRAP_PAGE_SIZE);
if (mappages(p->pagetable, get_thread_trapframe_va(tid), TRAP_PAGE_SIZE,
(uint64)t->trapframe, PTE_R | PTE_W) < 0) {
panic("map trapframe fail");
}
t->trapframe->sp = t->ustack + USTACK_SIZE;
t->trapframe->epc = entry;
//task context
memset(&t->context, 0, sizeof(t->context));
t->context.ra = (uint64)usertrapret;
t->context.sp = t->kstack + KSTACK_SIZE;
// we do not add thread to scheduler immediately
debugf("allocthread p: %d, o: %d, t: %d, e: %p, sp: %p, spp: %p",
p->pid, (p - pool), t->tid, entry, t->ustack,
useraddr(p->pagetable, t->ustack));
return tid;
}
int init_stdio(struct proc *p)
{
for (int i = 0; i < 3; i++) {
if (p->files[i] != NULL) {
return -1;
}
p->files[i] = stdio_init(i);
}
return 0;
}
// Scheduler never returns. It loops, doing:
// - choose a process to run.
// - swtch to start running that process.
// - eventually that process transfers control
// via swtch back to the scheduler.
void scheduler()
{
struct thread *t;
for (;;) {
t = fetch_task();
if (t == NULL) {
panic("all app are over!\n");
}
// throw out freed threads
if (t->state != RUNNABLE) {
warnf("not RUNNABLE", t->process->pid, t->tid);
continue;
}
tracef("swtich to proc %d, thread %d", t->process->pid, t->tid);
t->state = RUNNING;
current_thread = t;
swtch(&idle.context, &t->context);
}
}
// Switch to scheduler. Must hold only p->lock
// and have changed proc->state. Saves and restores
// intena because intena is a property of this
// kernel thread, not this CPU. It should
// be proc->intena and proc->noff, but that would
// break in the few places where a lock is held but
// there's no process.
void sched()
{
struct thread *t = curr_thread();
if (t->state == RUNNING)
panic("sched running");
swtch(&t->context, &idle.context);
}
// Give up the CPU for one scheduling round.
void yield()
{
current_thread->state = RUNNABLE;
add_task(current_thread);
sched();
}
// Free a process's page table, and free the
// physical memory it refers to.
void freepagetable(pagetable_t pagetable, uint64 max_page)
{
uvmunmap(pagetable, TRAMPOLINE, 1, 0);
uvmfree(pagetable, max_page);
}
void freethread(struct thread *t)
{
pagetable_t pt = t->process->pagetable;
// fill with junk
memset((void *)t->trapframe, 6, TRAP_PAGE_SIZE);
memset(&t->context, 6, sizeof(t->context));
uvmunmap(pt, get_thread_trapframe_va(t->tid), 1, 0);
uvmunmap(pt, get_thread_ustack_base_va(t), USTACK_SIZE / PAGE_SIZE, 1);
}
void freeproc(struct proc *p)
{
for (int tid = 0; tid < NTHREAD; ++tid) {
struct thread *t = &p->threads[tid];
if (t->state != T_UNUSED && t->state != EXITED) {
freethread(t);
}
t->state = T_UNUSED;
}
if (p->pagetable)
freepagetable(p->pagetable, p->max_page);
p->pagetable = 0;
p->max_page = 0;
p->ustack_base = 0;
// close every file still held by the process
for (int i = 0; i < FD_BUFFER_SIZE; i++) {
if (p->files[i] != NULL) {
fileclose(p->files[i]);
p->files[i] = NULL;
}
}
p->state = P_UNUSED;
}
int fork()
{
struct proc *np;
struct proc *p = curr_proc();
int i;
// Allocate process.
if ((np = allocproc()) == 0) {
panic("allocproc\n");
}
// Copy user memory from parent to child.
if (uvmcopy(p->pagetable, np->pagetable, p->max_page) < 0) {
panic("uvmcopy\n");
}
np->max_page = p->max_page;
np->ustack_base = p->ustack_base;
// Copy file table to new proc
for (i = 0; i < FD_BUFFER_SIZE; i++) {
if (p->files[i] != NULL) {
// TODO: f->type == STDIO ?
p->files[i]->ref++;
np->files[i] = p->files[i];
}
}
np->parent = p;
// currently only copy main thread
struct thread *nt = &np->threads[allocthread(np, 0, 0)],
*t = &p->threads[0];
// copy saved user registers.
*(nt->trapframe) = *(t->trapframe);
// Cause fork to return 0 in the child.
nt->trapframe->a0 = 0;
nt->state = RUNNABLE;
add_task(nt);
return np->pid;
}
int push_argv(struct proc *p, char **argv)
{
uint64 argc, ustack[MAX_ARG_NUM + 1];
// only push to main thread
struct thread *t = &p->threads[0];
uint64 sp = t->ustack + USTACK_SIZE, spb = t->ustack;
debugf("[push] sp: %p, spb: %p", sp, spb);
// Push argument strings, prepare rest of stack in ustack.
for (argc = 0; argv[argc]; argc++) {
if (argc >= MAX_ARG_NUM)
panic("too many args!");
sp -= strlen(argv[argc]) + 1;
sp -= sp % 16; // riscv sp must be 16-byte aligned
if (sp < spb) {
panic("uset stack overflow!");
}
if (copyout(p->pagetable, sp, argv[argc],
strlen(argv[argc]) + 1) < 0) {
panic("copy argv failed!");
}
ustack[argc] = sp;
}
ustack[argc] = 0;
// push the array of argv[] pointers.
sp -= (argc + 1) * sizeof(uint64);
sp -= sp % 16;
if (sp < spb) {
panic("uset stack overflow!");
}
if (copyout(p->pagetable, sp, (char *)ustack,
(argc + 1) * sizeof(uint64)) < 0) {
panic("copy argc failed!");
}
t->trapframe->a1 = sp;
t->trapframe->sp = sp;
// clear files ?
return argc; // this ends up in a0, the first argument to main(argc, argv)
}
int exec(char *path, char **argv)
{
infof("exec : %s\n", path);
struct inode *ip;
struct proc *p = curr_proc();
if ((ip = namei(path)) == 0) {
errorf("invalid file name %s\n", path);
return -1;
}
// free current main thread's ustack and trapframe
struct thread *t = curr_thread();
freethread(t);
t->state = T_UNUSED;
uvmunmap(p->pagetable, 0, p->max_page, 1);
bin_loader(ip, p);
iput(ip);
t->state = RUNNING;
return push_argv(p, argv);
}
int wait(int pid, int *code)
{
struct proc *np;
int havekids;
struct proc *p = curr_proc();
struct thread *t = curr_thread();
for (;;) {
// Scan through table looking for exited children.
havekids = 0;
for (np = pool; np < &pool[NPROC]; np++) {
if (np->state != P_UNUSED && np->parent == p &&
(pid <= 0 || np->pid == pid)) {
havekids = 1;
if (np->state == ZOMBIE) {
// Found one.
np->state = P_UNUSED;
pid = np->pid;
*code = np->exit_code;
memset((void *)np->threads[0].kstack, 9,
KSTACK_SIZE);
return pid;
}
}
}
if (!havekids) {
return -1;
}
t->state = RUNNABLE;
add_task(t);
sched();
}
}
// Exit the current process.
void exit(int code)
{
struct proc *p = curr_proc();
struct thread *t = curr_thread();
t->exit_code = code;
t->state = EXITED;
int tid = t->tid;
debugf("thread exit with %d", code);
freethread(t);
if (tid == 0) {
p->exit_code = code;
freeproc(p);
debugf("proc exit");
if (p->parent != NULL) {
// Parent should `wait`
p->state = ZOMBIE;
}
// Set the `parent` of all children to NULL
struct proc *np;
for (np = pool; np < &pool[NPROC]; np++) {
if (np->parent == p) {
np->parent = NULL;
}
}
}
sched();
}
int fdalloc(struct file *f)
{
debugf("debugf f = %p, type = %d", f, f->type);
struct proc *p = curr_proc();
for (int i = 0; i < FD_BUFFER_SIZE; ++i) {
if (p->files[i] == NULL) {
p->files[i] = f;
debugf("debugf fd = %d, f = %p", i, p->files[i]);
return i;
}
}
return -1;
}
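One detail that is easy to miss in the code above: the scheduler queue stores plain integers, and task_to_id()/id_to_task() encode a thread as pool_index * NTHREAD + tid. A small sanity sketch of that round trip, assuming it is placed in proc.c where the pool array is visible:

void task_id_demo()
{
	struct thread *t = &pool[3].threads[5]; // arbitrary slot for illustration
	int id = task_to_id(t); // 3 * NTHREAD + 5
	if (id_to_task(id) != t)
		panic("task id encoding is not invertible");
}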

95
os/proc.h Normal file
View File

@ -0,0 +1,95 @@
#ifndef PROC_H
#define PROC_H
#include "riscv.h"
#include "types.h"
#include "sync.h"
#define NPROC (128)
#define NTHREAD (16)
#define FD_BUFFER_SIZE (16)
#define LOCK_POOL_SIZE (8)
struct file;
// Saved registers for kernel context switches.
struct context {
uint64 ra;
uint64 sp;
// callee-saved
uint64 s0;
uint64 s1;
uint64 s2;
uint64 s3;
uint64 s4;
uint64 s5;
uint64 s6;
uint64 s7;
uint64 s8;
uint64 s9;
uint64 s10;
uint64 s11;
};
enum threadstate { T_UNUSED, T_USED, SLEEPING, RUNNABLE, RUNNING, EXITED };
struct thread {
enum threadstate state; // Thread state
int tid; // Thread ID
struct proc *process;
uint64 ustack; // Virtual address of user stack
uint64 kstack; // Virtual address of kernel stack
struct trapframe *trapframe; // data page for trampoline.S
struct context context; // swtch() here to run process
uint64 exit_code;
};
enum procstate { P_UNUSED, P_USED, ZOMBIE };
// Per-process state
struct proc {
enum procstate state; // Process state
int pid; // Process ID
pagetable_t pagetable; // User page table
uint64 max_page;
uint64 ustack_base; // Virtual address of user stack base
struct proc *parent; // Parent process
uint64 exit_code;
// File descriptor table, used to record the files opened by the process
struct file *files[FD_BUFFER_SIZE];
struct thread threads[NTHREAD];
// Use a dummy increasing id as the index into each lock pool, since there is no destroy method yet
uint next_mutex_id, next_semaphore_id, next_condvar_id;
struct mutex mutex_pool[LOCK_POOL_SIZE];
struct semaphore semaphore_pool[LOCK_POOL_SIZE];
struct condvar condvar_pool[LOCK_POOL_SIZE];
// LAB5: (1) Define your variables for deadlock detect here.
// You may need a flag to record if detection enabled,
// and some arrays for detection algorithm.
};
int cpuid();
struct proc *curr_proc();
struct thread *curr_thread(void);
void exit(int);
void proc_init();
void scheduler() __attribute__((noreturn));
void sched();
void yield();
int fork();
int exec(char *, char **);
int wait(int, int *);
void add_task(struct thread *);
struct thread *id_to_task(int);
int task_to_id(struct thread *);
struct thread *fetch_task();
struct proc *allocproc();
int allocthread(struct proc *p, uint64 entry, int alloc_user_res);
uint64 get_thread_trapframe_va(int tid);
int fdalloc(struct file *);
int init_stdio(struct proc *);
int push_argv(struct proc *, char **);
// swtch.S
void swtch(struct context *, struct context *);
#endif // PROC_H

33
os/queue.c Normal file
View File

@ -0,0 +1,33 @@
#include "queue.h"
#include "defs.h"
int process_queue_data[QUEUE_SIZE];
void init_queue(struct queue *q, int size, int *data)
{
q->size = size;
q->data = data;
q->front = q->tail = 0;
q->empty = 1;
}
void push_queue(struct queue *q, int value)
{
if (!q->empty && q->front == q->tail) {
panic("queue shouldn't be overflow");
}
q->empty = 0;
q->data[q->tail] = value;
q->tail = (q->tail + 1) % q->size;
}
int pop_queue(struct queue *q)
{
if (q->empty)
return -1;
int value = q->data[q->front];
q->front = (q->front + 1) % q->size;
if (q->front == q->tail)
q->empty = 1;
return value;
}
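A minimal FIFO round trip with the circular queue above; process_queue_data is the scheduler's backing array declared in queue.h, reused here only for illustration:

void queue_demo()
{
	struct queue q;
	init_queue(&q, QUEUE_SIZE, process_queue_data); // front == tail, empty == 1
	push_queue(&q, 42); // tail advances, empty flag cleared
	push_queue(&q, 7);
	int a = pop_queue(&q); // 42 (FIFO order)
	int b = pop_queue(&q); // 7; the queue is empty again
	int c = pop_queue(&q); // -1 signals "empty"
	(void)a; (void)b; (void)c;
}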

21
os/queue.h Normal file
View File

@ -0,0 +1,21 @@
#ifndef QUEUE_H
#define QUEUE_H
#define QUEUE_SIZE (1024)
// queue data for process scheduling only;
// wait queues of mutex/semaphore/condvar provide their own data arrays
extern int process_queue_data[QUEUE_SIZE];
struct queue {
int *data;
int size;
int front;
int tail;
int empty;
};
void init_queue(struct queue *, int, int *);
void push_queue(struct queue *, int);
int pop_queue(struct queue *);
#endif // QUEUE_H

324
os/riscv.h Normal file
View File

@ -0,0 +1,324 @@
#ifndef RISCV_H
#define RISCV_H
#include "types.h"
// which hart (core) is this?
static inline uint64 r_mhartid()
{
uint64 x;
asm volatile("csrr %0, mhartid" : "=r"(x));
return x;
}
// Machine Status Register, mstatus
#define MSTATUS_MPP_MASK (3L << 11) // previous mode.
#define MSTATUS_MPP_M (3L << 11)
#define MSTATUS_MPP_S (1L << 11)
#define MSTATUS_MPP_U (0L << 11)
#define MSTATUS_MIE (1L << 3) // machine-mode interrupt enable.
static inline uint64 r_mstatus()
{
uint64 x;
asm volatile("csrr %0, mstatus" : "=r"(x));
return x;
}
static inline void w_mstatus(uint64 x)
{
asm volatile("csrw mstatus, %0" : : "r"(x));
}
// machine exception program counter, holds the
// instruction address to which a return from
// exception will go.
static inline void w_mepc(uint64 x)
{
asm volatile("csrw mepc, %0" : : "r"(x));
}
// Supervisor Status Register, sstatus
#define SSTATUS_SPP (1L << 8) // Previous mode, 1=Supervisor, 0=User
#define SSTATUS_SPIE (1L << 5) // Supervisor Previous Interrupt Enable
#define SSTATUS_UPIE (1L << 4) // User Previous Interrupt Enable
#define SSTATUS_SIE (1L << 1) // Supervisor Interrupt Enable
#define SSTATUS_UIE (1L << 0) // User Interrupt Enable
static inline uint64 r_sstatus()
{
uint64 x;
asm volatile("csrr %0, sstatus" : "=r"(x));
return x;
}
static inline void w_sstatus(uint64 x)
{
asm volatile("csrw sstatus, %0" : : "r"(x));
}
// Supervisor Interrupt Pending
static inline uint64 r_sip()
{
uint64 x;
asm volatile("csrr %0, sip" : "=r"(x));
return x;
}
static inline void w_sip(uint64 x)
{
asm volatile("csrw sip, %0" : : "r"(x));
}
// Supervisor Interrupt Enable
#define SIE_SEIE (1L << 9) // external
#define SIE_STIE (1L << 5) // timer
#define SIE_SSIE (1L << 1) // software
static inline uint64 r_sie()
{
uint64 x;
asm volatile("csrr %0, sie" : "=r"(x));
return x;
}
static inline void w_sie(uint64 x)
{
asm volatile("csrw sie, %0" : : "r"(x));
}
// Machine-mode Interrupt Enable
#define MIE_MEIE (1L << 11) // external
#define MIE_MTIE (1L << 7) // timer
#define MIE_MSIE (1L << 3) // software
static inline uint64 r_mie()
{
uint64 x;
asm volatile("csrr %0, mie" : "=r"(x));
return x;
}
static inline void w_mie(uint64 x)
{
asm volatile("csrw mie, %0" : : "r"(x));
}
// machine exception program counter, holds the
// instruction address to which a return from
// exception will go.
static inline void w_sepc(uint64 x)
{
asm volatile("csrw sepc, %0" : : "r"(x));
}
static inline uint64 r_sepc()
{
uint64 x;
asm volatile("csrr %0, sepc" : "=r"(x));
return x;
}
// Machine Exception Delegation
static inline uint64 r_medeleg()
{
uint64 x;
asm volatile("csrr %0, medeleg" : "=r"(x));
return x;
}
static inline void w_medeleg(uint64 x)
{
asm volatile("csrw medeleg, %0" : : "r"(x));
}
// Machine Interrupt Delegation
static inline uint64 r_mideleg()
{
uint64 x;
asm volatile("csrr %0, mideleg" : "=r"(x));
return x;
}
static inline void w_mideleg(uint64 x)
{
asm volatile("csrw mideleg, %0" : : "r"(x));
}
// Supervisor Trap-Vector Base Address
// low two bits are mode.
static inline void w_stvec(uint64 x)
{
asm volatile("csrw stvec, %0" : : "r"(x));
}
static inline uint64 r_stvec()
{
uint64 x;
asm volatile("csrr %0, stvec" : "=r"(x));
return x;
}
// Machine-mode interrupt vector
static inline void w_mtvec(uint64 x)
{
asm volatile("csrw mtvec, %0" : : "r"(x));
}
// use riscv's sv39 page table scheme.
#define SATP_SV39 (8L << 60)
#define MAKE_SATP(pagetable) (SATP_SV39 | (((uint64)pagetable) >> 12))
// supervisor address translation and protection;
// holds the address of the page table.
static inline void w_satp(uint64 x)
{
asm volatile("csrw satp, %0" : : "r"(x));
}
static inline uint64 r_satp()
{
uint64 x;
asm volatile("csrr %0, satp" : "=r"(x));
return x;
}
// Supervisor Scratch register, for early trap handler in trampoline.S.
static inline void w_sscratch(uint64 x)
{
asm volatile("csrw sscratch, %0" : : "r"(x));
}
static inline void w_mscratch(uint64 x)
{
asm volatile("csrw mscratch, %0" : : "r"(x));
}
// Supervisor Trap Cause
static inline uint64 r_scause()
{
uint64 x;
asm volatile("csrr %0, scause" : "=r"(x));
return x;
}
// Supervisor Trap Value
static inline uint64 r_stval()
{
uint64 x;
asm volatile("csrr %0, stval" : "=r"(x));
return x;
}
// Machine-mode Counter-Enable
static inline void w_mcounteren(uint64 x)
{
asm volatile("csrw mcounteren, %0" : : "r"(x));
}
static inline uint64 r_mcounteren()
{
uint64 x;
asm volatile("csrr %0, mcounteren" : "=r"(x));
return x;
}
// machine-mode cycle counter
static inline uint64 r_time()
{
uint64 x;
asm volatile("csrr %0, time" : "=r"(x));
return x;
}
// enable device interrupts
static inline void intr_on()
{
w_sstatus(r_sstatus() | SSTATUS_SIE);
}
// disable device interrupts
static inline void intr_off()
{
w_sstatus(r_sstatus() & ~SSTATUS_SIE);
}
// are device interrupts enabled?
static inline int intr_get()
{
uint64 x = r_sstatus();
return (x & SSTATUS_SIE) != 0;
}
static inline uint64 r_sp()
{
uint64 x;
asm volatile("mv %0, sp" : "=r"(x));
return x;
}
// read and write tp, the thread pointer, which holds
// this core's hartid (core number), the index into cpus[].
static inline uint64 r_tp()
{
uint64 x;
asm volatile("mv %0, tp" : "=r"(x));
return x;
}
static inline void w_tp(uint64 x)
{
asm volatile("mv tp, %0" : : "r"(x));
}
static inline uint64 r_ra()
{
uint64 x;
asm volatile("mv %0, ra" : "=r"(x));
return x;
}
// flush the TLB.
static inline void sfence_vma()
{
// the zero, zero means flush all TLB entries.
asm volatile("sfence.vma zero, zero");
}
#define PGSIZE 4096 // bytes per page
#define PGSHIFT 12 // bits of offset within a page
#define PGROUNDUP(sz) (((sz) + PGSIZE - 1) & ~(PGSIZE - 1))
#define PGROUNDDOWN(a) (((a)) & ~(PGSIZE - 1))
#define PGALIGNED(a) (((a) & (PGSIZE - 1)) == 0)
#define PTE_V (1L << 0) // valid
#define PTE_R (1L << 1)
#define PTE_W (1L << 2)
#define PTE_X (1L << 3)
#define PTE_U (1L << 4) // 1 -> user can access
// shift a physical address to the right place for a PTE.
#define PA2PTE(pa) ((((uint64)pa) >> 12) << 10)
#define PTE2PA(pte) (((pte) >> 10) << 12)
#define PTE_FLAGS(pte) ((pte)&0x3FF)
// extract the three 9-bit page table indices from a virtual address.
#define PXMASK 0x1FF // 9 bits
#define PXSHIFT(level) (PGSHIFT + (9 * (level)))
#define PX(level, va) ((((uint64)(va)) >> PXSHIFT(level)) & PXMASK)
// one beyond the highest possible virtual address.
// MAXVA is actually one bit less than the max allowed by
// Sv39, to avoid having to sign-extend virtual addresses
// that have the high bit set.
#define MAXVA (1L << (9 + 9 + 9 + 12 - 1))
typedef uint64 pte_t;
typedef uint64 pde_t;
typedef uint64 *pagetable_t; // 512 PTEs
#endif // RISCV_H
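A small sketch of how the PX()/PGROUNDDOWN macros above decompose an Sv39 virtual address; the concrete address is arbitrary:

void sv39_demo()
{
	uint64 va = 0x40403123UL; // (1 << 30) + (2 << 21) + (3 << 12) + 0x123
	uint64 vpn2 = PX(2, va); // 1: index into the root page table
	uint64 vpn1 = PX(1, va); // 2: index into the second-level table
	uint64 vpn0 = PX(0, va); // 3: index into the leaf table
	uint64 off = va & (PGSIZE - 1); // 0x123: offset within the 4 KiB page
	uint64 base = PGROUNDDOWN(va); // 0x40403000: page-aligned base
	(void)vpn2; (void)vpn1; (void)vpn0; (void)off; (void)base;
}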

44
os/sbi.c Normal file
View File

@ -0,0 +1,44 @@
#include "sbi.h"
#include "types.h"
const uint64 SBI_SET_TIMER = 0;
const uint64 SBI_CONSOLE_PUTCHAR = 1;
const uint64 SBI_CONSOLE_GETCHAR = 2;
const uint64 SBI_CLEAR_IPI = 3;
const uint64 SBI_SEND_IPI = 4;
const uint64 SBI_REMOTE_FENCE_I = 5;
const uint64 SBI_REMOTE_SFENCE_VMA = 6;
const uint64 SBI_REMOTE_SFENCE_VMA_ASID = 7;
const uint64 SBI_SHUTDOWN = 8;
int inline sbi_call(uint64 which, uint64 arg0, uint64 arg1, uint64 arg2)
{
register uint64 a0 asm("a0") = arg0;
register uint64 a1 asm("a1") = arg1;
register uint64 a2 asm("a2") = arg2;
register uint64 a7 asm("a7") = which;
asm volatile("ecall"
: "=r"(a0)
: "r"(a0), "r"(a1), "r"(a2), "r"(a7)
: "memory");
return a0;
}
void console_putchar(int c)
{
sbi_call(SBI_CONSOLE_PUTCHAR, c, 0, 0);
}
int console_getchar()
{
return sbi_call(SBI_CONSOLE_GETCHAR, 0, 0, 0);
}
void shutdown()
{
sbi_call(SBI_SHUTDOWN, 0, 0, 0);
}
void set_timer(uint64 stime)
{
sbi_call(SBI_SET_TIMER, stime, 0, 0);
}

11
os/sbi.h Normal file
View File

@ -0,0 +1,11 @@
#ifndef SBI_H
#define SBI_H
#include "types.h"
void console_putchar(int);
int console_getchar();
void shutdown();
void set_timer(uint64 stime);
#endif // SBI_H

100
os/string.c Normal file
View File

@ -0,0 +1,100 @@
#include "string.h"
#include "types.h"
void *memset(void *dst, int c, uint n)
{
char *cdst = (char *)dst;
int i;
for (i = 0; i < n; i++) {
cdst[i] = c;
}
return dst;
}
int memcmp(const void *v1, const void *v2, uint n)
{
const uchar *s1, *s2;
s1 = v1;
s2 = v2;
while (n-- > 0) {
if (*s1 != *s2)
return *s1 - *s2;
s1++, s2++;
}
return 0;
}
void *memmove(void *dst, const void *src, uint n)
{
const char *s;
char *d;
s = src;
d = dst;
if (s < d && s + n > d) {
s += n;
d += n;
while (n-- > 0)
*--d = *--s;
} else
while (n-- > 0)
*d++ = *s++;
return dst;
}
// memcpy exists to placate GCC. Use memmove.
void *memcpy(void *dst, const void *src, uint n)
{
return memmove(dst, src, n);
}
int strncmp(const char *p, const char *q, uint n)
{
while (n > 0 && *p && *p == *q)
n--, p++, q++;
if (n == 0)
return 0;
return (uchar)*p - (uchar)*q;
}
char *strncpy(char *s, const char *t, int n)
{
char *os;
os = s;
while (n-- > 0 && (*s++ = *t++) != 0)
;
while (n-- > 0)
*s++ = 0;
return os;
}
// Like strncpy but guaranteed to NUL-terminate.
char *safestrcpy(char *s, const char *t, int n)
{
char *os;
os = s;
if (n <= 0)
return os;
while (--n > 0 && (*s++ = *t++) != 0)
;
*s = 0;
return os;
}
int strlen(const char *s)
{
int n;
for (n = 0; s[n]; n++)
;
return n;
}
void dummy(int _, ...)
{
}

14
os/string.h Normal file
View File

@ -0,0 +1,14 @@
#ifndef STRING_H
#define STRING_H
#include "types.h"
int memcmp(const void *, const void *, uint);
void *memmove(void *, const void *, uint);
void *memset(void *, int, uint);
char *safestrcpy(char *, const char *, int);
int strlen(const char *);
int strncmp(const char *, const char *, uint);
char *strncpy(char *, const char *, int);
#endif // STRING_H

40
os/switch.S Normal file
View File

@ -0,0 +1,40 @@
# Context switch
#
# void swtch(struct context *old, struct context *new);
#
# Save current registers in old. Load from new.
.globl swtch
swtch:
sd ra, 0(a0)
sd sp, 8(a0)
sd s0, 16(a0)
sd s1, 24(a0)
sd s2, 32(a0)
sd s3, 40(a0)
sd s4, 48(a0)
sd s5, 56(a0)
sd s6, 64(a0)
sd s7, 72(a0)
sd s8, 80(a0)
sd s9, 88(a0)
sd s10, 96(a0)
sd s11, 104(a0)
ld ra, 0(a1)
ld sp, 8(a1)
ld s0, 16(a1)
ld s1, 24(a1)
ld s2, 32(a1)
ld s3, 40(a1)
ld s4, 48(a1)
ld s5, 56(a1)
ld s6, 64(a1)
ld s7, 72(a1)
ld s8, 80(a1)
ld s9, 88(a1)
ld s10, 96(a1)
ld s11, 104(a1)
ret

150
os/sync.c Normal file
View File

@ -0,0 +1,150 @@
#include "defs.h"
#include "proc.h"
#include "sync.h"
struct mutex *mutex_create(int blocking)
{
struct proc *p = curr_proc();
if (p->next_mutex_id >= LOCK_POOL_SIZE) {
return NULL;
}
struct mutex *m = &p->mutex_pool[p->next_mutex_id];
p->next_mutex_id++;
m->blocking = blocking;
m->locked = 0;
if (blocking) {
// blocking mutex need wait queue but spinning mutex not
init_queue(&m->wait_queue, WAIT_QUEUE_MAX_LENGTH,
m->_wait_queue_data);
}
return m;
}
void mutex_lock(struct mutex *m)
{
if (!m->locked) {
m->locked = 1;
debugf("lock a free mutex");
return;
}
if (!m->blocking) {
// spin mutex will just poll
debugf("try to lock spin mutex");
while (m->locked) {
yield();
}
m->locked = 1; // take ownership once the lock is free
debugf("lock spin mutex after some trials");
return;
}
// blocking mutex will wait in the queue
struct thread *t = curr_thread();
push_queue(&m->wait_queue, task_to_id(t));
// don't forget to change thread state to SLEEPING
t->state = SLEEPING;
debugf("block to wait for mutex");
sched();
debugf("blocking mutex passed to me");
// the lock was handed to us still locked (locked == 1), so there is nothing left to do
}
void mutex_unlock(struct mutex *m)
{
if (m->blocking) {
struct thread *t = id_to_task(pop_queue(&m->wait_queue));
if (t == NULL) {
// Without waiting thread, just release the lock
m->locked = 0;
debugf("blocking mutex released");
} else {
// Or we should give lock to next thread
t->state = RUNNABLE;
add_task(t);
debugf("blocking mutex passed to thread %d", t->tid);
}
} else {
m->locked = 0;
debugf("spin mutex unlocked");
}
}
struct semaphore *semaphore_create(int count)
{
struct proc *p = curr_proc();
if (p->next_semaphore_id >= LOCK_POOL_SIZE) {
return NULL;
}
struct semaphore *s = &p->semaphore_pool[p->next_semaphore_id];
p->next_semaphore_id++;
s->count = count;
init_queue(&s->wait_queue, WAIT_QUEUE_MAX_LENGTH, s->_wait_queue_data);
return s;
}
void semaphore_up(struct semaphore *s)
{
s->count++;
if (s->count <= 0) {
// count <= 0 after up means wait queue not empty
struct thread *t = id_to_task(pop_queue(&s->wait_queue));
if (t == NULL) {
panic("count <= 0 after up but wait queue is empty?");
}
t->state = RUNNABLE;
add_task(t);
debugf("semaphore up and notify another task");
}
debugf("semaphore up from %d to %d", s->count - 1, s->count);
}
void semaphore_down(struct semaphore *s)
{
s->count--;
if (s->count < 0) {
// s->count < 0 means need to wait (state=SLEEPING)
struct thread *t = curr_thread();
push_queue(&s->wait_queue, task_to_id(t));
t->state = SLEEPING;
debugf("semaphore down to %d and wait...", s->count);
sched();
debugf("semaphore up to %d and wake up", s->count);
}
debugf("finish semaphore_down with count = %d", s->count);
}
struct condvar *condvar_create()
{
struct proc *p = curr_proc();
if (p->next_condvar_id >= LOCK_POOL_SIZE) {
return NULL;
}
struct condvar *c = &p->condvar_pool[p->next_condvar_id];
p->next_condvar_id++;
init_queue(&c->wait_queue, WAIT_QUEUE_MAX_LENGTH, c->_wait_queue_data);
return c;
}
void cond_signal(struct condvar *cond)
{
struct thread *t = id_to_task(pop_queue(&cond->wait_queue));
if (t) {
t->state = RUNNABLE;
add_task(t);
debugf("signal wake up thread %d", t->tid);
} else {
debugf("dummpy signal");
}
}
void cond_wait(struct condvar *cond, struct mutex *m)
{
// conditional variable will unlock the mutex first and lock it again on return
mutex_unlock(m);
struct thread *t = curr_thread();
// now just wait for cond
push_queue(&cond->wait_queue, task_to_id(t));
t->state = SLEEPING;
debugf("wait for cond");
sched();
debugf("wake up from cond");
mutex_lock(m);
}
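Worth noting about the code above: a blocking mutex is handed over still locked, and cond_wait only returns after it has reacquired the mutex, so waiters must re-check their condition in a loop. A hypothetical in-kernel usage sketch (the mutex m, condvar cv and flag ready are assumed to have been set up elsewhere via mutex_create/condvar_create):

static struct mutex *m;
static struct condvar *cv;
static int ready;

void waiter()
{
	mutex_lock(m);
	while (!ready)
		cond_wait(cv, m); // releases m, sleeps, relocks m when signalled
	mutex_unlock(m);
}

void notifier()
{
	mutex_lock(m);
	ready = 1;
	cond_signal(cv); // wakes at most one waiter
	mutex_unlock(m);
}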

38
os/sync.h Normal file
View File

@ -0,0 +1,38 @@
#ifndef SYNC_H
#define SYNC_H
#include "queue.h"
#include "types.h"
#define WAIT_QUEUE_MAX_LENGTH 16
struct mutex {
uint blocking;
uint locked;
struct queue wait_queue;
// "alloc" data for wait queue
int _wait_queue_data[WAIT_QUEUE_MAX_LENGTH];
};
struct semaphore {
int count;
struct queue wait_queue;
// "alloc" data for wait queue
int _wait_queue_data[WAIT_QUEUE_MAX_LENGTH];
};
struct condvar {
struct queue wait_queue;
// "alloc" data for wait queue
int _wait_queue_data[WAIT_QUEUE_MAX_LENGTH];
};
struct mutex *mutex_create(int blocking);
void mutex_lock(struct mutex *);
void mutex_unlock(struct mutex *);
struct semaphore *semaphore_create(int count);
void semaphore_up(struct semaphore *);
void semaphore_down(struct semaphore *);
struct condvar *condvar_create();
void cond_signal(struct condvar *);
void cond_wait(struct condvar *, struct mutex *);
#endif

466
os/syscall.c Normal file
View File

@ -0,0 +1,466 @@
#include "console.h"
#include "defs.h"
#include "loader.h"
#include "sync.h"
#include "syscall.h"
#include "syscall_ids.h"
#include "timer.h"
#include "trap.h"
uint64 console_write(uint64 va, uint64 len)
{
struct proc *p = curr_proc();
char str[MAX_STR_LEN];
int size = copyinstr(p->pagetable, str, va, MIN(len, MAX_STR_LEN));
tracef("write size = %d", size);
for (int i = 0; i < size; ++i) {
console_putchar(str[i]);
}
return len;
}
uint64 console_read(uint64 va, uint64 len)
{
struct proc *p = curr_proc();
char str[MAX_STR_LEN];
tracef("read size = %d", len);
for (int i = 0; i < len; ++i) {
int c = consgetc();
str[i] = c;
}
copyout(p->pagetable, va, str, len);
return len;
}
uint64 sys_write(int fd, uint64 va, uint64 len)
{
if (fd < 0 || fd >= FD_BUFFER_SIZE)
return -1;
struct proc *p = curr_proc();
struct file *f = p->files[fd];
if (f == NULL) {
errorf("invalid fd %d\n", fd);
return -1;
}
switch (f->type) {
case FD_STDIO:
return console_write(va, len);
case FD_PIPE:
return pipewrite(f->pipe, va, len);
case FD_INODE:
return inodewrite(f, va, len);
default:
panic("unknown file type %d\n", f->type);
}
}
uint64 sys_read(int fd, uint64 va, uint64 len)
{
if (fd < 0 || fd >= FD_BUFFER_SIZE)
return -1;
struct proc *p = curr_proc();
struct file *f = p->files[fd];
if (f == NULL) {
errorf("invalid fd %d\n", fd);
return -1;
}
switch (f->type) {
case FD_STDIO:
return console_read(va, len);
case FD_PIPE:
return piperead(f->pipe, va, len);
case FD_INODE:
return inoderead(f, va, len);
default:
panic("unknown file type %d\n", f->type);
}
}
__attribute__((noreturn)) void sys_exit(int code)
{
exit(code);
__builtin_unreachable();
}
uint64 sys_sched_yield()
{
yield();
return 0;
}
uint64 sys_gettimeofday(uint64 val, int _tz)
{
struct proc *p = curr_proc();
uint64 cycle = get_cycle();
TimeVal t;
t.sec = cycle / CPU_FREQ;
t.usec = (cycle % CPU_FREQ) * 1000000 / CPU_FREQ;
copyout(p->pagetable, val, (char *)&t, sizeof(TimeVal));
return 0;
}
uint64 sys_getpid()
{
return curr_proc()->pid;
}
uint64 sys_getppid()
{
struct proc *p = curr_proc();
return p->parent == NULL ? IDLE_PID : p->parent->pid;
}
uint64 sys_clone()
{
debugf("fork!");
return fork();
}
static inline uint64 fetchaddr(pagetable_t pagetable, uint64 va)
{
uint64 *addr = (uint64 *)useraddr(pagetable, va);
return *addr;
}
uint64 sys_exec(uint64 path, uint64 uargv)
{
struct proc *p = curr_proc();
char name[MAX_STR_LEN];
copyinstr(p->pagetable, name, path, MAX_STR_LEN);
uint64 arg;
static char strpool[MAX_ARG_NUM][MAX_STR_LEN];
char *argv[MAX_ARG_NUM];
int i;
for (i = 0; i < MAX_ARG_NUM - 1 && uargv &&
(arg = fetchaddr(p->pagetable, uargv));
uargv += sizeof(char *), i++) {
copyinstr(p->pagetable, (char *)strpool[i], arg, MAX_STR_LEN);
argv[i] = (char *)strpool[i];
}
argv[i] = NULL;
return exec(name, (char **)argv);
}
uint64 sys_wait(int pid, uint64 va)
{
struct proc *p = curr_proc();
int *code = (int *)useraddr(p->pagetable, va);
return wait(pid, code);
}
uint64 sys_pipe(uint64 fdarray)
{
struct proc *p = curr_proc();
uint64 fd0, fd1;
struct file *f0, *f1;
f0 = filealloc();
f1 = filealloc();
if (f0 == NULL || f1 == NULL) {
if (f0)
fileclose(f0);
if (f1)
fileclose(f1);
return -1;
}
if (pipealloc(f0, f1) < 0)
goto err0;
fd0 = fdalloc(f0);
fd1 = fdalloc(f1);
if ((int)fd0 < 0 || (int)fd1 < 0)
goto err0;
if (copyout(p->pagetable, fdarray, (char *)&fd0, sizeof(fd0)) < 0 ||
copyout(p->pagetable, fdarray + sizeof(uint64), (char *)&fd1,
sizeof(fd1)) < 0) {
goto err1;
}
return 0;
err1:
p->files[fd0] = 0;
p->files[fd1] = 0;
err0:
fileclose(f0);
fileclose(f1);
return -1;
}
uint64 sys_openat(uint64 va, uint64 omode, uint64 _flags)
{
struct proc *p = curr_proc();
char path[200];
copyinstr(p->pagetable, path, va, 200);
return fileopen(path, omode);
}
uint64 sys_close(int fd)
{
if (fd < 0 || fd >= FD_BUFFER_SIZE)
return -1;
struct proc *p = curr_proc();
struct file *f = p->files[fd];
if (f == NULL) {
errorf("invalid fd %d", fd);
return -1;
}
fileclose(f);
p->files[fd] = 0;
return 0;
}
int sys_thread_create(uint64 entry, uint64 arg)
{
struct proc *p = curr_proc();
int tid = allocthread(p, entry, 1);
if (tid < 0) {
errorf("fail to create thread");
return -1;
}
struct thread *t = &p->threads[tid];
t->trapframe->a0 = arg;
t->state = RUNNABLE;
add_task(t);
return tid;
}
int sys_gettid()
{
return curr_thread()->tid;
}
int sys_waittid(int tid)
{
if (tid < 0 || tid >= NTHREAD) {
errorf("unexpected tid %d", tid);
return -1;
}
struct thread *t = &curr_proc()->threads[tid];
if (t->state == T_UNUSED || tid == curr_thread()->tid) {
return -1;
}
if (t->state != EXITED) {
return -2;
}
memset((void *)t->kstack, 7, KSTACK_SIZE);
t->tid = -1;
t->state = T_UNUSED;
return t->exit_code;
}
/*
* LAB5: (3) The TA's reference implementation defines the function
* int deadlock_detect(const int available[LOCK_POOL_SIZE],
* const int allocation[NTHREAD][LOCK_POOL_SIZE],
* const int request[NTHREAD][LOCK_POOL_SIZE])
* here, shared by both the mutex and semaphore checks; you can follow
* this idea or just ignore it.
*/
int sys_mutex_create(int blocking)
{
struct mutex *m = mutex_create(blocking);
if (m == NULL) {
errorf("fail to create mutex: out of resource");
return -1;
}
// LAB5: (4-1) You may want to maintain some variables for detect here
int mutex_id = m - curr_proc()->mutex_pool;
debugf("create mutex %d", mutex_id);
return mutex_id;
}
int sys_mutex_lock(int mutex_id)
{
if (mutex_id < 0 || mutex_id >= curr_proc()->next_mutex_id) {
errorf("Unexpected mutex id %d", mutex_id);
return -1;
}
// LAB5: (4-1) You may want to maintain some variables for detect
// or call your detect algorithm here
mutex_lock(&curr_proc()->mutex_pool[mutex_id]);
return 0;
}
int sys_mutex_unlock(int mutex_id)
{
if (mutex_id < 0 || mutex_id >= curr_proc()->next_mutex_id) {
errorf("Unexpected mutex id %d", mutex_id);
return -1;
}
// LAB5: (4-1) You may want to maintain some variables for detect here
mutex_unlock(&curr_proc()->mutex_pool[mutex_id]);
return 0;
}
int sys_semaphore_create(int res_count)
{
struct semaphore *s = semaphore_create(res_count);
if (s == NULL) {
errorf("fail to create semaphore: out of resource");
return -1;
}
// LAB5: (4-2) You may want to maintain some variables for detect here
int sem_id = s - curr_proc()->semaphore_pool;
debugf("create semaphore %d", sem_id);
return sem_id;
}
int sys_semaphore_up(int semaphore_id)
{
if (semaphore_id < 0 ||
semaphore_id >= curr_proc()->next_semaphore_id) {
errorf("Unexpected semaphore id %d", semaphore_id);
return -1;
}
// LAB5: (4-2) You may want to maintain some variables for detect here
semaphore_up(&curr_proc()->semaphore_pool[semaphore_id]);
return 0;
}
int sys_semaphore_down(int semaphore_id)
{
if (semaphore_id < 0 ||
semaphore_id >= curr_proc()->next_semaphore_id) {
errorf("Unexpected semaphore id %d", semaphore_id);
return -1;
}
// LAB5: (4-2) You may want to maintain some variables for detect
// or call your detect algorithm here
semaphore_down(&curr_proc()->semaphore_pool[semaphore_id]);
return 0;
}
int sys_condvar_create()
{
struct condvar *c = condvar_create();
if (c == NULL) {
errorf("fail to create condvar: out of resource");
return -1;
}
int cond_id = c - curr_proc()->condvar_pool;
debugf("create condvar %d", cond_id);
return cond_id;
}
int sys_condvar_signal(int cond_id)
{
if (cond_id < 0 || cond_id >= curr_proc()->next_condvar_id) {
errorf("Unexpected condvar id %d", cond_id);
return -1;
}
cond_signal(&curr_proc()->condvar_pool[cond_id]);
return 0;
}
int sys_condvar_wait(int cond_id, int mutex_id)
{
if (cond_id < 0 || cond_id >= curr_proc()->next_condvar_id) {
errorf("Unexpected condvar id %d", cond_id);
return -1;
}
if (mutex_id < 0 || mutex_id >= curr_proc()->next_mutex_id) {
errorf("Unexpected mutex id %d", mutex_id);
return -1;
}
cond_wait(&curr_proc()->condvar_pool[cond_id],
&curr_proc()->mutex_pool[mutex_id]);
return 0;
}
// LAB5: (2) you may need to define function enable_deadlock_detect here
extern char trap_page[];
void syscall()
{
struct trapframe *trapframe = curr_thread()->trapframe;
int id = trapframe->a7, ret;
uint64 args[6] = { trapframe->a0, trapframe->a1, trapframe->a2,
trapframe->a3, trapframe->a4, trapframe->a5 };
if (id != SYS_write && id != SYS_read && id != SYS_sched_yield) {
debugf("syscall %d args = [%x, %x, %x, %x, %x, %x]", id,
args[0], args[1], args[2], args[3], args[4], args[5]);
}
switch (id) {
case SYS_write:
ret = sys_write(args[0], args[1], args[2]);
break;
case SYS_read:
ret = sys_read(args[0], args[1], args[2]);
break;
case SYS_openat:
ret = sys_openat(args[0], args[1], args[2]);
break;
case SYS_close:
ret = sys_close(args[0]);
break;
case SYS_exit:
sys_exit(args[0]);
// __builtin_unreachable();
// case SYS_nanosleep:
// ret = sys_nanosleep(args[0]);
// break;
case SYS_sched_yield:
ret = sys_sched_yield();
break;
case SYS_gettimeofday:
ret = sys_gettimeofday(args[0], args[1]);
break;
case SYS_getpid:
ret = sys_getpid();
break;
case SYS_getppid:
ret = sys_getppid();
break;
case SYS_clone: // SYS_fork
ret = sys_clone();
break;
case SYS_execve:
ret = sys_exec(args[0], args[1]);
break;
case SYS_wait4:
ret = sys_wait(args[0], args[1]);
break;
case SYS_pipe2:
ret = sys_pipe(args[0]);
break;
case SYS_thread_create:
ret = sys_thread_create(args[0], args[1]);
break;
case SYS_gettid:
ret = sys_gettid();
break;
case SYS_waittid:
ret = sys_waittid(args[0]);
break;
case SYS_mutex_create:
ret = sys_mutex_create(args[0]);
break;
case SYS_mutex_lock:
ret = sys_mutex_lock(args[0]);
break;
case SYS_mutex_unlock:
ret = sys_mutex_unlock(args[0]);
break;
case SYS_semaphore_create:
ret = sys_semaphore_create(args[0]);
break;
case SYS_semaphore_up:
ret = sys_semaphore_up(args[0]);
break;
case SYS_semaphore_down:
ret = sys_semaphore_down(args[0]);
break;
case SYS_condvar_create:
ret = sys_condvar_create();
break;
case SYS_condvar_signal:
ret = sys_condvar_signal(args[0]);
break;
case SYS_condvar_wait:
ret = sys_condvar_wait(args[0], args[1]);
break;
// LAB5: (2) you may need to add case SYS_enable_deadlock_detect here
default:
ret = -1;
errorf("unknown syscall %d", id);
}
curr_thread()->trapframe->a0 = ret;
if (id != SYS_write && id != SYS_read && id != SYS_sched_yield) {
debugf("syscall %d ret %d", id, ret);
}
}
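The LAB5 comment earlier in this file sketches a deadlock_detect(available, allocation, request) helper. Below is a minimal work/finish scan in that spirit, shown only as one possible shape of the check, not as the reference solution; how and where the three matrices are maintained is left to the lab:

// Returns 1 if every thread's pending request can eventually be granted,
// 0 if at least one thread can never proceed (a deadlock would occur).
int deadlock_detect(const int available[LOCK_POOL_SIZE],
		    const int allocation[NTHREAD][LOCK_POOL_SIZE],
		    const int request[NTHREAD][LOCK_POOL_SIZE])
{
	int work[LOCK_POOL_SIZE], finish[NTHREAD], progress = 1;
	for (int j = 0; j < LOCK_POOL_SIZE; j++)
		work[j] = available[j];
	for (int i = 0; i < NTHREAD; i++)
		finish[i] = 0;
	while (progress) {
		progress = 0;
		for (int i = 0; i < NTHREAD; i++) {
			if (finish[i])
				continue;
			int ok = 1;
			for (int j = 0; j < LOCK_POOL_SIZE; j++)
				if (request[i][j] > work[j])
					ok = 0;
			if (!ok)
				continue;
			// thread i could run to completion and release what it holds
			for (int j = 0; j < LOCK_POOL_SIZE; j++)
				work[j] += allocation[i][j];
			finish[i] = 1;
			progress = 1;
		}
	}
	for (int i = 0; i < NTHREAD; i++)
		if (!finish[i])
			return 0;
	return 1;
}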

6
os/syscall.h Normal file
View File

@ -0,0 +1,6 @@
#ifndef SYSCALL_H
#define SYSCALL_H
void syscall();
#endif // SYSCALL_H

308
os/syscall_ids.h Normal file
View File

@ -0,0 +1,308 @@
#define SYS_io_setup 0
#define SYS_io_destroy 1
#define SYS_io_submit 2
#define SYS_io_cancel 3
#define SYS_io_getevents 4
#define SYS_setxattr 5
#define SYS_lsetxattr 6
#define SYS_fsetxattr 7
#define SYS_getxattr 8
#define SYS_lgetxattr 9
#define SYS_fgetxattr 10
#define SYS_listxattr 11
#define SYS_llistxattr 12
#define SYS_flistxattr 13
#define SYS_removexattr 14
#define SYS_lremovexattr 15
#define SYS_fremovexattr 16
#define SYS_getcwd 17
#define SYS_lookup_dcookie 18
#define SYS_eventfd2 19
#define SYS_epoll_create1 20
#define SYS_epoll_ctl 21
#define SYS_epoll_pwait 22
#define SYS_dup 23
#define SYS_dup3 24
#define SYS_fcntl 25
#define SYS_inotify_init1 26
#define SYS_inotify_add_watch 27
#define SYS_inotify_rm_watch 28
#define SYS_ioctl 29
#define SYS_ioprio_set 30
#define SYS_ioprio_get 31
#define SYS_flock 32
#define SYS_mknodat 33
#define SYS_mkdirat 34
#define SYS_unlinkat 35
#define SYS_symlinkat 36
#define SYS_linkat 37
#define SYS_umount2 39
#define SYS_mount 40
#define SYS_pivot_root 41
#define SYS_nfsservctl 42
#define SYS_statfs 43
#define SYS_fstatfs 44
#define SYS_truncate 45
#define SYS_ftruncate 46
#define SYS_fallocate 47
#define SYS_faccessat 48
#define SYS_chdir 49
#define SYS_fchdir 50
#define SYS_chroot 51
#define SYS_fchmod 52
#define SYS_fchmodat 53
#define SYS_fchownat 54
#define SYS_fchown 55
#define SYS_openat 56
#define SYS_close 57
#define SYS_vhangup 58
#define SYS_pipe2 59
#define SYS_quotactl 60
#define SYS_getdents64 61
#define SYS_lseek 62
#define SYS_read 63
#define SYS_write 64
#define SYS_readv 65
#define SYS_writev 66
#define SYS_pread64 67
#define SYS_pwrite64 68
#define SYS_preadv 69
#define SYS_pwritev 70
#define SYS_sendfile 71
#define SYS_pselect6 72
#define SYS_ppoll 73
#define SYS_signalfd4 74
#define SYS_vmsplice 75
#define SYS_splice 76
#define SYS_tee 77
#define SYS_readlinkat 78
#define SYS_fstatat 79
#define SYS_fstat 80
#define SYS_sync 81
#define SYS_fsync 82
#define SYS_fdatasync 83
#define SYS_sync_file_range 84
#define SYS_timerfd_create 85
#define SYS_timerfd_settime 86
#define SYS_timerfd_gettime 87
#define SYS_utimensat 88
#define SYS_acct 89
#define SYS_capget 90
#define SYS_capset 91
#define SYS_personality 92
#define SYS_exit 93
#define SYS_exit_group 94
#define SYS_waitid 95
#define SYS_set_tid_address 96
#define SYS_unshare 97
#define SYS_futex 98
#define SYS_set_robust_list 99
#define SYS_get_robust_list 100
#define SYS_nanosleep 101
#define SYS_getitimer 102
#define SYS_setitimer 103
#define SYS_kexec_load 104
#define SYS_init_module 105
#define SYS_delete_module 106
#define SYS_timer_create 107
#define SYS_timer_gettime 108
#define SYS_timer_getoverrun 109
#define SYS_timer_settime 110
#define SYS_timer_delete 111
#define SYS_clock_settime 112
#define SYS_clock_gettime 113
#define SYS_clock_getres 114
#define SYS_clock_nanosleep 115
#define SYS_syslog 116
#define SYS_ptrace 117
#define SYS_sched_setparam 118
#define SYS_sched_setscheduler 119
#define SYS_sched_getscheduler 120
#define SYS_sched_getparam 121
#define SYS_sched_setaffinity 122
#define SYS_sched_getaffinity 123
#define SYS_sched_yield 124
#define SYS_sched_get_priority_max 125
#define SYS_sched_get_priority_min 126
#define SYS_sched_rr_get_interval 127
#define SYS_restart_syscall 128
#define SYS_kill 129
#define SYS_tkill 130
#define SYS_tgkill 131
#define SYS_sigaltstack 132
#define SYS_rt_sigsuspend 133
#define SYS_rt_sigaction 134
#define SYS_rt_sigprocmask 135
#define SYS_rt_sigpending 136
#define SYS_rt_sigtimedwait 137
#define SYS_rt_sigqueueinfo 138
#define SYS_rt_sigreturn 139
#define SYS_setpriority 140
#define SYS_getpriority 141
#define SYS_reboot 142
#define SYS_setregid 143
#define SYS_setgid 144
#define SYS_setreuid 145
#define SYS_setuid 146
#define SYS_setresuid 147
#define SYS_getresuid 148
#define SYS_setresgid 149
#define SYS_getresgid 150
#define SYS_setfsuid 151
#define SYS_setfsgid 152
#define SYS_times 153
#define SYS_setpgid 154
#define SYS_getpgid 155
#define SYS_getsid 156
#define SYS_setsid 157
#define SYS_getgroups 158
#define SYS_setgroups 159
#define SYS_uname 160
#define SYS_sethostname 161
#define SYS_setdomainname 162
#define SYS_getrlimit 163
#define SYS_setrlimit 164
#define SYS_getrusage 165
#define SYS_umask 166
#define SYS_prctl 167
#define SYS_getcpu 168
#define SYS_gettimeofday 169
#define SYS_settimeofday 170
#define SYS_adjtimex 171
#define SYS_getpid 172
#define SYS_getppid 173
#define SYS_getuid 174
#define SYS_geteuid 175
#define SYS_getgid 176
#define SYS_getegid 177
#define SYS_gettid 178
#define SYS_sysinfo 179
#define SYS_mq_open 180
#define SYS_mq_unlink 181
#define SYS_mq_timedsend 182
#define SYS_mq_timedreceive 183
#define SYS_mq_notify 184
#define SYS_mq_getsetattr 185
#define SYS_msgget 186
#define SYS_msgctl 187
#define SYS_msgrcv 188
#define SYS_msgsnd 189
#define SYS_semget 190
#define SYS_semctl 191
#define SYS_semtimedop 192
#define SYS_semop 193
#define SYS_shmget 194
#define SYS_shmctl 195
#define SYS_shmat 196
#define SYS_shmdt 197
#define SYS_socket 198
#define SYS_socketpair 199
#define SYS_bind 200
#define SYS_listen 201
#define SYS_accept 202
#define SYS_connect 203
#define SYS_getsockname 204
#define SYS_getpeername 205
#define SYS_sendto 206
#define SYS_recvfrom 207
#define SYS_setsockopt 208
#define SYS_getsockopt 209
#define SYS_shutdown 210
#define SYS_sendmsg 211
#define SYS_recvmsg 212
#define SYS_readahead 213
#define SYS_brk 214
#define SYS_munmap 215
#define SYS_mremap 216
#define SYS_add_key 217
#define SYS_request_key 218
#define SYS_keyctl 219
#define SYS_clone 220
#define SYS_execve 221
#define SYS_mmap 222
#define SYS_fadvise64 223
#define SYS_swapon 224
#define SYS_swapoff 225
#define SYS_mprotect 226
#define SYS_msync 227
#define SYS_mlock 228
#define SYS_munlock 229
#define SYS_mlockall 230
#define SYS_munlockall 231
#define SYS_mincore 232
#define SYS_madvise 233
#define SYS_remap_file_pages 234
#define SYS_mbind 235
#define SYS_get_mempolicy 236
#define SYS_set_mempolicy 237
#define SYS_migrate_pages 238
#define SYS_move_pages 239
#define SYS_rt_tgsigqueueinfo 240
#define SYS_perf_event_open 241
#define SYS_accept4 242
#define SYS_recvmmsg 243
#define SYS_arch_specific_syscall 244
#define SYS_wait4 260
#define SYS_prlimit64 261
#define SYS_fanotify_init 262
#define SYS_fanotify_mark 263
#define SYS_name_to_handle_at 264
#define SYS_open_by_handle_at 265
#define SYS_clock_adjtime 266
#define SYS_syncfs 267
#define SYS_setns 268
#define SYS_sendmmsg 269
#define SYS_process_vm_readv 270
#define SYS_process_vm_writev 271
#define SYS_kcmp 272
#define SYS_finit_module 273
#define SYS_sched_setattr 274
#define SYS_sched_getattr 275
#define SYS_renameat2 276
#define SYS_seccomp 277
#define SYS_getrandom 278
#define SYS_memfd_create 279
#define SYS_bpf 280
#define SYS_execveat 281
#define SYS_userfaultfd 282
#define SYS_membarrier 283
#define SYS_mlock2 284
#define SYS_copy_file_range 285
#define SYS_preadv2 286
#define SYS_pwritev2 287
#define SYS_pkey_mprotect 288
#define SYS_pkey_alloc 289
#define SYS_pkey_free 290
#define SYS_statx 291
#define SYS_io_pgetevents 292
#define SYS_rseq 293
#define SYS_kexec_file_load 294
#define SYS_spawn 400
#define SYS_pidfd_send_signal 424
#define SYS_io_uring_setup 425
#define SYS_io_uring_enter 426
#define SYS_io_uring_register 427
#define SYS_open_tree 428
#define SYS_move_mount 429
#define SYS_fsopen 430
#define SYS_fsconfig 431
#define SYS_fsmount 432
#define SYS_fspick 433
#define SYS_pidfd_open 434
#define SYS_clone3 435
#define SYS_openat2 437
#define SYS_pidfd_getfd 438
#define SYS_faccessat2 439
#define SYS_riscv_flush_icache (244 + 15)
#define SYS_thread_create 460
#define SYS_waittid 462
#define SYS_mutex_create 463
#define SYS_mutex_lock 464
#define SYS_mutex_unlock 466
#define SYS_semaphore_create 467
#define SYS_semaphore_up 468
#define SYS_enable_deadlock_detect 469
#define SYS_semaphore_down 470
#define SYS_condvar_create 471
#define SYS_condvar_signal 472
#define SYS_condvar_wait 473
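The IDs above mostly follow the Linux asm-generic syscall numbering for the common calls, with course-specific additions (SYS_spawn at 400 and the thread/mutex/semaphore/condvar calls in the 460s). As a rough sketch of how user code would reach the kernel with one of these numbers — the wrapper below is illustrative and not part of this repository; it only assumes the usual RISC-V convention of arguments in a0-a2 and the syscall ID in a7:

// Hypothetical user-side syscall wrapper (not in this repo): put up to
// three arguments in a0-a2, the syscall ID in a7, then trap with ecall.
static inline long syscall3(long id, long arg0, long arg1, long arg2)
{
	register long a0 asm("a0") = arg0;
	register long a1 asm("a1") = arg1;
	register long a2 asm("a2") = arg2;
	register long a7 asm("a7") = id;
	asm volatile("ecall"
		     : "+r"(a0)
		     : "r"(a1), "r"(a2), "r"(a7)
		     : "memory");
	return a0; // the kernel returns its result in a0
}

// e.g. syscall3(SYS_write, fd, (long)buf, len);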

24
os/timer.c Normal file
View File

@ -0,0 +1,24 @@
#include "timer.h"
#include "riscv.h"
#include "sbi.h"
/// read the `mtime` register
uint64 get_cycle()
{
return r_time();
}
/// Enable timer interrupt
void timer_init()
{
// Enable supervisor timer interrupt
w_sie(r_sie() | SIE_STIE);
set_next_timer();
}
/// Set the next timer interrupt
void set_next_timer()
{
const uint64 timebase = CPU_FREQ / TICKS_PER_SEC;
set_timer(get_cycle() + timebase);
}

19
os/timer.h Normal file
View File

@ -0,0 +1,19 @@
#ifndef TIMER_H
#define TIMER_H
#include "types.h"
#define TICKS_PER_SEC (100)
// timer counter frequency on the QEMU platform
#define CPU_FREQ (12500000)
uint64 get_cycle();
void timer_init();
void set_next_timer();
typedef struct {
uint64 sec; // seconds since the Unix epoch
uint64 usec; // microseconds
} TimeVal;
#endif // TIMER_H
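Because get_cycle() returns the raw counter and CPU_FREQ is its rate in ticks per second, converting a cycle count into the TimeVal defined above is plain integer arithmetic. A minimal sketch, assuming a helper name (cycles_to_timeval) that does not exist in this code base:

#include "timer.h"

// Split a raw cycle count into whole seconds and microseconds.
// One second is CPU_FREQ cycles; the remainder scaled by 1,000,000
// gives the sub-second part (no overflow at this CPU_FREQ).
static TimeVal cycles_to_timeval(uint64 cycles)
{
	TimeVal tv;
	tv.sec = cycles / CPU_FREQ;
	tv.usec = (cycles % CPU_FREQ) * 1000000 / CPU_FREQ;
	return tv;
}

// Usage: TimeVal now = cycles_to_timeval(get_cycle());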

129
os/trampoline.S Normal file
View File

@ -0,0 +1,129 @@
#
# code to switch between user and kernel space.
#
# this code is mapped at the same virtual address
# (TRAMPOLINE) in user and kernel space so that
# it continues to work when it switches page tables.
#
# kernel.ld causes this to be aligned
# to a page boundary.
#
.section trampsec
.globl trampoline
trampoline:
.align 4
.globl uservec
uservec:
#
# trap.c sets stvec to point here, so
# traps from user space start here,
# in supervisor mode, but with a
# user page table.
#
# sscratch points to where the process's p->trapframe is
# mapped into user space, at TRAPFRAME.
#
# swap a0 and sscratch
# so that a0 is TRAPFRAME
csrrw a0, sscratch, a0
# save the user registers in TRAPFRAME
sd ra, 40(a0)
sd sp, 48(a0)
sd gp, 56(a0)
sd tp, 64(a0)
sd t0, 72(a0)
sd t1, 80(a0)
sd t2, 88(a0)
sd s0, 96(a0)
sd s1, 104(a0)
sd a1, 120(a0)
sd a2, 128(a0)
sd a3, 136(a0)
sd a4, 144(a0)
sd a5, 152(a0)
sd a6, 160(a0)
sd a7, 168(a0)
sd s2, 176(a0)
sd s3, 184(a0)
sd s4, 192(a0)
sd s5, 200(a0)
sd s6, 208(a0)
sd s7, 216(a0)
sd s8, 224(a0)
sd s9, 232(a0)
sd s10, 240(a0)
sd s11, 248(a0)
sd t3, 256(a0)
sd t4, 264(a0)
sd t5, 272(a0)
sd t6, 280(a0)
csrr t0, sscratch
sd t0, 112(a0)
csrr t1, sepc
sd t1, 24(a0)
ld sp, 8(a0)
ld tp, 32(a0)
ld t0, 16(a0)
ld t1, 0(a0)
csrw satp, t1
sfence.vma zero, zero
jr t0
.globl userret
userret:
# userret(TRAPFRAME, pagetable)
# switch from kernel to user.
# usertrapret() calls here.
# a0: TRAPFRAME, in user page table.
# a1: user page table, for satp.
# switch to the user page table.
csrw satp, a1
sfence.vma zero, zero
# put the saved user a0 in sscratch, so we
# can swap it with our a0 (TRAPFRAME) in the last step.
ld t0, 112(a0)
csrw sscratch, t0
# restore all but a0 from TRAPFRAME
ld ra, 40(a0)
ld sp, 48(a0)
ld gp, 56(a0)
ld tp, 64(a0)
ld t0, 72(a0)
ld t1, 80(a0)
ld t2, 88(a0)
ld s0, 96(a0)
ld s1, 104(a0)
ld a1, 120(a0)
ld a2, 128(a0)
ld a3, 136(a0)
ld a4, 144(a0)
ld a5, 152(a0)
ld a6, 160(a0)
ld a7, 168(a0)
ld s2, 176(a0)
ld s3, 184(a0)
ld s4, 192(a0)
ld s5, 200(a0)
ld s6, 208(a0)
ld s7, 216(a0)
ld s8, 224(a0)
ld s9, 232(a0)
ld s10, 240(a0)
ld s11, 248(a0)
ld t3, 256(a0)
ld t4, 264(a0)
ld t5, 272(a0)
ld t6, 280(a0)
# restore user a0, and save TRAPFRAME in sscratch
csrrw a0, sscratch, a0
# return to user mode and user pc.
# usertrapret() set up sstatus and sepc.
sret
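Because this page is mapped at the fixed virtual address TRAMPOLINE in both user and kernel space, code never uses the link-time addresses of uservec/userret directly; it adds their offset from the trampoline symbol to TRAMPOLINE, exactly as trap.c does below. The same arithmetic written as a small helper, for illustration only (TRAMPOLINE is assumed to come from the memory-layout header used elsewhere in this kernel):

// Translate a symbol defined inside trampoline.S into its address in the
// shared TRAMPOLINE mapping.
static inline uint64 trampoline_va(char *sym)
{
	extern char trampoline[];
	return TRAMPOLINE + (uint64)(sym - trampoline);
}

// e.g. w_stvec(trampoline_va(uservec) & ~0x3ULL); // what set_usertrap() computes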

166
os/trap.c Normal file
View File

@ -0,0 +1,166 @@
#include "trap.h"
#include "defs.h"
#include "loader.h"
#include "plic.h"
#include "syscall.h"
#include "timer.h"
#include "virtio.h"
#include "proc.h"
extern char trampoline[], uservec[];
extern char userret[], kernelvec[];
void kerneltrap();
// set up to take exceptions and traps from user space.
void set_usertrap()
{
w_stvec(((uint64)TRAMPOLINE + (uservec - trampoline)) & ~0x3); // DIRECT
}
void set_kerneltrap()
{
w_stvec((uint64)kernelvec & ~0x3); // DIRECT
}
// set up to take exceptions and traps while in the kernel.
void trap_init()
{
// intr_on();
set_kerneltrap();
w_sie(r_sie() | SIE_SEIE | SIE_STIE | SIE_SSIE);
}
void unknown_trap()
{
errorf("unknown trap: %p, stval = %p", r_scause(), r_stval());
exit(-1);
}
void devintr(uint64 cause)
{
int irq;
switch (cause) {
case SupervisorTimer:
set_next_timer();
// if from user, allow yield
if ((r_sstatus() & SSTATUS_SPP) == 0) {
yield();
}
break;
case SupervisorExternal:
irq = plic_claim();
if (irq == UART0_IRQ) {
// do nothing
} else if (irq == VIRTIO0_IRQ) {
virtio_disk_intr();
} else if (irq) {
infof("unexpected interrupt irq=%d\n", irq);
}
if (irq)
plic_complete(irq);
break;
default:
unknown_trap();
break;
}
}
//
// handle an interrupt, exception, or system call from user space.
// called from trampoline.S
//
void usertrap()
{
set_kerneltrap();
struct trapframe *trapframe = curr_thread()->trapframe;
tracef("trap from user epc = %p", trapframe->epc);
if ((r_sstatus() & SSTATUS_SPP) != 0)
panic("usertrap: not from user mode");
uint64 cause = r_scause();
if (cause & (1ULL << 63)) {
devintr(cause & 0xff);
} else {
switch (cause) {
case UserEnvCall:
trapframe->epc += 4;
syscall();
break;
case StoreMisaligned:
case StorePageFault:
case InstructionMisaligned:
case InstructionPageFault:
case LoadMisaligned:
case LoadPageFault:
errorf("%d in application, bad addr = %p, bad instruction = %p, "
"core dumped.",
cause, r_stval(), trapframe->epc);
exit(-2);
break;
case IllegalInstruction:
errorf("IllegalInstruction in application, core dumped.");
exit(-3);
break;
default:
unknown_trap();
break;
}
}
usertrapret();
}
//
// return to user space
//
void usertrapret()
{
set_usertrap();
struct trapframe *trapframe = curr_thread()->trapframe;
trapframe->kernel_satp = r_satp(); // kernel page table
trapframe->kernel_sp =
curr_thread()->kstack + KSTACK_SIZE; // process's kernel stack
trapframe->kernel_trap = (uint64)usertrap;
trapframe->kernel_hartid = r_tp(); // unused
w_sepc(trapframe->epc);
// set up the registers that trampoline.S's sret will use
// to get to user space.
// set S Previous Privilege mode to User.
uint64 x = r_sstatus();
x &= ~SSTATUS_SPP; // clear SPP to 0 for user mode
x |= SSTATUS_SPIE; // enable interrupts in user mode
w_sstatus(x);
// tell trampoline.S the user page table to switch to.
uint64 satp = MAKE_SATP(curr_proc()->pagetable);
uint64 fn = TRAMPOLINE + (userret - trampoline);
uint64 trapframe_va = get_thread_trapframe_va(curr_thread()->tid);
debugf("return to user @ %p, sp @ %p", trapframe->epc, trapframe->sp);
((void (*)(uint64, uint64))fn)(trapframe_va, satp);
}
void kerneltrap()
{
uint64 sepc = r_sepc();
uint64 sstatus = r_sstatus();
uint64 scause = r_scause();
debugf("kernel trap: epc = %p, cause = %d", sepc, scause);
if ((sstatus & SSTATUS_SPP) == 0)
panic("kerneltrap: not from supervisor mode");
if (scause & (1ULL << 63)) {
devintr(scause & 0xff);
} else {
errorf("invalid trap from kernel: %p, stval = %p sepc = %p\n",
scause, r_stval(), sepc);
exit(-1);
}
// the yield() may have caused some traps to occur,
// so restore trap registers for use by kernelvec.S's sepc instruction.
w_sepc(sepc);
w_sstatus(sstatus);
}
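Both usertrap() and kerneltrap() dispatch on the same scause encoding: bit 63 set means an interrupt (handled by devintr()), otherwise the low bits hold one of the exception codes from trap.h. A small decoder that mirrors this check, shown purely as an illustration and not present in the source:

#include "trap.h"

#define SCAUSE_INTERRUPT_BIT (1ULL << 63)

// Returns 1 if scause describes an interrupt, 0 for an exception;
// in both cases *code receives the low-order cause bits
// (an enum Interrupt or enum Exception value, respectively).
static int decode_scause(uint64 scause, uint64 *code)
{
	*code = scause & 0xff;
	return (scause & SCAUSE_INTERRUPT_BIT) != 0;
}

// e.g.:
//   uint64 code;
//   if (decode_scause(r_scause(), &code) && code == SupervisorTimer)
//           set_next_timer();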

74
os/trap.h Normal file
View File

@ -0,0 +1,74 @@
#ifndef TRAP_H
#define TRAP_H
#include "types.h"
struct trapframe {
/* 0 */ uint64 kernel_satp; // kernel page table
/* 8 */ uint64 kernel_sp; // top of process's kernel stack
/* 16 */ uint64 kernel_trap; // usertrap()
/* 24 */ uint64 epc; // saved user program counter
/* 32 */ uint64 kernel_hartid; // saved kernel tp
/* 40 */ uint64 ra;
/* 48 */ uint64 sp;
/* 56 */ uint64 gp;
/* 64 */ uint64 tp;
/* 72 */ uint64 t0;
/* 80 */ uint64 t1;
/* 88 */ uint64 t2;
/* 96 */ uint64 s0;
/* 104 */ uint64 s1;
/* 112 */ uint64 a0;
/* 120 */ uint64 a1;
/* 128 */ uint64 a2;
/* 136 */ uint64 a3;
/* 144 */ uint64 a4;
/* 152 */ uint64 a5;
/* 160 */ uint64 a6;
/* 168 */ uint64 a7;
/* 176 */ uint64 s2;
/* 184 */ uint64 s3;
/* 192 */ uint64 s4;
/* 200 */ uint64 s5;
/* 208 */ uint64 s6;
/* 216 */ uint64 s7;
/* 224 */ uint64 s8;
/* 232 */ uint64 s9;
/* 240 */ uint64 s10;
/* 248 */ uint64 s11;
/* 256 */ uint64 t3;
/* 264 */ uint64 t4;
/* 272 */ uint64 t5;
/* 280 */ uint64 t6;
};
enum Exception {
InstructionMisaligned = 0,
InstructionAccessFault = 1,
IllegalInstruction = 2,
Breakpoint = 3,
LoadMisaligned = 4,
LoadAccessFault = 5,
StoreMisaligned = 6,
StoreAccessFault = 7,
UserEnvCall = 8,
SupervisorEnvCall = 9,
MachineEnvCall = 11,
InstructionPageFault = 12,
LoadPageFault = 13,
StorePageFault = 15,
};
enum Interrupt {
UserSoft = 0,
SupervisorSoft,
UserTimer = 4,
SupervisorTimer,
UserExternal = 8,
SupervisorExternal,
};
void trap_init();
void usertrapret();
#endif // TRAP_H
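The byte offsets noted in the comments above are load-bearing: trampoline.S saves and restores registers with hard-coded offsets (for example `sd ra, 40(a0)`). A few compile-time checks along these lines would catch an accidental reordering of the struct; they are an illustrative addition, not part of the repository:

#include <stddef.h>
#include "trap.h"

// Keep struct trapframe in sync with the fixed offsets in trampoline.S.
_Static_assert(offsetof(struct trapframe, kernel_satp) == 0, "kernel_satp");
_Static_assert(offsetof(struct trapframe, epc) == 24, "epc");
_Static_assert(offsetof(struct trapframe, ra) == 40, "ra");
_Static_assert(offsetof(struct trapframe, a0) == 112, "a0");
_Static_assert(offsetof(struct trapframe, t6) == 280, "t6");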

12
os/types.h Normal file
View File

@ -0,0 +1,12 @@
#ifndef TYPES_H
#define TYPES_H
typedef unsigned int uint;
typedef unsigned short ushort;
typedef unsigned char uchar;
typedef unsigned char uint8;
typedef unsigned short uint16;
typedef unsigned int uint32;
typedef unsigned long uint64;
#endif // TYPES_H

106
os/virtio.h Normal file
View File

@ -0,0 +1,106 @@
#ifndef VIRTIO_H
#define VIRTIO_H
#include "bio.h"
//
// virtio device definitions.
// for both the mmio interface, and virtio descriptors.
// only tested with qemu.
// this is the "legacy" virtio interface.
//
// the virtio spec:
// https://docs.oasis-open.org/virtio/virtio/v1.1/virtio-v1.1.pdf
//
// virtio mmio control registers, mapped starting at 0x10001000.
// from qemu virtio_mmio.h
#define VIRTIO_MMIO_MAGIC_VALUE 0x000 // 0x74726976
#define VIRTIO_MMIO_VERSION 0x004 // version; 1 is legacy
#define VIRTIO_MMIO_DEVICE_ID 0x008 // device type; 1 is net, 2 is disk
#define VIRTIO_MMIO_VENDOR_ID 0x00c // 0x554d4551
#define VIRTIO_MMIO_DEVICE_FEATURES 0x010
#define VIRTIO_MMIO_DRIVER_FEATURES 0x020
#define VIRTIO_MMIO_GUEST_PAGE_SIZE 0x028 // page size for PFN, write-only
#define VIRTIO_MMIO_QUEUE_SEL 0x030 // select queue, write-only
#define VIRTIO_MMIO_QUEUE_NUM_MAX 0x034 // max size of current queue, read-only
#define VIRTIO_MMIO_QUEUE_NUM 0x038 // size of current queue, write-only
#define VIRTIO_MMIO_QUEUE_ALIGN 0x03c // used ring alignment, write-only
#define VIRTIO_MMIO_QUEUE_PFN \
0x040 // physical page number for queue, read/write
#define VIRTIO_MMIO_QUEUE_READY 0x044 // ready bit
#define VIRTIO_MMIO_QUEUE_NOTIFY 0x050 // write-only
#define VIRTIO_MMIO_INTERRUPT_STATUS 0x060 // read-only
#define VIRTIO_MMIO_INTERRUPT_ACK 0x064 // write-only
#define VIRTIO_MMIO_STATUS 0x070 // read/write
// status register bits, from qemu virtio_config.h
#define VIRTIO_CONFIG_S_ACKNOWLEDGE 1
#define VIRTIO_CONFIG_S_DRIVER 2
#define VIRTIO_CONFIG_S_DRIVER_OK 4
#define VIRTIO_CONFIG_S_FEATURES_OK 8
// device feature bits
#define VIRTIO_BLK_F_RO 5 /* Disk is read-only */
#define VIRTIO_BLK_F_SCSI 7 /* Supports scsi command passthru */
#define VIRTIO_BLK_F_CONFIG_WCE 11 /* Writeback mode available in config */
#define VIRTIO_BLK_F_MQ 12 /* support more than one vq */
#define VIRTIO_F_ANY_LAYOUT 27
#define VIRTIO_RING_F_INDIRECT_DESC 28
#define VIRTIO_RING_F_EVENT_IDX 29
// this many virtio descriptors.
// must be a power of two.
#define NUM 8
// a single descriptor, from the spec.
struct virtq_desc {
uint64 addr;
uint32 len;
uint16 flags;
uint16 next;
};
#define VRING_DESC_F_NEXT 1 // chained with another descriptor
#define VRING_DESC_F_WRITE 2 // device writes (vs read)
// the (entire) avail ring, from the spec.
struct virtq_avail {
uint16 flags; // always zero
uint16 idx; // driver will write ring[idx] next
uint16 ring[NUM]; // descriptor numbers of chain heads
uint16 unused;
};
// one entry in the "used" ring, with which the
// device tells the driver about completed requests.
struct virtq_used_elem {
uint32 id; // index of start of completed descriptor chain
uint32 len;
};
struct virtq_used {
uint16 flags; // always zero
uint16 idx; // device increments when it adds a ring[] entry
struct virtq_used_elem ring[NUM];
};
// these are specific to virtio block devices, e.g. disks,
// described in Section 5.2 of the spec.
#define VIRTIO_BLK_T_IN 0 // read the disk
#define VIRTIO_BLK_T_OUT 1 // write the disk
// the format of the first descriptor in a disk request.
// to be followed by two more descriptors containing
// the block, and a one-byte status.
struct virtio_blk_req {
uint32 type; // VIRTIO_BLK_T_IN or ..._OUT
uint32 reserved;
uint64 sector;
};
void virtio_disk_init();
void virtio_disk_rw(struct buf *, int);
void virtio_disk_intr();
#endif // VIRTIO_H
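With NUM = 8, the legacy queue layout that virtio_disk.c sets up puts the 16-byte descriptors and the avail ring in the first page of the shared memory and the used ring at the next page boundary. A few compile-time size checks, shown only as an illustration of those assumptions:

#include "virtio.h"

// 8 descriptors of 16 bytes each occupy the first 128 bytes of the queue page.
_Static_assert(sizeof(struct virtq_desc) == 16, "virtq_desc size");
_Static_assert(NUM * sizeof(struct virtq_desc) == 128, "descriptor table size");
// avail ring: flags + idx + ring[NUM] + unused = 2 + 2 + 16 + 2 = 22 bytes.
_Static_assert(sizeof(struct virtq_avail) == 22, "virtq_avail size");
// each used-ring element is a 32-bit id plus a 32-bit length.
_Static_assert(sizeof(struct virtq_used_elem) == 8, "virtq_used_elem size");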

295
os/virtio_disk.c Normal file
View File

@ -0,0 +1,295 @@
//
// driver for qemu's virtio disk device.
// uses qemu's mmio interface to virtio.
// qemu presents a "legacy" virtio interface.
//
// qemu ... -drive file=fs.img,if=none,format=raw,id=x0 -device
// virtio-blk-device,drive=x0,bus=virtio-mmio-bus.0
//
#include "bio.h"
#include "defs.h"
#include "file.h"
#include "fs.h"
#include "plic.h"
#include "riscv.h"
#include "types.h"
#include "virtio.h"
// the address of virtio mmio register r.
#define R(r) ((volatile uint32 *)(VIRTIO0 + (r)))
static struct disk {
// the virtio driver and device mostly communicate through a set of
// structures in RAM. pages[] allocates that memory. pages[] is a
// global (instead of calls to kalloc()) because it must consist of
// two contiguous pages of page-aligned physical memory.
char pages[2 * PGSIZE];
// pages[] is divided into three regions (descriptors, avail, and
// used), as explained in Section 2.6 of the virtio specification
// for the legacy interface.
// https://docs.oasis-open.org/virtio/virtio/v1.1/virtio-v1.1.pdf
// the first region of pages[] is a set (not a ring) of DMA
// descriptors, with which the driver tells the device where to read
// and write individual disk operations. there are NUM descriptors.
// most commands consist of a "chain" (a linked list) of a couple of
// these descriptors.
// points into pages[].
struct virtq_desc *desc;
// next is a ring in which the driver writes descriptor numbers
// that the driver would like the device to process. it only
// includes the head descriptor of each chain. the ring has
// NUM elements.
// points into pages[].
struct virtq_avail *avail;
// finally a ring in which the device writes descriptor numbers that
// the device has finished processing (just the head of each chain).
// there are NUM used ring entries.
// points into pages[].
struct virtq_used *used;
// our own book-keeping.
char free[NUM]; // is a descriptor free?
uint16 used_idx; // we've looked this far in used[2..NUM].
// track info about in-flight operations,
// for use when completion interrupt arrives.
// indexed by first descriptor index of chain.
struct {
struct buf *b;
char status;
} info[NUM];
// disk command headers.
// one-for-one with descriptors, for convenience.
struct virtio_blk_req ops[NUM];
} __attribute__((aligned(PGSIZE))) disk;
void virtio_disk_init()
{
uint32 status = 0;
if (*R(VIRTIO_MMIO_MAGIC_VALUE) != 0x74726976 ||
*R(VIRTIO_MMIO_VERSION) != 1 || *R(VIRTIO_MMIO_DEVICE_ID) != 2 ||
*R(VIRTIO_MMIO_VENDOR_ID) != 0x554d4551) {
panic("could not find virtio disk");
}
status |= VIRTIO_CONFIG_S_ACKNOWLEDGE;
*R(VIRTIO_MMIO_STATUS) = status;
status |= VIRTIO_CONFIG_S_DRIVER;
*R(VIRTIO_MMIO_STATUS) = status;
// negotiate features
uint64 features = *R(VIRTIO_MMIO_DEVICE_FEATURES);
features &= ~(1 << VIRTIO_BLK_F_RO);
features &= ~(1 << VIRTIO_BLK_F_SCSI);
features &= ~(1 << VIRTIO_BLK_F_CONFIG_WCE);
features &= ~(1 << VIRTIO_BLK_F_MQ);
features &= ~(1 << VIRTIO_F_ANY_LAYOUT);
features &= ~(1 << VIRTIO_RING_F_EVENT_IDX);
features &= ~(1 << VIRTIO_RING_F_INDIRECT_DESC);
*R(VIRTIO_MMIO_DRIVER_FEATURES) = features;
// tell device that feature negotiation is complete.
status |= VIRTIO_CONFIG_S_FEATURES_OK;
*R(VIRTIO_MMIO_STATUS) = status;
// tell device we're completely ready.
status |= VIRTIO_CONFIG_S_DRIVER_OK;
*R(VIRTIO_MMIO_STATUS) = status;
*R(VIRTIO_MMIO_GUEST_PAGE_SIZE) = PGSIZE;
// initialize queue 0.
*R(VIRTIO_MMIO_QUEUE_SEL) = 0;
uint32 max = *R(VIRTIO_MMIO_QUEUE_NUM_MAX);
if (max == 0)
panic("virtio disk has no queue 0");
if (max < NUM)
panic("virtio disk max queue too short");
*R(VIRTIO_MMIO_QUEUE_NUM) = NUM;
memset(disk.pages, 0, sizeof(disk.pages));
*R(VIRTIO_MMIO_QUEUE_PFN) = ((uint64)disk.pages) >> PGSHIFT;
// desc = pages -- num * virtq_desc
// avail = pages + 0x40 -- 2 * uint16, then num * uint16
// used = pages + 4096 -- 2 * uint16, then num * vRingUsedElem
disk.desc = (struct virtq_desc *)disk.pages;
disk.avail = (struct virtq_avail *)(disk.pages +
NUM * sizeof(struct virtq_desc));
disk.used = (struct virtq_used *)(disk.pages + PGSIZE);
// all NUM descriptors start out unused.
for (int i = 0; i < NUM; i++)
disk.free[i] = 1;
// plic.c and trap.c arrange for interrupts from VIRTIO0_IRQ.
}
// find a free descriptor, mark it non-free, return its index.
static int alloc_desc()
{
for (int i = 0; i < NUM; i++) {
if (disk.free[i]) {
disk.free[i] = 0;
return i;
}
}
return -1;
}
// mark a descriptor as free.
static void free_desc(int i)
{
if (i >= NUM)
panic("free_desc 1");
if (disk.free[i])
panic("free_desc 2");
disk.desc[i].addr = 0;
disk.desc[i].len = 0;
disk.desc[i].flags = 0;
disk.desc[i].next = 0;
disk.free[i] = 1;
}
// free a chain of descriptors.
static void free_chain(int i)
{
while (1) {
int flag = disk.desc[i].flags;
int nxt = disk.desc[i].next;
free_desc(i);
if (flag & VRING_DESC_F_NEXT)
i = nxt;
else
break;
}
}
// allocate three descriptors (they need not be contiguous).
// disk transfers always use three descriptors.
static int alloc3_desc(int *idx)
{
for (int i = 0; i < 3; i++) {
idx[i] = alloc_desc();
if (idx[i] < 0) {
for (int j = 0; j < i; j++)
free_desc(idx[j]);
return -1;
}
}
return 0;
}
extern int PID;
void virtio_disk_rw(struct buf *b, int write)
{
uint64 sector = b->blockno * (BSIZE / 512);
// the spec's Section 5.2 says that legacy block operations use
// three descriptors: one for type/reserved/sector, one for the
// data, one for a 1-byte status result.
// allocate the three descriptors.
int idx[3];
while (1) {
if (alloc3_desc(idx) == 0) {
break;
}
yield();
}
// format the three descriptors.
// qemu's virtio-blk.c reads them.
struct virtio_blk_req *buf0 = &disk.ops[idx[0]];
if (write)
buf0->type = VIRTIO_BLK_T_OUT; // write the disk
else
buf0->type = VIRTIO_BLK_T_IN; // read the disk
buf0->reserved = 0;
buf0->sector = sector;
disk.desc[idx[0]].addr = (uint64)buf0;
disk.desc[idx[0]].len = sizeof(struct virtio_blk_req);
disk.desc[idx[0]].flags = VRING_DESC_F_NEXT;
disk.desc[idx[0]].next = idx[1];
disk.desc[idx[1]].addr = (uint64)b->data;
disk.desc[idx[1]].len = BSIZE;
if (write)
disk.desc[idx[1]].flags = 0; // device reads b->data
else
disk.desc[idx[1]].flags =
VRING_DESC_F_WRITE; // device writes b->data
disk.desc[idx[1]].flags |= VRING_DESC_F_NEXT;
disk.desc[idx[1]].next = idx[2];
disk.info[idx[0]].status = 0xfb; // device writes 0 on success
disk.desc[idx[2]].addr = (uint64)&disk.info[idx[0]].status;
disk.desc[idx[2]].len = 1;
disk.desc[idx[2]].flags =
VRING_DESC_F_WRITE; // device writes the status
disk.desc[idx[2]].next = 0;
// record struct buf for virtio_disk_intr().
b->disk = 1;
disk.info[idx[0]].b = b;
// tell the device the first index in our chain of descriptors.
disk.avail->ring[disk.avail->idx % NUM] = idx[0];
__sync_synchronize();
// tell the device another avail ring entry is available.
disk.avail->idx += 1; // not % NUM ...
__sync_synchronize();
*R(VIRTIO_MMIO_QUEUE_NOTIFY) = 0; // value is queue number
// Wait for virtio_disk_intr() to say request has finished.
// Make sure the compiler will load 'b' from memory
struct buf volatile *_b = b;
intr_on();
while (_b->disk == 1) {
// WARN: No kernel concurrent support, DO NOT allow kernel yield
// yield();
}
intr_off();
disk.info[idx[0]].b = 0;
free_chain(idx[0]);
}
void virtio_disk_intr()
{
// the device won't raise another interrupt until we tell it
// we've seen this interrupt, which the following line does.
// this may race with the device writing new entries to
// the "used" ring, in which case we may process the new
// completion entries in this interrupt, and have nothing to do
// in the next interrupt, which is harmless.
*R(VIRTIO_MMIO_INTERRUPT_ACK) = *R(VIRTIO_MMIO_INTERRUPT_STATUS) & 0x3;
__sync_synchronize();
// the device increments disk.used->idx when it
// adds an entry to the used ring.
while (disk.used_idx != disk.used->idx) {
__sync_synchronize();
int id = disk.used->ring[disk.used_idx % NUM].id;
if (disk.info[id].status != 0)
panic("virtio_disk_intr status");
struct buf *b = disk.info[id].b;
b->disk = 0; // disk is done with buf
disk.used_idx += 1;
}
}
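End to end, one disk request means calling virtio_disk_rw() with a struct buf whose blockno and data fields describe the block, then spinning until virtio_disk_intr() clears b->disk. A rough usage sketch, assuming those struct buf fields are declared in bio.h as the code above implies; the function name is illustrative:

#include "bio.h"
#include "virtio.h"

// Synchronously read one block into a caller-provided buffer.
static void read_block_example(struct buf *b, uint blockno)
{
	b->blockno = blockno;
	b->disk = 0;
	virtio_disk_rw(b, 0); // 0 = read; returns once the used-ring entry arrives
	// b->data now holds the block contents (BSIZE bytes).
}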

373
os/vm.c Normal file
View File

@ -0,0 +1,373 @@
#include "vm.h"
#include "defs.h"
#include "plic.h"
#include "riscv.h"
pagetable_t kernel_pagetable;
extern char e_text[]; // kernel.ld sets this to end of kernel code.
extern char trampoline[];
// Make a direct-map page table for the kernel.
pagetable_t kvmmake()
{
pagetable_t kpgtbl;
kpgtbl = (pagetable_t)kalloc();
memset(kpgtbl, 0, PGSIZE);
// virtio mmio disk interface
kvmmap(kpgtbl, VIRTIO0, VIRTIO0, PGSIZE, PTE_R | PTE_W);
// PLIC
kvmmap(kpgtbl, PLIC, PLIC, 0x400000, PTE_R | PTE_W);
// map kernel text executable and read-only.
kvmmap(kpgtbl, KERNBASE, KERNBASE, (uint64)e_text - KERNBASE,
PTE_R | PTE_X);
// map kernel data and the physical RAM we'll make use of.
kvmmap(kpgtbl, (uint64)e_text, (uint64)e_text, PHYSTOP - (uint64)e_text,
PTE_R | PTE_W);
kvmmap(kpgtbl, TRAMPOLINE, (uint64)trampoline, PGSIZE, PTE_R | PTE_X);
return kpgtbl;
}
// Initialize the one kernel_pagetable
// Switch h/w page table register to the kernel's page table,
// and enable paging.
void kvm_init()
{
kernel_pagetable = kvmmake();
w_satp(MAKE_SATP(kernel_pagetable));
sfence_vma();
infof("enable pageing at %p", r_satp());
}
// Return the address of the PTE in page table pagetable
// that corresponds to virtual address va. If alloc!=0,
// create any required page-table pages.
//
// The risc-v Sv39 scheme has three levels of page-table
// pages. A page-table page contains 512 64-bit PTEs.
// A 64-bit virtual address is split into five fields:
// 39..63 -- must be zero.
// 30..38 -- 9 bits of level-2 index.
// 21..29 -- 9 bits of level-1 index.
// 12..20 -- 9 bits of level-0 index.
// 0..11 -- 12 bits of byte offset within the page.
pte_t *walk(pagetable_t pagetable, uint64 va, int alloc)
{
if (va >= MAXVA)
panic("walk");
for (int level = 2; level > 0; level--) {
pte_t *pte = &pagetable[PX(level, va)];
if (*pte & PTE_V) {
pagetable = (pagetable_t)PTE2PA(*pte);
} else {
if (!alloc || (pagetable = (pde_t *)kalloc()) == 0)
return 0;
memset(pagetable, 0, PGSIZE);
*pte = PA2PTE(pagetable) | PTE_V;
}
}
return &pagetable[PX(0, va)];
}
// Look up a virtual address, return the physical address,
// or 0 if not mapped.
// Can only be used to look up user pages.
uint64 walkaddr(pagetable_t pagetable, uint64 va)
{
pte_t *pte;
uint64 pa;
if (va >= MAXVA)
return 0;
pte = walk(pagetable, va, 0);
if (pte == 0)
return 0;
if ((*pte & PTE_V) == 0)
return 0;
if ((*pte & PTE_U) == 0)
return 0;
pa = PTE2PA(*pte);
return pa;
}
// Look up a virtual address and return the full physical address
// (page base | in-page offset), or 0 if not mapped.
uint64 useraddr(pagetable_t pagetable, uint64 va)
{
uint64 page = walkaddr(pagetable, va);
if (page == 0)
return 0;
return page | (va & 0xFFFULL);
}
// Add a mapping to the kernel page table.
// only used when booting.
// does not flush TLB or enable paging.
void kvmmap(pagetable_t kpgtbl, uint64 va, uint64 pa, uint64 sz, int perm)
{
if (mappages(kpgtbl, va, sz, pa, perm) != 0)
panic("kvmmap");
}
// Create PTEs for virtual addresses starting at va that refer to
// physical addresses starting at pa. va and size might not
// be page-aligned. Returns 0 on success, -1 if walk() couldn't
// allocate a needed page-table page.
int mappages(pagetable_t pagetable, uint64 va, uint64 size, uint64 pa, int perm)
{
uint64 a, last;
pte_t *pte;
a = PGROUNDDOWN(va);
last = PGROUNDDOWN(va + size - 1);
for (;;) {
if ((pte = walk(pagetable, a, 1)) == 0) {
errorf("pte invalid, va = %p", a);
return -1;
}
if (*pte & PTE_V) {
errorf("remap");
return -1;
}
*pte = PA2PTE(pa) | perm | PTE_V;
if (a == last)
break;
a += PGSIZE;
pa += PGSIZE;
}
return 0;
}
int uvmmap(pagetable_t pagetable, uint64 va, uint64 npages, int perm)
{
for (int i = 0; i < npages; ++i) {
if (mappages(pagetable, va + i * 0x1000, 0x1000,
(uint64)kalloc(), perm)) {
return -1;
}
}
return 0;
}
// Remove npages of mappings starting from va. va must be
// page-aligned. The mappings must exist.
// Optionally free the physical memory.
void uvmunmap(pagetable_t pagetable, uint64 va, uint64 npages, int do_free)
{
uint64 a;
pte_t *pte;
if ((va % PGSIZE) != 0)
panic("uvmunmap: not aligned");
for (a = va; a < va + npages * PGSIZE; a += PGSIZE) {
if ((pte = walk(pagetable, a, 0)) == 0)
continue;
if ((*pte & PTE_V) != 0) {
if (PTE_FLAGS(*pte) == PTE_V)
panic("uvmunmap: not a leaf");
if (do_free) {
uint64 pa = PTE2PA(*pte);
kfree((void *)pa);
}
}
*pte = 0;
}
}
// create an empty user page table.
// returns 0 if out of memory.
pagetable_t uvmcreate()
{
pagetable_t pagetable;
pagetable = (pagetable_t)kalloc();
if (pagetable == 0) {
errorf("uvmcreate: kalloc error");
return 0;
}
memset(pagetable, 0, PGSIZE);
if (mappages(pagetable, TRAMPOLINE, PAGE_SIZE, (uint64)trampoline,
PTE_R | PTE_X) < 0) {
panic("mappages fail");
}
return pagetable;
}
// Recursively free page-table pages.
// All leaf mappings must already have been removed.
void freewalk(pagetable_t pagetable)
{
// there are 2^9 = 512 PTEs in a page table.
for (int i = 0; i < 512; i++) {
pte_t pte = pagetable[i];
if ((pte & PTE_V) && (pte & (PTE_R | PTE_W | PTE_X)) == 0) {
// this PTE points to a lower-level page table.
uint64 child = PTE2PA(pte);
freewalk((pagetable_t)child);
pagetable[i] = 0;
} else if (pte & PTE_V) {
panic("freewalk: leaf");
}
}
kfree((void *)pagetable);
}
/**
* @brief Free user memory pages, then free page-table pages.
*
* @param max_page The size of the user address space, in pages (used as the unmap count).
*/
void uvmfree(pagetable_t pagetable, uint64 max_page)
{
if (max_page > 0)
uvmunmap(pagetable, 0, max_page, 1);
freewalk(pagetable);
}
// Used in fork.
// Copy the pagetable page and all the user pages.
// Return 0 on success, -1 on error.
int uvmcopy(pagetable_t old, pagetable_t new, uint64 max_page)
{
pte_t *pte;
uint64 pa, i;
uint flags;
char *mem;
for (i = 0; i < max_page * PAGE_SIZE; i += PGSIZE) {
if ((pte = walk(old, i, 0)) == 0)
continue;
if ((*pte & PTE_V) == 0)
continue;
pa = PTE2PA(*pte);
flags = PTE_FLAGS(*pte);
if ((mem = kalloc()) == 0)
goto err;
memmove(mem, (char *)pa, PGSIZE);
if (mappages(new, i, PGSIZE, (uint64)mem, flags) != 0) {
kfree(mem);
goto err;
}
}
return 0;
err:
uvmunmap(new, 0, i / PGSIZE, 1);
return -1;
}
// Copy from kernel to user.
// Copy len bytes from src to virtual address dstva in a given page table.
// Return 0 on success, -1 on error.
int copyout(pagetable_t pagetable, uint64 dstva, char *src, uint64 len)
{
uint64 n, va0, pa0;
while (len > 0) {
va0 = PGROUNDDOWN(dstva);
pa0 = walkaddr(pagetable, va0);
if (pa0 == 0)
return -1;
n = PGSIZE - (dstva - va0);
if (n > len)
n = len;
memmove((void *)(pa0 + (dstva - va0)), src, n);
len -= n;
src += n;
dstva = va0 + PGSIZE;
}
return 0;
}
// Copy from user to kernel.
// Copy len bytes to dst from virtual address srcva in a given page table.
// Return 0 on success, -1 on error.
int copyin(pagetable_t pagetable, char *dst, uint64 srcva, uint64 len)
{
uint64 n, va0, pa0;
while (len > 0) {
va0 = PGROUNDDOWN(srcva);
pa0 = walkaddr(pagetable, va0);
if (pa0 == 0)
return -1;
n = PGSIZE - (srcva - va0);
if (n > len)
n = len;
memmove(dst, (void *)(pa0 + (srcva - va0)), n);
len -= n;
dst += n;
srcva = va0 + PGSIZE;
}
return 0;
}
// Copy a null-terminated string from user to kernel.
// Copy bytes to dst from virtual address srcva in a given page table,
// until a '\0', or max.
// Return 0 on success, -1 on error.
int copyinstr(pagetable_t pagetable, char *dst, uint64 srcva, uint64 max)
{
uint64 n, va0, pa0;
int got_null = 0, len = 0;
while (got_null == 0 && max > 0) {
va0 = PGROUNDDOWN(srcva);
pa0 = walkaddr(pagetable, va0);
if (pa0 == 0)
return -1;
n = PGSIZE - (srcva - va0);
if (n > max)
n = max;
char *p = (char *)(pa0 + (srcva - va0));
while (n > 0) {
if (*p == '\0') {
*dst = '\0';
got_null = 1;
break;
} else {
*dst = *p;
}
--n;
--max;
p++;
dst++;
len++;
}
srcva = va0 + PGSIZE;
}
return len;
}
// Copy to either a user address, or kernel address,
// depending on usr_dst.
// Returns 0 on success, -1 on error.
int either_copyout(int user_dst, uint64 dst, char *src, uint64 len)
{
struct proc *p = curr_proc();
if (user_dst) {
return copyout(p->pagetable, dst, src, len);
} else {
memmove((void *)dst, src, len);
return 0;
}
}
// Copy from either a user address, or kernel address,
// depending on usr_src.
// Returns 0 on success, -1 on error.
int either_copyin(int user_src, uint64 src, char *dst, uint64 len)
{
struct proc *p = curr_proc();
if (user_src) {
return copyin(p->pagetable, dst, src, len);
} else {
memmove(dst, (char *)src, len);
return 0;
}
}
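The walk() comment above describes how Sv39 splits a virtual address into three 9-bit page-table indices plus a 12-bit page offset; a worked decomposition makes the bit fields tangible. The macro below is a stand-in for the PX() macro from riscv.h, which is not shown in this diff:

#include "types.h"

// Extract the 9-bit page-table index for a given level (2, 1 or 0),
// mirroring what walk() does with the PX() macro.
#define VA_INDEX(level, va) (((uint64)(va) >> (12 + 9 * (level))) & 0x1ff)

// Example: va = 0x80201abc
//   level-2 index = VA_INDEX(2, va) = 0x002   (bits 38..30)
//   level-1 index = VA_INDEX(1, va) = 0x001   (bits 29..21)
//   level-0 index = VA_INDEX(0, va) = 0x001   (bits 20..12)
//   page offset   = va & 0xfff      = 0xabc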

23
os/vm.h Normal file
View File

@ -0,0 +1,23 @@
#ifndef VM_H
#define VM_H
#include "riscv.h"
#include "types.h"
void kvm_init();
void kvmmap(pagetable_t, uint64, uint64, uint64, int);
int mappages(pagetable_t, uint64, uint64, uint64, int);
pagetable_t uvmcreate();
int uvmcopy(pagetable_t, pagetable_t, uint64);
void uvmfree(pagetable_t, uint64);
int uvmmap(pagetable_t pagetable, uint64 va, uint64 npages, int perm);
void uvmunmap(pagetable_t, uint64, uint64, int);
uint64 walkaddr(pagetable_t, uint64);
uint64 useraddr(pagetable_t, uint64);
int copyout(pagetable_t, uint64, char *, uint64);
int copyin(pagetable_t, char *, uint64, uint64);
int copyinstr(pagetable_t, char *, uint64, uint64);
int either_copyout(int, uint64, char *, uint64);
int either_copyin(int, uint64, char *, uint64);
#endif // VM_H

14
scripts/initproc.py Normal file
View File

@ -0,0 +1,14 @@
import os
import argparse
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('INIT_PROC', nargs='?', default="usershell")
args = parser.parse_args()
f = open("os/initproc.S", mode="w")
f.write(
'''
.global INIT_PROC
INIT_PROC:
.string \"{0}\"
'''.format(args.INIT_PROC));