feat: add .clang-format file from torvalds/linux (#492)

Author: mzz, 2024-04-07 21:13:55 +08:00 (committed by GitHub)
parent bcb3e80d96
commit 3a83d98819
4 changed files with 1015 additions and 175 deletions

.clang-format Normal file (742 additions)

@@ -0,0 +1,742 @@
# SPDX-License-Identifier: GPL-2.0
#
# clang-format configuration file. Intended for clang-format >= 11.
#
# For more information, see:
#
# Documentation/process/clang-format.rst
# https://clang.llvm.org/docs/ClangFormat.html
# https://clang.llvm.org/docs/ClangFormatStyleOptions.html
#
---
AccessModifierOffset: -4
AlignAfterOpenBracket: Align
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: false
AlignEscapedNewlines: Left
AlignOperands: true
AlignTrailingComments: false
AllowAllParametersOfDeclarationOnNextLine: false
AllowShortBlocksOnASingleLine: false
AllowShortCaseLabelsOnASingleLine: false
AllowShortFunctionsOnASingleLine: None
AllowShortIfStatementsOnASingleLine: false
AllowShortLoopsOnASingleLine: false
AlwaysBreakAfterDefinitionReturnType: None
AlwaysBreakAfterReturnType: None
AlwaysBreakBeforeMultilineStrings: false
AlwaysBreakTemplateDeclarations: false
BinPackArguments: true
BinPackParameters: true
BraceWrapping:
  AfterClass: false
  AfterControlStatement: false
  AfterEnum: false
  AfterFunction: true
  AfterNamespace: true
  AfterObjCDeclaration: false
  AfterStruct: false
  AfterUnion: false
  AfterExternBlock: false
  BeforeCatch: false
  BeforeElse: false
  IndentBraces: false
  SplitEmptyFunction: true
  SplitEmptyRecord: true
  SplitEmptyNamespace: true
BreakBeforeBinaryOperators: None
BreakBeforeBraces: Custom
BreakBeforeInheritanceComma: false
BreakBeforeTernaryOperators: false
BreakConstructorInitializersBeforeComma: false
BreakConstructorInitializers: BeforeComma
BreakAfterJavaFieldAnnotations: false
BreakStringLiterals: false
ColumnLimit: 80
CommentPragmas: '^ IWYU pragma:'
CompactNamespaces: false
ConstructorInitializerAllOnOneLineOrOnePerLine: false
ConstructorInitializerIndentWidth: 8
ContinuationIndentWidth: 8
Cpp11BracedListStyle: false
DerivePointerAlignment: false
DisableFormat: false
ExperimentalAutoDetectBinPacking: false
FixNamespaceComments: false
# Taken from:
# git grep -h '^#define [^[:space:]]*for_each[^[:space:]]*(' include/ tools/ \
# | sed "s,^#define \([^[:space:]]*for_each[^[:space:]]*\)(.*$, - '\1'," \
# | LC_ALL=C sort -u
ForEachMacros:
- '__ata_qc_for_each'
- '__bio_for_each_bvec'
- '__bio_for_each_segment'
- '__evlist__for_each_entry'
- '__evlist__for_each_entry_continue'
- '__evlist__for_each_entry_from'
- '__evlist__for_each_entry_reverse'
- '__evlist__for_each_entry_safe'
- '__for_each_mem_range'
- '__for_each_mem_range_rev'
- '__for_each_thread'
- '__hlist_for_each_rcu'
- '__map__for_each_symbol_by_name'
- '__pci_bus_for_each_res0'
- '__pci_bus_for_each_res1'
- '__pci_dev_for_each_res0'
- '__pci_dev_for_each_res1'
- '__perf_evlist__for_each_entry'
- '__perf_evlist__for_each_entry_reverse'
- '__perf_evlist__for_each_entry_safe'
- '__rq_for_each_bio'
- '__shost_for_each_device'
- '__sym_for_each'
- 'apei_estatus_for_each_section'
- 'ata_for_each_dev'
- 'ata_for_each_link'
- 'ata_qc_for_each'
- 'ata_qc_for_each_raw'
- 'ata_qc_for_each_with_internal'
- 'ax25_for_each'
- 'ax25_uid_for_each'
- 'bio_for_each_bvec'
- 'bio_for_each_bvec_all'
- 'bio_for_each_folio_all'
- 'bio_for_each_integrity_vec'
- 'bio_for_each_segment'
- 'bio_for_each_segment_all'
- 'bio_list_for_each'
- 'bip_for_each_vec'
- 'bond_for_each_slave'
- 'bond_for_each_slave_rcu'
- 'bpf_for_each'
- 'bpf_for_each_reg_in_vstate'
- 'bpf_for_each_reg_in_vstate_mask'
- 'bpf_for_each_spilled_reg'
- 'bpf_object__for_each_map'
- 'bpf_object__for_each_program'
- 'btree_for_each_safe128'
- 'btree_for_each_safe32'
- 'btree_for_each_safe64'
- 'btree_for_each_safel'
- 'card_for_each_dev'
- 'cgroup_taskset_for_each'
- 'cgroup_taskset_for_each_leader'
- 'cpu_aggr_map__for_each_idx'
- 'cpufreq_for_each_efficient_entry_idx'
- 'cpufreq_for_each_entry'
- 'cpufreq_for_each_entry_idx'
- 'cpufreq_for_each_valid_entry'
- 'cpufreq_for_each_valid_entry_idx'
- 'css_for_each_child'
- 'css_for_each_descendant_post'
- 'css_for_each_descendant_pre'
- 'damon_for_each_region'
- 'damon_for_each_region_from'
- 'damon_for_each_region_safe'
- 'damon_for_each_scheme'
- 'damon_for_each_scheme_safe'
- 'damon_for_each_target'
- 'damon_for_each_target_safe'
- 'damos_for_each_filter'
- 'damos_for_each_filter_safe'
- 'data__for_each_file'
- 'data__for_each_file_new'
- 'data__for_each_file_start'
- 'device_for_each_child_node'
- 'displayid_iter_for_each'
- 'dma_fence_array_for_each'
- 'dma_fence_chain_for_each'
- 'dma_fence_unwrap_for_each'
- 'dma_resv_for_each_fence'
- 'dma_resv_for_each_fence_unlocked'
- 'do_for_each_ftrace_op'
- 'drm_atomic_crtc_for_each_plane'
- 'drm_atomic_crtc_state_for_each_plane'
- 'drm_atomic_crtc_state_for_each_plane_state'
- 'drm_atomic_for_each_plane_damage'
- 'drm_client_for_each_connector_iter'
- 'drm_client_for_each_modeset'
- 'drm_connector_for_each_possible_encoder'
- 'drm_exec_for_each_locked_object'
- 'drm_exec_for_each_locked_object_reverse'
- 'drm_for_each_bridge_in_chain'
- 'drm_for_each_connector_iter'
- 'drm_for_each_crtc'
- 'drm_for_each_crtc_reverse'
- 'drm_for_each_encoder'
- 'drm_for_each_encoder_mask'
- 'drm_for_each_fb'
- 'drm_for_each_legacy_plane'
- 'drm_for_each_plane'
- 'drm_for_each_plane_mask'
- 'drm_for_each_privobj'
- 'drm_gem_for_each_gpuva'
- 'drm_gem_for_each_gpuva_safe'
- 'drm_gpuva_for_each_op'
- 'drm_gpuva_for_each_op_from_reverse'
- 'drm_gpuva_for_each_op_safe'
- 'drm_gpuvm_for_each_va'
- 'drm_gpuvm_for_each_va_range'
- 'drm_gpuvm_for_each_va_range_safe'
- 'drm_gpuvm_for_each_va_safe'
- 'drm_mm_for_each_hole'
- 'drm_mm_for_each_node'
- 'drm_mm_for_each_node_in_range'
- 'drm_mm_for_each_node_safe'
- 'dsa_switch_for_each_available_port'
- 'dsa_switch_for_each_cpu_port'
- 'dsa_switch_for_each_cpu_port_continue_reverse'
- 'dsa_switch_for_each_port'
- 'dsa_switch_for_each_port_continue_reverse'
- 'dsa_switch_for_each_port_safe'
- 'dsa_switch_for_each_user_port'
- 'dsa_tree_for_each_cpu_port'
- 'dsa_tree_for_each_user_port'
- 'dsa_tree_for_each_user_port_continue_reverse'
- 'dso__for_each_symbol'
- 'dsos__for_each_with_build_id'
- 'elf_hash_for_each_possible'
- 'elf_symtab__for_each_symbol'
- 'evlist__for_each_cpu'
- 'evlist__for_each_entry'
- 'evlist__for_each_entry_continue'
- 'evlist__for_each_entry_from'
- 'evlist__for_each_entry_reverse'
- 'evlist__for_each_entry_safe'
- 'flow_action_for_each'
- 'for_each_acpi_consumer_dev'
- 'for_each_acpi_dev_match'
- 'for_each_active_dev_scope'
- 'for_each_active_drhd_unit'
- 'for_each_active_iommu'
- 'for_each_active_route'
- 'for_each_aggr_pgid'
- 'for_each_and_bit'
- 'for_each_andnot_bit'
- 'for_each_available_child_of_node'
- 'for_each_bench'
- 'for_each_bio'
- 'for_each_board_func_rsrc'
- 'for_each_btf_ext_rec'
- 'for_each_btf_ext_sec'
- 'for_each_bvec'
- 'for_each_card_auxs'
- 'for_each_card_auxs_safe'
- 'for_each_card_components'
- 'for_each_card_dapms'
- 'for_each_card_pre_auxs'
- 'for_each_card_prelinks'
- 'for_each_card_rtds'
- 'for_each_card_rtds_safe'
- 'for_each_card_widgets'
- 'for_each_card_widgets_safe'
- 'for_each_cgroup_storage_type'
- 'for_each_child_of_node'
- 'for_each_clear_bit'
- 'for_each_clear_bit_from'
- 'for_each_clear_bitrange'
- 'for_each_clear_bitrange_from'
- 'for_each_cmd'
- 'for_each_cmsghdr'
- 'for_each_collection'
- 'for_each_comp_order'
- 'for_each_compatible_node'
- 'for_each_component_dais'
- 'for_each_component_dais_safe'
- 'for_each_conduit'
- 'for_each_console'
- 'for_each_console_srcu'
- 'for_each_cpu'
- 'for_each_cpu_and'
- 'for_each_cpu_andnot'
- 'for_each_cpu_or'
- 'for_each_cpu_wrap'
- 'for_each_dapm_widgets'
- 'for_each_dedup_cand'
- 'for_each_dev_addr'
- 'for_each_dev_scope'
- 'for_each_dma_cap_mask'
- 'for_each_dpcm_be'
- 'for_each_dpcm_be_rollback'
- 'for_each_dpcm_be_safe'
- 'for_each_dpcm_fe'
- 'for_each_drhd_unit'
- 'for_each_dss_dev'
- 'for_each_efi_memory_desc'
- 'for_each_efi_memory_desc_in_map'
- 'for_each_element'
- 'for_each_element_extid'
- 'for_each_element_id'
- 'for_each_endpoint_of_node'
- 'for_each_event'
- 'for_each_event_tps'
- 'for_each_evictable_lru'
- 'for_each_fib6_node_rt_rcu'
- 'for_each_fib6_walker_rt'
- 'for_each_free_mem_pfn_range_in_zone'
- 'for_each_free_mem_pfn_range_in_zone_from'
- 'for_each_free_mem_range'
- 'for_each_free_mem_range_reverse'
- 'for_each_func_rsrc'
- 'for_each_gpiochip_node'
- 'for_each_group_evsel'
- 'for_each_group_evsel_head'
- 'for_each_group_member'
- 'for_each_group_member_head'
- 'for_each_hstate'
- 'for_each_if'
- 'for_each_inject_fn'
- 'for_each_insn'
- 'for_each_insn_prefix'
- 'for_each_intid'
- 'for_each_iommu'
- 'for_each_ip_tunnel_rcu'
- 'for_each_irq_nr'
- 'for_each_lang'
- 'for_each_link_codecs'
- 'for_each_link_cpus'
- 'for_each_link_platforms'
- 'for_each_lru'
- 'for_each_matching_node'
- 'for_each_matching_node_and_match'
- 'for_each_media_entity_data_link'
- 'for_each_mem_pfn_range'
- 'for_each_mem_range'
- 'for_each_mem_range_rev'
- 'for_each_mem_region'
- 'for_each_member'
- 'for_each_memory'
- 'for_each_migratetype_order'
- 'for_each_missing_reg'
- 'for_each_mle_subelement'
- 'for_each_mod_mem_type'
- 'for_each_net'
- 'for_each_net_continue_reverse'
- 'for_each_net_rcu'
- 'for_each_netdev'
- 'for_each_netdev_continue'
- 'for_each_netdev_continue_rcu'
- 'for_each_netdev_continue_reverse'
- 'for_each_netdev_dump'
- 'for_each_netdev_feature'
- 'for_each_netdev_in_bond_rcu'
- 'for_each_netdev_rcu'
- 'for_each_netdev_reverse'
- 'for_each_netdev_safe'
- 'for_each_new_connector_in_state'
- 'for_each_new_crtc_in_state'
- 'for_each_new_mst_mgr_in_state'
- 'for_each_new_plane_in_state'
- 'for_each_new_plane_in_state_reverse'
- 'for_each_new_private_obj_in_state'
- 'for_each_new_reg'
- 'for_each_node'
- 'for_each_node_by_name'
- 'for_each_node_by_type'
- 'for_each_node_mask'
- 'for_each_node_state'
- 'for_each_node_with_cpus'
- 'for_each_node_with_property'
- 'for_each_nonreserved_multicast_dest_pgid'
- 'for_each_numa_hop_mask'
- 'for_each_of_allnodes'
- 'for_each_of_allnodes_from'
- 'for_each_of_cpu_node'
- 'for_each_of_pci_range'
- 'for_each_old_connector_in_state'
- 'for_each_old_crtc_in_state'
- 'for_each_old_mst_mgr_in_state'
- 'for_each_old_plane_in_state'
- 'for_each_old_private_obj_in_state'
- 'for_each_oldnew_connector_in_state'
- 'for_each_oldnew_crtc_in_state'
- 'for_each_oldnew_mst_mgr_in_state'
- 'for_each_oldnew_plane_in_state'
- 'for_each_oldnew_plane_in_state_reverse'
- 'for_each_oldnew_private_obj_in_state'
- 'for_each_online_cpu'
- 'for_each_online_node'
- 'for_each_online_pgdat'
- 'for_each_or_bit'
- 'for_each_path'
- 'for_each_pci_bridge'
- 'for_each_pci_dev'
- 'for_each_pcm_streams'
- 'for_each_physmem_range'
- 'for_each_populated_zone'
- 'for_each_possible_cpu'
- 'for_each_present_blessed_reg'
- 'for_each_present_cpu'
- 'for_each_prime_number'
- 'for_each_prime_number_from'
- 'for_each_probe_cache_entry'
- 'for_each_process'
- 'for_each_process_thread'
- 'for_each_prop_codec_conf'
- 'for_each_prop_dai_codec'
- 'for_each_prop_dai_cpu'
- 'for_each_prop_dlc_codecs'
- 'for_each_prop_dlc_cpus'
- 'for_each_prop_dlc_platforms'
- 'for_each_property_of_node'
- 'for_each_reg'
- 'for_each_reg_filtered'
- 'for_each_reloc'
- 'for_each_reloc_from'
- 'for_each_requested_gpio'
- 'for_each_requested_gpio_in_range'
- 'for_each_reserved_mem_range'
- 'for_each_reserved_mem_region'
- 'for_each_rtd_codec_dais'
- 'for_each_rtd_components'
- 'for_each_rtd_cpu_dais'
- 'for_each_rtd_dais'
- 'for_each_sband_iftype_data'
- 'for_each_script'
- 'for_each_sec'
- 'for_each_set_bit'
- 'for_each_set_bit_from'
- 'for_each_set_bit_wrap'
- 'for_each_set_bitrange'
- 'for_each_set_bitrange_from'
- 'for_each_set_clump8'
- 'for_each_sg'
- 'for_each_sg_dma_page'
- 'for_each_sg_page'
- 'for_each_sgtable_dma_page'
- 'for_each_sgtable_dma_sg'
- 'for_each_sgtable_page'
- 'for_each_sgtable_sg'
- 'for_each_sibling_event'
- 'for_each_sta_active_link'
- 'for_each_subelement'
- 'for_each_subelement_extid'
- 'for_each_subelement_id'
- 'for_each_sublist'
- 'for_each_subsystem'
- 'for_each_supported_activate_fn'
- 'for_each_supported_inject_fn'
- 'for_each_sym'
- 'for_each_test'
- 'for_each_thread'
- 'for_each_token'
- 'for_each_unicast_dest_pgid'
- 'for_each_valid_link'
- 'for_each_vif_active_link'
- 'for_each_vma'
- 'for_each_vma_range'
- 'for_each_vsi'
- 'for_each_wakeup_source'
- 'for_each_zone'
- 'for_each_zone_zonelist'
- 'for_each_zone_zonelist_nodemask'
- 'func_for_each_insn'
- 'fwnode_for_each_available_child_node'
- 'fwnode_for_each_child_node'
- 'fwnode_for_each_parent_node'
- 'fwnode_graph_for_each_endpoint'
- 'gadget_for_each_ep'
- 'genradix_for_each'
- 'genradix_for_each_from'
- 'genradix_for_each_reverse'
- 'hash_for_each'
- 'hash_for_each_possible'
- 'hash_for_each_possible_rcu'
- 'hash_for_each_possible_rcu_notrace'
- 'hash_for_each_possible_safe'
- 'hash_for_each_rcu'
- 'hash_for_each_safe'
- 'hashmap__for_each_entry'
- 'hashmap__for_each_entry_safe'
- 'hashmap__for_each_key_entry'
- 'hashmap__for_each_key_entry_safe'
- 'hctx_for_each_ctx'
- 'hists__for_each_format'
- 'hists__for_each_sort_list'
- 'hlist_bl_for_each_entry'
- 'hlist_bl_for_each_entry_rcu'
- 'hlist_bl_for_each_entry_safe'
- 'hlist_for_each'
- 'hlist_for_each_entry'
- 'hlist_for_each_entry_continue'
- 'hlist_for_each_entry_continue_rcu'
- 'hlist_for_each_entry_continue_rcu_bh'
- 'hlist_for_each_entry_from'
- 'hlist_for_each_entry_from_rcu'
- 'hlist_for_each_entry_rcu'
- 'hlist_for_each_entry_rcu_bh'
- 'hlist_for_each_entry_rcu_notrace'
- 'hlist_for_each_entry_safe'
- 'hlist_for_each_entry_srcu'
- 'hlist_for_each_safe'
- 'hlist_nulls_for_each_entry'
- 'hlist_nulls_for_each_entry_from'
- 'hlist_nulls_for_each_entry_rcu'
- 'hlist_nulls_for_each_entry_safe'
- 'i3c_bus_for_each_i2cdev'
- 'i3c_bus_for_each_i3cdev'
- 'idr_for_each_entry'
- 'idr_for_each_entry_continue'
- 'idr_for_each_entry_continue_ul'
- 'idr_for_each_entry_ul'
- 'in_dev_for_each_ifa_rcu'
- 'in_dev_for_each_ifa_rtnl'
- 'inet_bind_bucket_for_each'
- 'interval_tree_for_each_span'
- 'intlist__for_each_entry'
- 'intlist__for_each_entry_safe'
- 'kcore_copy__for_each_phdr'
- 'key_for_each'
- 'key_for_each_safe'
- 'klp_for_each_func'
- 'klp_for_each_func_safe'
- 'klp_for_each_func_static'
- 'klp_for_each_object'
- 'klp_for_each_object_safe'
- 'klp_for_each_object_static'
- 'kunit_suite_for_each_test_case'
- 'kvm_for_each_memslot'
- 'kvm_for_each_memslot_in_gfn_range'
- 'kvm_for_each_vcpu'
- 'libbpf_nla_for_each_attr'
- 'list_for_each'
- 'list_for_each_codec'
- 'list_for_each_codec_safe'
- 'list_for_each_continue'
- 'list_for_each_entry'
- 'list_for_each_entry_continue'
- 'list_for_each_entry_continue_rcu'
- 'list_for_each_entry_continue_reverse'
- 'list_for_each_entry_from'
- 'list_for_each_entry_from_rcu'
- 'list_for_each_entry_from_reverse'
- 'list_for_each_entry_lockless'
- 'list_for_each_entry_rcu'
- 'list_for_each_entry_reverse'
- 'list_for_each_entry_safe'
- 'list_for_each_entry_safe_continue'
- 'list_for_each_entry_safe_from'
- 'list_for_each_entry_safe_reverse'
- 'list_for_each_entry_srcu'
- 'list_for_each_from'
- 'list_for_each_prev'
- 'list_for_each_prev_safe'
- 'list_for_each_rcu'
- 'list_for_each_reverse'
- 'list_for_each_safe'
- 'llist_for_each'
- 'llist_for_each_entry'
- 'llist_for_each_entry_safe'
- 'llist_for_each_safe'
- 'lwq_for_each_safe'
- 'map__for_each_symbol'
- 'map__for_each_symbol_by_name'
- 'maps__for_each_entry'
- 'maps__for_each_entry_safe'
- 'mas_for_each'
- 'mci_for_each_dimm'
- 'media_device_for_each_entity'
- 'media_device_for_each_intf'
- 'media_device_for_each_link'
- 'media_device_for_each_pad'
- 'media_entity_for_each_pad'
- 'media_pipeline_for_each_entity'
- 'media_pipeline_for_each_pad'
- 'mlx5_lag_for_each_peer_mdev'
- 'msi_domain_for_each_desc'
- 'msi_for_each_desc'
- 'mt_for_each'
- 'nanddev_io_for_each_page'
- 'netdev_for_each_lower_dev'
- 'netdev_for_each_lower_private'
- 'netdev_for_each_lower_private_rcu'
- 'netdev_for_each_mc_addr'
- 'netdev_for_each_synced_mc_addr'
- 'netdev_for_each_synced_uc_addr'
- 'netdev_for_each_uc_addr'
- 'netdev_for_each_upper_dev_rcu'
- 'netdev_hw_addr_list_for_each'
- 'nft_rule_for_each_expr'
- 'nla_for_each_attr'
- 'nla_for_each_nested'
- 'nlmsg_for_each_attr'
- 'nlmsg_for_each_msg'
- 'nr_neigh_for_each'
- 'nr_neigh_for_each_safe'
- 'nr_node_for_each'
- 'nr_node_for_each_safe'
- 'of_for_each_phandle'
- 'of_property_for_each_string'
- 'of_property_for_each_u32'
- 'pci_bus_for_each_resource'
- 'pci_dev_for_each_resource'
- 'pcl_for_each_chunk'
- 'pcl_for_each_segment'
- 'pcm_for_each_format'
- 'perf_config_items__for_each_entry'
- 'perf_config_sections__for_each_entry'
- 'perf_config_set__for_each_entry'
- 'perf_cpu_map__for_each_cpu'
- 'perf_cpu_map__for_each_idx'
- 'perf_evlist__for_each_entry'
- 'perf_evlist__for_each_entry_reverse'
- 'perf_evlist__for_each_entry_safe'
- 'perf_evlist__for_each_evsel'
- 'perf_evlist__for_each_mmap'
- 'perf_hpp_list__for_each_format'
- 'perf_hpp_list__for_each_format_safe'
- 'perf_hpp_list__for_each_sort_list'
- 'perf_hpp_list__for_each_sort_list_safe'
- 'perf_tool_event__for_each_event'
- 'plist_for_each'
- 'plist_for_each_continue'
- 'plist_for_each_entry'
- 'plist_for_each_entry_continue'
- 'plist_for_each_entry_safe'
- 'plist_for_each_safe'
- 'pnp_for_each_card'
- 'pnp_for_each_dev'
- 'protocol_for_each_card'
- 'protocol_for_each_dev'
- 'queue_for_each_hw_ctx'
- 'radix_tree_for_each_slot'
- 'radix_tree_for_each_tagged'
- 'rb_for_each'
- 'rbtree_postorder_for_each_entry_safe'
- 'rdma_for_each_block'
- 'rdma_for_each_port'
- 'rdma_umem_for_each_dma_block'
- 'resort_rb__for_each_entry'
- 'resource_list_for_each_entry'
- 'resource_list_for_each_entry_safe'
- 'rhl_for_each_entry_rcu'
- 'rhl_for_each_rcu'
- 'rht_for_each'
- 'rht_for_each_entry'
- 'rht_for_each_entry_from'
- 'rht_for_each_entry_rcu'
- 'rht_for_each_entry_rcu_from'
- 'rht_for_each_entry_safe'
- 'rht_for_each_from'
- 'rht_for_each_rcu'
- 'rht_for_each_rcu_from'
- 'rq_for_each_bvec'
- 'rq_for_each_segment'
- 'rq_list_for_each'
- 'rq_list_for_each_safe'
- 'sample_read_group__for_each'
- 'scsi_for_each_prot_sg'
- 'scsi_for_each_sg'
- 'sctp_for_each_hentry'
- 'sctp_skb_for_each'
- 'sec_for_each_insn'
- 'sec_for_each_insn_continue'
- 'sec_for_each_insn_from'
- 'sec_for_each_sym'
- 'shdma_for_each_chan'
- 'shost_for_each_device'
- 'sk_for_each'
- 'sk_for_each_bound'
- 'sk_for_each_bound_bhash2'
- 'sk_for_each_entry_offset_rcu'
- 'sk_for_each_from'
- 'sk_for_each_rcu'
- 'sk_for_each_safe'
- 'sk_nulls_for_each'
- 'sk_nulls_for_each_from'
- 'sk_nulls_for_each_rcu'
- 'snd_array_for_each'
- 'snd_pcm_group_for_each_entry'
- 'snd_soc_dapm_widget_for_each_path'
- 'snd_soc_dapm_widget_for_each_path_safe'
- 'snd_soc_dapm_widget_for_each_sink_path'
- 'snd_soc_dapm_widget_for_each_source_path'
- 'strlist__for_each_entry'
- 'strlist__for_each_entry_safe'
- 'sym_for_each_insn'
- 'sym_for_each_insn_continue_reverse'
- 'symbols__for_each_entry'
- 'tb_property_for_each'
- 'tcf_act_for_each_action'
- 'tcf_exts_for_each_action'
- 'ttm_resource_manager_for_each_res'
- 'twsk_for_each_bound_bhash2'
- 'udp_portaddr_for_each_entry'
- 'udp_portaddr_for_each_entry_rcu'
- 'usb_hub_for_each_child'
- 'v4l2_device_for_each_subdev'
- 'v4l2_m2m_for_each_dst_buf'
- 'v4l2_m2m_for_each_dst_buf_safe'
- 'v4l2_m2m_for_each_src_buf'
- 'v4l2_m2m_for_each_src_buf_safe'
- 'virtio_device_for_each_vq'
- 'while_for_each_ftrace_op'
- 'xa_for_each'
- 'xa_for_each_marked'
- 'xa_for_each_range'
- 'xa_for_each_start'
- 'xas_for_each'
- 'xas_for_each_conflict'
- 'xas_for_each_marked'
- 'xbc_array_for_each_value'
- 'xbc_for_each_key_value'
- 'xbc_node_for_each_array_value'
- 'xbc_node_for_each_child'
- 'xbc_node_for_each_key_value'
- 'xbc_node_for_each_subkey'
- 'zorro_for_each_dev'
IncludeBlocks: Preserve
IncludeCategories:
  - Regex: '.*'
    Priority: 1
IncludeIsMainRegex: '(Test)?$'
IndentCaseLabels: false
IndentGotoLabels: false
IndentPPDirectives: None
IndentWidth: 8
IndentWrappedFunctionNames: false
JavaScriptQuotes: Leave
JavaScriptWrapImports: true
KeepEmptyLinesAtTheStartOfBlocks: false
MacroBlockBegin: ''
MacroBlockEnd: ''
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None
ObjCBinPackProtocolList: Auto
ObjCBlockIndentWidth: 8
ObjCSpaceAfterProperty: true
ObjCSpaceBeforeProtocolList: true
# Taken from git's rules
PenaltyBreakAssignment: 10
PenaltyBreakBeforeFirstCallParameter: 30
PenaltyBreakComment: 10
PenaltyBreakFirstLessLess: 0
PenaltyBreakString: 10
PenaltyExcessCharacter: 100
PenaltyReturnTypeOnItsOwnLine: 60
PointerAlignment: Right
ReflowComments: false
SortIncludes: false
SortUsingDeclarations: false
SpaceAfterCStyleCast: false
SpaceAfterTemplateKeyword: true
SpaceBeforeAssignmentOperators: true
SpaceBeforeCtorInitializerColon: true
SpaceBeforeInheritanceColon: true
SpaceBeforeParens: ControlStatementsExceptForEachMacros
SpaceBeforeRangeBasedForLoopColon: true
SpaceInEmptyParentheses: false
SpacesBeforeTrailingComments: 1
SpacesInAngles: false
SpacesInContainerLiterals: false
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Standard: Cpp03
TabWidth: 8
UseTab: Always
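
As a quick illustration of what this configuration produces (a hypothetical snippet, not code from this repository): 8-column tab indentation, the opening brace on its own line for functions but on the same line for control statements, right-aligned pointers, and an 80-column limit.

#include <stddef.h>

/*
 * Hypothetical example formatted per the .clang-format above; UseTab:
 * Always means the indentation below would be 8-column tabs.
 */
struct item {
        int key;
        struct item *next; /* PointerAlignment: Right */
};

/* BraceWrapping.AfterFunction: true puts this brace on its own line. */
static struct item *find_item(struct item *head, int key)
{
        struct item *cur;

        /* AfterControlStatement: false keeps braces on the loop line. */
        for (cur = head; cur; cur = cur->next) {
                if (cur->key == key)
                        return cur;
        }
        return NULL;
}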

.gitignore vendored (1 addition)

@@ -10,3 +10,4 @@ go-mod/
node_modules/
*.log
.build_tags
.checkpatch-camelcase.git.

Makefile

@@ -97,6 +97,6 @@ ebpf: submodule clean-ebpf
go generate ./trace/trace.go && echo trace > $(BUILD_TAGS_FILE) || echo > $(BUILD_TAGS_FILE)
ebpf-lint:
./scripts/checkpatch.pl --no-tree --strict --no-summary --show-types --color=always control/kern/tproxy.c --ignore COMMIT_COMMENT_SYMBOL,NOT_UNIFIED_DIFF,COMMIT_LOG_LONG_LINE,LONG_LINE_COMMENT,VOLATILE,ASSIGN_IN_IF,PREFER_DEFINED_ATTRIBUTE_MACRO,CAMELCASE,LEADING_SPACE
./scripts/checkpatch.pl --no-tree --strict --no-summary --show-types --color=always control/kern/tproxy.c --ignore COMMIT_COMMENT_SYMBOL,NOT_UNIFIED_DIFF,COMMIT_LOG_LONG_LINE,LONG_LINE_COMMENT,VOLATILE,ASSIGN_IN_IF,PREFER_DEFINED_ATTRIBUTE_MACRO,CAMELCASE,LEADING_SPACE,OPEN_ENDED_LINE,SPACING
## End Ebpf
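
The two suppressions added here (OPEN_ENDED_LINE, SPACING) are presumably needed because clang-format's output conflicts with checkpatch defaults: breaking a long call after the opening parenthesis leaves a line ending in '(', which checkpatch flags as OPEN_ENDED_LINE, and the re-aligned continuations can trigger SPACING. A sketch of the pattern, mirroring the reformatted bpf_printk calls in tproxy.c below:

/* After clang-format, a long call may break right after '(' -- checkpatch
 * would report OPEN_ENDED_LINE on the first line unless it is ignored. */
bpf_printk(
        "CHECK: lpm_key_map, match_set->type: %u, not: %d, outbound: %u",
        match_set->type, match_set->not, match_set->outbound);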

control/kern/tproxy.c

@@ -34,8 +34,10 @@
#define IPV4_CSUM_OFF(link_h_len) ((link_h_len) + offsetof(struct iphdr, check))
#define IPV4_DST_OFF(link_h_len) ((link_h_len) + offsetof(struct iphdr, daddr))
#define IPV4_SRC_OFF(link_h_len) ((link_h_len) + offsetof(struct iphdr, saddr))
#define IPV6_DST_OFF(link_h_len) ((link_h_len) + offsetof(struct ipv6hdr, daddr))
#define IPV6_SRC_OFF(link_h_len) ((link_h_len) + offsetof(struct ipv6hdr, saddr))
#define IPV6_DST_OFF(link_h_len) \
((link_h_len) + offsetof(struct ipv6hdr, daddr))
#define IPV6_SRC_OFF(link_h_len) \
((link_h_len) + offsetof(struct ipv6hdr, saddr))
#define PACKET_HOST 0
#define PACKET_OTHERHOST 3
@@ -97,14 +99,14 @@ struct outbound_connectivity_query {
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, struct outbound_connectivity_query);
__type(value, __u32); // true, false
__type(value, __u32); // true, false
__uint(max_entries, 256 * 2 * 2); // outbound * l4proto * ipversion
} outbound_connectivity_map SEC(".maps");
// Sockmap:
struct {
__uint(type, BPF_MAP_TYPE_SOCKMAP);
__type(key, __u32); // 0 is tcp, 1 is udp.
__type(key, __u32); // 0 is tcp, 1 is udp.
__type(value, __u64); // fd of socket.
__uint(max_entries, 2);
} listen_socket_map SEC(".maps");
@@ -177,12 +179,12 @@ static volatile const struct dae_param PARAM = {};
struct {
__uint(type, BPF_MAP_TYPE_LRU_HASH);
__type(key, __u32); // tgid
__type(key, __u32); // tgid
__type(value, __u32[TASK_COMM_LEN / 4]); // process name.
__uint(max_entries, MAX_TGID_PNAME_MAPPING_NUM);
__uint(pinning, LIBBPF_PIN_BY_NAME);
} tgid_pname_map
SEC(".maps"); // This map is only for old method (redirect mode in WAN).
SEC(".maps"); // This map is only for old method (redirect mode in WAN).
struct {
__uint(type, BPF_MAP_TYPE_LRU_HASH);
@@ -209,7 +211,7 @@ struct {
#define LinkType_Ethernet 1
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, __u32); // ifindex
__type(key, __u32); // ifindex
__type(value, __u32); // link length
__uint(max_entries, MAX_INTERFACE_NUM);
/// NOTICE: No persistence.
@@ -250,7 +252,7 @@ struct if_params {
struct {
__uint(type, BPF_MAP_TYPE_HASH);
__type(key, __u32); // ifindex
__type(key, __u32); // ifindex
__type(value, struct if_params); // ip
__uint(max_entries, MAX_INTERFACE_NUM);
/// NOTICE: No persistence.
@@ -409,8 +411,10 @@ get_tuples(const struct __sk_buff *skb, struct tuples *tuples,
tuples->dscp = ipv4_get_dscp(iph);
} else {
__builtin_memcpy(&tuples->five.dip, &ipv6h->daddr, IPV6_BYTE_LENGTH);
__builtin_memcpy(&tuples->five.sip, &ipv6h->saddr, IPV6_BYTE_LENGTH);
__builtin_memcpy(&tuples->five.dip, &ipv6h->daddr,
IPV6_BYTE_LENGTH);
__builtin_memcpy(&tuples->five.sip, &ipv6h->saddr,
IPV6_BYTE_LENGTH);
tuples->dscp = ipv6_get_dscp(ipv6h);
}
@@ -427,7 +431,7 @@ static __always_inline bool equal16(const __be32 x[4], const __be32 y[4])
{
#if __clang_major__ >= 10
return ((__be64 *)x)[0] == ((__be64 *)y)[0] &&
((__be64 *)x)[1] == ((__be64 *)y)[1];
((__be64 *)x)[1] == ((__be64 *)y)[1];
// return x[0] == y[0] && x[1] == y[1] && x[2] == y[2] && x[3] == y[3];
#else
@@ -438,7 +442,8 @@ static __always_inline bool equal16(const __be32 x[4], const __be32 y[4])
static __always_inline int
handle_ipv6_extensions(const struct __sk_buff *skb, __u32 offset, __u32 hdr,
struct icmp6hdr *icmp6h, struct tcphdr *tcph,
struct udphdr *udph, __u8 *ihl, __u8 *l4proto) {
struct udphdr *udph, __u8 *ihl, __u8 *l4proto)
{
__u8 hdr_length = 0;
__u8 nexthdr = 0;
*ihl = sizeof(struct ipv6hdr) / 4;
@@ -449,9 +454,10 @@ handle_ipv6_extensions(const struct __sk_buff *skb, __u32 offset, __u32 hdr,
// We disable it here to support more poor memory devices.
// #pragma unroll
for (int i = 0; i < IPV6_MAX_EXTENSIONS;
i++, offset += hdr_length, hdr = nexthdr, *ihl += hdr_length / 4) {
i++, offset += hdr_length, hdr = nexthdr, *ihl += hdr_length / 4) {
if (hdr_length % 4) {
bpf_printk("IPv6 extension length is not multiples of 4");
bpf_printk(
"IPv6 extension length is not multiples of 4");
return 1;
}
// See control/control_plane.go.
@@ -461,7 +467,8 @@ handle_ipv6_extensions(const struct __sk_buff *skb, __u32 offset, __u32 hdr,
*l4proto = hdr;
hdr_length = sizeof(struct icmp6hdr);
// Assume ICMPV6 as a level 4 protocol.
ret = bpf_skb_load_bytes(skb, offset, icmp6h, hdr_length);
ret = bpf_skb_load_bytes(skb, offset, icmp6h,
hdr_length);
if (ret) {
bpf_printk("not a valid IPv6 packet");
return -EFAULT;
@@ -470,14 +477,16 @@
case IPPROTO_HOPOPTS:
case IPPROTO_ROUTING:
ret = bpf_skb_load_bytes(skb, offset + 1, &hdr_length, sizeof(hdr_length));
ret = bpf_skb_load_bytes(skb, offset + 1, &hdr_length,
sizeof(hdr_length));
if (ret) {
bpf_printk("not a valid IPv6 packet");
return -EFAULT;
}
special_n1:
ret = bpf_skb_load_bytes(skb, offset, &nexthdr, sizeof(nexthdr));
ret = bpf_skb_load_bytes(skb, offset, &nexthdr,
sizeof(nexthdr));
if (ret) {
bpf_printk("not a valid IPv6 packet");
return -EFAULT;
@@ -491,14 +500,16 @@ special_n1:
*l4proto = hdr;
if (hdr == IPPROTO_TCP) {
// Upper layer;
ret = bpf_skb_load_bytes(skb, offset, tcph, sizeof(struct tcphdr));
ret = bpf_skb_load_bytes(skb, offset, tcph,
sizeof(struct tcphdr));
if (ret) {
bpf_printk("not a valid IPv6 packet");
return -EFAULT;
}
} else if (hdr == IPPROTO_UDP) {
// Upper layer;
ret = bpf_skb_load_bytes(skb, offset, udph, sizeof(struct udphdr));
ret = bpf_skb_load_bytes(skb, offset, udph,
sizeof(struct udphdr));
if (ret) {
bpf_printk("not a valid IPv6 packet");
return -EFAULT;
@@ -523,12 +534,14 @@ static __always_inline int
parse_transport(const struct __sk_buff *skb, __u32 link_h_len,
struct ethhdr *ethh, struct iphdr *iph, struct ipv6hdr *ipv6h,
struct icmp6hdr *icmp6h, struct tcphdr *tcph,
struct udphdr *udph, __u8 *ihl, __u8 *l4proto) {
struct udphdr *udph, __u8 *ihl, __u8 *l4proto)
{
__u32 offset = 0;
int ret;
if (link_h_len == ETH_HLEN) {
ret = bpf_skb_load_bytes(skb, offset, ethh, sizeof(struct ethhdr));
ret = bpf_skb_load_bytes(skb, offset, ethh,
sizeof(struct ethhdr));
if (ret) {
bpf_printk("not ethernet packet");
return 1;
@@ -551,7 +564,8 @@ parse_transport(const struct __sk_buff *skb, __u32 link_h_len,
// bpf_printk("parse_transport: h_proto: %u ? %u %u", ethh->h_proto,
// bpf_htons(ETH_P_IP), bpf_htons(ETH_P_IPV6));
if (ethh->h_proto == bpf_htons(ETH_P_IP)) {
ret = bpf_skb_load_bytes(skb, offset, iph, sizeof(struct iphdr));
ret = bpf_skb_load_bytes(skb, offset, iph,
sizeof(struct iphdr));
if (ret)
return -EFAULT;
// Skip ipv4hdr and options for next hdr.
@@ -561,14 +575,16 @@ parse_transport(const struct __sk_buff *skb, __u32 link_h_len,
*l4proto = iph->protocol;
switch (iph->protocol) {
case IPPROTO_TCP: {
ret = bpf_skb_load_bytes(skb, offset, tcph, sizeof(struct tcphdr));
ret = bpf_skb_load_bytes(skb, offset, tcph,
sizeof(struct tcphdr));
if (ret) {
// Not a complete tcphdr.
return -EFAULT;
}
} break;
case IPPROTO_UDP: {
ret = bpf_skb_load_bytes(skb, offset, udph, sizeof(struct udphdr));
ret = bpf_skb_load_bytes(skb, offset, udph,
sizeof(struct udphdr));
if (ret) {
// Not a complete udphdr.
return -EFAULT;
@@ -580,7 +596,8 @@ parse_transport(const struct __sk_buff *skb, __u32 link_h_len,
*ihl = iph->ihl;
return 0;
} else if (ethh->h_proto == bpf_htons(ETH_P_IPV6)) {
ret = bpf_skb_load_bytes(skb, offset, ipv6h, sizeof(struct ipv6hdr));
ret = bpf_skb_load_bytes(skb, offset, ipv6h,
sizeof(struct ipv6hdr));
if (ret) {
bpf_printk("not a valid IPv6 packet");
return -EFAULT;
@@ -588,8 +605,8 @@ parse_transport(const struct __sk_buff *skb, __u32 link_h_len,
offset += sizeof(struct ipv6hdr);
return handle_ipv6_extensions(skb, offset, ipv6h->nexthdr, icmp6h, tcph,
udph, ihl, l4proto);
return handle_ipv6_extensions(skb, offset, ipv6h->nexthdr,
icmp6h, tcph, udph, ihl, l4proto);
} else {
/// EXPECTED: Maybe ICMP, MPLS, etc.
// bpf_printk("IP but not supported packet: protocol is %u",
@@ -621,11 +638,13 @@ route(const __u32 flag[8], const void *l4hdr, const __be32 saddr[4],
};
/// TODO: BPF_MAP_UPDATE_BATCH ?
ret = bpf_map_update_elem(&l4proto_ipversion_map, &key, &_l4proto_type, BPF_ANY);
ret = bpf_map_update_elem(&l4proto_ipversion_map, &key, &_l4proto_type,
BPF_ANY);
if (unlikely(ret))
return ret;
key = MatchType_IpVersion;
ret = bpf_map_update_elem(&l4proto_ipversion_map, &key, &_ipversion_type, BPF_ANY);
ret = bpf_map_update_elem(&l4proto_ipversion_map, &key,
&_ipversion_type, BPF_ANY);
if (unlikely(ret))
return ret;
@@ -639,25 +658,30 @@ route(const __u32 flag[8], const void *l4hdr, const __be32 saddr[4],
}
key = MatchType_SourcePort;
if (unlikely((ret = bpf_map_update_elem(&h_port_map, &key, &h_sport, BPF_ANY))))
if (unlikely((ret = bpf_map_update_elem(&h_port_map, &key, &h_sport,
BPF_ANY))))
return ret;
key = MatchType_Port;
if (unlikely((ret = bpf_map_update_elem(&h_port_map, &key, &h_dport, BPF_ANY))))
if (unlikely((ret = bpf_map_update_elem(&h_port_map, &key, &h_dport,
BPF_ANY))))
return ret;
__builtin_memcpy(lpm_key_instance.data, daddr, IPV6_BYTE_LENGTH);
key = MatchType_IpSet;
ret = bpf_map_update_elem(&lpm_key_map, &key, &lpm_key_instance, BPF_ANY);
ret = bpf_map_update_elem(&lpm_key_map, &key, &lpm_key_instance,
BPF_ANY);
if (unlikely(ret))
return ret;
__builtin_memcpy(lpm_key_instance.data, saddr, IPV6_BYTE_LENGTH);
key = MatchType_SourceIpSet;
ret = bpf_map_update_elem(&lpm_key_map, &key, &lpm_key_instance, BPF_ANY);
ret = bpf_map_update_elem(&lpm_key_map, &key, &lpm_key_instance,
BPF_ANY);
if (unlikely(ret))
return ret;
__builtin_memcpy(lpm_key_instance.data, mac, IPV6_BYTE_LENGTH);
key = MatchType_Mac;
ret = bpf_map_update_elem(&lpm_key_map, &key, &lpm_key_instance, BPF_ANY);
ret = bpf_map_update_elem(&lpm_key_map, &key, &lpm_key_instance,
BPF_ANY);
if (unlikely(ret))
return ret;
@@ -667,7 +691,7 @@ route(const __u32 flag[8], const void *l4hdr, const __be32 saddr[4],
// proxy Subrule is like: domain(suffix:baidu.com, suffix:google.com) Match
// set is like: suffix:baidu.com
volatile __u8 isdns_must_goodsubrule_badrule =
(h_dport == 53 && _l4proto_type == L4ProtoType_UDP) << 3;
(h_dport == 53 && _l4proto_type == L4ProtoType_UDP) << 3;
struct domain_routing *domain_routing;
__u32 *p_u32;
__u16 *p_u16;
@@ -687,9 +711,10 @@ route(const __u32 flag[8], const void *l4hdr, const __be32 saddr[4],
#ifdef __DEBUG_ROUTING
key = match_set->type;
bpf_printk("key(match_set->type): %llu", key);
bpf_printk("Skip to judge. bad_rule: %d, good_subrule: %d",
isdns_must_goodsubrule_badrule & 0b10,
isdns_must_goodsubrule_badrule & 0b1);
bpf_printk(
"Skip to judge. bad_rule: %d, good_subrule: %d",
isdns_must_goodsubrule_badrule & 0b10,
isdns_must_goodsubrule_badrule & 0b1);
#endif
goto before_next_loop;
}
@@ -700,11 +725,14 @@ route(const __u32 flag[8], const void *l4hdr, const __be32 saddr[4],
lpm_key = bpf_map_lookup_elem(&lpm_key_map, &key);
if (lpm_key) {
#ifdef __DEBUG_ROUTING
bpf_printk("CHECK: lpm_key_map, match_set->type: %u, not: %d, outbound: %u",
match_set->type, match_set->not, match_set->outbound);
bpf_printk(
"CHECK: lpm_key_map, match_set->type: %u, not: %d, outbound: %u",
match_set->type, match_set->not,
match_set->outbound);
bpf_printk("\tip: %pI6", lpm_key->data);
#endif
lpm = bpf_map_lookup_elem(&lpm_array_map, &match_set->index);
lpm = bpf_map_lookup_elem(&lpm_array_map,
&match_set->index);
if (unlikely(!lpm))
return -EFAULT;
if (bpf_map_lookup_elem(lpm, lpm_key)) {
@@ -713,8 +741,10 @@ route(const __u32 flag[8], const void *l4hdr, const __be32 saddr[4],
}
} else if ((p_u16 = bpf_map_lookup_elem(&h_port_map, &key))) {
#ifdef __DEBUG_ROUTING
bpf_printk("CHECK: h_port_map, match_set->type: %u, not: %d, outbound: %u",
match_set->type, match_set->not, match_set->outbound);
bpf_printk(
"CHECK: h_port_map, match_set->type: %u, not: %d, outbound: %u",
match_set->type, match_set->not,
match_set->outbound);
bpf_printk("\tport: %u, range: [%u, %u]", *p_u16,
match_set->port_range.port_start,
match_set->port_range.port_end);
@@ -723,10 +753,13 @@ route(const __u32 flag[8], const void *l4hdr, const __be32 saddr[4],
*p_u16 <= match_set->port_range.port_end) {
isdns_must_goodsubrule_badrule |= 0b10;
}
} else if ((p_u32 = bpf_map_lookup_elem(&l4proto_ipversion_map, &key))) {
} else if ((p_u32 = bpf_map_lookup_elem(&l4proto_ipversion_map,
&key))) {
#ifdef __DEBUG_ROUTING
bpf_printk("CHECK: l4proto_ipversion_map, match_set->type: %u, not: %d, outbound: %u",
match_set->type, match_set->not, match_set->outbound);
bpf_printk(
"CHECK: l4proto_ipversion_map, match_set->type: %u, not: %d, outbound: %u",
match_set->type, match_set->not,
match_set->outbound);
#endif
if (*p_u32 & *(__u32 *)&match_set->__value)
isdns_must_goodsubrule_badrule |= 0b10;
@@ -734,20 +767,26 @@ route(const __u32 flag[8], const void *l4hdr, const __be32 saddr[4],
switch (key) {
case MatchType_DomainSet:
#ifdef __DEBUG_ROUTING
bpf_printk("CHECK: domain, match_set->type: %u, not: %d, outbound: %u",
match_set->type, match_set->not, match_set->outbound);
bpf_printk(
"CHECK: domain, match_set->type: %u, not: %d, outbound: %u",
match_set->type, match_set->not,
match_set->outbound);
#endif
// Get domain routing bitmap.
domain_routing = bpf_map_lookup_elem(&domain_routing_map, daddr);
domain_routing = bpf_map_lookup_elem(
&domain_routing_map, daddr);
// We use key instead of k to pass checker.
if (domain_routing &&
(domain_routing->bitmap[i / 32] >> (i % 32)) & 1)
(domain_routing->bitmap[i / 32] >>
(i % 32)) &
1)
isdns_must_goodsubrule_badrule |= 0b10;
break;
case MatchType_ProcessName:
if (_is_wan && equal16(match_set->pname, _pname))
if (_is_wan &&
equal16(match_set->pname, _pname))
isdns_must_goodsubrule_badrule |= 0b10;
break;
case MatchType_Dscp:
@@ -762,10 +801,12 @@ route(const __u32 flag[8], const void *l4hdr, const __be32 saddr[4],
break;
default:
#ifdef __DEBUG_ROUTING
bpf_printk("CHECK: <unknown>, match_set->type: %u, not: %d, outbound: %u",
match_set->type, match_set->not, match_set->outbound);
bpf_printk(
"CHECK: <unknown>, match_set->type: %u, not: %d, outbound: %u",
match_set->type, match_set->not,
match_set->outbound);
#endif
return -EINVAL;
return -EINVAL;
}
}
@@ -780,7 +821,8 @@ before_next_loop:
// We are now at end of rule, or next match_set belongs to another
// subrule.
if ((isdns_must_goodsubrule_badrule & 0b10) > 0 == match_set->not) {
if ((isdns_must_goodsubrule_badrule & 0b10) > 0 ==
match_set->not ) {
// This subrule does not hit.
isdns_must_goodsubrule_badrule |= 0b1;
}
@@ -789,42 +831,53 @@ before_next_loop:
isdns_must_goodsubrule_badrule &= ~0b10;
}
#ifdef __DEBUG_ROUTING
bpf_printk("_bad_rule: %d", isdns_must_goodsubrule_badrule & 0b1);
bpf_printk("_bad_rule: %d",
isdns_must_goodsubrule_badrule & 0b1);
#endif
if ((match_set->outbound & OUTBOUND_LOGICAL_MASK) !=
OUTBOUND_LOGICAL_MASK) {
OUTBOUND_LOGICAL_MASK) {
// Tail of a rule (line).
// Decide whether to hit.
if (!(isdns_must_goodsubrule_badrule & 0b1)) {
#ifdef __DEBUG_ROUTING
bpf_printk("MATCHED: match_set->type: %u, match_set->not: %d",
match_set->type, match_set->not);
bpf_printk(
"MATCHED: match_set->type: %u, match_set->not: %d",
match_set->type, match_set->not );
#endif
// DNS requests should routed by control plane if outbound is not
// must_direct.
if (unlikely(match_set->outbound == OUTBOUND_MUST_RULES)) {
if (unlikely(match_set->outbound ==
OUTBOUND_MUST_RULES)) {
isdns_must_goodsubrule_badrule |= 0b100;
} else {
if (isdns_must_goodsubrule_badrule & 0b100)
if (isdns_must_goodsubrule_badrule &
0b100)
match_set->must = true;
if (!match_set->must &&
(isdns_must_goodsubrule_badrule & 0b1000)) {
(isdns_must_goodsubrule_badrule &
0b1000)) {
return (__s64)OUTBOUND_CONTROL_PLANE_ROUTING |
((__s64)match_set->mark << 8) |
((__s64)match_set->must << 40);
((__s64)match_set->mark
<< 8) |
((__s64)match_set->must
<< 40);
} else {
return (__s64)match_set->outbound |
((__s64)match_set->mark << 8) |
((__s64)match_set->must << 40);
return (__s64)match_set
->outbound |
((__s64)match_set->mark
<< 8) |
((__s64)match_set->must
<< 40);
}
}
}
isdns_must_goodsubrule_badrule &= ~0b1;
}
}
bpf_printk("No match_set hits. Did coder forget to sync common/consts/ebpf.go with enum MatchType?");
bpf_printk(
"No match_set hits. Did coder forget to sync common/consts/ebpf.go with enum MatchType?");
return -EPERM;
#undef _l4proto_type
#undef _ipversion_type
@@ -844,8 +897,7 @@ static __always_inline __u32 get_link_h_len(__u32 ifindex,
return 0;
}
static __always_inline int
assign_listener(struct __sk_buff *skb, __u8 l4proto)
static __always_inline int assign_listener(struct __sk_buff *skb, __u8 l4proto)
{
struct bpf_sock *sk;
@@ -863,11 +915,9 @@ assign_listener(struct __sk_buff *skb, __u8 l4proto)
return ret;
}
static __always_inline void
prep_redirect_to_control_plane(struct __sk_buff *skb, __u32 link_h_len,
struct tuples *tuples, __u8 l4proto,
struct ethhdr *ethh, __u8 from_wan,
struct tcphdr *tcph)
static __always_inline void prep_redirect_to_control_plane(
struct __sk_buff *skb, __u32 link_h_len, struct tuples *tuples,
__u8 l4proto, struct ethhdr *ethh, __u8 from_wan, struct tcphdr *tcph)
{
/* Redirect from L3 dev to L2 dev, e.g. wg0 -> veth */
if (!link_h_len) {
@@ -879,7 +929,8 @@ prep_redirect_to_control_plane(struct __sk_buff *skb, __u32 link_h_len,
}
bpf_skb_store_bytes(skb, offsetof(struct ethhdr, h_dest),
(void *)&PARAM.dae0peer_mac, sizeof(ethh->h_dest), 0);
(void *)&PARAM.dae0peer_mac, sizeof(ethh->h_dest),
0);
struct redirect_tuple redirect_tuple = {};
@@ -887,17 +938,22 @@ prep_redirect_to_control_plane(struct __sk_buff *skb, __u32 link_h_len,
redirect_tuple.sip.u6_addr32[3] = tuples->five.sip.u6_addr32[3];
redirect_tuple.dip.u6_addr32[3] = tuples->five.dip.u6_addr32[3];
} else {
__builtin_memcpy(&redirect_tuple.sip, &tuples->five.sip, IPV6_BYTE_LENGTH);
__builtin_memcpy(&redirect_tuple.dip, &tuples->five.dip, IPV6_BYTE_LENGTH);
__builtin_memcpy(&redirect_tuple.sip, &tuples->five.sip,
IPV6_BYTE_LENGTH);
__builtin_memcpy(&redirect_tuple.dip, &tuples->five.dip,
IPV6_BYTE_LENGTH);
}
redirect_tuple.l4proto = l4proto;
struct redirect_entry redirect_entry = {};
redirect_entry.ifindex = skb->ifindex;
redirect_entry.from_wan = from_wan;
__builtin_memcpy(redirect_entry.smac, ethh->h_source, sizeof(ethh->h_source));
__builtin_memcpy(redirect_entry.dmac, ethh->h_dest, sizeof(ethh->h_dest));
bpf_map_update_elem(&redirect_track, &redirect_tuple, &redirect_entry, BPF_ANY);
__builtin_memcpy(redirect_entry.smac, ethh->h_source,
sizeof(ethh->h_source));
__builtin_memcpy(redirect_entry.dmac, ethh->h_dest,
sizeof(ethh->h_dest));
bpf_map_update_elem(&redirect_track, &redirect_tuple, &redirect_entry,
BPF_ANY);
skb->cb[0] = TPROXY_MARK;
skb->cb[1] = 0;
@@ -946,7 +1002,7 @@ int tproxy_lan_ingress(struct __sk_buff *skb)
* ip -6 route del local default dev lo table 2023
*/
// Socket lookup and assign skb to existing socket connection.
struct bpf_sock_tuple tuple = {0};
struct bpf_sock_tuple tuple = { 0 };
__u32 tuple_size;
struct bpf_sock *sk;
__u32 flag[8];
@@ -959,8 +1015,10 @@ int tproxy_lan_ingress(struct __sk_buff *skb)
tuple.ipv4.sport = tuples.five.sport;
tuple_size = sizeof(tuple.ipv4);
} else {
__builtin_memcpy(tuple.ipv6.daddr, &tuples.five.dip, IPV6_BYTE_LENGTH);
__builtin_memcpy(tuple.ipv6.saddr, &tuples.five.sip, IPV6_BYTE_LENGTH);
__builtin_memcpy(tuple.ipv6.daddr, &tuples.five.dip,
IPV6_BYTE_LENGTH);
__builtin_memcpy(tuple.ipv6.saddr, &tuples.five.sip,
IPV6_BYTE_LENGTH);
tuple.ipv6.dport = tuples.five.dport;
tuple.ipv6.sport = tuples.five.sport;
tuple_size = sizeof(tuple.ipv6);
@@ -971,7 +1029,8 @@ int tproxy_lan_ingress(struct __sk_buff *skb)
if (tcph.syn && !tcph.ack)
goto new_connection;
sk = bpf_skc_lookup_tcp(skb, &tuple, tuple_size, PARAM.dae_netns_id, 0);
sk = bpf_skc_lookup_tcp(skb, &tuple, tuple_size,
PARAM.dae_netns_id, 0);
if (sk) {
if (sk->state != BPF_TCP_LISTEN) {
bpf_sk_release(sk);
@@ -1002,26 +1061,28 @@ new_connection:
flag[1] = IpVersionType_6;
flag[6] = tuples.dscp;
__be32 mac[4] = {
0,
0,
bpf_htonl((ethh.h_source[0] << 8) + (ethh.h_source[1])),
bpf_htonl((ethh.h_source[2] << 24) + (ethh.h_source[3] << 16) +
(ethh.h_source[4] << 8) + (ethh.h_source[5])),
0,
0,
bpf_htonl((ethh.h_source[0] << 8) + (ethh.h_source[1])),
bpf_htonl((ethh.h_source[2] << 24) + (ethh.h_source[3] << 16) +
(ethh.h_source[4] << 8) + (ethh.h_source[5])),
};
__s64 s64_ret;
s64_ret = route(flag, l4hdr, tuples.five.sip.u6_addr32, tuples.five.dip.u6_addr32, mac);
s64_ret = route(flag, l4hdr, tuples.five.sip.u6_addr32,
tuples.five.dip.u6_addr32, mac);
if (s64_ret < 0) {
bpf_printk("shot routing: %d", s64_ret);
return TC_ACT_SHOT;
}
struct routing_result routing_result = {0};
struct routing_result routing_result = { 0 };
routing_result.outbound = s64_ret;
routing_result.mark = s64_ret >> 8;
routing_result.must = (s64_ret >> 40) & 1;
routing_result.dscp = tuples.dscp;
__builtin_memcpy(routing_result.mac, ethh.h_source, sizeof(routing_result.mac));
__builtin_memcpy(routing_result.mac, ethh.h_source,
sizeof(routing_result.mac));
/// NOTICE: No pid pname info for LAN packet.
// // Maybe this packet is also in the host (such as docker) ?
// // I tried and it is false.
@@ -1033,7 +1094,8 @@ new_connection:
// }
// Save routing result.
ret = bpf_map_update_elem(&routing_tuples_map, &tuples.five, &routing_result, BPF_ANY);
ret = bpf_map_update_elem(&routing_tuples_map, &tuples.five,
&routing_result, BPF_ANY);
if (ret) {
bpf_printk("shot save routing result: %d", ret);
return TC_ACT_SHOT;
@@ -1041,11 +1103,12 @@ new_connection:
#if defined(__DEBUG_ROUTING) || defined(__PRINT_ROUTING_RESULT)
if (l4proto == IPPROTO_TCP) {
bpf_printk("tcp(lan): outbound: %u, target: %pI6:%u", ret,
tuples.five.dip.u6_addr32, bpf_ntohs(tuples.five.dport));
tuples.five.dip.u6_addr32,
bpf_ntohs(tuples.five.dport));
} else {
bpf_printk("udp(lan): outbound: %u, target: %pI6:%u",
routing_result.outbound, tuples.five.dip.u6_addr32,
bpf_ntohs(tuples.five.dport));
bpf_ntohs(tuples.five.dport));
}
#endif
if (routing_result.outbound == OUTBOUND_DIRECT) {
@@ -1056,7 +1119,7 @@ new_connection:
}
// Check outbound connectivity in specific ipversion and l4proto.
struct outbound_connectivity_query q = {0};
struct outbound_connectivity_query q = { 0 };
q.outbound = routing_result.outbound;
q.ipversion = skb->protocol == bpf_htons(ETH_P_IP) ? 4 : 6;
@@ -1072,7 +1135,8 @@ new_connection:
// Assign to control plane.
control_plane:
prep_redirect_to_control_plane(skb, link_h_len, &tuples, l4proto, &ethh, 0, &tcph);
prep_redirect_to_control_plane(skb, link_h_len, &tuples, l4proto, &ethh,
0, &tcph);
return bpf_redirect(PARAM.dae0_ifindex, 0);
direct:
@@ -1085,7 +1149,8 @@ block:
// Cookie will change after the first packet, so we just use it for
// handshake.
static __always_inline bool pid_is_control_plane(struct __sk_buff *skb,
struct pid_pname **p) {
struct pid_pname **p)
{
struct pid_pname *pid_pname;
__u64 cookie = bpf_get_socket_cookie(skb);
@@ -1158,7 +1223,7 @@ int tproxy_wan_egress(struct __sk_buff *skb)
return TC_ACT_OK;
bool tcp_state_syn;
int ret = parse_transport(skb, link_h_len, &ethh, &iph, &ipv6h, &icmp6h,
&tcph, &udph, &ihl, &l4proto);
&tcph, &udph, &ihl, &l4proto);
if (ret)
return TC_ACT_OK;
if (l4proto == IPPROTO_ICMPV6)
@@ -1181,7 +1246,7 @@ int tproxy_wan_egress(struct __sk_buff *skb)
if (unlikely(tcp_state_syn)) {
// New TCP connection.
// bpf_printk("[%X]New Connection", bpf_ntohl(tcph.seq));
__u32 flag[8] = {L4ProtoType_TCP}; // TCP
__u32 flag[8] = { L4ProtoType_TCP }; // TCP
if (skb->protocol == bpf_htons(ETH_P_IP))
flag[1] = IpVersionType_4;
@@ -1194,14 +1259,18 @@
}
if (pid_pname) {
// 2, 3, 4, 5
__builtin_memcpy(&flag[2], pid_pname->pname, TASK_COMM_LEN);
__builtin_memcpy(&flag[2], pid_pname->pname,
TASK_COMM_LEN);
}
__be32 mac[4] = {
0,
0,
bpf_htonl((ethh.h_source[0] << 8) + (ethh.h_source[1])),
bpf_htonl((ethh.h_source[2] << 24) + (ethh.h_source[3] << 16) +
(ethh.h_source[4] << 8) + (ethh.h_source[5])),
bpf_htonl((ethh.h_source[0] << 8) +
(ethh.h_source[1])),
bpf_htonl((ethh.h_source[2] << 24) +
(ethh.h_source[3] << 16) +
(ethh.h_source[4] << 8) +
(ethh.h_source[5])),
};
__s64 s64_ret;
@@ -1220,16 +1289,19 @@
// Print only new connection.
__u32 pid = pid_pname ? pid_pname->pid : 0;
bpf_printk("tcp(wan): from %pI6:%u [PID %u]", tuples.five.sip.u6_addr32,
bpf_printk("tcp(wan): from %pI6:%u [PID %u]",
tuples.five.sip.u6_addr32,
bpf_ntohs(tuples.five.sport), pid);
bpf_printk("tcp(wan): outbound: %u, %pI6:%u", outbound,
tuples.five.dip.u6_addr32, bpf_ntohs(tuples.five.dport));
tuples.five.dip.u6_addr32,
bpf_ntohs(tuples.five.dport));
#endif
} else {
// bpf_printk("[%X]Old Connection", bpf_ntohl(tcph.seq));
// The TCP connection exists.
struct routing_result *routing_result =
bpf_map_lookup_elem(&routing_tuples_map, &tuples.five);
bpf_map_lookup_elem(&routing_tuples_map,
&tuples.five);
if (!routing_result) {
// Do not impact previous connections and server connections.
@@ -1251,7 +1323,7 @@ int tproxy_wan_egress(struct __sk_buff *skb)
// Rewrite to control plane.
// Check outbound connectivity in specific ipversion and l4proto.
struct outbound_connectivity_query q = {0};
struct outbound_connectivity_query q = { 0 };
q.outbound = outbound;
q.ipversion = skb->protocol == bpf_htons(ETH_P_IP) ? 4 : 6;
@@ -1260,7 +1332,8 @@
alive = bpf_map_lookup_elem(&outbound_connectivity_map, &q);
if (alive && *alive == 0 &&
!(l4proto == IPPROTO_UDP && tuples.five.dport == bpf_htons(53))) {
!(l4proto == IPPROTO_UDP &&
tuples.five.dport == bpf_htons(53))) {
// Outbound is not alive. Dns is an exception.
return TC_ACT_SHOT;
}
@@ -1276,7 +1349,8 @@ int tproxy_wan_egress(struct __sk_buff *skb)
sizeof(ethh.h_source));
if (pid_pname) {
__builtin_memcpy(routing_result.pname,
pid_pname->pname, TASK_COMM_LEN);
pid_pname->pname,
TASK_COMM_LEN);
routing_result.pid = pid_pname->pid;
}
bpf_map_update_elem(&routing_tuples_map, &tuples.five,
@@ -1285,7 +1359,7 @@ int tproxy_wan_egress(struct __sk_buff *skb)
} else if (l4proto == IPPROTO_UDP) {
// Routing. It decides if we redirect traffic to control plane.
__u32 flag[8] = {L4ProtoType_UDP};
__u32 flag[8] = { L4ProtoType_UDP };
if (skb->protocol == bpf_htons(ETH_P_IP))
flag[1] = IpVersionType_4;
@@ -1300,14 +1374,16 @@
}
if (pid_pname) {
// 2, 3, 4, 5
__builtin_memcpy(&flag[2], pid_pname->pname, TASK_COMM_LEN);
__builtin_memcpy(&flag[2], pid_pname->pname,
TASK_COMM_LEN);
}
__be32 mac[4] = {
0,
0,
bpf_htonl((ethh.h_source[0] << 8) + (ethh.h_source[1])),
bpf_htonl((ethh.h_source[2] << 24) + (ethh.h_source[3] << 16) +
(ethh.h_source[4] << 8) + (ethh.h_source[5])),
0,
0,
bpf_htonl((ethh.h_source[0] << 8) + (ethh.h_source[1])),
bpf_htonl((ethh.h_source[2] << 24) +
(ethh.h_source[3] << 16) +
(ethh.h_source[4] << 8) + (ethh.h_source[5])),
};
__s64 s64_ret;
@@ -1324,7 +1400,8 @@
routing_result.mark = s64_ret >> 8;
routing_result.must = (s64_ret >> 40) & 1;
routing_result.dscp = tuples.dscp;
__builtin_memcpy(routing_result.mac, ethh.h_source, sizeof(ethh.h_source));
__builtin_memcpy(routing_result.mac, ethh.h_source,
sizeof(ethh.h_source));
if (pid_pname) {
__builtin_memcpy(routing_result.pname, pid_pname->pname,
TASK_COMM_LEN);
@@ -1336,26 +1413,28 @@
__u32 pid = pid_pname ? pid_pname->pid : 0;
bpf_printk("udp(wan): from %pI6:%u [PID %u]",
tuples.five.sip.u6_addr32, bpf_ntohs(tuples.five.sport),
pid);
tuples.five.sip.u6_addr32,
bpf_ntohs(tuples.five.sport), pid);
bpf_printk("udp(wan): outbound: %u, %pI6:%u",
routing_result.outbound, tuples.five.dip.u6_addr32,
bpf_ntohs(tuples.five.dport));
#endif
if (routing_result.outbound == OUTBOUND_DIRECT && routing_result.mark == 0
// If mark is not zero, we should re-route it, so we send it to control
// plane in WAN.
if (routing_result.outbound == OUTBOUND_DIRECT &&
routing_result.mark == 0
// If mark is not zero, we should re-route it, so we send it to control
// plane in WAN.
) {
return TC_ACT_OK;
} else if (unlikely(routing_result.outbound == OUTBOUND_BLOCK)) {
} else if (unlikely(routing_result.outbound ==
OUTBOUND_BLOCK)) {
return TC_ACT_SHOT;
}
// Rewrite to control plane.
// Check outbound connectivity in specific ipversion and l4proto.
struct outbound_connectivity_query q = {0};
struct outbound_connectivity_query q = { 0 };
q.outbound = routing_result.outbound;
q.ipversion = skb->protocol == bpf_htons(ETH_P_IP) ? 4 : 6;
@@ -1364,13 +1443,15 @@
alive = bpf_map_lookup_elem(&outbound_connectivity_map, &q);
if (alive && *alive == 0 &&
!(l4proto == IPPROTO_UDP && tuples.five.dport == bpf_htons(53))) {
!(l4proto == IPPROTO_UDP &&
tuples.five.dport == bpf_htons(53))) {
// Outbound is not alive. Dns is an exception.
return TC_ACT_SHOT;
}
}
prep_redirect_to_control_plane(skb, link_h_len, &tuples, l4proto, &ethh, 1, &tcph);
prep_redirect_to_control_plane(skb, link_h_len, &tuples, l4proto, &ethh,
1, &tcph);
return bpf_redirect(PARAM.dae0_ifindex, 0);
}
@@ -1425,20 +1506,24 @@ int tproxy_dae0_ingress(struct __sk_buff *skb)
redirect_tuple.sip.u6_addr32[3] = tuples.five.dip.u6_addr32[3];
redirect_tuple.dip.u6_addr32[3] = tuples.five.sip.u6_addr32[3];
} else {
__builtin_memcpy(&redirect_tuple.sip, &tuples.five.dip, IPV6_BYTE_LENGTH);
__builtin_memcpy(&redirect_tuple.dip, &tuples.five.sip, IPV6_BYTE_LENGTH);
__builtin_memcpy(&redirect_tuple.sip, &tuples.five.dip,
IPV6_BYTE_LENGTH);
__builtin_memcpy(&redirect_tuple.dip, &tuples.five.sip,
IPV6_BYTE_LENGTH);
}
redirect_tuple.l4proto = l4proto;
struct redirect_entry *redirect_entry = bpf_map_lookup_elem(&redirect_track,
&redirect_tuple);
struct redirect_entry *redirect_entry =
bpf_map_lookup_elem(&redirect_track, &redirect_tuple);
if (!redirect_entry)
return TC_ACT_OK;
bpf_skb_store_bytes(skb, offsetof(struct ethhdr, h_source),
redirect_entry->dmac, sizeof(redirect_entry->dmac), 0);
redirect_entry->dmac, sizeof(redirect_entry->dmac),
0);
bpf_skb_store_bytes(skb, offsetof(struct ethhdr, h_dest),
redirect_entry->smac, sizeof(redirect_entry->smac), 0);
redirect_entry->smac, sizeof(redirect_entry->smac),
0);
__u32 type = redirect_entry->from_wan ? PACKET_HOST : PACKET_OTHERHOST;
bpf_skb_change_type(skb, type);
@@ -1460,8 +1545,8 @@ static __always_inline int _update_map_elem_by_cookie(const __u64 cookie)
int ret;
// Build value.
struct pid_pname val = {0};
char buf[MAX_ARG_SCANNER_BUFFER_SIZE] = {0};
struct pid_pname val = { 0 };
char buf[MAX_ARG_SCANNER_BUFFER_SIZE] = { 0 };
struct task_struct *current = (void *)bpf_get_current_task();
unsigned long arg_start = BPF_CORE_READ(current, mm, arg_start);
unsigned long arg_end = BPF_CORE_READ(current, mm, arg_end);
@@ -1473,7 +1558,7 @@ static __always_inline int _update_map_elem_by_cookie(const __u64 cookie)
unsigned long loc, j, last_slash = -1;
#pragma unroll
for (loc = 0, j = 0; j < MAX_ARG_LEN_TO_PROBE;
++j, loc = ((loc + 1) & (MAX_ARG_SCANNER_BUFFER_SIZE - 1))) {
++j, loc = ((loc + 1) & (MAX_ARG_SCANNER_BUFFER_SIZE - 1))) {
// volatile unsigned long k = j; // Cheat to unroll.
if (unlikely(arg_start + j >= arg_end))
break;
@@ -1487,7 +1572,8 @@ static __always_inline int _update_map_elem_by_cookie(const __u64 cookie)
to_read = MAX_ARG_SCANNER_BUFFER_SIZE;
else
buf[to_read] = 0;
ret = bpf_core_read_user(&buf, to_read, (const void *)(arg_start + j));
ret = bpf_core_read_user(&buf, to_read,
(const void *)(arg_start + j));
if (ret) {
// bpf_printk("failed to read process name.0: [%ld, %ld]", arg_start,
// arg_end);
@@ -1505,7 +1591,8 @@ static __always_inline int _update_map_elem_by_cookie(const __u64 cookie)
if (length_cpy > TASK_COMM_LEN)
length_cpy = TASK_COMM_LEN;
ret = bpf_core_read_user(&val.pname, length_cpy, (const void *)(arg_start + last_slash));
ret = bpf_core_read_user(&val.pname, length_cpy,
(const void *)(arg_start + last_slash));
if (ret) {
bpf_printk("failed to read process name.1: %d", ret);
return ret;
@@ -1527,7 +1614,8 @@ static __always_inline int _update_map_elem_by_cookie(const __u64 cookie)
bpf_map_update_elem(&tgid_pname_map, &val.pid, &val.pname, BPF_ANY);
#ifdef __PRINT_SETUP_PROCESS_CONNNECTION
bpf_printk("setup_mapping: %llu -> %s (%d)", cookie, val.pname, val.pid);
bpf_printk("setup_mapping: %llu -> %s (%d)", cookie, val.pname,
val.pid);
#endif
return 0;
}
@@ -1539,11 +1627,11 @@ static __always_inline int update_map_elem_by_cookie(const __u64 cookie)
ret = _update_map_elem_by_cookie(cookie);
if (ret) {
// Fallback to only write pid to avoid loop due to packets sent by dae.
struct pid_pname val = {0};
struct pid_pname val = { 0 };
val.pid = bpf_get_current_pid_tgid() >> 32;
__u32(*pname)[TASK_COMM_LEN] = bpf_map_lookup_elem(&tgid_pname_map,
&val.pid);
__u32(*pname)[TASK_COMM_LEN] =
bpf_map_lookup_elem(&tgid_pname_map, &val.pid);
if (pname) {
__builtin_memcpy(val.pname, *pname, TASK_COMM_LEN);
ret = 0;
@@ -1642,42 +1730,48 @@ int local_tcp_sockops(struct bpf_sock_ops *skops)
switch (skops->op) {
case BPF_SOCK_OPS_PASSIVE_ESTABLISHED_CB: // dae sockets
{
struct tuples_key rev_tuple = {};
{
struct tuples_key rev_tuple = {};
rev_tuple.l4proto = IPPROTO_TCP;
rev_tuple.sport = tuple.dport;
rev_tuple.dport = tuple.sport;
__builtin_memcpy(&rev_tuple.sip, &tuple.dip, IPV6_BYTE_LENGTH);
__builtin_memcpy(&rev_tuple.dip, &tuple.sip, IPV6_BYTE_LENGTH);
rev_tuple.l4proto = IPPROTO_TCP;
rev_tuple.sport = tuple.dport;
rev_tuple.dport = tuple.sport;
__builtin_memcpy(&rev_tuple.sip, &tuple.dip, IPV6_BYTE_LENGTH);
__builtin_memcpy(&rev_tuple.dip, &tuple.sip, IPV6_BYTE_LENGTH);
struct routing_result *routing_result;
struct routing_result *routing_result;
routing_result = bpf_map_lookup_elem(&routing_tuples_map, &rev_tuple);
if (!routing_result || !routing_result->pid)
break;
if (!bpf_sock_hash_update(skops, &fast_sock, &tuple, BPF_ANY))
bpf_printk("fast_sock added: %pI4:%lu -> %pI4:%lu",
&tuple.sip.u6_addr32[3], bpf_ntohs(tuple.sport),
&tuple.dip.u6_addr32[3], bpf_ntohs(tuple.dport));
routing_result =
bpf_map_lookup_elem(&routing_tuples_map, &rev_tuple);
if (!routing_result || !routing_result->pid)
break;
}
if (!bpf_sock_hash_update(skops, &fast_sock, &tuple, BPF_ANY))
bpf_printk("fast_sock added: %pI4:%lu -> %pI4:%lu",
&tuple.sip.u6_addr32[3],
bpf_ntohs(tuple.sport),
&tuple.dip.u6_addr32[3],
bpf_ntohs(tuple.dport));
break;
}
case BPF_SOCK_OPS_ACTIVE_ESTABLISHED_CB: // local client sockets
{
struct routing_result *routing_result;
{
struct routing_result *routing_result;
routing_result = bpf_map_lookup_elem(&routing_tuples_map, &tuple);
if (!routing_result || !routing_result->pid)
break;
if (!bpf_sock_hash_update(skops, &fast_sock, &tuple, BPF_ANY))
bpf_printk("fast_sock added: %pI4:%lu -> %pI4:%lu",
&tuple.sip.u6_addr32[3], bpf_ntohs(tuple.sport),
&tuple.dip.u6_addr32[3], bpf_ntohs(tuple.dport));
routing_result =
bpf_map_lookup_elem(&routing_tuples_map, &tuple);
if (!routing_result || !routing_result->pid)
break;
}
if (!bpf_sock_hash_update(skops, &fast_sock, &tuple, BPF_ANY))
bpf_printk("fast_sock added: %pI4:%lu -> %pI4:%lu",
&tuple.sip.u6_addr32[3],
bpf_ntohs(tuple.sport),
&tuple.dip.u6_addr32[3],
bpf_ntohs(tuple.dport));
break;
}
default:
break;
@@ -1712,10 +1806,13 @@ int sk_msg_fast_redirect(struct sk_msg_md *msg)
return SK_PASS;
}
if (bpf_msg_redirect_hash(msg, &fast_sock, &rev_tuple, BPF_F_INGRESS) == SK_PASS)
if (bpf_msg_redirect_hash(msg, &fast_sock, &rev_tuple, BPF_F_INGRESS) ==
SK_PASS)
bpf_printk("tcp fast redirect: %pI4:%lu -> %pI4:%lu",
&rev_tuple.sip.u6_addr32[3], bpf_ntohs(rev_tuple.sport),
&rev_tuple.dip.u6_addr32[3], bpf_ntohs(rev_tuple.dport));
&rev_tuple.sip.u6_addr32[3],
bpf_ntohs(rev_tuple.sport),
&rev_tuple.dip.u6_addr32[3],
bpf_ntohs(rev_tuple.dport));
return SK_PASS;
}
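
One note for readers tracing the route() changes above: the verdict is still packed into a single __s64 return value, with outbound in the low byte, mark shifted left by 8, and must at bit 40, exactly as the ingress/egress hooks unpack it into struct routing_result. A self-contained sketch of that encoding (plain stdint types stand in for the kernel's __u8/__u32/__s64; names here are illustrative only):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Sketch of route()'s verdict encoding: outbound | mark << 8 | must << 40. */
static int64_t pack_verdict(uint8_t outbound, uint32_t mark, bool must)
{
        return (int64_t)outbound | ((int64_t)mark << 8) |
               ((int64_t)must << 40);
}

int main(void)
{
        int64_t v = pack_verdict(2, 0xdeadbeef, true);

        /* Unpacking mirrors tproxy_lan_ingress()/tproxy_wan_egress(). */
        assert((uint8_t)v == 2);                  /* routing_result.outbound */
        assert((uint32_t)(v >> 8) == 0xdeadbeef); /* routing_result.mark */
        assert(((v >> 40) & 1) == 1);             /* routing_result.must */
        return 0;
}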